Dataset schema (column type and observed range):
id: int32 (0 to 252k)
repo: string (length 7 to 55)
path: string (length 4 to 127)
func_name: string (length 1 to 88)
original_string: string (length 75 to 19.8k)
language: string (1 class: python)
code: string (length 75 to 19.8k; duplicates original_string in these rows)
code_tokens: list
docstring: string (length 3 to 17.3k)
docstring_tokens: list
sha: string (length 40)
url: string (length 87 to 242)
16,100
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_alert_destination_count
def get_alert_destination_count(self, channel=None):
    """Get the number of supported alert destinations

    :param channel: Channel for alerts to be examined, defaults to current
    """
    if channel is None:
        channel = self.get_network_channel()
    rqdata = (channel, 0x11, 0, 0)
    rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata)
    return ord(rsp['data'][1])
python
[ "def", "get_alert_destination_count", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "rqdata", "=", "(", "channel", ",", "0x11", ",", "0", ",", "0", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "rqdata", ")", "return", "ord", "(", "rsp", "[", "'data'", "]", "[", "1", "]", ")" ]
Get the number of supported alert destinations :param channel: Channel for alerts to be examined, defaults to current
[ "Get", "the", "number", "of", "supported", "alert", "destinations" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L996-L1005
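A minimal usage sketch for the function above. It assumes a pyghmi session built with the bmc/userid/password keyword arguments of pyghmi.ipmi.command.Command; the address and credentials are placeholders.

from pyghmi.ipmi import command

# Placeholder BMC address and credentials; substitute real values.
ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
# Ask the BMC how many alert destination slots its LAN channel supports.
count = ipmicmd.get_alert_destination_count()
print('Supported alert destinations:', count)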
16,101
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_alert_destination
def get_alert_destination(self, destination=0, channel=None):
    """Get alert destination

    Get a specified alert destination. Returns a dictionary of relevant
    configuration. The following keys may be present:
    acknowledge_required - Indicates whether the target expects an
        acknowledgement
    acknowledge_timeout - How long it will wait for an acknowledgment
        before retrying
    retries - How many attempts will be made to deliver the alert to
        this destination
    address_format - 'ipv4' or 'ipv6'
    address - The IP address of the target

    :param destination: The destination number. Defaults to 0
    :param channel: The channel for alerting. Defaults to current channel
    """
    destinfo = {}
    if channel is None:
        channel = self.get_network_channel()
    rqdata = (channel, 18, destination, 0)
    rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata)
    dtype, acktimeout, retries = struct.unpack('BBB', rsp['data'][2:])
    destinfo['acknowledge_required'] = dtype & 0b10000000 == 0b10000000
    # Ignore destination type for now...
    if destinfo['acknowledge_required']:
        destinfo['acknowledge_timeout'] = acktimeout
    destinfo['retries'] = retries
    rqdata = (channel, 19, destination, 0)
    rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata)
    if ord(rsp['data'][2]) & 0b11110000 == 0:
        destinfo['address_format'] = 'ipv4'
        destinfo['address'] = socket.inet_ntoa(rsp['data'][4:8])
    elif ord(rsp['data'][2]) & 0b11110000 == 0b10000:
        destinfo['address_format'] = 'ipv6'
        destinfo['address'] = socket.inet_ntop(socket.AF_INET6,
                                               rsp['data'][3:])
    return destinfo
python
[ "def", "get_alert_destination", "(", "self", ",", "destination", "=", "0", ",", "channel", "=", "None", ")", ":", "destinfo", "=", "{", "}", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "rqdata", "=", "(", "channel", ",", "18", ",", "destination", ",", "0", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "rqdata", ")", "dtype", ",", "acktimeout", ",", "retries", "=", "struct", ".", "unpack", "(", "'BBB'", ",", "rsp", "[", "'data'", "]", "[", "2", ":", "]", ")", "destinfo", "[", "'acknowledge_required'", "]", "=", "dtype", "&", "0b10000000", "==", "0b10000000", "# Ignore destination type for now...", "if", "destinfo", "[", "'acknowledge_required'", "]", ":", "destinfo", "[", "'acknowledge_timeout'", "]", "=", "acktimeout", "destinfo", "[", "'retries'", "]", "=", "retries", "rqdata", "=", "(", "channel", ",", "19", ",", "destination", ",", "0", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "rqdata", ")", "if", "ord", "(", "rsp", "[", "'data'", "]", "[", "2", "]", ")", "&", "0b11110000", "==", "0", ":", "destinfo", "[", "'address_format'", "]", "=", "'ipv4'", "destinfo", "[", "'address'", "]", "=", "socket", ".", "inet_ntoa", "(", "rsp", "[", "'data'", "]", "[", "4", ":", "8", "]", ")", "elif", "ord", "(", "rsp", "[", "'data'", "]", "[", "2", "]", ")", "&", "0b11110000", "==", "0b10000", ":", "destinfo", "[", "'address_format'", "]", "=", "'ipv6'", "destinfo", "[", "'address'", "]", "=", "socket", ".", "inet_ntop", "(", "socket", ".", "AF_INET6", ",", "rsp", "[", "'data'", "]", "[", "3", ":", "]", ")", "return", "destinfo" ]
Get alert destination

Get a specified alert destination. Returns a dictionary of relevant configuration. The following keys may be present:
acknowledge_required - Indicates whether the target expects an acknowledgement
acknowledge_timeout - How long it will wait for an acknowledgment before retrying
retries - How many attempts will be made to deliver the alert to this destination
address_format - 'ipv4' or 'ipv6'
address - The IP address of the target

:param destination: The destination number. Defaults to 0
:param channel: The channel for alerting. Defaults to current channel
[ "Get", "alert", "destination" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1007-L1044
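A hedged sketch combining the two functions above: enumerate destination slots and print any with a configured address. It reuses the ipmicmd session from the first sketch; probing slots 0..count-1 is an assumption about this BMC's numbering.

# Slots without an IPv4/IPv6 target simply lack the 'address' key.
for dest in range(ipmicmd.get_alert_destination_count()):
    info = ipmicmd.get_alert_destination(destination=dest)
    if 'address' in info:
        print(dest, info['address_format'], info['address'],
              'retries=%d' % info['retries'])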
16,102
openstack/pyghmi
pyghmi/ipmi/command.py
Command.clear_alert_destination
def clear_alert_destination(self, destination=0, channel=None):
    """Clear an alert destination

    Remove the specified alert destination configuration.

    :param destination: The destination to clear (defaults to 0)
    """
    if channel is None:
        channel = self.get_network_channel()
    self.set_alert_destination(
        '0.0.0.0', False, 0, 0, destination, channel)
python
[ "def", "clear_alert_destination", "(", "self", ",", "destination", "=", "0", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "self", ".", "set_alert_destination", "(", "'0.0.0.0'", ",", "False", ",", "0", ",", "0", ",", "destination", ",", "channel", ")" ]
Clear an alert destination Remove the specified alert destination configuration. :param destination: The destination to clear (defaults to 0)
[ "Clear", "an", "alert", "destination" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1046-L1056
16,103
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_alert_community
def set_alert_community(self, community, channel=None):
    """Set the community string for alerts

    This configures the string the BMC will use as the community string
    for PET alerts/traps.

    :param community: The community string
    :param channel: The LAN channel (defaults to auto detect)
    """
    if channel is None:
        channel = self.get_network_channel()
    community = community.encode('utf-8')
    community += b'\x00' * (18 - len(community))
    cmddata = bytearray((channel, 16))
    cmddata += community
    self.xraw_command(netfn=0xc, command=1, data=cmddata)
python
[ "def", "set_alert_community", "(", "self", ",", "community", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "community", "=", "community", ".", "encode", "(", "'utf-8'", ")", "community", "+=", "b'\\x00'", "*", "(", "18", "-", "len", "(", "community", ")", ")", "cmddata", "=", "bytearray", "(", "(", "channel", ",", "16", ")", ")", "cmddata", "+=", "community", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "cmddata", ")" ]
Set the community string for alerts This configures the string the BMC will use as the community string for PET alerts/traps. :param community: The community string :param channel: The LAN channel (defaults to auto detect)
[ "Set", "the", "community", "string", "for", "alerts" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1058-L1073
16,104
openstack/pyghmi
pyghmi/ipmi/command.py
Command._assure_alert_policy
def _assure_alert_policy(self, channel, destination):
    """Make sure an alert policy exists

    Each policy will be a dict with the following keys:
    -'index' - The policy index number

    :returns: An iterable of currently configured alert policies
    """
    # First we do a get PEF configuration parameters to get the count
    # of entries. We have no guarantee that the meaningful data will
    # be contiguous
    rsp = self.xraw_command(netfn=4, command=0x13, data=(8, 0, 0))
    numpol = ord(rsp['data'][1])
    desiredchandest = (channel << 4) | destination
    availpolnum = None
    for polnum in range(1, numpol + 1):
        currpol = self.xraw_command(netfn=4, command=0x13,
                                    data=(9, polnum, 0))
        polidx, chandest = struct.unpack_from('>BB', currpol['data'][2:4])
        if not polidx & 0b1000:
            if availpolnum is None:
                availpolnum = polnum
            continue
        if chandest == desiredchandest:
            return True
    # If chandest did not equal desiredchandest ever, we need to use a slot
    if availpolnum is None:
        raise Exception("No available alert policy entry")
    # 24 = 1 << 4 | 8
    # 1 == set to which this rule belongs
    # 8 == 0b1000, in other words, enable this policy, always send to
    # indicated destination
    self.xraw_command(netfn=4, command=0x12,
                      data=(9, availpolnum, 24, desiredchandest, 0))
python
[ "def", "_assure_alert_policy", "(", "self", ",", "channel", ",", "destination", ")", ":", "# First we do a get PEF configuration parameters to get the count", "# of entries. We have no guarantee that the meaningful data will", "# be contiguous", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "4", ",", "command", "=", "0x13", ",", "data", "=", "(", "8", ",", "0", ",", "0", ")", ")", "numpol", "=", "ord", "(", "rsp", "[", "'data'", "]", "[", "1", "]", ")", "desiredchandest", "=", "(", "channel", "<<", "4", ")", "|", "destination", "availpolnum", "=", "None", "for", "polnum", "in", "range", "(", "1", ",", "numpol", "+", "1", ")", ":", "currpol", "=", "self", ".", "xraw_command", "(", "netfn", "=", "4", ",", "command", "=", "0x13", ",", "data", "=", "(", "9", ",", "polnum", ",", "0", ")", ")", "polidx", ",", "chandest", "=", "struct", ".", "unpack_from", "(", "'>BB'", ",", "currpol", "[", "'data'", "]", "[", "2", ":", "4", "]", ")", "if", "not", "polidx", "&", "0b1000", ":", "if", "availpolnum", "is", "None", ":", "availpolnum", "=", "polnum", "continue", "if", "chandest", "==", "desiredchandest", ":", "return", "True", "# If chandest did not equal desiredchandest ever, we need to use a slot", "if", "availpolnum", "is", "None", ":", "raise", "Exception", "(", "\"No available alert policy entry\"", ")", "# 24 = 1 << 4 | 8", "# 1 == set to which this rule belongs", "# 8 == 0b1000, in other words, enable this policy, always send to", "# indicated destination", "self", ".", "xraw_command", "(", "netfn", "=", "4", ",", "command", "=", "0x12", ",", "data", "=", "(", "9", ",", "availpolnum", ",", "24", ",", "desiredchandest", ",", "0", ")", ")" ]
Make sure an alert policy exists Each policy will be a dict with the following keys: -'index' - The policy index number :returns: An iterable of currently configured alert policies
[ "Make", "sure", "an", "alert", "policy", "exists" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1075-L1108
16,105
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_alert_community
def get_alert_community(self, channel=None):
    """Get the current community string for alerts

    Returns the community string that will be in SNMP traps from this
    BMC

    :param channel: The channel to get configuration for, autodetect by
        default
    :returns: The community string
    """
    if channel is None:
        channel = self.get_network_channel()
    rsp = self.xraw_command(netfn=0xc, command=2, data=(channel, 16, 0, 0))
    return rsp['data'][1:].partition('\x00')[0]
python
[ "def", "get_alert_community", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "(", "channel", ",", "16", ",", "0", ",", "0", ")", ")", "return", "rsp", "[", "'data'", "]", "[", "1", ":", "]", ".", "partition", "(", "'\\x00'", ")", "[", "0", "]" ]
Get the current community string for alerts Returns the community string that will be in SNMP traps from this BMC :param channel: The channel to get configuration for, autodetect by default :returns: The community string
[ "Get", "the", "current", "community", "string", "for", "alerts" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1110-L1123
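A round-trip sketch for set_alert_community and get_alert_community above, reusing the ipmicmd session from the first sketch. The set path zero-pads to 18 bytes, so strings longer than 18 bytes would overflow the IPMI field.

# 'public' is an illustrative community string; keep it at or under
# 18 bytes to match the padding in set_alert_community.
ipmicmd.set_alert_community('public')
print(ipmicmd.get_alert_community())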
16,106
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_alert_destination
def set_alert_destination(self, ip=None, acknowledge_required=None,
                          acknowledge_timeout=None, retries=None,
                          destination=0, channel=None):
    """Configure one or more parameters of an alert destination

    If any parameter is 'None' (default), that parameter is left
    unchanged. Otherwise, all given parameters are set by this command.

    :param ip: IP address of the destination. It is currently expected
        that the calling code will handle any name lookup and present
        this data as IP address.
    :param acknowledge_required: Whether or not the target should expect
        an acknowledgement from this alert target.
    :param acknowledge_timeout: Time to wait for acknowledgement if enabled
    :param retries: How many times to attempt transmit of an alert.
    :param destination: Destination index, defaults to 0.
    :param channel: The channel to configure the alert on. Defaults to
        current
    """
    if channel is None:
        channel = self.get_network_channel()
    if ip is not None:
        destdata = bytearray((channel, 19, destination))
        try:
            parsedip = socket.inet_aton(ip)
            destdata.extend((0, 0))
            destdata.extend(parsedip)
            destdata.extend(b'\x00\x00\x00\x00\x00\x00')
        except socket.error:
            if self._supports_standard_ipv6:
                parsedip = socket.inet_pton(socket.AF_INET6, ip)
                destdata.append(0b10000000)
                destdata.extend(parsedip)
            else:
                destdata = None
                self.oem_init()
                self._oem.set_alert_ipv6_destination(ip, destination,
                                                     channel)
        if destdata:
            self.xraw_command(netfn=0xc, command=1, data=destdata)
    if (acknowledge_required is not None or retries is not None
            or acknowledge_timeout is not None):
        currtype = self.xraw_command(netfn=0xc, command=2, data=(
            channel, 18, destination, 0))
        if currtype['data'][0] != b'\x11':
            raise exc.PyghmiException("Unknown parameter format")
        currtype = bytearray(currtype['data'][1:])
        if acknowledge_required is not None:
            if acknowledge_required:
                currtype[1] |= 0b10000000
            else:
                currtype[1] &= 0b1111111
        if acknowledge_timeout is not None:
            currtype[2] = acknowledge_timeout
        if retries is not None:
            currtype[3] = retries
        destreq = bytearray((channel, 18))
        destreq.extend(currtype)
        self.xraw_command(netfn=0xc, command=1, data=destreq)
    if not ip == '0.0.0.0':
        self._assure_alert_policy(channel, destination)
python
[ "def", "set_alert_destination", "(", "self", ",", "ip", "=", "None", ",", "acknowledge_required", "=", "None", ",", "acknowledge_timeout", "=", "None", ",", "retries", "=", "None", ",", "destination", "=", "0", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "if", "ip", "is", "not", "None", ":", "destdata", "=", "bytearray", "(", "(", "channel", ",", "19", ",", "destination", ")", ")", "try", ":", "parsedip", "=", "socket", ".", "inet_aton", "(", "ip", ")", "destdata", ".", "extend", "(", "(", "0", ",", "0", ")", ")", "destdata", ".", "extend", "(", "parsedip", ")", "destdata", ".", "extend", "(", "b'\\x00\\x00\\x00\\x00\\x00\\x00'", ")", "except", "socket", ".", "error", ":", "if", "self", ".", "_supports_standard_ipv6", ":", "parsedip", "=", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "ip", ")", "destdata", ".", "append", "(", "0b10000000", ")", "destdata", ".", "extend", "(", "parsedip", ")", "else", ":", "destdata", "=", "None", "self", ".", "oem_init", "(", ")", "self", ".", "_oem", ".", "set_alert_ipv6_destination", "(", "ip", ",", "destination", ",", "channel", ")", "if", "destdata", ":", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "destdata", ")", "if", "(", "acknowledge_required", "is", "not", "None", "or", "retries", "is", "not", "None", "or", "acknowledge_timeout", "is", "not", "None", ")", ":", "currtype", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "(", "channel", ",", "18", ",", "destination", ",", "0", ")", ")", "if", "currtype", "[", "'data'", "]", "[", "0", "]", "!=", "b'\\x11'", ":", "raise", "exc", ".", "PyghmiException", "(", "\"Unknown parameter format\"", ")", "currtype", "=", "bytearray", "(", "currtype", "[", "'data'", "]", "[", "1", ":", "]", ")", "if", "acknowledge_required", "is", "not", "None", ":", "if", "acknowledge_required", ":", "currtype", "[", "1", "]", "|=", "0b10000000", "else", ":", "currtype", "[", "1", "]", "&=", "0b1111111", "if", "acknowledge_timeout", "is", "not", "None", ":", "currtype", "[", "2", "]", "=", "acknowledge_timeout", "if", "retries", "is", "not", "None", ":", "currtype", "[", "3", "]", "=", "retries", "destreq", "=", "bytearray", "(", "(", "channel", ",", "18", ")", ")", "destreq", ".", "extend", "(", "currtype", ")", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "destreq", ")", "if", "not", "ip", "==", "'0.0.0.0'", ":", "self", ".", "_assure_alert_policy", "(", "channel", ",", "destination", ")" ]
Configure one or more parameters of an alert destination

If any parameter is 'None' (default), that parameter is left unchanged. Otherwise, all given parameters are set by this command.

:param ip: IP address of the destination. It is currently expected that the calling code will handle any name lookup and present this data as IP address.
:param acknowledge_required: Whether or not the target should expect an acknowledgement from this alert target.
:param acknowledge_timeout: Time to wait for acknowledgement if enabled
:param retries: How many times to attempt transmit of an alert.
:param destination: Destination index, defaults to 0.
:param channel: The channel to configure the alert on. Defaults to current
[ "Configure", "one", "or", "more", "parameters", "of", "an", "alert", "destination" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1137-L1197
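A sketch of configuring and later clearing an alert destination with the functions above. The receiver IP is a placeholder; note that clear_alert_destination is simply set_alert_destination with '0.0.0.0', which skips the alert-policy setup.

# Point slot 0 at an IPv4 trap receiver: no acknowledgement, 3 retries.
ipmicmd.set_alert_destination(ip='192.0.2.50', acknowledge_required=False,
                              retries=3, destination=0)
# ...later, return the slot to its unconfigured state.
ipmicmd.clear_alert_destination(destination=0)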
16,107
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_hostname
def get_hostname(self):
    """Get the hostname used by the BMC in various contexts

    This can vary somewhat in interpretation, but generally speaking
    this should be the name that shows up on UIs and in DHCP requests
    and DNS registration requests, as applicable.

    :return: current hostname
    """
    self.oem_init()
    try:
        return self._oem.get_hostname()
    except exc.UnsupportedFunctionality:
        # Use the DCMI MCI field as a fallback, since it's the closest
        # thing in the IPMI Spec for this
        return self.get_mci()
python
[ "def", "get_hostname", "(", "self", ")", ":", "self", ".", "oem_init", "(", ")", "try", ":", "return", "self", ".", "_oem", ".", "get_hostname", "(", ")", "except", "exc", ".", "UnsupportedFunctionality", ":", "# Use the DCMI MCI field as a fallback, since it's the closest", "# thing in the IPMI Spec for this", "return", "self", ".", "get_mci", "(", ")" ]
Get the hostname used by the BMC in various contexts This can vary somewhat in interpretation, but generally speaking this should be the name that shows up on UIs and in DHCP requests and DNS registration requests, as applicable. :return: current hostname
[ "Get", "the", "hostname", "used", "by", "the", "BMC", "in", "various", "contexts" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1199-L1214
16,108
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_hostname
def set_hostname(self, hostname):
    """Set the hostname to be used by the BMC in various contexts.

    See get_hostname for details

    :param hostname: The hostname to set
    :return: Nothing
    """
    self.oem_init()
    try:
        return self._oem.set_hostname(hostname)
    except exc.UnsupportedFunctionality:
        return self.set_mci(hostname)
python
[ "def", "set_hostname", "(", "self", ",", "hostname", ")", ":", "self", ".", "oem_init", "(", ")", "try", ":", "return", "self", ".", "_oem", ".", "set_hostname", "(", "hostname", ")", "except", "exc", ".", "UnsupportedFunctionality", ":", "return", "self", ".", "set_mci", "(", "hostname", ")" ]
Set the hostname to be used by the BMC in various contexts. See get_hostname for details :param hostname: The hostname to set :return: Nothing
[ "Set", "the", "hostname", "to", "be", "used", "by", "the", "BMC", "in", "various", "contexts", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1223-L1235
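A hedged round trip for the two hostname functions above, again reusing ipmicmd. On BMCs whose OEM handler raises UnsupportedFunctionality, both calls transparently fall back to the DCMI Management Controller Identifier field.

# Illustrative hostname; read it back to see what the BMC stored.
ipmicmd.set_hostname('node01-bmc')
print(ipmicmd.get_hostname())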
16,109
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_channel_access
def get_channel_access(self, channel=None, read_mode='volatile'):
    """Get channel access

    :param channel: number [1:7]
    :param read_mode:
        non_volatile = get non-volatile Channel Access
        volatile = get present volatile (active) setting of Channel Access
    :return: A Python dict with the following keys/values:
        {
        - alerting:
        - per_msg_auth:
        - user_level_auth:
        - access_mode: {
            0: 'disabled',
            1: 'pre_boot',
            2: 'always',
            3: 'shared'
          }
        - privilege_level: {
            1: 'callback',
            2: 'user',
            3: 'operator',
            4: 'administrator',
            5: 'proprietary',
          }
        }
    """
    if channel is None:
        channel = self.get_network_channel()
    data = []
    data.append(channel & 0b00001111)
    b = 0
    read_modes = {
        'non_volatile': 1,
        'volatile': 2,
    }
    b |= (read_modes[read_mode] << 6) & 0b11000000
    data.append(b)
    response = self.raw_command(netfn=0x06, command=0x41, data=data)
    if 'error' in response:
        raise Exception(response['error'])
    data = response['data']
    if len(data) != 2:
        raise Exception('expecting 2 data bytes')
    r = {}
    r['alerting'] = data[0] & 0b10000000 > 0
    r['per_msg_auth'] = data[0] & 0b01000000 > 0
    r['user_level_auth'] = data[0] & 0b00100000 > 0
    access_modes = {
        0: 'disabled',
        1: 'pre_boot',
        2: 'always',
        3: 'shared'
    }
    r['access_mode'] = access_modes[data[0] & 0b00000011]
    privilege_levels = {
        0: 'reserved',
        1: 'callback',
        2: 'user',
        3: 'operator',
        4: 'administrator',
        5: 'proprietary',
        # 0x0F: 'no_access'
    }
    r['privilege_level'] = privilege_levels[data[1] & 0b00001111]
    return r
python
[ "def", "get_channel_access", "(", "self", ",", "channel", "=", "None", ",", "read_mode", "=", "'volatile'", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "data", "=", "[", "]", "data", ".", "append", "(", "channel", "&", "0b00001111", ")", "b", "=", "0", "read_modes", "=", "{", "'non_volatile'", ":", "1", ",", "'volatile'", ":", "2", ",", "}", "b", "|=", "(", "read_modes", "[", "read_mode", "]", "<<", "6", ")", "&", "0b11000000", "data", ".", "append", "(", "b", ")", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "0x06", ",", "command", "=", "0x41", ",", "data", "=", "data", ")", "if", "'error'", "in", "response", ":", "raise", "Exception", "(", "response", "[", "'error'", "]", ")", "data", "=", "response", "[", "'data'", "]", "if", "len", "(", "data", ")", "!=", "2", ":", "raise", "Exception", "(", "'expecting 2 data bytes'", ")", "r", "=", "{", "}", "r", "[", "'alerting'", "]", "=", "data", "[", "0", "]", "&", "0b10000000", ">", "0", "r", "[", "'per_msg_auth'", "]", "=", "data", "[", "0", "]", "&", "0b01000000", ">", "0", "r", "[", "'user_level_auth'", "]", "=", "data", "[", "0", "]", "&", "0b00100000", ">", "0", "access_modes", "=", "{", "0", ":", "'disabled'", ",", "1", ":", "'pre_boot'", ",", "2", ":", "'always'", ",", "3", ":", "'shared'", "}", "r", "[", "'access_mode'", "]", "=", "access_modes", "[", "data", "[", "0", "]", "&", "0b00000011", "]", "privilege_levels", "=", "{", "0", ":", "'reserved'", ",", "1", ":", "'callback'", ",", "2", ":", "'user'", ",", "3", ":", "'operator'", ",", "4", ":", "'administrator'", ",", "5", ":", "'proprietary'", ",", "# 0x0F: 'no_access'", "}", "r", "[", "'privilege_level'", "]", "=", "privilege_levels", "[", "data", "[", "1", "]", "&", "0b00001111", "]", "return", "r" ]
Get channel access

:param channel: number [1:7]
:param read_mode:
    non_volatile = get non-volatile Channel Access
    volatile = get present volatile (active) setting of Channel Access
:return: A Python dict with the following keys/values:
    alerting:
    per_msg_auth:
    user_level_auth:
    access_mode: {0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared'}
    privilege_level: {1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary'}
[ "Get", "channel", "access" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1394-L1463
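A sketch of reading channel access with the function above and acting on the decoded dict, reusing ipmicmd.

# Compare the active (volatile) and saved (non-volatile) settings.
active = ipmicmd.get_channel_access(read_mode='volatile')
saved = ipmicmd.get_channel_access(read_mode='non_volatile')
if active['access_mode'] != saved['access_mode']:
    print('access mode differs until next save:', active['access_mode'])
print('max privilege:', active['privilege_level'])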
16,110
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_channel_info
def get_channel_info(self, channel=None):
    """Get channel info

    :param channel: number [1:7]

    :return:
        session_support:
            no_session: channel is session-less
            single: channel is single-session
            multi: channel is multi-session
            auto: channel is session-based (channel could alternate
                between single- and multi-session operation, as can
                occur with a serial/modem channel that supports
                connection mode auto-detect)
    """
    if channel is None:
        channel = self.get_network_channel()
    data = []
    data.append(channel & 0b00001111)
    response = self.raw_command(netfn=0x06, command=0x42, data=data)
    if 'error' in response:
        raise Exception(response['error'])
    data = response['data']
    if len(data) != 9:
        raise Exception('expecting 10 data bytes got: {0}'.format(data))
    r = {}
    r['Actual channel'] = data[0] & 0b00000111
    channel_medium_types = {
        0: 'reserved',
        1: 'IPMB',
        2: 'ICMB v1.0',
        3: 'ICMB v0.9',
        4: '802.3 LAN',
        5: 'Asynch. Serial/Modem (RS-232)',
        6: 'Other LAN',
        7: 'PCI SMBus',
        8: 'SMBus v1.0/1.1',
        9: 'SMBus v2.0',
        0x0a: 'reserved for USB 1.x',
        0x0b: 'reserved for USB 2.x',
        0x0c: 'System Interface (KCS, SMIC, or BT)',
        # 60h-7Fh: OEM
        # all other reserved
    }
    t = data[1] & 0b01111111
    if t in channel_medium_types:
        r['Channel Medium type'] = channel_medium_types[t]
    else:
        r['Channel Medium type'] = 'OEM {:02X}'.format(t)
    r['5-bit Channel IPMI Messaging Protocol Type'] = data[2] & 0b00001111
    session_supports = {
        0: 'no_session',
        1: 'single',
        2: 'multi',
        3: 'auto'
    }
    r['session_support'] = session_supports[(data[3] & 0b11000000) >> 6]
    r['active_session_count'] = data[3] & 0b00111111
    r['Vendor ID'] = [data[4], data[5], data[6]]
    r['Auxiliary Channel Info'] = [data[7], data[8]]
    return r
python
[ "def", "get_channel_info", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "data", "=", "[", "]", "data", ".", "append", "(", "channel", "&", "0b00001111", ")", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "0x06", ",", "command", "=", "0x42", ",", "data", "=", "data", ")", "if", "'error'", "in", "response", ":", "raise", "Exception", "(", "response", "[", "'error'", "]", ")", "data", "=", "response", "[", "'data'", "]", "if", "len", "(", "data", ")", "!=", "9", ":", "raise", "Exception", "(", "'expecting 10 data bytes got: {0}'", ".", "format", "(", "data", ")", ")", "r", "=", "{", "}", "r", "[", "'Actual channel'", "]", "=", "data", "[", "0", "]", "&", "0b00000111", "channel_medium_types", "=", "{", "0", ":", "'reserved'", ",", "1", ":", "'IPMB'", ",", "2", ":", "'ICMB v1.0'", ",", "3", ":", "'ICMB v0.9'", ",", "4", ":", "'802.3 LAN'", ",", "5", ":", "'Asynch. Serial/Modem (RS-232)'", ",", "6", ":", "'Other LAN'", ",", "7", ":", "'PCI SMBus'", ",", "8", ":", "'SMBus v1.0/1.1'", ",", "9", ":", "'SMBus v2.0'", ",", "0x0a", ":", "'reserved for USB 1.x'", ",", "0x0b", ":", "'reserved for USB 2.x'", ",", "0x0c", ":", "'System Interface (KCS, SMIC, or BT)'", ",", "# 60h-7Fh: OEM", "# all other reserved", "}", "t", "=", "data", "[", "1", "]", "&", "0b01111111", "if", "t", "in", "channel_medium_types", ":", "r", "[", "'Channel Medium type'", "]", "=", "channel_medium_types", "[", "t", "]", "else", ":", "r", "[", "'Channel Medium type'", "]", "=", "'OEM {:02X}'", ".", "format", "(", "t", ")", "r", "[", "'5-bit Channel IPMI Messaging Protocol Type'", "]", "=", "data", "[", "2", "]", "&", "0b00001111", "session_supports", "=", "{", "0", ":", "'no_session'", ",", "1", ":", "'single'", ",", "2", ":", "'multi'", ",", "3", ":", "'auto'", "}", "r", "[", "'session_support'", "]", "=", "session_supports", "[", "(", "data", "[", "3", "]", "&", "0b11000000", ")", ">>", "6", "]", "r", "[", "'active_session_count'", "]", "=", "data", "[", "3", "]", "&", "0b00111111", "r", "[", "'Vendor ID'", "]", "=", "[", "data", "[", "4", "]", ",", "data", "[", "5", "]", ",", "data", "[", "6", "]", "]", "r", "[", "'Auxiliary Channel Info'", "]", "=", "[", "data", "[", "7", "]", ",", "data", "[", "8", "]", "]", "return", "r" ]
Get channel info

:param channel: number [1:7]
:return:
    session_support:
        no_session: channel is session-less
        single: channel is single-session
        multi: channel is multi-session
        auto: channel is session-based (channel could alternate between single- and multi-session operation, as can occur with a serial/modem channel that supports connection mode auto-detect)
[ "Get", "channel", "info" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1465-L1524
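A sketch using get_channel_info above to confirm the channel tolerates concurrent sessions before opening a second connection, reusing ipmicmd.

info = ipmicmd.get_channel_info()
# 'multi' means the LAN channel supports additional simultaneous sessions.
if info['session_support'] != 'multi':
    print('channel is %s; avoid parallel sessions' % info['session_support'])
print('active sessions:', info['active_session_count'])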
16,111
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_firmware
def get_firmware(self, components=()):
    """Retrieve OEM Firmware information
    """
    self.oem_init()
    mcinfo = self.xraw_command(netfn=6, command=1)
    bmcver = '{0}.{1}'.format(
        ord(mcinfo['data'][2]), hex(ord(mcinfo['data'][3]))[2:])
    return self._oem.get_oem_firmware(bmcver, components)
python
[ "def", "get_firmware", "(", "self", ",", "components", "=", "(", ")", ")", ":", "self", ".", "oem_init", "(", ")", "mcinfo", "=", "self", ".", "xraw_command", "(", "netfn", "=", "6", ",", "command", "=", "1", ")", "bmcver", "=", "'{0}.{1}'", ".", "format", "(", "ord", "(", "mcinfo", "[", "'data'", "]", "[", "2", "]", ")", ",", "hex", "(", "ord", "(", "mcinfo", "[", "'data'", "]", "[", "3", "]", ")", ")", "[", "2", ":", "]", ")", "return", "self", ".", "_oem", ".", "get_oem_firmware", "(", "bmcver", ",", "components", ")" ]
Retrieve OEM Firmware information
[ "Retrieve", "OEM", "Firmware", "information" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1886-L1893
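A hedged sketch for get_firmware above. The return shape is whatever the detected OEM handler reports; iterating (component, details) pairs matches how pyghmi's OEM handlers commonly present inventory, but treat that shape as an assumption.

# Dump whatever firmware inventory the detected OEM handler exposes.
for component, details in ipmicmd.get_firmware():
    print(component, details)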
16,112
openstack/pyghmi
pyghmi/ipmi/command.py
Command.update_firmware
def update_firmware(self, file, data=None, progress=None, bank=None):
    """Send file to BMC to perform firmware update

    :param filename: The filename to upload to the target BMC
    :param data: The payload of the firmware. Default is to read from
        specified filename.
    :param progress: A callback that will be given a dict describing
        update process. Provide if
    :param bank: Indicate a target 'bank' of firmware if supported
    """
    self.oem_init()
    if progress is None:
        progress = lambda x: True
    return self._oem.update_firmware(file, data, progress, bank)
python
[ "def", "update_firmware", "(", "self", ",", "file", ",", "data", "=", "None", ",", "progress", "=", "None", ",", "bank", "=", "None", ")", ":", "self", ".", "oem_init", "(", ")", "if", "progress", "is", "None", ":", "progress", "=", "lambda", "x", ":", "True", "return", "self", ".", "_oem", ".", "update_firmware", "(", "file", ",", "data", ",", "progress", ",", "bank", ")" ]
Send file to BMC to perform firmware update :param filename: The filename to upload to the target BMC :param data: The payload of the firmware. Default is to read from specified filename. :param progress: A callback that will be given a dict describing update process. Provide if :param bank: Indicate a target 'bank' of firmware if supported
[ "Send", "file", "to", "BMC", "to", "perform", "firmware", "update" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1936-L1949
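A sketch for update_firmware above with an explicit progress callback, reusing ipmicmd. The keys inside the progress dict are OEM-defined, so the callback simply prints the whole dict; the image path is a placeholder, and the call depends on OEM support.

def show_progress(state):
    # state is an OEM-defined dict describing the update phase.
    print(state)

ipmicmd.update_firmware('/tmp/bmc_update.img', progress=show_progress)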
16,113
openstack/pyghmi
pyghmi/ipmi/command.py
Command.attach_remote_media
def attach_remote_media(self, url, username=None, password=None):
    """Attach remote media by url

    Given a url, attach remote media (cd/usb image) to the target
    system.

    :param url: URL to indicate where to find image (protocol support
        varies by BMC)
    :param username: Username for endpoint to use when accessing the URL.
        If applicable, 'domain' would be indicated by '@' or '\' syntax.
    :param password: Password for endpoint to use when accessing the URL.
    """
    self.oem_init()
    return self._oem.attach_remote_media(url, username, password)
python
[ "def", "attach_remote_media", "(", "self", ",", "url", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "self", ".", "oem_init", "(", ")", "return", "self", ".", "_oem", ".", "attach_remote_media", "(", "url", ",", "username", ",", "password", ")" ]
Attach remote media by url Given a url, attach remote media (cd/usb image) to the target system. :param url: URL to indicate where to find image (protocol support varies by BMC) :param username: Username for endpoint to use when accessing the URL. If applicable, 'domain' would be indicated by '@' or '\' syntax. :param password: Password for endpoint to use when accessing the URL.
[ "Attach", "remote", "media", "by", "url" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1951-L1964
16,114
openstack/pyghmi
pyghmi/ipmi/command.py
Command.upload_media
def upload_media(self, filename, progress=None):
    """Upload a file to be hosted on the target BMC

    This will upload the specified data to the BMC so that it will make
    it available to the system as an emulated USB device.

    :param filename: The filename to use, the basename of the parameter
        will be given to the bmc.
    :param progress: Optional callback for progress updates
    """
    self.oem_init()
    return self._oem.upload_media(filename, progress)
python
[ "def", "upload_media", "(", "self", ",", "filename", ",", "progress", "=", "None", ")", ":", "self", ".", "oem_init", "(", ")", "return", "self", ".", "_oem", ".", "upload_media", "(", "filename", ",", "progress", ")" ]
Upload a file to be hosted on the target BMC This will upload the specified data to the BMC so that it will make it available to the system as an emulated USB device. :param filename: The filename to use, the basename of the parameter will be given to the bmc. :param progress: Optional callback for progress updates
[ "Upload", "a", "file", "to", "be", "hosted", "on", "the", "target", "BMC" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1970-L1982
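A combined sketch for the two media functions above, reusing ipmicmd. Both are OEM-dependent (the generic handler raises UnsupportedFunctionality); the URL and file path are placeholders.

# Attach an ISO the BMC can fetch itself, then push a small local
# image for the BMC to present as emulated USB media.
ipmicmd.attach_remote_media('http://images.example.com/install.iso')
ipmicmd.upload_media('/tmp/bootenv.img')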
16,115
openstack/pyghmi
pyghmi/ipmi/oem/generic.py
OEMHandler.process_event
def process_event(self, event, ipmicmd, seldata):
    """Modify an event according with OEM understanding.

    Given an event, allow an OEM module to augment it. For example,
    event data fields can have OEM bytes. Other times an OEM may wish
    to apply some transform to some field to suit their conventions.
    """
    event['oem_handler'] = None
    evdata = event['event_data_bytes']
    if evdata[0] & 0b11000000 == 0b10000000:
        event['oem_byte2'] = evdata[1]
    if evdata[0] & 0b110000 == 0b100000:
        event['oem_byte3'] = evdata[2]
python
[ "def", "process_event", "(", "self", ",", "event", ",", "ipmicmd", ",", "seldata", ")", ":", "event", "[", "'oem_handler'", "]", "=", "None", "evdata", "=", "event", "[", "'event_data_bytes'", "]", "if", "evdata", "[", "0", "]", "&", "0b11000000", "==", "0b10000000", ":", "event", "[", "'oem_byte2'", "]", "=", "evdata", "[", "1", "]", "if", "evdata", "[", "0", "]", "&", "0b110000", "==", "0b100000", ":", "event", "[", "'oem_byte3'", "]", "=", "evdata", "[", "2", "]" ]
Modify an event according with OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions.
[ "Modify", "an", "event", "according", "with", "OEM", "understanding", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/generic.py#L45-L57
16,116
openstack/pyghmi
pyghmi/ipmi/console.py
Console._got_session
def _got_session(self, response):
    """Private function to navigate SOL payload activation
    """
    if 'error' in response:
        self._print_error(response['error'])
        return
    if not self.ipmi_session:
        self.callgotsession = response
        return
    # Send activate sol payload directive
    # netfn= 6 (application)
    # command = 0x48 (activate payload)
    # data = (1, sol payload type
    #         1, first instance
    #         0b11000000, -encrypt, authenticate,
    #                     disable serial/modem alerts, CTS fine
    #         0, 0, 0 reserved
    response = self.ipmi_session.raw_command(netfn=0x6, command=0x48,
                                             data=(1, 1, 192, 0, 0, 0))
    # given that these are specific to the command,
    # it's probably best if one can grep the error
    # here instead of in constants
    sol_activate_codes = {
        0x81: 'SOL is disabled',
        0x82: 'Maximum SOL session count reached',
        0x83: 'Cannot activate payload with encryption',
        0x84: 'Cannot activate payload without encryption',
    }
    if 'code' in response and response['code']:
        if response['code'] in constants.ipmi_completion_codes:
            self._print_error(
                constants.ipmi_completion_codes[response['code']])
            return
        elif response['code'] == 0x80:
            if self.force_session and not self.retriedpayload:
                self.retriedpayload = 1
                sessrsp = self.ipmi_session.raw_command(
                    netfn=0x6, command=0x49, data=(1, 1, 0, 0, 0, 0))
                self._got_session(sessrsp)
                return
            else:
                self._print_error('SOL Session active for another client')
                return
        elif response['code'] in sol_activate_codes:
            self._print_error(sol_activate_codes[response['code']])
            return
        else:
            self._print_error(
                'SOL encountered Unrecognized error code %d' %
                response['code'])
            return
    if 'error' in response:
        self._print_error(response['error'])
        return
    self.activated = True
    # data[0:3] is reserved except for the test mode, which we don't use
    data = response['data']
    self.maxoutcount = (data[5] << 8) + data[4]
    # BMC tells us this is the maximum allowed size
    # data[6:7] is the promise of how small packets are going to be, but we
    # don't have any reason to worry about it
    # some BMCs disagree on the endianness, so do both
    valid_ports = (self.port, struct.unpack(
        '<H', struct.pack('>H', self.port))[0])
    if (data[8] + (data[9] << 8)) not in valid_ports:
        # TODO(jbjohnso): support atypical SOL port number
        raise NotImplementedError("Non-standard SOL Port Number")
    # ignore data[10:11] for now, the vlan detail, shouldn't matter to this
    # code anyway...
    # NOTE(jbjohnso):
    # We will use a special purpose keepalive
    if self.ipmi_session.sol_handler is not None:
        # If there is erroneously another SOL handler already, notify
        # it of newly established session
        self.ipmi_session.sol_handler({'error': 'Session Disconnected'})
    self.keepaliveid = self.ipmi_session.register_keepalive(
        cmd={'netfn': 6, 'command': 0x4b, 'data': (1, 1)},
        callback=self._got_payload_instance_info)
    self.ipmi_session.sol_handler = self._got_sol_payload
    self.connected = True
    # self._sendpendingoutput() checks len(self._sendpendingoutput)
    self._sendpendingoutput()
python
[ "def", "_got_session", "(", "self", ",", "response", ")", ":", "if", "'error'", "in", "response", ":", "self", ".", "_print_error", "(", "response", "[", "'error'", "]", ")", "return", "if", "not", "self", ".", "ipmi_session", ":", "self", ".", "callgotsession", "=", "response", "return", "# Send activate sol payload directive", "# netfn= 6 (application)", "# command = 0x48 (activate payload)", "# data = (1, sol payload type", "# 1, first instance", "# 0b11000000, -encrypt, authenticate,", "# disable serial/modem alerts, CTS fine", "# 0, 0, 0 reserved", "response", "=", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "0x6", ",", "command", "=", "0x48", ",", "data", "=", "(", "1", ",", "1", ",", "192", ",", "0", ",", "0", ",", "0", ")", ")", "# given that these are specific to the command,", "# it's probably best if one can grep the error", "# here instead of in constants", "sol_activate_codes", "=", "{", "0x81", ":", "'SOL is disabled'", ",", "0x82", ":", "'Maximum SOL session count reached'", ",", "0x83", ":", "'Cannot activate payload with encryption'", ",", "0x84", ":", "'Cannot activate payload without encryption'", ",", "}", "if", "'code'", "in", "response", "and", "response", "[", "'code'", "]", ":", "if", "response", "[", "'code'", "]", "in", "constants", ".", "ipmi_completion_codes", ":", "self", ".", "_print_error", "(", "constants", ".", "ipmi_completion_codes", "[", "response", "[", "'code'", "]", "]", ")", "return", "elif", "response", "[", "'code'", "]", "==", "0x80", ":", "if", "self", ".", "force_session", "and", "not", "self", ".", "retriedpayload", ":", "self", ".", "retriedpayload", "=", "1", "sessrsp", "=", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "0x6", ",", "command", "=", "0x49", ",", "data", "=", "(", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ")", ")", "self", ".", "_got_session", "(", "sessrsp", ")", "return", "else", ":", "self", ".", "_print_error", "(", "'SOL Session active for another client'", ")", "return", "elif", "response", "[", "'code'", "]", "in", "sol_activate_codes", ":", "self", ".", "_print_error", "(", "sol_activate_codes", "[", "response", "[", "'code'", "]", "]", ")", "return", "else", ":", "self", ".", "_print_error", "(", "'SOL encountered Unrecognized error code %d'", "%", "response", "[", "'code'", "]", ")", "return", "if", "'error'", "in", "response", ":", "self", ".", "_print_error", "(", "response", "[", "'error'", "]", ")", "return", "self", ".", "activated", "=", "True", "# data[0:3] is reserved except for the test mode, which we don't use", "data", "=", "response", "[", "'data'", "]", "self", ".", "maxoutcount", "=", "(", "data", "[", "5", "]", "<<", "8", ")", "+", "data", "[", "4", "]", "# BMC tells us this is the maximum allowed size", "# data[6:7] is the promise of how small packets are going to be, but we", "# don't have any reason to worry about it", "# some BMCs disagree on the endianness, so do both", "valid_ports", "=", "(", "self", ".", "port", ",", "struct", ".", "unpack", "(", "'<H'", ",", "struct", ".", "pack", "(", "'>H'", ",", "self", ".", "port", ")", ")", "[", "0", "]", ")", "if", "(", "data", "[", "8", "]", "+", "(", "data", "[", "9", "]", "<<", "8", ")", ")", "not", "in", "valid_ports", ":", "# TODO(jbjohnso): support atypical SOL port number", "raise", "NotImplementedError", "(", "\"Non-standard SOL Port Number\"", ")", "# ignore data[10:11] for now, the vlan detail, shouldn't matter to this", "# code anyway...", "# NOTE(jbjohnso):", "# We will use a special 
purpose keepalive", "if", "self", ".", "ipmi_session", ".", "sol_handler", "is", "not", "None", ":", "# If there is erroneously another SOL handler already, notify", "# it of newly established session", "self", ".", "ipmi_session", ".", "sol_handler", "(", "{", "'error'", ":", "'Session Disconnected'", "}", ")", "self", ".", "keepaliveid", "=", "self", ".", "ipmi_session", ".", "register_keepalive", "(", "cmd", "=", "{", "'netfn'", ":", "6", ",", "'command'", ":", "0x4b", ",", "'data'", ":", "(", "1", ",", "1", ")", "}", ",", "callback", "=", "self", ".", "_got_payload_instance_info", ")", "self", ".", "ipmi_session", ".", "sol_handler", "=", "self", ".", "_got_sol_payload", "self", ".", "connected", "=", "True", "# self._sendpendingoutput() checks len(self._sendpendingoutput)", "self", ".", "_sendpendingoutput", "(", ")" ]
Private function to navigate SOL payload activation
[ "Private", "function", "to", "navigate", "SOL", "payload", "activation" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L77-L160
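A minimal sketch of the response-field extraction the activation handler above performs, assuming the activate-payload response follows the IPMI 2.0 layout this code expects; the helper name is made up for illustration. The maximum outbound size is read little-endian at offset 4, and the SOL UDP port at offset 8 (with the caveat, noted in the comments, that some BMCs byteswap the port field).

import struct

def parse_activate_payload_rsp(data):
    # hypothetical helper mirroring the unpacking done inline above
    data = bytes(bytearray(data))
    maxoutcount = struct.unpack_from('<H', data, 4)[0]  # max payload size the BMC accepts
    port = struct.unpack_from('<H', data, 8)[0]         # UDP port carrying SOL traffic
    return maxoutcount, port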
16,117
openstack/pyghmi
pyghmi/ipmi/console.py
Console._got_cons_input
def _got_cons_input(self, handle): """Callback for handle events detected by ipmi session """ self._addpendingdata(handle.read()) if not self.awaitingack: self._sendpendingoutput()
python
def _got_cons_input(self, handle): """Callback for handle events detected by ipmi session """ self._addpendingdata(handle.read()) if not self.awaitingack: self._sendpendingoutput()
[ "def", "_got_cons_input", "(", "self", ",", "handle", ")", ":", "self", ".", "_addpendingdata", "(", "handle", ".", "read", "(", ")", ")", "if", "not", "self", ".", "awaitingack", ":", "self", ".", "_sendpendingoutput", "(", ")" ]
Callback for handle events detected by ipmi session
[ "Callback", "for", "handle", "events", "detected", "by", "ipmi", "session" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L192-L197
16,118
openstack/pyghmi
pyghmi/ipmi/console.py
Console.close
def close(self): """Shut down an SOL session """ if self.ipmi_session: self.ipmi_session.unregister_keepalive(self.keepaliveid) if self.activated: try: self.ipmi_session.raw_command(netfn=6, command=0x49, data=(1, 1, 0, 0, 0, 0)) except exc.IpmiException: # if underlying ipmi session is not working, then # run with the implicit success pass
python
def close(self): """Shut down an SOL session """ if self.ipmi_session: self.ipmi_session.unregister_keepalive(self.keepaliveid) if self.activated: try: self.ipmi_session.raw_command(netfn=6, command=0x49, data=(1, 1, 0, 0, 0, 0)) except exc.IpmiException: # if underlying ipmi session is not working, then # run with the implicit success pass
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "ipmi_session", ":", "self", ".", "ipmi_session", ".", "unregister_keepalive", "(", "self", ".", "keepaliveid", ")", "if", "self", ".", "activated", ":", "try", ":", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "6", ",", "command", "=", "0x49", ",", "data", "=", "(", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ")", ")", "except", "exc", ".", "IpmiException", ":", "# if underlying ipmi session is not working, then", "# run with the implicit success", "pass" ]
Shut down an SOL session
[ "Shut", "down", "an", "SOL", "session" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L199-L211
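A hedged usage sketch for the lifecycle these methods manage; the constructor arguments shown follow this module's conventions but should be checked against the installed pyghmi version, and the host and credentials are placeholders.

from pyghmi.ipmi import console

def handle_output(data):
    # receives console data, or a dict describing an error/event
    if isinstance(data, dict):
        print('console event:', data)
    else:
        print(data, end='')

sol = console.Console(bmc='192.0.2.10', userid='admin', password='secret',
                      iohandler=handle_output, force=True)
# ... interact with the session ...
sol.close()  # deactivates the payload (netfn 6, command 0x49) and drops the keepalive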
16,119
openstack/pyghmi
pyghmi/ipmi/console.py
ServerConsole._got_sol_payload
def _got_sol_payload(self, payload): """SOL payload callback """ # TODO(jbjohnso) test cases to throw some likely scenarios at functions # for example, retry with new data, retry with no new data # retry with unexpected sequence number if type(payload) == dict: # we received an error condition self.activated = False self._print_error(payload) return newseq = payload[0] & 0b1111 ackseq = payload[1] & 0b1111 ackcount = payload[2] nacked = payload[3] & 0b1000000 breakdetected = payload[3] & 0b10000 # for now, ignore overrun. I assume partial NACK for this reason or # for no reason would be treated the same, new payload with partial # data. remdata = "" remdatalen = 0 flag = 0 if not self.poweredon: flag |= 0b1100000 if not self.activated: flag |= 0b1010000 if newseq != 0: # this packet at least has some data to send to us.. if len(payload) > 4: remdatalen = len(payload[4:]) # store remote len before dupe # retry logic, we must ack *this* many even if it is # a retry packet with new partial data remdata = bytes(payload[4:]) if newseq == self.remseq: # it is a retry, but could have new data if remdatalen > self.lastsize: remdata = bytes(remdata[4 + self.lastsize:]) else: # no new data... remdata = "" else: # TODO(jbjohnso) what if remote sequence number is wrong?? self.remseq = newseq self.lastsize = remdatalen ackpayload = bytearray((0, self.remseq, remdatalen, flag)) # Why not put pending data into the ack? because it's rare # and might be hard to decide what to do in the context of # retry situation try: self.send_payload(ackpayload, retry=False) except exc.IpmiException: # if the session is broken, then close the SOL session self.close() if remdata: # Do not subject callers to empty data self._print_data(remdata) if self.myseq != 0 and ackseq == self.myseq: # the bmc has something # to say about last xmit self.awaitingack = False if nacked and not breakdetected: # the BMC was in some way unhappy newtext = self.lastpayload[4 + ackcount:] with self.outputlock: if (self.pendingoutput and not isinstance(self.pendingoutput[0], dict)): self.pendingoutput[0] = newtext + self.pendingoutput[0] else: self.pendingoutput = [newtext] + self.pendingoutput # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput() elif ackseq != 0 and self.awaitingack: # if an ack packet came in, but did not match what we # expected, retry our payload now. # the situation that was triggered was a senseless retry # when data came in while we xmitted. In theory, a BMC # should handle a retry correctly, but some do not, so # try to mitigate by avoiding overeager retries # occasional retry of a packet # sooner than timeout suggests is evidently a big deal self.send_payload(payload=self.lastpayload)
python
def _got_sol_payload(self, payload): """SOL payload callback """ # TODO(jbjohnso) test cases to throw some likely scenarios at functions # for example, retry with new data, retry with no new data # retry with unexpected sequence number if type(payload) == dict: # we received an error condition self.activated = False self._print_error(payload) return newseq = payload[0] & 0b1111 ackseq = payload[1] & 0b1111 ackcount = payload[2] nacked = payload[3] & 0b1000000 breakdetected = payload[3] & 0b10000 # for now, ignore overrun. I assume partial NACK for this reason or # for no reason would be treated the same, new payload with partial # data. remdata = "" remdatalen = 0 flag = 0 if not self.poweredon: flag |= 0b1100000 if not self.activated: flag |= 0b1010000 if newseq != 0: # this packet at least has some data to send to us.. if len(payload) > 4: remdatalen = len(payload[4:]) # store remote len before dupe # retry logic, we must ack *this* many even if it is # a retry packet with new partial data remdata = bytes(payload[4:]) if newseq == self.remseq: # it is a retry, but could have new data if remdatalen > self.lastsize: remdata = bytes(remdata[4 + self.lastsize:]) else: # no new data... remdata = "" else: # TODO(jbjohnso) what if remote sequence number is wrong?? self.remseq = newseq self.lastsize = remdatalen ackpayload = bytearray((0, self.remseq, remdatalen, flag)) # Why not put pending data into the ack? because it's rare # and might be hard to decide what to do in the context of # retry situation try: self.send_payload(ackpayload, retry=False) except exc.IpmiException: # if the session is broken, then close the SOL session self.close() if remdata: # Do not subject callers to empty data self._print_data(remdata) if self.myseq != 0 and ackseq == self.myseq: # the bmc has something # to say about last xmit self.awaitingack = False if nacked and not breakdetected: # the BMC was in some way unhappy newtext = self.lastpayload[4 + ackcount:] with self.outputlock: if (self.pendingoutput and not isinstance(self.pendingoutput[0], dict)): self.pendingoutput[0] = newtext + self.pendingoutput[0] else: self.pendingoutput = [newtext] + self.pendingoutput # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput() elif ackseq != 0 and self.awaitingack: # if an ack packet came in, but did not match what we # expected, retry our payload now. # the situation that was triggered was a senseless retry # when data came in while we xmitted. In theory, a BMC # should handle a retry correctly, but some do not, so # try to mitigate by avoiding overeager retries # occasional retry of a packet # sooner than timeout suggests is evidently a big deal self.send_payload(payload=self.lastpayload)
[ "def", "_got_sol_payload", "(", "self", ",", "payload", ")", ":", "# TODO(jbjohnso) test cases to throw some likely scenarios at functions", "# for example, retry with new data, retry with no new data", "# retry with unexpected sequence number", "if", "type", "(", "payload", ")", "==", "dict", ":", "# we received an error condition", "self", ".", "activated", "=", "False", "self", ".", "_print_error", "(", "payload", ")", "return", "newseq", "=", "payload", "[", "0", "]", "&", "0b1111", "ackseq", "=", "payload", "[", "1", "]", "&", "0b1111", "ackcount", "=", "payload", "[", "2", "]", "nacked", "=", "payload", "[", "3", "]", "&", "0b1000000", "breakdetected", "=", "payload", "[", "3", "]", "&", "0b10000", "# for now, ignore overrun. I assume partial NACK for this reason or", "# for no reason would be treated the same, new payload with partial", "# data.", "remdata", "=", "\"\"", "remdatalen", "=", "0", "flag", "=", "0", "if", "not", "self", ".", "poweredon", ":", "flag", "|=", "0b1100000", "if", "not", "self", ".", "activated", ":", "flag", "|=", "0b1010000", "if", "newseq", "!=", "0", ":", "# this packet at least has some data to send to us..", "if", "len", "(", "payload", ")", ">", "4", ":", "remdatalen", "=", "len", "(", "payload", "[", "4", ":", "]", ")", "# store remote len before dupe", "# retry logic, we must ack *this* many even if it is", "# a retry packet with new partial data", "remdata", "=", "bytes", "(", "payload", "[", "4", ":", "]", ")", "if", "newseq", "==", "self", ".", "remseq", ":", "# it is a retry, but could have new data", "if", "remdatalen", ">", "self", ".", "lastsize", ":", "remdata", "=", "bytes", "(", "remdata", "[", "4", "+", "self", ".", "lastsize", ":", "]", ")", "else", ":", "# no new data...", "remdata", "=", "\"\"", "else", ":", "# TODO(jbjohnso) what if remote sequence number is wrong??", "self", ".", "remseq", "=", "newseq", "self", ".", "lastsize", "=", "remdatalen", "ackpayload", "=", "bytearray", "(", "(", "0", ",", "self", ".", "remseq", ",", "remdatalen", ",", "flag", ")", ")", "# Why not put pending data into the ack? 
because it's rare", "# and might be hard to decide what to do in the context of", "# retry situation", "try", ":", "self", ".", "send_payload", "(", "ackpayload", ",", "retry", "=", "False", ")", "except", "exc", ".", "IpmiException", ":", "# if the session is broken, then close the SOL session", "self", ".", "close", "(", ")", "if", "remdata", ":", "# Do not subject callers to empty data", "self", ".", "_print_data", "(", "remdata", ")", "if", "self", ".", "myseq", "!=", "0", "and", "ackseq", "==", "self", ".", "myseq", ":", "# the bmc has something", "# to say about last xmit", "self", ".", "awaitingack", "=", "False", "if", "nacked", "and", "not", "breakdetected", ":", "# the BMC was in some way unhappy", "newtext", "=", "self", ".", "lastpayload", "[", "4", "+", "ackcount", ":", "]", "with", "self", ".", "outputlock", ":", "if", "(", "self", ".", "pendingoutput", "and", "not", "isinstance", "(", "self", ".", "pendingoutput", "[", "0", "]", ",", "dict", ")", ")", ":", "self", ".", "pendingoutput", "[", "0", "]", "=", "newtext", "+", "self", ".", "pendingoutput", "[", "0", "]", "else", ":", "self", ".", "pendingoutput", "=", "[", "newtext", "]", "+", "self", ".", "pendingoutput", "# self._sendpendingoutput() checks len(self._sendpendingoutput)", "self", ".", "_sendpendingoutput", "(", ")", "elif", "ackseq", "!=", "0", "and", "self", ".", "awaitingack", ":", "# if an ack packet came in, but did not match what we", "# expected, retry our payload now.", "# the situation that was triggered was a senseless retry", "# when data came in while we xmitted. In theory, a BMC", "# should handle a retry correctly, but some do not, so", "# try to mitigate by avoiding overeager retries", "# occasional retry of a packet", "# sooner than timeout suggests is evidently a big deal", "self", ".", "send_payload", "(", "payload", "=", "self", ".", "lastpayload", ")" ]
SOL payload callback
[ "SOL", "payload", "callback" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L459-L531
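The bit layout decoded at the top of _got_sol_payload can be summarized in a small standalone helper; this is a sketch of the same masks (per the IPMI 2.0 SOL packet format), not part of the module.

def parse_sol_header(payload):
    newseq = payload[0] & 0b1111          # BMC's packet sequence number
    ackseq = payload[1] & 0b1111          # our sequence number being (n)acked
    ackcount = payload[2]                 # count of accepted characters
    nacked = bool(payload[3] & 0b1000000)
    breakdetected = bool(payload[3] & 0b10000)
    return newseq, ackseq, ackcount, nacked, breakdetected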
16,120
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
OEMHandler.is_fpc
def is_fpc(self): """True if the target is a Lenovo nextscale fan power controller """ if self.has_imm or self.has_xcc: return None if self._fpc_variant is not None: return self._fpc_variant fpc_ids = ((19046, 32, 1063), (20301, 32, 462)) smm_id = (19046, 32, 1180) currid = (self.oemid['manufacturer_id'], self.oemid['device_id'], self.oemid['product_id']) if currid in fpc_ids: self._fpc_variant = 6 elif currid == smm_id: self._fpc_variant = 2 return self._fpc_variant
python
def is_fpc(self): """True if the target is a Lenovo nextscale fan power controller """ if self.has_imm or self.has_xcc: return None if self._fpc_variant is not None: return self._fpc_variant fpc_ids = ((19046, 32, 1063), (20301, 32, 462)) smm_id = (19046, 32, 1180) currid = (self.oemid['manufacturer_id'], self.oemid['device_id'], self.oemid['product_id']) if currid in fpc_ids: self._fpc_variant = 6 elif currid == smm_id: self._fpc_variant = 2 return self._fpc_variant
[ "def", "is_fpc", "(", "self", ")", ":", "if", "self", ".", "has_imm", "or", "self", ".", "has_xcc", ":", "return", "None", "if", "self", ".", "_fpc_variant", "is", "not", "None", ":", "return", "self", ".", "_fpc_variant", "fpc_ids", "=", "(", "(", "19046", ",", "32", ",", "1063", ")", ",", "(", "20301", ",", "32", ",", "462", ")", ")", "smm_id", "=", "(", "19046", ",", "32", ",", "1180", ")", "currid", "=", "(", "self", ".", "oemid", "[", "'manufacturer_id'", "]", ",", "self", ".", "oemid", "[", "'device_id'", "]", ",", "self", ".", "oemid", "[", "'product_id'", "]", ")", "if", "currid", "in", "fpc_ids", ":", "self", ".", "_fpc_variant", "=", "6", "elif", "currid", "==", "smm_id", ":", "self", ".", "_fpc_variant", "=", "2", "return", "self", ".", "_fpc_variant" ]
True if the target is a Lenovo nextscale fan power controller
[ "True", "if", "the", "target", "is", "a", "Lenovo", "nextscale", "fan", "power", "controller" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L335-L350
16,121
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
OEMHandler.has_tsm
def has_tsm(self): """True if this particular server has a TSM based service processor """ if (self.oemid['manufacturer_id'] == 19046 and self.oemid['device_id'] == 32): try: self.ipmicmd.xraw_command(netfn=0x3a, command=0xf) except pygexc.IpmiException as ie: if ie.ipmicode == 193: return False raise return True return False
python
def has_tsm(self): """True if this particular server has a TSM based service processor """ if (self.oemid['manufacturer_id'] == 19046 and self.oemid['device_id'] == 32): try: self.ipmicmd.xraw_command(netfn=0x3a, command=0xf) except pygexc.IpmiException as ie: if ie.ipmicode == 193: return False raise return True return False
[ "def", "has_tsm", "(", "self", ")", ":", "if", "(", "self", ".", "oemid", "[", "'manufacturer_id'", "]", "==", "19046", "and", "self", ".", "oemid", "[", "'device_id'", "]", "==", "32", ")", ":", "try", ":", "self", ".", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0x3a", ",", "command", "=", "0xf", ")", "except", "pygexc", ".", "IpmiException", "as", "ie", ":", "if", "ie", ".", "ipmicode", "==", "193", ":", "return", "False", "raise", "return", "True", "return", "False" ]
True if this particular server has a TSM based service processor
[ "True", "if", "this", "particular", "server", "have", "a", "TSM", "based", "service", "processor" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L359-L371
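The probe pattern above generalizes: completion code 193 (0xC1, invalid command) is how a BMC reports that an OEM extension is absent. A sketch of the same pattern with the netfn/command made parameters (the helper itself is hypothetical):

import pyghmi.exceptions as pygexc

def oem_command_supported(ipmicmd, netfn, command):
    try:
        ipmicmd.xraw_command(netfn=netfn, command=command)
    except pygexc.IpmiException as ie:
        if ie.ipmicode == 193:  # 0xC1: invalid/unsupported command
            return False
        raise
    return True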
16,122
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
OEMHandler.set_oem_capping_enabled
def set_oem_capping_enabled(self, enable): """Set PSU based power capping :param enable: True to enable and False to disable """ # 1 - Enable power capping(default) if enable: statecode = 1 # 0 - Disable power capping else: statecode = 0 if self.has_tsm: self.ipmicmd.xraw_command(netfn=0x3a, command=0x1a, data=(3, statecode)) return True
python
def set_oem_capping_enabled(self, enable): """Set PSU based power capping :param enable: True to enable and False to disable """ # 1 - Enable power capping(default) if enable: statecode = 1 # 0 - Disable power capping else: statecode = 0 if self.has_tsm: self.ipmicmd.xraw_command(netfn=0x3a, command=0x1a, data=(3, statecode)) return True
[ "def", "set_oem_capping_enabled", "(", "self", ",", "enable", ")", ":", "# 1 - Enable power capping(default)", "if", "enable", ":", "statecode", "=", "1", "# 0 - Disable power capping", "else", ":", "statecode", "=", "0", "if", "self", ".", "has_tsm", ":", "self", ".", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0x3a", ",", "command", "=", "0x1a", ",", "data", "=", "(", "3", ",", "statecode", ")", ")", "return", "True" ]
Set PSU based power capping :param enable: True to enable and False to disable
[ "Set", "PSU", "based", "power", "capping" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L636-L650
16,123
openstack/pyghmi
pyghmi/ipmi/private/util.py
decode_wireformat_uuid
def decode_wireformat_uuid(rawguid): """Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output. """ if isinstance(rawguid, list): rawguid = bytearray(rawguid) lebytes = struct.unpack_from('<IHH', buffer(rawguid[:8])) bebytes = struct.unpack_from('>HHI', buffer(rawguid[8:])) return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format( lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2])
python
def decode_wireformat_uuid(rawguid): """Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output. """ if isinstance(rawguid, list): rawguid = bytearray(rawguid) lebytes = struct.unpack_from('<IHH', buffer(rawguid[:8])) bebytes = struct.unpack_from('>HHI', buffer(rawguid[8:])) return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format( lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2])
[ "def", "decode_wireformat_uuid", "(", "rawguid", ")", ":", "if", "isinstance", "(", "rawguid", ",", "list", ")", ":", "rawguid", "=", "bytearray", "(", "rawguid", ")", "lebytes", "=", "struct", ".", "unpack_from", "(", "'<IHH'", ",", "buffer", "(", "rawguid", "[", ":", "8", "]", ")", ")", "bebytes", "=", "struct", ".", "unpack_from", "(", "'>HHI'", ",", "buffer", "(", "rawguid", "[", "8", ":", "]", ")", ")", "return", "'{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'", ".", "format", "(", "lebytes", "[", "0", "]", ",", "lebytes", "[", "1", "]", ",", "lebytes", "[", "2", "]", ",", "bebytes", "[", "0", "]", ",", "bebytes", "[", "1", "]", ",", "bebytes", "[", "2", "]", ")" ]
Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output.
[ "Decode", "a", "wire", "format", "UUID" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L41-L52
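Because buffer() is Python 2 only, a Python 3 rendering of the same mixed-endian decode is sketched below, with a worked example; this variant is illustrative and not part of the module.

import struct

def decode_wireformat_uuid_py3(rawguid):
    raw = bytes(bytearray(rawguid))
    lebytes = struct.unpack_from('<IHH', raw, 0)   # first half little endian
    bebytes = struct.unpack_from('>HHI', raw, 8)   # second half big endian
    return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format(
        lebytes[0], lebytes[1], lebytes[2],
        bebytes[0], bebytes[1], bebytes[2])

print(decode_wireformat_uuid_py3(range(16)))
# 03020100-0504-0706-0809-0A0B0C0D0E0F  (bytes 0..15; note the byte-swapped first half)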
16,124
openstack/pyghmi
pyghmi/ipmi/private/util.py
urlsplit
def urlsplit(url): """Split an arbitrary url into protocol, host, rest The standard urlsplit does not want to provide 'netloc' for arbitrary protocols; this works around that. :param url: The url to split into component parts """ proto, rest = url.split(':', 1) host = '' if rest[:2] == '//': host, rest = rest[2:].split('/', 1) rest = '/' + rest return proto, host, rest
python
def urlsplit(url): """Split an arbitrary url into protocol, host, rest The standard urlsplit does not want to provide 'netloc' for arbitrary protocols; this works around that. :param url: The url to split into component parts """ proto, rest = url.split(':', 1) host = '' if rest[:2] == '//': host, rest = rest[2:].split('/', 1) rest = '/' + rest return proto, host, rest
[ "def", "urlsplit", "(", "url", ")", ":", "proto", ",", "rest", "=", "url", ".", "split", "(", "':'", ",", "1", ")", "host", "=", "''", "if", "rest", "[", ":", "2", "]", "==", "'//'", ":", "host", ",", "rest", "=", "rest", "[", "2", ":", "]", ".", "split", "(", "'/'", ",", "1", ")", "rest", "=", "'/'", "+", "rest", "return", "proto", ",", "host", ",", "rest" ]
Split an arbitrary url into protocol, host, rest The standard urlsplit does not want to provide 'netloc' for arbitrary protocols; this works around that. :param url: The url to split into component parts
[ "Split", "an", "arbitrary", "url", "into", "protocol", "host", "rest" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L55-L68
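Two quick examples of the resulting (protocol, host, rest) tuples; note that host stays empty when the URL has no '//' authority section.

urlsplit('https://bmc.example.com/redfish/v1')
# -> ('https', 'bmc.example.com', '/redfish/v1')
urlsplit('mailto:admin@example.com')
# -> ('mailto', '', 'admin@example.com')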
16,125
openstack/pyghmi
pyghmi/ipmi/private/util.py
get_ipv4
def get_ipv4(hostname): """Get list of ipv4 addresses for hostname """ addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET, socket.SOCK_STREAM) return [addrinfo[x][4][0] for x in range(len(addrinfo))]
python
def get_ipv4(hostname): """Get list of ipv4 addresses for hostname """ addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET, socket.SOCK_STREAM) return [addrinfo[x][4][0] for x in range(len(addrinfo))]
[ "def", "get_ipv4", "(", "hostname", ")", ":", "addrinfo", "=", "socket", ".", "getaddrinfo", "(", "hostname", ",", "None", ",", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "return", "[", "addrinfo", "[", "x", "]", "[", "4", "]", "[", "0", "]", "for", "x", "in", "range", "(", "len", "(", "addrinfo", ")", ")", "]" ]
Get list of ipv4 addresses for hostname
[ "Get", "list", "of", "ipv4", "addresses", "for", "hostname" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L71-L77
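For reference, getaddrinfo returns (family, type, proto, canonname, sockaddr) tuples, and this helper keeps sockaddr[0], the address string, from each:

get_ipv4('localhost')
# -> ['127.0.0.1'] with a typical resolver configuration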
16,126
openstack/pyghmi
pyghmi/ipmi/private/session.py
_aespad
def _aespad(data): """ipmi demands a certain pad scheme, per table 13-20 AES-CBC encrypted payload fields. """ currlen = len(data) + 1 # need to count the pad length field as well neededpad = currlen % 16 if neededpad: # if it happens to be zero, hurray, but otherwise invert the # sense of the padding neededpad = 16 - neededpad padval = 1 pad = bytearray(neededpad) while padval <= neededpad: pad[padval - 1] = padval padval += 1 pad.append(neededpad) return pad
python
def _aespad(data): """ipmi demands a certain pad scheme, per table 13-20 AES-CBC encrypted payload fields. """ currlen = len(data) + 1 # need to count the pad length field as well neededpad = currlen % 16 if neededpad: # if it happens to be zero, hurray, but otherwise invert the # sense of the padding neededpad = 16 - neededpad padval = 1 pad = bytearray(neededpad) while padval <= neededpad: pad[padval - 1] = padval padval += 1 pad.append(neededpad) return pad
[ "def", "_aespad", "(", "data", ")", ":", "currlen", "=", "len", "(", "data", ")", "+", "1", "# need to count the pad length field as well", "neededpad", "=", "currlen", "%", "16", "if", "neededpad", ":", "# if it happens to be zero, hurray, but otherwise invert the", "# sense of the padding", "neededpad", "=", "16", "-", "neededpad", "padval", "=", "1", "pad", "=", "bytearray", "(", "neededpad", ")", "while", "padval", "<=", "neededpad", ":", "pad", "[", "padval", "-", "1", "]", "=", "padval", "padval", "+=", "1", "pad", ".", "append", "(", "neededpad", ")", "return", "pad" ]
ipmi demands a certain pad scheme, per table 13-20 AES-CBC encrypted payload fields.
[ "ipmi", "demands", "a", "certain", "pad", "scheme", "per", "table", "13", "-", "20", "AES", "-", "CBC", "encrypted", "payload", "fields", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L252-L267
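A worked example of the scheme: with 7 bytes of data, currlen is 8, so 8 pad bytes (values 01..08) plus the trailing pad-length byte bring the encrypted block to exactly 16 bytes.

pad = _aespad(b'\x00' * 7)
assert pad == bytearray([1, 2, 3, 4, 5, 6, 7, 8, 8])
# 7 data bytes + 8 pad bytes + 1 length byte == 16
# a 15-byte input needs no pad bytes, only the length byte: bytearray([0])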
16,127
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session._make_bridge_request_msg
def _make_bridge_request_msg(self, channel, netfn, command): """This function generates a message for a bridge request. It is a part of the IPMI payload. """ head = bytearray((constants.IPMI_BMC_ADDRESS, constants.netfn_codes['application'] << 2)) check_sum = _checksum(*head) # NOTE(fengqian): according to IPMI Figure 14-11, rqSWID is set to 81h body = bytearray((0x81, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD, 0x40 | channel)) # NOTE(fengqian): Track request self._add_request_entry((constants.netfn_codes['application'] + 1, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD)) return head + bytearray((check_sum,)) + body
python
def _make_bridge_request_msg(self, channel, netfn, command): """This function generates a message for a bridge request. It is a part of the IPMI payload. """ head = bytearray((constants.IPMI_BMC_ADDRESS, constants.netfn_codes['application'] << 2)) check_sum = _checksum(*head) # NOTE(fengqian): according to IPMI Figure 14-11, rqSWID is set to 81h body = bytearray((0x81, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD, 0x40 | channel)) # NOTE(fengqian): Track request self._add_request_entry((constants.netfn_codes['application'] + 1, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD)) return head + bytearray((check_sum,)) + body
[ "def", "_make_bridge_request_msg", "(", "self", ",", "channel", ",", "netfn", ",", "command", ")", ":", "head", "=", "bytearray", "(", "(", "constants", ".", "IPMI_BMC_ADDRESS", ",", "constants", ".", "netfn_codes", "[", "'application'", "]", "<<", "2", ")", ")", "check_sum", "=", "_checksum", "(", "*", "head", ")", "# NOTE(fengqian): according to IPMI Figure 14-11, rqSWID is set to 81h", "body", "=", "bytearray", "(", "(", "0x81", ",", "self", ".", "seqlun", ",", "constants", ".", "IPMI_SEND_MESSAGE_CMD", ",", "0x40", "|", "channel", ")", ")", "# NOTE(fengqian): Track request", "self", ".", "_add_request_entry", "(", "(", "constants", ".", "netfn_codes", "[", "'application'", "]", "+", "1", ",", "self", ".", "seqlun", ",", "constants", ".", "IPMI_SEND_MESSAGE_CMD", ")", ")", "return", "head", "+", "bytearray", "(", "(", "check_sum", ",", ")", ")", "+", "body" ]
This function generates a message for a bridge request. It is a part of the IPMI payload.
[ "This", "function", "generates", "a", "message", "for", "a", "bridge", "request", ".", "It", "is", "a", "part", "of", "the", "IPMI", "payload", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L611-L624
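_checksum itself is defined elsewhere in this module; for context, the standard IPMI header checksum it is used for here is the two's complement of the byte sum, i.e. the byte that makes the covered fields sum to zero mod 256. A sketch of that formula (the function name is illustrative):

def ipmi_checksum(*data):
    # value v such that (sum(data) + v) % 256 == 0
    return (0x100 - (sum(data) & 0xff)) & 0xff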
16,128
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session.wait_for_rsp
def wait_for_rsp(cls, timeout=None, callout=True): """IPMI Session Event loop iteration This watches for any activity on IPMI handles and handles registered by register_handle_callback. Callers are satisfied in the order that packets return from network, not in the order of calling. :param timeout: Maximum time to wait for data to come across. If unspecified, will autodetect based on earliest timeout """ global iosockets # Assume: # Instance A sends request to BMC B # Then Instance C sends request to BMC D # BMC D was faster, so data comes back before BMC B # Instance C gets to go ahead of Instance A, because # Instance C can get work done, but instance A cannot curtime = _monotonic_time() # There are a number of parties that each has their own timeout # The caller can specify a deadline in timeout argument # each session with active outbound payload has callback to # handle retry/timeout error # each session that is 'alive' wants to send a keepalive every so often. # We want to make sure the most strict request is honored and block for # no more time than that, so that whatever part(ies) need to service in # a deadline, will be honored if timeout != 0: with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] <= curtime: timeout = 0 # exit after one guaranteed pass break if (timeout is not None and timeout < parms['timeout'] - curtime): continue # timeout smaller than the current session # needs timeout = parms['timeout'] - curtime # set new timeout # value with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): if parms['timeout'] <= curtime: timeout = 0 break if (timeout is not None and timeout < parms['timeout'] - curtime): continue timeout = parms['timeout'] - curtime # If the loop above found no sessions wanting *and* the caller had no # timeout, exit function. In this case there is no way a session # could be waiting so we can always return 0 while cls.iterwaiters: waiter = cls.iterwaiters.pop() waiter({'success': True}) # cause a quick exit from the event loop iteration for calling code # to be able to reasonably set up for the next iteration before # a long select comes along if timeout is not None: timeout = 0 if timeout is None: return 0 if _poller(timeout=timeout): while sessionqueue: relsession = sessionqueue.popleft() relsession.process_pktqueue() sessionstodel = [] sessionstokeepalive = [] with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): # if the session is busy inside a command, defer invoking # keepalive until incommand is no longer the case if parms['timeout'] < curtime and not session._isincommand(): cls.keepalive_sessions[session]['timeout'] = \ _monotonic_time() + MAX_IDLE - (random.random() * 4.9) sessionstokeepalive.append(session) for session in sessionstokeepalive: session._keepalive() with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] < curtime: # timeout has expired, time to # give up on it and trigger timeout # response in the respective session # defer deletion until after loop sessionstodel.append(session) # to avoid confusing the for loop for session in sessionstodel: cls.waiting_sessions.pop(session, None) # one loop iteration to make sure recursion doesn't induce # redundant timeouts for session in sessionstodel: session._timedout() return len(cls.waiting_sessions)
python
def wait_for_rsp(cls, timeout=None, callout=True): """IPMI Session Event loop iteration This watches for any activity on IPMI handles and handles registered by register_handle_callback. Callers are satisfied in the order that packets return from network, not in the order of calling. :param timeout: Maximum time to wait for data to come across. If unspecified, will autodetect based on earliest timeout """ global iosockets # Assume: # Instance A sends request to BMC B # Then Instance C sends request to BMC D # BMC D was faster, so data comes back before BMC B # Instance C gets to go ahead of Instance A, because # Instance C can get work done, but instance A cannot curtime = _monotonic_time() # There are a number of parties that each has their own timeout # The caller can specify a deadline in timeout argument # each session with active outbound payload has callback to # handle retry/timeout error # each session that is 'alive' wants to send a keepalive every so often. # We want to make sure the most strict request is honored and block for # no more time than that, so that whatever part(ies) need to service in # a deadline, will be honored if timeout != 0: with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] <= curtime: timeout = 0 # exit after one guaranteed pass break if (timeout is not None and timeout < parms['timeout'] - curtime): continue # timeout smaller than the current session # needs timeout = parms['timeout'] - curtime # set new timeout # value with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): if parms['timeout'] <= curtime: timeout = 0 break if (timeout is not None and timeout < parms['timeout'] - curtime): continue timeout = parms['timeout'] - curtime # If the loop above found no sessions wanting *and* the caller had no # timeout, exit function. In this case there is no way a session # could be waiting so we can always return 0 while cls.iterwaiters: waiter = cls.iterwaiters.pop() waiter({'success': True}) # cause a quick exit from the event loop iteration for calling code # to be able to reasonably set up for the next iteration before # a long select comes along if timeout is not None: timeout = 0 if timeout is None: return 0 if _poller(timeout=timeout): while sessionqueue: relsession = sessionqueue.popleft() relsession.process_pktqueue() sessionstodel = [] sessionstokeepalive = [] with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): # if the session is busy inside a command, defer invoking # keepalive until incommand is no longer the case if parms['timeout'] < curtime and not session._isincommand(): cls.keepalive_sessions[session]['timeout'] = \ _monotonic_time() + MAX_IDLE - (random.random() * 4.9) sessionstokeepalive.append(session) for session in sessionstokeepalive: session._keepalive() with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] < curtime: # timeout has expired, time to # give up on it and trigger timeout # response in the respective session # defer deletion until after loop sessionstodel.append(session) # to avoid confusing the for loop for session in sessionstodel: cls.waiting_sessions.pop(session, None) # one loop iteration to make sure recursion doesn't induce # redundant timeouts for session in sessionstodel: session._timedout() return len(cls.waiting_sessions)
[ "def", "wait_for_rsp", "(", "cls", ",", "timeout", "=", "None", ",", "callout", "=", "True", ")", ":", "global", "iosockets", "# Assume:", "# Instance A sends request to packet B", "# Then Instance C sends request to BMC D", "# BMC D was faster, so data comes back before BMC B", "# Instance C gets to go ahead of Instance A, because", "# Instance C can get work done, but instance A cannot", "curtime", "=", "_monotonic_time", "(", ")", "# There ar a number of parties that each has their own timeout", "# The caller can specify a deadline in timeout argument", "# each session with active outbound payload has callback to", "# handle retry/timout error", "# each session that is 'alive' wants to send a keepalive ever so often.", "# We want to make sure the most strict request is honored and block for", "# no more time than that, so that whatever part(ies) need to service in", "# a deadline, will be honored", "if", "timeout", "!=", "0", ":", "with", "util", ".", "protect", "(", "WAITING_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "waiting_sessions", ")", ":", "if", "parms", "[", "'timeout'", "]", "<=", "curtime", ":", "timeout", "=", "0", "# exit after one guaranteed pass", "break", "if", "(", "timeout", "is", "not", "None", "and", "timeout", "<", "parms", "[", "'timeout'", "]", "-", "curtime", ")", ":", "continue", "# timeout smaller than the current session", "# needs", "timeout", "=", "parms", "[", "'timeout'", "]", "-", "curtime", "# set new timeout", "# value", "with", "util", ".", "protect", "(", "KEEPALIVE_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "keepalive_sessions", ")", ":", "if", "parms", "[", "'timeout'", "]", "<=", "curtime", ":", "timeout", "=", "0", "break", "if", "(", "timeout", "is", "not", "None", "and", "timeout", "<", "parms", "[", "'timeout'", "]", "-", "curtime", ")", ":", "continue", "timeout", "=", "parms", "[", "'timeout'", "]", "-", "curtime", "# If the loop above found no sessions wanting *and* the caller had no", "# timeout, exit function. 
In this case there is no way a session", "# could be waiting so we can always return 0", "while", "cls", ".", "iterwaiters", ":", "waiter", "=", "cls", ".", "iterwaiters", ".", "pop", "(", ")", "waiter", "(", "{", "'success'", ":", "True", "}", ")", "# cause a quick exit from the event loop iteration for calling code", "# to be able to reasonably set up for the next iteration before", "# a long select comes along", "if", "timeout", "is", "not", "None", ":", "timeout", "=", "0", "if", "timeout", "is", "None", ":", "return", "0", "if", "_poller", "(", "timeout", "=", "timeout", ")", ":", "while", "sessionqueue", ":", "relsession", "=", "sessionqueue", ".", "popleft", "(", ")", "relsession", ".", "process_pktqueue", "(", ")", "sessionstodel", "=", "[", "]", "sessionstokeepalive", "=", "[", "]", "with", "util", ".", "protect", "(", "KEEPALIVE_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "keepalive_sessions", ")", ":", "# if the session is busy inside a command, defer invoking", "# keepalive until incommand is no longer the case", "if", "parms", "[", "'timeout'", "]", "<", "curtime", "and", "not", "session", ".", "_isincommand", "(", ")", ":", "cls", ".", "keepalive_sessions", "[", "session", "]", "[", "'timeout'", "]", "=", "_monotonic_time", "(", ")", "+", "MAX_IDLE", "-", "(", "random", ".", "random", "(", ")", "*", "4.9", ")", "sessionstokeepalive", ".", "append", "(", "session", ")", "for", "session", "in", "sessionstokeepalive", ":", "session", ".", "_keepalive", "(", ")", "with", "util", ".", "protect", "(", "WAITING_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "waiting_sessions", ")", ":", "if", "parms", "[", "'timeout'", "]", "<", "curtime", ":", "# timeout has expired, time to", "# give up on it and trigger timeout", "# response in the respective session", "# defer deletion until after loop", "sessionstodel", ".", "append", "(", "session", ")", "# to avoid confusing the for loop", "for", "session", "in", "sessionstodel", ":", "cls", ".", "waiting_sessions", ".", "pop", "(", "session", ",", "None", ")", "# one loop iteration to make sure recursion doesn't induce", "# redundant timeouts", "for", "session", "in", "sessionstodel", ":", "session", ".", "_timedout", "(", ")", "return", "len", "(", "cls", ".", "waiting_sessions", ")" ]
IPMI Session Event loop iteration This watches for any activity on IPMI handles and handles registered by register_handle_callback. Callers are satisfied in the order that packets return from network, not in the order of calling. :param timeout: Maximum time to wait for data to come across. If unspecified, will autodetect based on earliest timeout
[ "IPMI", "Session", "Event", "loop", "iteration" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L1078-L1169
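The deadline negotiation in the first half of wait_for_rsp condenses to: any already-expired party forces an immediate (zero-timeout) pass, otherwise the nearest expiry caps the wait. A compact sketch of that reduction (names are illustrative, not from the module):

def strictest_deadline(timeout, sessions, now):
    for parms in sessions.values():
        remaining = parms['timeout'] - now
        if remaining <= 0:
            return 0        # someone is already overdue; do one immediate pass
        if timeout is None or remaining < timeout:
            timeout = remaining
    return timeout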
16,129
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session.register_keepalive
def register_keepalive(self, cmd, callback): """Register custom keepalive IPMI command This is mostly intended for use by the console code. Calling code would have an easier time just scheduling in their own threading scheme. Such a behavior would naturally cause the default keepalive to not occur anyway if the calling code is at least as aggressive about timing as pyghmi. :param cmd: A dict of arguments to be passed into raw_command :param callback: A function to be called with results of the keepalive :returns: value to identify registration for unregister_keepalive """ regid = random.random() if self._customkeepalives is None: self._customkeepalives = {regid: (cmd, callback)} else: while regid in self._customkeepalives: regid = random.random() self._customkeepalives[regid] = (cmd, callback) return regid
python
def register_keepalive(self, cmd, callback): """Register custom keepalive IPMI command This is mostly intended for use by the console code. Calling code would have an easier time just scheduling in their own threading scheme. Such a behavior would naturally cause the default keepalive to not occur anyway if the calling code is at least as aggressive about timing as pyghmi. :param cmd: A dict of arguments to be passed into raw_command :param callback: A function to be called with results of the keepalive :returns: value to identify registration for unregister_keepalive """ regid = random.random() if self._customkeepalives is None: self._customkeepalives = {regid: (cmd, callback)} else: while regid in self._customkeepalives: regid = random.random() self._customkeepalives[regid] = (cmd, callback) return regid
[ "def", "register_keepalive", "(", "self", ",", "cmd", ",", "callback", ")", ":", "regid", "=", "random", ".", "random", "(", ")", "if", "self", ".", "_customkeepalives", "is", "None", ":", "self", ".", "_customkeepalives", "=", "{", "regid", ":", "(", "cmd", ",", "callback", ")", "}", "else", ":", "while", "regid", "in", "self", ".", "_customkeepalives", ":", "regid", "=", "random", ".", "random", "(", ")", "self", ".", "_customkeepalives", "[", "regid", "]", "=", "(", "cmd", ",", "callback", ")", "return", "regid" ]
Register custom keepalive IPMI command This is mostly intended for use by the console code. Calling code would have an easier time just scheduling in their own threading scheme. Such a behavior would naturally cause the default keepalive to not occur anyway if the calling code is at least as aggressive about timing as pyghmi. :param cmd: A dict of arguments to be passed into raw_command :param callback: A function to be called with results of the keepalive :returns: value to identify registration for unregister_keepalive
[ "Register", "custom", "keepalive", "IPMI", "command" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L1171-L1191
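A usage sketch, assuming an established Session object named session; the cmd dict mirrors the payload-instance-info keepalive the console code above registers.

def on_keepalive(response):
    if 'error' in response:
        print('keepalive failed:', response['error'])

kid = session.register_keepalive(
    cmd={'netfn': 6, 'command': 0x4b, 'data': (1, 1)},
    callback=on_keepalive)
# ... later ...
session.unregister_keepalive(kid)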
16,130
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session._keepalive
def _keepalive(self): """Performs a keepalive to avoid idle disconnect """ try: keptalive = False if self._customkeepalives: kaids = list(self._customkeepalives.keys()) for keepalive in kaids: try: cmd, callback = self._customkeepalives[keepalive] except TypeError: # raw_command made customkeepalives None break except KeyError: # raw command ultimately caused a keepalive to # deregister continue if callable(cmd): cmd() continue keptalive = True cmd['callback'] = self._keepalive_wrapper(callback) self.raw_command(**cmd) if not keptalive: if self.incommand: # if currently in command, no cause to keepalive return self.raw_command(netfn=6, command=1, callback=self._keepalive_wrapper(None)) except exc.IpmiException: self._mark_broken()
python
def _keepalive(self): """Performs a keepalive to avoid idle disconnect """ try: keptalive = False if self._customkeepalives: kaids = list(self._customkeepalives.keys()) for keepalive in kaids: try: cmd, callback = self._customkeepalives[keepalive] except TypeError: # raw_command made customkeepalives None break except KeyError: # raw command ultimately caused a keepalive to # deregister continue if callable(cmd): cmd() continue keptalive = True cmd['callback'] = self._keepalive_wrapper(callback) self.raw_command(**cmd) if not keptalive: if self.incommand: # if currently in command, no cause to keepalive return self.raw_command(netfn=6, command=1, callback=self._keepalive_wrapper(None)) except exc.IpmiException: self._mark_broken()
[ "def", "_keepalive", "(", "self", ")", ":", "try", ":", "keptalive", "=", "False", "if", "self", ".", "_customkeepalives", ":", "kaids", "=", "list", "(", "self", ".", "_customkeepalives", ".", "keys", "(", ")", ")", "for", "keepalive", "in", "kaids", ":", "try", ":", "cmd", ",", "callback", "=", "self", ".", "_customkeepalives", "[", "keepalive", "]", "except", "TypeError", ":", "# raw_command made customkeepalives None", "break", "except", "KeyError", ":", "# raw command ultimately caused a keepalive to", "# deregister", "continue", "if", "callable", "(", "cmd", ")", ":", "cmd", "(", ")", "continue", "keptalive", "=", "True", "cmd", "[", "'callback'", "]", "=", "self", ".", "_keepalive_wrapper", "(", "callback", ")", "self", ".", "raw_command", "(", "*", "*", "cmd", ")", "if", "not", "keptalive", ":", "if", "self", ".", "incommand", ":", "# if currently in command, no cause to keepalive", "return", "self", ".", "raw_command", "(", "netfn", "=", "6", ",", "command", "=", "1", ",", "callback", "=", "self", ".", "_keepalive_wrapper", "(", "None", ")", ")", "except", "exc", ".", "IpmiException", ":", "self", ".", "_mark_broken", "(", ")" ]
Performs a keepalive to avoid idle disconnect
[ "Performs", "a", "keepalive", "to", "avoid", "idle", "disconnect" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L1215-L1245
16,131
openstack/pyghmi
pyghmi/util/webclient.py
SecureHTTPConnection.download
def download(self, url, file): """Download a file to filename or file object """ if isinstance(file, str) or isinstance(file, unicode): file = open(file, 'wb') webclient = self.dupe() webclient.request('GET', url) rsp = webclient.getresponse() self._currdl = rsp self._dlfile = file for chunk in iter(lambda: rsp.read(16384), ''): file.write(chunk) self._currdl = None file.close()
python
def download(self, url, file): """Download a file to filename or file object """ if isinstance(file, str) or isinstance(file, unicode): file = open(file, 'wb') webclient = self.dupe() webclient.request('GET', url) rsp = webclient.getresponse() self._currdl = rsp self._dlfile = file for chunk in iter(lambda: rsp.read(16384), ''): file.write(chunk) self._currdl = None file.close()
[ "def", "download", "(", "self", ",", "url", ",", "file", ")", ":", "if", "isinstance", "(", "file", ",", "str", ")", "or", "isinstance", "(", "file", ",", "unicode", ")", ":", "file", "=", "open", "(", "file", ",", "'wb'", ")", "webclient", "=", "self", ".", "dupe", "(", ")", "webclient", ".", "request", "(", "'GET'", ",", "url", ")", "rsp", "=", "webclient", ".", "getresponse", "(", ")", "self", ".", "_currdl", "=", "rsp", "self", ".", "_dlfile", "=", "file", "for", "chunk", "in", "iter", "(", "lambda", ":", "rsp", ".", "read", "(", "16384", ")", ",", "''", ")", ":", "file", ".", "write", "(", "chunk", ")", "self", ".", "_currdl", "=", "None", "file", ".", "close", "(", ")" ]
Download a file to filename or file object
[ "Download", "a", "file", "to", "filename", "or", "file", "object" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/util/webclient.py#L202-L216
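A hedged usage sketch; the constructor arguments (notably verifycallback) follow this module's conventions but should be verified against the installed version, and the host and paths are placeholders.

from pyghmi.util import webclient

wc = webclient.SecureHTTPConnection('bmc.example.com', 443,
                                    verifycallback=lambda cert: True)  # accept-all: demo only
wc.download('/api/providers/logs', '/tmp/logs.tgz')  # accepts a filename or a file object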
16,132
openstack/pyghmi
pyghmi/util/webclient.py
SecureHTTPConnection.upload
def upload(self, url, filename, data=None, formname=None, otherfields=()): """Upload a file to the url :param url: The URL to POST the file to :param filename: The name of the file :param data: A file object or data to use rather than reading from the file. :return: The body of the response to the upload """ if data is None: data = open(filename, 'rb') self._upbuffer = StringIO.StringIO(get_upload_form(filename, data, formname, otherfields)) ulheaders = self.stdheaders.copy() ulheaders['Content-Type'] = 'multipart/form-data; boundary=' + BND ulheaders['Content-Length'] = len(uploadforms[filename]) self.ulsize = len(uploadforms[filename]) webclient = self.dupe() webclient.request('POST', url, self._upbuffer, ulheaders) rsp = webclient.getresponse() # peer updates in progress should already have pointers, # subsequent transactions will cause memory to needlessly double, # but easiest way to keep memory relatively low try: del uploadforms[filename] except KeyError: # something could have already deleted it pass self.rspstatus = rsp.status if rsp.status != 200: raise Exception('Unexpected response in file upload: ' + rsp.read()) return rsp.read()
python
def upload(self, url, filename, data=None, formname=None, otherfields=()): """Upload a file to the url :param url: The URL to POST the file to :param filename: The name of the file :param data: A file object or data to use rather than reading from the file. :return: The body of the response to the upload """ if data is None: data = open(filename, 'rb') self._upbuffer = StringIO.StringIO(get_upload_form(filename, data, formname, otherfields)) ulheaders = self.stdheaders.copy() ulheaders['Content-Type'] = 'multipart/form-data; boundary=' + BND ulheaders['Content-Length'] = len(uploadforms[filename]) self.ulsize = len(uploadforms[filename]) webclient = self.dupe() webclient.request('POST', url, self._upbuffer, ulheaders) rsp = webclient.getresponse() # peer updates in progress should already have pointers, # subsequent transactions will cause memory to needlessly double, # but easiest way to keep memory relatively low try: del uploadforms[filename] except KeyError: # something could have already deleted it pass self.rspstatus = rsp.status if rsp.status != 200: raise Exception('Unexpected response in file upload: ' + rsp.read()) return rsp.read()
[ "def", "upload", "(", "self", ",", "url", ",", "filename", ",", "data", "=", "None", ",", "formname", "=", "None", ",", "otherfields", "=", "(", ")", ")", ":", "if", "data", "is", "None", ":", "data", "=", "open", "(", "filename", ",", "'rb'", ")", "self", ".", "_upbuffer", "=", "StringIO", ".", "StringIO", "(", "get_upload_form", "(", "filename", ",", "data", ",", "formname", ",", "otherfields", ")", ")", "ulheaders", "=", "self", ".", "stdheaders", ".", "copy", "(", ")", "ulheaders", "[", "'Content-Type'", "]", "=", "'multipart/form-data; boundary='", "+", "BND", "ulheaders", "[", "'Content-Length'", "]", "=", "len", "(", "uploadforms", "[", "filename", "]", ")", "self", ".", "ulsize", "=", "len", "(", "uploadforms", "[", "filename", "]", ")", "webclient", "=", "self", ".", "dupe", "(", ")", "webclient", ".", "request", "(", "'POST'", ",", "url", ",", "self", ".", "_upbuffer", ",", "ulheaders", ")", "rsp", "=", "webclient", ".", "getresponse", "(", ")", "# peer updates in progress should already have pointers,", "# subsequent transactions will cause memory to needlessly double,", "# but easiest way to keep memory relatively low", "try", ":", "del", "uploadforms", "[", "filename", "]", "except", "KeyError", ":", "# something could have already deleted it", "pass", "self", ".", "rspstatus", "=", "rsp", ".", "status", "if", "rsp", ".", "status", "!=", "200", ":", "raise", "Exception", "(", "'Unexpected response in file upload: '", "+", "rsp", ".", "read", "(", ")", ")", "return", "rsp", ".", "read", "(", ")" ]
Upload a file to the url :param url: The URL to POST the file to :param filename: The name of the file :param data: A file object or data to use rather than reading from the file. :return: The body of the response to the upload
[ "Upload", "a", "file", "to", "the", "url" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/util/webclient.py#L224-L257
16,133
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/inventory.py
parse_inventory_category_entry
def parse_inventory_category_entry(raw, fields): """Parses one entry in an inventory category. :param raw: the raw data for the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: tuple -- a tuple with the number of bytes read and a dictionary representing the entry. """ r = raw obj = {} bytes_read = 0 discard = False for field in fields: value = struct.unpack_from(field.fmt, r)[0] read = struct.calcsize(field.fmt) bytes_read += read r = r[read:] # If this entry is not actually present, just parse and then discard it if field.presence and not bool(value): discard = True if not field.include: continue if (field.fmt[-1] == "s"): value = value.rstrip("\x00") if (field.mapper and value in field.mapper): value = field.mapper[value] if (field.valuefunc): value = field.valuefunc(value) if not field.multivaluefunc: obj[field.name] = value else: for key in value: obj[key] = value[key] if discard: obj = None return bytes_read, obj
python
def parse_inventory_category_entry(raw, fields): """Parses one entry in an inventory category. :param raw: the raw data for the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: tuple -- a tuple with the number of bytes read and a dictionary representing the entry. """ r = raw obj = {} bytes_read = 0 discard = False for field in fields: value = struct.unpack_from(field.fmt, r)[0] read = struct.calcsize(field.fmt) bytes_read += read r = r[read:] # If this entry is not actually present, just parse and then discard it if field.presence and not bool(value): discard = True if not field.include: continue if (field.fmt[-1] == "s"): value = value.rstrip("\x00") if (field.mapper and value in field.mapper): value = field.mapper[value] if (field.valuefunc): value = field.valuefunc(value) if not field.multivaluefunc: obj[field.name] = value else: for key in value: obj[key] = value[key] if discard: obj = None return bytes_read, obj
[ "def", "parse_inventory_category_entry", "(", "raw", ",", "fields", ")", ":", "r", "=", "raw", "obj", "=", "{", "}", "bytes_read", "=", "0", "discard", "=", "False", "for", "field", "in", "fields", ":", "value", "=", "struct", ".", "unpack_from", "(", "field", ".", "fmt", ",", "r", ")", "[", "0", "]", "read", "=", "struct", ".", "calcsize", "(", "field", ".", "fmt", ")", "bytes_read", "+=", "read", "r", "=", "r", "[", "read", ":", "]", "# If this entry is not actually present, just parse and then discard it", "if", "field", ".", "presence", "and", "not", "bool", "(", "value", ")", ":", "discard", "=", "True", "if", "not", "field", ".", "include", ":", "continue", "if", "(", "field", ".", "fmt", "[", "-", "1", "]", "==", "\"s\"", ")", ":", "value", "=", "value", ".", "rstrip", "(", "\"\\x00\"", ")", "if", "(", "field", ".", "mapper", "and", "value", "in", "field", ".", "mapper", ")", ":", "value", "=", "field", ".", "mapper", "[", "value", "]", "if", "(", "field", ".", "valuefunc", ")", ":", "value", "=", "field", ".", "valuefunc", "(", "value", ")", "if", "not", "field", ".", "multivaluefunc", ":", "obj", "[", "field", ".", "name", "]", "=", "value", "else", ":", "for", "key", "in", "value", ":", "obj", "[", "key", "]", "=", "value", "[", "key", "]", "if", "discard", ":", "obj", "=", "None", "return", "bytes_read", ",", "obj" ]
Parses one entry in an inventory category. :param raw: the raw data for the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: tuple -- a tuple with the number of bytes read and a dictionary representing the entry.
[ "Parses", "one", "entry", "in", "an", "inventory", "category", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/inventory.py#L105-L147
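A sketch of feeding the parser, using a namedtuple stand-in for the module's EntryField (the real class carries the same attributes the parser reads); the field definitions and raw bytes are invented, and the string handling assumes the Python 2 semantics this module targets.

import collections
import struct

EntryField = collections.namedtuple(
    'EntryField', 'name fmt include mapper valuefunc multivaluefunc presence')

fields = [
    EntryField('index', 'B', True, None, None, False, False),
    EntryField('name', '8s', True, None, None, False, False),
]
raw = struct.pack('B8s', 3, 'DIMM_3')
# parse_inventory_category_entry(raw, fields) -> (9, {'index': 3, 'name': 'DIMM_3'})
# the trailing NULs of the fixed-width 's' field are stripped by the parser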
16,134
openstack/pyghmi
pyghmi/ipmi/private/serversession.py
IpmiServer.sessionless_data
def sessionless_data(self, data, sockaddr): """Examines unsolicited packet and decides appropriate action. For a listening IpmiServer, a packet without an active session comes here for examination. If it is something that is utterly sessionless (e.g. get channel authentication), send the appropriate response. If it is a get session challenge or open rmcp+ request, spawn a session to handle the context. """ if len(data) < 22: return data = bytearray(data) if not (data[0] == 6 and data[2:4] == b'\xff\x07'): # not ipmi return if data[4] == 6: # ipmi 2 payload... payloadtype = data[5] if payloadtype not in (0, 16): return if payloadtype == 16: # new session to handle conversation ServerSession(self.authdata, self.kg, sockaddr, self.serversocket, data[16:], self.uuid, bmc=self) return # ditch two bytes, because ipmi2 header is two # bytes longer than ipmi1 (payload type added, payload length 2). data = data[2:] myaddr, netfnlun = struct.unpack('2B', bytes(data[14:16])) netfn = (netfnlun & 0b11111100) >> 2 mylun = netfnlun & 0b11 if netfn == 6: # application request if data[19] == 0x38: # cmd = get channel auth capabilities verchannel, level = struct.unpack('2B', bytes(data[20:22])) version = verchannel & 0b10000000 if version != 0b10000000: return channel = verchannel & 0b1111 if channel != 0xe: return (clientaddr, clientlun) = struct.unpack( 'BB', bytes(data[17:19])) clientseq = clientlun >> 2 clientlun &= 0b11 # Lun is only the least significant bits level &= 0b1111 self.send_auth_cap(myaddr, mylun, clientaddr, clientlun, clientseq, sockaddr)
python
def sessionless_data(self, data, sockaddr): """Examines unsolocited packet and decides appropriate action. For a listening IpmiServer, a packet without an active session comes here for examination. If it is something that is utterly sessionless (e.g. get channel authentication), send the appropriate response. If it is a get session challenge or open rmcp+ request, spawn a session to handle the context. """ if len(data) < 22: return data = bytearray(data) if not (data[0] == 6 and data[2:4] == b'\xff\x07'): # not ipmi return if data[4] == 6: # ipmi 2 payload... payloadtype = data[5] if payloadtype not in (0, 16): return if payloadtype == 16: # new session to handle conversation ServerSession(self.authdata, self.kg, sockaddr, self.serversocket, data[16:], self.uuid, bmc=self) return # ditch two byte, because ipmi2 header is two # bytes longer than ipmi1 (payload type added, payload length 2). data = data[2:] myaddr, netfnlun = struct.unpack('2B', bytes(data[14:16])) netfn = (netfnlun & 0b11111100) >> 2 mylun = netfnlun & 0b11 if netfn == 6: # application request if data[19] == 0x38: # cmd = get channel auth capabilities verchannel, level = struct.unpack('2B', bytes(data[20:22])) version = verchannel & 0b10000000 if version != 0b10000000: return channel = verchannel & 0b1111 if channel != 0xe: return (clientaddr, clientlun) = struct.unpack( 'BB', bytes(data[17:19])) clientseq = clientlun >> 2 clientlun &= 0b11 # Lun is only the least significant bits level &= 0b1111 self.send_auth_cap(myaddr, mylun, clientaddr, clientlun, clientseq, sockaddr)
[ "def", "sessionless_data", "(", "self", ",", "data", ",", "sockaddr", ")", ":", "if", "len", "(", "data", ")", "<", "22", ":", "return", "data", "=", "bytearray", "(", "data", ")", "if", "not", "(", "data", "[", "0", "]", "==", "6", "and", "data", "[", "2", ":", "4", "]", "==", "b'\\xff\\x07'", ")", ":", "# not ipmi", "return", "if", "data", "[", "4", "]", "==", "6", ":", "# ipmi 2 payload...", "payloadtype", "=", "data", "[", "5", "]", "if", "payloadtype", "not", "in", "(", "0", ",", "16", ")", ":", "return", "if", "payloadtype", "==", "16", ":", "# new session to handle conversation", "ServerSession", "(", "self", ".", "authdata", ",", "self", ".", "kg", ",", "sockaddr", ",", "self", ".", "serversocket", ",", "data", "[", "16", ":", "]", ",", "self", ".", "uuid", ",", "bmc", "=", "self", ")", "return", "# ditch two byte, because ipmi2 header is two", "# bytes longer than ipmi1 (payload type added, payload length 2).", "data", "=", "data", "[", "2", ":", "]", "myaddr", ",", "netfnlun", "=", "struct", ".", "unpack", "(", "'2B'", ",", "bytes", "(", "data", "[", "14", ":", "16", "]", ")", ")", "netfn", "=", "(", "netfnlun", "&", "0b11111100", ")", ">>", "2", "mylun", "=", "netfnlun", "&", "0b11", "if", "netfn", "==", "6", ":", "# application request", "if", "data", "[", "19", "]", "==", "0x38", ":", "# cmd = get channel auth capabilities", "verchannel", ",", "level", "=", "struct", ".", "unpack", "(", "'2B'", ",", "bytes", "(", "data", "[", "20", ":", "22", "]", ")", ")", "version", "=", "verchannel", "&", "0b10000000", "if", "version", "!=", "0b10000000", ":", "return", "channel", "=", "verchannel", "&", "0b1111", "if", "channel", "!=", "0xe", ":", "return", "(", "clientaddr", ",", "clientlun", ")", "=", "struct", ".", "unpack", "(", "'BB'", ",", "bytes", "(", "data", "[", "17", ":", "19", "]", ")", ")", "clientseq", "=", "clientlun", ">>", "2", "clientlun", "&=", "0b11", "# Lun is only the least significant bits", "level", "&=", "0b1111", "self", ".", "send_auth_cap", "(", "myaddr", ",", "mylun", ",", "clientaddr", ",", "clientlun", ",", "clientseq", ",", "sockaddr", ")" ]
Examines unsolicited packet and decides appropriate action. For a listening IpmiServer, a packet without an active session comes here for examination. If it is something that is utterly sessionless (e.g. get channel authentication), send the appropriate response. If it is a get session challenge or open rmcp+ request, spawn a session to handle the context.
[ "Examines", "unsolocited", "packet", "and", "decides", "appropriate", "action", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/serversession.py#L297-L341
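The header test at the top of sessionless_data can be isolated into a tiny predicate; the byte meanings (RMCP version 6, 0xff/0x07 marking an IPMI-class message) follow the check in the code above, and the sample packets are fabricated for illustration:

def looks_like_ipmi(packet):
    data = bytearray(packet)
    # RMCP version 6 in byte 0; bytes 2-3 are 0xff (no RMCP ack expected)
    # and 0x07 (IPMI message class), mirroring sessionless_data's check.
    return len(data) >= 22 and data[0] == 6 and data[2:4] == b'\xff\x07'

print(looks_like_ipmi(b'\x06\x00\xff\x07' + b'\x00' * 18))  # True
print(looks_like_ipmi(b'not ipmi'))                         # False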
16,135
openstack/pyghmi
pyghmi/ipmi/private/serversession.py
IpmiServer.set_kg
def set_kg(self, kg): """Sets the Kg for the BMC to use In RAKP, Kg is a BMC-specific integrity key that can be set. If not set, Kuid is used for the integrity key """ try: self.kg = kg.encode('utf-8') except AttributeError: self.kg = kg
python
def set_kg(self, kg): """Sets the Kg for the BMC to use In RAKP, Kg is a BMC-specific integrity key that can be set. If not set, Kuid is used for the integrity key """ try: self.kg = kg.encode('utf-8') except AttributeError: self.kg = kg
[ "def", "set_kg", "(", "self", ",", "kg", ")", ":", "try", ":", "self", ".", "kg", "=", "kg", ".", "encode", "(", "'utf-8'", ")", "except", "AttributeError", ":", "self", ".", "kg", "=", "kg" ]
Sets the Kg for the BMC to use In RAKP, Kg is a BMC-specific integrity key that can be set. If not set, Kuid is used for the integrity key
[ "Sets", "the", "Kg", "for", "the", "BMC", "to", "use" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/serversession.py#L343-L352
16,136
astraw/stdeb
stdeb/util.py
source_debianize_name
def source_debianize_name(name): "make name acceptable as a Debian source package name" name = name.replace('_','-') name = name.replace('.','-') name = name.lower() return name
python
def source_debianize_name(name): "make name acceptable as a Debian source package name" name = name.replace('_','-') name = name.replace('.','-') name = name.lower() return name
[ "def", "source_debianize_name", "(", "name", ")", ":", "name", "=", "name", ".", "replace", "(", "'_'", ",", "'-'", ")", "name", "=", "name", ".", "replace", "(", "'.'", ",", "'-'", ")", "name", "=", "name", ".", "lower", "(", ")", "return", "name" ]
make name acceptable as a Debian source package name
[ "make", "name", "acceptable", "as", "a", "Debian", "source", "package", "name" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L220-L225
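A quick usage check (assuming stdeb is importable; the import path follows the entry's own file path):

from stdeb.util import source_debianize_name

print(source_debianize_name('My_Package.Name'))  # -> 'my-package-name'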
16,137
astraw/stdeb
stdeb/util.py
get_date_822
def get_date_822(): """return output of 822-date command""" cmd = '/bin/date' if not os.path.exists(cmd): raise ValueError('%s command does not exist.'%cmd) args = [cmd,'-R'] result = get_cmd_stdout(args).strip() result = normstr(result) return result
python
def get_date_822(): """return output of 822-date command""" cmd = '/bin/date' if not os.path.exists(cmd): raise ValueError('%s command does not exist.'%cmd) args = [cmd,'-R'] result = get_cmd_stdout(args).strip() result = normstr(result) return result
[ "def", "get_date_822", "(", ")", ":", "cmd", "=", "'/bin/date'", "if", "not", "os", ".", "path", ".", "exists", "(", "cmd", ")", ":", "raise", "ValueError", "(", "'%s command does not exist.'", "%", "cmd", ")", "args", "=", "[", "cmd", ",", "'-R'", "]", "result", "=", "get_cmd_stdout", "(", "args", ")", ".", "strip", "(", ")", "result", "=", "normstr", "(", "result", ")", "return", "result" ]
return date in RFC 822 format, as produced by `date -R` (replacing the old 822-date command)
[ "return", "output", "of", "822", "-", "date", "command" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L261-L269
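The same RFC 822 date string can be produced without shelling out to /bin/date; this stdlib alternative is a sketch, not stdeb's implementation:

from email.utils import formatdate

print(formatdate(localtime=True))
# e.g. 'Tue, 02 Apr 2019 10:15:22 +0200'; same shape as `date -R`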
16,138
astraw/stdeb
stdeb/util.py
make_tarball
def make_tarball(tarball_fname,directory,cwd=None): "create a tarball from a directory" if tarball_fname.endswith('.gz'): opts = 'czf' else: opts = 'cf' args = ['/bin/tar',opts,tarball_fname,directory] process_command(args, cwd=cwd)
python
def make_tarball(tarball_fname,directory,cwd=None): "create a tarball from a directory" if tarball_fname.endswith('.gz'): opts = 'czf' else: opts = 'cf' args = ['/bin/tar',opts,tarball_fname,directory] process_command(args, cwd=cwd)
[ "def", "make_tarball", "(", "tarball_fname", ",", "directory", ",", "cwd", "=", "None", ")", ":", "if", "tarball_fname", ".", "endswith", "(", "'.gz'", ")", ":", "opts", "=", "'czf'", "else", ":", "opts", "=", "'cf'", "args", "=", "[", "'/bin/tar'", ",", "opts", ",", "tarball_fname", ",", "directory", "]", "process_command", "(", "args", ",", "cwd", "=", "cwd", ")" ]
create a tarball from a directory
[ "create", "a", "tarball", "from", "a", "directory" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L458-L463
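A portable equivalent built on the tarfile module (a sketch under the same .gz-suffix convention, not stdeb's code, which shells out to /bin/tar):

import os
import tarfile

def make_tarball_py(tarball_fname, directory, cwd=None):
    mode = 'w:gz' if tarball_fname.endswith('.gz') else 'w'
    path = directory if cwd is None else os.path.join(cwd, directory)
    with tarfile.open(tarball_fname, mode) as tar:
        tar.add(path, arcname=os.path.basename(directory))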
16,139
astraw/stdeb
stdeb/util.py
expand_tarball
def expand_tarball(tarball_fname,cwd=None): "expand a tarball" if tarball_fname.endswith('.gz'): opts = 'xzf' elif tarball_fname.endswith('.bz2'): opts = 'xjf' else: opts = 'xf' args = ['/bin/tar',opts,tarball_fname] process_command(args, cwd=cwd)
python
def expand_tarball(tarball_fname,cwd=None): "expand a tarball" if tarball_fname.endswith('.gz'): opts = 'xzf' elif tarball_fname.endswith('.bz2'): opts = 'xjf' else: opts = 'xf' args = ['/bin/tar',opts,tarball_fname] process_command(args, cwd=cwd)
[ "def", "expand_tarball", "(", "tarball_fname", ",", "cwd", "=", "None", ")", ":", "if", "tarball_fname", ".", "endswith", "(", "'.gz'", ")", ":", "opts", "=", "'xzf'", "elif", "tarball_fname", ".", "endswith", "(", "'.bz2'", ")", ":", "opts", "=", "'xjf'", "else", ":", "opts", "=", "'xf'", "args", "=", "[", "'/bin/tar'", ",", "opts", ",", "tarball_fname", "]", "process_command", "(", "args", ",", "cwd", "=", "cwd", ")" ]
expand a tarball
[ "expand", "a", "tarball" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L466-L472
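tarfile can likewise replace the tar invocation here; mode 'r:*' lets the module sniff .gz/.bz2 compression instead of branching on the filename suffix (again a sketch, not the project's code):

import tarfile

def expand_tarball_py(tarball_fname, cwd='.'):
    with tarfile.open(tarball_fname, 'r:*') as tar:
        tar.extractall(path=cwd)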
16,140
astraw/stdeb
stdeb/util.py
expand_zip
def expand_zip(zip_fname,cwd=None): "expand a zip" unzip_path = '/usr/bin/unzip' if not os.path.exists(unzip_path): log.error('ERROR: {} does not exist'.format(unzip_path)) sys.exit(1) args = [unzip_path, zip_fname] # Does it have a top dir res = subprocess.Popen( [args[0], '-l', args[1]], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) contents = [] for line in res.stdout.readlines()[3:-2]: contents.append(line.split()[-1]) commonprefix = os.path.commonprefix(contents) if not commonprefix: extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4])) args.extend(['-d', os.path.abspath(extdir)]) process_command(args, cwd=cwd)
python
def expand_zip(zip_fname,cwd=None): "expand a zip" unzip_path = '/usr/bin/unzip' if not os.path.exists(unzip_path): log.error('ERROR: {} does not exist'.format(unzip_path)) sys.exit(1) args = [unzip_path, zip_fname] # Does it have a top dir res = subprocess.Popen( [args[0], '-l', args[1]], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) contents = [] for line in res.stdout.readlines()[3:-2]: contents.append(line.split()[-1]) commonprefix = os.path.commonprefix(contents) if not commonprefix: extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4])) args.extend(['-d', os.path.abspath(extdir)]) process_command(args, cwd=cwd)
[ "def", "expand_zip", "(", "zip_fname", ",", "cwd", "=", "None", ")", ":", "unzip_path", "=", "'/usr/bin/unzip'", "if", "not", "os", ".", "path", ".", "exists", "(", "unzip_path", ")", ":", "log", ".", "error", "(", "'ERROR: {} does not exist'", ".", "format", "(", "unzip_path", ")", ")", "sys", ".", "exit", "(", "1", ")", "args", "=", "[", "unzip_path", ",", "zip_fname", "]", "# Does it have a top dir", "res", "=", "subprocess", ".", "Popen", "(", "[", "args", "[", "0", "]", ",", "'-l'", ",", "args", "[", "1", "]", "]", ",", "cwd", "=", "cwd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", ")", "contents", "=", "[", "]", "for", "line", "in", "res", ".", "stdout", ".", "readlines", "(", ")", "[", "3", ":", "-", "2", "]", ":", "contents", ".", "append", "(", "line", ".", "split", "(", ")", "[", "-", "1", "]", ")", "commonprefix", "=", "os", ".", "path", ".", "commonprefix", "(", "contents", ")", "if", "not", "commonprefix", ":", "extdir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "os", ".", "path", ".", "basename", "(", "zip_fname", "[", ":", "-", "4", "]", ")", ")", "args", ".", "extend", "(", "[", "'-d'", ",", "os", ".", "path", ".", "abspath", "(", "extdir", ")", "]", ")", "process_command", "(", "args", ",", "cwd", "=", "cwd", ")" ]
expand a zip
[ "expand", "a", "zip" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L475-L496
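A zipfile-based sketch of the same behaviour, including the quirk that an archive without a common top-level prefix gets its own extraction directory:

import os
import zipfile

def expand_zip_py(zip_fname, cwd='.'):
    with zipfile.ZipFile(zip_fname) as zf:
        target = cwd
        if not os.path.commonprefix(zf.namelist()):
            # No shared top directory: extract into one named after the zip.
            target = os.path.join(cwd, os.path.basename(zip_fname)[:-4])
        zf.extractall(path=target)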
16,141
astraw/stdeb
stdeb/util.py
parse_vals
def parse_vals(cfg,section,option): """parse comma separated values in debian control file style from .cfg""" try: vals = cfg.get(section,option) except ConfigParser.NoSectionError as err: if section != 'DEFAULT': vals = cfg.get('DEFAULT',option) else: raise err vals = vals.split('#')[0] vals = vals.strip() vals = vals.split(',') vals = [v.strip() for v in vals] vals = [v for v in vals if len(v)] return vals
python
def parse_vals(cfg,section,option): """parse comma separated values in debian control file style from .cfg""" try: vals = cfg.get(section,option) except ConfigParser.NoSectionError as err: if section != 'DEFAULT': vals = cfg.get('DEFAULT',option) else: raise err vals = vals.split('#')[0] vals = vals.strip() vals = vals.split(',') vals = [v.strip() for v in vals] vals = [v for v in vals if len(v)] return vals
[ "def", "parse_vals", "(", "cfg", ",", "section", ",", "option", ")", ":", "try", ":", "vals", "=", "cfg", ".", "get", "(", "section", ",", "option", ")", "except", "ConfigParser", ".", "NoSectionError", "as", "err", ":", "if", "section", "!=", "'DEFAULT'", ":", "vals", "=", "cfg", ".", "get", "(", "'DEFAULT'", ",", "option", ")", "else", ":", "raise", "err", "vals", "=", "vals", ".", "split", "(", "'#'", ")", "[", "0", "]", "vals", "=", "vals", ".", "strip", "(", ")", "vals", "=", "vals", ".", "split", "(", "','", ")", "vals", "=", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "vals", "]", "vals", "=", "[", "v", "for", "v", "in", "vals", "if", "len", "(", "v", ")", "]", "return", "vals" ]
parse comma separated values in debian control file style from .cfg
[ "parse", "comma", "separated", "values", "in", "debian", "control", "file", "style", "from", ".", "cfg" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L595-L609
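The cleanup pipeline above, shown end to end on a debian-control style value (Python 3 configparser for brevity; the original supports Python 2 as well):

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[DEFAULT]\nDepends = python-foo, python-bar # trailing note\n")

raw = cfg.get('DEFAULT', 'Depends')
# comment stripped, split on commas, whitespace trimmed, empties dropped
vals = [v.strip() for v in raw.split('#')[0].split(',') if v.strip()]
print(vals)  # -> ['python-foo', 'python-bar']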
16,142
astraw/stdeb
stdeb/util.py
parse_val
def parse_val(cfg,section,option): """extract a single value from .cfg""" vals = parse_vals(cfg,section,option) if len(vals)==0: return '' else: assert len(vals)==1, (section, option, vals, type(vals)) return vals[0]
python
def parse_val(cfg,section,option): """extract a single value from .cfg""" vals = parse_vals(cfg,section,option) if len(vals)==0: return '' else: assert len(vals)==1, (section, option, vals, type(vals)) return vals[0]
[ "def", "parse_val", "(", "cfg", ",", "section", ",", "option", ")", ":", "vals", "=", "parse_vals", "(", "cfg", ",", "section", ",", "option", ")", "if", "len", "(", "vals", ")", "==", "0", ":", "return", "''", "else", ":", "assert", "len", "(", "vals", ")", "==", "1", ",", "(", "section", ",", "option", ",", "vals", ",", "type", "(", "vals", ")", ")", "return", "vals", "[", "0", "]" ]
extract a single value from .cfg
[ "extract", "a", "single", "value", "from", ".", "cfg" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L611-L618
16,143
astraw/stdeb
stdeb/util.py
check_cfg_files
def check_cfg_files(cfg_files,module_name): """check if the configuration files actually specify something If config files are given, give warning if they don't contain information. This may indicate a wrong module name name, for example. """ cfg = ConfigParser.SafeConfigParser() cfg.read(cfg_files) if cfg.has_section(module_name): section_items = cfg.items(module_name) else: section_items = [] default_items = cfg.items('DEFAULT') n_items = len(section_items) + len(default_items) if n_items==0: log.warn('configuration files were specified, but no options were ' 'found in "%s" or "DEFAULT" sections.' % (module_name,) )
python
def check_cfg_files(cfg_files,module_name): """check if the configuration files actually specify something If config files are given, give warning if they don't contain information. This may indicate a wrong module name name, for example. """ cfg = ConfigParser.SafeConfigParser() cfg.read(cfg_files) if cfg.has_section(module_name): section_items = cfg.items(module_name) else: section_items = [] default_items = cfg.items('DEFAULT') n_items = len(section_items) + len(default_items) if n_items==0: log.warn('configuration files were specified, but no options were ' 'found in "%s" or "DEFAULT" sections.' % (module_name,) )
[ "def", "check_cfg_files", "(", "cfg_files", ",", "module_name", ")", ":", "cfg", "=", "ConfigParser", ".", "SafeConfigParser", "(", ")", "cfg", ".", "read", "(", "cfg_files", ")", "if", "cfg", ".", "has_section", "(", "module_name", ")", ":", "section_items", "=", "cfg", ".", "items", "(", "module_name", ")", "else", ":", "section_items", "=", "[", "]", "default_items", "=", "cfg", ".", "items", "(", "'DEFAULT'", ")", "n_items", "=", "len", "(", "section_items", ")", "+", "len", "(", "default_items", ")", "if", "n_items", "==", "0", ":", "log", ".", "warn", "(", "'configuration files were specified, but no options were '", "'found in \"%s\" or \"DEFAULT\" sections.'", "%", "(", "module_name", ",", ")", ")" ]
check if the configuration files actually specify something If config files are given, a warning is issued if they don't contain information. This may indicate a wrong module name, for example.
[ "check", "if", "the", "configuration", "files", "actually", "specify", "something" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L667-L686
16,144
astraw/stdeb
stdeb/transport.py
RequestsTransport._build_url
def _build_url(self, host, handler): """ Build a url for our request based on the host, handler and use_http property """ scheme = 'https' if self.use_https else 'http' return '%s://%s/%s' % (scheme, host, handler)
python
def _build_url(self, host, handler): """ Build a url for our request based on the host, handler and use_http property """ scheme = 'https' if self.use_https else 'http' return '%s://%s/%s' % (scheme, host, handler)
[ "def", "_build_url", "(", "self", ",", "host", ",", "handler", ")", ":", "scheme", "=", "'https'", "if", "self", ".", "use_https", "else", "'http'", "return", "'%s://%s/%s'", "%", "(", "scheme", ",", "host", ",", "handler", ")" ]
Build a url for our request based on the host, handler and use_https property
[ "Build", "a", "url", "for", "our", "request", "based", "on", "the", "host", "handler", "and", "use_http", "property" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/transport.py#L90-L96
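The scheme selection, isolated as a standalone check (host and handler values are made up for the demo):

def build_url(host, handler, use_https=True):
    scheme = 'https' if use_https else 'http'
    return '%s://%s/%s' % (scheme, host, handler)

print(build_url('pypi.org', 'pypi'))         # -> 'https://pypi.org/pypi'
print(build_url('pypi.org', 'pypi', False))  # -> 'http://pypi.org/pypi'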
16,145
nwhitehead/pyfluidsynth
fluidsynth.py
Synth.setting
def setting(self, opt, val): """change an arbitrary synth setting, type-smart""" opt = opt.encode() if isinstance(val, basestring): fluid_settings_setstr(self.settings, opt, val) elif isinstance(val, int): fluid_settings_setint(self.settings, opt, val) elif isinstance(val, float): fluid_settings_setnum(self.settings, opt, val)
python
def setting(self, opt, val): """change an arbitrary synth setting, type-smart""" opt = opt.encode() if isinstance(val, basestring): fluid_settings_setstr(self.settings, opt, val) elif isinstance(val, int): fluid_settings_setint(self.settings, opt, val) elif isinstance(val, float): fluid_settings_setnum(self.settings, opt, val)
[ "def", "setting", "(", "self", ",", "opt", ",", "val", ")", ":", "opt", "=", "opt", ".", "encode", "(", ")", "if", "isinstance", "(", "val", ",", "basestring", ")", ":", "fluid_settings_setstr", "(", "self", ".", "settings", ",", "opt", ",", "val", ")", "elif", "isinstance", "(", "val", ",", "int", ")", ":", "fluid_settings_setint", "(", "self", ".", "settings", ",", "opt", ",", "val", ")", "elif", "isinstance", "(", "val", ",", "float", ")", ":", "fluid_settings_setnum", "(", "self", ".", "settings", ",", "opt", ",", "val", ")" ]
change an arbitrary synth setting, type-smart
[ "change", "an", "arbitrary", "synth", "setting", "type", "-", "smart" ]
9a8ecee996e83a279e8d29d75e8a859aee4aba67
https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L421-L429
16,146
nwhitehead/pyfluidsynth
fluidsynth.py
Synth.start
def start(self, driver=None, device=None, midi_driver=None): """Start audio output driver in separate background thread Call this function any time after creating the Synth object. If you don't call this function, use get_samples() to generate samples. Optional keyword argument: driver : which audio driver to use for output Possible choices: 'alsa', 'oss', 'jack', 'portaudio' 'sndmgr', 'coreaudio', 'Direct Sound' device: the device to use for audio output Not all drivers will be available for every platform, it depends on which drivers were compiled into FluidSynth for your platform. """ if driver is not None: assert (driver in ['alsa', 'oss', 'jack', 'portaudio', 'sndmgr', 'coreaudio', 'Direct Sound', 'pulseaudio']) fluid_settings_setstr(self.settings, b'audio.driver', driver.encode()) if device is not None: fluid_settings_setstr(self.settings, str('audio.%s.device' % (driver)).encode(), device.encode()) self.audio_driver = new_fluid_audio_driver(self.settings, self.synth) if midi_driver is not None: assert (midi_driver in ['alsa_seq', 'alsa_raw', 'oss', 'winmidi', 'midishare', 'coremidi']) fluid_settings_setstr(self.settings, b'midi.driver', midi_driver.encode()) self.router = new_fluid_midi_router(self.settings, fluid_synth_handle_midi_event, self.synth) fluid_synth_set_midi_router(self.synth, self.router) self.midi_driver = new_fluid_midi_driver(self.settings, fluid_midi_router_handle_midi_event, self.router)
python
def start(self, driver=None, device=None, midi_driver=None): """Start audio output driver in separate background thread Call this function any time after creating the Synth object. If you don't call this function, use get_samples() to generate samples. Optional keyword argument: driver : which audio driver to use for output Possible choices: 'alsa', 'oss', 'jack', 'portaudio' 'sndmgr', 'coreaudio', 'Direct Sound' device: the device to use for audio output Not all drivers will be available for every platform, it depends on which drivers were compiled into FluidSynth for your platform. """ if driver is not None: assert (driver in ['alsa', 'oss', 'jack', 'portaudio', 'sndmgr', 'coreaudio', 'Direct Sound', 'pulseaudio']) fluid_settings_setstr(self.settings, b'audio.driver', driver.encode()) if device is not None: fluid_settings_setstr(self.settings, str('audio.%s.device' % (driver)).encode(), device.encode()) self.audio_driver = new_fluid_audio_driver(self.settings, self.synth) if midi_driver is not None: assert (midi_driver in ['alsa_seq', 'alsa_raw', 'oss', 'winmidi', 'midishare', 'coremidi']) fluid_settings_setstr(self.settings, b'midi.driver', midi_driver.encode()) self.router = new_fluid_midi_router(self.settings, fluid_synth_handle_midi_event, self.synth) fluid_synth_set_midi_router(self.synth, self.router) self.midi_driver = new_fluid_midi_driver(self.settings, fluid_midi_router_handle_midi_event, self.router)
[ "def", "start", "(", "self", ",", "driver", "=", "None", ",", "device", "=", "None", ",", "midi_driver", "=", "None", ")", ":", "if", "driver", "is", "not", "None", ":", "assert", "(", "driver", "in", "[", "'alsa'", ",", "'oss'", ",", "'jack'", ",", "'portaudio'", ",", "'sndmgr'", ",", "'coreaudio'", ",", "'Direct Sound'", ",", "'pulseaudio'", "]", ")", "fluid_settings_setstr", "(", "self", ".", "settings", ",", "b'audio.driver'", ",", "driver", ".", "encode", "(", ")", ")", "if", "device", "is", "not", "None", ":", "fluid_settings_setstr", "(", "self", ".", "settings", ",", "str", "(", "'audio.%s.device'", "%", "(", "driver", ")", ")", ".", "encode", "(", ")", ",", "device", ".", "encode", "(", ")", ")", "self", ".", "audio_driver", "=", "new_fluid_audio_driver", "(", "self", ".", "settings", ",", "self", ".", "synth", ")", "if", "midi_driver", "is", "not", "None", ":", "assert", "(", "midi_driver", "in", "[", "'alsa_seq'", ",", "'alsa_raw'", ",", "'oss'", ",", "'winmidi'", ",", "'midishare'", ",", "'coremidi'", "]", ")", "fluid_settings_setstr", "(", "self", ".", "settings", ",", "b'midi.driver'", ",", "midi_driver", ".", "encode", "(", ")", ")", "self", ".", "router", "=", "new_fluid_midi_router", "(", "self", ".", "settings", ",", "fluid_synth_handle_midi_event", ",", "self", ".", "synth", ")", "fluid_synth_set_midi_router", "(", "self", ".", "synth", ",", "self", ".", "router", ")", "self", ".", "midi_driver", "=", "new_fluid_midi_driver", "(", "self", ".", "settings", ",", "fluid_midi_router_handle_midi_event", ",", "self", ".", "router", ")" ]
Start audio output driver in separate background thread Call this function any time after creating the Synth object. If you don't call this function, use get_samples() to generate samples. Optional keyword arguments: driver : which audio driver to use for output Possible choices: 'alsa', 'oss', 'jack', 'portaudio', 'sndmgr', 'coreaudio', 'Direct Sound', 'pulseaudio' device: the device to use for audio output midi_driver : which driver to use for MIDI input Possible choices: 'alsa_seq', 'alsa_raw', 'oss', 'winmidi', 'midishare', 'coremidi' Not all drivers will be available for every platform; it depends on which drivers were compiled into FluidSynth for your platform.
[ "Start", "audio", "output", "driver", "in", "separate", "background", "thread" ]
9a8ecee996e83a279e8d29d75e8a859aee4aba67
https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L430-L460
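A typical startup sequence for these methods, hedged: it assumes libfluidsynth is installed, that the chosen driver was compiled into your build, and that the SoundFont path exists on the local system:

import time
import fluidsynth

fs = fluidsynth.Synth()
fs.start(driver='alsa')  # or 'coreaudio', 'Direct Sound', ... per the assert
sfid = fs.sfload('/usr/share/sounds/sf2/FluidR3_GM.sf2')  # assumed path
fs.program_select(0, sfid, 0, 0)  # channel 0, bank 0, preset 0
fs.noteon(0, 60, 100)             # middle C, velocity 100
time.sleep(1.0)
fs.noteoff(0, 60)
fs.delete()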
16,147
nwhitehead/pyfluidsynth
fluidsynth.py
Synth.sfload
def sfload(self, filename, update_midi_preset=0): """Load SoundFont and return its ID""" return fluid_synth_sfload(self.synth, filename.encode(), update_midi_preset)
python
def sfload(self, filename, update_midi_preset=0): """Load SoundFont and return its ID""" return fluid_synth_sfload(self.synth, filename.encode(), update_midi_preset)
[ "def", "sfload", "(", "self", ",", "filename", ",", "update_midi_preset", "=", "0", ")", ":", "return", "fluid_synth_sfload", "(", "self", ".", "synth", ",", "filename", ".", "encode", "(", ")", ",", "update_midi_preset", ")" ]
Load SoundFont and return its ID
[ "Load", "SoundFont", "and", "return", "its", "ID" ]
9a8ecee996e83a279e8d29d75e8a859aee4aba67
https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L466-L468
16,148
nwhitehead/pyfluidsynth
fluidsynth.py
Synth.channel_info
def channel_info(self, chan): """get soundfont, bank, prog, preset name of channel""" info=fluid_synth_channel_info_t() fluid_synth_get_channel_info(self.synth, chan, byref(info)) return (info.sfont_id, info.bank, info.program, info.name)
python
def channel_info(self, chan): """get soundfont, bank, prog, preset name of channel""" info=fluid_synth_channel_info_t() fluid_synth_get_channel_info(self.synth, chan, byref(info)) return (info.sfont_id, info.bank, info.program, info.name)
[ "def", "channel_info", "(", "self", ",", "chan", ")", ":", "info", "=", "fluid_synth_channel_info_t", "(", ")", "fluid_synth_get_channel_info", "(", "self", ".", "synth", ",", "chan", ",", "byref", "(", "info", ")", ")", "return", "(", "info", ".", "sfont_id", ",", "info", ".", "bank", ",", "info", ".", "program", ",", "info", ".", "name", ")" ]
get soundfont, bank, prog, preset name of channel
[ "get", "soundfont", "bank", "prog", "preset", "name", "of", "channel" ]
9a8ecee996e83a279e8d29d75e8a859aee4aba67
https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L475-L479
16,149
scrapinghub/kafka-scanner
kafka_scanner/msg_processor_handlers.py
MsgProcessorHandlers.decompress_messages
def decompress_messages(self, partitions_offmsgs): """ Decompress pre-defined compressed fields for each message. """ for pomsg in partitions_offmsgs: if pomsg['message']: pomsg['message'] = self.decompress_fun(pomsg['message']) yield pomsg
python
def decompress_messages(self, partitions_offmsgs): """ Decompress pre-defined compressed fields for each message. """ for pomsg in partitions_offmsgs: if pomsg['message']: pomsg['message'] = self.decompress_fun(pomsg['message']) yield pomsg
[ "def", "decompress_messages", "(", "self", ",", "partitions_offmsgs", ")", ":", "for", "pomsg", "in", "partitions_offmsgs", ":", "if", "pomsg", "[", "'message'", "]", ":", "pomsg", "[", "'message'", "]", "=", "self", ".", "decompress_fun", "(", "pomsg", "[", "'message'", "]", ")", "yield", "pomsg" ]
Decompress pre-defined compressed fields for each message.
[ "Decompress", "pre", "-", "defined", "compressed", "fields", "for", "each", "message", "." ]
8a71901012e8c948180f70a485b57f8d2e7e3ec1
https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/msg_processor_handlers.py#L85-L91
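The stage above is a plain generator over partition/offset/message dicts; here is a runnable shape of it, with zlib standing in for the configured decompress_fun (an assumption for illustration):

import zlib

def decompress_messages(partitions_offmsgs, decompress_fun=zlib.decompress):
    for pomsg in partitions_offmsgs:
        if pomsg['message']:
            pomsg['message'] = decompress_fun(pomsg['message'])
        yield pomsg

msgs = [{'partition': 0, 'offset': 1, 'message': zlib.compress(b'hello')}]
print(next(decompress_messages(msgs))['message'])  # -> b'hello'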
16,150
scrapinghub/kafka-scanner
kafka_scanner/__init__.py
KafkaScanner._init_offsets
def _init_offsets(self, batchsize): """ Compute new initial and target offsets and do other maintenance tasks """ upper_offsets = previous_lower_offsets = self._lower_offsets if not upper_offsets: upper_offsets = self.latest_offsets self._upper_offsets = {p: o for p, o in upper_offsets.items() if o > self._min_lower_offsets[p]} # remove db dupes not used anymore if self._dupes: for p in list(six.iterkeys(self._dupes)): if p not in self._upper_offsets: db = self._dupes.pop(p) db.close() os.remove(db.filename) partition_batchsize = 0 if self._upper_offsets: partition_batchsize = max(int(batchsize * self.__scan_excess), batchsize) self._lower_offsets = self._upper_offsets.copy() total_offsets_run = 0 for p in sorted(self._upper_offsets.keys()): # readjust partition_batchsize when a partition scan starts from latest offset if total_offsets_run > 0 and partition_batchsize > batchsize: partition_batchsize = batchsize if partition_batchsize > 0: self._lower_offsets[p] = max(self._upper_offsets[p] - partition_batchsize, self._min_lower_offsets[p]) offsets_run = self._upper_offsets[p] - self._lower_offsets[p] total_offsets_run += offsets_run partition_batchsize = partition_batchsize - offsets_run else: break log.info('Offset run: %d', total_offsets_run) # create new consumer if partition list changes if previous_lower_offsets is not None and set(previous_lower_offsets.keys()) != set(self._lower_offsets): self._create_scan_consumer(self._lower_offsets.keys()) # consumer must restart from newly computed lower offsets self._update_offsets(self._lower_offsets) log.info('Initial offsets for topic %s: %s', self._topic, repr(self._lower_offsets)) log.info('Target offsets for topic %s: %s', self._topic, repr(self._upper_offsets)) return batchsize
python
def _init_offsets(self, batchsize): """ Compute new initial and target offsets and do other maintenance tasks """ upper_offsets = previous_lower_offsets = self._lower_offsets if not upper_offsets: upper_offsets = self.latest_offsets self._upper_offsets = {p: o for p, o in upper_offsets.items() if o > self._min_lower_offsets[p]} # remove db dupes not used anymore if self._dupes: for p in list(six.iterkeys(self._dupes)): if p not in self._upper_offsets: db = self._dupes.pop(p) db.close() os.remove(db.filename) partition_batchsize = 0 if self._upper_offsets: partition_batchsize = max(int(batchsize * self.__scan_excess), batchsize) self._lower_offsets = self._upper_offsets.copy() total_offsets_run = 0 for p in sorted(self._upper_offsets.keys()): # readjust partition_batchsize when a partition scan starts from latest offset if total_offsets_run > 0 and partition_batchsize > batchsize: partition_batchsize = batchsize if partition_batchsize > 0: self._lower_offsets[p] = max(self._upper_offsets[p] - partition_batchsize, self._min_lower_offsets[p]) offsets_run = self._upper_offsets[p] - self._lower_offsets[p] total_offsets_run += offsets_run partition_batchsize = partition_batchsize - offsets_run else: break log.info('Offset run: %d', total_offsets_run) # create new consumer if partition list changes if previous_lower_offsets is not None and set(previous_lower_offsets.keys()) != set(self._lower_offsets): self._create_scan_consumer(self._lower_offsets.keys()) # consumer must restart from newly computed lower offsets self._update_offsets(self._lower_offsets) log.info('Initial offsets for topic %s: %s', self._topic, repr(self._lower_offsets)) log.info('Target offsets for topic %s: %s', self._topic, repr(self._upper_offsets)) return batchsize
[ "def", "_init_offsets", "(", "self", ",", "batchsize", ")", ":", "upper_offsets", "=", "previous_lower_offsets", "=", "self", ".", "_lower_offsets", "if", "not", "upper_offsets", ":", "upper_offsets", "=", "self", ".", "latest_offsets", "self", ".", "_upper_offsets", "=", "{", "p", ":", "o", "for", "p", ",", "o", "in", "upper_offsets", ".", "items", "(", ")", "if", "o", ">", "self", ".", "_min_lower_offsets", "[", "p", "]", "}", "# remove db dupes not used anymore", "if", "self", ".", "_dupes", ":", "for", "p", "in", "list", "(", "six", ".", "iterkeys", "(", "self", ".", "_dupes", ")", ")", ":", "if", "p", "not", "in", "self", ".", "_upper_offsets", ":", "db", "=", "self", ".", "_dupes", ".", "pop", "(", "p", ")", "db", ".", "close", "(", ")", "os", ".", "remove", "(", "db", ".", "filename", ")", "partition_batchsize", "=", "0", "if", "self", ".", "_upper_offsets", ":", "partition_batchsize", "=", "max", "(", "int", "(", "batchsize", "*", "self", ".", "__scan_excess", ")", ",", "batchsize", ")", "self", ".", "_lower_offsets", "=", "self", ".", "_upper_offsets", ".", "copy", "(", ")", "total_offsets_run", "=", "0", "for", "p", "in", "sorted", "(", "self", ".", "_upper_offsets", ".", "keys", "(", ")", ")", ":", "# readjust partition_batchsize when a partition scan starts from latest offset", "if", "total_offsets_run", ">", "0", "and", "partition_batchsize", ">", "batchsize", ":", "partition_batchsize", "=", "batchsize", "if", "partition_batchsize", ">", "0", ":", "self", ".", "_lower_offsets", "[", "p", "]", "=", "max", "(", "self", ".", "_upper_offsets", "[", "p", "]", "-", "partition_batchsize", ",", "self", ".", "_min_lower_offsets", "[", "p", "]", ")", "offsets_run", "=", "self", ".", "_upper_offsets", "[", "p", "]", "-", "self", ".", "_lower_offsets", "[", "p", "]", "total_offsets_run", "+=", "offsets_run", "partition_batchsize", "=", "partition_batchsize", "-", "offsets_run", "else", ":", "break", "log", ".", "info", "(", "'Offset run: %d'", ",", "total_offsets_run", ")", "# create new consumer if partition list changes", "if", "previous_lower_offsets", "is", "not", "None", "and", "set", "(", "previous_lower_offsets", ".", "keys", "(", ")", ")", "!=", "set", "(", "self", ".", "_lower_offsets", ")", ":", "self", ".", "_create_scan_consumer", "(", "self", ".", "_lower_offsets", ".", "keys", "(", ")", ")", "# consumer must restart from newly computed lower offsets", "self", ".", "_update_offsets", "(", "self", ".", "_lower_offsets", ")", "log", ".", "info", "(", "'Initial offsets for topic %s: %s'", ",", "self", ".", "_topic", ",", "repr", "(", "self", ".", "_lower_offsets", ")", ")", "log", ".", "info", "(", "'Target offsets for topic %s: %s'", ",", "self", ".", "_topic", ",", "repr", "(", "self", ".", "_upper_offsets", ")", ")", "return", "batchsize" ]
Compute new initial and target offsets and do other maintenance tasks
[ "Compute", "new", "initial", "and", "target", "offsets", "and", "do", "other", "maintenance", "tasks" ]
8a71901012e8c948180f70a485b57f8d2e7e3ec1
https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/__init__.py#L275-L318
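The core arithmetic of _init_offsets, walking one batch window backwards from the upper offsets while spending a shared budget in partition order and clamping at per-partition minimums, isolated as a simplified runnable sketch (the real method also handles scan excess, dupe DBs and consumer recreation):

def lower_offsets(upper, minimum, batchsize):
    lower, budget = dict(upper), batchsize
    for p in sorted(upper):
        if budget <= 0:
            break
        lower[p] = max(upper[p] - budget, minimum[p])
        budget -= upper[p] - lower[p]
    return lower

print(lower_offsets({0: 100, 1: 50}, {0: 0, 1: 40}, 70))
# -> {0: 30, 1: 50}: partition 0 absorbs the whole budget first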
16,151
scrapinghub/kafka-scanner
kafka_scanner/__init__.py
KafkaScanner._filter_deleted_records
def _filter_deleted_records(self, batches): """ Filter out deleted records """ for batch in batches: for record in batch: if not self.must_delete_record(record): yield record
python
def _filter_deleted_records(self, batches): """ Filter out deleted records """ for batch in batches: for record in batch: if not self.must_delete_record(record): yield record
[ "def", "_filter_deleted_records", "(", "self", ",", "batches", ")", ":", "for", "batch", "in", "batches", ":", "for", "record", "in", "batch", ":", "if", "not", "self", ".", "must_delete_record", "(", "record", ")", ":", "yield", "record" ]
Filter out deleted records
[ "Filter", "out", "deleted", "records" ]
8a71901012e8c948180f70a485b57f8d2e7e3ec1
https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/__init__.py#L404-L411
16,152
systemd/python-systemd
systemd/journal.py
get_catalog
def get_catalog(mid): """Return catalog entry for the specified ID. `mid` should be either a UUID or a 32 digit hex number. """ if isinstance(mid, _uuid.UUID): mid = mid.hex return _get_catalog(mid)
python
def get_catalog(mid): """Return catalog entry for the specified ID. `mid` should be either a UUID or a 32 digit hex number. """ if isinstance(mid, _uuid.UUID): mid = mid.hex return _get_catalog(mid)
[ "def", "get_catalog", "(", "mid", ")", ":", "if", "isinstance", "(", "mid", ",", "_uuid", ".", "UUID", ")", ":", "mid", "=", "mid", ".", "hex", "return", "_get_catalog", "(", "mid", ")" ]
Return catalog entry for the specified ID. `mid` should be either a UUID or a 32 digit hex number.
[ "Return", "catalog", "entry", "for", "the", "specified", "ID", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L393-L400
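Example lookup of a well-known catalog entry (hedged: it needs python-systemd installed and message catalogs present on the host; SD_MESSAGE_JOURNAL_START is one of the standard IDs exposed by systemd.id128):

from systemd import id128, journal

print(journal.get_catalog(id128.SD_MESSAGE_JOURNAL_START))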
16,153
systemd/python-systemd
systemd/journal.py
Reader._convert_entry
def _convert_entry(self, entry): """Convert entire journal entry utilising _convert_field.""" result = {} for key, value in entry.items(): if isinstance(value, list): result[key] = [self._convert_field(key, val) for val in value] else: result[key] = self._convert_field(key, value) return result
python
def _convert_entry(self, entry): """Convert entire journal entry utilising _convert_field.""" result = {} for key, value in entry.items(): if isinstance(value, list): result[key] = [self._convert_field(key, val) for val in value] else: result[key] = self._convert_field(key, value) return result
[ "def", "_convert_entry", "(", "self", ",", "entry", ")", ":", "result", "=", "{", "}", "for", "key", ",", "value", "in", "entry", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "result", "[", "key", "]", "=", "[", "self", ".", "_convert_field", "(", "key", ",", "val", ")", "for", "val", "in", "value", "]", "else", ":", "result", "[", "key", "]", "=", "self", ".", "_convert_field", "(", "key", ",", "value", ")", "return", "result" ]
Convert entire journal entry utilising _convert_field.
[ "Convert", "entire", "journal", "entry", "utilising", "_convert_field", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L200-L208
16,154
systemd/python-systemd
systemd/journal.py
Reader.add_match
def add_match(self, *args, **kwargs): """Add one or more matches to the filter journal log entries. All matches of different field are combined with logical AND, and matches of the same field are automatically combined with logical OR. Matches can be passed as strings of form "FIELD=value", or keyword arguments FIELD="value". """ args = list(args) args.extend(_make_line(key, val) for key, val in kwargs.items()) for arg in args: super(Reader, self).add_match(arg)
python
def add_match(self, *args, **kwargs): """Add one or more matches to the filter journal log entries. All matches of different field are combined with logical AND, and matches of the same field are automatically combined with logical OR. Matches can be passed as strings of form "FIELD=value", or keyword arguments FIELD="value". """ args = list(args) args.extend(_make_line(key, val) for key, val in kwargs.items()) for arg in args: super(Reader, self).add_match(arg)
[ "def", "add_match", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "list", "(", "args", ")", "args", ".", "extend", "(", "_make_line", "(", "key", ",", "val", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ")", "for", "arg", "in", "args", ":", "super", "(", "Reader", ",", "self", ")", ".", "add_match", "(", "arg", ")" ]
Add one or more matches to filter journal log entries. All matches of different fields are combined with logical AND, and matches of the same field are automatically combined with logical OR. Matches can be passed as strings of form "FIELD=value", or keyword arguments FIELD="value".
[ "Add", "one", "or", "more", "matches", "to", "the", "filter", "journal", "log", "entries", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L233-L244
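Both calling styles from the docstring, combined (hedged: requires python-systemd and a readable journal; the unit name is an example):

from systemd import journal

j = journal.Reader()
j.add_match('_SYSTEMD_UNIT=ssh.service', PRIORITY='6')
for entry in j:
    print(entry.get('MESSAGE'))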
16,155
systemd/python-systemd
systemd/journal.py
Reader.get_next
def get_next(self, skip=1): r"""Return the next log entry as a dictionary. Entries will be processed with converters specified during Reader creation. Optional `skip` value will return the `skip`-th log entry. Currently a standard dictionary of fields is returned, but in the future this might be changed to a different mapping type, so the calling code should not make assumptions about a specific type. """ if super(Reader, self)._next(skip): entry = super(Reader, self)._get_all() if entry: entry['__REALTIME_TIMESTAMP'] = self._get_realtime() entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic() entry['__CURSOR'] = self._get_cursor() return self._convert_entry(entry) return dict()
python
def get_next(self, skip=1): r"""Return the next log entry as a dictionary. Entries will be processed with converters specified during Reader creation. Optional `skip` value will return the `skip`-th log entry. Currently a standard dictionary of fields is returned, but in the future this might be changed to a different mapping type, so the calling code should not make assumptions about a specific type. """ if super(Reader, self)._next(skip): entry = super(Reader, self)._get_all() if entry: entry['__REALTIME_TIMESTAMP'] = self._get_realtime() entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic() entry['__CURSOR'] = self._get_cursor() return self._convert_entry(entry) return dict()
[ "def", "get_next", "(", "self", ",", "skip", "=", "1", ")", ":", "if", "super", "(", "Reader", ",", "self", ")", ".", "_next", "(", "skip", ")", ":", "entry", "=", "super", "(", "Reader", ",", "self", ")", ".", "_get_all", "(", ")", "if", "entry", ":", "entry", "[", "'__REALTIME_TIMESTAMP'", "]", "=", "self", ".", "_get_realtime", "(", ")", "entry", "[", "'__MONOTONIC_TIMESTAMP'", "]", "=", "self", ".", "_get_monotonic", "(", ")", "entry", "[", "'__CURSOR'", "]", "=", "self", ".", "_get_cursor", "(", ")", "return", "self", ".", "_convert_entry", "(", "entry", ")", "return", "dict", "(", ")" ]
r"""Return the next log entry as a dictionary. Entries will be processed with converters specified during Reader creation. Optional `skip` value will return the `skip`-th log entry. Currently a standard dictionary of fields is returned, but in the future this might be changed to a different mapping type, so the calling code should not make assumptions about a specific type.
[ "r", "Return", "the", "next", "log", "entry", "as", "a", "dictionary", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L246-L265
16,156
systemd/python-systemd
systemd/journal.py
Reader.query_unique
def query_unique(self, field): """Return a list of unique values appearing in the journal for the given `field`. Note this does not respect any journal matches. Entries will be processed with converters specified during Reader creation. """ return set(self._convert_field(field, value) for value in super(Reader, self).query_unique(field))
python
def query_unique(self, field): """Return a list of unique values appearing in the journal for the given `field`. Note this does not respect any journal matches. Entries will be processed with converters specified during Reader creation. """ return set(self._convert_field(field, value) for value in super(Reader, self).query_unique(field))
[ "def", "query_unique", "(", "self", ",", "field", ")", ":", "return", "set", "(", "self", ".", "_convert_field", "(", "field", ",", "value", ")", "for", "value", "in", "super", "(", "Reader", ",", "self", ")", ".", "query_unique", "(", "field", ")", ")" ]
Return a set of unique values appearing in the journal for the given `field`. Note this does not respect any journal matches. Entries will be processed with converters specified during Reader creation.
[ "Return", "a", "list", "of", "unique", "values", "appearing", "in", "the", "journal", "for", "the", "given", "field", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L283-L293
16,157
systemd/python-systemd
systemd/journal.py
Reader.wait
def wait(self, timeout=None): """Wait for a change in the journal. `timeout` is the maximum time in seconds to wait, or None which means to wait forever. Returns one of NOP (no change), APPEND (new entries have been added to the end of the journal), or INVALIDATE (journal files have been added or removed). """ us = -1 if timeout is None else int(timeout * 1000000) return super(Reader, self).wait(us)
python
def wait(self, timeout=None): """Wait for a change in the journal. `timeout` is the maximum time in seconds to wait, or None which means to wait forever. Returns one of NOP (no change), APPEND (new entries have been added to the end of the journal), or INVALIDATE (journal files have been added or removed). """ us = -1 if timeout is None else int(timeout * 1000000) return super(Reader, self).wait(us)
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "us", "=", "-", "1", "if", "timeout", "is", "None", "else", "int", "(", "timeout", "*", "1000000", ")", "return", "super", "(", "Reader", ",", "self", ")", ".", "wait", "(", "us", ")" ]
Wait for a change in the journal. `timeout` is the maximum time in seconds to wait, or None which means to wait forever. Returns one of NOP (no change), APPEND (new entries have been added to the end of the journal), or INVALIDATE (journal files have been added or removed).
[ "Wait", "for", "a", "change", "in", "the", "journal", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L295-L306
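wait() is the building block for tail -f style consumers; a common loop (a sketch assuming a live systemd journal) looks like:

from systemd import journal

j = journal.Reader()
j.seek_tail()
j.get_previous()  # position just before the newest entry
while True:
    if j.wait(timeout=5) == journal.APPEND:
        for entry in j:
            print(entry['MESSAGE'])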
16,158
systemd/python-systemd
systemd/journal.py
Reader.seek_realtime
def seek_realtime(self, realtime): """Seek to a matching journal entry nearest to `timestamp` time. Argument `realtime` must be either an integer UNIX timestamp (in microseconds since the beginning of the UNIX epoch), or an float UNIX timestamp (in seconds since the beginning of the UNIX epoch), or a datetime.datetime instance. The integer form is deprecated. >>> import time >>> from systemd import journal >>> yesterday = time.time() - 24 * 60**2 >>> j = journal.Reader() >>> j.seek_realtime(yesterday) """ if isinstance(realtime, _datetime.datetime): realtime = int(float(realtime.strftime("%s.%f")) * 1000000) elif not isinstance(realtime, int): realtime = int(realtime * 1000000) return super(Reader, self).seek_realtime(realtime)
python
def seek_realtime(self, realtime): """Seek to a matching journal entry nearest to `timestamp` time. Argument `realtime` must be either an integer UNIX timestamp (in microseconds since the beginning of the UNIX epoch), or an float UNIX timestamp (in seconds since the beginning of the UNIX epoch), or a datetime.datetime instance. The integer form is deprecated. >>> import time >>> from systemd import journal >>> yesterday = time.time() - 24 * 60**2 >>> j = journal.Reader() >>> j.seek_realtime(yesterday) """ if isinstance(realtime, _datetime.datetime): realtime = int(float(realtime.strftime("%s.%f")) * 1000000) elif not isinstance(realtime, int): realtime = int(realtime * 1000000) return super(Reader, self).seek_realtime(realtime)
[ "def", "seek_realtime", "(", "self", ",", "realtime", ")", ":", "if", "isinstance", "(", "realtime", ",", "_datetime", ".", "datetime", ")", ":", "realtime", "=", "int", "(", "float", "(", "realtime", ".", "strftime", "(", "\"%s.%f\"", ")", ")", "*", "1000000", ")", "elif", "not", "isinstance", "(", "realtime", ",", "int", ")", ":", "realtime", "=", "int", "(", "realtime", "*", "1000000", ")", "return", "super", "(", "Reader", ",", "self", ")", ".", "seek_realtime", "(", "realtime", ")" ]
Seek to a matching journal entry nearest to `realtime` time.

Argument `realtime` must be either an integer UNIX timestamp (in microseconds since the beginning of the UNIX epoch), or a float UNIX timestamp (in seconds since the beginning of the UNIX epoch), or a datetime.datetime instance. The integer form is deprecated.

>>> import time
>>> from systemd import journal
>>> yesterday = time.time() - 24 * 60**2
>>> j = journal.Reader()
>>> j.seek_realtime(yesterday)
[ "Seek", "to", "a", "matching", "journal", "entry", "nearest", "to", "timestamp", "time", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L308-L327
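Using the datetime form the docstring recommends over the deprecated integer form, to jump to entries from the last hour (assumes a readable journal):

import datetime
from systemd import journal

j = journal.Reader()
j.seek_realtime(datetime.datetime.now() - datetime.timedelta(hours=1))
print(j.get_next().get('__REALTIME_TIMESTAMP'))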
16,159
systemd/python-systemd
systemd/journal.py
Reader.seek_monotonic
def seek_monotonic(self, monotonic, bootid=None): """Seek to a matching journal entry nearest to `monotonic` time. Argument `monotonic` is a timestamp from boot in either seconds or a datetime.timedelta instance. Argument `bootid` is a string or UUID representing which boot the monotonic time is reference to. Defaults to current bootid. """ if isinstance(monotonic, _datetime.timedelta): monotonic = monotonic.total_seconds() monotonic = int(monotonic * 1000000) if isinstance(bootid, _uuid.UUID): bootid = bootid.hex return super(Reader, self).seek_monotonic(monotonic, bootid)
python
def seek_monotonic(self, monotonic, bootid=None): """Seek to a matching journal entry nearest to `monotonic` time. Argument `monotonic` is a timestamp from boot in either seconds or a datetime.timedelta instance. Argument `bootid` is a string or UUID representing which boot the monotonic time is reference to. Defaults to current bootid. """ if isinstance(monotonic, _datetime.timedelta): monotonic = monotonic.total_seconds() monotonic = int(monotonic * 1000000) if isinstance(bootid, _uuid.UUID): bootid = bootid.hex return super(Reader, self).seek_monotonic(monotonic, bootid)
[ "def", "seek_monotonic", "(", "self", ",", "monotonic", ",", "bootid", "=", "None", ")", ":", "if", "isinstance", "(", "monotonic", ",", "_datetime", ".", "timedelta", ")", ":", "monotonic", "=", "monotonic", ".", "total_seconds", "(", ")", "monotonic", "=", "int", "(", "monotonic", "*", "1000000", ")", "if", "isinstance", "(", "bootid", ",", "_uuid", ".", "UUID", ")", ":", "bootid", "=", "bootid", ".", "hex", "return", "super", "(", "Reader", ",", "self", ")", ".", "seek_monotonic", "(", "monotonic", ",", "bootid", ")" ]
Seek to a matching journal entry nearest to `monotonic` time. Argument `monotonic` is a timestamp from boot in either seconds or a datetime.timedelta instance. Argument `bootid` is a string or UUID representing which boot the monotonic time is referenced to. Defaults to current bootid.
[ "Seek", "to", "a", "matching", "journal", "entry", "nearest", "to", "monotonic", "time", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L329-L342
16,160
systemd/python-systemd
systemd/journal.py
Reader.log_level
def log_level(self, level): """Set maximum log `level` by setting matches for PRIORITY. """ if 0 <= level <= 7: for i in range(level+1): self.add_match(PRIORITY="%d" % i) else: raise ValueError("Log level must be 0 <= level <= 7")
python
def log_level(self, level): """Set maximum log `level` by setting matches for PRIORITY. """ if 0 <= level <= 7: for i in range(level+1): self.add_match(PRIORITY="%d" % i) else: raise ValueError("Log level must be 0 <= level <= 7")
[ "def", "log_level", "(", "self", ",", "level", ")", ":", "if", "0", "<=", "level", "<=", "7", ":", "for", "i", "in", "range", "(", "level", "+", "1", ")", ":", "self", ".", "add_match", "(", "PRIORITY", "=", "\"%d\"", "%", "i", ")", "else", ":", "raise", "ValueError", "(", "\"Log level must be 0 <= level <= 7\"", ")" ]
Set maximum log `level` by setting matches for PRIORITY.
[ "Set", "maximum", "log", "level", "by", "setting", "matches", "for", "PRIORITY", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L344-L351
16,161
systemd/python-systemd
systemd/journal.py
Reader.messageid_match
def messageid_match(self, messageid): """Add match for log entries with specified `messageid`. `messageid` can be string of hexadicimal digits or a UUID instance. Standard message IDs can be found in systemd.id128. Equivalent to add_match(MESSAGE_ID=`messageid`). """ if isinstance(messageid, _uuid.UUID): messageid = messageid.hex self.add_match(MESSAGE_ID=messageid)
python
def messageid_match(self, messageid): """Add match for log entries with specified `messageid`. `messageid` can be string of hexadicimal digits or a UUID instance. Standard message IDs can be found in systemd.id128. Equivalent to add_match(MESSAGE_ID=`messageid`). """ if isinstance(messageid, _uuid.UUID): messageid = messageid.hex self.add_match(MESSAGE_ID=messageid)
[ "def", "messageid_match", "(", "self", ",", "messageid", ")", ":", "if", "isinstance", "(", "messageid", ",", "_uuid", ".", "UUID", ")", ":", "messageid", "=", "messageid", ".", "hex", "self", ".", "add_match", "(", "MESSAGE_ID", "=", "messageid", ")" ]
Add match for log entries with specified `messageid`. `messageid` can be a string of hexadecimal digits or a UUID instance. Standard message IDs can be found in systemd.id128. Equivalent to add_match(MESSAGE_ID=`messageid`).
[ "Add", "match", "for", "log", "entries", "with", "specified", "messageid", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L353-L363
16,162
systemd/python-systemd
systemd/journal.py
Reader.this_boot
def this_boot(self, bootid=None): """Add match for _BOOT_ID for current boot or the specified boot ID. If specified, bootid should be either a UUID or a 32 digit hex number. Equivalent to add_match(_BOOT_ID='bootid'). """ if bootid is None: bootid = _id128.get_boot().hex else: bootid = getattr(bootid, 'hex', bootid) self.add_match(_BOOT_ID=bootid)
python
def this_boot(self, bootid=None): """Add match for _BOOT_ID for current boot or the specified boot ID. If specified, bootid should be either a UUID or a 32 digit hex number. Equivalent to add_match(_BOOT_ID='bootid'). """ if bootid is None: bootid = _id128.get_boot().hex else: bootid = getattr(bootid, 'hex', bootid) self.add_match(_BOOT_ID=bootid)
[ "def", "this_boot", "(", "self", ",", "bootid", "=", "None", ")", ":", "if", "bootid", "is", "None", ":", "bootid", "=", "_id128", ".", "get_boot", "(", ")", ".", "hex", "else", ":", "bootid", "=", "getattr", "(", "bootid", ",", "'hex'", ",", "bootid", ")", "self", ".", "add_match", "(", "_BOOT_ID", "=", "bootid", ")" ]
Add match for _BOOT_ID for current boot or the specified boot ID. If specified, bootid should be either a UUID or a 32 digit hex number. Equivalent to add_match(_BOOT_ID='bootid').
[ "Add", "match", "for", "_BOOT_ID", "for", "current", "boot", "or", "the", "specified", "boot", "ID", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L365-L376
16,163
systemd/python-systemd
systemd/journal.py
Reader.this_machine
def this_machine(self, machineid=None):
        """Add match for _MACHINE_ID equal to the ID of this machine.

        If specified, machineid should be either a UUID or a 32 digit hex
        number.

        Equivalent to add_match(_MACHINE_ID='machineid').
        """
        if machineid is None:
            machineid = _id128.get_machine().hex
        else:
            machineid = getattr(machineid, 'hex', machineid)
        self.add_match(_MACHINE_ID=machineid)
python
def this_machine(self, machineid=None):
        """Add match for _MACHINE_ID equal to the ID of this machine.

        If specified, machineid should be either a UUID or a 32 digit hex
        number.

        Equivalent to add_match(_MACHINE_ID='machineid').
        """
        if machineid is None:
            machineid = _id128.get_machine().hex
        else:
            machineid = getattr(machineid, 'hex', machineid)
        self.add_match(_MACHINE_ID=machineid)
[ "def", "this_machine", "(", "self", ",", "machineid", "=", "None", ")", ":", "if", "machineid", "is", "None", ":", "machineid", "=", "_id128", ".", "get_machine", "(", ")", ".", "hex", "else", ":", "machineid", "=", "getattr", "(", "machineid", ",", "'hex'", ",", "machineid", ")", "self", ".", "add_match", "(", "_MACHINE_ID", "=", "machineid", ")" ]
Add match for _MACHINE_ID equal to the ID of this machine.

        If specified, machineid should be either a UUID or a 32 digit hex
        number.

        Equivalent to add_match(_MACHINE_ID='machineid').
[ "Add", "match", "for", "_MACHINE_ID", "equal", "to", "the", "ID", "of", "this", "machine", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L378-L390
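Together, messageid_match, this_boot, and this_machine cover the common journal filters. A minimal usage sketch, not part of the records above (assumes a readable local journal; the hex value is only an example message ID):

from systemd import journal

j = journal.Reader()
j.this_boot()       # match _BOOT_ID of the current boot
j.this_machine()    # match _MACHINE_ID of this machine
j.messageid_match('b07a249cd024414a82dd00cd181378ff')  # example message ID
for entry in j:
    print(entry.get('MESSAGE'))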
16,164
systemd/python-systemd
systemd/journal.py
JournalHandler.emit
def emit(self, record):
        """Write `record` as a journal event.

        MESSAGE is taken from the message provided by the user, and
        PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are
        appended automatically. In addition, record.MESSAGE_ID will be
        used if present.
        """
        try:
            msg = self.format(record)
            pri = self.map_priority(record.levelno)
            # defaults
            extras = self._extra.copy()

            # higher priority
            if record.exc_text:
                extras['EXCEPTION_TEXT'] = record.exc_text
            if record.exc_info:
                extras['EXCEPTION_INFO'] = record.exc_info
            if record.args:
                extras['CODE_ARGS'] = str(record.args)

            # explicit arguments — highest priority
            extras.update(record.__dict__)

            self.send(msg,
                      PRIORITY=format(pri),
                      LOGGER=record.name,
                      THREAD_NAME=record.threadName,
                      PROCESS_NAME=record.processName,
                      CODE_FILE=record.pathname,
                      CODE_LINE=record.lineno,
                      CODE_FUNC=record.funcName,
                      **extras)
        except Exception:
            self.handleError(record)
python
def emit(self, record):
        """Write `record` as a journal event.

        MESSAGE is taken from the message provided by the user, and
        PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are
        appended automatically. In addition, record.MESSAGE_ID will be
        used if present.
        """
        try:
            msg = self.format(record)
            pri = self.map_priority(record.levelno)
            # defaults
            extras = self._extra.copy()

            # higher priority
            if record.exc_text:
                extras['EXCEPTION_TEXT'] = record.exc_text
            if record.exc_info:
                extras['EXCEPTION_INFO'] = record.exc_info
            if record.args:
                extras['CODE_ARGS'] = str(record.args)

            # explicit arguments — highest priority
            extras.update(record.__dict__)

            self.send(msg,
                      PRIORITY=format(pri),
                      LOGGER=record.name,
                      THREAD_NAME=record.threadName,
                      PROCESS_NAME=record.processName,
                      CODE_FILE=record.pathname,
                      CODE_LINE=record.lineno,
                      CODE_FUNC=record.funcName,
                      **extras)
        except Exception:
            self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "msg", "=", "self", ".", "format", "(", "record", ")", "pri", "=", "self", ".", "map_priority", "(", "record", ".", "levelno", ")", "# defaults", "extras", "=", "self", ".", "_extra", ".", "copy", "(", ")", "# higher priority", "if", "record", ".", "exc_text", ":", "extras", "[", "'EXCEPTION_TEXT'", "]", "=", "record", ".", "exc_text", "if", "record", ".", "exc_info", ":", "extras", "[", "'EXCEPTION_INFO'", "]", "=", "record", ".", "exc_info", "if", "record", ".", "args", ":", "extras", "[", "'CODE_ARGS'", "]", "=", "str", "(", "record", ".", "args", ")", "# explicit arguments — highest priority", "extras", ".", "update", "(", "record", ".", "__dict__", ")", "self", ".", "send", "(", "msg", ",", "PRIORITY", "=", "format", "(", "pri", ")", ",", "LOGGER", "=", "record", ".", "name", ",", "THREAD_NAME", "=", "record", ".", "threadName", ",", "PROCESS_NAME", "=", "record", ".", "processName", ",", "CODE_FILE", "=", "record", ".", "pathname", ",", "CODE_LINE", "=", "record", ".", "lineno", ",", "CODE_FUNC", "=", "record", ".", "funcName", ",", "*", "*", "extras", ")", "except", "Exception", ":", "self", ".", "handleError", "(", "record", ")" ]
Write `record` as a journal event.

        MESSAGE is taken from the message provided by the user, and
        PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are
        appended automatically. In addition, record.MESSAGE_ID will be
        used if present.
[ "Write", "record", "as", "a", "journal", "event", "." ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L568-L604
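A short usage sketch for JournalHandler (assumes systemd-journald is running; the SYSLOG_IDENTIFIER value is arbitrary). emit() maps each log record onto journal fields automatically:

import logging
from systemd.journal import JournalHandler

log = logging.getLogger('demo')
log.addHandler(JournalHandler(SYSLOG_IDENTIFIER='demo'))
log.setLevel(logging.INFO)
log.info('Hello journal')  # lands in the journal with PRIORITY, CODE_* fields set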
16,165
systemd/python-systemd
systemd/daemon.py
listen_fds
def listen_fds(unset_environment=True):
    """Return a list of socket activated descriptors

    Example::

      (in primary window)
      $ systemd-activate -l 2000 python3 -c \\
          'from systemd.daemon import listen_fds; print(listen_fds())'
      (in another window)
      $ telnet localhost 2000
      (in primary window)
      ...
      Execing python3 (...)
      [3]
    """
    num = _listen_fds(unset_environment)
    return list(range(LISTEN_FDS_START, LISTEN_FDS_START + num))
python
def listen_fds(unset_environment=True):
    """Return a list of socket activated descriptors

    Example::

      (in primary window)
      $ systemd-activate -l 2000 python3 -c \\
          'from systemd.daemon import listen_fds; print(listen_fds())'
      (in another window)
      $ telnet localhost 2000
      (in primary window)
      ...
      Execing python3 (...)
      [3]
    """
    num = _listen_fds(unset_environment)
    return list(range(LISTEN_FDS_START, LISTEN_FDS_START + num))
[ "def", "listen_fds", "(", "unset_environment", "=", "True", ")", ":", "num", "=", "_listen_fds", "(", "unset_environment", ")", "return", "list", "(", "range", "(", "LISTEN_FDS_START", ",", "LISTEN_FDS_START", "+", "num", ")", ")" ]
Return a list of socket activated descriptors

    Example::

      (in primary window)
      $ systemd-activate -l 2000 python3 -c \\
          'from systemd.daemon import listen_fds; print(listen_fds())'
      (in another window)
      $ telnet localhost 2000
      (in primary window)
      ...
      Execing python3 (...)
      [3]
[ "Return", "a", "list", "of", "socket", "activated", "descriptors" ]
c06c5d401d60ae9175367be0797a6c2b562ac5ba
https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/daemon.py#L55-L71
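A consuming sketch for listen_fds (assumes the process was socket-activated, e.g. through systemd-activate as in the docstring above; Python 3 only because of socket.socket(fileno=...)):

import socket
from systemd.daemon import listen_fds

fds = listen_fds()  # e.g. [3] when one socket was passed in
if fds:
    sock = socket.socket(fileno=fds[0])  # adopt the activated listening socket
    conn, addr = sock.accept()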
16,166
earl/beanstalkc
beanstalkc.py
Connection.connect
def connect(self):
        """Connect to beanstalkd server."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(self._connect_timeout)
        SocketError.wrap(self._socket.connect, (self.host, self.port))
        self._socket.settimeout(None)
        self._socket_file = self._socket.makefile('rb')
python
def connect(self):
        """Connect to beanstalkd server."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(self._connect_timeout)
        SocketError.wrap(self._socket.connect, (self.host, self.port))
        self._socket.settimeout(None)
        self._socket_file = self._socket.makefile('rb')
[ "def", "connect", "(", "self", ")", ":", "self", ".", "_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "_socket", ".", "settimeout", "(", "self", ".", "_connect_timeout", ")", "SocketError", ".", "wrap", "(", "self", ".", "_socket", ".", "connect", ",", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "self", ".", "_socket", ".", "settimeout", "(", "None", ")", "self", ".", "_socket_file", "=", "self", ".", "_socket", ".", "makefile", "(", "'rb'", ")" ]
Connect to beanstalkd server.
[ "Connect", "to", "beanstalkd", "server", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L71-L77
16,167
earl/beanstalkc
beanstalkc.py
Connection.close
def close(self):
        """Close connection to server."""
        try:
            self._socket.sendall('quit\r\n')
        except socket.error:
            pass
        try:
            self._socket.close()
        except socket.error:
            pass
python
def close(self):
        """Close connection to server."""
        try:
            self._socket.sendall('quit\r\n')
        except socket.error:
            pass
        try:
            self._socket.close()
        except socket.error:
            pass
[ "def", "close", "(", "self", ")", ":", "try", ":", "self", ".", "_socket", ".", "sendall", "(", "'quit\\r\\n'", ")", "except", "socket", ".", "error", ":", "pass", "try", ":", "self", ".", "_socket", ".", "close", "(", ")", "except", "socket", ".", "error", ":", "pass" ]
Close connection to server.
[ "Close", "connection", "to", "server", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L79-L88
16,168
earl/beanstalkc
beanstalkc.py
Connection.put
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
        """Put a job into the current tube. Returns job id."""
        assert isinstance(body, str), 'Job body must be a str instance'
        jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
            priority, delay, ttr, len(body), body),
            ['INSERTED'],
            ['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
        return int(jid)
python
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
        """Put a job into the current tube. Returns job id."""
        assert isinstance(body, str), 'Job body must be a str instance'
        jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
            priority, delay, ttr, len(body), body),
            ['INSERTED'],
            ['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
        return int(jid)
[ "def", "put", "(", "self", ",", "body", ",", "priority", "=", "DEFAULT_PRIORITY", ",", "delay", "=", "0", ",", "ttr", "=", "DEFAULT_TTR", ")", ":", "assert", "isinstance", "(", "body", ",", "str", ")", ",", "'Job body must be a str instance'", "jid", "=", "self", ".", "_interact_value", "(", "'put %d %d %d %d\\r\\n%s\\r\\n'", "%", "(", "priority", ",", "delay", ",", "ttr", ",", "len", "(", "body", ")", ",", "body", ")", ",", "[", "'INSERTED'", "]", ",", "[", "'JOB_TOO_BIG'", ",", "'BURIED'", ",", "'DRAINING'", "]", ")", "return", "int", "(", "jid", ")" ]
Put a job into the current tube. Returns job id.
[ "Put", "a", "job", "into", "the", "current", "tube", ".", "Returns", "job", "id", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L140-L147
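Producer-side sketch for put() (assumes a beanstalkd server on the default localhost:11300; the tube name is arbitrary):

import beanstalkc

beanstalk = beanstalkc.Connection(host='localhost', port=11300)
beanstalk.use('emails')  # select the tube that put() writes to
jid = beanstalk.put('job payload', priority=100, delay=0, ttr=120)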
16,169
earl/beanstalkc
beanstalkc.py
Connection.reserve
def reserve(self, timeout=None):
        """Reserve a job from one of the watched tubes, with optional timeout
        in seconds. Returns a Job object, or None if the request times out."""
        if timeout is not None:
            command = 'reserve-with-timeout %d\r\n' % timeout
        else:
            command = 'reserve\r\n'
        try:
            return self._interact_job(command,
                                      ['RESERVED'],
                                      ['DEADLINE_SOON', 'TIMED_OUT'])
        except CommandFailed:
            exc = sys.exc_info()[1]
            _, status, results = exc.args
            if status == 'TIMED_OUT':
                return None
            elif status == 'DEADLINE_SOON':
                raise DeadlineSoon(results)
python
def reserve(self, timeout=None):
        """Reserve a job from one of the watched tubes, with optional timeout
        in seconds. Returns a Job object, or None if the request times out."""
        if timeout is not None:
            command = 'reserve-with-timeout %d\r\n' % timeout
        else:
            command = 'reserve\r\n'
        try:
            return self._interact_job(command,
                                      ['RESERVED'],
                                      ['DEADLINE_SOON', 'TIMED_OUT'])
        except CommandFailed:
            exc = sys.exc_info()[1]
            _, status, results = exc.args
            if status == 'TIMED_OUT':
                return None
            elif status == 'DEADLINE_SOON':
                raise DeadlineSoon(results)
[ "def", "reserve", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "command", "=", "'reserve-with-timeout %d\\r\\n'", "%", "timeout", "else", ":", "command", "=", "'reserve\\r\\n'", "try", ":", "return", "self", ".", "_interact_job", "(", "command", ",", "[", "'RESERVED'", "]", ",", "[", "'DEADLINE_SOON'", ",", "'TIMED_OUT'", "]", ")", "except", "CommandFailed", ":", "exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "_", ",", "status", ",", "results", "=", "exc", ".", "args", "if", "status", "==", "'TIMED_OUT'", ":", "return", "None", "elif", "status", "==", "'DEADLINE_SOON'", ":", "raise", "DeadlineSoon", "(", "results", ")" ]
Reserve a job from one of the watched tubes, with optional timeout in seconds. Returns a Job object, or None if the request times out.
[ "Reserve", "a", "job", "from", "one", "of", "the", "watched", "tubes", "with", "optional", "timeout", "in", "seconds", ".", "Returns", "a", "Job", "object", "or", "None", "if", "the", "request", "times", "out", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L149-L166
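Worker-side sketch pairing reserve() with the Job methods in the records below (same server assumption as above; handle_job is a hypothetical application function):

import beanstalkc

beanstalk = beanstalkc.Connection()
beanstalk.watch('emails')
while True:
    job = beanstalk.reserve(timeout=5)
    if job is None:  # reserve-with-timeout expired
        continue
    try:
        handle_job(job.body)  # hypothetical
        job.delete()
    except Exception:
        job.bury()  # park the job for manual inspection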
16,170
earl/beanstalkc
beanstalkc.py
Connection.release
def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
        """Release a reserved job back into the ready queue."""
        self._interact('release %d %d %d\r\n' % (jid, priority, delay),
                       ['RELEASED', 'BURIED'],
                       ['NOT_FOUND'])
python
def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
        """Release a reserved job back into the ready queue."""
        self._interact('release %d %d %d\r\n' % (jid, priority, delay),
                       ['RELEASED', 'BURIED'],
                       ['NOT_FOUND'])
[ "def", "release", "(", "self", ",", "jid", ",", "priority", "=", "DEFAULT_PRIORITY", ",", "delay", "=", "0", ")", ":", "self", ".", "_interact", "(", "'release %d %d %d\\r\\n'", "%", "(", "jid", ",", "priority", ",", "delay", ")", ",", "[", "'RELEASED'", ",", "'BURIED'", "]", ",", "[", "'NOT_FOUND'", "]", ")" ]
Release a reserved job back into the ready queue.
[ "Release", "a", "reserved", "job", "back", "into", "the", "ready", "queue", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L244-L248
16,171
earl/beanstalkc
beanstalkc.py
Job.delete
def delete(self):
        """Delete this job."""
        self.conn.delete(self.jid)
        self.reserved = False
python
def delete(self):
        """Delete this job."""
        self.conn.delete(self.jid)
        self.reserved = False
[ "def", "delete", "(", "self", ")", ":", "self", ".", "conn", ".", "delete", "(", "self", ".", "jid", ")", "self", ".", "reserved", "=", "False" ]
Delete this job.
[ "Delete", "this", "job", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L283-L286
16,172
earl/beanstalkc
beanstalkc.py
Job.release
def release(self, priority=None, delay=0):
        """Release this job back into the ready queue."""
        if self.reserved:
            self.conn.release(self.jid, priority or self._priority(), delay)
            self.reserved = False
python
def release(self, priority=None, delay=0):
        """Release this job back into the ready queue."""
        if self.reserved:
            self.conn.release(self.jid, priority or self._priority(), delay)
            self.reserved = False
[ "def", "release", "(", "self", ",", "priority", "=", "None", ",", "delay", "=", "0", ")", ":", "if", "self", ".", "reserved", ":", "self", ".", "conn", ".", "release", "(", "self", ".", "jid", ",", "priority", "or", "self", ".", "_priority", "(", ")", ",", "delay", ")", "self", ".", "reserved", "=", "False" ]
Release this job back into the ready queue.
[ "Release", "this", "job", "back", "into", "the", "ready", "queue", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L288-L292
16,173
earl/beanstalkc
beanstalkc.py
Job.bury
def bury(self, priority=None):
        """Bury this job."""
        if self.reserved:
            self.conn.bury(self.jid, priority or self._priority())
            self.reserved = False
python
def bury(self, priority=None):
        """Bury this job."""
        if self.reserved:
            self.conn.bury(self.jid, priority or self._priority())
            self.reserved = False
[ "def", "bury", "(", "self", ",", "priority", "=", "None", ")", ":", "if", "self", ".", "reserved", ":", "self", ".", "conn", ".", "bury", "(", "self", ".", "jid", ",", "priority", "or", "self", ".", "_priority", "(", ")", ")", "self", ".", "reserved", "=", "False" ]
Bury this job.
[ "Bury", "this", "job", "." ]
70c2ffc41cc84b0a1ae557e470e1db89b7b61023
https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L294-L298
16,174
fatiando/pooch
pooch/core.py
Pooch.abspath
def abspath(self):
        "Absolute path to the local storage"
        return Path(os.path.abspath(os.path.expanduser(str(self.path))))
python
def abspath(self):
        "Absolute path to the local storage"
        return Path(os.path.abspath(os.path.expanduser(str(self.path))))
[ "def", "abspath", "(", "self", ")", ":", "return", "Path", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "str", "(", "self", ".", "path", ")", ")", ")", ")" ]
Absolute path to the local storage
[ "Absolute", "path", "to", "the", "local", "storage" ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L221-L223
16,175
fatiando/pooch
pooch/core.py
Pooch.fetch
def fetch(self, fname, processor=None):
        """
        Get the absolute path to a file in the local storage.

        If it's not in the local storage, it will be downloaded. If the hash of the
        file in local storage doesn't match the one in the registry, will download a
        new copy of the file. This is considered a sign that the file was updated in
        the remote storage. If the hash of the downloaded file still doesn't match the
        one in the registry, will raise an exception to warn of possible file
        corruption.

        Post-processing actions sometimes need to be taken on downloaded files
        (unzipping, conversion to a more efficient format, etc). If these actions are
        time or memory consuming, it would be best to do this only once when the file
        is actually downloaded. Use the *processor* argument to specify a function
        that is executed after the download (if required) to perform these actions.
        See below.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.
        processor : None or callable
            If not None, then a function (or callable object) that will be called
            before returning the full path and after the file has been downloaded (if
            required). See below.

        Returns
        -------
        full_path : str
            The absolute path (including the file name) of the file in the local
            storage.

        Notes
        -----

        Processor functions should have the following format:

        .. code:: python

            def myprocessor(fname, action, pooch):
                '''
                Processes the downloaded file and returns a new file name.

                The function **must** take as arguments (in order):

                fname : str
                    The full path of the file in the local data storage
                action : str
                    Either: "download" (file doesn't exist and will be downloaded),
                    "update" (file is outdated and will be downloaded), or "fetch"
                    (file exists and is up to date so no download is necessary).
                pooch : pooch.Pooch
                    The instance of the Pooch class that is calling this function.

                The return value can be anything but is usually a full path to a file
                (or list of files). This is what will be returned by *fetch* in place
                of the original file path.
                '''
                ...

        """
        self._assert_file_in_registry(fname)
        # Create the local data directory if it doesn't already exist
        if not self.abspath.exists():
            os.makedirs(str(self.abspath))
        full_path = self.abspath / fname
        in_storage = full_path.exists()
        if not in_storage:
            action = "download"
        elif in_storage and file_hash(str(full_path)) != self.registry[fname]:
            action = "update"
        else:
            action = "fetch"
        if action in ("download", "update"):
            action_word = dict(download="Downloading", update="Updating")
            warn(
                "{} data file '{}' from remote data store '{}' to '{}'.".format(
                    action_word[action], fname, self.get_url(fname), str(self.path)
                )
            )
            self._download_file(fname)
        if processor is not None:
            return processor(str(full_path), action, self)
        return str(full_path)
python
def fetch(self, fname, processor=None):
        """
        Get the absolute path to a file in the local storage.

        If it's not in the local storage, it will be downloaded. If the hash of the
        file in local storage doesn't match the one in the registry, will download a
        new copy of the file. This is considered a sign that the file was updated in
        the remote storage. If the hash of the downloaded file still doesn't match the
        one in the registry, will raise an exception to warn of possible file
        corruption.

        Post-processing actions sometimes need to be taken on downloaded files
        (unzipping, conversion to a more efficient format, etc). If these actions are
        time or memory consuming, it would be best to do this only once when the file
        is actually downloaded. Use the *processor* argument to specify a function
        that is executed after the download (if required) to perform these actions.
        See below.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.
        processor : None or callable
            If not None, then a function (or callable object) that will be called
            before returning the full path and after the file has been downloaded (if
            required). See below.

        Returns
        -------
        full_path : str
            The absolute path (including the file name) of the file in the local
            storage.

        Notes
        -----

        Processor functions should have the following format:

        .. code:: python

            def myprocessor(fname, action, pooch):
                '''
                Processes the downloaded file and returns a new file name.

                The function **must** take as arguments (in order):

                fname : str
                    The full path of the file in the local data storage
                action : str
                    Either: "download" (file doesn't exist and will be downloaded),
                    "update" (file is outdated and will be downloaded), or "fetch"
                    (file exists and is up to date so no download is necessary).
                pooch : pooch.Pooch
                    The instance of the Pooch class that is calling this function.

                The return value can be anything but is usually a full path to a file
                (or list of files). This is what will be returned by *fetch* in place
                of the original file path.
                '''
                ...

        """
        self._assert_file_in_registry(fname)
        # Create the local data directory if it doesn't already exist
        if not self.abspath.exists():
            os.makedirs(str(self.abspath))
        full_path = self.abspath / fname
        in_storage = full_path.exists()
        if not in_storage:
            action = "download"
        elif in_storage and file_hash(str(full_path)) != self.registry[fname]:
            action = "update"
        else:
            action = "fetch"
        if action in ("download", "update"):
            action_word = dict(download="Downloading", update="Updating")
            warn(
                "{} data file '{}' from remote data store '{}' to '{}'.".format(
                    action_word[action], fname, self.get_url(fname), str(self.path)
                )
            )
            self._download_file(fname)
        if processor is not None:
            return processor(str(full_path), action, self)
        return str(full_path)
[ "def", "fetch", "(", "self", ",", "fname", ",", "processor", "=", "None", ")", ":", "self", ".", "_assert_file_in_registry", "(", "fname", ")", "# Create the local data directory if it doesn't already exist", "if", "not", "self", ".", "abspath", ".", "exists", "(", ")", ":", "os", ".", "makedirs", "(", "str", "(", "self", ".", "abspath", ")", ")", "full_path", "=", "self", ".", "abspath", "/", "fname", "in_storage", "=", "full_path", ".", "exists", "(", ")", "if", "not", "in_storage", ":", "action", "=", "\"download\"", "elif", "in_storage", "and", "file_hash", "(", "str", "(", "full_path", ")", ")", "!=", "self", ".", "registry", "[", "fname", "]", ":", "action", "=", "\"update\"", "else", ":", "action", "=", "\"fetch\"", "if", "action", "in", "(", "\"download\"", ",", "\"update\"", ")", ":", "action_word", "=", "dict", "(", "download", "=", "\"Downloading\"", ",", "update", "=", "\"Updating\"", ")", "warn", "(", "\"{} data file '{}' from remote data store '{}' to '{}'.\"", ".", "format", "(", "action_word", "[", "action", "]", ",", "fname", ",", "self", ".", "get_url", "(", "fname", ")", ",", "str", "(", "self", ".", "path", ")", ")", ")", "self", ".", "_download_file", "(", "fname", ")", "if", "processor", "is", "not", "None", ":", "return", "processor", "(", "str", "(", "full_path", ")", ",", "action", ",", "self", ")", "return", "str", "(", "full_path", ")" ]
Get the absolute path to a file in the local storage.

        If it's not in the local storage, it will be downloaded. If the hash of the
        file in local storage doesn't match the one in the registry, will download a
        new copy of the file. This is considered a sign that the file was updated in
        the remote storage. If the hash of the downloaded file still doesn't match the
        one in the registry, will raise an exception to warn of possible file
        corruption.

        Post-processing actions sometimes need to be taken on downloaded files
        (unzipping, conversion to a more efficient format, etc). If these actions are
        time or memory consuming, it would be best to do this only once when the file
        is actually downloaded. Use the *processor* argument to specify a function
        that is executed after the download (if required) to perform these actions.
        See below.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.
        processor : None or callable
            If not None, then a function (or callable object) that will be called
            before returning the full path and after the file has been downloaded (if
            required). See below.

        Returns
        -------
        full_path : str
            The absolute path (including the file name) of the file in the local
            storage.

        Notes
        -----

        Processor functions should have the following format:

        .. code:: python

            def myprocessor(fname, action, pooch):
                '''
                Processes the downloaded file and returns a new file name.

                The function **must** take as arguments (in order):

                fname : str
                    The full path of the file in the local data storage
                action : str
                    Either: "download" (file doesn't exist and will be downloaded),
                    "update" (file is outdated and will be downloaded), or "fetch"
                    (file exists and is up to date so no download is necessary).
                pooch : pooch.Pooch
                    The instance of the Pooch class that is calling this function.

                The return value can be anything but is usually a full path to a file
                (or list of files). This is what will be returned by *fetch* in place
                of the original file path.
                '''
                ...
[ "Get", "the", "absolute", "path", "to", "a", "file", "in", "the", "local", "storage", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L230-L320
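A usage sketch for fetch() with a processor (the constructor argument names are assumed from the attributes used above; path, URL, file name, and hash are placeholders):

import pooch

POOCH = pooch.Pooch(
    path="~/.mypackage",
    base_url="https://example.com/data/",
    registry={"dataset.zip": "<sha256 placeholder>"},
)

def unpack(fname, action, pooch_instance):
    # Hypothetical processor: only unzip when the file was downloaded or updated.
    if action in ("download", "update"):
        pass  # unzip fname here
    return fname

local_file = POOCH.fetch("dataset.zip", processor=unpack)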
16,176
fatiando/pooch
pooch/core.py
Pooch.get_url
def get_url(self, fname):
        """
        Get the full URL to download a file in the registry.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        """
        self._assert_file_in_registry(fname)
        return self.urls.get(fname, "".join([self.base_url, fname]))
python
def get_url(self, fname):
        """
        Get the full URL to download a file in the registry.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        """
        self._assert_file_in_registry(fname)
        return self.urls.get(fname, "".join([self.base_url, fname]))
[ "def", "get_url", "(", "self", ",", "fname", ")", ":", "self", ".", "_assert_file_in_registry", "(", "fname", ")", "return", "self", ".", "urls", ".", "get", "(", "fname", ",", "\"\"", ".", "join", "(", "[", "self", ".", "base_url", ",", "fname", "]", ")", ")" ]
Get the full URL to download a file in the registry.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.
[ "Get", "the", "full", "URL", "to", "download", "a", "file", "in", "the", "registry", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L329-L341
16,177
fatiando/pooch
pooch/core.py
Pooch._download_file
def _download_file(self, fname):
        """
        Download a file from the remote data storage to the local storage.

        Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        Raises
        ------
        ValueError
            If the hash of the downloaded file doesn't match the hash in the registry.

        """
        destination = self.abspath / fname
        source = self.get_url(fname)
        # Stream the file to a temporary so that we can safely check its hash before
        # overwriting the original
        fout = tempfile.NamedTemporaryFile(delete=False, dir=str(self.abspath))
        try:
            with fout:
                response = requests.get(source, stream=True)
                response.raise_for_status()
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        fout.write(chunk)
            tmphash = file_hash(fout.name)
            if tmphash != self.registry[fname]:
                raise ValueError(
                    "Hash of downloaded file '{}' doesn't match the entry in the registry:"
                    " Expected '{}' and got '{}'.".format(
                        fout.name, self.registry[fname], tmphash
                    )
                )
            # Make sure the parent directory exists in case the file is in a subdirectory.
            # Otherwise, move will cause an error.
            if not os.path.exists(str(destination.parent)):
                os.makedirs(str(destination.parent))
            shutil.move(fout.name, str(destination))
        except Exception:
            os.remove(fout.name)
            raise
python
def _download_file(self, fname):
        """
        Download a file from the remote data storage to the local storage.

        Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        Raises
        ------
        ValueError
            If the hash of the downloaded file doesn't match the hash in the registry.

        """
        destination = self.abspath / fname
        source = self.get_url(fname)
        # Stream the file to a temporary so that we can safely check its hash before
        # overwriting the original
        fout = tempfile.NamedTemporaryFile(delete=False, dir=str(self.abspath))
        try:
            with fout:
                response = requests.get(source, stream=True)
                response.raise_for_status()
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        fout.write(chunk)
            tmphash = file_hash(fout.name)
            if tmphash != self.registry[fname]:
                raise ValueError(
                    "Hash of downloaded file '{}' doesn't match the entry in the registry:"
                    " Expected '{}' and got '{}'.".format(
                        fout.name, self.registry[fname], tmphash
                    )
                )
            # Make sure the parent directory exists in case the file is in a subdirectory.
            # Otherwise, move will cause an error.
            if not os.path.exists(str(destination.parent)):
                os.makedirs(str(destination.parent))
            shutil.move(fout.name, str(destination))
        except Exception:
            os.remove(fout.name)
            raise
[ "def", "_download_file", "(", "self", ",", "fname", ")", ":", "destination", "=", "self", ".", "abspath", "/", "fname", "source", "=", "self", ".", "get_url", "(", "fname", ")", "# Stream the file to a temporary so that we can safely check its hash before", "# overwriting the original", "fout", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "dir", "=", "str", "(", "self", ".", "abspath", ")", ")", "try", ":", "with", "fout", ":", "response", "=", "requests", ".", "get", "(", "source", ",", "stream", "=", "True", ")", "response", ".", "raise_for_status", "(", ")", "for", "chunk", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "if", "chunk", ":", "fout", ".", "write", "(", "chunk", ")", "tmphash", "=", "file_hash", "(", "fout", ".", "name", ")", "if", "tmphash", "!=", "self", ".", "registry", "[", "fname", "]", ":", "raise", "ValueError", "(", "\"Hash of downloaded file '{}' doesn't match the entry in the registry:\"", "\" Expected '{}' and got '{}'.\"", ".", "format", "(", "fout", ".", "name", ",", "self", ".", "registry", "[", "fname", "]", ",", "tmphash", ")", ")", "# Make sure the parent directory exists in case the file is in a subdirectory.", "# Otherwise, move will cause an error.", "if", "not", "os", ".", "path", ".", "exists", "(", "str", "(", "destination", ".", "parent", ")", ")", ":", "os", ".", "makedirs", "(", "str", "(", "destination", ".", "parent", ")", ")", "shutil", ".", "move", "(", "fout", ".", "name", ",", "str", "(", "destination", ")", ")", "except", "Exception", ":", "os", ".", "remove", "(", "fout", ".", "name", ")", "raise" ]
Download a file from the remote data storage to the local storage.

        Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        Raises
        ------
        ValueError
            If the hash of the downloaded file doesn't match the hash in the registry.
[ "Download", "a", "file", "from", "the", "remote", "data", "storage", "to", "the", "local", "storage", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L343-L388
16,178
fatiando/pooch
pooch/core.py
Pooch.load_registry
def load_registry(self, fname):
        """
        Load entries from a file and add them to the registry.

        Use this if you are managing many files.

        Each line of the file should have file name and its SHA256 hash separated by
        a space. Only one file per line is allowed. Custom download URLs for
        individual files can be specified as a third element on the line.

        Parameters
        ----------
        fname : str
            File name and path to the registry file.

        """
        with open(fname) as fin:
            for linenum, line in enumerate(fin):
                elements = line.strip().split()
                if len(elements) > 3 or len(elements) < 2:
                    raise IOError(
                        "Expected 2 or 3 elements in line {} but got {}.".format(
                            linenum, len(elements)
                        )
                    )
                file_name = elements[0]
                file_sha256 = elements[1]
                if len(elements) == 3:
                    file_url = elements[2]
                    self.urls[file_name] = file_url
                self.registry[file_name] = file_sha256
python
def load_registry(self, fname):
        """
        Load entries from a file and add them to the registry.

        Use this if you are managing many files.

        Each line of the file should have file name and its SHA256 hash separated by
        a space. Only one file per line is allowed. Custom download URLs for
        individual files can be specified as a third element on the line.

        Parameters
        ----------
        fname : str
            File name and path to the registry file.

        """
        with open(fname) as fin:
            for linenum, line in enumerate(fin):
                elements = line.strip().split()
                if len(elements) > 3 or len(elements) < 2:
                    raise IOError(
                        "Expected 2 or 3 elements in line {} but got {}.".format(
                            linenum, len(elements)
                        )
                    )
                file_name = elements[0]
                file_sha256 = elements[1]
                if len(elements) == 3:
                    file_url = elements[2]
                    self.urls[file_name] = file_url
                self.registry[file_name] = file_sha256
[ "def", "load_registry", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ")", "as", "fin", ":", "for", "linenum", ",", "line", "in", "enumerate", "(", "fin", ")", ":", "elements", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "elements", ")", ">", "3", "or", "len", "(", "elements", ")", "<", "2", ":", "raise", "IOError", "(", "\"Expected 2 or 3 elements in line {} but got {}.\"", ".", "format", "(", "linenum", ",", "len", "(", "elements", ")", ")", ")", "file_name", "=", "elements", "[", "0", "]", "file_sha256", "=", "elements", "[", "1", "]", "if", "len", "(", "elements", ")", "==", "3", ":", "file_url", "=", "elements", "[", "2", "]", "self", ".", "urls", "[", "file_name", "]", "=", "file_url", "self", ".", "registry", "[", "file_name", "]", "=", "file_sha256" ]
Load entries from a file and add them to the registry.

        Use this if you are managing many files.

        Each line of the file should have file name and its SHA256 hash separated by
        a space. Only one file per line is allowed. Custom download URLs for
        individual files can be specified as a third element on the line.

        Parameters
        ----------
        fname : str
            File name and path to the registry file.
[ "Load", "entries", "from", "a", "file", "and", "add", "them", "to", "the", "registry", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L390-L420
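For reference, a registry file consumed by load_registry holds one entry per line: file name, SHA256 hash, and an optional custom URL. A hypothetical example (the hashes are illustrative placeholders, reused from the file_hash doctest below):

images/logo.png 0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
data/big-file.zip 0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00 https://example.com/mirror/big-file.zip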
16,179
fatiando/pooch
pooch/core.py
Pooch.is_available
def is_available(self, fname):
        """
        Check availability of a remote file without downloading it.

        Use this method when working with large files to check if they are available
        for download.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        Returns
        -------
        status : bool
            True if the file is available for download. False otherwise.

        """
        self._assert_file_in_registry(fname)
        source = self.get_url(fname)
        response = requests.head(source, allow_redirects=True)
        return bool(response.status_code == 200)
python
def is_available(self, fname):
        """
        Check availability of a remote file without downloading it.

        Use this method when working with large files to check if they are available
        for download.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        Returns
        -------
        status : bool
            True if the file is available for download. False otherwise.

        """
        self._assert_file_in_registry(fname)
        source = self.get_url(fname)
        response = requests.head(source, allow_redirects=True)
        return bool(response.status_code == 200)
[ "def", "is_available", "(", "self", ",", "fname", ")", ":", "self", ".", "_assert_file_in_registry", "(", "fname", ")", "source", "=", "self", ".", "get_url", "(", "fname", ")", "response", "=", "requests", ".", "head", "(", "source", ",", "allow_redirects", "=", "True", ")", "return", "bool", "(", "response", ".", "status_code", "==", "200", ")" ]
Check availability of a remote file without downloading it.

        Use this method when working with large files to check if they are available
        for download.

        Parameters
        ----------
        fname : str
            The file name (relative to the *base_url* of the remote data storage) to
            fetch from the local storage.

        Returns
        -------
        status : bool
            True if the file is available for download. False otherwise.
[ "Check", "availability", "of", "a", "remote", "file", "without", "downloading", "it", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L422-L444
16,180
fatiando/pooch
pooch/utils.py
file_hash
def file_hash(fname):
    """
    Calculate the SHA256 hash of a given file.

    Useful for checking if a file has changed or been corrupted.

    Parameters
    ----------
    fname : str
        The name of the file.

    Returns
    -------
    hash : str
        The hash of the file.

    Examples
    --------

    >>> fname = "test-file-for-hash.txt"
    >>> with open(fname, "w") as f:
    ...     __ = f.write("content of the file")
    >>> print(file_hash(fname))
    0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
    >>> import os
    >>> os.remove(fname)

    """
    # Calculate the hash in chunks to avoid overloading the memory
    chunksize = 65536
    hasher = hashlib.sha256()
    with open(fname, "rb") as fin:
        buff = fin.read(chunksize)
        while buff:
            hasher.update(buff)
            buff = fin.read(chunksize)
    return hasher.hexdigest()
python
def file_hash(fname):
    """
    Calculate the SHA256 hash of a given file.

    Useful for checking if a file has changed or been corrupted.

    Parameters
    ----------
    fname : str
        The name of the file.

    Returns
    -------
    hash : str
        The hash of the file.

    Examples
    --------

    >>> fname = "test-file-for-hash.txt"
    >>> with open(fname, "w") as f:
    ...     __ = f.write("content of the file")
    >>> print(file_hash(fname))
    0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
    >>> import os
    >>> os.remove(fname)

    """
    # Calculate the hash in chunks to avoid overloading the memory
    chunksize = 65536
    hasher = hashlib.sha256()
    with open(fname, "rb") as fin:
        buff = fin.read(chunksize)
        while buff:
            hasher.update(buff)
            buff = fin.read(chunksize)
    return hasher.hexdigest()
[ "def", "file_hash", "(", "fname", ")", ":", "# Calculate the hash in chunks to avoid overloading the memory", "chunksize", "=", "65536", "hasher", "=", "hashlib", ".", "sha256", "(", ")", "with", "open", "(", "fname", ",", "\"rb\"", ")", "as", "fin", ":", "buff", "=", "fin", ".", "read", "(", "chunksize", ")", "while", "buff", ":", "hasher", ".", "update", "(", "buff", ")", "buff", "=", "fin", ".", "read", "(", "chunksize", ")", "return", "hasher", ".", "hexdigest", "(", ")" ]
Calculate the SHA256 hash of a given file.

    Useful for checking if a file has changed or been corrupted.

    Parameters
    ----------
    fname : str
        The name of the file.

    Returns
    -------
    hash : str
        The hash of the file.

    Examples
    --------

    >>> fname = "test-file-for-hash.txt"
    >>> with open(fname, "w") as f:
    ...     __ = f.write("content of the file")
    >>> print(file_hash(fname))
    0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
    >>> import os
    >>> os.remove(fname)
[ "Calculate", "the", "SHA256", "hash", "of", "a", "given", "file", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/utils.py#L39-L75
16,181
fatiando/pooch
pooch/utils.py
check_version
def check_version(version, fallback="master"):
    """
    Check that a version string is PEP440 compliant and there are no unreleased
    changes.

    For example, ``version = "0.1"`` will be returned as is but ``version =
    "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used by
    `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this
    version is 10 commits ahead of the last release.

    Parameters
    ----------
    version : str
        A version string.
    fallback : str
        What to return if the version string has unreleased changes.

    Returns
    -------
    version : str
        If *version* is PEP440 compliant and there are no unreleased changes, then
        return *version*. Otherwise, return *fallback*.

    Raises
    ------
    InvalidVersion
        If *version* is not PEP440 compliant.

    Examples
    --------

    >>> check_version("0.1")
    '0.1'
    >>> check_version("0.1a10")
    '0.1a10'
    >>> check_version("0.1+111.9hdg36")
    'master'
    >>> check_version("0.1+111.9hdg36", fallback="dev")
    'dev'

    """
    parse = Version(version)
    if parse.local is not None:
        return fallback
    return version
python
def check_version(version, fallback="master"):
    """
    Check that a version string is PEP440 compliant and there are no unreleased
    changes.

    For example, ``version = "0.1"`` will be returned as is but ``version =
    "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used by
    `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this
    version is 10 commits ahead of the last release.

    Parameters
    ----------
    version : str
        A version string.
    fallback : str
        What to return if the version string has unreleased changes.

    Returns
    -------
    version : str
        If *version* is PEP440 compliant and there are no unreleased changes, then
        return *version*. Otherwise, return *fallback*.

    Raises
    ------
    InvalidVersion
        If *version* is not PEP440 compliant.

    Examples
    --------

    >>> check_version("0.1")
    '0.1'
    >>> check_version("0.1a10")
    '0.1a10'
    >>> check_version("0.1+111.9hdg36")
    'master'
    >>> check_version("0.1+111.9hdg36", fallback="dev")
    'dev'

    """
    parse = Version(version)
    if parse.local is not None:
        return fallback
    return version
[ "def", "check_version", "(", "version", ",", "fallback", "=", "\"master\"", ")", ":", "parse", "=", "Version", "(", "version", ")", "if", "parse", ".", "local", "is", "not", "None", ":", "return", "fallback", "return", "version" ]
Check that a version string is PEP440 compliant and there are no unreleased
    changes.

    For example, ``version = "0.1"`` will be returned as is but ``version =
    "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used by
    `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this
    version is 10 commits ahead of the last release.

    Parameters
    ----------
    version : str
        A version string.
    fallback : str
        What to return if the version string has unreleased changes.

    Returns
    -------
    version : str
        If *version* is PEP440 compliant and there are no unreleased changes, then
        return *version*. Otherwise, return *fallback*.

    Raises
    ------
    InvalidVersion
        If *version* is not PEP440 compliant.

    Examples
    --------

    >>> check_version("0.1")
    '0.1'
    >>> check_version("0.1a10")
    '0.1a10'
    >>> check_version("0.1+111.9hdg36")
    'master'
    >>> check_version("0.1+111.9hdg36", fallback="dev")
    'dev'
[ "Check", "that", "a", "version", "string", "is", "PEP440", "compliant", "and", "there", "are", "no", "unreleased", "changes", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/utils.py#L78-L121
16,182
fatiando/pooch
pooch/utils.py
make_registry
def make_registry(directory, output, recursive=True):
    """
    Make a registry of files and hashes for the given directory.

    This is helpful if you have many files in your test dataset as it keeps you
    from needing to manually update the registry.

    Parameters
    ----------
    directory : str
        Directory of the test data to put in the registry. All file names in the
        registry will be relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, will recursively look for files in subdirectories of *directory*.

    """
    directory = Path(directory)
    if recursive:
        pattern = "**/*"
    else:
        pattern = "*"

    files = sorted(
        [
            str(path.relative_to(directory))
            for path in directory.glob(pattern)
            if path.is_file()
        ]
    )

    hashes = [file_hash(str(directory / fname)) for fname in files]

    with open(output, "w") as outfile:
        for fname, fhash in zip(files, hashes):
            # Only use Unix separators for the registry so that we don't go insane
            # dealing with file paths.
            outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash))
python
def make_registry(directory, output, recursive=True):
    """
    Make a registry of files and hashes for the given directory.

    This is helpful if you have many files in your test dataset as it keeps you
    from needing to manually update the registry.

    Parameters
    ----------
    directory : str
        Directory of the test data to put in the registry. All file names in the
        registry will be relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, will recursively look for files in subdirectories of *directory*.

    """
    directory = Path(directory)
    if recursive:
        pattern = "**/*"
    else:
        pattern = "*"

    files = sorted(
        [
            str(path.relative_to(directory))
            for path in directory.glob(pattern)
            if path.is_file()
        ]
    )

    hashes = [file_hash(str(directory / fname)) for fname in files]

    with open(output, "w") as outfile:
        for fname, fhash in zip(files, hashes):
            # Only use Unix separators for the registry so that we don't go insane
            # dealing with file paths.
            outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash))
[ "def", "make_registry", "(", "directory", ",", "output", ",", "recursive", "=", "True", ")", ":", "directory", "=", "Path", "(", "directory", ")", "if", "recursive", ":", "pattern", "=", "\"**/*\"", "else", ":", "pattern", "=", "\"*\"", "files", "=", "sorted", "(", "[", "str", "(", "path", ".", "relative_to", "(", "directory", ")", ")", "for", "path", "in", "directory", ".", "glob", "(", "pattern", ")", "if", "path", ".", "is_file", "(", ")", "]", ")", "hashes", "=", "[", "file_hash", "(", "str", "(", "directory", "/", "fname", ")", ")", "for", "fname", "in", "files", "]", "with", "open", "(", "output", ",", "\"w\"", ")", "as", "outfile", ":", "for", "fname", ",", "fhash", "in", "zip", "(", "files", ",", "hashes", ")", ":", "# Only use Unix separators for the registry so that we don't go insane", "# dealing with file paths.", "outfile", ".", "write", "(", "\"{} {}\\n\"", ".", "format", "(", "fname", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", ",", "fhash", ")", ")" ]
Make a registry of files and hashes for the given directory.

    This is helpful if you have many files in your test dataset as it keeps you
    from needing to manually update the registry.

    Parameters
    ----------
    directory : str
        Directory of the test data to put in the registry. All file names in the
        registry will be relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, will recursively look for files in subdirectories of *directory*.
[ "Make", "a", "registry", "of", "files", "and", "hashes", "for", "the", "given", "directory", "." ]
fc38601d2d32809b4df75d0715922025740c869a
https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/utils.py#L124-L162
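A one-line usage sketch: hash everything under a data directory and write the registry file that load_registry() consumes (the paths are placeholders):

from pooch.utils import make_registry

make_registry("data", "registry.txt", recursive=True)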
16,183
kennethreitz/omnijson
omnijson/core.py
loads
def loads(s, **kwargs):
    """Loads JSON object."""

    try:
        return _engine[0](s)

    except _engine[2]:
        # except_clause: 'except' [test ['as' NAME]] # grammar for py3x
        # except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x
        why = sys.exc_info()[1]
        raise JSONError(why)
python
def loads(s, **kwargs):
    """Loads JSON object."""

    try:
        return _engine[0](s)

    except _engine[2]:
        # except_clause: 'except' [test ['as' NAME]] # grammar for py3x
        # except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x
        why = sys.exc_info()[1]
        raise JSONError(why)
[ "def", "loads", "(", "s", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "_engine", "[", "0", "]", "(", "s", ")", "except", "_engine", "[", "2", "]", ":", "# except_clause: 'except' [test ['as' NAME]] # grammar for py3x", "# except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x", "why", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "raise", "JSONError", "(", "why", ")" ]
Loads JSON object.
[ "Loads", "JSON", "object", "." ]
a5890a51a59ad76f78a61f5bf91fa86b784cf694
https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/core.py#L41-L51
16,184
kennethreitz/omnijson
omnijson/core.py
dumps
def dumps(o, **kwargs):
    """Dumps JSON object."""

    try:
        return _engine[1](o)

    except:
        ExceptionClass, why = sys.exc_info()[:2]

        if any([(issubclass(ExceptionClass, e)) for e in _engine[2]]):
            raise JSONError(why)
        else:
            raise why
python
def dumps(o, **kwargs):
    """Dumps JSON object."""

    try:
        return _engine[1](o)

    except:
        ExceptionClass, why = sys.exc_info()[:2]

        if any([(issubclass(ExceptionClass, e)) for e in _engine[2]]):
            raise JSONError(why)
        else:
            raise why
[ "def", "dumps", "(", "o", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "_engine", "[", "1", "]", "(", "o", ")", "except", ":", "ExceptionClass", ",", "why", "=", "sys", ".", "exc_info", "(", ")", "[", ":", "2", "]", "if", "any", "(", "[", "(", "issubclass", "(", "ExceptionClass", ",", "e", ")", ")", "for", "e", "in", "_engine", "[", "2", "]", "]", ")", ":", "raise", "JSONError", "(", "why", ")", "else", ":", "raise", "why" ]
Dumps JSON object.
[ "Dumps", "JSON", "object", "." ]
a5890a51a59ad76f78a61f5bf91fa86b784cf694
https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/core.py#L54-L66
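A usage sketch: omnijson delegates to whichever JSON engine was found at import time, so loads and dumps behave like the stdlib json module for basic objects:

import omnijson as json

data = json.loads('{"answer": 42}')   # -> {'answer': 42}
text = json.dumps({"answer": 42})     # -> '{"answer": 42}'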
16,185
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/pt.py
from_table
def from_table(table, engine, limit=None):
    """
    Select data in a database table and put it into a prettytable.

    Create a :class:`prettytable.PrettyTable` from a :class:`sqlalchemy.Table`.
    """
    sql = select([table])
    if limit is not None:
        sql = sql.limit(limit)
    result_proxy = engine.execute(sql)
    return from_db_cursor(result_proxy.cursor)
python
def from_table(table, engine, limit=None):
    """
    Select data in a database table and put it into a prettytable.

    Create a :class:`prettytable.PrettyTable` from a :class:`sqlalchemy.Table`.
    """
    sql = select([table])
    if limit is not None:
        sql = sql.limit(limit)
    result_proxy = engine.execute(sql)
    return from_db_cursor(result_proxy.cursor)
[ "def", "from_table", "(", "table", ",", "engine", ",", "limit", "=", "None", ")", ":", "sql", "=", "select", "(", "[", "table", "]", ")", "if", "limit", "is", "not", "None", ":", "sql", "=", "sql", ".", "limit", "(", "limit", ")", "result_proxy", "=", "engine", ".", "execute", "(", "sql", ")", "return", "from_db_cursor", "(", "result_proxy", ".", "cursor", ")" ]
Select data in a database table and put it into a prettytable.

    Create a :class:`prettytable.PrettyTable` from a :class:`sqlalchemy.Table`.
[ "Select", "data", "in", "a", "database", "table", "and", "put", "into", "prettytable", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pt.py#L68-L82
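A usage sketch for from_table (assumes a SQLite database file and a reflected table named zipcode, both placeholders; any SQLAlchemy engine/table pair on pre-1.4 SQLAlchemy should work):

from sqlalchemy import create_engine, MetaData, Table

engine = create_engine("sqlite:///simple.sqlite")
metadata = MetaData()
zipcode = Table("zipcode", metadata, autoload=True, autoload_with=engine)
print(from_table(zipcode, engine, limit=10))  # pretty-print the first 10 rows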
16,186
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/pt.py
from_data
def from_data(data):
    """
    Construct a PrettyTable from a list of rows.
    """
    if len(data) == 0:  # pragma: no cover
        return None
    else:
        ptable = PrettyTable()
        ptable.field_names = data[0].keys()
        for row in data:
            ptable.add_row(row)
        return ptable
python
def from_data(data):
    """
    Construct a PrettyTable from a list of rows.
    """
    if len(data) == 0:  # pragma: no cover
        return None
    else:
        ptable = PrettyTable()
        ptable.field_names = data[0].keys()
        for row in data:
            ptable.add_row(row)
        return ptable
[ "def", "from_data", "(", "data", ")", ":", "if", "len", "(", "data", ")", "==", "0", ":", "# pragma: no cover", "return", "None", "else", ":", "ptable", "=", "PrettyTable", "(", ")", "ptable", ".", "field_names", "=", "data", "[", "0", "]", ".", "keys", "(", ")", "for", "row", "in", "data", ":", "ptable", ".", "add_row", "(", "row", ")", "return", "ptable" ]
Construct a PrettyTable from a list of rows.
[ "Construct", "a", "Prettytable", "from", "list", "of", "rows", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pt.py#L117-L128
16,187
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/pkg/prettytable/factory.py
TableHandler.generate_table
def generate_table(self, rows):
        """
        Generate a PrettyTable object from a list of rows.
        """
        table = PrettyTable(**self.kwargs)
        for row in self.rows:
            if len(row[0]) < self.max_row_width:
                appends = self.max_row_width - len(row[0])
                for i in range(1, appends):
                    row[0].append("-")

            if row[1] is True:
                self.make_fields_unique(row[0])
                table.field_names = row[0]
            else:
                table.add_row(row[0])
        return table
python
def generate_table(self, rows):
        """
        Generate a PrettyTable object from a list of rows.
        """
        table = PrettyTable(**self.kwargs)
        for row in self.rows:
            if len(row[0]) < self.max_row_width:
                appends = self.max_row_width - len(row[0])
                for i in range(1, appends):
                    row[0].append("-")

            if row[1] is True:
                self.make_fields_unique(row[0])
                table.field_names = row[0]
            else:
                table.add_row(row[0])
        return table
[ "def", "generate_table", "(", "self", ",", "rows", ")", ":", "table", "=", "PrettyTable", "(", "*", "*", "self", ".", "kwargs", ")", "for", "row", "in", "self", ".", "rows", ":", "if", "len", "(", "row", "[", "0", "]", ")", "<", "self", ".", "max_row_width", ":", "appends", "=", "self", ".", "max_row_width", "-", "len", "(", "row", "[", "0", "]", ")", "for", "i", "in", "range", "(", "1", ",", "appends", ")", ":", "row", "[", "0", "]", ".", "append", "(", "\"-\"", ")", "if", "row", "[", "1", "]", "is", "True", ":", "self", ".", "make_fields_unique", "(", "row", "[", "0", "]", ")", "table", ".", "field_names", "=", "row", "[", "0", "]", "else", ":", "table", ".", "add_row", "(", "row", "[", "0", "]", ")", "return", "table" ]
Generate a PrettyTable object from a list of rows.
[ "Generates", "from", "a", "list", "of", "rows", "a", "PrettyTable", "object", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pkg/prettytable/factory.py#L99-L115
16,188
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/io.py
sql_to_csv
def sql_to_csv(sql, engine, filepath, chunksize=1000, overwrite=False):
    """
    Export sql result to csv file, streaming the rows in chunks so that only a
    small amount of memory is used at a time.

    :param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
    :param engine: :class:`sqlalchemy.engine.base.Engine`.
    :param filepath: file path.
    :param chunksize: number of rows written to csv each time.
    :param overwrite: bool, if True, refuse to overwrite an existing file.
    """
    if overwrite:  # pragma: no cover
        if os.path.exists(filepath):
            raise Exception("'%s' already exists!" % filepath)

    import pandas as pd

    columns = [str(column.name) for column in sql.columns]
    with open(filepath, "w") as f:
        # write header
        df = pd.DataFrame([], columns=columns)
        df.to_csv(f, header=True, index=False)

        # iterate big database table
        result_proxy = engine.execute(sql)
        while True:
            data = result_proxy.fetchmany(chunksize)
            if len(data) == 0:
                break
            else:
                df = pd.DataFrame(data, columns=columns)
                df.to_csv(f, header=False, index=False)
python
def sql_to_csv(sql, engine, filepath, chunksize=1000, overwrite=False):
    """
    Export sql result to csv file, streaming the rows in chunks so that only a
    small amount of memory is used at a time.

    :param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
    :param engine: :class:`sqlalchemy.engine.base.Engine`.
    :param filepath: file path.
    :param chunksize: number of rows written to csv each time.
    :param overwrite: bool, if True, refuse to overwrite an existing file.
    """
    if overwrite:  # pragma: no cover
        if os.path.exists(filepath):
            raise Exception("'%s' already exists!" % filepath)

    import pandas as pd

    columns = [str(column.name) for column in sql.columns]
    with open(filepath, "w") as f:
        # write header
        df = pd.DataFrame([], columns=columns)
        df.to_csv(f, header=True, index=False)

        # iterate big database table
        result_proxy = engine.execute(sql)
        while True:
            data = result_proxy.fetchmany(chunksize)
            if len(data) == 0:
                break
            else:
                df = pd.DataFrame(data, columns=columns)
                df.to_csv(f, header=False, index=False)
[ "def", "sql_to_csv", "(", "sql", ",", "engine", ",", "filepath", ",", "chunksize", "=", "1000", ",", "overwrite", "=", "False", ")", ":", "if", "overwrite", ":", "# pragma: no cover", "if", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "raise", "Exception", "(", "\"'%s' already exists!\"", "%", "filepath", ")", "import", "pandas", "as", "pd", "columns", "=", "[", "str", "(", "column", ".", "name", ")", "for", "column", "in", "sql", ".", "columns", "]", "with", "open", "(", "filepath", ",", "\"w\"", ")", "as", "f", ":", "# write header", "df", "=", "pd", ".", "DataFrame", "(", "[", "]", ",", "columns", "=", "columns", ")", "df", ".", "to_csv", "(", "f", ",", "header", "=", "True", ",", "index", "=", "False", ")", "# iterate big database table", "result_proxy", "=", "engine", ".", "execute", "(", "sql", ")", "while", "True", ":", "data", "=", "result_proxy", ".", "fetchmany", "(", "chunksize", ")", "if", "len", "(", "data", ")", "==", "0", ":", "break", "else", ":", "df", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "columns", ")", "df", ".", "to_csv", "(", "f", ",", "header", "=", "False", ",", "index", "=", "False", ")" ]
Export sql result to csv file.

:param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows to write to csv each time.
:param overwrite: bool, if True, refuse to overwrite an existing file.

Write all data from the sql result to a csv file in a streaming
(generator-like) fashion, so only a small chunk is held in memory at a time.
[ "Export", "sql", "result", "to", "csv", "file", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/io.py#L12-L47
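A hedged end-to-end sketch of calling sql_to_csv; the in-memory SQLite engine, users table, and output path are illustrative assumptions, written in the SQLAlchemy 1.x style (engine.execute, select([...])) that the function itself uses.

from sqlalchemy import (
    create_engine, MetaData, Table, Column, Integer, String, select,
)

# illustrative in-memory database
engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
users = Table(
    "users", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)
metadata.create_all(engine)
engine.execute(users.insert(), [{"id": 1, "name": "Alice"},
                                {"id": 2, "name": "Bob"}])

# stream the query result to disk, 500 rows per chunk
sql_to_csv(select([users]), engine, "/tmp/users.csv", chunksize=500)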
16,189
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/io.py
table_to_csv
def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
    """
    Export entire table to a csv file.

    :param table: :class:`sqlalchemy.Table` instance.
    :param engine: :class:`sqlalchemy.engine.base.Engine`.
    :param filepath: file path.
    :param chunksize: number of rows to write to csv each time.
    :param overwrite: bool, if True, refuse to overwrite an existing file.

    Write all data in the table to a csv file.
    """
    sql = select([table])
    # fixed: forward the ``overwrite`` flag, which the original accepted but dropped
    sql_to_csv(sql, engine, filepath, chunksize, overwrite=overwrite)
python
def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
    """
    Export entire table to a csv file.

    :param table: :class:`sqlalchemy.Table` instance.
    :param engine: :class:`sqlalchemy.engine.base.Engine`.
    :param filepath: file path.
    :param chunksize: number of rows to write to csv each time.
    :param overwrite: bool, if True, refuse to overwrite an existing file.

    Write all data in the table to a csv file.
    """
    sql = select([table])
    # fixed: forward the ``overwrite`` flag, which the original accepted but dropped
    sql_to_csv(sql, engine, filepath, chunksize, overwrite=overwrite)
[ "def", "table_to_csv", "(", "table", ",", "engine", ",", "filepath", ",", "chunksize", "=", "1000", ",", "overwrite", "=", "False", ")", ":", "sql", "=", "select", "(", "[", "table", "]", ")", "sql_to_csv", "(", "sql", ",", "engine", ",", "filepath", ",", "chunksize", ")" ]
Export entire table to a csv file.

:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows to write to csv each time.
:param overwrite: bool, if True, refuse to overwrite an existing file.

Write all data in the table to a csv file.
[ "Export", "entire", "table", "to", "a", "csv", "file", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/io.py#L50-L65
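table_to_csv is a thin wrapper over sql_to_csv; reusing the hypothetical users table and engine from the sketch above, the two calls below are equivalent.

# dumps every row of ``users`` to csv in chunks of 1000 rows
table_to_csv(users, engine, "/tmp/users_full.csv")

# equivalent hand-written form
sql_to_csv(select([users]), engine, "/tmp/users_full.csv", chunksize=1000)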
16,190
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/crud/updating.py
update_all
def update_all(engine, table, data, upsert=False):
    """
    Update data by its primary_key column.
    """
    data = ensure_list(data)

    ins = table.insert()
    upd = table.update()

    # Find all primary key columns
    pk_cols = OrderedDict()
    for column in table._columns:
        if column.primary_key:
            pk_cols[column.name] = column

    data_to_insert = list()

    # Multiple primary key column
    if len(pk_cols) >= 2:
        for row in data:
            result = engine.execute(
                upd.
                where(
                    and_(
                        *[col == row[name] for name, col in pk_cols.items()]
                    )
                ).
                values(**row)
            )
            if result.rowcount == 0:
                data_to_insert.append(row)

    # Single primary key column
    elif len(pk_cols) == 1:
        for row in data:
            result = engine.execute(
                upd.
                where(
                    [col == row[name] for name, col in pk_cols.items()][0]
                ).
                values(**row)
            )
            if result.rowcount == 0:
                data_to_insert.append(row)

    else:  # pragma: no cover
        data_to_insert = data

    # Insert rest of data
    if upsert:
        if len(data_to_insert):
            engine.execute(ins, data_to_insert)
python
def update_all(engine, table, data, upsert=False):
    """
    Update data by its primary_key column.
    """
    data = ensure_list(data)

    ins = table.insert()
    upd = table.update()

    # Find all primary key columns
    pk_cols = OrderedDict()
    for column in table._columns:
        if column.primary_key:
            pk_cols[column.name] = column

    data_to_insert = list()

    # Multiple primary key column
    if len(pk_cols) >= 2:
        for row in data:
            result = engine.execute(
                upd.
                where(
                    and_(
                        *[col == row[name] for name, col in pk_cols.items()]
                    )
                ).
                values(**row)
            )
            if result.rowcount == 0:
                data_to_insert.append(row)

    # Single primary key column
    elif len(pk_cols) == 1:
        for row in data:
            result = engine.execute(
                upd.
                where(
                    [col == row[name] for name, col in pk_cols.items()][0]
                ).
                values(**row)
            )
            if result.rowcount == 0:
                data_to_insert.append(row)

    else:  # pragma: no cover
        data_to_insert = data

    # Insert rest of data
    if upsert:
        if len(data_to_insert):
            engine.execute(ins, data_to_insert)
[ "def", "update_all", "(", "engine", ",", "table", ",", "data", ",", "upsert", "=", "False", ")", ":", "data", "=", "ensure_list", "(", "data", ")", "ins", "=", "table", ".", "insert", "(", ")", "upd", "=", "table", ".", "update", "(", ")", "# Find all primary key columns", "pk_cols", "=", "OrderedDict", "(", ")", "for", "column", "in", "table", ".", "_columns", ":", "if", "column", ".", "primary_key", ":", "pk_cols", "[", "column", ".", "name", "]", "=", "column", "data_to_insert", "=", "list", "(", ")", "# Multiple primary key column", "if", "len", "(", "pk_cols", ")", ">=", "2", ":", "for", "row", "in", "data", ":", "result", "=", "engine", ".", "execute", "(", "upd", ".", "where", "(", "and_", "(", "*", "[", "col", "==", "row", "[", "name", "]", "for", "name", ",", "col", "in", "pk_cols", ".", "items", "(", ")", "]", ")", ")", ".", "values", "(", "*", "*", "row", ")", ")", "if", "result", ".", "rowcount", "==", "0", ":", "data_to_insert", ".", "append", "(", "row", ")", "# Single primary key column", "elif", "len", "(", "pk_cols", ")", "==", "1", ":", "for", "row", "in", "data", ":", "result", "=", "engine", ".", "execute", "(", "upd", ".", "where", "(", "[", "col", "==", "row", "[", "name", "]", "for", "name", ",", "col", "in", "pk_cols", ".", "items", "(", ")", "]", "[", "0", "]", ")", ".", "values", "(", "*", "*", "row", ")", ")", "if", "result", ".", "rowcount", "==", "0", ":", "data_to_insert", ".", "append", "(", "row", ")", "else", ":", "# pragma: no cover", "data_to_insert", "=", "data", "# Insert rest of data", "if", "upsert", ":", "if", "len", "(", "data_to_insert", ")", ":", "engine", ".", "execute", "(", "ins", ",", "data_to_insert", ")" ]
Update data by its primary_key column.
[ "Update", "data", "by", "its", "primary_key", "column", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/updating.py#L13-L64
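A small usage sketch for update_all, again reusing the hypothetical users table and engine; each row dict must carry the primary key value(s) used in the where clause.

# matched by primary key -> UPDATE
update_all(engine, users, data=[{"id": 1, "name": "Alice v2"}])

# unmatched primary key: with upsert=True the row is bulk-inserted instead
update_all(engine, users, data=[{"id": 99, "name": "New user"}], upsert=True)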
16,191
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/crud/updating.py
upsert_all
def upsert_all(engine, table, data):
    """
    Update data by primary key columns. If not able to update, do insert.

    Example::

        # suppose in database we already have {"id": 1, "name": "Alice"}
        >>> data = [
        ...     {"id": 1, "name": "Bob"},  # this will be updated
        ...     {"id": 2, "name": "Cathy"},  # this will be added
        ... ]
        >>> upsert_all(engine, table_user, data)
        >>> engine.execute(select([table_user])).fetchall()
        [{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]

    Bulk-update rows. If the table defines a primary key, it is used to
    constrain the where clause; rows the where clause cannot match are
    bulk-inserted automatically.
    """
    update_all(engine, table, data, upsert=True)
python
def upsert_all(engine, table, data):
    """
    Update data by primary key columns. If not able to update, do insert.

    Example::

        # suppose in database we already have {"id": 1, "name": "Alice"}
        >>> data = [
        ...     {"id": 1, "name": "Bob"},  # this will be updated
        ...     {"id": 2, "name": "Cathy"},  # this will be added
        ... ]
        >>> upsert_all(engine, table_user, data)
        >>> engine.execute(select([table_user])).fetchall()
        [{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]

    Bulk-update rows. If the table defines a primary key, it is used to
    constrain the where clause; rows the where clause cannot match are
    bulk-inserted automatically.
    """
    update_all(engine, table, data, upsert=True)
[ "def", "upsert_all", "(", "engine", ",", "table", ",", "data", ")", ":", "update_all", "(", "engine", ",", "table", ",", "data", ",", "upsert", "=", "True", ")" ]
Update data by primary key columns. If not able to update, do insert.

Example::

    # suppose in database we already have {"id": 1, "name": "Alice"}
    >>> data = [
    ...     {"id": 1, "name": "Bob"},  # this will be updated
    ...     {"id": 2, "name": "Cathy"},  # this will be added
    ... ]
    >>> upsert_all(engine, table_user, data)
    >>> engine.execute(select([table_user])).fetchall()
    [{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]

Bulk-update rows. If the table defines a primary key, it is used to
constrain the where clause; rows the where clause cannot match are
bulk-inserted automatically.
[ "Update", "data", "by", "primary", "key", "columns", ".", "If", "not", "able", "to", "update", "do", "insert", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/updating.py#L67-L87
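A runnable restatement of the docstring example, under the same hypothetical users table and engine as above.

from sqlalchemy import select

upsert_all(engine, users, [
    {"id": 1, "name": "Bob"},    # existing id -> updated
    {"id": 2, "name": "Cathy"},  # new id -> inserted
])
print(engine.execute(select([users])).fetchall())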
16,192
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.pk_names
def pk_names(cls):
    """
    Primary key column name list.
    """
    if cls._cache_pk_names is None:
        cls._cache_pk_names = cls._get_primary_key_names()
    return cls._cache_pk_names
python
def pk_names(cls):
    """
    Primary key column name list.
    """
    if cls._cache_pk_names is None:
        cls._cache_pk_names = cls._get_primary_key_names()
    return cls._cache_pk_names
[ "def", "pk_names", "(", "cls", ")", ":", "if", "cls", ".", "_cache_pk_names", "is", "None", ":", "cls", ".", "_cache_pk_names", "=", "cls", ".", "_get_primary_key_names", "(", ")", "return", "cls", ".", "_cache_pk_names" ]
Primary key column name list.
[ "Primary", "key", "column", "name", "list", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L62-L68
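A sketch of how pk_names might be used, assuming ExtendedBase is mixed into a declarative model and the method is exposed as a classmethod (as the ``cls`` parameter suggests); the User model below is hypothetical.

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base, ExtendedBase):  # hypothetical model using the mixin
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

print(User.pk_names())  # ['id'] -- computed once, then served from the cache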
16,193
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.id_field_name
def id_field_name(cls):
    """
    If there is exactly one primary key column, return its name.
    Otherwise, raise ValueError.
    """
    if cls._cache_id_field_name is None:
        pk_names = cls.pk_names()
        if len(pk_names) == 1:
            cls._cache_id_field_name = pk_names[0]
        else:  # pragma: no cover
            raise ValueError(
                "{classname} has more than 1 primary key!"
                .format(classname=cls.__name__)
            )
    return cls._cache_id_field_name
python
def id_field_name(cls):
    """
    If there is exactly one primary key column, return its name.
    Otherwise, raise ValueError.
    """
    if cls._cache_id_field_name is None:
        pk_names = cls.pk_names()
        if len(pk_names) == 1:
            cls._cache_id_field_name = pk_names[0]
        else:  # pragma: no cover
            raise ValueError(
                "{classname} has more than 1 primary key!"
                .format(classname=cls.__name__)
            )
    return cls._cache_id_field_name
[ "def", "id_field_name", "(", "cls", ")", ":", "if", "cls", ".", "_cache_id_field_name", "is", "None", ":", "pk_names", "=", "cls", ".", "pk_names", "(", ")", "if", "len", "(", "pk_names", ")", "==", "1", ":", "cls", ".", "_cache_id_field_name", "=", "pk_names", "[", "0", "]", "else", ":", "# pragma: no cover", "raise", "ValueError", "(", "\"{classname} has more than 1 primary key!\"", ".", "format", "(", "classname", "=", "cls", ".", "__name__", ")", ")", "return", "cls", ".", "_cache_id_field_name" ]
If there is exactly one primary key column, return its name. Otherwise, raise ValueError.
[ "If", "only", "one", "primary_key", "then", "return", "it", ".", "Otherwise", "raise", "ValueError", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L77-L90
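Continuing with the hypothetical User model, id_field_name returns the single key name; a composite key raises, as sketched below.

print(User.id_field_name())  # 'id'

class Pair(Base, ExtendedBase):  # hypothetical model with a composite key
    __tablename__ = "pairs"
    left_id = Column(Integer, primary_key=True)
    right_id = Column(Integer, primary_key=True)

# Pair.id_field_name()  # would raise ValueError: more than 1 primary key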
16,194
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.values
def values(self):
    """
    Return a list of values of all declared columns.
    """
    return [getattr(self, c.name, None) for c in self.__table__._columns]
python
def values(self):
    """
    Return a list of values of all declared columns.
    """
    return [getattr(self, c.name, None) for c in self.__table__._columns]
[ "def", "values", "(", "self", ")", ":", "return", "[", "getattr", "(", "self", ",", "c", ".", "name", ",", "None", ")", "for", "c", "in", "self", ".", "__table__", ".", "_columns", "]" ]
Return a list of values of all declared columns.
[ "return", "list", "of", "value", "of", "all", "declared", "columns", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L101-L105
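Reusing the hypothetical User model, values() returns one entry per declared column, in declaration order:

user = User(id=1, name="Alice")
print(user.values())  # [1, 'Alice']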
16,195
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.items
def items(self):
    """
    Return a list of (name, value) pairs of all declared columns.
    """
    return [
        (c.name, getattr(self, c.name, None))
        for c in self.__table__._columns
    ]
python
def items(self):
    """
    Return a list of (name, value) pairs of all declared columns.
    """
    return [
        (c.name, getattr(self, c.name, None))
        for c in self.__table__._columns
    ]
[ "def", "items", "(", "self", ")", ":", "return", "[", "(", "c", ".", "name", ",", "getattr", "(", "self", ",", "c", ".", "name", ",", "None", ")", ")", "for", "c", "in", "self", ".", "__table__", ".", "_columns", "]" ]
Return a list of (name, value) pairs of all declared columns.
[ "return", "list", "of", "pair", "of", "name", "and", "value", "of", "all", "declared", "columns", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L107-L114
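And items() pairs each value with its column name, again with the hypothetical User model:

print(User(id=1, name="Alice").items())  # [('id', 1), ('name', 'Alice')]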
16,196
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.to_dict
def to_dict(self, include_null=True):
    """
    Convert to dict.
    """
    if include_null:
        return dict(self.items())
    else:
        return {
            attr: value
            for attr, value in self.__dict__.items()
            if not attr.startswith("_sa_")
        }
python
def to_dict(self, include_null=True):
    """
    Convert to dict.
    """
    if include_null:
        return dict(self.items())
    else:
        return {
            attr: value
            for attr, value in self.__dict__.items()
            if not attr.startswith("_sa_")
        }
[ "def", "to_dict", "(", "self", ",", "include_null", "=", "True", ")", ":", "if", "include_null", ":", "return", "dict", "(", "self", ".", "items", "(", ")", ")", "else", ":", "return", "{", "attr", ":", "value", "for", "attr", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "not", "attr", ".", "startswith", "(", "\"_sa_\"", ")", "}" ]
Convert to dict.
[ "Convert", "to", "dict", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L125-L136
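The include_null flag controls whether columns that were never set appear; a sketch with the hypothetical User model:

user = User(id=1)  # ``name`` never set
print(user.to_dict())                    # {'id': 1, 'name': None}
print(user.to_dict(include_null=False))  # {'id': 1}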
16,197
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.to_OrderedDict
def to_OrderedDict(self, include_null=True):
    """
    Convert to OrderedDict.
    """
    if include_null:
        return OrderedDict(self.items())
    else:
        items = list()
        for c in self.__table__._columns:
            try:
                items.append((c.name, self.__dict__[c.name]))
            except KeyError:
                pass
        return OrderedDict(items)
python
def to_OrderedDict(self, include_null=True):
    """
    Convert to OrderedDict.
    """
    if include_null:
        return OrderedDict(self.items())
    else:
        items = list()
        for c in self.__table__._columns:
            try:
                items.append((c.name, self.__dict__[c.name]))
            except KeyError:
                pass
        return OrderedDict(items)
[ "def", "to_OrderedDict", "(", "self", ",", "include_null", "=", "True", ")", ":", "if", "include_null", ":", "return", "OrderedDict", "(", "self", ".", "items", "(", ")", ")", "else", ":", "items", "=", "list", "(", ")", "for", "c", "in", "self", ".", "__table__", ".", "_columns", ":", "try", ":", "items", ".", "append", "(", "(", "c", ".", "name", ",", "self", ".", "__dict__", "[", "c", ".", "name", "]", ")", ")", "except", "KeyError", ":", "pass", "return", "OrderedDict", "(", "items", ")" ]
Convert to OrderedDict.
[ "Convert", "to", "OrderedDict", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L138-L151
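to_OrderedDict behaves like to_dict but preserves column declaration order explicitly:

from collections import OrderedDict

od = User(id=1, name="Alice").to_OrderedDict()
assert od == OrderedDict([("id", 1), ("name", "Alice")])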
16,198
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.by_id
def by_id(cls, _id, engine_or_session):
    """
    Get one object by primary_key value.
    """
    ses, auto_close = ensure_session(engine_or_session)
    obj = ses.query(cls).get(_id)
    if auto_close:
        ses.close()
    return obj
python
def by_id(cls, _id, engine_or_session):
    """
    Get one object by primary_key value.
    """
    ses, auto_close = ensure_session(engine_or_session)
    obj = ses.query(cls).get(_id)
    if auto_close:
        ses.close()
    return obj
[ "def", "by_id", "(", "cls", ",", "_id", ",", "engine_or_session", ")", ":", "ses", ",", "auto_close", "=", "ensure_session", "(", "engine_or_session", ")", "obj", "=", "ses", ".", "query", "(", "cls", ")", ".", "get", "(", "_id", ")", "if", "auto_close", ":", "ses", ".", "close", "(", ")", "return", "obj" ]
Get one object by primary_key value.
[ "Get", "one", "object", "by", "primary_key", "value", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L185-L193
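A usage sketch for by_id; passing an Engine (rather than a Session) makes ensure_session open a temporary session and close it after the lookup. The engine is the hypothetical one from the earlier sketches.

user = User.by_id(1, engine)
print(user.name if user is not None else "not found")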
16,199
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
ExtendedBase.by_sql
def by_sql(cls, sql, engine_or_session):
    """
    Query with a sql statement or textual sql.
    """
    ses, auto_close = ensure_session(engine_or_session)
    result = ses.query(cls).from_statement(sql).all()
    if auto_close:
        ses.close()
    return result
python
def by_sql(cls, sql, engine_or_session):
    """
    Query with a sql statement or textual sql.
    """
    ses, auto_close = ensure_session(engine_or_session)
    result = ses.query(cls).from_statement(sql).all()
    if auto_close:
        ses.close()
    return result
[ "def", "by_sql", "(", "cls", ",", "sql", ",", "engine_or_session", ")", ":", "ses", ",", "auto_close", "=", "ensure_session", "(", "engine_or_session", ")", "result", "=", "ses", ".", "query", "(", "cls", ")", ".", "from_statement", "(", "sql", ")", ".", "all", "(", ")", "if", "auto_close", ":", "ses", ".", "close", "(", ")", "return", "result" ]
Query with a sql statement or textual sql.
[ "Query", "with", "sql", "statement", "or", "texture", "sql", "." ]
96282b779a3efb422802de83c48ca284598ba952
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L196-L204
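A usage sketch for by_sql with a textual statement, again assuming the hypothetical User model and engine from above.

from sqlalchemy import text

sql = text("SELECT * FROM users WHERE id >= 1")
results = User.by_sql(sql, engine)  # a list of User instances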