sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_dependencies(self, filename):
    """Get a list of files that depends on the file named *filename*.

    :param filename: the name of the file to find dependencies of
    """
    if self.is_partial(filename):
        # A partial can be included anywhere, so every template depends on it.
        return self.templates
    if self.is_template(filename):
        return [self.get_template(filename)]
    if self.is_static(filename):
        return [filename]
    return []
:param filename: the name of the file to find dependencies of | entailment |
def render(self, use_reloader=False):
    """Generate the site.

    :param use_reloader: if given, reload templates on modification
    """
    self.render_templates(self.templates)
    self.copy_static(self.static_names)
    if not use_reloader:
        return
    # Watch mode: block here, re-rendering on file changes until Ctrl+C.
    self.logger.info("Watching '%s' for changes..." %
                     self.searchpath)
    self.logger.info("Press Ctrl+C to stop.")
    Reloader(self).watch()
:param use_reloader: if given, reload templates on modification | entailment |
def register_jinja_loaders(self, *loaders):
    """Register one or many `jinja2.Loader` instances for templates lookup.

    During application initialization plugins can register a loader so that
    their templates are available to jinja2 renderer.

    Order of registration matters: last registered is first looked up (after
    standard Flask lookup in app template folder). This allows a plugin to
    override templates provided by others, or by base application. The
    application can override any template from any plugins from its template
    folder (See `Flask.Application.template_folder`).

    :raise: `ValueError` if a template has already been rendered
    """
    try:
        registered = self._jinja_loaders
    except AttributeError:
        # The loader list is consumed (deleted) on first render; after that,
        # a late registration would be silently ignored, so fail loudly.
        raise ValueError(
            "Cannot register new jinja loaders after first template rendered"
        )
    registered.extend(loaders)
During application initialization plugins can register a loader so that
their templates are available to jinja2 renderer.
Order of registration matters: last registered is first looked up (after
standard Flask lookup in app template folder). This allows a plugin to
override templates provided by others, or by base application. The
application can override any template from any plugins from its template
folder (See `Flask.Application.template_folder`).
:raise: `ValueError` if a template has already been rendered | entailment |
def jinja_loader(self):
    """Search templates in custom app templates dir (default Flask
    behaviour), fallback on abilian templates."""
    # Consume the registered loaders; registration is closed from now on
    # (register_jinja_loaders will raise ValueError once this attr is gone).
    registered = self._jinja_loaders
    del self._jinja_loaders
    registered.append(Flask.jinja_loader.func(self))
    # Last registered must be looked up first, right after the app folder.
    return jinja2.ChoiceLoader(list(reversed(registered)))
behaviour), fallback on abilian templates. | entailment |
def render(args):
    """Render a site.

    :param args:
        A map from command-line options to their values. For example:

            {
                '--help': False,
                '--outpath': None,
                '--srcpath': None,
                '--static': None,
                '--version': False,
                'build': True,
                'watch': False
            }
    """
    # Resolve the templates directory: default to ./templates, and make
    # relative paths absolute against the current working directory.
    srcpath = args['--srcpath']
    if srcpath is None:
        srcpath = os.path.join(os.getcwd(), 'templates')
    elif not os.path.isabs(srcpath):
        srcpath = os.path.join(os.getcwd(), srcpath)
    if not os.path.isdir(srcpath):
        print("The templates directory '%s' is invalid."
              % srcpath)
        sys.exit(1)

    outpath = args['--outpath'] if args['--outpath'] is not None else os.getcwd()
    if not os.path.isdir(outpath):
        print("The output directory '%s' is invalid."
              % outpath)
        sys.exit(1)

    # --static is a comma-separated list of directories relative to srcpath;
    # validate each but keep the relative names for make_site().
    staticdirs = args['--static']
    staticpaths = None
    if staticdirs:
        staticpaths = staticdirs.split(",")
        for rel in staticpaths:
            full = os.path.join(srcpath, rel)
            if not os.path.isdir(full):
                print("The static files directory '%s' is invalid." % full)
                sys.exit(1)

    site = staticjinja.make_site(
        searchpath=srcpath,
        outpath=outpath,
        staticpaths=staticpaths,
    )
    site.render(use_reloader=args['watch'])
:param args:
A map from command-line options to their values. For example:
{
'--help': False,
'--outpath': None,
'--srcpath': None,
'--static': None,
'--version': False,
'build': True,
'watch': False
} | entailment |
def indexable_role(principal):
    """Return a string suitable for query against `allowed_roles_and_users`
    field.

    :param principal: It can be :data:`Anonymous`, :data:`Authenticated`,
        or an instance of :class:`User` or :class:`Group`.
    """
    principal = unwrap(principal)
    if hasattr(principal, "is_anonymous") and principal.is_anonymous:
        # An anonymous user is indexed as the Anonymous role.
        principal = Anonymous
    if isinstance(principal, Role):
        return f"role:{principal.name}"
    if isinstance(principal, User):
        return f"user:{principal.id:d}"
    if isinstance(principal, Group):
        return f"group:{principal.id:d}"
    raise ValueError(repr(principal))
field.
:param principal: It can be :data:`Anonymous`, :data:`Authenticated`,
or an instance of :class:`User` or :class:`Group`. | entailment |
def to_text(self, digest, blob, mime_type):
    """Convert a file to plain text.

    Useful for full-text indexing. Returns a Unicode string.
    """
    # Special case, for now (XXX): no text is extracted from images.
    if mime_type.startswith("image/"):
        return ""

    cache_key = "txt:" + digest
    cached = self.cache.get(cache_key)
    if cached:
        return cached

    # Prefer a direct converter; otherwise convert to PDF first and use a
    # PDF-to-text converter as the pivot.
    handler = next(
        (h for h in self.handlers if h.accept(mime_type, "text/plain")), None)
    if handler is None:
        blob = self.to_pdf(digest, blob, mime_type)
        handler = next(
            (h for h in self.handlers
             if h.accept("application/pdf", "text/plain")), None)
    if handler is None:
        raise HandlerNotFound(f"No handler found to convert from {mime_type} to text")

    text = handler.convert(blob)
    self.cache[cache_key] = text
    return text
Useful for full-text indexing. Returns a Unicode string. | entailment |
def has_image(self, digest, mime_type, index, size=500):
    """Tell if there is a preview image."""
    # Images are always previewable; anything else only if a preview was
    # already generated and cached.
    if mime_type.startswith("image/"):
        return True
    return f"img:{index}:{size}:{digest}" in self.cache
def get_image(self, digest, blob, mime_type, index, size=500):
    """Return an image for the given content, only if it already exists in
    the image cache."""
    # Special case, for now (XXX).
    if mime_type.startswith("image/"):
        return ""
    return self.cache.get(f"img:{index}:{size}:{digest}")
the image cache. | entailment |
def to_image(self, digest, blob, mime_type, index, size=500):
    """Convert a file to a list of preview images.

    Returns the image at the given *index*; all produced images are cached
    so later calls for other indexes hit the cache.

    :raises HandlerNotFound: if no conversion path to ``image/jpeg`` exists.
    """
    # Special case, for now (XXX): images get no generated preview.
    if mime_type.startswith("image/"):
        return ""
    cache_key = f"img:{index}:{size}:{digest}"
    converted = self.cache.get(cache_key)
    if converted:
        return converted

    def _convert_and_cache(handler, content):
        # Cache every page/image produced, not just the requested one.
        images = handler.convert(content, size=size)
        for i, image in enumerate(images):
            self.cache[f"img:{i}:{size}:{digest}"] = image
        return images[index]

    # Direct conversion possible
    for handler in self.handlers:
        if handler.accept(mime_type, "image/jpeg"):
            return _convert_and_cache(handler, blob)
    # Use PDF as a pivot format
    pdf = self.to_pdf(digest, blob, mime_type)
    for handler in self.handlers:
        if handler.accept("application/pdf", "image/jpeg"):
            return _convert_and_cache(handler, pdf)
    raise HandlerNotFound(f"No handler found to convert from {mime_type} to image")
def get_metadata(self, digest, content, mime_type):
    """Get a dictionary representing the metadata embedded in the given
    content.

    Images are probed for EXIF tags (keys prefixed ``EXIF:``); any other
    content is converted to PDF if needed and probed with the external
    ``pdfinfo`` tool (keys prefixed ``PDF:``).
    """
    # XXX: ad-hoc for now, refactor later
    if mime_type.startswith("image/"):
        img = Image.open(BytesIO(content))
        ret = {}
        # Not every PIL image plugin exposes EXIF data.
        if not hasattr(img, "_getexif"):
            return {}
        info = img._getexif()
        if not info:
            return {}
        for tag, value in info.items():
            # Map numeric EXIF tag ids to symbolic names when known.
            decoded = TAGS.get(tag, tag)
            ret["EXIF:" + str(decoded)] = value
        return ret
    else:
        if mime_type != "application/pdf":
            content = self.to_pdf(digest, content, mime_type)
        with make_temp_file(content) as in_fn:
            try:
                output = subprocess.check_output(["pdfinfo", in_fn])
            except OSError:
                logger.error("Conversion failed, probably pdfinfo is not installed")
                raise
        ret = {}
        # pdfinfo prints "Key: value" lines; split on the first colon.
        for line in output.split(b"\n"):
            if b":" in line:
                key, value = line.strip().split(b":", 1)
                # NOTE(review): key is bytes here, so str(key) yields
                # "b'...'" on Python 3 — probably needs .decode(); verify.
                key = str(key)
                ret["PDF:" + key] = str(value.strip(), errors="replace")
        return ret
content. | entailment |
def ToByteArray(self):
    """Serialize the command.

    Encodes the command as per the U2F specs, using the standard
    ISO 7816-4 extended encoding. All Commands expect data, so
    Le is always present.

    Returns:
      Python bytearray of the encoded command.
    """
    lc = self.InternalEncodeLc()
    encoded = bytearray([self.cla, self.ins, self.p1, self.p2])
    if self.data:
        encoded += lc
        encoded += self.data
        encoded += b'\x00\x00'  # Le (extended encoding, 2 bytes)
    else:
        encoded += b'\x00\x00\x00'  # Le only; no Lc/data present
    return encoded
Encodes the command as per the U2F specs, using the standard
ISO 7816-4 extended encoding. All Commands expect data, so
Le is always present.
Returns:
Python bytearray of the encoded command. | entailment |
def Authenticate(self, app_id, challenge_data,
                 print_callback=sys.stderr.write):
    """See base class.

    Delegates signing to an external plugin executable named by the
    SK_SIGNING_PLUGIN_ENV_VAR environment variable.

    :raises errors.PluginError: if the environment variable is not set.
    """
    # Ensure environment variable is present
    plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
    if plugin_cmd is None:
        raise errors.PluginError('{} env var is not set'
                                 .format(SK_SIGNING_PLUGIN_ENV_VAR))
    # Prepare input to signer
    client_data_map, signing_input = self._BuildPluginRequest(
        app_id, challenge_data, self.origin)
    # Call plugin
    print_callback('Please insert and touch your security key\n')
    response = self._CallPlugin([plugin_cmd], signing_input)
    # Handle response: look up the ClientData matching the key/challenge
    # pair that the plugin actually signed with.
    key_challenge_pair = (response['keyHandle'], response['challengeHash'])
    client_data_json = client_data_map[key_challenge_pair]
    client_data = client_data_json.encode()
    return self._BuildAuthenticatorResponse(app_id, client_data, response)
def _BuildPluginRequest(self, app_id, challenge_data, origin):
    """Builds a JSON request in the form that the plugin expects.

    Returns a tuple (client_data_map, request_json) where client_data_map
    maps (key_handle_b64, challenge_hash_b64) pairs back to the ClientData
    JSON that produced them, so the caller can match the plugin's reply.
    """
    client_data_map = {}
    encoded_challenges = []
    app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
    for challenge_item in challenge_data:
        key = challenge_item['key']
        key_handle_encoded = self._Base64Encode(key.key_handle)
        raw_challenge = challenge_item['challenge']
        client_data_json = model.ClientData(
            model.ClientData.TYP_AUTHENTICATION,
            raw_challenge,
            origin).GetJson()
        challenge_hash_encoded = self._Base64Encode(
            self._SHA256(client_data_json))
        # Populate challenges list
        encoded_challenges.append({
            'appIdHash': app_id_hash_encoded,
            'challengeHash': challenge_hash_encoded,
            'keyHandle': key_handle_encoded,
            'version': key.version,
        })
        # Populate ClientData map
        key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
        client_data_map[key_challenge_pair] = client_data_json
    signing_request = {
        'type': 'sign_helper_request',
        'signData': encoded_challenges,
        'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
        'localAlways': True
    }
    return client_data_map, json.dumps(signing_request)
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response | Builds the response to return to the caller. | entailment |
def _CallPlugin(self, cmd, input_json):
    """Calls the plugin and validates the response.

    The wire format is a little-endian uint32 length prefix followed by
    JSON, in both directions.

    Args:
      cmd: argv list for the plugin process.
      input_json: the request JSON string to send on the plugin's stdin.

    Returns:
      The 'responseData' object from the plugin's reply.

    Raises:
      errors.PluginError: on malformed output or plugin-reported failure.
      errors.U2FError: for timeout / ineligible-device result codes.
    """
    # Calculate length of input
    input_length = len(input_json)
    length_bytes_le = struct.pack('<I', input_length)
    request = length_bytes_le + input_json.encode()
    # Call plugin
    sign_process = subprocess.Popen(cmd,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
    stdout = sign_process.communicate(request)[0]
    exit_status = sign_process.wait()
    # Parse and validate response size
    response_len_le = stdout[:4]
    response_len = struct.unpack('<I', response_len_le)[0]
    response = stdout[4:]
    if response_len != len(response):
        raise errors.PluginError(
            'Plugin response length {} does not match data {} (exit_status={})'
            .format(response_len, len(response), exit_status))
    # Ensure valid json
    try:
        json_response = json.loads(response.decode())
    except ValueError:
        raise errors.PluginError('Plugin returned invalid output (exit_status={})'
                                 .format(exit_status))
    # Ensure response type
    if json_response.get('type') != 'sign_helper_reply':
        raise errors.PluginError('Plugin returned invalid response type '
                                 '(exit_status={})'
                                 .format(exit_status))
    # Parse response codes
    result_code = json_response.get('code')
    if result_code is None:
        raise errors.PluginError('Plugin missing result code (exit_status={})'
                                 .format(exit_status))
    # Handle errors
    if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
        raise errors.U2FError(errors.U2FError.TIMEOUT)
    elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
        raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
    elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
        raise errors.PluginError(
            'Plugin failed with error {} - {} (exit_status={})'
            .format(result_code,
                    json_response.get('errorDetail'),
                    exit_status))
    # Ensure response data is present
    response_data = json_response.get('responseData')
    if response_data is None:
        # Fixed: was errors.PluginErrors (nonexistent name, would raise
        # AttributeError instead of the intended PluginError).
        raise errors.PluginError(
            'Plugin returned output with missing responseData (exit_status={})'
            .format(exit_status))
    return response_data
def InternalInit(self):
    """Initializes the device and obtains channel id."""
    # Talk on the broadcast channel until the device assigns us a cid.
    self.cid = UsbHidTransport.U2FHID_BROADCAST_CID
    nonce = bytearray(os.urandom(8))
    reply = self.InternalExchange(UsbHidTransport.U2FHID_INIT, nonce)
    if len(reply) < 17:
        raise errors.HidError('unexpected init reply len')
    if reply[0:8] != nonce:
        # The echoed nonce must match ours, else the reply is not for us.
        raise errors.HidError('nonce mismatch')
    self.cid = bytearray(reply[8:12])
    self.u2fhid_version = reply[12]
def InternalExchange(self, cmd, payload_in):
    """Sends and receives a message from the device.

    Retries once, after a short sleep, if the device reports that its
    channel is busy.

    Args:
      cmd: the U2FHID command byte.
      payload_in: the request payload (any byte sequence).

    Returns:
      The response payload as a bytearray.

    Raises:
      errors.HidError: on a device-reported error, a command mismatch, or
        if the device is still busy after the retry.
    """
    self.logger.debug('payload: ' + str(list(payload_in)))
    for _ in range(2):
        # InternalSend destroys its payload argument while fragmenting it,
        # so each attempt needs a fresh copy. (Previously the copy was made
        # once before the loop, so a busy-retry sent an empty payload.)
        payload = bytearray()
        payload[:] = payload_in
        self.InternalSend(cmd, payload)
        ret_cmd, ret_payload = self.InternalRecv()
        if ret_cmd == UsbHidTransport.U2FHID_ERROR:
            if ret_payload == UsbHidTransport.ERR_CHANNEL_BUSY:
                time.sleep(0.5)
                continue
            raise errors.HidError('Device error: %d' % int(ret_payload[0]))
        elif ret_cmd != cmd:
            raise errors.HidError('Command mismatch!')
        return ret_payload
    raise errors.HidError('Device Busy. Please retry')
def InternalSend(self, cmd, payload):
    """Sends a message to the device, including fragmenting it.

    NOTE: destructively consumes *payload* — bytes are deleted from it as
    frames are sent.
    """
    length_to_send = len(payload)
    # The init packet header occupies 7 bytes, so the first frame carries
    # packet_size - 7 payload bytes.
    max_payload = self.packet_size - 7
    first_frame = payload[0:max_payload]
    first_packet = UsbHidTransport.InitPacket(self.packet_size, self.cid, cmd,
                                              len(payload), first_frame)
    del payload[0:max_payload]
    length_to_send -= len(first_frame)
    self.InternalSendPacket(first_packet)
    seq = 0
    while length_to_send > 0:
        # Continuation packet header occupies 5 bytes.
        max_payload = self.packet_size - 5
        next_frame = payload[0:max_payload]
        del payload[0:max_payload]
        length_to_send -= len(next_frame)
        next_packet = UsbHidTransport.ContPacket(self.packet_size, self.cid, seq,
                                                 next_frame)
        self.InternalSendPacket(next_packet)
        seq += 1
def InternalRecv(self):
    """Receives a message from the device, including defragmenting it.

    Returns:
      A (cmd, payload) tuple where payload is a bytearray truncated to the
      length declared in the init packet.
    """
    first_read = self.InternalReadFrame()
    first_packet = UsbHidTransport.InitPacket.FromWireFormat(self.packet_size,
                                                             first_read)
    data = first_packet.payload
    to_read = first_packet.size - len(first_packet.payload)
    seq = 0
    while to_read > 0:
        next_read = self.InternalReadFrame()
        next_packet = UsbHidTransport.ContPacket.FromWireFormat(self.packet_size,
                                                                next_read)
        if self.cid != next_packet.cid:
            # Skip over packets that are for communication with other clients.
            # HID is broadcast, so we see potentially all communication from the
            # device. For well-behaved devices, these should be BUSY messages
            # sent to other clients of the device because at this point we're
            # in mid-message transit.
            continue
        if seq != next_packet.seq:
            raise errors.HardwareError('Packets received out of order')
        # This packet for us at this point, so debit it against our
        # balance of bytes to read.
        to_read -= len(next_packet.payload)
        data.extend(next_packet.payload)
        seq += 1
    # truncate incomplete frames
    data = data[0:first_packet.size]
    return (first_packet.cmd, data)
def Authenticate(self, app_id, challenge_data,
                 print_callback=sys.stderr.write):
    """See base class.

    Tries each (challenge, key) pair against the local USB device,
    returning the first successful assertion with its fields
    base64-encoded for transport.
    """
    # If authenticator is not plugged in, prompt
    try:
        device = u2f.GetLocalU2FInterface(origin=self.origin)
    except errors.NoDeviceFoundError:
        print_callback('Please insert your security key and press enter...')
        six.moves.input()
        device = u2f.GetLocalU2FInterface(origin=self.origin)
    print_callback('Please touch your security key.\n')
    for challenge_item in challenge_data:
        raw_challenge = challenge_item['challenge']
        key = challenge_item['key']
        try:
            result = device.Authenticate(app_id, raw_challenge, [key])
        except errors.U2FError as e:
            # This key isn't registered with the device; try the next one.
            if e.code == errors.U2FError.DEVICE_INELIGIBLE:
                continue
            else:
                raise
        client_data = self._base64encode(result.client_data.GetJson().encode())
        signature_data = self._base64encode(result.signature_data)
        key_handle = self._base64encode(result.key_handle)
        return {
            'clientData': client_data,
            'signatureData': signature_data,
            'applicationId': app_id,
            'keyHandle': key_handle,
        }
    # No provided key was eligible on this device.
    raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
def InternalPlatformSwitch(funcname, *args, **kwargs):
    """Determine, on a platform-specific basis, which module to use."""
    # pylint: disable=g-import-not-at-top
    if sys.platform.startswith('linux'):
        from pyu2f.hid import linux
        clz = linux.LinuxHidDevice
    elif sys.platform.startswith('win32'):
        from pyu2f.hid import windows
        clz = windows.WindowsHidDevice
    elif sys.platform.startswith('darwin'):
        from pyu2f.hid import macos
        clz = macos.MacOsHidDevice
    else:
        clz = None
    if not clz:
        raise Exception('Unsupported platform: ' + sys.platform)
    # '__init__' means "construct an instance"; anything else is a
    # (class-level) function lookup and call.
    if funcname == '__init__':
        return clz(*args, **kwargs)
    return getattr(clz, funcname)(*args, **kwargs)
def GetJson(self):
    """Returns JSON version of ClientData compatible with FIDO spec."""
    # The U2F Raw Messages spec requires the challenge in URL-safe base64
    # *without* padding (RFC 4648). Python has no paddingless mode, so the
    # trailing '=' padding is stripped after encoding.
    challenge = base64.urlsafe_b64encode(
        self.raw_server_challenge).decode().rstrip('=')
    payload = {'typ': self.typ,
               'challenge': challenge,
               'origin': self.origin}
    return json.dumps(payload, sort_keys=True)
def GetValueLength(rd, pos):
    """Get value length for a key in rd.

    For a key at position pos in the Report Descriptor rd, return the length
    of the associated value. This supports both short and long format
    values.

    Args:
      rd: Report Descriptor
      pos: The position of the key in rd.

    Returns:
      (key_size, data_len) where key_size is the number of bytes occupied by
      the key and data_len is the length of the value associated by the key.
    """
    rd = bytearray(rd)
    key = rd[pos]
    if key == LONG_ITEM_ENCODING:
        # Long item (0xfe): [key (1 byte)] [data len (1 byte)]
        # [item tag (1 byte)] [data (n bytes)] — a 3-byte key record.
        if pos + 1 >= len(rd):
            raise errors.HidError('Malformed report descriptor')
        return (3, rd[pos + 1])
    # Short item: tag (4 bits) + type (2 bits) + size code (2 bits) packed
    # into a single byte; size code 0x03 means 4 data bytes.
    size_code = key & 0x03
    if size_code == 0x03:
        return (1, 4)
    return (1, size_code)
For a key at position pos in the Report Descriptor rd, return the length
of the associated value. This supports both short and long format
values.
Args:
rd: Report Descriptor
pos: The position of the key in rd.
Returns:
(key_size, data_len) where key_size is the number of bytes occupied by
the key and data_len is the length of the value associated by the key. | entailment |
def ReadLsbBytes(rd, offset, value_size):
    """Reads value_size bytes from rd at offset, least signifcant byte first."""
    # Map the byte width to the corresponding little-endian struct format.
    formats = {1: '<B', 2: '<H', 4: '<L'}
    encoding = formats.get(value_size)
    if encoding is None:
        raise errors.HidError('Invalid value size specified')
    (value,) = struct.unpack(encoding, rd[offset:offset + value_size])
    return value
def ParseReportDescriptor(rd, desc):
    """Parse the binary report descriptor.

    Parse the binary report descriptor into a DeviceDescriptor object.

    Args:
      rd: The binary report descriptor
      desc: The DeviceDescriptor object to update with the results
          from parsing the descriptor.

    Returns:
      None
    """
    rd = bytearray(rd)
    pos = 0
    # Running state accumulated from "global" items until an input/output
    # item consumes it.
    report_count = None
    report_size = None
    usage_page = None
    usage = None
    while pos < len(rd):
        key = rd[pos]
        # First step, determine the value encoding (either long or short).
        key_size, value_length = GetValueLength(rd, pos)
        if key & REPORT_DESCRIPTOR_KEY_MASK == INPUT_ITEM:
            if report_count and report_size:
                # report_size is in bits; track the largest report in bytes.
                byte_length = (report_count * report_size) // 8
                desc.internal_max_in_report_len = max(
                    desc.internal_max_in_report_len, byte_length)
                report_count = None
                report_size = None
        elif key & REPORT_DESCRIPTOR_KEY_MASK == OUTPUT_ITEM:
            if report_count and report_size:
                byte_length = (report_count * report_size) // 8
                desc.internal_max_out_report_len = max(
                    desc.internal_max_out_report_len, byte_length)
                report_count = None
                report_size = None
        elif key & REPORT_DESCRIPTOR_KEY_MASK == COLLECTION_ITEM:
            # A collection adopts the most recent usage page / usage seen.
            if usage_page:
                desc.usage_page = usage_page
            if usage:
                desc.usage = usage
        elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_COUNT:
            if len(rd) >= pos + 1 + value_length:
                report_count = ReadLsbBytes(rd, pos + 1, value_length)
        elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_SIZE:
            if len(rd) >= pos + 1 + value_length:
                report_size = ReadLsbBytes(rd, pos + 1, value_length)
        elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE_PAGE:
            if len(rd) >= pos + 1 + value_length:
                usage_page = ReadLsbBytes(rd, pos + 1, value_length)
        elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE:
            if len(rd) >= pos + 1 + value_length:
                usage = ReadLsbBytes(rd, pos + 1, value_length)
        pos += value_length + key_size
    return desc
Parse the binary report descriptor into a DeviceDescriptor object.
Args:
rd: The binary report descriptor
desc: The DeviceDescriptor object to update with the results
from parsing the descriptor.
Returns:
None | entailment |
def Write(self, packet):
    """See base class."""
    # A zero report ID byte must precede the packet on the wire.
    report = bytearray([0])
    report.extend(packet)
    os.write(self.dev, report)
def Read(self):
    """See base class."""
    report_len = self.GetInReportDataLength()
    raw = os.read(self.dev, report_len)
    # Return the report as a plain list of ints.
    return list(bytearray(raw))
def FillDeviceAttributes(device, descriptor):
    """Fill out the attributes of the device.

    Fills the devices HidAttributes and product string
    into the descriptor.

    Args:
      device: A handle to the open device
      descriptor: The DeviceDescriptor to populate with the
          attributes.

    Returns:
      None

    Raises:
      WindowsError when unable to obtain attributes or product
      string.
    """
    attributes = HidAttributes()
    result = hid.HidD_GetAttributes(device, ctypes.byref(attributes))
    if not result:
        raise ctypes.WinError()
    # Product string is UTF-16; 1024 bytes is the buffer size in bytes.
    buf = ctypes.create_string_buffer(1024)
    result = hid.HidD_GetProductString(device, buf, 1024)
    if not result:
        raise ctypes.WinError()
    descriptor.vendor_id = attributes.VendorID
    descriptor.product_id = attributes.ProductID
    descriptor.product_string = ctypes.wstring_at(buf)
Fills the devices HidAttributes and product string
into the descriptor.
Args:
device: A handle to the open device
descriptor: The DeviceDescriptor to populate with the
attributes.
Returns:
None
Raises:
WindowsError when unable to obtain attributes or product
string. | entailment |
def FillDeviceCapabilities(device, descriptor):
    """Fill out device capabilities.

    Fills the HidCapabilitites of the device into descriptor.

    Args:
      device: A handle to the open device
      descriptor: DeviceDescriptor to populate with the
          capabilities

    Returns:
      none

    Raises:
      WindowsError when unable to obtain capabilitites.
    """
    preparsed_data = PHIDP_PREPARSED_DATA(0)
    ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))
    if not ret:
        raise ctypes.WinError()
    try:
        caps = HidCapabilities()
        ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))
        if ret != HIDP_STATUS_SUCCESS:
            raise ctypes.WinError()
        descriptor.usage = caps.Usage
        descriptor.usage_page = caps.UsagePage
        descriptor.internal_max_in_report_len = caps.InputReportByteLength
        descriptor.internal_max_out_report_len = caps.OutputReportByteLength
    finally:
        # The preparsed data must always be released, even on failure.
        hid.HidD_FreePreparsedData(preparsed_data)
Fills the HidCapabilitites of the device into descriptor.
Args:
device: A handle to the open device
descriptor: DeviceDescriptor to populate with the
capabilities
Returns:
none
Raises:
WindowsError when unable to obtain capabilitites. | entailment |
def OpenDevice(path, enum=False):
    """Open the device and return a handle to it."""
    # Enumeration only needs metadata access, not read/write rights.
    desired_access = 0 if enum else (GENERIC_WRITE | GENERIC_READ)
    share_mode = FILE_SHARE_READ | FILE_SHARE_WRITE
    handle = kernel32.CreateFileA(path,
                                  desired_access,
                                  share_mode,
                                  None, OPEN_EXISTING, 0, None)
    if handle == INVALID_HANDLE_VALUE:
        raise ctypes.WinError()
    return handle
def Enumerate():
    """See base class.

    Enumerates all HID device interfaces via setupapi, opens each one for
    metadata access, and returns a list of public descriptor dicts.
    Devices we cannot open (e.g. keyboards protected by the OS) are
    silently skipped.
    """
    hid_guid = GUID()
    hid.HidD_GetHidGuid(ctypes.byref(hid_guid))
    # 0x12 = DIGCF_PRESENT | DIGCF_DEVICEINTERFACE
    devices = setupapi.SetupDiGetClassDevsA(
        ctypes.byref(hid_guid), None, None, 0x12)
    index = 0
    interface_info = DeviceInterfaceData()
    interface_info.cbSize = ctypes.sizeof(DeviceInterfaceData)  # pylint: disable=invalid-name
    out = []
    while True:
        result = setupapi.SetupDiEnumDeviceInterfaces(
            devices, 0, ctypes.byref(hid_guid), index,
            ctypes.byref(interface_info))
        index += 1
        if not result:
            # No more interfaces to enumerate.
            break
        # First call with a null buffer just queries the detail size.
        detail_len = wintypes.DWORD()
        result = setupapi.SetupDiGetDeviceInterfaceDetailA(
            devices, ctypes.byref(interface_info), None, 0,
            ctypes.byref(detail_len), None)
        detail_len = detail_len.value
        if detail_len == 0:
            # skip this device, some kind of error
            continue
        buf = ctypes.create_string_buffer(detail_len)
        interface_detail = DeviceInterfaceDetailData.from_buffer(buf)
        interface_detail.cbSize = ctypes.sizeof(DeviceInterfaceDetailData)
        result = setupapi.SetupDiGetDeviceInterfaceDetailA(
            devices, ctypes.byref(interface_info),
            ctypes.byref(interface_detail), detail_len, None, None)
        if not result:
            raise ctypes.WinError()
        descriptor = base.DeviceDescriptor()
        # This is a bit of a hack to work around a limitation of ctypes and
        # "header" structures that are common in windows. DevicePath is a
        # ctypes array of length 1, but it is backed with a buffer that is much
        # longer and contains a null terminated string. So, we read the null
        # terminated string off DevicePath here. Per the comment above, the
        # alignment of this struct varies depending on architecture, but
        # in all cases the path string starts 1 DWORD into the structure.
        #
        # The path length is:
        #   length of detail buffer - header length (1 DWORD)
        path_len = detail_len - ctypes.sizeof(wintypes.DWORD)
        descriptor.path = ctypes.string_at(
            ctypes.addressof(interface_detail.DevicePath), path_len)
        device = None
        try:
            device = OpenDevice(descriptor.path, True)
        except WindowsError as e:  # pylint: disable=undefined-variable
            if e.winerror == ERROR_ACCESS_DENIED:  # Access Denied, e.g. a keyboard
                continue
            else:
                raise e
        try:
            FillDeviceAttributes(device, descriptor)
            FillDeviceCapabilities(device, descriptor)
            out.append(descriptor.ToPublicDict())
        finally:
            kernel32.CloseHandle(device)
    return out
def Write(self, packet):
    """See base class.

    Prepends the zero report ID byte and writes a single full-size output
    report via WriteFile.

    Raises:
      errors.HidError: on wrong packet length or a short write.
      WindowsError: if WriteFile itself fails.
    """
    if len(packet) != self.GetOutReportDataLength():
        raise errors.HidError("Packet length must match report data length.")
    packet_data = [0] + packet  # Prepend the zero-byte (report ID)
    out = bytes(bytearray(packet_data))
    num_written = wintypes.DWORD()
    ret = (
        kernel32.WriteFile(
            self.dev, out, len(out),
            ctypes.byref(num_written), None))
    if num_written.value != len(out):
        raise errors.HidError(
            "Failed to write complete packet. " + "Expected %d, but got %d" %
            (len(out), num_written.value))
    if not ret:
        raise ctypes.WinError()
def Read(self):
    """See base class.

    Reads one full-size input report via ReadFile and returns its data
    bytes (report ID stripped) as a list of ints.

    Raises:
      errors.HidError: on a short read.
      WindowsError: if ReadFile itself fails.
    """
    buf = ctypes.create_string_buffer(self.desc.internal_max_in_report_len)
    num_read = wintypes.DWORD()
    ret = kernel32.ReadFile(
        self.dev, buf, len(buf), ctypes.byref(num_read), None)
    if num_read.value != self.desc.internal_max_in_report_len:
        raise errors.HidError("Failed to read full length report from device.")
    if not ret:
        raise ctypes.WinError()
    # Convert the string buffer to a list of numbers. Throw away the first
    # byte, which is the report id (which we don't care about).
    return list(bytearray(buf[1:]))
def Authenticate(self, app_id, challenge_data,
                 print_callback=sys.stderr.write):
    """See base class."""
    # Delegate to the first registered authenticator that is available.
    for authenticator in self.authenticators:
        if not authenticator.IsAvailable():
            continue
        return authenticator.Authenticate(app_id, challenge_data,
                                          print_callback)
    raise ValueError('No valid authenticators found')
def GetLocalU2FInterface(origin=socket.gethostname()):
    """Obtains a U2FInterface for the first valid local U2FHID device found."""
    for transport in hidtransport.DiscoverLocalHIDU2FDevices():
        try:
            key = hardware.SecurityKey(transport=transport)
            return U2FInterface(security_key=key, origin=origin)
        except errors.UnsupportedVersionException:
            # Skip over devices that don't speak the proper version of the
            # protocol.
            pass
    # Unable to find a device
    raise errors.NoDeviceFoundError()
def Register(self, app_id, challenge, registered_keys):
    """Registers app_id with the security key.

    Executes the U2F registration flow with the security key.

    Args:
      app_id: The app_id to register the security key against.
      challenge: Server challenge passed to the security key.
      registered_keys: List of keys already registered for this app_id+user.

    Returns:
      RegisterResponse with key_handle and attestation information in it (
      encoded in FIDO U2F binary format within registration_data field).

    Raises:
      U2FError: There was some kind of problem with registration (e.g.
      the device was already registered or there was a timeout waiting
      for the test of user presence).
    """
    client_data = model.ClientData(model.ClientData.TYP_REGISTRATION, challenge,
                                   self.origin)
    challenge_param = self.InternalSHA256(client_data.GetJson())
    app_param = self.InternalSHA256(app_id)
    # First pass: probe every already-registered key in check-only mode to
    # find out whether this device already holds one of them.
    for key in registered_keys:
        try:
            # skip non U2F_V2 keys
            if key.version != u'U2F_V2':
                continue
            resp = self.security_key.CmdAuthenticate(challenge_param, app_param,
                                                     key.key_handle, True)
            # check_only mode CmdAuthenticate should always raise some
            # exception; if it returns, that is itself an error (the raise
            # below is caught by the HardwareError handler and surfaced as
            # a BAD_REQUEST U2FError).
            raise errors.HardwareError('Should Never Happen')
        except errors.TUPRequiredError:
            # This indicates key was valid. Thus, no need to register
            raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
        except errors.InvalidKeyHandleError as e:
            # This is the case of a key for a different token, so we just ignore it.
            pass
        except errors.HardwareError as e:
            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
    # Now register the new key.
    # Retry up to 30 times with 0.5s sleeps (~15s total), winking the token
    # each round while the test of user presence is still outstanding.
    for _ in range(30):
        try:
            resp = self.security_key.CmdRegister(challenge_param, app_param)
            return model.RegisterResponse(resp, client_data)
        except errors.TUPRequiredError as e:
            self.security_key.CmdWink()
            time.sleep(0.5)
        except errors.HardwareError as e:
            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
    raise errors.U2FError(errors.U2FError.TIMEOUT)
Executes the U2F registration flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key.
registered_keys: List of keys already registered for this app_id+user.
Returns:
RegisterResponse with key_handle and attestation information in it (
encoded in FIDO U2F binary format within registration_data field).
Raises:
U2FError: There was some kind of problem with registration (e.g.
the device was already registered or there was a timeout waiting
for the test of user presence). | entailment |
def Authenticate(self, app_id, challenge, registered_keys):
    """Authenticates app_id with the security key.

    Executes the U2F authentication/signature flow with the security key.

    Args:
      app_id: The app_id to register the security key against.
      challenge: Server challenge passed to the security key as a bytes object.
      registered_keys: List of keys already registered for this app_id+user.

    Returns:
      SignResponse with client_data, key_handle, and signature_data. The client
      data is an object, while the signature_data is encoded in FIDO U2F binary
      format.

    Raises:
      U2FError: There was some kind of problem with authentication (e.g.
      there was a timeout while waiting for the test of user presence.)
    """
    client_data = model.ClientData(model.ClientData.TYP_AUTHENTICATION,
                                   challenge, self.origin)
    app_param = self.InternalSHA256(app_id)
    challenge_param = self.InternalSHA256(client_data.GetJson())
    num_invalid_keys = 0
    for key in registered_keys:
        try:
            # Only keys registered with the U2F_V2 protocol are usable here.
            if key.version != u'U2F_V2':
                continue
            # Retry up to 30 times with 0.5s sleeps (~15s total), winking the
            # token while a test of user presence is still required.
            for _ in range(30):
                try:
                    resp = self.security_key.CmdAuthenticate(challenge_param, app_param,
                                                             key.key_handle)
                    return model.SignResponse(key.key_handle, resp, client_data)
                except errors.TUPRequiredError:
                    self.security_key.CmdWink()
                    time.sleep(0.5)
        except errors.InvalidKeyHandleError:
            # Key belongs to a different token; count it and try the next one.
            num_invalid_keys += 1
            continue
        except errors.HardwareError as e:
            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
    if num_invalid_keys == len(registered_keys):
        # In this case, all provided keys were invalid.
        raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
    # In this case, the TUP was not pressed.
    raise errors.U2FError(errors.U2FError.TIMEOUT)
Executes the U2F authentication/signature flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key as a bytes object.
registered_keys: List of keys already registered for this app_id+user.
Returns:
SignResponse with client_data, key_handle, and signature_data. The client
data is an object, while the signature_data is encoded in FIDO U2F binary
format.
Raises:
U2FError: There was some kind of problem with authentication (e.g.
there was a timeout while waiting for the test of user presence.) | entailment |
def CmdRegister(self, challenge_param, app_param):
    """Asks the security key to perform a U2F registration.

    Args:
      challenge_param: Arbitrary 32 byte challenge string.
      app_param: Arbitrary 32 byte application parameter.

    Returns:
      The raw response body (key handle, attestation, and signature),
      encoded per the FIDO U2F specification.

    Raises:
      TUPRequiredError: A test of user presence is required to proceed.
      ApduError: Something went wrong on the device.
    """
    self.logger.debug('CmdRegister')
    if len(challenge_param) != 32 or len(app_param) != 32:
        raise errors.InvalidRequestError()
    payload = bytearray(challenge_param)
    payload.extend(app_param)
    request = apdu.CommandApdu(
        0,
        apdu.CMD_REGISTER,
        0x03,  # Per the U2F reference code tests
        0x00,
        payload)
    response = self.InternalSendApdu(request)
    response.CheckSuccessOrRaise()
    return response.body
Ask the security key to register with a particular origin & client.
Args:
challenge_param: Arbitrary 32 byte challenge string.
app_param: Arbitrary 32 byte applciation parameter.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
TUPRequiredError: A Test of User Precense is required to proceed.
ApduError: Something went wrong on the device. | entailment |
def CmdAuthenticate(self,
                    challenge_param,
                    app_param,
                    key_handle,
                    check_only=False):
    """Attempts to obtain an authentication signature from the device.

    Args:
      challenge_param: SHA-256 hash of client_data object as a bytes object.
      app_param: SHA-256 hash of the app id as a bytes object.
      key_handle: The key handle to use to issue the signature as a bytes
        object.
      check_only: If true, only check whether key_handle is valid.

    Returns:
      The raw signature response body, encoded per the FIDO U2F specs.

    Raises:
      TUPRequiredError: If check_only is False, a test of user presence is
        required to proceed. If check_only is True, this means the
        key_handle is valid.
      InvalidKeyHandleError: The key_handle is not valid for this device.
      ApduError: Something else went wrong on the device.
    """
    self.logger.debug('CmdAuthenticate')
    if len(challenge_param) != 32 or len(app_param) != 32:
        raise errors.InvalidRequestError()
    if check_only:
        control = 0x07  # check-only probe
    else:
        control = 0x03  # enforce test of user presence
    payload = bytearray(challenge_param)
    payload.extend(app_param)
    payload.append(len(key_handle))
    payload.extend(key_handle)
    response = self.InternalSendApdu(apdu.CommandApdu(
        0, apdu.CMD_AUTH, control, 0x00, payload))
    response.CheckSuccessOrRaise()
    return response.body
Ask the security key to sign a challenge for a particular key handle
in order to authenticate the user.
Args:
challenge_param: SHA-256 hash of client_data object as a bytes
object.
app_param: SHA-256 hash of the app id as a bytes object.
key_handle: The key handle to use to issue the signature as a bytes
object.
check_only: If true, only check if key_handle is valid.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
TUPRequiredError: If check_only is False, a Test of User Precense
is required to proceed. If check_only is True, this means
the key_handle is valid.
InvalidKeyHandleError: The key_handle is not valid for this device.
ApduError: Something else went wrong on the device. | entailment |
def CmdVersion(self):
    """Obtains the protocol version of the device, testing transport format.

    Should be called at least once before CmdAuthenticate or CmdRegister so
    InternalSendApdu can settle on ISO 7816-4 vs. the legacy U2F framing.

    Returns:
      The version of the U2F protocol in use.

    Raises:
      ApduError: If the device did not answer with a success status word.
    """
    self.logger.debug('CmdVersion')
    response = self.InternalSendApdu(apdu.CommandApdu(
        0, apdu.CMD_VERSION, 0x00, 0x00))
    if response.IsSuccess():
        return response.body
    raise errors.ApduError(response.sw1, response.sw2)
Obtains the version of the device and determines whether to use ISO
7816-4 or the U2f variant. This function should be called at least once
before CmdAuthenticate or CmdRegister to make sure the object is using the
proper transport for the device.
Returns:
The version of the U2F protocol in use. | entailment |
def InternalSendApdu(self, apdu_to_send):
    """Sends an APDU, falling back to the legacy framing when necessary.

    Sends an APDU to the device, possibly falling back to the legacy
    encoding format that is not ISO7816-4 compatible.

    Args:
      apdu_to_send: The CommandApdu object to send.

    Returns:
      The ResponseApdu object constructed out of the device's reply.
    """
    if self.use_legacy_format:
        raw_reply = self.transport.SendMsgBytes(
            apdu_to_send.ToLegacyU2FByteArray())
        return apdu.ResponseApdu(raw_reply)
    raw_reply = self.transport.SendMsgBytes(apdu_to_send.ToByteArray())
    response = apdu.ResponseApdu(raw_reply)
    if response.sw1 == 0x67 and response.sw2 == 0x00:
        # SW=0x6700 means the device rejected the standard framing; remember
        # that and retry the same command using the legacy encoding.
        self.use_legacy_format = True
        return self.InternalSendApdu(apdu_to_send)
    return response
Sends an APDU to the device, possibly falling back to the legacy
encoding format that is not ISO7816-4 compatible.
Args:
apdu_to_send: The CommandApdu object to send
Returns:
The ResponseApdu object constructed out of the devices reply. | entailment |
def GetDeviceIntProperty(dev_ref, key):
    """Reads an int property from the HID device; None if absent/unreadable."""
    cf_key = CFStr(key)
    value_ref = iokit.IOHIDDeviceGetProperty(dev_ref, cf_key)
    cf.CFRelease(cf_key)
    if not value_ref:
        return None
    actual_type = cf.CFGetTypeID(value_ref)
    if actual_type != cf.CFNumberGetTypeID():
        raise errors.OsHidError('Expected number type, got {}'.format(
            actual_type))
    result = ctypes.c_int32()
    ok = cf.CFNumberGetValue(value_ref, K_CF_NUMBER_SINT32_TYPE,
                             ctypes.byref(result))
    return result.value if ok else None
def GetDeviceStringProperty(dev_ref, key):
    """Reads a string property from the HID device; None if absent/unreadable."""
    cf_key = CFStr(key)
    value_ref = iokit.IOHIDDeviceGetProperty(dev_ref, cf_key)
    cf.CFRelease(cf_key)
    if not value_ref:
        return None
    actual_type = cf.CFGetTypeID(value_ref)
    if actual_type != cf.CFStringGetTypeID():
        raise errors.OsHidError('Expected string type, got {}'.format(
            actual_type))
    string_ref = ctypes.cast(value_ref, CF_STRING_REF)
    out_buf = ctypes.create_string_buffer(DEVICE_STRING_PROPERTY_BUFFER_SIZE)
    ok = cf.CFStringGetCString(string_ref, out_buf,
                               DEVICE_STRING_PROPERTY_BUFFER_SIZE,
                               K_CF_STRING_ENCODING_UTF8)
    if not ok:
        return None
    return out_buf.value.decode('utf8')
def GetDevicePath(device_handle):
    """Obtains the unique path for the device.

    Args:
      device_handle: reference to the device

    Returns:
      A unique path for the device, obtained from the IO Registry.
    """
    # Ask the IO Registry for the service object backing this device, then
    # fetch its registry path into a fixed-size buffer.
    service = iokit.IOHIDDeviceGetService(device_handle)
    path_buf = ctypes.create_string_buffer(DEVICE_PATH_BUFFER_SIZE)
    iokit.IORegistryEntryGetPath(service, K_IO_SERVICE_PLANE, path_buf)
    return path_buf.value
Args:
device_handle: reference to the device
Returns:
A unique path for the device, obtained from the IO Registry | entailment |
def HidReadCallback(read_queue, result, sender, report_type, report_id, report,
                    report_length):
    """Handles an incoming IN report from the HID device.

    Copies the first report_length bytes of the raw report into a plain list
    and enqueues it for the reader.
    """
    # These parameters are required by the IOKit callback signature but are
    # not needed here.
    del result, sender, report_type, report_id
    read_queue.put([report[index] for index in range(report_length)])
def DeviceReadThread(hid_device):
    """Binds a device to the thread's run loop, then starts the run loop.

    Args:
      hid_device: The MacOsHidDevice object

    The HID manager requires a run loop to handle Report reads. This thread
    function serves that purpose.
    """
    # Schedule device events with this thread's run loop.
    hid_device.run_loop_ref = cf.CFRunLoopGetCurrent()
    if not hid_device.run_loop_ref:
        logger.error('Failed to get current run loop')
        return
    iokit.IOHIDDeviceScheduleWithRunLoop(hid_device.device_handle,
                                         hid_device.run_loop_ref,
                                         K_CF_RUNLOOP_DEFAULT_MODE)
    # Run the run loop: keep iterating while it merely timed out or handled a
    # source; any other exit code terminates the loop.
    run_loop_run_result = K_CF_RUN_LOOP_RUN_TIMED_OUT
    while (run_loop_run_result == K_CF_RUN_LOOP_RUN_TIMED_OUT or
           run_loop_run_result == K_CF_RUN_LOOP_RUN_HANDLED_SOURCE):
        run_loop_run_result = cf.CFRunLoopRunInMode(
            K_CF_RUNLOOP_DEFAULT_MODE,
            # NOTE(review): CFRunLoopRunInMode takes seconds, so this waits up
            # to 1000 seconds per iteration; the value reads like it was meant
            # as milliseconds — confirm the intended timeout.
            1000,  # Timeout in seconds
            False)  # Return after source handled
    # Log any unexpected run loop exit (anything other than an explicit stop).
    if run_loop_run_result != K_CF_RUN_LOOP_RUN_STOPPED:
        logger.error('Unexpected run loop exit code: %d', run_loop_run_result)
    # Unschedule from run loop before the thread exits.
    iokit.IOHIDDeviceUnscheduleFromRunLoop(hid_device.device_handle,
                                           hid_device.run_loop_ref,
                                           K_CF_RUNLOOP_DEFAULT_MODE)
Args:
hid_device: The MacOsHidDevice object
The HID manager requires a run loop to handle Report reads. This thread
function serves that purpose. | entailment |
def Enumerate():
    """Enumerates all HID devices known to the IOKit HID manager.

    Returns:
      A list of public descriptor dicts, one per attached HID device.

    Raises:
      errors.OsHidError: If the HID manager or its device set is unavailable.
    """
    hid_mgr = iokit.IOHIDManagerCreate(None, None)
    if not hid_mgr:
        raise errors.OsHidError('Unable to obtain HID manager reference')
    # Match every device (no filtering dictionary).
    iokit.IOHIDManagerSetDeviceMatching(hid_mgr, None)
    device_set_ref = iokit.IOHIDManagerCopyDevices(hid_mgr)
    if not device_set_ref:
        raise errors.OsHidError('Failed to obtain devices from HID manager')
    device_count = iokit.CFSetGetCount(device_set_ref)
    device_refs = (IO_HID_DEVICE_REF * device_count)()
    iokit.CFSetGetValues(device_set_ref, device_refs)
    # Build a public descriptor dict per device.
    descriptors = []
    for device in device_refs:
        descriptor = base.DeviceDescriptor()
        descriptor.vendor_id = GetDeviceIntProperty(
            device, HID_DEVICE_PROPERTY_VENDOR_ID)
        descriptor.product_id = GetDeviceIntProperty(
            device, HID_DEVICE_PROPERTY_PRODUCT_ID)
        descriptor.product_string = GetDeviceStringProperty(
            device, HID_DEVICE_PROPERTY_PRODUCT)
        descriptor.usage = GetDeviceIntProperty(
            device, HID_DEVICE_PROPERTY_PRIMARY_USAGE)
        descriptor.usage_page = GetDeviceIntProperty(
            device, HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE)
        descriptor.report_id = GetDeviceIntProperty(
            device, HID_DEVICE_PROPERTY_REPORT_ID)
        descriptor.path = GetDevicePath(device)
        descriptors.append(descriptor.ToPublicDict())
    # Release the CF objects we own.
    cf.CFRelease(device_set_ref)
    cf.CFRelease(hid_mgr)
    return descriptors
def Write(self, packet):
    """Writes a single HID OUT report (report id 0) to the device."""
    report_len = self.internal_max_out_report_len
    report = (ctypes.c_uint8 * report_len)()
    report[:] = packet[:]
    status = iokit.IOHIDDeviceSetReport(self.device_handle,
                                        K_IO_HID_REPORT_TYPE_OUTPUT,
                                        0,  # report id
                                        report,
                                        report_len)
    # Any status other than success indicates failure.
    if status != K_IO_RETURN_SUCCESS:
        raise errors.OsHidError('Failed to write report to device')
def Read(self):
    """Blocks until an IN report is available, then returns it."""
    report = None
    while report is None:
        try:
            report = self.read_queue.get(timeout=60)
        except queue.Empty:
            # Nothing arrived yet; the timeout only makes the blocking get
            # wake up periodically, so just keep waiting.
            pass
    return report
def change_view(self, *args, **kwargs):
    """Renders detailed model edit page.

    Ensures the admin's `hierarchy` attribute is normalized into a
    Hierarchy instance, gives it a chance to react to the view arguments,
    then defers to the stock Django admin implementation.
    """
    # Normalize self.hierarchy (may be True/None/etc.) into a Hierarchy object.
    Hierarchy.init_hierarchy(self)
    # Let the hierarchy inspect/adjust the view invocation (see hook_change_view).
    self.hierarchy.hook_change_view(self, args, kwargs)
    return super(HierarchicalModelAdmin, self).change_view(*args, **kwargs)
def action_checkbox(self, obj):
    """Renders the row-selection checkbox.

    The synthetic "upper level" navigation row gets no checkbox, since it
    is not a real object admin actions could apply to.
    """
    is_upper_link = getattr(obj, Hierarchy.UPPER_LEVEL_MODEL_ATTR, False)
    if is_upper_link:
        return ''
    return super(HierarchicalModelAdmin, self).action_checkbox(obj)
Disable checkbox for parent item navigation link. | entailment |
def hierarchy_nav(self, obj):
    """Renders hierarchy navigation elements (folders).

    Returns an HTML anchor styled as a folder (or folder-up) icon for rows
    that have children or represent the "upper level" link; an empty string
    for ordinary leaf rows.
    """
    result_repr = ''  # For items without children.
    ch_count = getattr(obj, Hierarchy.CHILD_COUNT_MODEL_ATTR, 0)
    is_parent_link = getattr(obj, Hierarchy.UPPER_LEVEL_MODEL_ATTR, False)
    if is_parent_link or ch_count:  # For items with children and parent links.
        icon = 'icon icon-folder'
        title = _('Objects inside: %s') % ch_count
        if is_parent_link:
            # The synthetic row navigating back up gets its own icon/title.
            icon = 'icon icon-folder-up'
            title = _('Upper level')
        url = './'
        if obj.pk:
            # Link into this object's children via the parent-id query param.
            url = '?%s=%s' % (Hierarchy.PARENT_ID_QS_PARAM, obj.pk)
        if self._current_changelist.is_popup:
            # In popup mode, carry over the current query string (minus our
            # own parent-id param) so the popup state survives navigation.
            qs_get = copy(self._current_changelist._request.GET)
            try:
                del qs_get[Hierarchy.PARENT_ID_QS_PARAM]
            except KeyError:
                pass
            qs_get = qs_get.urlencode()
            url = ('%s&%s' if '?' in url else '%s?%s') % (url, qs_get)
        result_repr = format_html('<a href="{0}" class="{1}" title="{2}"></a>', url, icon, force_text(title))
    return result_repr
def get_queryset(self, request):
    """Constructs a query set.

    Lets the active hierarchy adjust the changelist (e.g. its filter
    params) before the standard queryset is built.

    :param request:
    :return:
    """
    self._hierarchy.hook_get_queryset(self, request)
    return super(HierarchicalChangeList, self).get_queryset(request)
:param request:
:return: | entailment |
def get_results(self, request):
    """Gets query set results.

    Runs the stock result fetching, then lets the active hierarchy
    post-process the result list (e.g. navigation stubs, child counts).

    :param request:
    :return:
    """
    super(HierarchicalChangeList, self).get_results(request)
    self._hierarchy.hook_get_results(self)
:param request:
:return: | entailment |
def check_field_exists(self, field_name):
    """Verifies (in DEBUG mode only) that the model declares the field.

    :param field_name: Name of the model field to look up.
    :raises AdmirarchyConfigurationError: If the field does not exist.
    """
    if settings.DEBUG:
        try:
            self.lookup_opts.get_field(field_name)
        except FieldDoesNotExist as e:
            raise AdmirarchyConfigurationError(e)
:param field_name:
:return: | entailment |
def init_hierarchy(cls, model_admin):
    """Normalizes the model admin's `hierarchy` attribute.

    Replaces falsy values with NoHierarchy and non-Hierarchy truthy values
    (e.g. plain `True`) with a default AdjacencyList.
    """
    declared = getattr(model_admin, 'hierarchy')
    if not declared:
        resolved = NoHierarchy()
    elif isinstance(declared, Hierarchy):
        resolved = declared
    else:
        resolved = AdjacencyList()  # For `True` and etc. TODO heuristics maybe.
    model_admin.hierarchy = resolved
def get_pid_from_request(cls, changelist, request):
    """Extracts the hierarchy parent ID from the request's query string.

    Also strips the parent-id parameter from the changelist's filter params
    so it is not treated as a model field lookup.

    :param changelist:
    :param request:
    :return: The parent ID, or None when absent or empty.
    """
    raw_value = request.GET.get(cls.PARENT_ID_QS_PARAM, False)
    changelist.params.pop(cls.PARENT_ID_QS_PARAM, None)
    return raw_value or None
:param changelist:
:param request:
:return: | entailment |
def hook_get_queryset(self, changelist, request):
    """Triggered by `ChangeList.get_queryset()`.

    Reads the parent ID from the request and turns it into a changelist
    filter on the parent-id field, so only that parent's children are shown.
    """
    changelist.check_field_exists(self.pid_field)
    self.pid = self.get_pid_from_request(changelist, request)
    # pid is None at the root level; as a Django filter value that selects
    # rows whose parent field is NULL.
    changelist.params[self.pid_field] = self.pid
def hook_get_results(self, changelist):
    """Triggered by `ChangeList.get_results()`.

    Prepends an "upper level" navigation stub when browsing inside a
    parent, and annotates every result with its immediate child count so
    the changelist can render folder icons.
    """
    result_list = list(changelist.result_list)
    if self.pid:
        # Render to upper level link: a stub model instance whose pk is the
        # current parent's own parent id (None at the top level).
        parent = changelist.model.objects.get(pk=self.pid)
        parent = changelist.model(pk=getattr(parent, self.pid_field_real, None))
        setattr(parent, self.UPPER_LEVEL_MODEL_ATTR, True)
        result_list = [parent] + result_list
    # Get children stats with a single grouped count query over all rows.
    kwargs_filter = {'%s__in' % self.pid_field: result_list}
    stats_qs = changelist.model.objects.filter(
        **kwargs_filter).values_list(self.pid_field).annotate(cnt=models.Count(self.pid_field))
    stats = {item[0]: item[1] for item in stats_qs}
    for item in result_list:
        # Skip items already annotated (e.g. the upper-level stub).
        if hasattr(item, self.CHILD_COUNT_MODEL_ATTR):
            continue
        try:
            setattr(item, self.CHILD_COUNT_MODEL_ATTR, stats[item.id])
        except KeyError:
            # No children counted for this item.
            setattr(item, self.CHILD_COUNT_MODEL_ATTR, 0)
    changelist.result_list = result_list
def hook_get_queryset(self, changelist, request):
    """Triggered by `ChangeList.get_queryset()`.

    Resolves the current parent node and narrows the changelist to its
    immediate children (or to root-level nodes when no parent is given).
    """
    changelist.check_field_exists(self.left_field)
    changelist.check_field_exists(self.right_field)
    self.pid = self.get_pid_from_request(changelist, request)
    # Get parent item first.
    qs = changelist.root_queryset
    if self.pid:
        self.parent = qs.get(pk=self.pid)
        changelist.params.update(self.get_immediate_children_filter(self.parent))
    else:
        changelist.params[self.level_field] = self.root_level
        # Resolve the root node from the same filter params, skipping
        # Django admin's internal parameters (those start with '_').
        self.parent = qs.get(**{key: val for key, val in changelist.params.items() if not key.startswith('_')})
def hook_get_results(self, changelist):
    """Triggered by `ChangeList.get_results()`.

    Annotates each row with a child-count marker and prepends an
    "upper level" navigation stub when browsing below the root.
    """
    # Poor NestedSet guys they've punished themselves once chosen that approach,
    # and now we punish them again with all those DB hits.
    result_list = list(changelist.result_list)
    # Get children stats: in a nested set, a node is a leaf exactly when
    # right == left + 1.
    filter_kwargs = {'%s' % self.left_field: models.F('%s' % self.right_field) - 1}  # Leaf nodes only.
    filter_kwargs.update(self.get_immediate_children_filter(self.parent))
    stats_qs = changelist.result_list.filter(**filter_kwargs).values_list('id')
    leafs = [item[0] for item in stats_qs]
    for result in result_list:
        if result.id in leafs:
            setattr(result, self.CHILD_COUNT_MODEL_ATTR, 0)
        else:
            setattr(result, self.CHILD_COUNT_MODEL_ATTR, '>1')  # Too much pain to get real stats, so that'll suffice.
    if self.pid:
        # Render to upper level link.
        parent = self.parent
        # Ancestors strictly enclose the parent's (left, right) interval;
        # the nearest ancestor has the greatest left value.
        filter_kwargs = {
            '%s__lt' % self.left_field: getattr(parent, self.left_field),
            '%s__gt' % self.right_field: getattr(parent, self.right_field),
        }
        try:
            granparent_id = changelist.model.objects.filter(**filter_kwargs).order_by('-%s' % self.left_field)[0].id
        except IndexError:
            # No enclosing node: the parent sits directly under the root.
            granparent_id = None
        if granparent_id != parent.id:
            parent = changelist.model(pk=granparent_id)
        setattr(parent, self.UPPER_LEVEL_MODEL_ATTR, True)
        result_list = [parent] + result_list
    changelist.result_list = result_list
def read_file(fpath):
    """Returns the contents of a file located under the package base path."""
    full_path = os.path.join(PATH_BASE, fpath)
    with io.open(full_path) as fh:
        return fh.read()
def get_version():
    """Returns version number, without module import (which can lead to
    ImportError if some dependencies are unavailable before install).

    Reads `admirarchy/__init__.py` and extracts the `VERSION` tuple,
    returning it as a dotted string (e.g. '1.2.3').
    """
    contents = read_file(os.path.join('admirarchy', '__init__.py'))
    # Raw string: '\(' in a plain string literal is an invalid escape
    # sequence (a SyntaxWarning on modern Python); regex patterns should
    # always be raw strings.
    version = re.search(r'VERSION = \(([^)]+)\)', contents)
    version = version.group(1).replace(', ', '.').strip()
    return version
if some dependencies are unavailable before install. | entailment |
def process_update(self, update):
    """Process an incoming update from a remote NetworkTables."""
    payload = json.loads(update)
    NetworkTables.getEntry(payload["k"]).setValue(payload["v"])
def _send_update(self, data):
"""Send a NetworkTables update via the stored send_update callback"""
if isinstance(data, dict):
data = json.dumps(data)
self.update_callback(data) | Send a NetworkTables update via the stored send_update callback | entailment |
def _nt_on_change(self, key, value, isNew):
"""NetworkTables global listener callback"""
self._send_update({"k": key, "v": value, "n": isNew}) | NetworkTables global listener callback | entailment |
def close(self):
    """
    Clean up NetworkTables listeners.

    Unregisters both the global entry listener and the connection listener
    installed by this object, so no further callbacks fire after close.
    """
    NetworkTables.removeGlobalListener(self._nt_on_change)
    NetworkTables.removeConnectionListener(self._nt_connected)
def get_lat_long_climate_zones(latitude, longitude):
    """ Get climate zones that contain lat/long coordinates.

    Parameters
    ----------
    latitude : float
        Latitude of point.
    longitude : float
        Longitude of point.

    Returns
    -------
    climate_zones: dict of str
        Region ids for each climate zone type (value is None when the point
        falls outside every zone of that type).
    """
    try:
        from shapely.geometry import Point
    except ImportError:  # pragma: no cover
        raise ImportError("Finding climate zone of lat/long points requires shapely.")

    (
        iecc_climate_zones,
        iecc_moisture_regimes,
        ba_climate_zones,
        ca_climate_zones,
    ) = cached_data.climate_zone_geometry

    point = Point(longitude, latitude)  # x,y

    def _first_containing(zones):
        # Returns the id of the first zone whose shape contains the point,
        # or None when no zone matches (the old for/else fall-through).
        for zone_id, shape in zones:
            if shape.contains(point):
                return zone_id
        return None

    # One shared lookup per zone system; previously four copy-pasted
    # for/else loops with identical structure.
    return {
        "iecc_climate_zone": _first_containing(iecc_climate_zones),
        "iecc_moisture_regime": _first_containing(iecc_moisture_regimes),
        "ba_climate_zone": _first_containing(ba_climate_zones),
        "ca_climate_zone": _first_containing(ca_climate_zones),
    }
Parameters
----------
latitude : float
Latitude of point.
longitude : float
Longitude of point.
Returns
-------
climate_zones: dict of str
Region ids for each climate zone type. | entailment |
def get_zcta_metadata(zcta):
    """ Get metadata about a ZIP Code Tabulation Area (ZCTA).

    Parameters
    ----------
    zcta : str
        ID of ZIP Code Tabulation Area

    Returns
    -------
    metadata : dict
        Dict of data about the ZCTA, including lat/long coordinates.
    """
    conn = metadata_db_connection_proxy.get_connection()
    cursor = conn.cursor()
    cursor.execute(
        """
        select
            *
        from
            zcta_metadata
        where
            zcta_id = ?
        """,
        (zcta,),
    )
    row = cursor.fetchone()
    if row is None:
        raise UnrecognizedZCTAError(zcta)
    # Pair each column name from the cursor description with its row value.
    column_names = (col[0] for col in cursor.description)
    return dict(zip(column_names, row))
Parameters
----------
zcta : str
ID of ZIP Code Tabulation Area
Returns
-------
metadata : dict
Dict of data about the ZCTA, including lat/long coordinates. | entailment |
def zcta_to_lat_long(zcta):
    """Get location of ZCTA centroid.

    Retrieves latitude and longitude of centroid of ZCTA
    to use for matching with weather station.

    Parameters
    ----------
    zcta : str
        ID of the target ZCTA.

    Returns
    -------
    latitude : float
        Latitude of centroid of ZCTA.
    longitude : float
        Longitude of centroid of ZCTA.
    """
    valid_zcta_or_raise(zcta)
    conn = metadata_db_connection_proxy.get_connection()
    cursor = conn.cursor()
    cursor.execute(
        """
        select
            latitude
            , longitude
        from
            zcta_metadata
        where
            zcta_id = ?
        """,
        (zcta,),
    )
    # A matching row is guaranteed: valid_zcta_or_raise would have raised.
    lat, lng = cursor.fetchone()
    return float(lat), float(lng)
Retrieves latitude and longitude of centroid of ZCTA
to use for matching with weather station.
Parameters
----------
zcta : str
ID of the target ZCTA.
Returns
-------
latitude : float
Latitude of centroid of ZCTA.
longitude : float
Target Longitude of centroid of ZCTA. | entailment |
def get_zcta_ids(state=None):
    """ Get ids of all supported ZCTAs, optionally by state.

    Parameters
    ----------
    state : str, optional
        Select zipcodes only from this state or territory, given as 2-letter
        abbreviation (e.g., ``'CA'``, ``'PR'``).

    Returns
    -------
    results : list of str
        List of all supported selected ZCTA IDs.
    """
    conn = metadata_db_connection_proxy.get_connection()
    cursor = conn.cursor()
    if state is None:
        cursor.execute(
            """
            select zcta_id from zcta_metadata
            """
        )
    else:
        cursor.execute(
            """
            select zcta_id from zcta_metadata where state = ?
            """,
            (state,),
        )
    return [zcta_id for (zcta_id,) in cursor.fetchall()]
Parameters
----------
state : str, optional
Select zipcodes only from this state or territory, given as 2-letter
abbreviation (e.g., ``'CA'``, ``'PR'``).
Returns
-------
results : list of str
List of all supported selected ZCTA IDs. | entailment |
def valid_zcta_or_raise(zcta):
    """ Check if ZCTA is valid and raise eeweather.UnrecognizedZCTAError if not. """
    conn = metadata_db_connection_proxy.get_connection()
    cursor = conn.cursor()
    cursor.execute(
        """
        select exists (
          select
            zcta_id
          from
            zcta_metadata
          where
            zcta_id = ?
        )
        """,
        (zcta,),
    )
    (found,) = cursor.fetchone()
    if not found:
        raise UnrecognizedZCTAError(zcta)
    return True
def valid_usaf_id_or_raise(usaf_id):
    """ Check if USAF ID is valid and raise eeweather.UnrecognizedUSAFIDError if not. """
    conn = metadata_db_connection_proxy.get_connection()
    cursor = conn.cursor()
    cursor.execute(
        """
        select exists (
          select
            usaf_id
          from
            isd_station_metadata
          where
            usaf_id = ?
        )
        """,
        (usaf_id,),
    )
    (found,) = cursor.fetchone()
    if not found:
        raise UnrecognizedUSAFIDError(usaf_id)
    return True
def rank_stations(
site_latitude,
site_longitude,
site_state=None,
site_elevation=None,
match_iecc_climate_zone=False,
match_iecc_moisture_regime=False,
match_ba_climate_zone=False,
match_ca_climate_zone=False,
match_state=False,
minimum_quality=None,
minimum_tmy3_class=None,
max_distance_meters=None,
max_difference_elevation_meters=None,
is_tmy3=None,
is_cz2010=None,
):
""" Get a ranked, filtered set of candidate weather stations and metadata
for a particular site.
Parameters
----------
site_latitude : float
Latitude of target site for which to find candidate weather stations.
site_longitude : float
Longitude of target site for which to find candidate weather stations.
site_state : str, 2 letter abbreviation
US state of target site, used optionally to filter potential candidate
weather stations. Ignored unless ``match_state=True``.
site_elevation : float
Elevation of target site in meters, used optionally to filter potential
candidate weather stations. Ignored unless
``max_difference_elevation_meters`` is set.
match_iecc_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the IECC climate zone of the target site.
match_iecc_moisture_regime : bool
If ``True``, filter candidate weather stations to those
matching the IECC moisture regime of the target site.
match_ca_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the CA climate zone of the target site.
match_ba_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the Building America climate zone of the target site.
match_state : bool
If ``True``, filter candidate weather stations to those
matching the US state of the target site, as specified by
``site_state=True``.
minimum_quality : str, ``'high'``, ``'medium'``, ``'low'``
If given, filter candidate weather stations to those meeting or
exceeding the given quality, as summarized by the frequency and
availability of observations in the NOAA Integrated Surface Database.
minimum_tmy3_class : str, ``'I'``, ``'II'``, ``'III'``
If given, filter candidate weather stations to those meeting or
exceeding the given class, as reported in the NREL TMY3 metadata.
max_distance_meters : float
If given, filter candidate weather stations to those within the
``max_distance_meters`` of the target site location.
max_difference_elevation_meters : float
If given, filter candidate weather stations to those with elevations
within ``max_difference_elevation_meters`` of the target site elevation.
is_tmy3 : bool
If given, filter candidate weather stations to those for which TMY3
normal year temperature data is available.
is_cz2010 : bool
If given, filter candidate weather stations to those for which CZ2010
normal year temperature data is available.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Index is ``usaf_id``. Each row contains a potential weather station
match and metadata. Contains the following columns:
- ``rank``: Rank of weather station match for the target site.
- ``distance_meters``: Distance from target site to weather station site.
- ``latitude``: Latitude of weather station site.
- ``longitude``: Longitude of weather station site.
- ``iecc_climate_zone``: IECC Climate Zone ID (1-8)
- ``iecc_moisture_regime``: IECC Moisture Regime ID (A-C)
- ``ba_climate_zone``: Building America climate zone name
- ``ca_climate_zone``: Califoria climate zone number
- ``rough_quality``: Approximate measure of frequency of ISD
observations data at weather station.
- ``elevation``: Elevation of weather station site, if available.
- ``state``: US state of weather station site, if applicable.
- ``tmy3_class``: Weather station class as reported by NREL TMY3, if
available
- ``is_tmy3``: Weather station has associated TMY3 data.
- ``is_cz2010``: Weather station has associated CZ2010 data.
- ``difference_elevation_meters``: Absolute difference in meters
between target site elevation and weather station elevation, if
available.
"""
candidates = cached_data.all_station_metadata
# compute distances
candidates_defined_lat_long = candidates[
candidates.latitude.notnull() & candidates.longitude.notnull()
]
candidates_latitude = candidates_defined_lat_long.latitude
candidates_longitude = candidates_defined_lat_long.longitude
tiled_site_latitude = np.tile(site_latitude, candidates_latitude.shape)
tiled_site_longitude = np.tile(site_longitude, candidates_longitude.shape)
geod = pyproj.Geod(ellps="WGS84")
dists = geod.inv(
tiled_site_longitude,
tiled_site_latitude,
candidates_longitude.values,
candidates_latitude.values,
)[2]
distance_meters = pd.Series(dists, index=candidates_defined_lat_long.index).reindex(
candidates.index
)
candidates["distance_meters"] = distance_meters
if site_elevation is not None:
difference_elevation_meters = (candidates.elevation - site_elevation).abs()
else:
difference_elevation_meters = None
candidates["difference_elevation_meters"] = difference_elevation_meters
site_climate_zones = get_lat_long_climate_zones(site_latitude, site_longitude)
site_iecc_climate_zone = site_climate_zones["iecc_climate_zone"]
site_iecc_moisture_regime = site_climate_zones["iecc_moisture_regime"]
site_ca_climate_zone = site_climate_zones["ca_climate_zone"]
site_ba_climate_zone = site_climate_zones["ba_climate_zone"]
# create filters
filters = []
if match_iecc_climate_zone:
if site_iecc_climate_zone is None:
filters.append(candidates.iecc_climate_zone.isnull())
else:
filters.append(candidates.iecc_climate_zone == site_iecc_climate_zone)
if match_iecc_moisture_regime:
if site_iecc_moisture_regime is None:
filters.append(candidates.iecc_moisture_regime.isnull())
else:
filters.append(candidates.iecc_moisture_regime == site_iecc_moisture_regime)
if match_ba_climate_zone:
if site_ba_climate_zone is None:
filters.append(candidates.ba_climate_zone.isnull())
else:
filters.append(candidates.ba_climate_zone == site_ba_climate_zone)
if match_ca_climate_zone:
if site_ca_climate_zone is None:
filters.append(candidates.ca_climate_zone.isnull())
else:
filters.append(candidates.ca_climate_zone == site_ca_climate_zone)
if match_state:
if site_state is None:
filters.append(candidates.state.isnull())
else:
filters.append(candidates.state == site_state)
if is_tmy3 is not None:
filters.append(candidates.is_tmy3.isin([is_tmy3]))
if is_cz2010 is not None:
filters.append(candidates.is_cz2010.isin([is_cz2010]))
if minimum_quality == "low":
filters.append(candidates.rough_quality.isin(["high", "medium", "low"]))
elif minimum_quality == "medium":
filters.append(candidates.rough_quality.isin(["high", "medium"]))
elif minimum_quality == "high":
filters.append(candidates.rough_quality.isin(["high"]))
if minimum_tmy3_class == "III":
filters.append(candidates.tmy3_class.isin(["I", "II", "III"]))
elif minimum_tmy3_class == "II":
filters.append(candidates.tmy3_class.isin(["I", "II"]))
elif minimum_tmy3_class == "I":
filters.append(candidates.tmy3_class.isin(["I"]))
if max_distance_meters is not None:
filters.append(candidates.distance_meters <= max_distance_meters)
if max_difference_elevation_meters is not None and site_elevation is not None:
filters.append(
candidates.difference_elevation_meters <= max_difference_elevation_meters
)
combined_filters = _combine_filters(filters, candidates.index)
filtered_candidates = candidates[combined_filters]
ranked_filtered_candidates = filtered_candidates.sort_values(by=["distance_meters"])
# add rank column
ranks = range(1, 1 + len(ranked_filtered_candidates))
ranked_filtered_candidates.insert(0, "rank", ranks)
return ranked_filtered_candidates[
[
"rank",
"distance_meters",
"latitude",
"longitude",
"iecc_climate_zone",
"iecc_moisture_regime",
"ba_climate_zone",
"ca_climate_zone",
"rough_quality",
"elevation",
"state",
"tmy3_class",
"is_tmy3",
"is_cz2010",
"difference_elevation_meters",
]
] | Get a ranked, filtered set of candidate weather stations and metadata
for a particular site.
Parameters
----------
site_latitude : float
Latitude of target site for which to find candidate weather stations.
site_longitude : float
Longitude of target site for which to find candidate weather stations.
site_state : str, 2 letter abbreviation
US state of target site, used optionally to filter potential candidate
weather stations. Ignored unless ``match_state=True``.
site_elevation : float
Elevation of target site in meters, used optionally to filter potential
candidate weather stations. Ignored unless
``max_difference_elevation_meters`` is set.
match_iecc_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the IECC climate zone of the target site.
match_iecc_moisture_regime : bool
If ``True``, filter candidate weather stations to those
matching the IECC moisture regime of the target site.
match_ca_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the CA climate zone of the target site.
match_ba_climate_zone : bool
If ``True``, filter candidate weather stations to those
matching the Building America climate zone of the target site.
match_state : bool
If ``True``, filter candidate weather stations to those
matching the US state of the target site, as specified by
``site_state=True``.
minimum_quality : str, ``'high'``, ``'medium'``, ``'low'``
If given, filter candidate weather stations to those meeting or
exceeding the given quality, as summarized by the frequency and
availability of observations in the NOAA Integrated Surface Database.
minimum_tmy3_class : str, ``'I'``, ``'II'``, ``'III'``
If given, filter candidate weather stations to those meeting or
exceeding the given class, as reported in the NREL TMY3 metadata.
max_distance_meters : float
If given, filter candidate weather stations to those within the
``max_distance_meters`` of the target site location.
max_difference_elevation_meters : float
If given, filter candidate weather stations to those with elevations
within ``max_difference_elevation_meters`` of the target site elevation.
is_tmy3 : bool
If given, filter candidate weather stations to those for which TMY3
normal year temperature data is available.
is_cz2010 : bool
If given, filter candidate weather stations to those for which CZ2010
normal year temperature data is available.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Index is ``usaf_id``. Each row contains a potential weather station
match and metadata. Contains the following columns:
- ``rank``: Rank of weather station match for the target site.
- ``distance_meters``: Distance from target site to weather station site.
- ``latitude``: Latitude of weather station site.
- ``longitude``: Longitude of weather station site.
- ``iecc_climate_zone``: IECC Climate Zone ID (1-8)
- ``iecc_moisture_regime``: IECC Moisture Regime ID (A-C)
- ``ba_climate_zone``: Building America climate zone name
- ``ca_climate_zone``: Califoria climate zone number
- ``rough_quality``: Approximate measure of frequency of ISD
observations data at weather station.
- ``elevation``: Elevation of weather station site, if available.
- ``state``: US state of weather station site, if applicable.
- ``tmy3_class``: Weather station class as reported by NREL TMY3, if
available
- ``is_tmy3``: Weather station has associated TMY3 data.
- ``is_cz2010``: Weather station has associated CZ2010 data.
- ``difference_elevation_meters``: Absolute difference in meters
between target site elevation and weather station elevation, if
available. | entailment |
def combine_ranked_stations(rankings):
""" Combine :any:`pandas.DataFrame` s of candidate weather stations to form
a hybrid ranking dataframe.
Parameters
----------
rankings : list of :any:`pandas.DataFrame`
Dataframes of ranked weather station candidates and metadata.
All ranking dataframes should have the same columns and must be
sorted by rank.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Dataframe has a rank column and the same columns given in the source
dataframes.
"""
if len(rankings) == 0:
raise ValueError("Requires at least one ranking.")
combined_ranking = rankings[0]
for ranking in rankings[1:]:
filtered_ranking = ranking[~ranking.index.isin(combined_ranking.index)]
combined_ranking = pd.concat([combined_ranking, filtered_ranking])
combined_ranking["rank"] = range(1, 1 + len(combined_ranking))
return combined_ranking | Combine :any:`pandas.DataFrame` s of candidate weather stations to form
a hybrid ranking dataframe.
Parameters
----------
rankings : list of :any:`pandas.DataFrame`
Dataframes of ranked weather station candidates and metadata.
All ranking dataframes should have the same columns and must be
sorted by rank.
Returns
-------
ranked_filtered_candidates : :any:`pandas.DataFrame`
Dataframe has a rank column and the same columns given in the source
dataframes. | entailment |
def select_station(
candidates,
coverage_range=None,
min_fraction_coverage=0.9,
distance_warnings=(50000, 200000),
rank=1,
):
""" Select a station from a list of candidates that meets given data
quality criteria.
Parameters
----------
candidates : :any:`pandas.DataFrame`
A dataframe of the form given by :any:`eeweather.rank_stations` or
:any:`eeweather.combine_ranked_stations`, specifically having at least
an index with ``usaf_id`` values and the column ``distance_meters``.
Returns
-------
isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str)
A qualified weather station. ``None`` if no station meets criteria.
"""
def _test_station(station):
if coverage_range is None:
return True, []
else:
start_date, end_date = coverage_range
try:
tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data(
station, start_date, end_date
)
except ISDDataNotAvailableError:
return False, [] # reject
# TODO(philngo): also need to incorporate within-day limits
if len(tempC) > 0:
fraction_coverage = tempC.notnull().sum() / float(len(tempC))
return (fraction_coverage > min_fraction_coverage), warnings
else:
return False, [] # reject
def _station_warnings(station, distance_meters):
return [
EEWeatherWarning(
qualified_name="eeweather.exceeds_maximum_distance",
description=(
"Distance from target to weather station is greater"
"than the specified km."
),
data={
"distance_meters": distance_meters,
"max_distance_meters": d,
"rank": rank,
},
)
for d in distance_warnings
if distance_meters > d
]
n_stations_passed = 0
for usaf_id, row in candidates.iterrows():
station = ISDStation(usaf_id)
test_result, warnings = _test_station(station)
if test_result:
n_stations_passed += 1
if n_stations_passed == rank:
if not warnings:
warnings = []
warnings.extend(_station_warnings(station, row.distance_meters))
return station, warnings
no_station_warning = EEWeatherWarning(
qualified_name="eeweather.no_weather_station_selected",
description=(
"No weather station found with the specified rank and"
" minimum fracitional coverage."
),
data={"rank": rank, "min_fraction_coverage": min_fraction_coverage},
)
return None, [no_station_warning] | Select a station from a list of candidates that meets given data
quality criteria.
Parameters
----------
candidates : :any:`pandas.DataFrame`
A dataframe of the form given by :any:`eeweather.rank_stations` or
:any:`eeweather.combine_ranked_stations`, specifically having at least
an index with ``usaf_id`` values and the column ``distance_meters``.
Returns
-------
isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str)
A qualified weather station. ``None`` if no station meets criteria. | entailment |
def _load_isd_station_metadata(download_path):
""" Collect metadata for US isd stations.
"""
from shapely.geometry import Point
# load ISD history which contains metadata
isd_history = pd.read_csv(
os.path.join(download_path, "isd-history.csv"),
dtype=str,
parse_dates=["BEGIN", "END"],
)
hasGEO = (
isd_history.LAT.notnull() & isd_history.LON.notnull() & (isd_history.LAT != 0)
)
isUS = (
((isd_history.CTRY == "US") & (isd_history.STATE.notnull()))
# AQ = American Samoa, GQ = Guam, RQ = Peurto Rico, VQ = Virgin Islands
| (isd_history.CTRY.str[1] == "Q")
)
hasUSAF = isd_history.USAF != "999999"
metadata = {}
for usaf_station, group in isd_history[hasGEO & isUS & hasUSAF].groupby("USAF"):
# find most recent
recent = group.loc[group.END.idxmax()]
wban_stations = list(group.WBAN)
metadata[usaf_station] = {
"usaf_id": usaf_station,
"wban_ids": wban_stations,
"recent_wban_id": recent.WBAN,
"name": recent["STATION NAME"],
"icao_code": recent.ICAO,
"latitude": recent.LAT if recent.LAT not in ("+00.000",) else None,
"longitude": recent.LON if recent.LON not in ("+000.000",) else None,
"point": Point(float(recent.LON), float(recent.LAT)),
"elevation": recent["ELEV(M)"]
if not str(float(recent["ELEV(M)"])).startswith("-999")
else None,
"state": recent.STATE,
}
return metadata | Collect metadata for US isd stations. | entailment |
def _load_isd_file_metadata(download_path, isd_station_metadata):
""" Collect data counts for isd files.
"""
isd_inventory = pd.read_csv(
os.path.join(download_path, "isd-inventory.csv"), dtype=str
)
# filter to stations with metadata
station_keep = [usaf in isd_station_metadata for usaf in isd_inventory.USAF]
isd_inventory = isd_inventory[station_keep]
# filter by year
year_keep = isd_inventory.YEAR > "2005"
isd_inventory = isd_inventory[year_keep]
metadata = {}
for (usaf_station, year), group in isd_inventory.groupby(["USAF", "YEAR"]):
if usaf_station not in metadata:
metadata[usaf_station] = {"usaf_id": usaf_station, "years": {}}
metadata[usaf_station]["years"][year] = [
{
"wban_id": row.WBAN,
"counts": [
row.JAN,
row.FEB,
row.MAR,
row.APR,
row.MAY,
row.JUN,
row.JUL,
row.AUG,
row.SEP,
row.OCT,
row.NOV,
row.DEC,
],
}
for i, row in group.iterrows()
]
return metadata | Collect data counts for isd files. | entailment |
def build_metadata_db(
zcta_geometry=False,
iecc_climate_zone_geometry=True,
iecc_moisture_regime_geometry=True,
ba_climate_zone_geometry=True,
ca_climate_zone_geometry=True,
):
""" Build database of metadata from primary sources.
Downloads primary sources, clears existing DB, and rebuilds from scratch.
Parameters
----------
zcta_geometry : bool, optional
Whether or not to include ZCTA geometry in database.
iecc_climate_zone_geometry : bool, optional
Whether or not to include IECC Climate Zone geometry in database.
iecc_moisture_regime_geometry : bool, optional
Whether or not to include IECC Moisture Regime geometry in database.
ba_climate_zone_geometry : bool, optional
Whether or not to include Building America Climate Zone geometry in database.
ca_climate_zone_geometry : bool, optional
Whether or not to include California Building Climate Zone Area geometry in database.
"""
try:
import shapely
except ImportError:
raise ImportError("Loading polygons requires shapely.")
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Scraping TMY3 station data requires beautifulsoup4.")
try:
import pyproj
except ImportError:
raise ImportError("Computing distances requires pyproj.")
try:
import simplejson
except ImportError:
raise ImportError("Writing geojson requires simplejson.")
download_path = _download_primary_sources()
conn = metadata_db_connection_proxy.reset_database()
# Load data into memory
print("Loading ZCTAs")
zcta_metadata = _load_zcta_metadata(download_path)
print("Loading counties")
county_metadata = _load_county_metadata(download_path)
print("Merging county climate zones")
(
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
) = _create_merged_climate_zones_metadata(county_metadata)
print("Loading CA climate zones")
ca_climate_zone_metadata = _load_CA_climate_zone_metadata(download_path)
print("Loading ISD station metadata")
isd_station_metadata = _load_isd_station_metadata(download_path)
print("Loading ISD station file metadata")
isd_file_metadata = _load_isd_file_metadata(download_path, isd_station_metadata)
print("Loading TMY3 station metadata")
tmy3_station_metadata = _load_tmy3_station_metadata(download_path)
print("Loading CZ2010 station metadata")
cz2010_station_metadata = _load_cz2010_station_metadata()
# Augment data in memory
print("Computing ISD station quality")
# add rough station quality to station metadata
# (all months in last 5 years have at least 600 points)
_compute_isd_station_quality(isd_station_metadata, isd_file_metadata)
print("Mapping ZCTAs to climate zones")
# add county and ca climate zone mappings
_map_zcta_to_climate_zones(
zcta_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
)
print("Mapping ISD stations to climate zones")
# add county and ca climate zone mappings
_map_isd_station_to_climate_zones(
isd_station_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
)
# Write tables
print("Creating table structures")
_create_table_structures(conn)
print("Writing ZCTA data")
_write_zcta_metadata_table(conn, zcta_metadata, geometry=zcta_geometry)
print("Writing IECC climate zone data")
_write_iecc_climate_zone_metadata_table(
conn, iecc_climate_zone_metadata, geometry=iecc_climate_zone_geometry
)
print("Writing IECC moisture regime data")
_write_iecc_moisture_regime_metadata_table(
conn, iecc_moisture_regime_metadata, geometry=iecc_moisture_regime_geometry
)
print("Writing BA climate zone data")
_write_ba_climate_zone_metadata_table(
conn, ba_climate_zone_metadata, geometry=ba_climate_zone_geometry
)
print("Writing CA climate zone data")
_write_ca_climate_zone_metadata_table(
conn, ca_climate_zone_metadata, geometry=ca_climate_zone_geometry
)
print("Writing ISD station metadata")
_write_isd_station_metadata_table(conn, isd_station_metadata)
print("Writing ISD file metadata")
_write_isd_file_metadata_table(conn, isd_file_metadata)
print("Writing TMY3 station metadata")
_write_tmy3_station_metadata_table(conn, tmy3_station_metadata)
print("Writing CZ2010 station metadata")
_write_cz2010_station_metadata_table(conn, cz2010_station_metadata)
print("Cleaning up...")
shutil.rmtree(download_path)
print("\u2728 Completed! \u2728") | Build database of metadata from primary sources.
Downloads primary sources, clears existing DB, and rebuilds from scratch.
Parameters
----------
zcta_geometry : bool, optional
Whether or not to include ZCTA geometry in database.
iecc_climate_zone_geometry : bool, optional
Whether or not to include IECC Climate Zone geometry in database.
iecc_moisture_regime_geometry : bool, optional
Whether or not to include IECC Moisture Regime geometry in database.
ba_climate_zone_geometry : bool, optional
Whether or not to include Building America Climate Zone geometry in database.
ca_climate_zone_geometry : bool, optional
Whether or not to include California Building Climate Zone Area geometry in database. | entailment |
def json(self):
""" Return a JSON-serializeable object containing station metadata."""
return {
"elevation": self.elevation,
"latitude": self.latitude,
"longitude": self.longitude,
"icao_code": self.icao_code,
"name": self.name,
"quality": self.quality,
"wban_ids": self.wban_ids,
"recent_wban_id": self.recent_wban_id,
"climate_zones": {
"iecc_climate_zone": self.iecc_climate_zone,
"iecc_moisture_regime": self.iecc_moisture_regime,
"ba_climate_zone": self.ba_climate_zone,
"ca_climate_zone": self.ca_climate_zone,
},
} | Return a JSON-serializeable object containing station metadata. | entailment |
def get_isd_filenames(self, year=None, with_host=False):
""" Get filenames of raw ISD station data. """
return get_isd_filenames(self.usaf_id, year, with_host=with_host) | Get filenames of raw ISD station data. | entailment |
def get_gsod_filenames(self, year=None, with_host=False):
""" Get filenames of raw GSOD station data. """
return get_gsod_filenames(self.usaf_id, year, with_host=with_host) | Get filenames of raw GSOD station data. | entailment |
def load_isd_hourly_temp_data(
self,
start,
end,
read_from_cache=True,
write_to_cache=True,
error_on_missing_years=True,
):
""" Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
error_on_missing_years=error_on_missing_years,
) | Load resampled hourly ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled hourly ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | entailment |
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | entailment |
def load_gsod_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_gsod_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load resampled daily GSOD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily GSOD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | entailment |
def load_tmy3_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_tmy3_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load hourly TMY3 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly TMY3 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | entailment |
def load_cz2010_hourly_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_cz2010_hourly_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | Load hourly CZ2010 temperature data from start date to end date (inclusive).
This is the primary convenience method for loading hourly CZ2010 temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache. | entailment |
def plot_station_mapping(
target_latitude,
target_longitude,
isd_station,
distance_meters,
target_label="target",
): # pragma: no cover
""" Plots this mapping on a map."""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
except ImportError:
raise ImportError("Plotting requires cartopy.")
lat, lng = isd_station.coords
t_lat, t_lng = float(target_latitude), float(target_longitude)
# fiture
fig = plt.figure(figsize=(16, 8))
# axes
tiles = cimgt.StamenTerrain()
ax = plt.subplot(1, 1, 1, projection=tiles.crs)
# offsets for labels
x_max = max([lng, t_lng])
x_min = min([lng, t_lng])
x_diff = x_max - x_min
y_max = max([lat, t_lat])
y_min = min([lat, t_lat])
y_diff = y_max - y_min
xoffset = x_diff * 0.05
yoffset = y_diff * 0.05
# minimum
left = x_min - x_diff * 0.5
right = x_max + x_diff * 0.5
bottom = y_min - y_diff * 0.3
top = y_max + y_diff * 0.3
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > width_ratio / height_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
ax.set_extent([left, right, bottom, top])
# determine zoom level
# tile size at level 1 = 64 km
# level 2 = 32 km, level 3 = 16 km, etc, i.e. 128/(2^n) km
N_TILES = 600 # (how many tiles approximately fit in distance)
km = distance_meters / 1000.0
zoom_level = int(np.log2(128 * N_TILES / km))
ax.add_image(tiles, zoom_level)
# line between
plt.plot(
[lng, t_lng],
[lat, t_lat],
linestyle="-",
dashes=[2, 2],
transform=ccrs.Geodetic(),
)
# station
ax.plot(lng, lat, "ko", markersize=7, transform=ccrs.Geodetic())
# target
ax.plot(t_lng, t_lat, "ro", markersize=7, transform=ccrs.Geodetic())
# station label
station_label = "{} ({})".format(isd_station.usaf_id, isd_station.name)
ax.text(lng + xoffset, lat + yoffset, station_label, transform=ccrs.Geodetic())
# target label
ax.text(t_lng + xoffset, t_lat + yoffset, target_label, transform=ccrs.Geodetic())
# distance labels
mid_lng = (lng + t_lng) / 2
mid_lat = (lat + t_lat) / 2
dist_text = "{:.01f} km".format(km)
ax.text(mid_lng + xoffset, mid_lat + yoffset, dist_text, transform=ccrs.Geodetic())
plt.show() | Plots this mapping on a map. | entailment |
def plot_station_mappings(mapping_results): # pragma: no cover
""" Plot a list of mapping results on a map.
Requires matplotlib and cartopy.
Parameters
----------
mapping_results : list of MappingResult objects
Mapping results to plot
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
except ImportError:
raise ImportError("Plotting requires cartopy.")
lats = []
lngs = []
t_lats = []
t_lngs = []
n_discards = 0
for mapping_result in mapping_results:
if not mapping_result.is_empty():
lat, lng = mapping_result.isd_station.coords
t_lat, t_lng = map(float, mapping_result.target_coords)
lats.append(lat)
lngs.append(lng)
t_lats.append(t_lat)
t_lngs.append(t_lng)
else:
n_discards += 1
print("Discarded {} empty mappings".format(n_discards))
# figure
fig = plt.figure(figsize=(60, 60))
# axes
ax = plt.subplot(1, 1, 1, projection=ccrs.Mercator())
# offsets for labels
all_lngs = lngs + t_lngs
all_lats = lats + t_lats
x_max = max(all_lngs) # lists
x_min = min(all_lngs)
x_diff = x_max - x_min
y_max = max(all_lats)
y_min = min(all_lats)
y_diff = y_max - y_min
# minimum
x_pad = 0.1 * x_diff
y_pad = 0.1 * y_diff
left = x_min - x_pad
right = x_max + x_pad
bottom = y_min - y_pad
top = y_max + y_pad
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > height_ratio / width_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
left = max(left, -179.9)
right = min(right, 179.9)
bottom = max([bottom, -89.9])
top = min([top, 89.9])
ax.set_extent([left, right, bottom, top])
# OCEAN
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"ocean",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["water"],
)
)
# LAND
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"land",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["land"],
)
)
# BORDERS
ax.add_feature(
cfeature.NaturalEarthFeature(
"cultural",
"admin_0_boundary_lines_land",
"50m",
edgecolor="black",
facecolor="none",
)
)
# LAKES
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"lakes",
"50m",
edgecolor="face",
facecolor=cfeature.COLORS["water"],
)
)
# COASTLINE
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical", "coastline", "50m", edgecolor="black", facecolor="none"
)
)
# lines between
# for lat, t_lat, lng, t_lng in zip(lats, t_lats, lngs, t_lngs):
ax.plot(
[lngs, t_lngs],
[lats, t_lats],
color="k",
linestyle="-",
transform=ccrs.Geodetic(),
linewidth=0.3,
)
# stations
ax.plot(lngs, lats, "bo", markersize=1, transform=ccrs.Geodetic())
plt.title("Location to weather station mapping")
plt.show() | Plot a list of mapping results on a map.
Requires matplotlib and cartopy.
Parameters
----------
mapping_results : list of MappingResult objects
Mapping results to plot | entailment |
def execute(self, input_data):
''' Execute the ViewPDF worker '''
# Just a small check to make sure we haven't been called on the wrong file type
if (input_data['meta']['type_tag'] != 'pdf'):
return {'error': self.__class__.__name__+': called on '+input_data['meta']['type_tag']}
view = {}
view['strings'] = input_data['strings']['string_list'][:5]
view.update(input_data['meta'])
return view | Execute the ViewPDF worker | entailment |
def execute(self, input_data):
''' Execute '''
view = {}
# Grab logs from Bro
view['bro_logs'] = {key: input_data['pcap_bro'][key] for key in input_data['pcap_bro'].keys() if '_log' in key}
# Grab logs from Bro
view['extracted_files'] = input_data['pcap_bro']['extracted_files']
return view | Execute | entailment |
def add_node(self, node_id, name, labels):
''' Cache aware add_node '''
if node_id not in self.node_cache:
self.workbench.add_node(node_id, name, labels)
self.node_cache.add(node_id) | Cache aware add_node | entailment |
def add_rel(self, source_id, target_id, rel):
''' Cache aware add_rel '''
if (source_id, target_id) not in self.rel_cache:
self.workbench.add_rel(source_id, target_id, rel)
self.rel_cache.add((source_id, target_id)) | Cache aware add_rel | entailment |
def conn_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro conn.log) '''
conn_log = list(stream)
print 'Entering conn_log_graph...(%d rows)' % len(conn_log)
for row in stream:
# Add the connection id with service as one of the labels
self.add_node(row['uid'], row['uid'][:6], ['conn_id', row['service']])
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['ip', 'origin'])
# Add the response host
self.add_node(row['id.resp_h'], row['id.resp_h'], ['ip', 'response'])
# Add the ip->connection relationships
self.add_rel(row['uid'], row['id.orig_h'], 'origin')
self.add_rel(row['uid'], row['id.resp_h'], 'response') | Build up a graph (nodes and edges from a Bro conn.log) | entailment |
def dns_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro dns.log) '''
dns_log = list(stream)
print 'Entering dns_log_graph...(%d rows)' % len(dns_log)
for row in dns_log:
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the query host
self.add_node(row['query'], row['query'], ['host', 'dns_query'])
# The relationship between origin host and query
self.add_rel(row['id.orig_h'], row['query'], 'dns_query')
# Add the DNS answers as hosts and add the relationships
for answer in row['answers'].split(','):
self.add_node(answer, answer, ['host'])
self.add_rel(row['query'], answer, row['qtype_name']) | Build up a graph (nodes and edges from a Bro dns.log) | entailment |
def weird_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro weird.log) '''
weird_log = list(stream)
print 'Entering weird_log_graph...(%d rows)' % len(weird_log)
# Here we're just going to capture that something weird
# happened between two hosts
weird_pairs = set()
for row in weird_log:
weird_pairs.add((row['id.orig_h'], row['id.resp_h']))
# Okay now make the weird node for each pair
for pair in weird_pairs:
# Skip '-' hosts
if (pair[0] == '-'):
continue
# Add the originating host
self.add_node(pair[0], pair[0], ['host', 'origin'])
# Add the response host
self.add_node(pair[1], pair[1], ['host'])
# Add a weird node
weird_name = 'weird'+pair[0]+'_'+pair[1]
self.add_node(weird_name, 'weird', ['weird'])
# The relationships between the nodes
self.add_rel(pair[0], weird_name, 'weird')
self.add_rel(weird_name, pair[1], 'weird') | Build up a graph (nodes and edges from a Bro weird.log) | entailment |
def files_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro files.log) '''
file_log = list(stream)
print 'Entering file_log_graph...(%d rows)' % len(file_log)
for row in file_log:
# If the mime-type is interesting add the uri and the host->uri->host relationships
if row['mime_type'] not in self.exclude_mime_types:
# Check for weird conditions
if (row['total_bytes'] == '-'):
continue
if ('-' in row['md5']):
continue
# Check for missing bytes
if row['missing_bytes']:
labels = ['file','missing']
else:
labels = ['file']
# Make the file node name kewl
name = '%6s %s %.0f-KB' % (row['md5'][:6], row['mime_type'], row['total_bytes']/1024.0)
if row['missing_bytes']:
name += '*'
name = name.replace('application/','')
# Add the file node
self.add_node(row['md5'], name, labels)
# Add the tx_host
self.add_node(row['tx_hosts'], row['tx_hosts'], ['host'])
# Add the file->tx_host relationship
self.add_rel(row['tx_hosts'], row['md5'], 'file') | Build up a graph (nodes and edges from a Bro files.log) | entailment |
def run():
''' This client calls a bunch of help commands from workbench '''
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Call help methods
print workbench.help()
print workbench.help('basic')
print workbench.help('commands')
print workbench.help('store_sample')
print workbench.help('workers')
print workbench.help('meta')
# Call a test worker
print workbench.test_worker('meta') | This client calls a bunch of help commands from workbench | entailment |
def execute(self, input_data):
''' Execute the URL worker '''
string_output = input_data['strings']['string_list']
flatten = ' '.join(string_output)
urls = self.url_match.findall(flatten)
return {'url_list': urls} | Execute the URL worker | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.