text stringlengths 4 1.02M | meta dict |
|---|---|
def test_cert_get_cn(client):
    """The common name is extracted from the long-lived test certificate."""
    from lemur.common.defaults import common_name
    from .vectors import INTERNAL_VALID_LONG_CERT
    assert 'long.lived.com' == common_name(INTERNAL_VALID_LONG_CERT)
def test_cert_sub_alt_domains(client):
    """domains() returns SAN entries only; a cert without SANs yields []."""
    from lemur.common.defaults import domains
    from .vectors import INTERNAL_VALID_LONG_CERT, INTERNAL_VALID_SAN_CERT
    expected_san_domains = ['example2.long.com', 'example3.long.com']
    assert domains(INTERNAL_VALID_SAN_CERT) == expected_san_domains
    assert domains(INTERNAL_VALID_LONG_CERT) == []
def test_cert_is_san(client):
    """san() is truthy only for certificates carrying subject alt names."""
    from lemur.common.defaults import san
    from .vectors import INTERNAL_VALID_LONG_CERT, INTERNAL_VALID_SAN_CERT
    assert san(INTERNAL_VALID_SAN_CERT)
    assert not san(INTERNAL_VALID_LONG_CERT)
def test_cert_is_wildcard(client):
    """is_wildcard() distinguishes wildcard certs from ordinary ones."""
    from lemur.common.defaults import is_wildcard
    from .vectors import INTERNAL_VALID_LONG_CERT, INTERNAL_VALID_WILDCARD_CERT
    assert not is_wildcard(INTERNAL_VALID_LONG_CERT)
    assert is_wildcard(INTERNAL_VALID_WILDCARD_CERT)
def test_cert_bitstrength(client):
    # The long-lived test vector uses a standard 2048-bit RSA key.
    from .vectors import INTERNAL_VALID_LONG_CERT
    from lemur.common.defaults import bitstrength
    assert bitstrength(INTERNAL_VALID_LONG_CERT) == 2048
def test_cert_issuer(client):
    # issuer() returns the sanitized issuer name of the certificate.
    from .vectors import INTERNAL_VALID_LONG_CERT
    from lemur.common.defaults import issuer
    assert issuer(INTERNAL_VALID_LONG_CERT) == 'Example'
def test_text_to_slug(client):
    """Slugging: separators collapse to '-', accents decompose to ASCII,
    and characters with no ASCII decomposition are dropped."""
    from lemur.common.defaults import text_to_slug
    cases = [
        ('test - string', 'test-string'),
        # Accented characters are decomposed
        ('föö bär', 'foo-bar'),
        # Melt away the Unicode Snowman
        ('\u2603', ''),
        ('\u2603test\u2603', 'test'),
        ('snow\u2603man', 'snow-man'),
        # IDNA-encoded domain names should be kept as-is
        ('xn--i1b6eqas.xn--xmpl-loa9b3671b.com',
         'xn--i1b6eqas.xn--xmpl-loa9b3671b.com'),
    ]
    for raw_text, expected_slug in cases:
        assert text_to_slug(raw_text) == expected_slug
def test_create_name(client):
    """Certificate names combine CN, owner, validity window, and an
    optional SAN- prefix; non-ASCII owner names are flattened to ASCII."""
    from datetime import datetime
    from lemur.common.defaults import certificate_name
    not_before = datetime(2015, 5, 7, 0, 0, 0)
    not_after = datetime(2015, 5, 12, 0, 0, 0)
    assert certificate_name(
        'example.com', 'Example Inc,', not_before, not_after, False
    ) == 'example.com-ExampleInc-20150507-20150512'
    # SAN certificates get a 'SAN-' prefix.
    assert certificate_name(
        'example.com', 'Example Inc,', not_before, not_after, True
    ) == 'SAN-example.com-ExampleInc-20150507-20150512'
    # IDN common names arrive already IDNA-encoded and pass through
    # unchanged; accented owner names are stripped to ASCII.
    assert certificate_name(
        'xn--mnchen-3ya.de', 'Vertrauenswürdig Autorität',
        not_before, not_after, False
    ) == 'xn--mnchen-3ya.de-VertrauenswurdigAutoritat-20150507-20150512'
| {
"content_hash": "39bd52037dba5ba9303eec8bec743a54",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 105,
"avg_line_length": 35.35443037974684,
"alnum_prop": 0.6813462226996062,
"repo_name": "kevgliss/lemur",
"id": "a72689cc90a31962ef3c95e862f933322d328d22",
"size": "2800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemur/tests/test_defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2728"
},
{
"name": "HTML",
"bytes": "209897"
},
{
"name": "JavaScript",
"bytes": "15086"
},
{
"name": "Makefile",
"bytes": "3492"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "803019"
}
],
"symlink_target": ""
} |
from neon import NervanaObject
class Cost(NervanaObject):
    """
    Base class for the cost functions

    Concrete costs either override ``__call__``/``bprop`` directly or
    assign callables to ``self.func``/``self.funcgrad`` (see SumSquared).
    """
    def __call__(self, y, t):
        """
        Applies the cost function
        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            OpTree: Returns the cost
        """
        return self.func(y, t)

    def bprop(self, y, t):
        """
        Computes the derivative of the cost function
        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            OpTree: Returns the derivative of the cost function
        """
        return self.funcgrad(y, t)
class Metric(Cost):
    """
    Base class for Metric
    Meant for non-smooth costs that we just want to check on validation.

    Subclasses must override ``__call__`` and set ``metric_names``;
    metrics are never backpropagated through, so ``bprop`` is a no-op.
    """
    def __call__(self, y, t):
        """
        To implement in derived classes
        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            float: Returns the metric
        """
        raise NotImplementedError()

    def bprop(self, y, t):
        """
        Not relevant for Metric
        """
        pass
class CrossEntropyBinary(Cost):
    """
    Binary cross entropy cost.

    Note:
        bprop assumes that shortcut is used to calculate derivative
        (i.e. the output layer applies the matching logistic activation).
    """

    def __init__(self, epsilon=2 ** -23, scale=1):
        """
        Initialize the binary cross entropy function.

        Args:
            epsilon (float): small constant added inside the logs to
                prevent log(0) errors
            scale (float): multiplier applied to the bprop result
        """
        self.epsilon = epsilon
        self.scale = scale

    def __call__(self, y, t):
        """
        Apply the binary cross entropy cost, summed over classes.

        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            OpTree: the binary cross entropy cost per example
        """
        be = self.be
        cost_on = -be.log(y + self.epsilon) * t
        cost_off = -be.log(1 - y + self.epsilon) * (1 - t)
        return be.sum(cost_on + cost_off, axis=0)

    def bprop(self, y, t):
        """
        Shortcut derivative of the binary cross entropy cost.

        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            OpTree: ``scale * (y - t)``
        """
        return (y - t) * self.scale
class CrossEntropyMulti(Cost):
    """
    Applies the multiclass cross entropy function
    Note:
        bprop assumes that shortcut is used to calculate derivative
    """
    def __init__(self, epsilon=2 ** -23, scale=1, usebits=False):
        """
        Initialize the multiclass cross entropy function
        Args:
            epsilon (float): set the epsilon
                (small number to prevent log(0) errors)
            scale (float): multiplier applied to the bprop result
            usebits (boolean): whether to display costs in bits or nats (default)
        """
        self.epsilon = epsilon
        self.scale = scale
        # self.be is presumably the shared backend provided by NervanaObject,
        # already set before instances are created — TODO confirm.
        self.logfunc = self.be.log2 if usebits else self.be.log

    def __call__(self, y, t):
        """
        Applies the multiclass cross entropy cost function
        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            OpTree: Returns the multiclass cross entropy cost
        """
        # clip keeps y within [epsilon, 1] so the log never sees 0.
        return (self.be.sum(-t * self.logfunc(self.be.clip(y, self.epsilon, 1.0)), axis=0))

    def bprop(self, y, t):
        """
        Computes the shortcut derivative of the multiclass cross entropy cost
        function
        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            OpTree: Returns the (mean) shortcut derivative of the multiclass
                    entropy cost function ``(y - t) / y.shape[1]``
        """
        return self.scale * (y - t)
class SumSquared(Cost):
    """
    Squared error cost: 0.5 * sum((y - t)^2) over classes, per example.
    """

    def __init__(self):
        """Bind the cost callable and its gradient for Cost.__call__/bprop."""
        self.func = self._cost
        self.funcgrad = self._cost_grad

    def _cost(self, y, t):
        # The 1/2 factor keeps the gradient free of a constant multiplier.
        return self.be.sum(self.be.square(y - t), axis=0) / 2.

    def _cost_grad(self, y, t):
        return y - t
class TopKMisclassification(Metric):
    """
    Batch metrics: log loss, top-1 and top-k misclassification rates.
    """
    def __init__(self, k):
        # One device buffer with three rows, each viewed as a (1, bsz) slice.
        self.outputs = self.be.iobuf(3)
        self.correctProbs = self.outputs[0].reshape((1, self.be.bsz))
        self.top1 = self.outputs[1].reshape((1, self.be.bsz))
        self.topk = self.outputs[2].reshape((1, self.be.bsz))
        self.k = k
        self.metric_names = ['LogLoss', 'Top1Misclass', 'Top' + str(k) + 'Misclass']

    def __call__(self, y, t):
        """
        Compute the misclassification error metric
        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            float: Returns the metric
        """
        be = self.be
        # Probability the model assigned to the correct class
        # (t is presumably one-hot — TODO confirm).
        self.correctProbs[:] = be.sum(y * t, axis=0)
        # nSlots: top-k slots remaining after outputs strictly above the
        # correct probability; nEq: outputs tied with the correct probability.
        nSlots = self.k - be.sum((y > self.correctProbs), axis=0)
        nEq = be.sum(y == self.correctProbs, axis=0)
        # Counted correct iff a slot remains; when more ties than slots,
        # fractional credit nSlots/nEq is given.
        self.topk[:] = 1. - (nSlots > 0) * ((nEq <= nSlots) * (1 - nSlots / nEq) + nSlots / nEq)
        # Top-1: credit 1/nEq when the correct class ties for the maximum.
        self.top1[:] = 1. - (be.max(y, axis=0) == self.correctProbs) / nEq
        # Reuse the first row as the per-record log loss.
        self.correctProbs[:] = -be.log(self.correctProbs)
        return self.outputs.get().mean(axis=1)
class Misclassification(Metric):
    """
    Top-1 misclassification error metric.
    """

    def __init__(self):
        # Per-record scratch buffers for predicted and true class indices.
        self.preds = self.be.iobuf(1)
        self.hyps = self.be.iobuf(1)
        self.outputs = self.preds  # Contains per record metric
        self.metric_names = ['Top1Misclass']

    def __call__(self, y, t):
        """
        Fraction of records whose argmax prediction disagrees with the label.

        Args:
            y (Tensor or OpTree): Output of previous layer or model
            t (Tensor or OpTree): True targets corresponding to y
        Returns:
            float: mean top-1 misclassification over the batch
        """
        be = self.be
        # convert back from onehot and compare
        self.preds[:] = be.argmax(y, axis=0)
        self.hyps[:] = be.argmax(t, axis=0)
        self.outputs[:] = be.not_equal(self.preds, self.hyps)
        return self.outputs.get().mean()
| {
"content_hash": "9532b46341e56b8e75c2b869e62c5ea5",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 96,
"avg_line_length": 29.70689655172414,
"alnum_prop": 0.5622460824143934,
"repo_name": "sunclx/neon",
"id": "7b4905c85a86af4fa1935d6498a0d3513a9e9534",
"size": "7633",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neon/transforms/cost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6534"
},
{
"name": "C++",
"bytes": "13448"
},
{
"name": "CSS",
"bytes": "810211"
},
{
"name": "Cuda",
"bytes": "87750"
},
{
"name": "Makefile",
"bytes": "8982"
},
{
"name": "Python",
"bytes": "777025"
}
],
"symlink_target": ""
} |
import unittest
from app.common.http_methods_unittests import get_request
from app.common.string_methods import string_contains, get_values_from_regex
from app.common.target_parse_strings import PLANE_PANEL_AVAILABLE_HTML, ALLIANCE_CONCORDE_PATTERN_HTML
from app.common.target_urls import ALLIANCE_PAGE, ALLIANCE_PLANE_PANEL_URL, ALLIANCE_TAKE_PLANE_URL
class TestAirportParser(unittest.TestCase):
    """Tests for withdrawing a plane from the alliance (work in progress)."""

    @unittest.skip("Not ready yet")
    def test_amount_needed(self):
        take_plane_from_alliance()
        # self.assertEqual(43900000, 0)
        #TODO
        # Sketch of the intended mockito-based stubbing of the HTTP layer,
        # to be used once this test is completed:
        # from mockito import when, mock, unstub
        #
        # import requests
        # response = mock({'status_code': 200, 'text': 'Ok'})
        # when(requests).get('http://google.com/').thenReturn(response)
        #
        # # use it
        # truc = requests.get('http://google.com/')
        # print(truc.text)
        #
        # # clean up
        # unstub()
def take_plane_from_alliance(required_plane_type=10):
    """Try to withdraw a plane of ``required_plane_type`` from the alliance.

    Fetches the alliance page, and if the panel advertises an available
    plane of the requested type, looks up its id and requests it.

    Args:
        required_plane_type (int): plane type code to withdraw (default 10).

    Returns:
        bool: True when the French success message is found in the final
        response page, False otherwise.

    NOTE(review): the original code decorated this helper with
    ``@unittest.skip("Not ready yet")``.  On a plain function that decorator
    wraps it so that *every call raises SkipTest*, which would break any
    test that exercises it; ``skip`` is only meaningful on test methods or
    TestCase classes, so the decorator was removed.
    """
    page = get_request(ALLIANCE_PAGE)
    if string_contains(PLANE_PANEL_AVAILABLE_HTML.format(plane_type=required_plane_type), page):
        page = get_request(ALLIANCE_PLANE_PANEL_URL.format(plane_type=required_plane_type))
        plane_id = get_values_from_regex(ALLIANCE_CONCORDE_PATTERN_HTML, page)[0]
        page = get_request(ALLIANCE_TAKE_PLANE_URL.format(plane_id=plane_id))
        if u"Vous avez retiré avec succès l'avion" in page:
            return True
    return False
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| {
"content_hash": "c91715042feb8c1fd42f37ebaaa6ee96",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 102,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.7097878165639973,
"repo_name": "egenerat/gae-django",
"id": "8f6d8554ad52f8c854a950b706bc3fddc40a3dfa",
"size": "1487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/test/airport/test_airport_buyer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37815"
},
{
"name": "HTML",
"bytes": "86170"
},
{
"name": "JavaScript",
"bytes": "94035"
},
{
"name": "Python",
"bytes": "4820796"
}
],
"symlink_target": ""
} |
"""
router implementation base class
a template for router implementations that support VRRP
Those routers need to be created by someone else.
sample_manager.routerManager is an example.
Usage example:
PYTHONPATH=. ./bin/ryu-manager --verbose \
ryu.services.protocols.vrrp.manager \
ryu.services.protocols.vrrp.dumper \
ryu.services.protocols.vrrp.sample_manager
"""
import contextlib
import greenlet
import socket
from ryu.base import app_manager
from ryu.controller import handler
from ryu.controller import ofp_event
from ryu.lib import hub
from ryu.lib import mac as mac_lib
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import packet
from ryu.lib.packet import vlan
from ryu.lib.packet import vrrp
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_2
from ryu.services.protocols.vrrp import api as vrrp_api
from ryu.services.protocols.vrrp import event as vrrp_event
from ryu.services.protocols.vrrp import utils
class RouterBase(app_manager.RyuApp):
    """Skeleton for a VRRP-aware router application.

    Listens for EventVRRPStateChanged and dispatches each transition to a
    hook (_initialized, _initialized_to_master, _become_master,
    _become_backup, _shutdowned) that subclasses override with real
    packet-handling/routing behaviour.
    """

    def _router_name(self, config, interface):
        # Unique RyuApp name: <class>-<interface>-<vrid>-<ip version>.
        ip_version = 'ipv6' if config.is_ipv6 else 'ipv4'
        return '%s-%s-%d-%s' % (self.__class__.__name__,
                                str(interface), config.vrid, ip_version)

    def __init__(self, *args, **kwargs):
        super(RouterBase, self).__init__(*args, **kwargs)
        self.instance_name = kwargs['name']          # VRRP instance name
        self.monitor_name = kwargs['monitor_name']   # interface monitor app name
        self.config = kwargs['config']               # VRRP configuration
        self.interface = kwargs['interface']         # monitored interface
        self.name = self._router_name(self.config, self.interface)

    def _transmit(self, data):
        # Hand a raw serialized packet to the interface monitor for sending.
        vrrp_api.vrrp_transmit(self, self.monitor_name, data)

    def _initialized(self):
        self.logger.debug('initialized')

    def _initialized_to_master(self):
        self.logger.debug('initialized to master')
        # RFC3768 6.4.1
        # o Broadcast a gratuitous ARP request containing the virtual
        #   router MAC address for each IP address associated with the
        #   virtual router.
        #
        # or
        #
        # RFC 5795 6.4.1
        # (115)+ If the protected IPvX address is an IPv4 address, then:
        #   (120) * Broadcast a gratuitous ARP request containing the
        #   virtual router MAC address for each IP address associated
        #   with the virtual router.
        # (125) + else // IPv6
        #   (130) * For each IPv6 address associated with the virtual
        #   router, send an unsolicited ND Neighbor Advertisement with
        #   the Router Flag (R) set, the Solicited Flag (S) unset, the
        #   Override flag (O) set, the target address set to the IPv6
        #   address of the virtual router, and the target link-layer
        #   address set to the virtual router MAC address.

    def _become_master(self):
        self.logger.debug('become master')
        # RFC3768 6.4.2
        # o Broadcast a gratuitous ARP request containing the virtual
        #   router MAC address for each IP address associated with the
        #   virtual router
        #
        # or
        #
        # RFC 5795 6.4.2
        # (375)+ If the protected IPvX address is an IPv4 address, then:
        #   (380)* Broadcast a gratuitous ARP request on that interface
        #   containing the virtual router MAC address for each IPv4
        #   address associated with the virtual router.
        # (385) + else // ipv6
        #   (390) * Compute and join the Solicited-Node multicast
        #   address [RFC4291] for the IPv6 address(es) associated with
        #   the virtual router.
        #   (395) * For each IPv6 address associated with the virtual
        #   router, send an unsolicited ND Neighbor Advertisement with
        #   the Router Flag (R) set, the Solicited Flag (S) unset, the
        #   Override flag (O) set, the target address set to the IPv6
        #   address of the virtual router, and the target link-layer
        #   address set to the virtual router MAC address.

    def _become_backup(self):
        self.logger.debug('become backup')
        # RFC 3768 6.4.2 Backup
        # - MUST NOT respond to ARP requests for the IP address(s)
        #   associated with the virtual router.
        # - MUST discard packets with a destination link layer MAC address
        #   equal to the virtual router MAC address.
        # - MUST NOT accept packets addressed to the IP address(es)
        #   associated with the virtual router.
        #
        # or
        #
        # RFC 5798 6.4.2 Backup
        # (305) - If the protected IPvX address is an IPv4 address, then:
        #   (310) + MUST NOT respond to ARP requests for the IPv4
        #   address(es) associated with the virtual router.
        # (315) - else // protected addr is IPv6
        #   (320) + MUST NOT respond to ND Neighbor Solicitation messages
        #   for the IPv6 address(es) associated with the virtual router.
        #   (325) + MUST NOT send ND Router Advertisement messages for the
        #   virtual router.
        # (330) -endif // was protected addr IPv4?
        # (335) - MUST discard packets with a destination link-layer MAC
        # address equal to the virtual router MAC address.
        # (340) - MUST NOT accept packets addressed to the IPvX address(es)
        # associated with the virtual router.

    def _shutdowned(self):
        self.logger.debug('shutdowned')

    @handler.set_ev_handler(vrrp_event.EventVRRPStateChanged)
    def vrrp_state_changed_handler(self, ev):
        """Dispatch a VRRP state transition to the appropriate hook."""
        old_state = ev.old_state
        new_state = ev.new_state
        self.logger.debug('sample router %s -> %s', old_state, new_state)
        if new_state == vrrp_event.VRRP_STATE_MASTER:
            if old_state == vrrp_event.VRRP_STATE_INITIALIZE:
                self._initialized_to_master()
            elif old_state == vrrp_event.VRRP_STATE_BACKUP:
                self._become_master()
            # RFC 3768 6.4.3
            # - MUST respond to ARP requests for the IP address(es) associated
            #   with the virtual router.
            # - MUST forward packets with a destination link layer MAC address
            #   equal to the virtual router MAC address.
            # - MUST NOT accept packets addressed to the IP address(es)
            #   associated with the virtual router if it is not the IP address
            #   owner.
            # - MUST accept packets addressed to the IP address(es) associated
            #   with the virtual router if it is the IP address owner.
            #
            # or
            #
            # RFC5798 6.4.3
            # (605) - If the protected IPvX address is an IPv4 address, then:
            #   (610) + MUST respond to ARP requests for the IPv4 address(es)
            #   associated with the virtual router.
            # (615) - else // ipv6
            #   (620) + MUST be a member of the Solicited-Node multicast
            #   address for the IPv6 address(es) associated with the virtual
            #   router.
            #   (625) + MUST respond to ND Neighbor Solicitation message for
            #   the IPv6 address(es) associated with the virtual router.
            #   (630) ++ MUST send ND Router Advertisements for the virtual
            #   router.
            #   (635) ++ If Accept_Mode is False: MUST NOT drop IPv6 Neighbor
            #   Solicitations and Neighbor Advertisements.
            # (640) +-endif // ipv4?
            # (645) - MUST forward packets with a destination link-layer MAC
            # address equal to the virtual router MAC address.
            # (650) - MUST accept packets addressed to the IPvX address(es)
            # associated with the virtual router if it is the IPvX address owner
            # or if Accept_Mode is True. Otherwise, MUST NOT accept these
            # packets.
        elif new_state == vrrp_event.VRRP_STATE_BACKUP:
            self._become_backup()
        elif new_state == vrrp_event.VRRP_STATE_INITIALIZE:
            if old_state is None:
                self._initialized()
            else:
                self._shutdowned()
        else:
            raise ValueError('invalid vrrp state %s' % new_state)
class RouterIPV4(RouterBase):
    """IPv4 VRRP router: answers ARP requests for the protected addresses
    and sends gratuitous ARPs when taking over as master."""

    def _garp_packet(self, ip_address):
        # prepare garp packet
        # Gratuitous ARP: broadcast a request for our own address so that
        # neighbours refresh their caches with the virtual router MAC.
        src_mac = vrrp.vrrp_ipv4_src_mac_address(self.config.vrid)
        e = ethernet.ethernet(mac_lib.BROADCAST_STR, src_mac,
                              ether.ETH_TYPE_ARP)
        a = arp.arp_ip(arp.ARP_REQUEST, src_mac, ip_address,
                       mac_lib.DONTCARE_STR, ip_address)
        p = packet.Packet()
        p.add_protocol(e)
        utils.may_add_vlan(p, self.interface.vlan_id)
        p.add_protocol(a)
        p.serialize()
        return p

    def __init__(self, *args, **kwargs):
        super(RouterIPV4, self).__init__(*args, **kwargs)
        assert not self.config.is_ipv6
        # Pre-build one gratuitous ARP per protected address.
        self.garp_packets = [self._garp_packet(ip_address)
                             for ip_address in self.config.ip_addresses]

    def _send_garp(self):
        self.logger.debug('_send_garp')
        for garp_packet in self.garp_packets:
            self._transmit(garp_packet.data)

    def _arp_reply_packet(self, arp_req_sha, arp_req_spa, arp_req_tpa):
        """Build and transmit an ARP reply if the target address is one we
        protect; otherwise do nothing.  Always returns None."""
        if not (arp_req_tpa in self.config.ip_addresses or
                arp_req_tpa == self.config.primary_ip_address):
            return None
        src_mac = vrrp.vrrp_ipv4_src_mac_address(self.config.vrid)
        e = ethernet.ethernet(arp_req_sha, src_mac, ether.ETH_TYPE_ARP)
        a = arp.arp_ip(arp.ARP_REPLY, src_mac, arp_req_tpa,
                       arp_req_sha, arp_req_spa)
        p = packet.Packet()
        p.add_protocol(e)
        utils.may_add_vlan(p, self.interface.vlan_id)
        p.add_protocol(a)
        p.serialize()
        self._transmit(p.data)

    def _arp_process(self, data):
        """Parse raw frame *data*; reply when it is an ARP request aimed at
        the virtual router MAC.  Returns None in all cases."""
        dst_mac = vrrp.vrrp_ipv4_src_mac_address(self.config.vrid)
        arp_sha = None
        arp_spa = None
        arp_tpa = None
        p = packet.Packet(data)
        for proto in p.protocols:
            if isinstance(proto, ethernet.ethernet):
                # Accept only broadcast or frames to the virtual MAC.
                if proto.dst not in (mac_lib.BROADCAST_STR, dst_mac):
                    return None
                ethertype = proto.ethertype
                # Expect ARP directly, or a VLAN tag when one is configured.
                if not ((self.interface.vlan_id is None and
                         ethertype == ether.ETH_TYPE_ARP) or
                        (self.interface.vlan_id is not None and
                         ethertype == ether.ETH_TYPE_8021Q)):
                    return None
            elif isinstance(proto, vlan.vlan):
                if (proto.vid != self.interface.vlan_id or
                        proto.ethertype != ether.ETH_TYPE_ARP):
                    return None
            elif isinstance(proto, arp.arp):
                # Only Ethernet/IPv4 ARP requests addressed to the virtual MAC.
                if (proto.hwtype != arp.ARP_HW_TYPE_ETHERNET or
                        proto.proto != ether.ETH_TYPE_IP or
                        proto.hlen != 6 or proto.plen != 4 or
                        proto.opcode != arp.ARP_REQUEST or
                        proto.dst_mac != dst_mac):
                    return None
                arp_sha = proto.src_mac
                arp_spa = proto.src_ip
                arp_tpa = proto.dst_ip
                break
        if arp_sha is None or arp_spa is None or arp_tpa is None:
            self.logger.debug('malformed arp request? arp_sha %s arp_spa %s',
                              arp_sha, arp_spa)
            return None
        self._arp_reply_packet(arp_sha, arp_spa, arp_tpa)
class RouterIPV4Linux(RouterIPV4):
    """IPv4 router variant that answers ARP via a raw AF_PACKET socket on a
    Linux network device, using a hub green thread."""

    def __init__(self, *args, **kwargs):
        super(RouterIPV4Linux, self).__init__(*args, **kwargs)
        assert isinstance(self.interface,
                          vrrp_event.VRRPInterfaceNetworkDevice)
        self.__is_master = False
        self._arp_thread = None   # green thread running _arp_loop, or None

    def start(self):
        # Start disabled; VRRP state events enable routing once master.
        self._disable_router()
        super(RouterIPV4Linux, self).start()

    def _initialized_to_master(self):
        self.logger.debug('initialized to master')
        self._master()

    def _become_master(self):
        self.logger.debug('become master')
        self._master()

    def _master(self):
        self.__is_master = True
        self._enable_router()
        self._send_garp()

    def _become_backup(self):
        self.logger.debug('become backup')
        self.__is_master = False
        self._disable_router()

    def _shutdowned(self):
        # When VRRP functionality is disabled, what to do?
        # should we also exit? or continue to route packets?
        self._disable_router()

    def _arp_loop_socket(self, packet_socket):
        # Blocking receive loop; a timeout just retries the recv.
        while True:
            try:
                buf = packet_socket.recv(1500)
            except socket.timeout:
                continue
            self._arp_process(buf)

    def _arp_loop(self):
        try:
            with contextlib.closing(
                socket.socket(
                    socket.AF_PACKET, socket.SOCK_RAW,
                    socket.htons(ether.ETH_TYPE_ARP))) as packet_socket:
                packet_socket.bind((self.interface.device_name,
                                    socket.htons(ether.ETH_TYPE_ARP),
                                    socket.PACKET_BROADCAST,
                                    arp.ARP_HW_TYPE_ETHERNET,
                                    mac_lib.BROADCAST))
                self._arp_loop_socket(packet_socket)
        except greenlet.GreenletExit:
            # suppress thread.kill exception
            pass

    def _enable_router(self):
        if self._arp_thread is None:
            self._arp_thread = hub.spawn(self._arp_loop)
        # TODO: implement real routing logic
        self.logger.debug('TODO:_enable_router')

    def _disable_router(self):
        if self._arp_thread is not None:
            self._arp_thread.kill()
            hub.joinall([self._arp_thread])
            self._arp_thread = None
        # TODO: implement real routing logic
        self.logger.debug('TODO:_disable_router')
class RouterIPV4OpenFlow(RouterIPV4):
    """IPv4 router variant implemented with OpenFlow 1.2 flow rules.

    While backup, a drop rule blackholes traffic to the virtual MAC; an
    ARP rule punts ARP requests for the virtual router to the controller,
    which answers them via RouterIPV4._arp_process.  Real routing rules
    are still a TODO.
    """

    OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]

    # it must be that
    # _DROP_PRIORITY < monitor.VRRPInterfaceMonitorOpenFlow._PRIORITY or
    # _DROP_TABLE > monitor.VRRPInterfaceMonitorOpenFlow._TABLE
    # to guarantee that VRRP packets are sent to the controller
    _DROP_TABLE = 0
    _DROP_PRIORITY = 0x8000 / 2

    # it must be that
    # _ARP_PRIORITY < _DROP_PRIORITY or
    # _ARP_TABLE > _DROP_TABLE
    # to guarantee that responding to ARP can be disabled
    _ARP_TABLE = 0
    _ARP_PRIORITY = _DROP_PRIORITY / 2

    # it must be that
    # _ROUTING_PRIORITY < _ARP_PRIORITY or
    # _ROUTING_TABLE > _ARP_TABLE
    # to guarantee that routing can be disabled
    _ROUTING_TABLE = 0
    _ROUTING_PRIORITY = _ARP_PRIORITY / 2

    def __init__(self, *args, **kwargs):
        super(RouterIPV4OpenFlow, self).__init__(*args, **kwargs)
        assert isinstance(self.interface, vrrp_event.VRRPInterfaceOpenFlow)

    def _get_dp(self):
        # May return None when the datapath is not currently connected.
        return utils.get_dp(self, self.interface.dpid)

    def start(self):
        dp = self._get_dp()
        assert dp
        # Remove any stale rules from a previous run before reinstalling.
        self._uninstall_route_rule(dp)
        self._uninstall_arp_rule(dp)
        self._uninstall_drop_rule(dp)
        self._install_drop_rule(dp)
        self._install_arp_rule(dp)
        self._install_route_rule(dp)
        super(RouterIPV4OpenFlow, self).start()

    def _initialized_to_master(self):
        self.logger.debug('initialized to master')
        self._master()

    def _become_master(self):
        self.logger.debug('become master')
        self._master()

    def _master(self):
        dp = self._get_dp()
        if dp is None:
            return
        self._uninstall_drop_rule(dp)
        # BUGFIX: RouterIPV4._send_garp() takes no datapath argument; the
        # original call passed dp and would have raised TypeError.
        self._send_garp()

    def _become_backup(self):
        self.logger.debug('become backup')
        dp = self._get_dp()
        if dp is None:
            return
        self._install_drop_rule(dp)

    def _shutdowned(self):
        dp = self._get_dp()
        if dp is None:
            return
        # When VRRP functionality is disabled, what to do?
        # should we also exit? or continue to route packets?
        self._uninstall_route_rule(dp)
        self._uninstall_arp_rule(dp)
        self._uninstall_drop_rule(dp)

    @handler.set_ev_cls(ofp_event.EventOFPPacketIn, handler.MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Feed ARP requests punted by the switch into _arp_process."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        # TODO: subscribe only the datapath that we route
        dpid = datapath.dpid
        if dpid != self.interface.dpid:
            return
        # Only packets received on our monitored port are interesting.
        for field in msg.match.fields:
            header = field.header
            if header == ofproto.OXM_OF_IN_PORT:
                if field.value != self.interface.port_no:
                    return
                break
        self._arp_process(msg.data)

    def _drop_match(self, dp):
        # Match traffic arriving on our port for the virtual router MAC.
        kwargs = {}
        kwargs['in_port'] = self.interface.port_no
        kwargs['eth_dst'] = vrrp.vrrp_ipv4_src_mac_address(self.config.vrid)
        if self.interface.vlan_id is not None:
            kwargs['vlan_vid'] = self.interface.vlan_id
        return dp.ofproto_parser.OFPMatch(**kwargs)

    def _install_drop_rule(self, dp):
        match = self._drop_match(dp)
        # Empty instruction list == drop.
        utils.dp_flow_mod(dp, self._DROP_TABLE, dp.ofproto.OFPFC_ADD,
                          self._DROP_PRIORITY, match, [])

    def _uninstall_drop_rule(self, dp):
        match = self._drop_match(dp)
        utils.dp_flow_mod(dp, self._DROP_TABLE, dp.ofproto.OFPFC_DELETE_STRICT,
                          self._DROP_PRIORITY, match, [])

    def _arp_match(self, dp):
        # Match broadcast ARP requests on our port.
        kwargs = {}
        kwargs['in_port'] = self.interface.port_no
        kwargs['eth_dst'] = mac_lib.BROADCAST_STR
        kwargs['eth_type'] = ether.ETH_TYPE_ARP
        if self.interface.vlan_id is not None:
            kwargs['vlan_vid'] = self.interface.vlan_id
        kwargs['arp_op'] = arp.ARP_REQUEST
        # NOTE(review): arp_tpa is a *protocol* (IPv4) address field, yet it
        # is set to the virtual MAC here; it should probably match the
        # protected IPv4 address(es) instead.  Left unchanged because the
        # correct multi-address fix is not obvious — TODO confirm.
        kwargs['arp_tpa'] = vrrp.vrrp_ipv4_src_mac_address(self.config.vrid)
        return dp.ofproto_parser.OFPMatch(**kwargs)

    def _install_arp_rule(self, dp):
        ofproto = dp.ofproto
        ofproto_parser = dp.ofproto_parser
        match = self._arp_match(dp)
        actions = [ofproto_parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                                  ofproto.OFPCML_NO_BUFFER)]
        instructions = [ofproto_parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        # BUGFIX: was dp.fproto (AttributeError); Datapath exposes .ofproto.
        utils.dp_flow_mod(dp, self._ARP_TABLE, dp.ofproto.OFPFC_ADD,
                          self._ARP_PRIORITY, match, instructions)

    def _uninstall_arp_rule(self, dp):
        match = self._arp_match(dp)
        # BUGFIX: was dp.fproto (AttributeError); Datapath exposes .ofproto.
        utils.dp_flow_mod(dp, self._ARP_TABLE, dp.ofproto.OFPFC_DELETE_STRICT,
                          self._ARP_PRIORITY, match, [])

    def _install_route_rule(self, dp):
        # TODO: implement real routing logic
        self.logger.debug('TODO:_install_router_rule')

    def _uninstall_route_rule(self, dp):
        # TODO: implement real routing logic
        self.logger.debug('TODO:_uninstall_router_rule')
class RouterIPV6(RouterBase):
    """Base class for IPv6 VRRP routers."""
    def __init__(self, *args, **kwargs):
        super(RouterIPV6, self).__init__(*args, **kwargs)
        # This subtree handles IPv6 configurations only; IPv4 uses RouterIPV4.
        assert self.config.is_ipv6
class RouterIPV6Linux(RouterIPV6):
    """IPv6 router on a Linux network device (unimplemented placeholder)."""
    def __init__(self, *args, **kwargs):
        super(RouterIPV6Linux, self).__init__(*args, **kwargs)
        assert isinstance(self.interface,
                          vrrp_event.VRRPInterfaceNetworkDevice)
    # TODO: reader's home work
    pass
class RouterIPV6OpenFlow(RouterIPV6):
    """IPv6 router on an OpenFlow switch (unimplemented placeholder)."""
    def __init__(self, *args, **kwargs):
        super(RouterIPV6OpenFlow, self).__init__(*args, **kwargs)
        assert isinstance(self.interface, vrrp_event.VRRPInterfaceOpenFlow)
    # TODO: reader's home work
    pass
| {
"content_hash": "e265ac7d32e48d52d6c2ee159ee8863b",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 79,
"avg_line_length": 38.08221797323136,
"alnum_prop": 0.5880403675252297,
"repo_name": "torufuru/OFPatchPanel",
"id": "ca834d29eabef1e46621c0404928544d21308585",
"size": "20600",
"binary": false,
"copies": "7",
"ref": "refs/heads/hackathon",
"path": "ryu/services/protocols/vrrp/sample_router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "849217"
},
{
"name": "Python",
"bytes": "3646303"
},
{
"name": "Shell",
"bytes": "14365"
}
],
"symlink_target": ""
} |
import sys
import pytest
import astroid
from pylint.checkers import strings
from pylint.testutils import CheckerTestCase
class TestStringChecker(CheckerTestCase):
    """Tests for pylint's string formatting checker."""

    CHECKER_CLASS = strings.StringFormatChecker

    @pytest.mark.skipif(
        sys.version_info <= (3, 0),
        reason="Tests that the string formatting checker "
               "doesn't fail when encountering a bytes "
               "string with a .format call")
    def test_format_bytes(self):
        # The checker must not crash (and must stay silent) on a .format()
        # call made on a bytes literal.
        snippet = "b'test'.format(1, 2)"
        call_node = astroid.extract_node(snippet)
        with self.assertNoMessages():
            self.checker.visit_call(call_node)
| {
"content_hash": "f8ef3fce2ae10a111ddc7ed6710534e7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 67,
"avg_line_length": 30,
"alnum_prop": 0.6348484848484849,
"repo_name": "ClovisIRex/Snake-django",
"id": "9cb518828b15ce9defc605853003f9f00adba5e3",
"size": "873",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/pylint/test/unittest_checker_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6771"
},
{
"name": "HTML",
"bytes": "3435"
},
{
"name": "JavaScript",
"bytes": "2172"
},
{
"name": "Python",
"bytes": "8285"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
"""Add a new patient to the xpacsdb.patient_info table
Usage:
add_patient.py [options]
add_patient.py -h
Options:
-h, --help Show this screen
-d, --debug Show some debug information
-s <host> The MySQL server host IP address.
-p <port> MySQL port. Default is 3306.
-u <user> User name.
--password=<password> User password.
--db=<database> Database name. Default is xpacs.
-f <csv_file> Read list of patients from a CSV file (see below)
Batch addition of patients
^^^^^^^^^^^^^^^^^^^^^^^^^^
If -f option is given, then patients are added in from a CSV file in a batch
mode. This file should have the following headers:
    'patient_id', 'gender', 'cohort', 'ethnicity', 'primary_diagnosis'
If there are existing patient_ids in the database, then the existing rows
will be updated.
Author: Avan Suinesiaputra - University of Auckland (2017)
"""
# Docopt is a library for parsing command line arguments
import docopt
import getpass
import mysql.connector
import termutils as tu
import sqlutils as su
if __name__ == '__main__':
try:
# Parse arguments, use file docstring as a parameter definition
arguments = docopt.docopt(__doc__)
# Default values
if not arguments['-s']:
arguments['-s'] = '127.0.0.1'
if not arguments['-p']:
arguments['-p'] = 3306
if not arguments['--db']:
arguments['--db'] = 'xpacs'
# Check user & password
if not arguments['-u']:
arguments['-u'] = raw_input('Username: ')
if arguments['--password'] is None:
arguments['--password'] = getpass.getpass('Password: ')
# print arguments for debug
if arguments['--debug']:
tu.debug(str(arguments))
# Handle invalid options
except docopt.DocoptExit as e:
tu.error(e.message)
exit()
# connecting
print 'Connecting to mysql://' + arguments['-s'] + ':' + str(arguments['-p']) + ' ...'
try:
cnx = mysql.connector.connect(user=arguments['-u'],
host=arguments['-s'],
port=arguments['-p'],
password=arguments['--password'],
database=arguments['--db'])
except mysql.connector.Error as err:
print(err)
exit()
existing_patients = su.get_all_patient_ids(cnx)
# it's either by CSV file or interactive
if arguments['-f'] is None:
# First question: who is the patient?
patientID = raw_input('Patient ID: ')
if patientID in existing_patients:
tu.error("Patient " + patientID + " already exists.")
exit()
# Remaining questions
cohort = raw_input('Cohort [press <enter> to skip]: ')
ethnicity = raw_input('Ethnicity [press <enter> to skip]: ')
gender = raw_input('Cohort [M/F/U=unknown (default)]: ')
if str.lower(gender) == 'f':
gender = 'female'
elif str.lower(gender) == 'm':
gender = 'male'
else:
gender = 'unknown'
primary_diagnosis = raw_input('Primary diagnosis [press <enter> to skip]: ')
query = su.insert_new_patient_info(cnx, {
'patient_id': patientID,
'cohort': cohort,
'ethnicity': ethnicity,
'gender': gender,
'primary_diagnosis': primary_diagnosis
})
if arguments['--debug']:
tu.debug(query)
tu.ok("Patient " + patientID + " added to the database")
# don't forget to close the connection
cnx.close()
else:
try:
for row in su.read_csv(arguments['-f']):
# fix gender
g = str.lower(row['gender'])
if g == 'male' or g == 'm':
row['gender'] = 'male'
elif g == 'female' or g == 'f':
row['gender'] = 'female'
else:
row['gender'] = 'unknown'
# update or insert
if row['patient_id'] in existing_patients:
if arguments['--debug']:
tu.warn('Updating ' + row['patient_id'])
query = su.update_patient_info(cnx, row)
else:
if arguments['--debug']:
tu.debug('Inserting ' + row['patient_id'])
query = su.insert_new_patient_info(cnx, row)
if arguments['--debug']:
print query
except Exception, e:
tu.error(str(e))
exit()
tu.ok("SUCCESS")
| {
"content_hash": "2f2e3aa5f6e304d045d4a15052a73347",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 86,
"avg_line_length": 29.48701298701299,
"alnum_prop": 0.5463554283197534,
"repo_name": "CardiacAtlasProject/CAPServer2.0",
"id": "a99a3ece7c9f12ec8a34f174a147734dbb5907ca",
"size": "4541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbase/utils/add_patient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5036"
},
{
"name": "CSS",
"bytes": "10248"
},
{
"name": "Gherkin",
"bytes": "179"
},
{
"name": "HTML",
"bytes": "182082"
},
{
"name": "Java",
"bytes": "446416"
},
{
"name": "JavaScript",
"bytes": "271433"
},
{
"name": "Python",
"bytes": "13917"
},
{
"name": "Scala",
"bytes": "20977"
},
{
"name": "Shell",
"bytes": "9179"
}
],
"symlink_target": ""
} |
from feature_format import featureFormat, targetFeatureSplit
import pickle
from sklearn.naive_bayes import GaussianNB
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.grid_search import GridSearchCV
import numpy as np
# loading the enron data dictionary
# (Python 2 script: text-mode "r" works for pickle here; deprecated
# sklearn.cross_validation / grid_search modules are used throughout.)
with open("final_project_dataset.pkl", "r") as data_file:
    data_dict = pickle.load(data_file)
# removing 'TOTAL' outlier
del data_dict['TOTAL']
# creating new features: sum of total payments and total stock value,
# defaulting to 0.0 whenever either field is the string "NaN"
for name in data_dict:
    if data_dict[name]["total_payments"] != "NaN" and\
       data_dict[name]["total_stock_value"] != "NaN":
        data_dict[name]["ttl_pay_stock"] = \
            data_dict[name]["total_payments"] + \
            data_dict[name]["total_stock_value"]
    else:
        data_dict[name]["ttl_pay_stock"] = 0.0
# list containing all labels and features except email
feat_list = ['poi',
             'salary',
             'to_messages',
             'deferral_payments',
             'total_payments',
             'exercised_stock_options',
             'bonus',
             'restricted_stock',
             'shared_receipt_with_poi',
             'restricted_stock_deferred',
             'total_stock_value',
             'expenses',
             'loan_advances',
             'from_messages',
             'other',
             'from_this_person_to_poi',
             'director_fees',
             'deferred_income',
             'long_term_incentive',
             'from_poi_to_this_person',
             'ttl_pay_stock']
# Selecting the best features using GridSearchCV over SelectKBest's k
data = featureFormat(data_dict, feat_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
pipe = Pipeline([('KBest', SelectKBest()),
                 ('clf', GaussianNB())])
K = [1,2,3,4,5]
param_grid = [{'KBest__k': K}]
gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='f1')
gs.fit(features, labels)
kb = SelectKBest(k=gs.best_params_['KBest__k'])
kb.fit(features, labels)
# +1 offsets the selected feature indices into feat_list, whose first entry
# is the 'poi' label rather than a feature
# NOTE(review): assumes featureFormat keeps feat_list order — confirm
best_feat = list(kb.get_support(indices=True)+1)
print "Best f1 score:", gs.best_score_
print "No. of features used for the best f1 score:", gs.best_params_['KBest__k']
print "Names of features used:\n", [feat_list[i] for i in best_feat]
# Hand-picked final feature set ('poi' is the label)
final_feat_list = ['poi',
                   'salary',
                   'exercised_stock_options',
                   'bonus',
                   'total_stock_value']
# Computing evaluation metrics using the selected features over 1000
# stratified shuffle splits
final_data = featureFormat(data_dict, final_feat_list, sort_keys = True)
final_labels, final_features = targetFeatureSplit(final_data)
final_sss = StratifiedShuffleSplit(final_labels, 1000, random_state = 42)
accuracy = []
precision = []
recall = []
f1 = []
for train_indices, test_indices in final_sss:
    features_train = [final_features[i] for i in train_indices]
    features_test = [final_features[j] for j in test_indices]
    labels_train = [final_labels[i] for i in train_indices]
    labels_test = [final_labels[j] for j in test_indices]
    clf = GaussianNB()
    clf.fit(features_train, labels_train)
    pred = clf.predict(features_test)
    accuracy.append(accuracy_score(labels_test, pred))
    precision.append(precision_score(labels_test, pred))
    recall.append(recall_score(labels_test, pred))
    f1.append(f1_score(labels_test, pred))
# Report the metrics averaged over all splits
print "Evaluation results of GaussianNB using best features:"
print "Mean Accuracy:", np.mean(accuracy)
print "Mean Precision:", np.mean(precision)
print "Mean Recall:", np.mean(recall)
print "Mean f1 score:", np.mean(f1)
| {
"content_hash": "a24100f845e24b23ac5195d51edbcdfb",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 83,
"avg_line_length": 34.570093457943926,
"alnum_prop": 0.6455798864557989,
"repo_name": "rjegankumar/enron_email_fraud_identification",
"id": "a8a6242d389ca5b999f731ba8fcc8cd50ed1ed98",
"size": "3699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nb_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29136"
}
],
"symlink_target": ""
} |
import sys, os.path

# Make the bundled sources importable before pulling in the application.
_SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.insert(0, _SRC_DIR)

import skameyka

# Launch the development server.
skameyka.app.run(port=5001, debug=True)
"content_hash": "46e7e46b32bc29d3febdd5955ac83d41",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 66,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.697986577181208,
"repo_name": "baverman/skameyka",
"id": "063e358e784f1830e802c72c9208e82e93e7f679",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4845"
}
],
"symlink_target": ""
} |
import tigre
import numpy as np
from tigre.utilities import sample_loader
import tigre.algorithms as algs
#%% Geometry
geo = tigre.geometry_default(high_resolution=False)
# Offsets
## ####################################################################################
# Lets try a simple offset: the detector gets completely displaced
geo.offOrigin = np.array([0, 0, 0])  # Offset of image from origin (mm)
geo.offDetector = np.array([200, 200])  # Offset of Detector (mm)
## ####################################################################################
# Auxiliary
geo.accuracy = 0.5
# Accuracy of FWD proj (vx/sample)
## Load data and generate projections
# see previous demo for explanation
angles = np.linspace(0, 2 * np.pi, 100)
# Load thorax phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
projections = tigre.Ax(head, geo, angles)
#%% lets see it
tigre.plotproj(projections, angles)
## we will skip reconstruction of these tests because the image is outside the detector
## #####################################################################
#%% Second test: lets test varying (per-angle) offsets:
geo.offDetector = np.vstack(
    [10 * np.sin(angles), 20 * np.cos(angles)]
).T  # Offset of Detector (mm), one row per projection angle
projections2 = tigre.Ax(head, geo, angles)
## lets see it
tigre.plotproj(projections2, angles)
## reconstruction
res = algs.sart(projections2, geo, angles, 10)
tigre.plotimg(res, dim="z")
#%% Third test: lets vary everything
# Lets make the image smaller
geo.nVoxel = np.array([128, 128, 128])  # number of voxels (vx)
geo.sVoxel = np.array([256, 256, 256]) / 2  # total size of the image (mm)
geo.dVoxel = geo.sVoxel / geo.nVoxel  # size of each voxel (mm)
head = sample_loader.load_head_phantom(geo.nVoxel)
geo.offDetector = np.vstack(
    [10 * np.sin(angles), 10 * np.cos(angles)]
).T  # Offset of Detector (mm)
geo.offOrigin = np.vstack(
    [40 * np.sin(angles), np.linspace(-30, 30, 100), 40 * np.cos(angles)]
).T  # Offset of image from origin (mm), per angle
projections3 = tigre.Ax(head, geo, angles)
## lets see it
tigre.plotproj(projections3, angles)
## reconstruction
res = algs.sart(projections3, geo, angles, 10)
tigre.plotimg(res, dim="z")
| {
"content_hash": "fe2ae2f5642346af4245e7cb1ce54657",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 87,
"avg_line_length": 32.78260869565217,
"alnum_prop": 0.610079575596817,
"repo_name": "CERN/TIGRE",
"id": "f61ba7bcef144cb08d79cb8bfc6a99ff3faaadfd",
"size": "3177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/demos/d14_Offsets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "108432"
},
{
"name": "Cuda",
"bytes": "453981"
},
{
"name": "Cython",
"bytes": "28212"
},
{
"name": "MATLAB",
"bytes": "636785"
},
{
"name": "Python",
"bytes": "281064"
},
{
"name": "Shell",
"bytes": "761"
},
{
"name": "TeX",
"bytes": "32165"
}
],
"symlink_target": ""
} |
# Read the Docs theme setup; these assignments override the alabaster
# default that sphinx-quickstart left commented out further below.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#
# sklearn-dummies documentation build configuration file, created by
# sphinx-quickstart on Thu Mar  2 23:06:01 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package under documentation importable for autodoc.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sklearn-dummies'
copyright = '2017, Gustavo Sena Mafra'
author = 'Gustavo Sena Mafra'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# (superseded by the sphinx_rtd_theme assignment at the top of this file)
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sklearn-dummiesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'sklearn-dummies.tex', 'sklearn-dummies Documentation',
     'Gustavo Sena Mafra', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'sklearn-dummies', 'sklearn-dummies Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'sklearn-dummies', 'sklearn-dummies Documentation',
     author, 'sklearn-dummies', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "3bc301fd580aae299ae3d9a1bb9ee0e0",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 30.59006211180124,
"alnum_prop": 0.6797969543147208,
"repo_name": "gsmafra/sklearn-dummies",
"id": "6f90c52f0bb408369b73b532a3841fa6bc576f0f",
"size": "4973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7524"
}
],
"symlink_target": ""
} |
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
# Protocol-specific L1 cache type; size/assoc/index bits are configured
# per CPU in create_system() below.
class L1Cache(RubyCache): pass
# Protocol-specific L2 cache type; configured per L2 bank in create_system().
class L2Cache(RubyCache): pass
def define_options(parser):
    """Register the MOESI_CMP_token protocol tuning options on *parser*."""
    parser.add_option("--l1-retries", type="int", default=1,
        help="Token_CMP: # of l1 retries before going persistent")
    # stray trailing semicolon removed from the next call
    parser.add_option("--timeout-latency", type="int", default=300,
        help="Token_CMP: cycles until issuing again")
    # fixed "dyanimc" typo in the user-visible help text
    parser.add_option("--disable-dyn-timeouts", action="store_true",
        help="Token_CMP: disable dynamic timeouts, use fixed latency instead")
    parser.add_option("--allow-atomic-migration", action="store_true",
        help="allow migratory sharing for atomic only accessed blocks")
def create_system(options, full_system, system, dma_ports, bootmem,
                  ruby_system):
    """Build the Ruby objects for the MOESI_CMP_token protocol.

    Creates L1/L2 cache controllers, directory controllers and DMA
    controllers, wires each of them to the Ruby network, and returns the
    tuple (cpu_sequencers, mem_dir_cntrl_nodes, topology).
    """

    if buildEnv['PROTOCOL'] != 'MOESI_CMP_token':
        panic("This script requires the MOESI_CMP_token protocol to be built.")

    #
    # number of tokens that the owner passes to requests so that shared blocks can
    # respond to read requests
    #
    n_tokens = options.num_cpus + 1

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)

        # the ruby random tester reuses num_cpus to specify the
        # number of cpu ports connected to the tester object, which
        # is stored in system.cpu. because there is only ever one
        # tester object, num_cpus is not necessarily equal to the
        # size of system.cpu; therefore if len(system.cpu) == 1
        # we use system.cpu[0] to set the clk_domain, thereby ensuring
        # we don't index off the end of the cpu list.
        if len(system.cpu) == 1:
            clk_domain = system.cpu[0].clk_domain
        else:
            clk_domain = system.cpu[i].clk_domain

        l1_cntrl = L1Cache_Controller(version=i, L1Icache=l1i_cache,
                                      L1Dcache=l1d_cache,
                                      l2_select_num_bits=l2_bits,
                                      N_tokens=n_tokens,
                                      retry_threshold=options.l1_retries,
                                      fixed_timeout_latency=\
                                      options.timeout_latency,
                                      dynamic_timeout_enabled=\
                                      not options.disable_dyn_timeouts,
                                      no_mig_atomic=not \
                                      options.allow_atomic_migration,
                                      send_evictions=send_evicts(options),
                                      transitions_per_cycle=options.ports,
                                      clk_domain=clk_domain,
                                      ruby_system=ruby_system)

        cpu_seq = RubySequencer(version=i, icache=l1i_cache,
                                dcache=l1d_cache, clk_domain=clk_domain,
                                ruby_system=ruby_system)

        l1_cntrl.sequencer = cpu_seq
        # attach as a uniquely named child of the ruby system
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controllers and the network
        l1_cntrl.requestFromL1Cache = MessageBuffer()
        l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.responseFromL1Cache = MessageBuffer()
        l1_cntrl.responseFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.persistentFromL1Cache = MessageBuffer(ordered = True)
        l1_cntrl.persistentFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.mandatoryQueue = MessageBuffer()

        l1_cntrl.requestToL1Cache = MessageBuffer()
        l1_cntrl.requestToL1Cache.slave = ruby_system.network.master
        l1_cntrl.responseToL1Cache = MessageBuffer()
        l1_cntrl.responseToL1Cache.slave = ruby_system.network.master
        l1_cntrl.persistentToL1Cache = MessageBuffer(ordered = True)
        l1_cntrl.persistentToL1Cache.slave = ruby_system.network.master

    # L2 banks are selected by the bits directly above the block offset
    l2_index_start = block_size_bits + l2_bits

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      N_tokens = n_tokens,
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

        # Connect the L2 controllers and the network
        l2_cntrl.GlobalRequestFromL2Cache = MessageBuffer()
        l2_cntrl.GlobalRequestFromL2Cache.master = ruby_system.network.slave
        l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
        l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
        l2_cntrl.responseFromL2Cache = MessageBuffer()
        l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave

        l2_cntrl.GlobalRequestToL2Cache = MessageBuffer()
        l2_cntrl.GlobalRequestToL2Cache.slave = ruby_system.network.master
        l2_cntrl.L1RequestToL2Cache = MessageBuffer()
        l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
        l2_cntrl.responseToL2Cache = MessageBuffer()
        l2_cntrl.responseToL2Cache.slave = ruby_system.network.master
        l2_cntrl.persistentToL2Cache = MessageBuffer(ordered = True)
        l2_cntrl.persistentToL2Cache.slave = ruby_system.network.master

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
        options, bootmem, ruby_system, system)
    dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
    if rom_dir_cntrl_node is not None:
        dir_cntrl_nodes.append(rom_dir_cntrl_node)
    for dir_cntrl in dir_cntrl_nodes:
        dir_cntrl.l2_select_num_bits = l2_bits
        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.persistentToDir = MessageBuffer(ordered = True)
        dir_cntrl.persistentToDir.slave = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master

        dir_cntrl.requestFromDir = MessageBuffer()
        dir_cntrl.requestFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.persistentFromDir = MessageBuffer(ordered = True)
        dir_cntrl.persistentFromDir.master = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
        dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromMemory = MessageBuffer()

    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.mandatoryQueue = MessageBuffer()
        dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.reqToDirectory = MessageBuffer()
        dma_cntrl.reqToDirectory.master = ruby_system.network.slave

    # order matters: L1s, then L2s, then directories, then DMA (see above)
    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.mandatoryQueue = MessageBuffer()
        io_controller.responseFromDir = MessageBuffer(ordered = True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.reqToDirectory = MessageBuffer()
        io_controller.reqToDirectory.master = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    ruby_system.network.number_of_virtual_networks = 6
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, mem_dir_cntrl_nodes, topology)
| {
"content_hash": "f3058165e405a8884a4f14cb836d1702",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 82,
"avg_line_length": 45.31818181818182,
"alnum_prop": 0.6108324974924775,
"repo_name": "TUD-OS/gem5-dtu",
"id": "94a518b2afffe83f01a3c223f906c40e500dc6e5",
"size": "12597",
"binary": false,
"copies": "2",
"ref": "refs/heads/dtu-mmu",
"path": "configs/ruby/MOESI_CMP_token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
import document
import interface
import package
import rulegenerator
import testgenerator
class ArgumentVisitorBase(interface.ArgumentVisitor):
    """
    Base of the argument visitors in this module: a method's return value
    is handled exactly like an allocation argument.
    """
    def visitReturnValue(self, retValue):
        self.visitAllocation(retValue)
class SingleArgumentVisitor(ArgumentVisitorBase):
    """
    Visitor which handles compound arguments and calls the accept methods
    of each component of the compound arguments. I.e. derived visitors do not
    have to care about compound arguments, they only have to consider "single"
    arguments.
    """
    def visitCompound(self, compound):
        # Forward each existing component; compound slots may be None.
        # (idiom fix: "not arg is None" -> "arg is not None")
        for arg in compound.args:
            if arg is not None:
                arg.accept(self)
class CollectVisitor(SingleArgumentVisitor):
    """
    Visitor that gathers every argument it visits into the set self.args.
    """
    def __init__(self):
        self.args = set()

    def _remember(self, arg):
        self.args.add(arg)

    # Every kind of single argument is recorded in exactly the same way.
    visitInput = _remember
    visitParameter = _remember
    visitConstant = _remember
    visitRefInput = _remember
    visitAllocation = _remember
    visitInputOutput = _remember
    visitOutput = _remember
class MethodGenerator(object):
    """
    Abstract base class of all generators which output files depending on
    operators. It provides utility functions used by the derived classes.
    """
    class CollectParametersVisitor(SingleArgumentVisitor):
        """Collects all visited parameters in the list self.params."""
        def __init__(self):
            self.params = []

        def visitParameter(self, parameter):
            self.params.append(parameter)

    class DocVisitor(SingleArgumentVisitor):
        """
        Visitor which holds a document.
        """
        def __init__(self, doc):
            self.doc = doc

    def __init__(self):
        # self.p: current package, self.m: current method,
        # self.doc: output document -- all set by save().
        self.p = None
        self.m = None
        self.doc = None

    def save(self, package, method, printResult = False):
        """
        Writes the output of the generator for the input method to the current
        document and optionally prints it to the standard output.
        """
        self.p = package
        self.m = method
        self.doc = document.Document()
        self.optionParam = self.createOptionParameter()
        self.generate()

        if printResult:
            print self.doc.string()

    def createOptionParameter(self):
        """
        Creates and returns an enum parameter which provides one value for
        each option of the method.
        """
        p = package.EnumParameter("dataFlow", "Data flow")
        p.isInit = True
        for opt in self.m.options:
            desc = package.EnumDescription(opt.ident.constant(), str(opt.name))
            desc.name = opt.name
            p.descriptions.append(desc)
        return p

    def visitAll(self, visitor, visitOptionParam = True):
        """
        Collects all arguments of all options and removes duplicates (i.e.
        arguments with common identifier). Then the visitor visits all
        remaining arguments and the option parameter if the according flag is
        set to true.
        """
        v = CollectVisitor()
        for opt in self.m.options:
            for arg in opt.args:
                arg.accept(v)

        # keep only the first argument for each distinct identifier
        args = v.args
        argIdents = set()
        filteredArgs = set()
        for arg in args:
            if str(arg.ident) not in argIdents:
                argIdents.add(str(arg.ident))
                filteredArgs.add(arg)

        # deterministic visiting order: sorted by identifier
        for arg in sorted(filteredArgs, key=lambda arg: str(arg.ident)):
            arg.accept(visitor)

        if visitOptionParam and self.optionParam:
            self.optionParam.accept(visitor)

    def visitOption(self, opt, visitor):
        """
        The visitor visits all arguments of the given option.
        """
        for arg in opt.args:
            arg.accept(visitor)

    def namespaceEnter(self):
        """
        Enters the namespace of the package the method belongs to.
        """
        self.doc.namespaceEnter("stromx")
        self.doc.namespaceEnter(self.p.ident)

    def namespaceExit(self):
        """
        Exits the package namespace.
        """
        self.doc.namespaceExit(self.p.ident)
        self.doc.namespaceExit("stromx")
        self.doc.blank()
class OpHeaderGenerator(MethodGenerator):
"""
Generates the header of a method operator.
"""
class ConnectorEnumVisitor(SingleArgumentVisitor):
"""
Exports the enumeration of the IDs of all visited input and output
connectors.
"""
def __init__(self):
self.connectors = set()
def visitRefInput(self, refInputArg):
self.connectors.add(refInputArg)
def visitInput(self, inputArg):
self.connectors.add(inputArg)
def visitInputOutput(self, arg):
self.connectors.add(arg)
def visitOutput(self, output):
self.connectors.add(output)
def visitAllocation(self, allocation):
self.connectors.add(allocation)
def export(self, doc):
connectorIds = [i.ident.constant() for i in self.connectors]
doc.enum("ConnectorId", set(connectorIds))
class ParameterEnumVisitor(MethodGenerator.CollectParametersVisitor):
"""
Exports the enumeration of the parameter IDs of all visited parameters.
"""
def export(self, doc):
paramIds = [p.ident.constant() for p in self.params]
doc.enum("ParameterId", set(paramIds))
class DataMemberVisitor(MethodGenerator.DocVisitor):
"""
Exports class members for the values of all visited parameters.
"""
def visitParameter(self, parameter):
l = "{0} {1};".format(parameter.dataType.concreteTypeId(),
parameter.ident.attribute())
self.doc.line(l)
    class DescriptionsVisitor(MethodGenerator.DocVisitor):
        """
        Exports class members for the parameter description of all visited
        parameters.
        """
        def visitParameter(self, parameter):
            # emit the concrete runtime parameter pointer type matching the
            # argument kind; an unknown kind is a programming error
            if parameter.argType == package.ArgType.PLAIN:
                self.doc.line(("runtime::Parameter* m_{0}Parameter;"
                              ).format(parameter.ident))
            elif parameter.argType == package.ArgType.ENUM:
                self.doc.line(("runtime::EnumParameter* m_{0}Parameter;"
                              ).format(parameter.ident))
            elif parameter.argType == package.ArgType.NUMERIC:
                self.doc.line(("runtime::NumericParameter<{1}>* m_{0}Parameter;"
                              ).format(parameter.ident,
                                       parameter.dataType.typeId()))
            elif parameter.argType == package.ArgType.MATRIX:
                self.doc.line(("runtime::MatrixParameter* m_{0}Parameter;"
                              ).format(parameter.ident))
            else:
                assert(False)

        # all connector-like arguments share the visitInput handling below
        def visitOutput(self, arg):
            self.visitInput(arg)

        def visitInputOutput(self, arg):
            self.visitInput(arg)

        def visitAllocation(self, arg):
            self.visitInput(arg)

        def visitRefInput(self, arg):
            self.visitInput(arg)

        def visitInput(self, arg):
            # matrix connectors get the more specific description type
            if arg.argType == package.ArgType.MATRIX:
                self.doc.line((
                    "runtime::MatrixDescription* m_{0}Description;"
                ).format(arg.ident))
            else:
                self.doc.line((
                    "runtime::Description* m_{0}Description;"
                ).format(arg.ident))
class EnumParameterIdVisitor(MethodGenerator.DocVisitor):
"""
Exports enumerations for the IDs of all visited enumeration parameters.
"""
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
keys = []
for desc in parameter.descriptions:
keys.append(desc.ident)
enumName = "{0}Id".format(parameter.ident.className())
self.doc.enum(enumName, keys)
class EnumConversionDeclVisitor(MethodGenerator.DocVisitor):
"""
Exports declarations of conversion functions for each visited
enumeration parameter.
"""
def visitParameter(self, parameter):
if parameter.argType != package.ArgType.ENUM:
return
name = parameter.ident.className()
l = "int convert{0}(const runtime::Enum & value);".format(name)
self.doc.line(l)
def generate(self):
self.__includeGuardEnter()
self.__includes()
self.namespaceEnter()
self.__classEnter()
self.__public()
v = OpHeaderGenerator.EnumParameterIdVisitor(self.doc)
self.visitAll(v)
v = OpHeaderGenerator.ConnectorEnumVisitor()
self.visitAll(v)
v.export(self.doc)
v = OpHeaderGenerator.ParameterEnumVisitor()
self.visitAll(v)
v.export(self.doc)
self.__constructor()
self.__kernelOverloads()
self.__private()
self.__statics()
self.__setupFunctions()
v = OpHeaderGenerator.EnumConversionDeclVisitor(self.doc)
self.visitAll(v, False)
self.doc.blank()
v = OpHeaderGenerator.DataMemberVisitor(self.doc)
self.visitAll(v)
v = OpHeaderGenerator.DescriptionsVisitor(self.doc)
self.visitAll(v)
self.__classExit()
self.namespaceExit()
self.__includeGuardExit()
filename = "stromx/{0}/{1}.h".format(self.p.ident,
self.m.ident.className())
with file(filename, "w") as f:
f.write(self.doc.string())
def __includes(self):
self.doc.line('#include "stromx/{0}/Config.h"'.format(self.p.ident))
self.doc.line('#include <stromx/cvsupport/Matrix.h>')
self.doc.line('#include <stromx/runtime/Enum.h>')
self.doc.line('#include <stromx/runtime/EnumParameter.h>')
self.doc.line('#include <stromx/runtime/List.h>')
self.doc.line('#include <stromx/runtime/MatrixDescription.h>')
self.doc.line('#include <stromx/runtime/MatrixParameter.h>')
self.doc.line('#include <stromx/runtime/NumericParameter.h>')
self.doc.line('#include <stromx/runtime/OperatorException.h>')
self.doc.line('#include <stromx/runtime/OperatorKernel.h>')
self.doc.line('#include <stromx/runtime/Primitive.h>')
self.doc.blank()
def __includeGuardEnter(self):
self.doc.line("#ifndef {0}".format(self.__includeGuard()))
self.doc.line("#define {0}".format(self.__includeGuard()))
self.doc.blank()
def __classEnter(self):
self.doc.line("class {0} {1} : public runtime::OperatorKernel".format(
self.__apiDecl(), self.m.ident.className()))
self.doc.line("{")
self.doc.increaseIndent()
    def __public(self):
        # start the public section of the class declaration
        self.doc.label("public")
    def __constructor(self):
        # declare the default constructor of the operator
        self.doc.line("{0}();".format(self.m.ident.className()))
def __kernelOverloads(self):
self.doc.line("virtual OperatorKernel* clone() const "
"{{ return new {0}; }}".format(self.m.ident.className()))
self.doc.line("virtual void setParameter(const unsigned int id, "
"const runtime::Data& value);")
self.doc.line("virtual const runtime::DataRef getParameter("
"const unsigned int id) const;")
self.doc.line("void initialize();")
self.doc.line("virtual void execute(runtime::DataProvider& provider);")
self.doc.blank()
    def __private(self):
        # start the private section of the class declaration
        self.doc.label("private")
def __statics(self):
self.doc.line("static const std::string PACKAGE;")
self.doc.line("static const runtime::Version VERSION;")
self.doc.line("static const std::string TYPE;")
self.doc.blank()
def __setupFunctions(self):
self.doc.line("const std::vector<const runtime::Parameter*> "
"setupInitParameters();")
self.doc.line("const std::vector<const runtime::Parameter*> "
"setupParameters();")
self.doc.line("const std::vector<const runtime::Description*> "
"setupInputs();")
self.doc.line("const std::vector<const runtime::Description*> "
"setupOutputs();")
self.doc.blank()
    def __classExit(self):
        # close the class declaration opened by __classEnter()
        self.doc.decreaseIndent()
        self.doc.line("};")
    def __includeGuardExit(self):
        # close the include guard opened by __includeGuardEnter()
        self.doc.line("#endif // {0}".format(self.__includeGuard()))
def __includeGuard(self):
return "STROMX_{0}_{1}_H".format(self.p.ident.upper(),
self.m.ident.upper())
    def __apiDecl(self):
        # DLL export/import macro of the package, e.g. STROMX_<PKG>_API
        return "STROMX_{0}_API".format(self.p.ident.upper())
class OpImplGenerator(MethodGenerator):
"""
Generates the header of a method operator.
"""
class ParameterInitVisitor(MethodGenerator.CollectParametersVisitor):
"""
Exports the constructor initialization for all visited parameter data
members .
"""
def export(self, doc):
for i, p in enumerate(self.params):
defaultValue = p.default if p.default != None else ""
defaultValue = document.pythonToCpp(defaultValue)
init = "{0}({1})".format(p.ident.attribute(), defaultValue)
if i != len(self.params) - 1:
doc.line("{0},".format(init))
else:
doc.line(init)
class GetParametersVisitor(MethodGenerator.DocVisitor):
"""
Exports case sections which return the values of all visited
parameters.
"""
def visitParameter(self, parameter):
self.doc.label("case {0}".format(parameter.ident.constant()))
self.doc.line("return {0};".format(parameter.ident.attribute()))
    class SetParametersVisitor(MethodGenerator.DocVisitor):
        """
        Exports case sections which set the values of all visited parameters.
        """
        def visitParameter(self, parameter):
            # choose the value check matching the parameter kind; PLAIN
            # parameters are stored without any range/enum check
            l = ""
            if parameter.argType == package.ArgType.PLAIN:
                pass
            elif parameter.argType == package.ArgType.ENUM:
                l = ("cvsupport::checkEnumValue(castedValue, {0}Parameter, *this);"
                ).format(parameter.ident.attribute())
            elif parameter.argType == package.ArgType.NUMERIC:
                l = ("cvsupport::checkNumericValue(castedValue, {0}Parameter, *this);"
                ).format(parameter.ident.attribute())
            elif parameter.argType == package.ArgType.MATRIX:
                l = ("cvsupport::checkMatrixValue(castedValue, {0}Parameter, *this);"
                ).format(parameter.ident.attribute())
            else:
                assert(False)
            self.__setParameterWithCheck(parameter, l)
        def __setParameterWithCheck(self, parameter, check):
            # emit: case label, cast, variant check, optional value check,
            # rule checks and finally the assignment to the data member
            self.doc.label("case {0}".format(parameter.ident.constant()))
            self.doc.scopeEnter()
            self.doc.line(("const {0} & castedValue = runtime::data_cast<{1}>(value);"
                ).format(parameter.dataType.typeId(),
                         parameter.dataType.typeId()))
            l = ("if(! castedValue.variant().isVariant({0}))".format(
                parameter.dataType.variant()))
            self.doc.line(l)
            self.doc.scopeEnter()
            l = 'throw runtime::WrongParameterType(parameter(id), *this);'
            self.doc.line(l)
            self.doc.scopeExit()
            if check != "":
                self.doc.line(check)
            checkParams = rulegenerator.CheckParameterVisitor(self.doc,
                                                              parameter)
            for rule in parameter.rules:
                rule.accept(checkParams)
            self.doc.line(("{0} = castedValue;"
                ).format(parameter.ident.attribute()))
            self.doc.scopeExit()
            self.doc.line("break;")
    class SetupParametersVisitor(MethodGenerator.DocVisitor):
        """
        Exports the allocation of the descriptions of all visited parameters.
        """
        def __init__(self, doc, isInit = False):
            # isInit: True while generating setupInitParameters(); it only
            # influences the access mode of the exported parameters
            super(OpImplGenerator.SetupParametersVisitor, self).__init__(doc)
            self.isInit = isInit
        def visitParameter(self, parameter):
            # dispatch on the argument type of the parameter
            if parameter.argType == package.ArgType.PLAIN:
                self.__visitPlainParameter(parameter)
            elif parameter.argType == package.ArgType.ENUM:
                self.__visitEnumParameter(parameter)
            elif parameter.argType == package.ArgType.MATRIX:
                self.__visitMatrixParameter(parameter)
            elif parameter.argType == package.ArgType.NUMERIC:
                self.__visitNumericParameter(parameter)
            else:
                assert(False)
        def __visitPlainParameter(self, parameter):
            ident = "m_{0}Parameter".format(parameter.ident)
            l = "{0} = new runtime::Parameter({1}, {2});"\
                .format(ident, parameter.ident.constant(),
                        parameter.dataType.variant())
            self.doc.line(l)
            self.__accessMode(ident)
            l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
            self.doc.line(l)
            l = "parameters.push_back({0});".format(ident)
            self.doc.line(l)
            self.doc.blank()
        def __visitEnumParameter(self, parameter):
            ident = "m_{0}Parameter".format(parameter.ident)
            l = ("{0} = new runtime::EnumParameter({1});"
                 ).format(ident, parameter.ident.constant())
            self.doc.line(l)
            self.__accessMode(ident)
            l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
            self.doc.line(l)
            # one EnumDescription entry per possible enumeration value
            for desc in parameter.descriptions:
                d = 'runtime::Enum({0})'.format(desc.ident)
                l = '{0}->add(runtime::EnumDescription({1}, L_("{2}")));'\
                    .format(ident, d, desc.name)
                self.doc.line(l)
            l = "parameters.push_back({0});".format(ident)
            self.doc.line(l)
            self.doc.blank()
        def __visitMatrixParameter(self, parameter):
            ident = "m_{0}Parameter".format(parameter.ident)
            l = "{0} = new runtime::MatrixParameter({1}, {2});"\
                .format(ident, parameter.ident.constant(),
                        parameter.dataType.variant())
            self.doc.line(l)
            self.__accessMode(ident)
            l = '{0}->setTitle(L_("{1}"));'.format(ident, parameter.name)
            self.doc.line(l)
            self.doc.line("{0}->setRows({1});".format(ident, parameter.rows))
            self.doc.line("{0}->setCols({1});".format(ident, parameter.cols))
            l = "parameters.push_back({0});".format(ident)
            self.doc.line(l)
            self.doc.blank()
        def __visitNumericParameter(self, parameter):
            ident = "m_{0}Parameter".format(parameter.ident)
            l = ("{0} = new runtime::NumericParameter<{2}>({1});"
                 ).format(ident, parameter.ident.constant(),
                          parameter.dataType.typeId())
            self.doc.line(l)
            self.__accessMode(ident)
            l = '{0}->setTitle(L_("{1}"));'\
                .format(ident, parameter.name)
            self.doc.line(l)
            # max, min and step are optional bounds of the parameter
            if parameter.maxValue != None:
                l = "{0}->setMax({1});".format(ident,
                                parameter.dataType.cast(parameter.maxValue))
                self.doc.line(l)
            if parameter.minValue != None:
                l = "{0}->setMin({1});".format(ident,
                                parameter.dataType.cast(parameter.minValue))
                self.doc.line(l)
            if parameter.step != None:
                l = "{0}->setStep({1});".format(ident,
                                parameter.dataType.cast(parameter.step))
                self.doc.line(l)
            l = "parameters.push_back({0});".format(ident)
            self.doc.line(l)
            self.doc.blank()
        def __accessMode(self, ident):
            # init parameters get NONE_WRITE, ordinary ones ACTIVATED_WRITE
            if self.isInit:
                accessMode = "NONE_WRITE"
            else:
                accessMode = "ACTIVATED_WRITE"
            l = "{0}->setAccessMode(runtime::Parameter::{1});"\
                .format(ident, accessMode)
            self.doc.line(l)
    class SetupOutputsVistor(MethodGenerator.DocVisitor):
        """
        Exports the allocation of the descriptions of all visited outputs.

        NOTE(review): the class name is misspelled ("Vistor") but must be
        kept because it is referenced by this name elsewhere in this file.
        """
        def visitInputOutput(self, arg):
            self.visitOutput(arg)
        def visitOutput(self, output):
            if output.argType == package.ArgType.PLAIN:
                self.__setupDescription(output)
            elif output.argType == package.ArgType.MATRIX:
                self.__setupMatrixDescription(output)
            else:
                assert(False)
        def visitAllocation(self, allocation):
            # allocated results are outputs as well
            self.visitOutput(allocation)
        def __setupDescription(self, arg):
            l = "runtime::Description* {0} = new runtime::Description({1}, {2});"\
                .format(arg.ident, arg.ident.constant(),
                        arg.dataType.variant())
            self.doc.line(l)
            l = '{0}->setTitle(L_("{1}"));'.format(arg.ident, arg.name)
            self.doc.line(l)
            l = "outputs.push_back({0});".format(arg.ident)
            self.doc.line(l)
            self.doc.blank()
        def __setupMatrixDescription(self, arg):
            l = "runtime::MatrixDescription* {0} = new runtime::MatrixDescription({1}, {2});"\
                .format(arg.ident, arg.ident.constant(),
                        arg.dataType.variant())
            self.doc.line(l)
            l = '{0}->setTitle(L_("{1}"));'.format(arg.ident, arg.name)
            self.doc.line(l)
            l = '{0}->setRows({1});'.format(arg.ident, arg.rows)
            self.doc.line(l)
            l = '{0}->setCols({1});'.format(arg.ident, arg.cols)
            self.doc.line(l)
            l = "outputs.push_back({0});".format(arg.ident)
            self.doc.line(l)
            self.doc.blank()
class SetupInputsVisitor(MethodGenerator.DocVisitor):
"""
Exports the allocation of the descriptions of all visited inputs.
"""
def visitOutput(self, arg):
if arg.argType == package.ArgType.PLAIN:
self.__setupDescription(arg, True)
elif arg.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(arg, True)
else:
assert(False)
def visitInput(self, arg):
if arg.argType == package.ArgType.PLAIN:
self.__setupDescription(arg, False)
elif arg.argType == package.ArgType.MATRIX:
self.__setupMatrixDescription(arg, False)
else:
assert(False)
def visitInputOutput(self, arg):
self.visitInput(arg)
def __setupDescription(self, arg, isOutput):
description = "{0}Description".format(arg.ident.attribute())
l = "{0} = new runtime::Description({1}, {2});"\
.format(description, arg.ident.constant(),
self.__getVariant(arg, isOutput))
self.doc.line(l)
l = '{0}->setTitle(L_("{1}"));'\
.format(description, arg.name)
self.doc.line(l)
l = "inputs.push_back({0});".format(description)
self.doc.line(l)
self.doc.blank()
def __setupMatrixDescription(self, arg, isOutput):
description = "{0}Description".format(arg.ident.attribute())
l = (
"{0} = new "
"runtime::MatrixDescription({1}, {2});"
).format(description, arg.ident.constant(),
self.__getVariant(arg, isOutput))
self.doc.line(l)
l = '{0}->setTitle("{1}");'.format(description, arg.name)
self.doc.line(l)
l = '{0}->setRows({1});'.format(description, arg.rows)
self.doc.line(l)
l = '{0}->setCols({1});'.format(description, arg.cols)
self.doc.line(l)
l = "inputs.push_back({0});".format(description)
self.doc.line(l)
self.doc.blank()
def __getVariant(self, arg, isOutput):
if isOutput:
return arg.dataType.canBeCreatedFromVariant()
else:
return arg.dataType.variant()
class InputMapperVisitor(MethodGenerator.DocVisitor):
"""
Exports input mappers for all visited inputs and outputs.
"""
def visitInput(self, arg):
self.__visit(arg)
def visitOutput(self, arg):
self.__visit(arg)
def visitInputOutput(self, arg):
self.__visit(arg)
def __visit(self, arg):
ident = arg.ident
constant = arg.ident.constant()
l = "runtime::Id2DataPair {0}InMapper({1});".format(ident, constant)
self.doc.line(l)
class ReceiveInputDataVisitor(SingleArgumentVisitor):
"""
Exports the receive input command for all visited inputs and outputs.
"""
def __init__(self):
self.line = ""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitOutput(self, output):
self.__visit(output)
def visitInputOutput(self, arg):
self.__visit(arg)
def export(self, doc):
if self.line != "":
doc.line("provider.receiveInputData({0});".format(self.line))
def __visit(self, arg):
if self.line == "":
self.line = "{0}InMapper".format(arg.ident)
else:
self.line += " && {0}InMapper".format(arg.ident)
class InDataVisitor(MethodGenerator.DocVisitor):
"""
Exports stromx::Data* variables for all visited inputs and outputs.
"""
def visitInput(self, inputArg):
self.doc.line(("const runtime::Data* "
"{0}Data = 0;").format(inputArg.ident))
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
self.doc.line("runtime::Data* {0}Data = 0;".format(output.ident))
class AccessVisitor(MethodGenerator.DocVisitor):
"""
Exports data accessors for all visited inputs and outputs.
"""
def visitInput(self, inputArg):
self.doc.line(("runtime::ReadAccess "
"{0}ReadAccess;").format(inputArg.ident))
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
mapper = "{0}InMapper".format(output.ident)
data = "{0}Data".format(output.ident)
self.doc.line(("runtime::DataContainer inContainer = "
"{0}.data();").format(mapper))
self.doc.line("runtime::WriteAccess writeAccess(inContainer);")
self.doc.line("{0} = &writeAccess.get();".format(data))
    class CopyWriteAccessVisitor(SingleArgumentVisitor):
        """
        Exports the if-conditions which either create a read access or
        reference an existing write access to read each visited input.
        """
        def __init__(self):
            self.output = None  # the single visited output, if any
            self.inputs = []    # all visited inputs
        def visitInput(self, inputArg):
            self.inputs.append(inputArg)
        def visitInputOutput(self, arg):
            self.visitOutput(arg)
        def visitOutput(self, output):
            # at most one output (i.e. one write access) is supported
            assert(self.output == None)
            self.output = output
        def export(self, doc):
            # no danger of reading a write access if there is no output (i.e.
            # no write access)
            if self.output == None:
                for i in self.inputs:
                    l = ("{0}ReadAccess = runtime::ReadAccess("
                         "{0}InMapper.data());").format(i.ident)
                    doc.line(l)
                    l = "{0}Data = &{0}ReadAccess.get();".format(i.ident)
                    doc.line(l)
                    doc.blank()
                return
            # check if a read access refers to the same data as the write
            # access and handle this situation accordingly
            for i in self.inputs:
                l = "if({0}InMapper.data() == inContainer)".format(i.ident)
                doc.line(l)
                doc.scopeEnter()
                if i.inPlace:
                    # NOTE(review): hardcodes 'srcData' instead of deriving
                    # it from i.ident -- presumably the in-place input is
                    # always named 'src'; confirm
                    doc.line("srcData = &writeAccess.get();")
                else:
                    message = '"Can not operate in place."'
                    ex = (
                        "throw runtime::InputError({0}, *this, {1});"
                    ).format(i.ident.constant(), message)
                    doc.line(ex)
                doc.scopeExit()
                doc.line("else")
                doc.scopeEnter()
                l = ("{0}ReadAccess = runtime::ReadAccess("
                     "{0}InMapper.data());").format(i.ident)
                doc.line(l)
                l = "{0}Data = &{0}ReadAccess.get();".format(i.ident)
                doc.line(l)
                doc.scopeExit()
                doc.blank()
class CheckVariantVisitor(MethodGenerator.DocVisitor):
"""
Exports the variant check for each visited input.
"""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
l = (
"if(! {0}Data->variant().isVariant({1}Description->variant()))"
).format(arg.ident, arg.ident.attribute())
self.doc.line(l)
self.doc.scopeEnter()
l = (
'throw runtime::InputError({0}, *this, "Wrong input data '
'variant.");'
).format(arg.ident.constant())
self.doc.line(l)
self.doc.scopeExit()
class CastedDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the cast to a concrete stromx data type for each visited
input and output.
"""
def visitInput(self, inputArg):
l = ("const {1}* {0}CastedData = "
"runtime::data_cast<{1}>({0}Data);").format(inputArg.ident,
inputArg.dataType.typeId())
self.doc.line(l)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
l = ("{1} * {0}CastedData = "
"runtime::data_cast<{1}>({0}Data);").format(output.ident,
output.dataType.typeId())
self.doc.line(l)
class CheckCastedDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the data check for the data check of each visited input.
"""
def visitInput(self, inputArg):
self.__visit(inputArg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
if arg.argType == package.ArgType.MATRIX:
l = (
"cvsupport::checkMatrixValue(*{0}CastedData, {1}Description, *this);"
).format(arg.ident, arg.ident.attribute())
self.doc.line(l)
else:
pass
class InitInVisitor(MethodGenerator.DocVisitor):
"""
Exports the initialization of the argument before the OpenCV
function is called.
"""
def visitConstant(self, arg):
self.__visit(arg)
def visitInputOutput(self, arg):
self.__visit(arg)
def visitOutput(self, output):
self.__visit(output)
def __visit(self, arg):
self.doc.document(arg.initIn)
class CvDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the conversion to a native or OpenCV data type for each visited
argument.
"""
def visitInput(self, inputArg):
cvData = "{0} {1}CvData".format(inputArg.cvType.typeId(),
inputArg.ident)
castedData = "*{0}CastedData".format(inputArg.ident)
cast = inputArg.cvType.cast(castedData)
l = "{0} = {1};".format(cvData, cast)
self.doc.line(l)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, inputArg):
cvData = "{0} {1}CvData".format(inputArg.cvType.typeId(),
inputArg.ident)
castedData = "*{0}CastedData".format(inputArg.ident)
cast = inputArg.cvType.cast(castedData)
l = "{0} = {1};".format(cvData, cast)
self.doc.line(l)
def visitAllocation(self, allocation):
cvData = "{0} {1}CvData;".format(allocation.cvType.typeId(),
allocation.ident)
self.doc.line(cvData)
def visitParameter(self, parameter):
if parameter.argType == package.ArgType.ENUM:
self.__visitEnumParameter(parameter)
else:
cvData = "{0} {1}CvData".format(parameter.cvType.typeId(),
parameter.ident)
castedData = parameter.cvType.cast(parameter.ident.attribute())
self.doc.line("{0} = {1};".format(cvData, castedData))
def __visitEnumParameter(self, parameter):
ident = parameter.ident
cvData = "{0} {1}CvData".format(parameter.cvType.typeId(),
ident)
castedData = "convert{0}({1})".format(ident.className(),
ident.attribute())
self.doc.line("{0} = {1};".format(cvData, castedData))
def visitRefInput(self, refInput):
cvData = "{0} {1}CvData".format(refInput.cvType.typeId(),
refInput.ident)
rhs = "{0}CvData".format(refInput.refArg.ident)
self.doc.line("{0} = {1};".format(cvData, rhs))
class MethodArgumentVisitor(ArgumentVisitorBase):
"""
Exports the argument of the OpenCV function for each visited argument.
"""
def __init__(self):
self.args = []
def visitInput(self, inputArg):
self.visit(inputArg)
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
self.visit(output)
def visitAllocation(self, allocation):
self.visit(allocation)
def visitParameter(self, parameter):
self.visit(parameter)
def visitConstant(self, constant):
value = constant.value
value = document.pythonToCpp(value)
self.args.append(str(value))
def visitRefInput(self, refInput):
self.visit(refInput)
def visitReturnValue(self, retValue):
pass
def visit(self, arg):
self.args.append("{0}CvData".format(arg.ident))
def visitCompound(self, compound):
self.args.append(compound.create())
def export(self):
argStr = ""
for i, arg in enumerate(self.args):
argStr += arg
if i < len(self.args) - 1:
argStr += ", "
return argStr
class MethodReturnValueVisitor(ArgumentVisitorBase):
"""
Exports the return value of the OpenCV function out of each visited argument.
"""
def __init__(self):
self.returnValue = ""
def visitReturnValue(self, retVal):
self.returnValue = "{0}CvData = ".format(retVal.ident)
def export(self):
return self.returnValue
class OutDataVisitor(MethodGenerator.DocVisitor):
"""
Exports the wrapping of the result data into a data container for
each visited output or allocation.
"""
def visitInputOutput(self, arg):
self.visitOutput(arg)
def visitOutput(self, output):
l = "runtime::DataContainer {0}OutContainer = inContainer;".format(output.ident)
self.doc.line(l)
l = ("runtime::Id2DataPair {0}OutMapper({1}, "
"{0}OutContainer);").format(output.ident, output.ident.constant());
self.doc.line(l)
def visitAllocation(self, allocation):
dataType = allocation.dataType.typeId()
ident = allocation.ident
cvData = "{0}CvData".format(ident)
newObject = allocation.dataType.allocate(cvData)
l = "{0}* {1}CastedData = {2};".format(dataType, ident, newObject)
self.doc.line(l)
l = ("runtime::DataContainer {0}OutContainer = "
"runtime::DataContainer({0}CastedData);").format(ident)
self.doc.line(l)
l = ("runtime::Id2DataPair {0}OutMapper({1}, "
"{0}OutContainer);").format(ident, allocation.ident.constant())
self.doc.line(l)
    class InitOutVisitor(MethodGenerator.DocVisitor):
        """
        Exports the initialization of the output argument after the OpenCV
        function is called.
        """
        def visitAllocation(self, allocation):
            # emit the argument's hand-written post-call initialization snippet
            self.doc.document(allocation.initOut)
class SendOutputDataVisitor(SingleArgumentVisitor):
"""
Exports the send output command for all visited outputs.
"""
def __init__(self):
self.line = ""
def visitAllocation(self, output):
self.__visit(output)
def visitOutput(self, output):
self.__visit(output)
def visitInputOutput(self, arg):
self.__visit(arg)
def export(self, doc):
if self.line != "":
doc.line("provider.sendOutputData({0});".format(self.line))
def __visit(self, arg):
if self.line == "":
self.line = "{0}OutMapper".format(arg.ident)
else:
self.line += " && {0}OutMapper".format(arg.ident)
    class EnumConversionDefVisitor(MethodGenerator.DocVisitor):
        """
        Exports the function which converts an enumeration value to its
        OpenCV value for each visited enumeration parameter.
        """
        def __init__(self, doc, m):
            # m: the method whose class name qualifies the function definitions
            super(OpImplGenerator.EnumConversionDefVisitor, self).__init__(doc)
            self.m = m
        def visitParameter(self, parameter):
            # only enumeration parameters get a conversion function
            if parameter.argType != package.ArgType.ENUM:
                return
            name = parameter.ident.className()
            l = ("int {1}::convert{0}(const runtime::Enum & value)"
                ).format(name, self.m.ident.className())
            self.doc.line(l)
            self.doc.scopeEnter()
            self.doc.line("switch(int(value))")
            self.doc.scopeEnter()
            # one case per enumeration value, mapping to the OpenCV constant
            for desc in parameter.descriptions:
                self.doc.label("case {0}".format(desc.ident))
                self.doc.line("return {0};".format(desc.cvIdent))
            self.doc.label("default")
            self.doc.line(("throw runtime::WrongParameterValue(parameter({0}),"
                           " *this);").format(parameter.ident.constant()))
            self.doc.scopeExit()
            self.doc.scopeExit()
            self.doc.blank()
def generate(self):
self.__includes()
self.namespaceEnter()
self.__statics()
self.__constructor()
self.__getParameter()
self.__setParameter()
self.__setupInitParameters()
self.__setupParameters()
self.__setupInputs()
self.__setupOutputs()
self.__initialize()
self.__execute()
self.__convertEnumValues()
self.namespaceExit()
filename = "stromx/{0}/{1}.cpp".format(self.p.ident,
self.m.ident.className())
with file(filename, "w") as f:
f.write(self.doc.string())
def __includes(self):
cvModule = str(self.p.ident)[2:]
self.doc.line('#include "stromx/{0}/{1}.h"'\
.format(self.p.ident, self.m.ident.className()))
self.doc.blank()
self.doc.line('#include "stromx/{0}/Locale.h"'.format(self.p.ident))
self.doc.line('#include "stromx/{0}/Utility.h"'.format(self.p.ident))
self.doc.line('#include <stromx/cvsupport/Image.h>')
self.doc.line('#include <stromx/cvsupport/Matrix.h>')
self.doc.line('#include <stromx/cvsupport/Utilities.h>')
self.doc.line('#include <stromx/runtime/DataContainer.h>')
self.doc.line('#include <stromx/runtime/DataProvider.h>')
self.doc.line('#include <stromx/runtime/Id2DataComposite.h>')
self.doc.line('#include <stromx/runtime/Id2DataPair.h>')
self.doc.line('#include <stromx/runtime/ReadAccess.h>')
self.doc.line('#include <stromx/runtime/VariantComposite.h>')
self.doc.line('#include <stromx/runtime/WriteAccess.h>')
self.doc.line('#include <opencv2/{0}/{0}.hpp>'.format(cvModule))
self.doc.blank()
def __statics(self):
method = self.m.ident.className()
package = self.p.ident.upper()
self.doc.line(("const std::string {0}::PACKAGE(STROMX_{1}_PACKAGE_"
"NAME);").format(method, package))
self.doc.line(("const runtime::Version {0}::VERSION("
"STROMX_{1}_VERSION_MAJOR, STROMX_{1}_VERSION_MINOR, "
"STROMX_{1}_VERSION_PATCH);".format(method, package)))
self.doc.line('const std::string {0}::TYPE("{0}");'.format(method))
self.doc.blank()
    def __constructor(self):
        """Generate the constructor with its member initialization list."""
        self.doc.line("{0}::{0}()".format(self.m.ident.className()))
        self.doc.line("  : runtime::OperatorKernel(TYPE, PACKAGE, VERSION, "
                      "setupInitParameters()),")
        self.doc.increaseIndent()
        # one initializer per parameter data member
        v = OpImplGenerator.ParameterInitVisitor()
        self.visitAll(v)
        v.export(self.doc)
        self.doc.decreaseIndent()
        self.doc.scopeEnter()
        self.doc.scopeExit()
        self.doc.blank()
    def __getParameter(self):
        """Generate getParameter() returning the stored parameter values."""
        self.doc.line("const runtime::DataRef {0}::getParameter"
                      "(unsigned int id) const"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("switch(id)")
        self.doc.scopeEnter()
        # one case section per parameter
        v = OpImplGenerator.GetParametersVisitor(self.doc)
        self.visitAll(v)
        self.doc.label("default")
        self.doc.line("throw runtime::WrongParameterId(id, *this);")
        self.doc.scopeExit()
        self.doc.scopeExit()
        self.doc.blank()
    def __setParameter(self):
        """Generate setParameter() which validates and stores a new value."""
        self.doc.line("void {0}::setParameter"
                      "(unsigned int id, const runtime::Data& value)"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("try")
        self.doc.scopeEnter()
        self.doc.line("switch(id)")
        self.doc.scopeEnter()
        # one case section per parameter
        v = OpImplGenerator.SetParametersVisitor(self.doc)
        self.visitAll(v)
        self.doc.label("default")
        self.doc.line("throw runtime::WrongParameterId(id, *this);")
        self.doc.scopeExit()
        self.doc.scopeExit()
        # a failed data_cast<>() in any case section is mapped to a
        # WrongParameterType exception
        self.doc.line("catch(runtime::BadCast&)")
        self.doc.scopeEnter()
        self.doc.line("throw runtime::WrongParameterType(parameter(id), *this);")
        self.doc.scopeExit()
        self.doc.scopeExit()
        self.doc.blank()
    def __setupInitParameters(self):
        """Generate setupInitParameters()."""
        self.doc.line("const std::vector<const runtime::Parameter*> "
                      "{0}::setupInitParameters()"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("std::vector<const runtime::Parameter*> parameters;")
        self.doc.blank()
        # the option parameter is only exported if the method has more
        # than one option to choose from
        if len(self.m.options) > 1:
            v = OpImplGenerator.SetupParametersVisitor(self.doc, isInit = True)
            self.optionParam.accept(v)
        self.doc.line("return parameters;")
        self.doc.scopeExit()
        self.doc.blank()
    def __setupParameters(self):
        """Generate setupParameters(); one case section per method option."""
        self.doc.line("const std::vector<const runtime::Parameter*> "
                      "{0}::setupParameters()"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("std::vector<const runtime::Parameter*> parameters;")
        self.doc.blank()
        # the exported parameters depend on the currently selected option
        self.doc.line("switch(int({0}))".format(
            self.optionParam.ident.attribute()))
        self.doc.scopeEnter()
        for o in self.m.options:
            self.doc.label("case({0})".format(o.ident.constant()))
            self.doc.scopeEnter()
            v = OpImplGenerator.SetupParametersVisitor(self.doc)
            for arg in o.args:
                arg.accept(v)
            self.doc.scopeExit()
            self.doc.line("break;")
        self.doc.scopeExit()
        self.doc.blank()
        self.doc.line("return parameters;")
        self.doc.scopeExit()
        self.doc.blank()
    def __setupInputs(self):
        """Generate setupInputs(); one case section per method option."""
        self.doc.line("const std::vector<const runtime::Description*> "
                      "{0}::setupInputs()"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("std::vector<const runtime::Description*> inputs;")
        self.doc.blank()
        # the exported inputs depend on the currently selected option
        self.doc.line("switch(int({0}))".format(
            self.optionParam.ident.attribute()))
        self.doc.scopeEnter()
        for o in self.m.options:
            self.doc.label("case({0})".format(o.ident.constant()))
            self.doc.scopeEnter()
            v = OpImplGenerator.SetupInputsVisitor(self.doc)
            for arg in o.args:
                arg.accept(v)
            self.doc.scopeExit()
            self.doc.line("break;")
        self.doc.scopeExit()
        self.doc.blank()
        self.doc.line("return inputs;")
        self.doc.scopeExit()
        self.doc.blank()
    def __setupOutputs(self):
        """Generate setupOutputs(); one case section per method option."""
        self.doc.line("const std::vector<const runtime::Description*> "
                      "{0}::setupOutputs()"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("std::vector<const runtime::Description*> outputs;")
        self.doc.blank()
        # the exported outputs depend on the currently selected option
        self.doc.line("switch(int({0}))".format(
            self.optionParam.ident.attribute()))
        self.doc.scopeEnter()
        for o in self.m.options:
            self.doc.label("case({0})".format(o.ident.constant()))
            self.doc.scopeEnter()
            v = OpImplGenerator.SetupOutputsVistor(self.doc)
            self.visitOption(o, v)
            self.doc.scopeExit()
            self.doc.line("break;")
        self.doc.scopeExit()
        self.doc.blank()
        self.doc.line("return outputs;")
        self.doc.scopeExit()
        self.doc.blank()
    def __initialize(self):
        """Generate initialize() which registers the option dependent setup."""
        self.doc.line("void {0}::initialize()"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("runtime::OperatorKernel::initialize(setupInputs(), "
                      "setupOutputs(), setupParameters());")
        self.doc.scopeExit()
        self.doc.blank()
    def __execute(self):
        """Generate execute() which runs the wrapped OpenCV function.

        For each method option a case section is generated which receives
        the input data, checks and casts it, converts it to OpenCV types,
        calls the wrapped function and finally sends the output data.
        """
        self.doc.line("void {0}::execute(runtime::DataProvider & provider)"\
                      .format(self.m.ident.className()))
        self.doc.scopeEnter()
        self.doc.line("switch(int({0}))".format(
            self.optionParam.ident.attribute()))
        self.doc.scopeEnter()
        for o in self.m.options:
            self.doc.label("case({0})".format(o.ident.constant()))
            self.doc.scopeEnter()
            # input mappers and the receive command
            v = OpImplGenerator.InputMapperVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            v = OpImplGenerator.ReceiveInputDataVisitor()
            self.visitOption(o, v)
            v.export(self.doc)
            self.doc.blank()
            # data pointers and read/write accesses
            v = OpImplGenerator.InDataVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            v = OpImplGenerator.AccessVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            v = OpImplGenerator.CopyWriteAccessVisitor()
            self.visitOption(o, v)
            v.export(self.doc)
            # variant checks and casts to concrete stromx types
            v = OpImplGenerator.CheckVariantVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            v = OpImplGenerator.CastedDataVisitor(self.doc)
            self.visitOption(o, v)
            v = OpImplGenerator.CheckCastedDataVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            # optional hand-written input check of the option
            if o.inputCheck != None:
                self.doc.document(o.inputCheck)
                self.doc.blank()
            v = OpImplGenerator.InitInVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            # conversion to OpenCV types and the actual function call
            v = OpImplGenerator.CvDataVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            v = OpImplGenerator.MethodReturnValueVisitor()
            self.visitOption(o, v)
            retVal = v.export()
            v = OpImplGenerator.MethodArgumentVisitor()
            self.visitOption(o, v)
            argStr = v.export()
            namespace = ""
            if self.m.namespace != "":
                namespace = "{0}::".format(self.m.namespace)
            self.doc.line("{3}{2}{0}({1});".format(self.m.ident, argStr,
                                                   namespace, retVal))
            # optional hand-written post-call code of the option
            if o.postCall != None:
                self.doc.document(o.postCall)
            self.doc.blank()
            # wrap the results into containers and send them
            v = OpImplGenerator.OutDataVisitor(self.doc)
            self.visitOption(o, v)
            self.doc.blank()
            v = OpImplGenerator.InitOutVisitor(self.doc)
            self.visitOption(o, v)
            v = OpImplGenerator.SendOutputDataVisitor()
            self.visitOption(o, v)
            v.export(self.doc)
            self.doc.scopeExit()
            self.doc.line("break;")
        self.doc.scopeExit()
        self.doc.scopeExit()
        self.doc.blank()
    def __convertEnumValues(self):
        """Generate the definitions of the enum conversion functions."""
        v = OpImplGenerator.EnumConversionDefVisitor(self.doc, self.m)
        self.visitAll(v, False)
class OpTestGenerator(object):
    """
    Abstract base class of all generators which output operator tests.

    Subclasses must provide ``self.m`` (the method being generated), whose
    options each carry an ``ident`` and a list of ``tests``.
    """
    def testNames(self):
        """Return the generated test method names.

        One name per test of each option, following the pattern
        ``test<OptionClassName><index>``.
        """
        # Comprehension instead of the manual append loop; also avoids the
        # ambiguous single-letter local `l`.
        return ["test{0}{1}".format(o.ident.className(), i)
                for o in self.m.options
                for i in range(len(o.tests))]
class OpTestHeaderGenerator(MethodGenerator, OpTestGenerator):
    """
    Generates the header of an operator test.
    """
    def generate(self):
        """Assemble the CppUnit test header and write it to disk."""
        self.__includeGuardEnter()
        self.__includes()
        self.namespaceEnter()
        self.__classEnter()
        self.__testSuite()
        self.doc.blank()
        self.doc.label("public")
        self.__constructor()
        self.doc.line("void setUp();")
        self.doc.line("void tearDown();")
        self.doc.blank()
        self.doc.label("protected")
        self.__testMethods()
        self.doc.blank()
        self.doc.label("private")
        self.doc.line("runtime::OperatorTester* m_operator;")
        self.__classExit()
        self.namespaceExit()
        self.__includeGuardExit()
        filename = "stromx/{0}/test/{1}Test.h".format(self.p.ident,
                                                      self.m.ident.className())
        # open() instead of the Python 2-only builtin file(); identical
        # behavior on Python 2 and keeps the generator working on Python 3.
        with open(filename, "w") as f:
            f.write(self.doc.string())
    def __includeGuardEnter(self):
        # Opens the classic #ifndef/#define include guard.
        self.doc.line("#ifndef {0}".format(self.__includeGuard()))
        self.doc.line("#define {0}".format(self.__includeGuard()))
        self.doc.blank()
    def __includes(self):
        # Headers the generated test header depends on.
        self.doc.line('#include "stromx/{0}/Config.h"'.format(self.p.ident))
        self.doc.blank()
        self.doc.line('#include <cppunit/extensions/HelperMacros.h>')
        self.doc.line('#include <cppunit/TestFixture.h>')
        self.doc.blank()
        self.doc.line('#include "stromx/runtime/OperatorTester.h"')
        self.doc.blank()
    def __includeGuardExit(self):
        self.doc.line("#endif // {0}".format(self.__includeGuard()))
    def __includeGuard(self):
        # Guard symbol, e.g. STROMX_CVSUPPORT_BLURTEST_H.
        return "STROMX_{0}_{1}TEST_H".format(self.p.ident.upper(),
                                             self.m.ident.upper())
    def __classEnter(self):
        self.doc.line((
            "class {0}Test : public CPPUNIT_NS::TestFixture"
        ).format(self.m.ident.className()))
        self.doc.line("{")
        self.doc.increaseIndent()
    def __testSuite(self):
        # Declare the CppUnit suite with one entry per test name.
        self.doc.line((
            "CPPUNIT_TEST_SUITE({0}Test);"
        ).format(self.m.ident.className()))
        for test in self.testNames():
            self.doc.line("CPPUNIT_TEST({0});".format(test))
        self.doc.line("CPPUNIT_TEST_SUITE_END();")
    def __constructor(self):
        self.doc.line((
            "{0}Test() : m_operator(0) {{}}"
        ).format(self.m.ident.className()))
    def __testMethods(self):
        for test in self.testNames():
            self.doc.line("void {0}();".format(test))
    def __classExit(self):
        self.doc.decreaseIndent()
        self.doc.line("};")
class OpTestImplGenerator(MethodGenerator, OpTestGenerator):
    """
    Generates the implementation of an operator test.
    """
    def __includes(self):
        # Headers the generated test implementation depends on.
        self.doc.line((
            '#include "stromx/{0}/test/{1}Test.h"'
        ).format(self.p.ident, self.m.ident.className()))
        self.doc.blank()
        self.doc.line('#include <boost/lexical_cast.hpp>')
        self.doc.line('#include <stromx/runtime/OperatorException.h>')
        self.doc.line('#include <stromx/runtime/ReadAccess.h>')
        self.doc.line('#include "stromx/cvsupport/Image.h"')
        self.doc.line((
            '#include "stromx/{0}/{1}.h"'
        ).format(self.p.ident, self.m.ident.className()))
        self.doc.blank()
    def __testSuite(self):
        # Register the suite with CppUnit.
        self.doc.line((
            "CPPUNIT_TEST_SUITE_REGISTRATION (stromx::{0}::{1}Test);"
        ).format(self.p.ident, self.m.ident.className()))
        self.doc.blank()
    def __setUp(self):
        className = self.m.ident.className()
        self.doc.line("void {0}Test::setUp()".format(className))
        self.doc.scopeEnter()
        self.doc.line((
            "m_operator = new stromx::runtime::OperatorTester(new {0});"
        ).format(self.m.ident.className()))
        self.doc.scopeExit()
        self.doc.blank()
    def __tearDown(self):
        className = self.m.ident.className()
        self.doc.line("void {0}Test::tearDown()".format(className))
        self.doc.scopeEnter()
        self.doc.line("delete m_operator;")
        self.doc.scopeExit()
        self.doc.blank()
    def __testMethods(self):
        # Emit one test method per (option, test) pair, matching testNames().
        className = self.m.ident.className()
        for o in self.m.options:
            for i, test in enumerate(o.tests):
                testName = "test{0}{1}".format(o.ident.className(), i)
                self.doc.line(
                    "void {0}Test::{1}()".format(className, testName)
                )
                self.doc.scopeEnter()
                # The data-flow parameter only exists when the operator has
                # more than one option.
                if len(self.m.options) > 1:
                    index = "{0}::DATA_FLOW".format(self.m.ident.className())
                    value = (
                        "runtime::Enum({0}::{1})"
                    ).format(self.m.ident.className(), o.ident.constant())
                    paramLine = "m_operator->setParameter({0}, {1});".format(
                        index, value)
                    self.doc.line(paramLine)
                self.doc.line("m_operator->initialize();")
                self.doc.line("m_operator->activate();")
                self.doc.blank()  # fixed: removed stray trailing semicolon
                testgenerator.generate(self.doc, self.m, o.args,
                                       test, testName)
                self.doc.scopeExit()
                self.doc.blank()
    def generate(self):
        """Assemble the CppUnit test implementation and write it to disk."""
        self.__includes()
        self.__testSuite()
        self.namespaceEnter()
        self.__setUp()
        self.__tearDown()
        self.__testMethods()
        self.namespaceExit()
        filename = "stromx/{0}/test/{1}Test.cpp".format(self.p.ident,
                                                        self.m.ident.className())
        # open() instead of the Python 2-only builtin file() for Python 3
        # compatibility.
        with open(filename, "w") as f:
            f.write(self.doc.string())
def generateMethodFiles(package, method):
    """
    Generates the operator and the operator tests for the given method.

    Runs the header, implementation, test-header and test-implementation
    generators in that order, each writing its own file.
    """
    generators = (
        OpHeaderGenerator(),
        OpImplGenerator(),
        OpTestHeaderGenerator(),
        OpTestImplGenerator(),
    )
    for generator in generators:
        generator.save(package, method)
if __name__ == "__main__":
import doctest
doctest.testmod() | {
"content_hash": "30e6012555d31549f582f12b6c4f2c1b",
"timestamp": "",
"source": "github",
"line_count": 1619,
"max_line_length": 94,
"avg_line_length": 37.883261272390364,
"alnum_prop": 0.519149560595438,
"repo_name": "sparsebase/stromx",
"id": "7e4f86c41abe18bb8ba8c4d66458da599e72ec14",
"size": "61358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencv/methodgenerator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2882239"
},
{
"name": "CMake",
"bytes": "63121"
},
{
"name": "Python",
"bytes": "215111"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.views.generic.edit import CreateView
from . import models
import ipdb
class ZeigenUndEintragen(CreateView):
    """Displays the (so far only) list and lets the user add lines to it."""
    template_name = 'Notizen/liste.html'
    model = models.Zeile
    fields = ['autor_name', 'text']
    context_object_name = 'liste'
    def get_success_url(self, *args, **kwargs):
        # After a successful submission, redirect back to the list view.
        return reverse('Notizen:liste')
    def render_to_response(self, context, **kwargs):
        """Returns a TemplateResponse instance.

        Additionally passes a list of all lines to the template; later only
        the lines belonging to one models.Liste should be passed.
        """
        response = super().render_to_response(context, **kwargs)
        # Pass the list of lines to the template.
        response.context_data.update([
            ('liste', models.Zeile.objects.all())
        ])
        return response
    def form_valid(self, form):
        """Sets autor and liste on the note line.

        form.instance was created before this call but has not yet been
        written to the db (it would not be valid); the null=False fields
        must be set first.
        """
        form.instance.autor_id = self.request.user.pk or 1 # fallback for AnonymousUser
        form.instance.liste_id = int(self.kwargs['liste_id'])
        form.instance.save()
        return super().form_valid(form)
| {
"content_hash": "fdda3554210e6dd56fc196a9cdd9643e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 76,
"avg_line_length": 38.83783783783784,
"alnum_prop": 0.6610995128740431,
"repo_name": "wmles/olymp",
"id": "515a01581f2bb6423649cb6dc5a2fdeec8490159",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Notizen/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40339"
},
{
"name": "HTML",
"bytes": "10515"
},
{
"name": "Python",
"bytes": "73220"
}
],
"symlink_target": ""
} |
import logging
import os
import UserDict
import yaml
CONTOUR_YAML_NAMES = ['contour.yaml', 'contour.yml']
# TODO: Handle other configuration formats.
# TODO: Handle persitent storage of configuration.
# Exception hierarchy for configuration loading; docstrings updated to name
# the actual file (contour.yaml, see CONTOUR_YAML_NAMES) instead of config.yaml.
class MissingConfigurationError(Exception):
    """Missing configuration option."""
class BadModulePathError(Exception):
    """Invalid module path."""
class InvalidYamlFile(Exception):
    """The contour.yaml file is invalid yaml."""
class EmptyYamlFile(Exception):
    """The contour.yaml file is empty."""
class MissingYamlFile(Exception):
    """contour.yaml cannot be found."""
class Contour(UserDict.IterableUserDict):
    """Read-only, dict-like view over a merged yaml configuration.

    Values come from ``defaults``, overridden by a base config, overridden in
    turn by an optional "local" config. All mutating dict operations raise
    TypeError so the loaded configuration stays immutable.
    """
    def __init__(self, config_name=None, config_path=None,
                 local_config_name=None, local_config_path=None,
                 defaults=None):
        # Old-style (Python 2) UserDict base: seed self.data with defaults.
        UserDict.IterableUserDict.__init__(self, dict=defaults)
        self._load_config(config_name, config_path)
        # Override with the local config if it exists.
        self._load_config(local_config_name, local_config_path)
    def _load_by_path(self, path):
        # Read and parse the yaml at `path` and merge it into self.data.
        data = _load_yaml_config(path)
        options = _parse_yaml_config(data)
        self.data.update(options)
    def _load_config(self, config_name=None, config_path=None):
        # An explicit path wins; config_name is ignored when a path is given.
        if not config_path and not config_name:
            return
        # Load by path if it exists.
        if config_path:
            self._load_by_path(config_path)
            return
        # Load by name if it exists.
        path = find_contour_yaml(names=[config_name])
        self._load_by_path(path)
    def load(self, option_key, default=None):
        """Import and return the module named by the option's value.

        Returns None when the option is unset or falsy.
        """
        option = self.get(option_key, default)
        if not option:
            return
        return module_import(option)
    @classmethod
    def load_option(cls, option):
        # TODO: Load the option, could be a module, function or class.
        return
    # The configuration is read-only: every mutating dict operation fails.
    def __setitem__(self, key, item):
        raise TypeError
    def __delitem__(self, key):
        raise TypeError
    def clear(self):
        raise TypeError
    def pop(self, key, *args):
        raise TypeError
    def popitem(self):
        raise TypeError
def module_import(module_path):
    """Return the module object named by ``module_path``.

    Args:
        module_path: dotted module path such as 'app.config' or
            'app.extras.my_module'

    Returns:
        the module matching the last component, i.e. for
        'app.extras.my_module' a reference to my_module

    Raises:
        BadModulePathError: if the module is not found
    """
    try:
        # ``__import__`` returns the top-level package, so walk down the
        # dotted path to reach the leaf module.
        module = __import__(module_path)
        for attribute in module_path.split('.')[1:]:
            module = getattr(module, attribute)
        return module
    except ImportError:
        raise BadModulePathError(
            'Unable to find module "%s".' % (module_path,))
def find_contour_yaml(config_file=__file__, names=None):
    """
    Traverse directory trees to find a contour.yaml file.

    Begins with the location of this file, then falls back to the current
    working directory if nothing was found.

    Args:
        config_file: location of this file, override for testing
        names: optional extra file names to search for

    Returns:
        the path of contour.yaml or None if not found
    """
    checked = set()
    # `checked` is shared between the two walks so directories already
    # visited during the first walk are not re-scanned by the second.
    for start in (os.path.dirname(config_file), os.getcwd()):
        found = _find_countour_yaml(start, checked, names=names)
        if found:
            return found
    return None
def _find_countour_yaml(start, checked, names=None):
    """Walk up the directory tree from ``start`` looking for a contour yaml.

    The walk stops as soon as a candidate file exists or a directory already
    in ``checked`` is reached; ``checked`` both makes termination easy to
    reason about and prevents re-scanning directories.

    Args:
        start: the path to start looking in and work upward from
        checked: the set of already checked directories
        names: optional extra file names; names without an extension are
            also tried with ".yaml" and ".yml" appended

    Returns:
        the path of the countour.yaml file or None if it is not found
    """
    extensions = []
    for name in (names or []):
        # Expand extensionless names to both supported suffixes.
        if not os.path.splitext(name)[1]:
            extensions.append(name + ".yaml")
            extensions.append(name + ".yml")
    yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions
    directory = start
    while directory not in checked:
        checked.add(directory)
        for candidate in yaml_names:
            candidate_path = os.path.join(directory, candidate)
            if os.path.exists(candidate_path):
                return candidate_path
        # os.path.dirname of the root is the root itself, so the `checked`
        # test above terminates the loop at the filesystem root.
        directory = os.path.dirname(directory)
    return None
def _load_yaml_config(path=None):
"""Open and return the yaml contents."""
countour_yaml_path = path or find_contour_yaml()
if countour_yaml_path is None:
logging.debug("countour.yaml not found.")
return None
with open(countour_yaml_path) as yaml_file:
return yaml_file.read()
def _parse_yaml_config(config_data=None):
    """
    Gets the configuration from the found countour.yaml
    file and parses the data.

    Args:
        config_data: optional raw yaml text; when omitted, the contour.yaml
            file is located and read from disk.

    Returns:
        a dictionary parsed from the yaml file (empty if no config exists)

    Raises:
        InvalidYamlFile: if the yaml parses to something other than a
            mapping or an empty document.
    """
    data_map = {}
    # If we were given config data to use, use it. Otherwise, see if there is
    # a countour.yaml to read the config from.
    config_data = config_data or _load_yaml_config()
    if config_data is None:
        logging.debug("No custom countour config, using default config.")
        return data_map
    config = yaml.safe_load(config_data)
    # If there was a valid custom config, it will be a dict. Otherwise,
    # ignore it.
    if isinstance(config, dict):
        # Apply the custom config over the default config. This allows us to
        # extend functionality without breaking old stuff.
        data_map.update(config)
    elif config is not None:
        # Fixed: this branch was `elif not None:`, which is always true, so
        # even an empty yaml document (safe_load returns None) raised —
        # contradicting the "ignore it" comment above. Only non-dict,
        # non-empty yaml (e.g. a list or scalar) is an error.
        raise InvalidYamlFile("The countour.yaml file "
                              "is invalid yaml")
    return data_map
| {
"content_hash": "71c8f0a8e8d8aa016b3110f6954d4cf3",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 78,
"avg_line_length": 27.316239316239315,
"alnum_prop": 0.6290675844806007,
"repo_name": "lyddonb/contour",
"id": "91a8b80d88450a25078d3e25040faf30996ac9fb",
"size": "6392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contour/contour.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12742"
}
],
"symlink_target": ""
} |
"""Functions for interpolating `GridVariables`s."""
from typing import Callable, Optional, Tuple, Union
import jax
import jax.numpy as jnp
from jax_cfd.base import boundaries
from jax_cfd.base import grids
import numpy as np
# Shorthand type aliases for the grid data structures used throughout
# this module.
Array = Union[np.ndarray, jnp.DeviceArray]
GridArray = grids.GridArray
GridArrayVector = grids.GridArrayVector
GridVariable = grids.GridVariable
GridVariableVector = grids.GridVariableVector
# Signature shared by all interpolation functions in this module:
# (c, target_offset, velocity_field, dt) -> interpolated GridVariable.
InterpolationFn = Callable[
    [GridVariable, Tuple[float, ...], GridVariableVector, float],
    GridVariable]
# Maps a ratio-of-consecutive-gradients array to per-point limiter weights.
FluxLimiter = Callable[[grids.Array], grids.Array]
def _linear_along_axis(c: GridVariable,
                       offset: float,
                       axis: int) -> GridVariable:
  """Linear interpolation of `c` to `offset` along a single specified `axis`."""
  offset_delta = offset - c.offset[axis]
  # If offsets are the same, `c` is unchanged.
  if offset_delta == 0:
    return c
  new_offset = tuple(offset if j == axis else o
                     for j, o in enumerate(c.offset))
  # If offsets differ by an integer, we can just shift `c`.
  if int(offset_delta) == offset_delta:
    return grids.GridVariable(
        array=grids.GridArray(data=c.shift(int(offset_delta), axis).data,
                              offset=new_offset,
                              grid=c.grid),
        bc=c.bc)
  # Fractional offset: convex combination of the two bracketing shifts,
  # weighted by the distance to each.
  floor = int(np.floor(offset_delta))
  ceil = int(np.ceil(offset_delta))
  floor_weight = ceil - offset_delta
  ceil_weight = 1. - floor_weight
  data = (floor_weight * c.shift(floor, axis).data +
          ceil_weight * c.shift(ceil, axis).data)
  return grids.GridVariable(
      array=grids.GridArray(data, new_offset, c.grid), bc=c.bc)
def linear(
    c: GridVariable,
    offset: Tuple[float, ...],
    v: Optional[GridVariableVector] = None,
    dt: Optional[float] = None
) -> grids.GridVariable:
  """Multi-linear interpolation of `c` to `offset`.

  Applies one-dimensional linear interpolation along each axis in turn.

  Args:
    c: quantity to be interpolated.
    offset: offset to which we will interpolate `c`. Must have the same length
      as `c.offset`.
    v: velocity field. Not used.
    dt: size of the time step. Not used.

  Returns:
    A `GridVariable` containing the values of `c` after linear interpolation
    to `offset`. The returned value will have offset equal to `offset`.
  """
  del v, dt  # unused
  if len(offset) != len(c.offset):
    raise ValueError('`c.offset` and `offset` must have the same length;'
                     f'got {c.offset} and {offset}.')
  result = c
  for axis, target_offset in enumerate(offset):
    result = _linear_along_axis(result, offset=target_offset, axis=axis)
  return result
def upwind(
    c: GridVariable,
    offset: Tuple[float, ...],
    v: GridVariableVector,
    dt: Optional[float] = None
) -> GridVariable:
  """Upwind interpolation of `c` to `offset` based on velocity field `v`.

  Interpolates values of `c` to `offset` in two steps:
  1) Identifies the axis along which `c` is interpolated. (must be single axis)
  2) For positive (negative) velocity along interpolation axis uses value from
     the previous (next) cell along that axis correspondingly.

  Args:
    c: quantity to be interpolated.
    offset: offset to which `c` will be interpolated. Must have the same
      length as `c.offset` and differ in at most one entry.
    v: velocity field with offsets at faces of `c`. One of the components
      must have the same offset as `offset`.
    dt: size of the time step. Not used.

  Returns:
    A `GridVariable` that contains the values of `c` after interpolation to
    `offset`.

  Raises:
    InconsistentOffsetError: if `offset` and `c.offset` differ in more than one
    entry.
  """
  del dt  # unused
  if c.offset == offset: return c
  # Find the (single) axis whose offset actually changes.
  interpolation_axes = tuple(
      axis for axis, (current, target) in enumerate(zip(c.offset, offset))
      if current != target
  )
  if len(interpolation_axes) != 1:
    raise grids.InconsistentOffsetError(
        f'for upwind interpolation `c.offset` and `offset` must differ at most '
        f'in one entry, but got: {c.offset} and {offset}.')
  axis, = interpolation_axes
  u = v[axis]
  offset_delta = u.offset[axis] - c.offset[axis]
  # If offsets differ by an integer, we can just shift `c`.
  if int(offset_delta) == offset_delta:
    return grids.GridVariable(
        array=grids.GridArray(data=c.shift(int(offset_delta), axis).data,
                              offset=offset,
                              grid=grids.consistent_grid(c, u)),
        bc=c.bc)
  floor = int(np.floor(offset_delta))
  ceil = int(np.ceil(offset_delta))
  # Per-point upstream selection: the cell behind the flow for positive
  # velocity, the cell ahead for negative velocity.
  array = grids.applied(jnp.where)(
      u.array > 0, c.shift(floor, axis).data, c.shift(ceil, axis).data
  )
  grid = grids.consistent_grid(c, u)
  # NOTE(review): the result is returned with periodic BCs regardless of
  # c.bc, while the integer-shift fast path above preserves c.bc — confirm
  # this asymmetry is intended.
  return grids.GridVariable(
      array=grids.GridArray(array.data, offset, grid),
      bc=boundaries.periodic_boundary_conditions(grid.ndim))
def lax_wendroff(
    c: GridVariable,
    offset: Tuple[float, ...],
    v: Optional[GridVariableVector] = None,
    dt: Optional[float] = None
) -> GridVariable:
  """Lax_Wendroff interpolation of `c` to `offset` based on velocity field `v`.

  Interpolates values of `c` to `offset` in two steps:
  1) Identifies the axis along which `c` is interpolated. (must be single axis)
  2) For positive (negative) velocity along interpolation axis uses value from
     the previous (next) cell along that axis plus a correction originating
     from expansion of the solution at the half step-size.

  This method is second order accurate with fixed coefficients and hence can't
  be monotonic due to Godunov's theorem.
  https://en.wikipedia.org/wiki/Godunov%27s_theorem

  Lax-Wendroff method can be used to form monotonic schemes when augmented with
  a flux limiter. See https://en.wikipedia.org/wiki/Flux_limiter

  Args:
    c: quantity to be interpolated.
    offset: offset to which we will interpolate `c`. Must have the same
      length as `c.offset` and differ in at most one entry.
    v: velocity field with offsets at faces of `c`. One of the components must
      have the same offset as `offset`. Required despite the Optional type.
    dt: size of the time step, used to compute the Courant numbers.

  Returns:
    A `GridVariable` that contains the values of `c` after interpolation to
    `offset`.

  Raises:
    InconsistentOffsetError: if `offset` and `c.offset` differ in more than one
    entry.
  """
  # TODO(dkochkov) add a function to compute interpolation axis.
  if c.offset == offset: return c
  interpolation_axes = tuple(
      axis for axis, (current, target) in enumerate(zip(c.offset, offset))
      if current != target
  )
  if len(interpolation_axes) != 1:
    raise grids.InconsistentOffsetError(
        f'for Lax-Wendroff interpolation `c.offset` and `offset` must differ at'
        f' most in one entry, but got: {c.offset} and {offset}.')
  axis, = interpolation_axes
  u = v[axis]
  offset_delta = u.offset[axis] - c.offset[axis]
  floor = int(np.floor(offset_delta))  # used for positive velocity
  ceil = int(np.ceil(offset_delta))  # used for negative velocity
  grid = grids.consistent_grid(c, u)
  # Local CFL number per point; scales the second-order correction.
  courant_numbers = (dt / grid.step[axis]) * u.data
  positive_u_case = (
      c.shift(floor, axis).data + 0.5 * (1 - courant_numbers) *
      (c.shift(ceil, axis).data - c.shift(floor, axis).data))
  negative_u_case = (
      c.shift(ceil, axis).data - 0.5 * (1 + courant_numbers) *
      (c.shift(ceil, axis).data - c.shift(floor, axis).data))
  array = grids.where(u.array > 0, positive_u_case, negative_u_case)
  # NOTE(review): `grid` was already computed above; this recomputation is
  # redundant but harmless.
  grid = grids.consistent_grid(c, u)
  # NOTE(review): as in `upwind`, the result gets periodic BCs regardless of
  # c.bc — confirm this is intended.
  return grids.GridVariable(
      array=grids.GridArray(array.data, offset, grid),
      bc=boundaries.periodic_boundary_conditions(grid.ndim))
def safe_div(x, y, default_numerator=1):
  """Element-wise division of `Array`s that tolerates zeros in `y`.

  Wherever `y` is zero the divisor is replaced by `default_numerator`, so the
  result there is `x / default_numerator` instead of inf/nan.
  """
  divisor = jnp.where(y != 0, y, default_numerator)
  return x / divisor
def van_leer_limiter(r):
  """Van Leer flux limiter: phi(r) = 2r / (1 + r) for r > 0, else 0."""
  positive_branch = safe_div(2 * r, 1 + r)
  return jnp.where(r > 0, positive_branch, 0.0)
def apply_tvd_limiter(
    interpolation_fn: InterpolationFn,
    limiter: FluxLimiter = van_leer_limiter
) -> InterpolationFn:
  """Combines low and high accuracy interpolators to get TVD method.

  Generates high accuracy interpolator by combining stable low accuracy
  `upwind` interpolation and high accuracy (but not guaranteed to be stable)
  `interpolation_fn` to obtain stable higher order method. This implementation
  follows the procedure outlined in:
  http://www.ita.uni-heidelberg.de/~dullemond/lectures/num_fluid_2012/Chapter_4.pdf

  Args:
    interpolation_fn: higher order interpolation methods. Must follow the same
      interface as other interpolation methods (take `c`, `offset`, `grid`, `v`
      and `dt` arguments and return value of `c` at offset `offset`).
    limiter: flux limiter function that evaluates the portion of the correction
      (high_accuracy - low_accuracy) to add to low_accuracy solution based on
      the ratio of the consecutive gradients. Takes array as input and return
      array of weights. For more details see:
      https://en.wikipedia.org/wiki/Flux_limiter

  Returns:
    Interpolation method that uses a combination of high and low order methods
    to produce monotonic interpolation method.
  """
  def tvd_interpolation(
      c: GridVariable,
      offset: Tuple[float, ...],
      v: GridVariableVector,
      dt: float,
  ) -> GridVariable:
    """Interpolated `c` to offset `offset`."""
    # Interpolate one axis at a time; only forward interpolation by half a
    # cell (to control-volume faces) is supported.
    for axis, axis_offset in enumerate(offset):
      interpolation_offset = tuple([
          c_offset if i != axis else axis_offset
          for i, c_offset in enumerate(c.offset)
      ])
      if interpolation_offset != c.offset:
        if interpolation_offset[axis] - c.offset[axis] != 0.5:
          raise NotImplementedError('tvd_interpolation only supports forward '
                                    'interpolation to control volume faces.')
        # Stable low-order and (possibly unstable) high-order candidates.
        c_low = upwind(c, offset, v, dt)
        c_high = interpolation_fn(c, offset, v, dt)
        # because we are interpolating to the right we are using 2 points ahead
        # and 2 points behind: `c`, `c_left`.
        c_left = c.shift(-1, axis)
        c_right = c.shift(1, axis)
        c_next_right = c.shift(2, axis)
        # Velocities of different sign are evaluated with limiters at different
        # points. See equations (4.34) -- (4.39) from the reference above.
        positive_u_r = safe_div(c.data - c_left.data, c_right.data - c.data)
        negative_u_r = safe_div(c_next_right.data - c_right.data,
                                c_right.data - c.data)
        positive_u_phi = grids.GridArray(
            limiter(positive_u_r), c_low.offset, c.grid)
        negative_u_phi = grids.GridArray(
            limiter(negative_u_r), c_low.offset, c.grid)
        u = v[axis]
        phi = grids.applied(jnp.where)(
            u.array > 0, positive_u_phi, negative_u_phi)
        # Blend: low-order solution plus limited high-order correction.
        c_interpolated = c_low.array - (c_low.array - c_high.array) * phi
        c = grids.GridVariable(
            grids.GridArray(c_interpolated.data, interpolation_offset, c.grid),
            c.bc)
    return c
  return tvd_interpolation
# TODO(pnorgaard) Consider changing c to GridVariable
# Not required since no .shift() method is used
def point_interpolation(
    point: Array,
    c: GridArray,
    order: int = 1,
    mode: str = 'nearest',
    cval: float = 0.0,
) -> jnp.DeviceArray:
  """Interpolate `c` at `point`.

  Args:
    point: length N 1-D Array. The point to interpolate to.
    c: N-dimensional GridArray. The values that will be interpolated.
    order: Integer in the range 0-1. The order of the spline interpolation.
    mode: one of {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}.
      The `mode` parameter determines how the input array is extended
      beyond its boundaries. Default is 'nearest'. Behavior for each valid
      value is as follows:

      'reflect' (`d c b a | a b c d | d c b a`)
          The input is extended by reflecting about the edge of the last
          pixel.

      'constant' (`k k k k | a b c d | k k k k`)
          The input is extended by filling all values beyond the edge with
          the same constant value, defined by the `cval` parameter.

      'nearest' (`a a a a | a b c d | d d d d`)
          The input is extended by replicating the last pixel.

      'mirror' (`d c b | a b c d | c b a`)
          The input is extended by reflecting about the center of the last
          pixel.

      'wrap' (`a b c d | a b c d | a b c d`)
          The input is extended by wrapping around to the opposite edge.
    cval: Value to fill past edges of input if `mode` is 'constant'. Default 0.0

  Returns:
    the interpolated value at `point`.
  """
  point = jnp.asarray(point)
  domain_lower, domain_upper = zip(*c.grid.domain)
  domain_lower = jnp.array(domain_lower)
  domain_upper = jnp.array(domain_upper)
  shape = jnp.array(c.grid.shape)
  offset = jnp.array(c.offset)
  # For each dimension `i` in point,
  # The map from `point[i]` to index is linear.
  #   index(domain_lower[i]) = -offset[i]
  #   index(domain_upper[i]) = shape[i] - offset[i]
  # This is easily vectorized as
  index = (-offset + (point - domain_lower) * shape /
           (domain_upper - domain_lower))
  return jax.scipy.ndimage.map_coordinates(
      c.data, coordinates=index, order=order, mode=mode, cval=cval)
| {
"content_hash": "f075e06f6b515b57ede0217299890516",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 83,
"avg_line_length": 38.46376811594203,
"alnum_prop": 0.6577995478522984,
"repo_name": "google/jax-cfd",
"id": "c531c5d71d48a49fab133e63abcc17c8f1c275f0",
"size": "13846",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_cfd/base/interpolation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7029140"
},
{
"name": "Python",
"bytes": "715552"
}
],
"symlink_target": ""
} |
import unittest
import requests
import lxml.html
import xmlrunner
class TestHtmlTask(unittest.TestCase):
    """Checks availability of the first URL and the page title of the second.

    The two URLs are read from ``urls.txt``, one per line.
    """
    def setUp(self):
        # Use a context manager so the file is closed even if a read fails,
        # and strip the trailing newline that readline() keeps. Previously
        # the unstripped URL was passed to lxml in test_2.
        with open("urls.txt", "r") as urls_file:
            self.url_google = urls_file.readline().strip()
            self.url_habr = urls_file.readline().strip()
    def test_1(self):
        # The first URL must respond with HTTP 200.
        expected_response_1 = 200
        r = requests.get(self.url_google)
        self.assertEqual(r.status_code, expected_response_1)
    def test_2(self):
        # The second URL's title (before the first '/') must match.
        expected_response_2 = "Game Development"
        t = lxml.html.parse(self.url_habr)
        title = t.find(".//title").text.split('/')
        self.assertEqual(title[0].rstrip(), expected_response_2)
if __name__ == '__main__':
    # Emit JUnit-style XML reports into test-reports/ for CI consumption.
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
| {
"content_hash": "b3272b3d20f3c267c87b3bedd564683f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 29.37037037037037,
"alnum_prop": 0.6355611601513241,
"repo_name": "amazpyel/sqa_training",
"id": "8c320fe4ccd5f0ace040d9448ed02fec43d4a85d",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/http_validation/http_checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "165002"
},
{
"name": "Java",
"bytes": "42456"
},
{
"name": "Python",
"bytes": "15895"
}
],
"symlink_target": ""
} |
import rospy
from vortex_msgs.msg import PropulsionCommand, Manipulator
from sensor_msgs.msg import Joy
class JoystickInterfaceNode(object):
    """Translates joystick (sensor_msgs/Joy) messages into ROV commands.

    Subscribes to 'joy_throttle' and republishes each message as a
    PropulsionCommand on 'propulsion_command' and a Manipulator command on
    'manipulator_command'.
    """
    def __init__(self):
        rospy.init_node('joystick_node')
        self.sub = rospy.Subscriber(
            'joy_throttle', Joy, self.callback, queue_size=1)
        self.pub_motion = rospy.Publisher('propulsion_command',
                                          PropulsionCommand,
                                          queue_size=1)
        self.pub_manipulator = rospy.Publisher('manipulator_command',
                                               Manipulator,
                                               queue_size=1)
        # Name buttons and axes based on index from joy-node
        self.buttons_map = ['A', 'B', 'X', 'Y', 'LB', 'RB', 'back',
                            'start', 'power', 'stick_button_left',
                            'stick_button_right']
        self.axes_map = ['horizontal_axis_left_stick',
                         'vertical_axis_left_stick', 'LT',
                         'horizontal_axis_right_stick',
                         'vertical_axis_right_stick', 'RT',
                         'dpad_horizontal', 'dpad_vertical']
    def callback(self, msg):
        """Map one Joy message to manipulator and propulsion commands."""
        # zip() pairs names with values and silently ignores any extra
        # buttons/axes a controller reports beyond the mapped ones; the old
        # range(len(...)) loop raised IndexError in that case.
        buttons = dict(zip(self.buttons_map, msg.buttons))
        axes = dict(zip(self.axes_map, msg.axes))
        manipulator_msg = Manipulator()
        manipulator_msg.claw_direction = axes['dpad_horizontal']
        manipulator_msg.vertical_stepper_direction = axes['dpad_vertical']
        motion_msg = PropulsionCommand()
        motion_msg.motion = [
            axes['vertical_axis_left_stick'],     # Surge
            -axes['horizontal_axis_left_stick'],  # Sway
            (axes['RT'] - axes['LT'])/2,          # Heave
            (buttons['RB'] - buttons['LB']),      # Roll
            -axes['vertical_axis_right_stick'],   # Pitch
            -axes['horizontal_axis_right_stick']  # Yaw
        ]
        motion_msg.control_mode = [
            (buttons['A'] == 1),
            (buttons['X'] == 1),
            (buttons['B'] == 1),
            (buttons['Y'] == 1),
            (False),
            (False)
        ]
        motion_msg.header.stamp = rospy.get_rostime()
        self.pub_manipulator.publish(manipulator_msg)
        self.pub_motion.publish(motion_msg)
if __name__ == '__main__':
    try:
        joystick_node = JoystickInterfaceNode()
        # Block until the node is shut down; callbacks run in the background.
        rospy.spin()
    except rospy.ROSInterruptException:
        # Normal shutdown (Ctrl-C / node kill) — exit quietly.
        pass
| {
"content_hash": "8d0565d2b62f493bc1d6982e2710b3de",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 74,
"avg_line_length": 35.64,
"alnum_prop": 0.5054246165357277,
"repo_name": "vortexntnu/rov-control",
"id": "a430af4ffdda32cc395f06e45834ec1d5d475244",
"size": "2695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joystick_interface/scripts/joystick_interface.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5139"
},
{
"name": "C++",
"bytes": "63650"
},
{
"name": "CMake",
"bytes": "6882"
},
{
"name": "Python",
"bytes": "29832"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Set, Tuple, Type, Union
from .typing import display_as_type
if TYPE_CHECKING:
from .typing import DictStrAny
# explicitly state exports to avoid "from .errors import *" also importing Decimal, Path etc.
__all__ = (
'PydanticTypeError',
'PydanticValueError',
'ConfigError',
'MissingError',
'ExtraError',
'NoneIsNotAllowedError',
'NoneIsAllowedError',
'WrongConstantError',
'NotNoneError',
'BoolError',
'BytesError',
'DictError',
'EmailError',
'UrlError',
'UrlSchemeError',
'UrlSchemePermittedError',
'UrlUserInfoError',
'UrlHostError',
'UrlHostTldError',
'UrlPortError',
'UrlExtraError',
'EnumError',
'IntEnumError',
'EnumMemberError',
'IntegerError',
'FloatError',
'PathError',
'_PathValueError',
'PathNotExistsError',
'PathNotAFileError',
'PathNotADirectoryError',
'PyObjectError',
'SequenceError',
'ListError',
'SetError',
'FrozenSetError',
'TupleError',
'TupleLengthError',
'ListMinLengthError',
'ListMaxLengthError',
'AnyStrMinLengthError',
'AnyStrMaxLengthError',
'StrError',
'StrRegexError',
'_NumberBoundError',
'NumberNotGtError',
'NumberNotGeError',
'NumberNotLtError',
'NumberNotLeError',
'NumberNotMultipleError',
'DecimalError',
'DecimalIsNotFiniteError',
'DecimalMaxDigitsError',
'DecimalMaxPlacesError',
'DecimalWholeDigitsError',
'DateTimeError',
'DateError',
'TimeError',
'DurationError',
'HashableError',
'UUIDError',
'UUIDVersionError',
'ArbitraryTypeError',
'ClassError',
'SubclassError',
'JsonError',
'JsonTypeError',
'PatternError',
'DataclassTypeError',
'CallableError',
'IPvAnyAddressError',
'IPvAnyInterfaceError',
'IPvAnyNetworkError',
'IPv4AddressError',
'IPv6AddressError',
'IPv4NetworkError',
'IPv6NetworkError',
'IPv4InterfaceError',
'IPv6InterfaceError',
'ColorError',
'StrictBoolError',
'NotDigitError',
'LuhnValidationError',
'InvalidLengthForBrand',
'InvalidByteSize',
'InvalidByteSizeUnit',
)
def cls_kwargs(cls: Type['PydanticErrorMixin'], ctx: 'DictStrAny') -> 'PydanticErrorMixin':
    """Reconstruct an error instance from its class and context dict.

    Built-in exceptions like ValueError or TypeError pickle (protocol 2) via
    ``cls.__new__(cls, *args)``; our errors carry only keyword context, so
    ``__reduce__`` delegates to this small module-level constructor instead.
    It cannot be a lambda, as pickle looks the callable up by name.
    """
    return cls(**ctx)
class PydanticErrorMixin:
    """Shared machinery for pydantic's exception types.

    Subclasses define ``code`` and ``msg_template``; any context passed to the
    constructor becomes instance attributes and feeds the message template.
    """
    code: str
    msg_template: str

    def __init__(self, **ctx: Any) -> None:
        # The context dict *is* the instance dict, so every context key is
        # readable as an attribute (e.g. ``err.permitted``).
        self.__dict__ = ctx

    def __str__(self) -> str:
        # format_map formats straight from the instance dict without copying
        # it into keyword arguments.
        return self.msg_template.format_map(self.__dict__)

    def __reduce__(self) -> Tuple[Callable[..., 'PydanticErrorMixin'], Tuple[Type['PydanticErrorMixin'], 'DictStrAny']]:
        # Override built-in exception pickling (cls.__new__(cls, *args)) with
        # a kwargs-based constructor; see cls_kwargs.
        return cls_kwargs, (self.__class__, self.__dict__)
# --- Error hierarchy roots -------------------------------------------------
# Every concrete error below derives from one of these two, so callers can
# catch plain TypeError / ValueError as usual.
class PydanticTypeError(PydanticErrorMixin, TypeError):
    pass
class PydanticValueError(PydanticErrorMixin, ValueError):
    pass
class ConfigError(RuntimeError):
    # Raised for model *configuration* mistakes, not for bad input data.
    pass
class MissingError(PydanticValueError):
    msg_template = 'field required'
class ExtraError(PydanticValueError):
    msg_template = 'extra fields not permitted'
class NoneIsNotAllowedError(PydanticTypeError):
    code = 'none.not_allowed'
    msg_template = 'none is not an allowed value'
class NoneIsAllowedError(PydanticTypeError):
    code = 'none.allowed'
    msg_template = 'value is not none'
class WrongConstantError(PydanticValueError):
    code = 'const'
    def __str__(self) -> str:
        # ``permitted`` comes from the ctx kwargs stashed in __dict__ by the
        # mixin's __init__, hence the type-checker suppression.
        permitted = ', '.join(repr(v) for v in self.permitted) # type: ignore
        return f'unexpected value; permitted: {permitted}'
class NotNoneError(PydanticTypeError):
    code = 'not_none'
    msg_template = 'value is not None'
class BoolError(PydanticTypeError):
    msg_template = 'value could not be parsed to a boolean'
class BytesError(PydanticTypeError):
    msg_template = 'byte type expected'
class DictError(PydanticTypeError):
    msg_template = 'value is not a valid dict'
class EmailError(PydanticValueError):
    msg_template = 'value is not a valid email address'
# --- URL validation errors -------------------------------------------------
class UrlError(PydanticValueError):
    code = 'url'
class UrlSchemeError(UrlError):
    code = 'url.scheme'
    msg_template = 'invalid or missing URL scheme'
class UrlSchemePermittedError(UrlError):
    code = 'url.scheme'
    msg_template = 'URL scheme not permitted'
    def __init__(self, allowed_schemes: Set[str]):
        # The allowed set is stored in ctx so it is attached to the error.
        super().__init__(allowed_schemes=allowed_schemes)
class UrlUserInfoError(UrlError):
    code = 'url.userinfo'
    msg_template = 'userinfo required in URL but missing'
class UrlHostError(UrlError):
    code = 'url.host'
    msg_template = 'URL host invalid'
class UrlHostTldError(UrlError):
    code = 'url.host'
    msg_template = 'URL host invalid, top level domain required'
class UrlPortError(UrlError):
    code = 'url.port'
    msg_template = 'URL port invalid, port cannot exceed 65535'
class UrlExtraError(UrlError):
    code = 'url.extra'
    msg_template = 'URL invalid, extra characters found after valid URL: {extra!r}'
# --- Enum / numeric / path / collection errors -----------------------------
class EnumMemberError(PydanticTypeError):
    code = 'enum'
    def __str__(self) -> str:
        # ``enum_values`` arrives via the ctx kwargs; list each member's
        # underlying value in the message.
        permitted = ', '.join(repr(v.value) for v in self.enum_values) # type: ignore
        return f'value is not a valid enumeration member; permitted: {permitted}'
class IntegerError(PydanticTypeError):
    msg_template = 'value is not a valid integer'
class FloatError(PydanticTypeError):
    msg_template = 'value is not a valid float'
class PathError(PydanticTypeError):
    msg_template = 'value is not a valid path'
class _PathValueError(PydanticValueError):
    # Shared base for filesystem errors: stringify the Path once so the
    # stored context stays plain/picklable.
    def __init__(self, *, path: Path) -> None:
        super().__init__(path=str(path))
class PathNotExistsError(_PathValueError):
    code = 'path.not_exists'
    msg_template = 'file or directory at path "{path}" does not exist'
class PathNotAFileError(_PathValueError):
    code = 'path.not_a_file'
    msg_template = 'path "{path}" does not point to a file'
class PathNotADirectoryError(_PathValueError):
    code = 'path.not_a_directory'
    msg_template = 'path "{path}" does not point to a directory'
class PyObjectError(PydanticTypeError):
    msg_template = 'ensure this value contains valid import path or valid callable: {error_message}'
class SequenceError(PydanticTypeError):
    msg_template = 'value is not a valid sequence'
class IterableError(PydanticTypeError):
    msg_template = 'value is not a valid iterable'
class ListError(PydanticTypeError):
    msg_template = 'value is not a valid list'
class SetError(PydanticTypeError):
    msg_template = 'value is not a valid set'
class FrozenSetError(PydanticTypeError):
    msg_template = 'value is not a valid frozenset'
class DequeError(PydanticTypeError):
    msg_template = 'value is not a valid deque'
class TupleError(PydanticTypeError):
    msg_template = 'value is not a valid tuple'
class TupleLengthError(PydanticValueError):
    code = 'tuple.length'
    msg_template = 'wrong tuple length {actual_length}, expected {expected_length}'
    def __init__(self, *, actual_length: int, expected_length: int) -> None:
        super().__init__(actual_length=actual_length, expected_length=expected_length)
# --- min/max length constraint errors --------------------------------------
class ListMinLengthError(PydanticValueError):
    code = 'list.min_items'
    msg_template = 'ensure this value has at least {limit_value} items'
    def __init__(self, *, limit_value: int) -> None:
        super().__init__(limit_value=limit_value)
class ListMaxLengthError(PydanticValueError):
    code = 'list.max_items'
    msg_template = 'ensure this value has at most {limit_value} items'
    def __init__(self, *, limit_value: int) -> None:
        super().__init__(limit_value=limit_value)
class SetMinLengthError(PydanticValueError):
    code = 'set.min_items'
    msg_template = 'ensure this value has at least {limit_value} items'
    def __init__(self, *, limit_value: int) -> None:
        super().__init__(limit_value=limit_value)
class SetMaxLengthError(PydanticValueError):
    code = 'set.max_items'
    msg_template = 'ensure this value has at most {limit_value} items'
    def __init__(self, *, limit_value: int) -> None:
        super().__init__(limit_value=limit_value)
class AnyStrMinLengthError(PydanticValueError):
    code = 'any_str.min_length'
    msg_template = 'ensure this value has at least {limit_value} characters'
    def __init__(self, *, limit_value: int) -> None:
        super().__init__(limit_value=limit_value)
class AnyStrMaxLengthError(PydanticValueError):
    code = 'any_str.max_length'
    msg_template = 'ensure this value has at most {limit_value} characters'
    def __init__(self, *, limit_value: int) -> None:
        super().__init__(limit_value=limit_value)
class StrError(PydanticTypeError):
    msg_template = 'str type expected'
class StrRegexError(PydanticValueError):
    code = 'str.regex'
    msg_template = 'string does not match regex "{pattern}"'
    def __init__(self, *, pattern: str) -> None:
        super().__init__(pattern=pattern)
# --- numeric bound / decimal / temporal / type errors ----------------------
class _NumberBoundError(PydanticValueError):
    # Shared base for gt/ge/lt/le comparison failures.
    def __init__(self, *, limit_value: Union[int, float, Decimal]) -> None:
        super().__init__(limit_value=limit_value)
class NumberNotGtError(_NumberBoundError):
    code = 'number.not_gt'
    msg_template = 'ensure this value is greater than {limit_value}'
class NumberNotGeError(_NumberBoundError):
    code = 'number.not_ge'
    msg_template = 'ensure this value is greater than or equal to {limit_value}'
class NumberNotLtError(_NumberBoundError):
    code = 'number.not_lt'
    msg_template = 'ensure this value is less than {limit_value}'
class NumberNotLeError(_NumberBoundError):
    code = 'number.not_le'
    msg_template = 'ensure this value is less than or equal to {limit_value}'
class NumberNotMultipleError(PydanticValueError):
    code = 'number.not_multiple'
    msg_template = 'ensure this value is a multiple of {multiple_of}'
    def __init__(self, *, multiple_of: Union[int, float, Decimal]) -> None:
        super().__init__(multiple_of=multiple_of)
class DecimalError(PydanticTypeError):
    msg_template = 'value is not a valid decimal'
class DecimalIsNotFiniteError(PydanticValueError):
    code = 'decimal.not_finite'
    msg_template = 'value is not a valid decimal'
class DecimalMaxDigitsError(PydanticValueError):
    code = 'decimal.max_digits'
    msg_template = 'ensure that there are no more than {max_digits} digits in total'
    def __init__(self, *, max_digits: int) -> None:
        super().__init__(max_digits=max_digits)
class DecimalMaxPlacesError(PydanticValueError):
    code = 'decimal.max_places'
    msg_template = 'ensure that there are no more than {decimal_places} decimal places'
    def __init__(self, *, decimal_places: int) -> None:
        super().__init__(decimal_places=decimal_places)
class DecimalWholeDigitsError(PydanticValueError):
    code = 'decimal.whole_digits'
    msg_template = 'ensure that there are no more than {whole_digits} digits before the decimal point'
    def __init__(self, *, whole_digits: int) -> None:
        super().__init__(whole_digits=whole_digits)
class DateTimeError(PydanticValueError):
    msg_template = 'invalid datetime format'
class DateError(PydanticValueError):
    msg_template = 'invalid date format'
class TimeError(PydanticValueError):
    msg_template = 'invalid time format'
class DurationError(PydanticValueError):
    msg_template = 'invalid duration format'
class HashableError(PydanticTypeError):
    msg_template = 'value is not a valid hashable'
class UUIDError(PydanticTypeError):
    msg_template = 'value is not a valid uuid'
class UUIDVersionError(PydanticValueError):
    code = 'uuid.version'
    msg_template = 'uuid version {required_version} expected'
    def __init__(self, *, required_version: int) -> None:
        super().__init__(required_version=required_version)
class ArbitraryTypeError(PydanticTypeError):
    code = 'arbitrary_type'
    msg_template = 'instance of {expected_arbitrary_type} expected'
    def __init__(self, *, expected_arbitrary_type: Type[Any]) -> None:
        # display_as_type renders a readable type name for the message.
        super().__init__(expected_arbitrary_type=display_as_type(expected_arbitrary_type))
class ClassError(PydanticTypeError):
    code = 'class'
    msg_template = 'a class is expected'
class SubclassError(PydanticTypeError):
    code = 'subclass'
    msg_template = 'subclass of {expected_class} expected'
    def __init__(self, *, expected_class: Type[Any]) -> None:
        super().__init__(expected_class=display_as_type(expected_class))
# --- JSON / network address / misc errors ----------------------------------
class JsonError(PydanticValueError):
    msg_template = 'Invalid JSON'
class JsonTypeError(PydanticTypeError):
    code = 'json'
    msg_template = 'JSON object must be str, bytes or bytearray'
class PatternError(PydanticValueError):
    code = 'regex_pattern'
    msg_template = 'Invalid regular expression'
class DataclassTypeError(PydanticTypeError):
    code = 'dataclass'
    msg_template = 'instance of {class_name}, tuple or dict expected'
class CallableError(PydanticTypeError):
    msg_template = '{value} is not callable'
class EnumError(PydanticTypeError):
    code = 'enum_instance'
    msg_template = '{value} is not a valid Enum instance'
class IntEnumError(PydanticTypeError):
    code = 'int_enum_instance'
    msg_template = '{value} is not a valid IntEnum instance'
class IPvAnyAddressError(PydanticValueError):
    msg_template = 'value is not a valid IPv4 or IPv6 address'
class IPvAnyInterfaceError(PydanticValueError):
    msg_template = 'value is not a valid IPv4 or IPv6 interface'
class IPvAnyNetworkError(PydanticValueError):
    msg_template = 'value is not a valid IPv4 or IPv6 network'
class IPv4AddressError(PydanticValueError):
    msg_template = 'value is not a valid IPv4 address'
class IPv6AddressError(PydanticValueError):
    msg_template = 'value is not a valid IPv6 address'
class IPv4NetworkError(PydanticValueError):
    msg_template = 'value is not a valid IPv4 network'
class IPv6NetworkError(PydanticValueError):
    msg_template = 'value is not a valid IPv6 network'
class IPv4InterfaceError(PydanticValueError):
    msg_template = 'value is not a valid IPv4 interface'
class IPv6InterfaceError(PydanticValueError):
    msg_template = 'value is not a valid IPv6 interface'
class ColorError(PydanticValueError):
    msg_template = 'value is not a valid color: {reason}'
class StrictBoolError(PydanticValueError):
    msg_template = 'value is not a valid boolean'
# --- payment card / byte size errors ---------------------------------------
class NotDigitError(PydanticValueError):
    code = 'payment_card_number.digits'
    msg_template = 'card number is not all digits'
class LuhnValidationError(PydanticValueError):
    code = 'payment_card_number.luhn_check'
    msg_template = 'card number is not luhn valid'
class InvalidLengthForBrand(PydanticValueError):
    code = 'payment_card_number.invalid_length_for_brand'
    msg_template = 'Length for a {brand} card must be {required_length}'
class InvalidByteSize(PydanticValueError):
    msg_template = 'could not parse value and unit from byte string'
class InvalidByteSizeUnit(PydanticValueError):
    msg_template = 'could not interpret byte unit: {unit}'
| {
"content_hash": "2a862bb6e02ebe81c538ac68e97bc015",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 120,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.6959385992964503,
"repo_name": "glenngillen/dotfiles",
"id": "db2df4f4bd6547d4f89bce97a52779983fd6f20c",
"size": "15635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".vscode/extensions/ms-python.python-2022.2.1924087327/pythonFiles/lib/jedilsp/pydantic/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "3634"
},
{
"name": "Shell",
"bytes": "4225"
},
{
"name": "Vim script",
"bytes": "16306"
}
],
"symlink_target": ""
} |
"""A module containing useful helper functions for unit tests."""
import string
import random
from pytodoist import todoist
def generate_id(size=10):
    """Return a random alphanumeric string.

    Args:
        size: Number of characters in the returned string (default 10).

    Returns:
        A string of ``size`` uppercase ASCII letters and digits.
    """
    chars = string.ascii_uppercase + string.digits
    # random.choices draws all `size` characters in one C-level call instead
    # of one random.choice per character.
    return ''.join(random.choices(chars, k=size))
class TestUser(object):
    """A disposable user account for the unit tests.

    Each instance gets a unique random email so parallel/repeated test runs
    do not collide on the same Todoist account.
    """
    def __init__(self):
        self.full_name = "Test User"
        self.email = "pytodoist_{0}@gmail.com".format(generate_id())
        self.password = "password"
        self.token = None
        self.sync_token = '*'
def create_user():
    """Return a newly registered logged in Todoist user."""
    user = TestUser()
    try:
        return todoist.register(user.full_name, user.email, user.password)
    except todoist.RequestError:
        # Registration fails when the account already exists (e.g. left over
        # from an aborted test run): log in, delete the stale account, then
        # register the same credentials again.
        existing_user = todoist.login(user.email, user.password)
        existing_user.delete()
        return todoist.register(user.full_name, user.email, user.password)
| {
"content_hash": "e6ac8b82c3b0ddd45fb49c867e4b7745",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.6556224899598394,
"repo_name": "Garee/pytodoist",
"id": "7a7259c33a089ba98d444f0ef40205dcc58db2db",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytodoist/test/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "368"
},
{
"name": "Python",
"bytes": "104081"
}
],
"symlink_target": ""
} |
import json
from findig import App
from werkzeug.serving import run_simple
# A simple Findig example app
#
# Findig applications are collections of managed web resources. Apps are
# described by declaring resources and the actions that they support
# (get, save and delete).
#
# This example shows how a very simple application can be declared in
# Findig.
#
# The application object and the in-memory "database" the resources expose.
app = App()
DATA = {}
# GET /greeting -> greets whoever is stored under DATA['name'] ('Findig' default).
@app.route("/greeting")
@app.resource
def greeter():
    return {"message": "Hello {0}!".format(DATA.get("name", "Findig"))}
# GET /data -> a copy of the current data store (copied so callers can't
# mutate DATA through the returned dict).
@app.route("/data")
@app.resource
def data():
    return dict(DATA)
# PUT-style write: replace the store's contents with the parsed request body.
@data.model("write")
def update_data(res_data):
    DATA.clear()
    DATA.update(res_data)
# DELETE: empty the store.
@data.model("delete")
def delete_data(res_data):
    DATA.clear()
# Outgoing representation: serialize every response as JSON (the default).
@app.formatter.register("application/json", default=True)
def format_data_json(d):
    return json.dumps(d)
# Incoming representation: decode JSON request bodies, honouring an optional
# charset content-type parameter (utf8 if absent).
@data.parser.register("application/json")
def parse_json_bytes(bs, **opts):
    return json.loads(bs.decode(opts.get("charset", "utf8")))
if __name__ == '__main__':
    # Werkzeug dev server; reloader/debugger are for local use only.
    run_simple('localhost', 5001, app, use_reloader=True, use_debugger=True)
"content_hash": "66d87e949bdc3b856586cdb03d4c7faa",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 23.25531914893617,
"alnum_prop": 0.6953339432753889,
"repo_name": "geniphi/findig",
"id": "b1ebd89e5283b14460b477071743e665e37c443e",
"size": "1117",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201777"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
import contacts.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# CRUD URL routes for the contacts app.
# NOTE(review): `patterns('', ...)` is the pre-Django-1.8 style (removed in
# Django 1.10) -- presumably this project pins an old Django; confirm before
# upgrading.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'addressbook.views.home', name='home'),
    # url(r'^addressbook/', include('addressbook.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
    # FOR LIST CONTACTS
    url(r'^$', contacts.views.ListContactView.as_view(), name='contacts-list',), # Giving a URL pattern a name allows you to do a reverse lookup
    # FOR CREATE
    url(r'^new$', contacts.views.CreateContactView.as_view(), name='contacts-new',),
    # FOR UPDATE
    url(r'^edit/(?P<pk>\d+)/$', contacts.views.UpdateContactView.as_view(), name='contacts-edit',),
    # FOR DELETE
    url(r'^delete/(?P<pk>\d+)/$', contacts.views.DeleteContactView.as_view(), name='contacts-delete',),
    # TO PRESENT SINGLE CONTACT
    url(r'^(?P<pk>\d+)/$', contacts.views.ContactView.as_view(), name='contacts-view',),
    # for CONTACT and ADDRESS
    url(r'^edit/(?P<pk>\d+)/addresses$', contacts.views.EditContactAddressView.as_view(), name='contacts-edit-addresses',),
)
# Serve static files through Django in development.
urlpatterns += staticfiles_urlpatterns()
"content_hash": "4dba08c2368de15877fd2351424591ee",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 146,
"avg_line_length": 41.416666666666664,
"alnum_prop": 0.6847753185781355,
"repo_name": "pyjosh/django_addressbook",
"id": "7f0c56486a91afd537e50da631f75425608ad65b",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addressbook/urls.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
The MIT License
Copyright (c) 2010 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from sqlalchemy import Table, Column, Integer, Boolean, String, MetaData, ForeignKey, Sequence, create_engine
from sqlalchemy.orm import mapper, sessionmaker
import random
import cmd
import yaml
from utils.Conf import *
class Hero(object):
    """The village champion: fights monsters, loots gold, descends deeper."""
    # Class-level defaults; SQLAlchemy's classical mapper (see
    # GoldQuest.setup_tables) overlays these with column-backed attributes.
    id = 0
    name = ''
    health = None
    strength = None
    hurt = None
    kills = None
    gold = None
    level = None
    alive = None
    def __init__(self):
        self.hurt = 0
        self.kills = 0
        self.gold = 0
        self.level = 1
        self.alive = True
    def reroll(self, name=None):
        """Re-randomize stats (5d20 each) and reset progress; random name unless given."""
        self.health = self.roll(20, 5)
        self.strength = self.roll(20, 5)
        self.hurt = 0
        self.kills = 0
        self.gold = 0
        self.level = 1
        self.alive = True
        if name:
            self.name = name
        else:
            self.name = self.random_name()
    def search_treasure(self):
        """50% chance to find 1..level gold; returns the amount found (0 on failure)."""
        luck = self.roll(100)
        if luck > 50:
            found_gold = self.roll(self.level)
            self.gold = self.gold + found_gold
            return found_gold
        return 0
    def injure(self, hurt):
        """Accumulate damage; the hero dies when hurt strictly exceeds health."""
        self.hurt = self.hurt + hurt
        if self.hurt > self.health:
            self.alive = False
    def fight_monster(self, monster):
        """Fight `monster` to the death; returns True if the hero survives."""
        #print("Monster:", monster.health, monster.strength)
        # NOTE(review): the guard tests `monster.health >= 0`, so a monster
        # reduced to exactly 0 health takes one extra (harmless) hit before
        # the loop exits -- `> 0` looks intended; confirm.
        while monster.health >= 0 and self.hurt < self.health:
            hit = self.roll(self.strength)
            killed = monster.injure(hit)
            #print("Hit:", hit, "Monster Health:", monster.health)
            if not killed:
                monster_hit = self.roll(monster.strength)
                self.injure(monster_hit)
                #print("Monster Hits:", monster_hit, "Hero Hurt:", self.hurt)
        # NOTE(review): the death check uses `>` while the loop guard uses
        # `<`, so hurt == health stops the fight but counts as a survival.
        if self.hurt > self.health:
            self.alive = False
        else:
            self.kills = self.kills + 1
        return self.alive
    def rest(self):
        """Heal 1-10 hurt, capped at current hurt; returns the amount healed."""
        if self.hurt > 0:
            heal = random.randint(1, 10)
            if heal > self.hurt:
                heal = self.hurt
            self.hurt = self.hurt - heal
            return heal
        return 0
    def go_deeper(self, depth=None):
        """Descend `depth` levels (default 1); returns the new dungeon level."""
        if not depth:
            depth = 1
        self.level = self.level + depth
        return self.level
    def roll(self, sides, times=1):
        """Roll `times` dice of `sides` sides and return the total."""
        total = 0
        for i in range(times):
            total = total + random.randint(1, sides)
        return total
    def random_name(self):
        """Build a '<Name> the <Epithet>' style hero name."""
        name = random.choice(['Conan', 'Canon', 'Hercules', 'Robin', 'Dante', 'Legolas', 'Buffy', 'Xena'])
        epithet = random.choice(['Barbarian', 'Invincible', 'Mighty', 'Hairy', 'Bastard', 'Slayer'])
        return '%s the %s' % (name, epithet)
    def get_attributes(self):
        """Return the hero's attribute dict plus a 'status' display field."""
        # NOTE(review): this returns (and mutates) self.__dict__ directly --
        # the 'status' key is written into the hero's own attribute dict.
        attribs = self.__dict__
        attribs['status'] = ""
        if not self.alive:
            attribs['status'] = " (Deceased)"
        #for k, v in attribs.items():
        # print k, v
        return attribs
        #return self.__dict__
    def get_charsheet(self):
        """Render the one-line character sheet string."""
        msg = "%(name)s%(status)s - Strength: %(strength)d Health: %(health)d Hurt: %(hurt)d Kills: %(kills)d Gold: %(gold)d Level: %(level)d"
        msg = msg % self.get_attributes()
        return msg
class Monster(object):
    """A randomly rolled opponent, scaled by dungeon level."""
    name = None
    strength = None
    health = None
    def __init__(self, level=None, name=None, boss=False):
        """Roll strength/health in 1..level; bosses gain +level to both."""
        depth = level if level else 1
        self.strength = random.randint(1, depth)
        self.health = random.randint(1, depth)
        if boss:
            self.strength += depth
            self.health += depth
        # Only draw a random flavour name when the caller supplied none.
        self.name = name if name else self.random_name()
    def injure(self, hurt):
        """
        Injure the monster with hurt points. Returns True if the monster died.
        """
        self.health -= hurt
        return self.health <= 0
    def random_name(self):
        """Pick a flavour description for an anonymous monster."""
        flavours = [
            "an orc", "an ogre", "a bunch of goblins", "a giant spider",
            "a cyclops", "a minotaur", "a horde of kobolds",
            "a rattling skeleton", "a large troll", "a moaning zombie",
            "a swarm of vampire bats", "a baby hydra", "a giant monster ant",
            "a slithering lizard", "an angry lion", "three hungry bears",
            "a hell hound", "a pack of rabid dogs", "a werewolf",
            "an ice demon", "a fire wraith", "a groaning ghoul",
            "two goblins", "a three-headed hyena", "a giant monster worm",
            "a slobbering were-pig"
        ]
        return random.choice(flavours)
class Level(object):
    """Tracks kill and loot progress for one dungeon depth."""
    depth = None
    killed = None
    looted = None
    boss = None
    text = None
    def __init__(self, depth=None):
        self.killed = 0
        self.looted = 0
        self.depth = depth if depth else 1
    def get_monster(self, name):
        """Return the next Monster for this level, or None when cleared.

        The level's last monster is a boss; if the level defines a boss name
        it overrides the caller's suggestion.
        """
        final_fight = self.killed == self.depth - 1
        if final_fight and self.boss:
            name = self.boss
        if self.killed < self.depth:
            return Monster(self.depth, name, final_fight)
    def get_loot(self):
        """Roll for treasure: positive gold, 0, or a negative trap value."""
        if not self.can_loot():
            return 0
        self.looted = self.looted + 1
        luck = random.randint(1, 100)
        if luck > 20:
            return random.randint(1, self.depth)
        if luck < 5:
            return 0 - luck
        return 0
    def can_loot(self):
        """One loot attempt is allowed per monster killed."""
        return self.looted < self.killed
class GoldQuest(object):
    """Game engine: persists one hero/level in SQLite and runs text commands."""
    _gamedata = None
    cfg = None
    hero = None
    level = None
    def __init__(self, cfg):
        """
        Setup Sqlite SQL tables and start a db session.
        The database will be saved in C{extras/quest.db}
        Calls L{setup_tables} to setup table metadata and L{setup_session}
        to instantiate the db session.
        """
        self.cfg = cfg
        try:
            debug = self.cfg.get_bool('debug')
        except AttributeError:
            debug = False
        self.read_texts()
        self.engine = create_engine('sqlite:///extras/quest.db', echo=debug)
        self.setup_tables()
        self.setup_session()
        # Resume a previously saved hero, if one is still alive.
        self.hero = self.get_alive_hero()
        if self.hero and not self.level:
            self.level = Level(self.hero.level)
    def setup_session(self):
        """
        Start a SQLAlchemy db session.
        Saves the session instance in C{self.session}
        """
        Session = sessionmaker(bind=self.engine)
        self.session = Session()
    def setup_tables(self):
        """
        Defines the tables to use for L{Hero}
        The Metadata instance is saved to C{self.metadata}
        """
        self.metadata = MetaData()
        hero_table = Table('hero', self.metadata,
            Column('id', Integer, Sequence('hero_id_seq'), primary_key=True),
            Column('name', String(100)),
            Column('health', Integer),
            Column('strength', Integer),
            Column('hurt', Integer),
            Column('kills', Integer),
            Column('gold', Integer),
            Column('level', Integer),
            Column('alive', Boolean),
        )
        mapper(Hero, hero_table)
        level_table = Table('level', self.metadata,
            Column('id', Integer, Sequence('hero_id_seq'), primary_key=True),
            Column('depth', Integer),
            Column('killed', Integer),
            Column('looted', Integer),
        )
        mapper(Level, level_table)
        self.metadata.create_all(self.engine)
    def read_texts(self):
        """Load game texts/monsters/bosses from the YAML data file."""
        f = open('extras/goldquest.dat')
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary YAML tags -- fine for a bundled file, but prefer
        # yaml.safe_load if the data file is ever user-supplied.
        self._gamedata = yaml.load(f)
        f.close()
    def get_text(self, text):
        """Return the message for key `text`; pick randomly if it is a list."""
        texts = self._gamedata['texts'][text]
        if not texts:
            return None
        elif isinstance(texts, basestring):
            # `basestring` => this module targets Python 2.
            return texts
        else:
            return random.choice(texts)
    def get_level_texts(self, depth):
        """Return the per-level text record for `depth`, if one exists."""
        for lvl in self._gamedata['level']:
            if lvl['level'] == depth:
                return lvl
    def get_monster(self, lvl=None):
        """Pick a monster name appropriate for `lvl` and spawn it via Level."""
        if not lvl:
            lvl = self.level.depth or 1
        monsters = []
        for monster in self._gamedata['monster']:
            # NOTE(review): `and` binds tighter than `or`, so this admits any
            # monster with lvl <= highlevel regardless of lowlevel -- likely
            # missing parentheses around the `or` clause; confirm intent.
            if lvl >= monster['lowlevel'] and monster['highlevel'] == 0 or lvl <= monster['highlevel']:
                monsters.append(monster['name'])
        if monsters:
            name = random.choice(monsters)
        else:
            name = None
        return self.level.get_monster(name)
    def play(self, command):
        """Dispatch one player command (English or Swedish) and persist state."""
        msg = ""
        if command in ['reroll', 'ny gubbe']:
            return self.reroll()
        if not self.hero or not self.hero.alive:
            return self.get_text('nochampion')
        if command in ['rest', 'vila']:
            msg = self.rest()
        elif command in ['fight', u'slåss']:
            msg = self.fight()
        elif command in ['deeper', 'vidare']:
            msg = self.go_deeper()
        elif command in ['loot', 'search', u'sök', 'finna dolda ting']:
            msg = self.search_treasure()
        elif command in ['charsheet', u'formulär']:
            msg = self.show_charsheet()
        else:
            return None
        self.save_data()
        return msg
    def save_data(self):
        """Persist the current hero and level to the database."""
        self.session.add(self.hero)
        self.session.add(self.level)
        self.session.commit()
    def get_alive_hero(self):
        """Return the saved living hero, or None."""
        hero = self.session.query(Hero).filter_by(alive=True).first()
        return hero
    def get_level(self, lvl):
        """Load (or create) the Level for depth `lvl`, applying its texts/boss."""
        level = self.session.query(Level).filter_by(depth=lvl).first()
        if not level:
            level = Level(lvl)
        texts = self.get_level_texts(lvl)
        if texts:
            for k, v in texts.items():
                if v:
                    setattr(level, k , v)
        if not level.boss:
            level.boss = random.choice(self._gamedata['boss'])
        return level
    def reroll(self):
        """Create a fresh hero unless a living one already exists."""
        if self.hero and self.hero.alive:
            msg = self.get_text('noreroll') % self.hero.get_attributes()
            return msg
        else:
            # Delete all old Level data.
            self.session.query(Level).delete()
            # Reroll new hero.
            self.hero = Hero()
            self.hero.reroll()
            self.level = self.get_level(self.hero.level)
            self.save_data()
            msg = self.get_text('newhero')
            msg = msg % self.hero.get_attributes()
            msg = msg + " " + self.level.text
            return msg
    def search_treasure(self):
        """Attempt one loot roll on the current level; gold, trap, or nothing."""
        #loot = self.hero.search_treasure()
        attribs = self.hero.get_attributes()
        if self.level.can_loot():
            loot = self.level.get_loot()
            attribs['loot'] = loot
            if loot > 0:
                msg = self.get_text('foundloot')
            elif loot < 0:
                # Negative loot is a trap that hurts the hero.
                attribs['trap_hurt'] = abs(loot)
                self.hero.injure(attribs['trap_hurt'])
                msg = self.get_text('foundtrap')
            else:
                msg = self.get_text('nogold')
        else:
            msg = self.get_text('noloot')
        msg = msg % attribs
        return msg
    def rest(self):
        """Let the hero heal; returns the appropriate narration."""
        # TODO: There should be a chance of being attacked by a monster (who
        # will hit first)
        rested = self.hero.rest()
        if rested:
            if self.hero.hurt:
                restmsg = self.get_text('rests')
            else:
                restmsg = self.get_text('healed')
        else:
            restmsg = self.get_text('alreadyhealed')
        attribs = self.hero.get_attributes()
        attribs['rested'] = rested
        msg = restmsg % attribs
        return msg
    def go_deeper(self):
        """Descend one level and narrate the new depth."""
        depth = self.hero.go_deeper()
        self.level = self.get_level(depth)
        msg = self.level.text or self.get_text('deeper')
        msg = msg % self.hero.get_attributes()
        return msg
    def fight(self):
        """Spawn a monster on the current level and resolve the fight."""
        monster = self.get_monster(self.level.depth)
        attribs = self.hero.get_attributes()
        if not monster:
            msg = self.get_text('nomonsters')
            return msg % attribs
        won = self.hero.fight_monster(monster)
        if won:
            self.level.killed = self.level.killed + 1
            msg = self.get_text('killed')
            attribs['slayed'] = self.get_text('slayed')
        else:
            msg = self.get_text('died')
        attribs['monster'] = monster.name
        msg = msg % attribs
        msg = self.firstupper(msg)
        return msg
    def show_charsheet(self):
        """Return the formatted character sheet text."""
        msg = self.get_text('charsheet')
        return msg % self.hero.get_attributes()
    def firstupper(self, text):
        """Capitalize only the first character, leaving the rest untouched."""
        first = text[0].upper()
        return first + text[1:]
class Game(cmd.Cmd):
    """Interactive REPL front end (Python 2: note the `print` statements)."""
    prompt = 'GoldQuest> '
    intro = "Welcome to GoldQuest!"
    game = None
    def preloop(self):
        # Runs once before the command loop: load config, build the engine.
        cfg = Conf('../config.ini', 'LOCAL')
        self.game = GoldQuest(cfg)
    def default(self, line):
        # Unrecognised commands are forwarded verbatim to the game engine,
        # which also understands the Swedish aliases.
        ret = self.game.play(line)
        if ret:
            print ret
    def do_fight(self, line):
        "Find a new monster and fight it to the death!"
        print self.game.play('fight')
    def do_charsheet(self, line):
        "Show the character sheet for the current hero."
        print self.game.play('charsheet')
    def do_reroll(self, line):
        "Reroll a new hero if the village doesn't have one already."
        print self.game.play('reroll')
    def do_rest(self, line):
        "Makes the hero rest for a while to regain hurt."
        print self.game.play('rest')
    def do_loot(self, line):
        "The hero will search for loot in the hope to find gold."
        print self.game.play('loot')
    def do_deeper(self, line):
        "Tells the hero to go deeper into the dungeon."
        print self.game.play('deeper')
    def do_quit(self, line):
        "Quit Game"
        print "A strange game. The only winning move is not to play."
        return True
if __name__ == '__main__':
    Game().cmdloop()
| {
"content_hash": "2c688a481f6f661f02c38ee24771aa94",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 142,
"avg_line_length": 31.497942386831276,
"alnum_prop": 0.5535667624771361,
"repo_name": "ollej/Twippy",
"id": "89219be2f85dbdf84e97c332ef53be9634e5907f",
"size": "15354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plugins/GoldQuest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99139"
}
],
"symlink_target": ""
} |
"""
=======================================================================================
This module implements a Description Horn Logic implementation as defined
by Grosof, B. et.al. ("Description Logic Programs: Combining Logic Programs with
Description Logic" [1]) in section 4.4. As such, it implements recursive mapping
functions "T", "Th" and "Tb" which result in "custom" (dynamic) rulesets, RIF Basic
Logic Dialect: Horn rulesets [2], [3]. The rulesets are evaluated against an
efficient RETE-UL network.
It is a Description Logic Programming [1] Implementation on top of RETE-UL:
"A DLP is directly defined as the LP-correspondent of a def-Horn
ruleset that results from applying the mapping T ."
The mapping is as follows:
== Core (Description Horn Logic) ==
== Class Equivalence ==
T(owl:equivalentClass(C, D)) -> { T(rdfs:subClassOf(C, D)
T(rdfs:subClassOf(D, C) }
== Domain and Range Axioms (Base Description Logic: "ALC") ==
T(rdfs:range(P, D)) -> D(y) := P(x, y)
T(rdfs:domain(P, D)) -> D(x) := P(x, y)
== Property Axioms (Role constructors: "I") ==
T(rdfs:subPropertyOf(P, Q)) -> Q(x, y) :- P(x, y)
T(owl:equivalentProperty(P, Q)) -> { Q(x, y) :- P(x, y)
P(x, y) :- Q(x, y) }
T(owl:inverseOf(P, Q)) -> { Q(x, y) :- P(y, x)
P(y, x) :- Q(x, y) }
T(owl:TransitiveProperty(P)) -> P(x, z) :- P(x, y) ^ P(y, z)
[1] http://www.cs.man.ac.uk/~horrocks/Publications/download/2003/p117-grosof.pdf
[2] http://www.w3.org/2005/rules/wg/wiki/Core/Positive_Conditions
[3] http://www.w3.org/2005/rules/wg/wiki/asn06
"""
from rdflib.collection import Collection
# from rdflib.graph import Graph
from rdflib.namespace import (
Namespace,
RDF,
RDFS
)
from rdflib import (
BNode,
Variable,
URIRef,
)
from rdflib.util import first
import copy
from FuXi.Horn.PositiveConditions import And, Or, Uniterm, Condition, Atomic, SetOperator, Exists
from FuXi.Horn import DATALOG_SAFETY_NONE, DATALOG_SAFETY_STRICT, DATALOG_SAFETY_LOOSE
from .LPNormalForms import NormalizeDisjunctions
from FuXi.Horn.HornRules import Clause as OriginalClause, Rule
try:
from functools import reduce
except ImportError:
pass
# Namespace used for skolem terms introduced during translation.
SKOLEMIZED_CLASS_NS = Namespace('http://code.google.com/p/python-dlp/wiki/SkolemTerm#')
# N3 (Notation3) rules for OWL semantics that fall *outside* the DHL fragment
# mapped by T(...): TBox subsumption/equivalence, disjointness consequences,
# and symmetry of differentFrom / complementOf / disjointWith.  This is
# runtime data parsed elsewhere -- the string content must not be reformatted.
non_DHL_OWL_Semantics = \
u"""
@prefix log: <http://www.w3.org/2000/10/swap/log#>.
@prefix math: <http://www.w3.org/2000/10/swap/math#>.
@prefix owl: <http://www.w3.org/2002/07/owl#>.
@prefix xsd: <http://www.w3.org/2001/XMLSchema#>.
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
@prefix : <http://eulersharp.sourceforge.net/2003/03swap/owl-rules#>.
@prefix list: <http://www.w3.org/2000/10/swap/list#>.
#Additional OWL-compliant semantics, mappable to Production Rules
#Subsumption (purely for TBOX classification)
{?C rdfs:subClassOf ?SC. ?A rdfs:subClassOf ?C} => {?A rdfs:subClassOf ?SC}.
{?C owl:equivalentClass ?A} => {?C rdfs:subClassOf ?A. ?A rdfs:subClassOf ?C}.
{?C rdfs:subClassOf ?SC. ?SC rdfs:subClassOf ?C} => {?C owl:equivalentClass ?SC}.
{?C owl:disjointWith ?B. ?M a ?C. ?Y a ?B } => {?M owl:differentFrom ?Y}.
{?P owl:inverseOf ?Q. ?P a owl:InverseFunctionalProperty} => {?Q a owl:FunctionalProperty}.
{?P owl:inverseOf ?Q. ?P a owl:FunctionalProperty} => {?Q a owl:InverseFunctionalProperty}.
#For OWL/InverseFunctionalProperty/premises004
{?C owl:oneOf ?L. ?L rdf:first ?X; rdf:rest rdf:nil. ?P rdfs:domain ?C} => {?P a owl:InverseFunctionalProperty}.
#For OWL/InverseFunctionalProperty/premises004
{?C owl:oneOf ?L. ?L rdf:first ?X; rdf:rest rdf:nil. ?P rdfs:range ?C} => {?P a owl:FunctionalProperty}.
{?S owl:differentFrom ?O} => {?O owl:differentFrom ?S}.
{?S owl:complementOf ?O} => {?O owl:complementOf ?S}.
{?S owl:disjointWith ?O} => {?O owl:disjointWith ?S}.
"""
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
Any = None  # wildcard term used in rdflib triple patterns
# Indices selecting the left/right side of a (lhs, rhs) clause pair.
LHS = 0
RHS = 1
def reduceAnd(left, right):
    """
    Folding function used (via ``reduce``) to flatten nested And
    conjunctions into a flat list of conjuncts (see NormalizeClause).

    Either argument may be an And instance (recursively flattened first),
    an already-flattened list of conjuncts, or a single conjunct; the
    result is always a flat list.
    """
    # NOTE(review): the elif means that when *both* arguments are And
    # instances only `left` is flattened here - preserved as-is.
    if isinstance(left, And):
        left = reduce(reduceAnd, left)
    elif isinstance(right, And):
        right = reduce(reduceAnd, right)
    # BUGFIX: the original repeated the `list and not list` branch a second
    # time below the `list and list` case; that duplicate was unreachable
    # dead code and has been removed. Behavior is unchanged.
    if isinstance(left, list) and not isinstance(right, list):
        return left + [right]
    elif isinstance(left, list) and isinstance(right, list):
        return left + right
    elif not isinstance(left, list) and isinstance(right, list):
        return [left] + right
    else:
        return [left, right]
def NormalizeClause(clause):
    """
    Normalize a clause in place: resolve generator-valued head/body to
    their first item and flatten nested And conjunctions via reduceAnd.
    Returns the (mutated) clause.
    """
    def fetchFirst(gen):
        # Take the first item of a generator, asserting one exists.
        rt = first(gen)
        assert rt is not None
        return rt
    # NOTE(review): the 'next' attribute check detects Python 2 generators
    # only; Python 3 generators expose '__next__' instead - confirm which
    # interpreter this module targets.
    if hasattr(clause.head, 'next'):  # and not isinstance(clause.head, Condition):
        clause.head = fetchFirst(clause.head)
    if hasattr(clause.body, 'next'):  # and not isinstance(clause.body, Condition):
        clause.body = fetchFirst(clause.body)
    # assert isinstance(clause.head, (Atomic, And, Clause)), repr(clause.head)
    # assert isinstance(clause.body, Condition), repr(clause.body)
    # Flatten nested conjunctions. NOTE(review): the head reduce passes an
    # initial [] while the body reduce does not - preserved as found.
    if isinstance(clause.head, And):
        clause.head.formulae = reduce(reduceAnd, clause.head, [])
    if isinstance(clause.body, And):
        clause.body.formulae = reduce(reduceAnd, clause.body)
    # print("Normalized clause: ", clause)
    # assert clause.body is not None and clause.head is not None, repr(clause)
    return clause
class UnsupportedNegation(Exception):
    """Raised (by Tc) when a negated DL concept cannot be translated to LP."""
    def __init__(self, msg):
        super(UnsupportedNegation, self).__init__(msg)
class Clause(OriginalClause):
    """
    The RETE-UL algorithm supports conjunctions of facts in the head of a rule
    i.e.: H1 ^ H2 ^ ... ^ H3 :- B1 ^ ^ Bm
    The Clause definition is overridden to permit this syntax (not allowed
    in definite LP or Horn rules)
    In addition, since we allow (in definite Horn) entailments beyond simple facts
    we ease restrictions on the form of the head to include Clauses
    """
    def __init__(self, body, head):
        self.body = body
        self.head = head
        # Eagerly precompute a hash over the body/head triple patterns when
        # the head is a single Uniterm; otherwise fall back to the lazy path
        # in __hash__.
        if isinstance(head, Uniterm):
            from FuXi.Rete.Network import HashablePatternList
            try:
                antHash = HashablePatternList([term.toRDFTuple()
                                               for term in body], skipBNodes=True)
                consHash = HashablePatternList([term.toRDFTuple()
                                               for term in head], skipBNodes=True)
                self._bodyHash = hash(antHash)
                self._headHash = hash(consHash)
                self._hash = hash((self._headHash, self._bodyHash))
            # NOTE(review): bare except deliberately swallows any failure in
            # eager hashing; __hash__ will retry lazily
            except:
                self._hash = None
        else:
            self._hash = None
    def __hash__(self):
        # Lazily (re)compute when eager hashing was skipped or failed.
        if self._hash is None:
            from FuXi.Rete.Network import HashablePatternList
            antHash = HashablePatternList([term.toRDFTuple()
                                           for term in self.body], skipBNodes=True)
            consHash = HashablePatternList([term.toRDFTuple()
                                           for term in self.head], skipBNodes=True)
            self._bodyHash = hash(antHash)
            self._headHash = hash(consHash)
            self._hash = hash((self._headHash, self._bodyHash))
        return self._hash
    def __repr__(self):
        return "%r :- %r" % (self.head, self.body)
    def n3(self):
        # Render as an N3 implication: { body } => { head }
        return u'{ %s } => { %s }' % (self.body.n3(), self.head.n3())
def makeRule(clause, nsMap):
    """
    Wrap a normalized horn clause in a Rule, collecting the variables it
    declares and detecting whether it belongs to the negative stratus.

    Returns None when the clause head contains a disjunction, which is not
    expressible within the def-Horn framework.
    """
    declaredVars = set()
    for conjunct in clause.head:
        if isinstance(conjunct, Or):
            # Disjunction in the head, skip this rule:
            # when a disjunction occurs on the r.h.s. of a subclass axiom it
            # becomes a disjunction in the head of the corresponding rule,
            # and this cannot be handled within the def-Horn framework.
            return None
        assert isinstance(conjunct, Uniterm), repr(conjunct)
        declaredVars.update(t for t in conjunct.toRDFTuple()
                            if isinstance(t, Variable))
    inNegativeStratus = False
    for conjunct in clause.body:
        if getattr(conjunct, 'naf', False):
            inNegativeStratus = True
        elif not hasattr(conjunct, 'naf'):
            # Normalize: every body literal carries an explicit naf flag.
            conjunct.naf = False
        declaredVars.update(t for t in conjunct.toRDFTuple()
                            if isinstance(t, Variable))
    return Rule(clause, declare=declaredVars, nsMapping=nsMap,
                negativeStratus=inNegativeStratus)
def DisjunctiveNormalForm(program, safety=DATALOG_SAFETY_NONE, network=None):
    """
    Generator: normalize each rule in `program` via the Lloyd-Topor
    transformation, split disjunctive bodies, apply the requested datalog
    safety policy, and yield the resulting Rule objects.

    NOTE(review): makeRule returns None for disjunctive heads, so this
    generator can yield None values - callers must tolerate them.
    """
    for rule in program:
        tx_horn_clause = NormalizeClause(rule.formula)
        for tx_horn_clause in LloydToporTransformation(tx_horn_clause, True):
            # Safety check first: LOOSE warns and skips, STRICT raises.
            if safety in [DATALOG_SAFETY_LOOSE, DATALOG_SAFETY_STRICT]:
                rule = Rule(tx_horn_clause, nsMapping=network and network.nsMap or {})
                if not rule.isSafe():
                    if safety == DATALOG_SAFETY_LOOSE:
                        import warnings
                        warnings.warn("Ignoring unsafe rule (%s)" % rule,
                                      SyntaxWarning,
                                      3)
                        continue
                    elif safety == DATALOG_SAFETY_STRICT:
                        raise SyntaxError("Unsafe RIF Core rule: %s" % rule)
            # Disjunctions in the body are split into separate rules.
            disj = [i for i in breadth_first(tx_horn_clause.body) if isinstance(i, Or)]
            if len(disj) > 0:
                NormalizeDisjunctions(disj, tx_horn_clause, program, network)
            elif isinstance(tx_horn_clause.head, (And, Uniterm)):
                # print("No Disjunction in the body")
                for hc in ExtendN3Rules(network, NormalizeClause(tx_horn_clause)):
                    yield makeRule(hc, network and network.nsMap or {})
def MapDLPtoNetwork(network,
                    factGraph,
                    complementExpansions=[],
                    constructNetwork=False,
                    derivedPreds=[],
                    ignoreNegativeStratus=False,
                    safety=DATALOG_SAFETY_NONE):
    """
    Translate the DLP-mappable OWL axioms in `factGraph` (via T) into a set
    of Rule objects, optionally building the RETE network as rules are
    produced.

    :param network: RETE network (used for its nsMap, and wired up when
        `constructNetwork` is True)
    :param factGraph: rdflib Graph holding the OWL TBox
    :param complementExpansions: class terms to skip when expanding conjunctions
    :param constructNetwork: if True, ExtendN3Rules also extends the network
    :param derivedPreds: derived (IDB) predicates excluded from some mappings
    :param ignoreNegativeStratus: if True, negative-stratus rules are kept out
        of the ruleset and returned separately
    :param safety: DATALOG_SAFETY_{NONE,LOOSE,STRICT}; LOOSE warns and skips
        unsafe rules, STRICT raises SyntaxError
    :returns: ``(ruleset, negativeStratus)`` when `ignoreNegativeStratus` is
        True, otherwise an iterator over the ruleset
    """
    import warnings
    ruleset = set()
    negativeStratus = []
    for horn_clause in T(factGraph, complementExpansions=complementExpansions, derivedPreds=derivedPreds):
        # print("## RIF BLD Horn Rules: Before LloydTopor: ##\n", horn_clause)
        # print("## RIF BLD Horn Rules: After LloydTopor: ##")
        fullReduce = True
        for tx_horn_clause in LloydToporTransformation(horn_clause, fullReduce):
            tx_horn_clause = NormalizeClause(tx_horn_clause)
            disj = [i for i in breadth_first(tx_horn_clause.body) if isinstance(i, Or)]
            if len(disj) > 0:
                # Disjunction in the body: split into one rule per disjunct.
                NormalizeDisjunctions(disj,
                                      tx_horn_clause,
                                      ruleset,
                                      network,
                                      constructNetwork,
                                      negativeStratus,
                                      ignoreNegativeStratus)
            elif isinstance(tx_horn_clause.head, (And, Uniterm)):
                # No disjunction in the body.
                for hc in ExtendN3Rules(network, NormalizeClause(tx_horn_clause), constructNetwork):
                    if safety in [DATALOG_SAFETY_LOOSE, DATALOG_SAFETY_STRICT]:
                        rule = Rule(hc, nsMapping=network.nsMap)
                        if not rule.isSafe():
                            if safety == DATALOG_SAFETY_LOOSE:
                                warnings.warn("Ignoring unsafe rule (%s)" % rule,
                                              SyntaxWarning,
                                              3)
                                continue
                            elif safety == DATALOG_SAFETY_STRICT:
                                raise SyntaxError("Unsafe RIF Core rule: %s" % rule)
                    _rule = makeRule(hc, network.nsMap)
                    # BUGFIX: makeRule returns None for clauses with a
                    # disjunctive head. The original accessed
                    # `_rule.negativeStratus` *before* its None check,
                    # raising AttributeError in that case.
                    if _rule is None:
                        continue
                    if _rule.negativeStratus:
                        negativeStratus.append(_rule)
                    if not _rule.negativeStratus or not ignoreNegativeStratus:
                        ruleset.add(_rule)
    # Extract free variables and add rule to ruleset
    # print("#######################")
    # print("########## Finished Building decision network from DLP ##########")
    # renderNetwork(network).write_graphviz('out.dot')
    if ignoreNegativeStratus:
        return ruleset, negativeStratus
    else:
        return iter(ruleset)
def IsaFactFormingConclusion(head):
    """
    'Relative to the def-Horn ruleset, the def-LP is thus sound; moreover, it is
    complete for fact-form conclusions, i.e., for queries whose answers amount
    to conjunctions of facts. However, the def-LP is a mildly weaker version of
    the def-Horn ruleset, in the following sense. Every conclusion of the def-LP
    must have the form of a fact. By contrast, the entailments, i.e., conclusions,
    of the def-Horn ruleset are not restricted to be facts.' - Scan depth-first
    looking for Clauses

    Returns True iff every conclusion reachable from `head` has the form of
    a fact: atoms qualify, conjunctions qualify when all members do, while
    disjunctions and embedded clauses do not.

    :raises TypeError: for an unrecognized head form.
    """
    if isinstance(head, And):
        # A conjunction is fact-forming only if every conjunct is.
        return all(IsaFactFormingConclusion(i) for i in head)
    elif isinstance(head, Or):
        return False
    elif isinstance(head, Atomic):
        return True
    elif isinstance(head, OriginalClause):
        return False
    else:
        # BUGFIX: the original did `print(head)` followed by a bare `raise`;
        # a bare raise with no active exception raises RuntimeError with no
        # useful message. Raise a descriptive error instead.
        raise TypeError("Unrecognized conclusion form: %r" % (head,))
def traverseClause(condition):
    """
    Child iterator used by breadth_first: a SetOperator (And/Or/...) yields
    its member formulae; an Atomic term is a leaf. Other types yield nothing.
    """
    if isinstance(condition, SetOperator):
        for i in iter(condition):
            yield i
    elif isinstance(condition, Atomic):
        return
def breadth_first(condition, children=traverseClause):
    """Traverse the nodes of a tree in breadth-first order.
    The first argument should be the tree root; children
    should be a function taking as argument a tree node and
    returning an iterator of the node's children.
    From http://ndirty.cute.fi/~karttu/matikka/Python/eppsteins_bf_traversal_231503.htm
    """
    yield condition
    newest = condition
    # Self-recursive trick: consume our own output one level behind while
    # expanding each visited node's children; stop once a node is expanded
    # without producing anything new (last yielded node == node expanded).
    for visited in breadth_first(condition, children):
        for successor in children(visited):
            yield successor
            newest = successor
        if newest == visited:
            return
def breadth_first_replace(condition,
                          children=traverseClause,
                          candidate=None,
                          replacement=None):
    """Traverse the nodes of a tree in breadth-first order.
    The first argument should be the tree root; children
    should be a function taking as argument a tree node and
    returning an iterator of the node's children.
    From http://ndirty.cute.fi/~karttu/matikka/Python/eppsteins_bf_traversal_231503.htm

    Variant of breadth_first that, when `candidate` is encountered, replaces
    it *in place* inside its parent's .formulae list with `replacement` and
    terminates the traversal.
    """
    yield condition
    last = condition
    for node in breadth_first_replace(condition,
                                      children,
                                      candidate,
                                      replacement):
        for child in children(node):
            yield child
            # Identity comparison: only the exact candidate object is replaced.
            if candidate and child is candidate:
                #replace candidate with replacement
                i = node.formulae.index(child)
                node.formulae[i] = replacement
                return
            last = child
        if last == node:
            return
def ExtendN3Rules(network, horn_clause, constructNetwork=False):
    """
    Extends the network with the given Horn clause (rule)

    Returns a list of the (possibly Lloyd-Topor-reduced) clauses processed.
    When `constructNetwork` is True, the clause's body/head patterns are
    also registered with the network's rule store and compiled into the
    RETE network.
    """
    from FuXi.Rete.RuleStore import Formula
    from FuXi.Rete.AlphaNode import AlphaNode
    rt = []
    if constructNetwork:
        ruleStore = network.ruleStore
    # Fresh formula identifiers for the rule's antecedent/consequent.
    lhs = BNode()
    rhs = BNode()
    assert isinstance(horn_clause.body, (And, Uniterm)), list(horn_clause.body)
    assert len(list(horn_clause.body))
    # print(horn_clause)
    if constructNetwork:
        for term in horn_clause.body:
            ruleStore.formulae.setdefault(lhs, Formula(lhs)).append(term.toRDFTuple())
    assert isinstance(horn_clause.head, (And, Uniterm)), repr(horn_clause.head)
    if IsaFactFormingConclusion(horn_clause.head):
        # Skolemize head-only variables / wrap existentials before compiling.
        PrepareHornClauseForRETE(horn_clause)
        if constructNetwork:
            for term in horn_clause.head:
                assert not hasattr(term, 'next')
                if isinstance(term, Or):
                    ruleStore.formulae.setdefault(rhs, Formula(rhs)).append(term)
                else:
                    ruleStore.formulae.setdefault(rhs, Formula(rhs)).append(term.toRDFTuple())
            ruleStore.rules.append((ruleStore.formulae[lhs], ruleStore.formulae[rhs]))
            network.buildNetwork(iter(ruleStore.formulae[lhs]),
                                 iter(ruleStore.formulae[rhs]),
                                 Rule(horn_clause))
            network.alphaNodes = [node for node in list(network.nodes.values()) if isinstance(node, AlphaNode)]
        rt.append(horn_clause)
    else:
        # Non-fact-forming head: reduce via Lloyd-Topor and recurse.
        for hC in LloydToporTransformation(horn_clause, fullReduction=True):
            rt.append(hC)
            #print("normalized clause: ", hC)
            # NOTE(review): this appends hC once per recursive result rather
            # than appending the result `i` itself - looks suspicious but is
            # preserved as found; confirm intent before changing.
            for i in ExtendN3Rules(network, hC, constructNetwork):
                rt.append(hC)
    return rt
def PrepareHeadExistential(clause):
    """
    If the clause head contains BNode (existential/skolem) arguments, wrap
    the head in an Exists quantifying over them. Mutates and returns `clause`.
    """
    from FuXi.Rete.SidewaysInformationPassing import GetArgs
    # Collect the BNode arguments of every head literal.
    skolemsInHead = [
        list(filter(
            lambda term: isinstance(term,
                                    BNode),
            GetArgs(lit)))
        for lit in iterCondition(clause.head)]
    skolemsInHead = reduce(lambda x, y: x + y, skolemsInHead, [])
    if skolemsInHead:
        # Deep copy so the original head object is left untouched.
        newHead = copy.deepcopy(clause.head)
        _e = Exists(formula=newHead, declare=set(skolemsInHead))
        clause.head = _e
    return clause
def PrepareHornClauseForRETE(horn_clause):
    """
    Rewrite a horn clause in place for RETE evaluation: variables occurring
    only in the head are replaced by fresh BNodes (skolemized), and BNode
    arguments in the body/head are wrapped in Exists quantifiers.
    """
    if isinstance(horn_clause, Rule):
        horn_clause = horn_clause.formula
    def extractVariables(term, existential=True):
        # Yield BNodes (existential=True) or Variables (existential=False)
        # from a bare term or from a Uniterm's triple.
        if isinstance(term, existential and BNode or Variable):
            yield term
        elif isinstance(term, Uniterm):
            for t in term.toRDFTuple():
                if isinstance(t, existential and BNode or Variable):
                    yield t
    from FuXi.Rete.SidewaysInformationPassing import iterCondition, GetArgs
    #first we identify body variables
    bodyVars = set(reduce(lambda x, y: x + y,
                          [list(extractVariables(i, existential=False))
                           for i in iterCondition(horn_clause.body)]))
    #then we identify head variables
    headVars = set(reduce(lambda x, y: x + y,
                          [list(extractVariables(i, existential=False))
                           for i in iterCondition(horn_clause.head)]))
    #then we identify those variables that should (or should not) be converted to skolem terms
    updateDict = dict([(var, BNode()) for var in headVars if var not in bodyVars])
    if set(updateDict.keys()).intersection(GetArgs(horn_clause.head)):
        #There are skolem terms in the head
        newHead = copy.deepcopy(horn_clause.head)
        for uniTerm in iterCondition(newHead):
            newArg = [updateDict.get(i, i) for i in uniTerm.arg]
            uniTerm.arg = newArg
        horn_clause.head = newHead
    # Wrap body BNodes in an existential quantifier.
    skolemsInBody = [
        list(filter(
            lambda term: isinstance(term,
                                    BNode),
            GetArgs(lit)))
        for lit in iterCondition(horn_clause.body)]
    skolemsInBody = reduce(lambda x, y: x + y, skolemsInBody,
                           [])
    if skolemsInBody:
        newBody = copy.deepcopy(horn_clause.body)
        _e = Exists(formula=newBody, declare=set(skolemsInBody))
        horn_clause.body = _e
    # Same treatment for the head.
    PrepareHeadExistential(horn_clause)
def generatorFlattener(gen):
    """
    Recursively materialize a generator of conditions into either a single
    condition (when it yields one item) or a list/SetOperator of flattened
    conditions.

    NOTE(review): the `'next'` attribute checks detect Python 2 generators
    only (Python 3 uses `__next__`), and `listOrThingGenerator` is not
    defined anywhere in this module - that branch would raise NameError if
    reached. Verify against the wider package before relying on this path.
    """
    assert hasattr(gen, 'next')
    i = list(gen)
    # Single item -> unwrap; multiple -> flatten any nested generators.
    i = len(i) > 1 and [hasattr(i2, 'next') and generatorFlattener(i2) or i2 for i2 in i] or i[0]
    if hasattr(i, 'next'):
        i = listOrThingGenerator(i)
        # print(i)
        return i
    elif isinstance(i, SetOperator):
        i.formulae = [hasattr(i2, 'next') and generatorFlattener(i2) or i2 for i2 in i.formulae]
        #print(i)
        return i
    else:
        return i
def SkolemizeExistentialClasses(term, check=True):
    """
    Map an anonymous class term (BNode) to a ground URI in
    SKOLEMIZED_CLASS_NS; non-BNode terms are returned unchanged when
    `check` is True. With `check=False` the term is skolemized
    unconditionally.
    """
    if check:
        # Conditional expression instead of the original `and/or` idiom,
        # which silently falls through when the first operand is falsy.
        return SKOLEMIZED_CLASS_NS[term] if isinstance(term, BNode) else term
    return SKOLEMIZED_CLASS_NS[term]
def NormalizeBooleanClassOperand(term, owlGraph):
    """
    Skolemize `term` when it is an anonymous boolean class description or
    an owl:Restriction; otherwise return it unchanged.
    """
    isComplexDescription = (
        (isinstance(term, BNode) and IsaBooleanClassDescription(term, owlGraph))
        or IsaRestriction(term, owlGraph))
    # Conditional expression replaces the original `cond and a or b` idiom.
    return SkolemizeExistentialClasses(term) if isComplexDescription else term
def IsaBooleanClassDescription(term, owlGraph):
    """
    True if `term` is the subject of an owl:unionOf or owl:intersectionOf
    assertion in `owlGraph`.

    Returns an explicit False (the original implicitly returned None, which
    is equally falsy but less clear) when no such triple exists.
    """
    for _s, _p, _o in owlGraph.triples_choices((term, [OWL_NS.unionOf,
                                                       OWL_NS.intersectionOf], None)):
        return True
    return False
def IsaRestriction(term, owlGraph):
    # True if `term` is typed as an owl:Restriction in the graph.
    return (term, RDF.type, OWL_NS.Restriction) in owlGraph
def iterCondition(condition):
    """
    Return an iterable over a condition's constituents: a SetOperator
    (And/Or/...) is returned as-is (it iterates its members), anything
    else is wrapped in a single-item iterator.

    Uses a conditional expression instead of the original
    ``isinstance(...) and condition or iter([condition])`` idiom, which
    mis-wrapped an *empty* (falsy) SetOperator in a one-item iterator.
    """
    return condition if isinstance(condition, SetOperator) else iter([condition])
def Tc(owlGraph, negatedFormula):
    """
    Handles the conversion of negated DL concepts into a general logic programming
    condition for the body of a rule that fires when the body conjunct
    is in the minimal model

    :raises UnsupportedNegation: for negated concepts with no LP mapping.
    """
    if (negatedFormula, OWL_NS.hasValue, None) in owlGraph:
        #not ( R value i )
        # Translate the restriction via Tb, then negate the resulting
        # single body literal with NAF.
        bodyUniTerm = Uniterm(RDF.type,
                              [Variable("X"),
                               NormalizeBooleanClassOperand(negatedFormula, owlGraph)],
                              newNss=owlGraph.namespaces())
        condition = NormalizeClause(Clause(Tb(owlGraph, negatedFormula),
                                           bodyUniTerm)).body
        assert isinstance(condition, Uniterm)
        condition.naf = True
        return condition
    elif (negatedFormula, OWL_NS.someValuesFrom, None) in owlGraph:
        #not ( R some C )
        # Tb yields (binary role atom, unary filler atom); the negation is
        # expressed as: not R(x, y) v ( R(x, y) ^ not C(y) ).
        binaryRel, unaryRel = Tb(owlGraph, negatedFormula)
        negatedBinaryRel = copy.deepcopy(binaryRel)
        negatedBinaryRel.naf = True
        negatedUnaryRel = copy.deepcopy(unaryRel)
        negatedUnaryRel.naf = True
        return Or([negatedBinaryRel, And([binaryRel, negatedUnaryRel])])
    elif isinstance(negatedFormula, URIRef):
        # Named class: simple NAF class-membership literal.
        return Uniterm(RDF.type,
                       [Variable("X"),
                        NormalizeBooleanClassOperand(negatedFormula, owlGraph)],
                       newNss=owlGraph.namespaces(),
                       naf=True)
    else:
        raise UnsupportedNegation("Unsupported negated concept: %s" % negatedFormula)
class MalformedDLPFormulaError(NotImplementedError):
    """
    Raised for DL axioms that cannot be mapped into the def-Horn fragment
    (e.g. a universal restriction used outside the r.h.s. of
    rdfs:subClassOf - see handleConjunct).
    """
    def __init__(self, message):
        # BUGFIX: forward the message to the base Exception so that
        # str(exc) and exc.args carry it; the original only stored it on
        # `.message`, leaving str(exc) empty.
        super(MalformedDLPFormulaError, self).__init__(message)
        self.message = message
def handleConjunct(conjunction, owlGraph, o, conjunctVar=Variable('X')):
    """
    Translate the members of the RDF list `o` (an owl:intersectionOf
    collection) into LP body conjuncts, appending them to `conjunction`
    (mutated in place; no return value).

    :raises MalformedDLPFormulaError: if a member is a universal
        (owl:allValuesFrom) restriction, which is only valid as the r.h.s.
        of rdfs:subClassOf.
    """
    for bodyTerm in Collection(owlGraph, o):
        negatedFormula = False
        addToConjunct = None
        # If the member is a complement, the loop variable ends up truthy
        # (bound to the complemented class) and Tc supplies the conjunct.
        for negatedFormula in owlGraph.objects(subject=bodyTerm,
                                               predicate=OWL_NS.complementOf):
            addToConjunct = Tc(owlGraph, negatedFormula)
        if negatedFormula:
            #addToConjunct will be the term we need to add to the conjunct
            conjunction.append(addToConjunct)
        else:
            normalizedBodyTerm = NormalizeBooleanClassOperand(bodyTerm, owlGraph)
            bodyUniTerm = Uniterm(RDF.type, [conjunctVar, normalizedBodyTerm],
                                  newNss=owlGraph.namespaces())
            processedBodyTerm = Tb(owlGraph, bodyTerm, conjunctVar)
            classifyingClause = NormalizeClause(Clause(processedBodyTerm, bodyUniTerm))
            # redundantClassifierClause = processedBodyTerm == bodyUniTerm
            # Named (non-skolemized) class: a simple membership literal.
            if isinstance(normalizedBodyTerm, URIRef) and normalizedBodyTerm.find(SKOLEMIZED_CLASS_NS) == -1:
                conjunction.append(bodyUniTerm)
            elif (bodyTerm, OWL_NS.someValuesFrom, None) in owlGraph or\
                    (bodyTerm, OWL_NS.hasValue, None) in owlGraph:
                # Existential/value restriction: inline its translated body.
                conjunction.extend(classifyingClause.body)
            elif (bodyTerm, OWL_NS.allValuesFrom, None) in owlGraph:
                raise MalformedDLPFormulaError("Universal restrictions can only be used as the second argument to rdfs:subClassOf (GCIs)")
            elif (bodyTerm, OWL_NS.unionOf, None) in owlGraph:
                conjunction.append(classifyingClause.body)
            elif (bodyTerm, OWL_NS.intersectionOf, None) in owlGraph:
                conjunction.append(bodyUniTerm)
def T(owlGraph, complementExpansions=[], derivedPreds=[]):
    """
    #Subsumption (purely for TBOX classification)
    {?C rdfs:subClassOf ?SC. ?A rdfs:subClassOf ?C} => {?A rdfs:subClassOf ?SC}.
    {?C owl:equivalentClass ?A} => {?C rdfs:subClassOf ?A. ?A rdfs:subClassOf ?C}.
    {?C rdfs:subClassOf ?SC. ?SC rdfs:subClassOf ?C} => {?C owl:equivalentClass ?SC}.
    T(rdfs:subClassOf(C, D)) -> Th(D(y)) :- Tb(C(y))
    T(owl:equivalentClass(C, D)) -> { T(rdfs:subClassOf(C, D))
                                      T(rdfs:subClassOf(D, C)) }
    A generator over the Logic Programming rules which correspond
    to the DL ( unary predicate logic ) subsumption axiom described via rdfs:subClassOf
    """
    # --- owl:complementOf between two *named* classes ---
    # The negated class is translated by Tc into a (possibly NAF) condition.
    for s, p, o in owlGraph.triples((None, OWL_NS.complementOf, None)):
        if isinstance(o, URIRef) and isinstance(s, URIRef):
            headLiteral = Uniterm(RDF.type, [Variable("X"),
                                             SkolemizeExistentialClasses(s)],
                                  newNss=owlGraph.namespaces())
            yield NormalizeClause(Clause(Tc(owlGraph, o), headLiteral))
    # --- rdfs:subClassOf: Th(D(y)) :- Tb(C(y)) ---
    for c, p, d in owlGraph.triples((None, RDFS.subClassOf, None)):
        try:
            yield NormalizeClause(Clause(Tb(owlGraph, c), Th(owlGraph, d)))
        except UnsupportedNegation:
            # Axioms whose l.h.s. uses a negation Tc cannot express are skipped.
            import warnings
            warnings.warn("Unable to handle negation in DL axiom (%s), skipping" % c,  # e.msg,
                          SyntaxWarning,
                          3)
        #assert isinstance(c, URIRef), "%s is a kind of %s"%(c, d)
    # --- owl:equivalentClass: subClassOf-style rules in both directions ---
    # (skipped entirely for derived predicates)
    for c, p, d in owlGraph.triples((None, OWL_NS.equivalentClass, None)):
        if c not in derivedPreds:
            yield NormalizeClause(Clause(Tb(owlGraph, c), Th(owlGraph, d)))
            yield NormalizeClause(Clause(Tb(owlGraph, d), Th(owlGraph, c)))
    # --- owl:intersectionOf: O1 ^ ... ^ On <=> S ---
    for s, p, o in owlGraph.triples((None, OWL_NS.intersectionOf, None)):
        try:
            if s not in complementExpansions:
                if s in derivedPreds:
                    import warnings
                    warnings.warn("Derived predicate (%s) is defined via a conjunction (consider using a complex GCI) " % owlGraph.qname(s),
                                  SyntaxWarning,
                                  3)
                elif isinstance(s, BNode):  # and (None, None, s) not in owlGraph:# and \
                    #(s, RDFS.subClassOf, None) in owlGraph:
                    #complex GCI, pass over (handled) by Tb
                    continue
                conjunction = []
                handleConjunct(conjunction, owlGraph, o)
                body = And(conjunction)
                head = Uniterm(RDF.type, [Variable("X"),
                                          SkolemizeExistentialClasses(s)],
                               newNss=owlGraph.namespaces())
                # O1 ^ O2 ^ ... ^ On => S(?X)
                yield Clause(body, head)
                if isinstance(s, URIRef):
                    # S(?X) => O1 ^ O2 ^ ... ^ On
                    # special case, owl:intersectionOf is a neccessary and sufficient
                    # criteria and should thus work in *both* directions
                    # This rule is not added for anonymous classes or derived predicates
                    if s not in derivedPreds:
                        yield Clause(head, body)
        except UnsupportedNegation:
            import warnings
            warnings.warn("Unable to handle negation in DL axiom (%s), skipping" % s,  # e.msg,
                          SyntaxWarning,
                          3)
    # --- owl:unionOf (named classes): (O1 v ... v On)(?X) => S(?X) ---
    for s, p, o in owlGraph.triples((None, OWL_NS.unionOf, None)):
        if isinstance(s, URIRef):
            #special case, owl:unionOf is a neccessary and sufficient
            #criteria and should thus work in *both* directions
            body = Or([Uniterm(RDF.type, [Variable("X"),
                                          NormalizeBooleanClassOperand(i, owlGraph)],
                               newNss=owlGraph.namespaces()) \
                       for i in Collection(owlGraph, o)])
            head = Uniterm(RDF.type, [Variable("X"), s], newNss=owlGraph.namespaces())
            yield Clause(body, head)
    # --- owl:inverseOf ---
    for s, p, o in owlGraph.triples((None, OWL_NS.inverseOf, None)):
        # T(owl:inverseOf(P, Q)) -> { Q(x, y) :- P(y, x)
        #                             P(y, x) :- Q(x, y) }
        newVar = Variable(BNode())
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        o = SkolemizeExistentialClasses(o) if isinstance(o, BNode) else o
        body1 = Uniterm(s, [newVar, Variable("X")], newNss=owlGraph.namespaces())
        head1 = Uniterm(o, [Variable("X"), newVar], newNss=owlGraph.namespaces())
        yield Clause(body1, head1)
        newVar = Variable(BNode())
        body2 = Uniterm(o, [Variable("X"), newVar], newNss=owlGraph.namespaces())
        head2 = Uniterm(s, [newVar, Variable("X")], newNss=owlGraph.namespaces())
        yield Clause(body2, head2)
    # --- owl:TransitiveProperty ---
    for s, p, o in owlGraph.triples((None, RDF.type, OWL_NS.TransitiveProperty)):
        #T(owl:TransitiveProperty(P)) -> P(x, z) :- P(x, y) ^ P(y, z)
        y = Variable(BNode())
        z = Variable(BNode())
        x = Variable("X")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        body = And([Uniterm(s, [x, y], newNss=owlGraph.namespaces()), \
                    Uniterm(s, [y, z], newNss=owlGraph.namespaces())])
        head = Uniterm(s, [x, z], newNss=owlGraph.namespaces())
        yield Clause(body, head)
    # --- rdfs:subPropertyOf ---
    for s, p, o in owlGraph.triples((None, RDFS.subPropertyOf, None)):
        # T(rdfs:subPropertyOf(P, Q)) -> Q(x, y) :- P(x, y)
        x = Variable("X")
        y = Variable("Y")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        o = SkolemizeExistentialClasses(o) if isinstance(o, BNode) else o
        body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
        head = Uniterm(o, [x, y], newNss=owlGraph.namespaces())
        yield Clause(body, head)
    # --- owl:equivalentProperty: rules in both directions ---
    for s, p, o in owlGraph.triples((None, OWL_NS.equivalentProperty, None)):
        # T(owl:equivalentProperty(P, Q)) -> { Q(x, y) :- P(x, y)
        #                                      P(x, y) :- Q(x, y) }
        x = Variable("X")
        y = Variable("Y")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        o = SkolemizeExistentialClasses(o) if isinstance(o, BNode) else o
        body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
        head = Uniterm(o, [x, y], newNss=owlGraph.namespaces())
        yield Clause(body, head)
        yield Clause(head, body)
    #Contribution (Symmetric DL roles)
    for s, p, o in owlGraph.triples((None, RDF.type, OWL_NS.SymmetricProperty)):
        #T(owl:SymmetricProperty(P)) -> P(y, x) :- P(x, y)
        y = Variable("Y")
        x = Variable("X")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
        head = Uniterm(s, [y, x], newNss=owlGraph.namespaces())
        yield Clause(body, head)
    # --- rdfs:range / rdfs:domain ---
    for s, p, o in owlGraph.triples_choices((None,
                                             [RDFS.range, RDFS.domain],
                                             None)):
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        if p == RDFS.range:
            #T(rdfs:range(P, D)) -> D(y) := P(x, y)
            x = Variable("X")
            y = Variable(BNode())
            body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
            head = Uniterm(RDF.type, [y, o], newNss=owlGraph.namespaces())
            yield Clause(body, head)
        else:
            #T(rdfs:domain(P, D)) -> D(x) := P(x, y)
            x = Variable("X")
            y = Variable(BNode())
            body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
            head = Uniterm(RDF.type, [x, o], newNss=owlGraph.namespaces())
            yield Clause(body, head)
def LloydToporTransformation(clause, fullReduction=True):
    """
    Tautological, common horn logic forms (useful for normalizing
    conjunctive & disjunctive clauses)
    (H ^ H0) :- B -> { H :- B
                       H0 :- B }
    (H :- H0) :- B -> H :- B ^ H0
    H :- (B v B0) -> { H :- B
                       H :- B0 }

    Generator over the reduced clauses.
    """
    assert isinstance(clause, OriginalClause), repr(clause)
    assert isinstance(clause.body, Condition), repr(clause)
    if isinstance(clause.body, Or):
        # H :- (B v B0) -> one clause per disjunct, recursively reduced.
        for atom in clause.body.formulae:
            # NOTE(review): 'next' attribute check detects Py2 generators only.
            if hasattr(atom, 'next'):
                atom = first(atom)
            for clz in LloydToporTransformation(
                    NormalizeClause(
                        Clause(atom,
                               clause.head)
                    ),
                    fullReduction=fullReduction):
                yield clz
    elif isinstance(clause.head, OriginalClause):
        # (H :- H0) :- B -> H :- B ^ H0
        yield NormalizeClause(Clause(And([clause.body, clause.head.body]), clause.head.head))
    elif fullReduction and (
            (isinstance(clause.head, Exists) and
             isinstance(clause.head.formula, And)) or isinstance(clause.head, And)):
        # (H ^ H0) :- B -> one clause per head conjunct (unwrapping Exists).
        if isinstance(clause.head, Exists):
            head = clause.head.formula
        elif isinstance(clause.head, And):
            head = clause.head
        for i in head:
            for j in LloydToporTransformation(Clause(clause.body, i),
                                              fullReduction=fullReduction):
                if [i for i in breadth_first(j.head) if isinstance(i, And)]:
                    #Ands in the head need to be further flattened
                    yield PrepareHeadExistential(NormalizeClause(j))
                else:
                    yield PrepareHeadExistential(j)
    else:
        # Already in reduced form.
        yield clause
def Th(owlGraph, _class, variable=Variable('X'), position=LHS):
    """
    DLP head (antecedent) knowledge assertional forms (ABox assertions,
    conjunction of ABox assertions, and universal role restriction
    assertions). Generator over the translated head forms for `_class`
    applied to `variable`.
    """
    props = list(set(owlGraph.predicates(subject=_class)))
    if OWL_NS.allValuesFrom in props:
        #http://www.w3.org/TR/owl-semantics/#owl_allValuesFrom
        for s, p, o in owlGraph.triples((_class, OWL_NS.allValuesFrom, None)):
            prop = list(owlGraph.objects(subject=_class, predicate=OWL_NS.onProperty))[0]
            newVar = Variable(BNode())
            # (P all C): the filler C applies to every newVar reached from
            # `variable` via prop - expressed as an embedded clause.
            body = Uniterm(prop, [variable, newVar], newNss=owlGraph.namespaces())
            for head in Th(owlGraph, o, variable=newVar):
                yield Clause(body, head)
    elif OWL_NS.hasValue in props:
        # (P value i): a single role assertion.
        prop = list(owlGraph.objects(subject=_class, predicate=OWL_NS.onProperty))[0]
        o = first(owlGraph.objects(subject=_class, predicate=OWL_NS.hasValue))
        yield Uniterm(prop, [variable, o], newNss=owlGraph.namespaces())
    elif OWL_NS.someValuesFrom in props:
        #http://www.w3.org/TR/owl-semantics/#someValuesFrom
        for s, p, o in owlGraph.triples((_class, OWL_NS.someValuesFrom, None)):
            prop = list(owlGraph.objects(subject=_class, predicate=OWL_NS.onProperty))[0]
            # Fresh BNode witnesses the existential.
            newVar = BNode()
            yield And([Uniterm(prop, [variable, newVar], newNss=owlGraph.namespaces()),
                       generatorFlattener(Th(owlGraph, o, variable=newVar))])
    elif OWL_NS.intersectionOf in props:
        from FuXi.Syntax.InfixOWL import BooleanClass
        yield And([first(Th(owlGraph, h, variable)) for h in BooleanClass(_class)])
    else:
        #Simple class
        yield Uniterm(RDF.type, [variable,
                                 isinstance(_class, BNode) and SkolemizeExistentialClasses(_class) or _class],
                      newNss=owlGraph.namespaces())
def Tb(owlGraph, _class, variable=Variable('X')):
    """
    DLP body (consequent) knowledge assertional forms: ABox assertions,
    conjunction / disjunction of ABox assertions, and existential role
    restriction assertions. These are all common EL++ templates for KR.
    Returns a single condition (And / Or / Uniterm) for `_class` applied
    to `variable`.
    """
    props = list(set(owlGraph.predicates(subject=_class)))
    if OWL_NS.intersectionOf in props and not isinstance(_class, URIRef):
        # Anonymous conjunction: expand members via handleConjunct.
        for s, p, o in owlGraph.triples((_class, OWL_NS.intersectionOf, None)):
            conj = []
            handleConjunct(conj, owlGraph, o, variable)
            return And(conj)
        # NOTE(review): implicitly returns None if the triple iterator is
        # empty - presumably unreachable since the predicate is in `props`.
    elif OWL_NS.unionOf in props and not isinstance(_class, URIRef):
        #http://www.w3.org/TR/owl-semantics/#owl_unionOf
        for s, p, o in owlGraph.triples((_class, OWL_NS.unionOf, None)):
            return Or([Tb(owlGraph, c, variable=variable) \
                       for c in Collection(owlGraph, o)])
    elif OWL_NS.someValuesFrom in props:
        #http://www.w3.org/TR/owl-semantics/#owl_someValuesFrom
        prop = list(owlGraph.objects(subject=_class, predicate=OWL_NS.onProperty))[0]
        o = list(owlGraph.objects(subject=_class, predicate=OWL_NS.someValuesFrom))[0]
        # Fresh variable witnesses the existential.
        newVar = Variable(BNode())
        # body = Uniterm(prop, [variable, newVar], newNss=owlGraph.namespaces())
        # head = Th(owlGraph, o, variable=newVar)
        return And([Uniterm(prop, [variable, newVar], newNss=owlGraph.namespaces()),
                    Tb(owlGraph, o, variable=newVar)])
    elif OWL_NS.hasValue in props:
        #http://www.w3.org/TR/owl-semantics/#owl_hasValue
        #Domain-specific rules for hasValue
        #Can be achieved via pD semantics
        prop = list(owlGraph.objects(subject=_class, predicate=OWL_NS.onProperty))[0]
        o = first(owlGraph.objects(subject=_class, predicate=OWL_NS.hasValue))
        return Uniterm(prop, [variable, o], newNss=owlGraph.namespaces())
    elif OWL_NS.complementOf in props:
        # Negation handled by Tc (may raise UnsupportedNegation).
        return Tc(owlGraph, first(owlGraph.objects(_class, OWL_NS.complementOf)))
    else:
        #simple class
        #"Named" Uniterm
        _classTerm = SkolemizeExistentialClasses(_class)
        return Uniterm(RDF.type, [variable, _classTerm], newNss=owlGraph.namespaces())
# from FuXi.DLP import OWL_NS
# from FuXi.DLP import LOG
# from FuXi.DLP import Any
# from FuXi.DLP import LHS
# from FuXi.DLP import RHS
# from FuXi.DLP import breadth_first
# from FuXi.DLP import breadth_first_replace
# from FuXi.DLP import DisjunctiveNormalForm
# from FuXi.DLP import ExtendN3Rules
# from FuXi.DLP import generatorFlattener
# from FuXi.DLP import handleConjunct
# from FuXi.DLP import IsaBooleanClassDescription
# from FuXi.DLP import IsaFactFormingConclusion
# from FuXi.DLP import IsaRestriction
# from FuXi.DLP import iterCondition
# from FuXi.DLP import LloydToporTransformation
# from FuXi.DLP import makeRule
# from FuXi.DLP import MapDLPtoNetwork
# from FuXi.DLP import NormalizeBooleanClassOperand
# from FuXi.DLP import NormalizeClause
# from FuXi.DLP import PrepareHeadExistential
# from FuXi.DLP import PrepareHornClauseForRETE
# from FuXi.DLP import reduceAnd
# from FuXi.DLP import SkolemizeExistentialClasses
# from FuXi.DLP import T
# from FuXi.DLP import Tb
# from FuXi.DLP import Tc
# from FuXi.DLP import Th
# from FuXi.DLP import traverseClause
# from FuXi.DLP import UnsupportedNegation
# from FuXi.DLP import Clause
# from FuXi.DLP import MalformedDLPFormulaError
| {
"content_hash": "22f9e5c7d5c09d8475ce8b0d4b9e3e75",
"timestamp": "",
"source": "github",
"line_count": 926,
"max_line_length": 140,
"avg_line_length": 43.6231101511879,
"alnum_prop": 0.5919792053471964,
"repo_name": "mpetyx/pyrif",
"id": "414abe3e920d21da8161417079c3f9e9bb5a5a66",
"size": "40443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrif/FuXi/DLP/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AGS Script",
"bytes": "260965"
},
{
"name": "CSS",
"bytes": "16242"
},
{
"name": "JavaScript",
"bytes": "26014"
},
{
"name": "Perl",
"bytes": "1339"
},
{
"name": "Prolog",
"bytes": "5556"
},
{
"name": "Python",
"bytes": "788856"
},
{
"name": "Shell",
"bytes": "3156"
},
{
"name": "XSLT",
"bytes": "26298"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Follows 0009 (removal of SignprotStructure.extra_proteins).
    dependencies = [
        ('signprot', '0009_remove_signprotstructure_extra_proteins'),
    ]
    # Redefine SignprotStructureExtraProteins.structure as a nullable FK,
    # keeping CASCADE deletion and the 'extra_proteins' reverse accessor.
    operations = [
        migrations.AlterField(
            model_name='signprotstructureextraproteins',
            name='structure',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='extra_proteins', to='signprot.SignprotStructure'),
        ),
    ]
| {
"content_hash": "1ff7ec78884a844549ffdabca69f3d2a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 156,
"avg_line_length": 30.88235294117647,
"alnum_prop": 0.6742857142857143,
"repo_name": "protwis/protwis",
"id": "599fba9d8a0323912eab17ff9863e6294712fd16",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signprot/migrations/0010_auto_20201021_1033.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167612"
},
{
"name": "HTML",
"bytes": "2477269"
},
{
"name": "JavaScript",
"bytes": "3119217"
},
{
"name": "Promela",
"bytes": "467"
},
{
"name": "Python",
"bytes": "4289933"
}
],
"symlink_target": ""
} |
import subprocess
import sys
try:
import importlib.resources as importlib_resources
except ImportError:
import importlib_resources # type: ignore
import pytest
@pytest.mark.slow
def test_issue519():
    """
    Test ability of Thinc mypy plugin to handle variadic arguments.
    This test can take up to 45 seconds, and is thus marked as slow.
    """
    # Determine the name of the parent module (which contains the test program)
    parent_module_name = __name__[: __name__.rfind(".")]
    # Load test program that calls a Thinc API with variadic arguments
    program_text = importlib_resources.read_text(parent_module_name, "program.py")
    # Ask Mypy to type-check the loaded program text.
    # check=True makes the test fail (CalledProcessError) if mypy exits non-zero.
    subprocess.run(
        [sys.executable, "-m", "mypy", "--command", program_text], check=True
    )
| {
"content_hash": "13874fd5d541a8c4eb0ba1df213c03fc",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 82,
"avg_line_length": 28.17241379310345,
"alnum_prop": 0.6940024479804161,
"repo_name": "explosion/thinc",
"id": "02601f0d7860475f221e68da486f7d110aad108d",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thinc/tests/regression/issue519/test_issue519.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "14601"
},
{
"name": "Cuda",
"bytes": "23473"
},
{
"name": "Cython",
"bytes": "77312"
},
{
"name": "Dockerfile",
"bytes": "430"
},
{
"name": "JavaScript",
"bytes": "57198"
},
{
"name": "Python",
"bytes": "668790"
},
{
"name": "Sass",
"bytes": "29988"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import pytest
from sbdc.datasets import bbc_load
def test_bbc_default():
    """Loading BBC with no class filter yields the full 2225-row, 3-column frame."""
    dataset = bbc_load()
    assert dataset is not None
    assert dataset.shape[0] == 2225
    assert dataset.shape[1] == 3
def test_bbc_custom_classes():
    """Restricting the loader to one class yields only that class's rows."""
    subset = bbc_load(classes=["politics"])
    assert subset is not None
    assert subset.shape[0] == 417
    assert subset.shape[1] == 3
def test_bbc_invalid_custom_classes():
    """An unknown class name makes the loader return None."""
    result = bbc_load(classes=["i_am_invalid"])
    assert result is None
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
| {
"content_hash": "8d4260d49c081c59734d9708ba06861b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 54,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.6722054380664653,
"repo_name": "gakhov/sbdc",
"id": "4067be4af8d8255169a9c70078de8c6d19451d08",
"size": "686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sbdc/datasets/test_bbc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16696"
}
],
"symlink_target": ""
} |
'''
Following algorithm refers to the obsolete part:
Input: Hand Mask nonzero xy_points
Use laplacian of binary mask
0.Find entry segment
1.Calculate segment middle point
2.Transform nonzero points across the right semiplane* to new_polar coordinates ,
with reference center the point found and reference angle the one of the normal
to the semiplane*.
3.Start augmenting a semicircle from the reference center in the
semiplane* ,calculate zero crossings on its perimeter, with low step,
and append the nonzero points to a 2d list.
4.While 4 zerocrossings (2 edge points), calculate distance of white points and mean angle:
i. If mean angle changes, assume joint existence. Record segment as joint and go to 1.
ii. If distance increases steadily inside a checked window, assume reach of
the wrist (still disputed, so don't do anything serious here). Record the mean
segment as a special one.
iii.If distance severely decreases, assume wrist bypassed. Begin from the
segment in list where everything was ok. Go to 6
5. If more than 4 zerocrossings, with max distance approximately same as the
distance with 4 zerocrossings, assume reach of fingers.Record mean segment as
special one. Go back to where 4 zerocrossings were observed:
If 4ii happened:
find mean segment between the two special ones. Go to 6
else:
go to 6.
6.
i. do 1 and 2
ii. find 4 closest points with almost the same radius, whose angle sum
is closer to zero.
iii. Palm is found. Record palm circle
*Semiplane is considered the one with limits defined by the normal to the reference
segment. Reference segment is found by finding the normal to the axis of link.
The direction is at 0-th joint towards the center of the image and
at n-th joint defined by previous directions.
'''
import logging
from math import pi
import os, sys
# import time
import numpy as np
import cv2
import class_objects as co
# Extra modules only needed when this file is executed directly
# (profiling / benchmarking helpers).
if __name__ == '__main__':
    import urllib
    import yaml
    import timeit
    import cProfile as profile
def with_laplacian(binarm):
    '''
    Edge map of a binary mask, computed with the 8-bit Laplacian operator
    '''
    edges = cv2.Laplacian(binarm, cv2.CV_8U)
    return edges
def usingcontours(imag):
    '''
    Find edges using cv2.findContours

    Returns an image of the same shape as ``imag`` with the pixels of the
    first detected contour set to 255.
    '''
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
    # but (contours, hierarchy) in OpenCV 2/4; index -2 selects the
    # contours list in every version (the original [1] broke outside 3.x).
    points = np.transpose(cv2.findContours(
        imag.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2][0])
    tmp = np.zeros_like(imag)
    # points is (2, N) as (x, y); reverse to (y, x) for array indexing
    tmp[tuple(points[::-1, :])] = 255
    return tmp
def find_rows_in_array(arr, rows):
    '''
    find indices of rows in array if they exist

    Returns a boolean mask over the rows of ``arr`` that is True at the
    first occurrence of each row that is also present in ``rows``.
    '''
    # hits[i, j] == 1 exactly when arr[i, :] equals rows[j, :]
    hits = np.prod(np.swapaxes(arr[:, :, None], 1, 2) == rows, axis=2)
    # the running count along axis 0 equals 1 only at the first match of
    # each column, so this keeps first occurrences exclusively
    first_hits = np.cumsum(hits, axis=0) * hits == 1
    return np.sum(first_hits, axis=1) > 0
def array_row_intersection(arr1, arr2):
    '''
    Returns rows, which exist in both arr1 and arr2

    Only the first occurrence of each shared row of ``arr1`` is kept.
    '''
    # membership[i, j] is 1 when arr1[i, :] == arr2[j, :]
    membership = np.prod(np.swapaxes(arr1[:, :, None], 1, 2) == arr2, axis=2)
    # keep only the first occurrence of every matched row
    keep_first = np.cumsum(membership, axis=0) * membership == 1
    return arr1[np.sum(keep_first, axis=1) > 0]
def find_trues_segments(inp, iscircular):
    '''
    Locate runs of True/nonzero values in a 1D array.

    Returns an (n_segments, 2) integer array of (start, end) index pairs,
    one row per run.  When ``iscircular`` is True the array is treated as
    circular: it is first rotated to start on a minimal (False) element and
    the reported indices are mapped back modulo ``len(inp)``.

    Note: the two branches intentionally use slightly different offsets
    into the padded difference array; existing callers compensate for this,
    so the offsets are preserved as-is.
    '''
    if iscircular:
        # rotate so the array starts at a minimal element; undone below
        shift = np.argmin(inp)
        inp = np.roll(inp, -shift)
        padded = np.concatenate(([0], inp, [0]))
        dif = np.diff(padded)[1:]
    else:
        padded = np.concatenate(([0], inp, [0]))
        dif = np.diff(padded)
    # +1 transitions mark run starts, -1 transitions mark run ends.
    # np.flatnonzero replaces the previous cv2.findNonZero round-trip and,
    # unlike it, does not crash (findNonZero returns None) when `inp`
    # contains no runs at all.
    ind_start = np.flatnonzero(np.roll(dif == 1, 1))
    ind_end = np.flatnonzero(dif == -1)
    if iscircular:
        ind_start = (ind_start + shift) % len(inp)
        ind_end = (ind_end + shift) % len(inp)
    return np.concatenate((ind_start[:, None], ind_end[:, None]), axis=-1)
def find_largest_trues_segment(inp, iscircular=True, ret_filtered=True):
    '''
    Find the longest run of True values in a 1D boolean array.

    Returns the (start, end) indices of the longest run and, when
    ``ret_filtered`` is True, a boolean copy of ``inp`` with every element
    outside that run cleared.
    '''
    # shift defaults to 0 so the un-rotation below is well defined even on
    # paths that never compute it (all-True input, iscircular=False);
    # previously those paths raised NameError.
    shift = 0
    if inp.sum() == inp.size:
        # everything is True: the whole array is the segment
        res = np.array([0, inp.size - 1])
        filtered = inp.copy()
    else:
        if iscircular:
            # rotate so the array starts on a False element
            shift = np.argmin(inp)
            inp = np.roll(inp, -shift)
        a = np.concatenate(([0], inp, [0]))
        dif = np.diff(a)[1:]
        # run starts (+1 transitions) and ends (-1 transitions);
        # np.flatnonzero replaces the former cv2.findNonZero round-trip
        ind_start = np.flatnonzero(np.roll(dif == 1, 1))
        ind_end = np.flatnonzero(dif == -1)
        # mass of each run = cumulative True count at its end, differenced
        tmp = np.cumsum(inp)[ind_end]
        mass = np.diff(np.concatenate(([0], tmp)))
        ind_largest = np.argmax(mass)
        res = np.array([ind_start[ind_largest], ind_end[ind_largest]])
        if ret_filtered:
            # zero everything outside the winning run, then undo rotation
            filtered = inp.copy()
            filtered[:res[0]] = 0
            filtered[res[1] + 1:] = 0
            filtered = np.roll(filtered, shift)
        if iscircular:
            # map indices back to the original (unrotated) positions
            res = res + shift
            res = res % len(inp)
    if ret_filtered:
        return res, filtered.astype(bool)
    else:
        return res
def find_corrected_point(polar_points, ref_angle,
                         ref_point, ref_radius,
                         entry_angles=None, entry_points=None,
                         width=None):
    '''
    Find matching cocircular point.
    if entry_angles are given, the best point is found to be the one with the
    least angle difference with ref_angle. polar points should
    else if entry_points are given, the best point is found to be the one with
    distance from the entry_points similar to the distance defined by
    entry_points. entry_points should be in cartesian coordinates

    Returns (best polar point, same point in cartesian coordinates, all
    cocircular candidate points).
    NOTE(review): the second docstring sentence is truncated in the
    original ("polar points should ..."); its intent is not recoverable.
    '''
    # candidates lying within ~half-pixel radial resolution of the circle
    # with radius ref_radius around the polar origin
    resolution = np.sqrt(2) / 2.0
    cocircular_points = co.pol_oper.find_cocircular_points(
        polar_points, ref_radius, resolution)
    # cocircular_points[np.abs(cocircular_points[:, 1] + pi) < 0.001, 1] *= -1
    if entry_angles is not None:
        # angular deviation of every candidate from each entry angle
        check1 = np.abs(entry_angles[0] -
                        (cocircular_points[:, 1] + ref_angle))
        check2 = np.abs(entry_angles[1] -
                        (cocircular_points[:, 1] + ref_angle))
    else:
        if width is None:
            width = calculate_cart_dists(entry_points).squeeze()
        cocirc_cart_points = co.pol_oper.polar_to_cart(cocircular_points, ref_point,
                                                       ref_angle)
        # deviation of the candidate<->entry-point distance from `width`
        check1 = np.abs(calculate_cart_dists(cocirc_cart_points,
                                             entry_points[0, :]) - width)
        check2 = np.abs(calculate_cart_dists(cocirc_cart_points,
                                             entry_points[1, :]) - width)
    # pick the candidate with the overall smallest deviation across both checks
    _argmin1 = np.argmin(check1)
    _min1 = check1[_argmin1]
    _argmin2 = np.argmin(check2)
    _min2 = check2[_argmin2]
    if np.abs(_min1) < np.abs(_min2):
        corrected_pol_point = cocircular_points[_argmin1].ravel()[0:2]
    else:
        corrected_pol_point = cocircular_points[_argmin2].ravel()[0:2]
    corrected_cart_point = co.pol_oper.polar_to_cart(np.array([corrected_pol_point]), ref_point,
                                                     ref_angle).squeeze()
    return corrected_pol_point, corrected_cart_point, cocircular_points
def find_segment_to_point_box(positions, entry_segment, point):
    '''
    Returns all positions belonging to an orthogonal, which has one side equal
    to the entry segment and all the other sides are defined by the other
    point, for which it is considered that it will belong to the opposite side
    of the side that is the entry_segment.

    Returns (positions inside the box, unit vector pointing from ``point``
    towards the segment).  Degenerate inputs (point within distance 5 of
    the segment line) yield a single empty 2D array instead, which makes
    tuple-unpacking callers raise ValueError.
    '''
    entry_segment = np.array(entry_segment)
    seg_vec = entry_segment[1, :] - entry_segment[0, :]
    seg_len2 = float(np.dot(seg_vec, seg_vec))
    # projection ratio of `point` onto the segment's supporting line
    if seg_len2 == 0:
        proj_ratio = 0
    else:
        proj_ratio = np.dot(seg_vec,
                            point - entry_segment[0, :]) / seg_len2
    # vector (and its length) from `point` to its projection on the line
    foot_offset = proj_ratio * seg_vec + entry_segment[0, :] - point
    base_diff = entry_segment[0, :] - point
    foot_dist = np.sqrt(proj_ratio * proj_ratio * seg_len2 +
                        np.dot(base_diff, base_diff) +
                        2 * proj_ratio * np.dot(seg_vec, base_diff))
    if foot_dist <= 5:
        return np.array([[]])
    pos_diff = positions - entry_segment[0, :]
    # unit normal points from `point` towards the segment
    unit_normal = foot_offset / foot_dist
    # coordinates of every position in the (along-segment, across) frame
    along = (pos_diff[:, 0] * seg_vec[0] / seg_len2 +
             pos_diff[:, 1] * seg_vec[1] / seg_len2)
    across = pos_diff[:, 0] * unit_normal[0] + \
        pos_diff[:, 1] * unit_normal[1]
    bound = -(foot_offset[0] * unit_normal[0] +
              foot_offset[1] * unit_normal[1])
    upper = max([bound, 0])
    lower = min([bound, 0])
    inside = (across < upper) * (across > lower) * (along < 1) * (along > 0)
    return (positions[inside, :], unit_normal)
def calculate_chords_lengths(polar_points):
    '''
    Assuming polar points are sorted by angle

    Approximates each chord between consecutive polar points using the
    mean radius of its endpoints and their wrapped angle difference.
    '''
    mean_radii = (polar_points[:-1, 0] + polar_points[1:, 0]) / 2.0
    angle_gaps = co.pol_oper.mod_diff(polar_points[:-1, 1],
                                      polar_points[1:, 1])
    # chord length of an isosceles triangle with equal sides = mean radius
    return mean_radii * np.sqrt(2 * (1 - np.cos(angle_gaps)))
def calculate_cart_dists(cart_points, cart_point=None):
    '''
    Input either numpy array either 2*2 list
    Second optional argument is a point

    Without ``cart_point``: Euclidean distances between consecutive rows
    (with a scalar fallback for a plain 2x2 nested list).  With
    ``cart_point``: distance from every row to that point.
    '''
    if cart_point is not None:
        dx = cart_points[:, 0] - cart_point[0]
        dy = cart_points[:, 1] - cart_point[1]
        return np.sqrt(dx * dx + dy * dy)
    try:
        dx = cart_points[1:, 0] - cart_points[:-1, 0]
        dy = cart_points[1:, 1] - cart_points[:-1, 1]
        return np.sqrt(dx * dx + dy * dy)
    except (TypeError, AttributeError):
        # plain 2x2 nested list: single pairwise distance
        return np.sqrt((cart_points[0][0] - cart_points[1][0])**2 +
                       (cart_points[0][1] - cart_points[1][1])**2)
def find_nonzero(arr):
    '''
    Returns nonzero indices of 2D array

    The result is an (N, 2) array of (row, col) pairs in row-major scan
    order.
    '''
    # np.argwhere yields the same (row, col) pairs, in the same order, as
    # the previous np.fliplr(cv2.findNonZero(arr).squeeze()), but also
    # handles the zero-nonzero case (findNonZero returned None) and the
    # single-nonzero case (squeeze() collapsed to 1D and broke fliplr).
    return np.argwhere(arr)
class LHOGE(object):
    '''
    Per-segment gradient-orientation histogram entropy over a link contour.

    Splits a link's minimum-area rectangle into overlapping windows,
    computes a gradient-angle histogram per window, and writes each
    window's histogram entropy back onto an image (see calculate_single).
    '''

    def __init__(self, frame=None):
        # number of orientation histogram bins, from project-wide constants
        self.hist_bin_size = co.CONST['HOG_bin_size']
        # gradient orientations are folded into [0, pi]
        self.hist_range = [[0, pi]]
        # fraction of the window length shared by consecutive windows
        self.win_overlap_ratio = 0.3
        self.frame = frame

    def grad_angles(self, img):
        '''
        Compute gradient angles on image patch for GHOG
        '''
        gradx, grady = np.gradient(img.astype(float))
        ang = np.arctan2(grady, gradx)
        # fold negative angles into [0, pi) so gradient sign is ignored
        ang[ang < 0] = pi + ang[ang < 0]
        return ang  # returns values 0 to pi

    def hist_data(self, sample, weights=None):
        '''
        Compute normalized N-D histograms
        '''
        # default: uniform weights summing to 1, i.e. a probability histogram
        if weights is None:
            weights = np.ones(sample.shape[0]) / float(sample.shape[0])
        hist, edges = np.histogramdd(sample, self.hist_bin_size,
                                     range=self.hist_range,
                                     weights=weights)
        return hist, edges

    def calculate_single(self, link):
        '''
        Compute LHOGE features for single link.
        link : points belonging to link contour

        Returns an image where each window's pixels carry the (maximum,
        over overlapping windows) entropy of that window's gradient-angle
        histogram.
        '''
        segments = self.segment_link(link)
        img = np.zeros(self.frame.shape)
        res = np.zeros(self.frame.shape)
        count = 1
        inp = self.grad_angles(self.frame)
        segment_ent = np.zeros(segments.shape[0])
        for segment in segments:
            # rasterize this window with a unique integer label
            cv2.drawContours(img, [np.int0(segment)], 0, count, -1)
            segment_hog, _ = self.hist_data(inp[img == count])
            # Shannon entropy over the nonzero histogram bins
            segment_ent[
                count - 1] = -np.sum(np.log2(segment_hog[segment_hog != 0]) * segment_hog[segment_hog != 0])
            # overlapping windows keep the larger entropy per pixel
            res[img == count] = np.maximum(
                res[img == count], segment_ent[count - 1])
            count += 1
        return res

    def segment_link(self, link):
        '''
        Split the link's minimum-area rectangle into overlapping
        quadrilateral windows along its long axis.
        '''
        box = cv2.boxPoints(cv2.minAreaRect(link))
        l1 = np.linalg.norm(box[0, :] - box[1, :])
        l2 = np.linalg.norm(box[1, :] - box[2, :])
        # orient corner indices so (i11,i21) and (i12,i22) span the long side
        if l1 > l2:
            length = l1
            width = l2
            i11 = 0
            i21 = 1
            i12 = 3
            i22 = 2
        else:
            length = l2
            width = l1
            i11 = 0
            i21 = 3
            i12 = 1
            i22 = 2
        # window length along the link equals the link width
        hog_win_len = width
        hog_overlap = width * self.win_overlap_ratio
        st_rat = np.arange(0,
                           1,
                           hog_overlap / float(length))[:, None]
        en_rat = np.arange(hog_win_len / float(length),
                           1 + hog_overlap / float(length),
                           hog_overlap / float(length))[:, None]
        en_rat = np.minimum(en_rat, 1)
        st_rat = st_rat[:en_rat.size]
        # interpolate window corners along both long sides of the box
        st1 = st_rat * (box[i21, :]) + (1 - st_rat) * box[i11, :]
        en1 = en_rat * (box[i21, :]) + (1 - en_rat) * box[i11, :]
        st2 = st_rat * (box[i22, :]) + (1 - st_rat) * box[i12, :]
        en2 = en_rat * (box[i22, :]) + (1 - en_rat) * box[i12, :]
        segments = np.concatenate((st1[:, None, :], en1[:, None, :], en2[
            :, None, :], st2[:, None, :]), axis=1)
        segments = np.maximum(np.int0(segments), 0)
        return segments

    def add_frame(self, frame):
        '''Set the frame that later calculations operate on.'''
        self.frame = frame
class FindArmSkeleton(object):
'''
Class to find arm skeleton and segment arm contour relatively to joints
'''
    def __init__(self, frame=None, angle_bin_num=500, min_coords_num=50,
                 max_links=8, link_width_thres=50, draw=False, focus='speed'):
        '''
        frame : optional initial frame; used to load calibration data and
            precompute cartesian/polar pixel position grids
        angle_bin_num : number of angle bins used by the ray histogram
        min_coords_num : stop skeletonizing below this many contour points
        max_links : upper bound on the number of skeleton links
        link_width_thres : links shorter than this get merged with the
            previous one
        draw : enable debug visualization
        focus : 'speed' or 'accuracy' variant of the longest-ray detection
        '''
        # skeleton holds the cartesian skeleton joints
        self.skeleton = []
        # skeleton_widths is populated only with rectangle_approx method and holds the
        # segments that define each joint
        self.skeleton_widths = []
        # surrounding_skel holds the contour points that refer to each link
        self.surrounding_skel = []
        # hand_start holds the starting point of the hand, relatively to rest of the arm
        self.hand_start = None
        self.init_point = None
        self.entry = None
        self.entry_inds = None
        self.contour = None
        self.filter_mask = None
        self.frame = None
        self.img = None
        self.armpoints = None
        self.hand_mask = None
        self.polar = None
        self.draw = draw
        self.angle_bin_num = angle_bin_num
        self.min_coords_num = min_coords_num
        self.max_links = max_links
        self.link_width_thres = link_width_thres
        self.positions_initiated = True
        self.focus = focus
        if frame is not None:
            if not co.edges.exist:
                co.edges.load_calib_data(whole_im=True, img=frame)
        if co.meas.polar_positions is None:
            if frame is None:
                # positions cannot be built yet; reset() builds them later
                self.positions_initiated = False
            else:
                (self.all_cart_positions,
                 self.all_polar_positions) = co.meas.construct_positions(frame,
                                                                         polar=True)
        else:
            # reuse globally cached position grids
            self.all_cart_positions = co.meas.cart_positions
            self.all_polar_positions = co.meas.polar_positions
        # sub-pixel radial resolution used for cocircularity tests
        self.car_res = np.sqrt(2) / 2.0
    def approx_hand(self):
        '''
        run longest_ray method to use this function

        Builds ``hand_mask`` by taking the last link's minimum-area box,
        keeping the two corners nearest the skeleton end and extending
        towards the opposite side by a fixed ratio of the link width;
        also records ``hand_start`` as the midpoint of the extension.
        '''
        box = cv2.boxPoints(cv2.minAreaRect(self.contour.squeeze()[
            self.surrounding_skel[-1], :]))[:, ::-1].astype(int)
        # find box corners closest to skeleton end
        hand_corners_inds = np.argsort(
            calculate_cart_dists(box, self.skeleton[-1][1]))[:2]
        hand_corners_inds = np.sort(hand_corners_inds)
        # remaining (opposite-side) corners of the box
        without_corn_inds = np.concatenate((box[:hand_corners_inds[0], :],
                                            box[hand_corners_inds[0] +
                                                1:hand_corners_inds[1], :],
                                            box[hand_corners_inds[1] + 1:, :]))
        # for each hand-side corner, the nearest opposite-side corner
        far_from_hand_corn0_ind = np.argmin(calculate_cart_dists(
            without_corn_inds, box[hand_corners_inds[0], :]))
        far_from_hand_corn1_ind = np.argmin(calculate_cart_dists(
            without_corn_inds, box[hand_corners_inds[1], :]))
        width = calculate_cart_dists(box[hand_corners_inds])
        length = calculate_cart_dists(box[hand_corners_inds[0], :][None, :],
                                      without_corn_inds[far_from_hand_corn0_ind, :])
        # heuristic: extend by 1.7 x width along the box length
        rate = 1.7 * width / float(length)
        point_close_to_corn0 = ((1 - rate) * box[hand_corners_inds[0], :]
                                + rate * without_corn_inds[far_from_hand_corn0_ind, :]).astype(int)
        point_close_to_corn1 = ((1 - rate) * box[hand_corners_inds[1], :]
                                + rate * without_corn_inds[far_from_hand_corn1_ind, :]).astype(int)
        new_box = np.concatenate((box[hand_corners_inds, :],
                                  point_close_to_corn0[None, :],
                                  point_close_to_corn1[None, :]), axis=0)
        self.hand_mask = np.zeros(self.frame.shape, np.uint8)
        # hand start = midpoint between the two extended points
        self.hand_start = np.array([(point_close_to_corn0[0] +
                                     point_close_to_corn1[0]) / 2.0,
                                    (point_close_to_corn0[1] +
                                     point_close_to_corn1[1]) / 2.0])
        cv2.drawContours(self.hand_mask, [cv2.convexHull(
            new_box).squeeze()[:, ::-1]], 0, 1, -1)
def detect_entry_upgraded(self, hull, hull_inds=None):
'''
Use convex hull to detect the entry of arm in the image
'''
if co.edges.nonconvex_edges_lims is None:
if self.frame is None:
raise Exception('Run reset first')
else:
co.edges.load_calib_data(whole_im=True, img=self.frame)
points_in_lims_mask = ((hull[:, 0] <= co.edges.nonconvex_edges_lims[0]) +
(hull[:, 1] <= co.edges.nonconvex_edges_lims[1]) +
(hull[:, 0] >= co.edges.nonconvex_edges_lims[2]) +
(hull[:, 1] >= co.edges.nonconvex_edges_lims[3])).ravel()
points = np.fliplr(hull[points_in_lims_mask, :])
if np.size(points) == 0:
return None
cmplx = (points[:, 0] * 1j + points[:, 1])[:, None]
dist_mat = np.abs(cmplx.T - cmplx)
ind1, ind2 = np.unravel_index(dist_mat.argmax(), dist_mat.shape)
entry = np.array([points[ind1, :], points[ind2, :]])
if hull_inds is not None:
entry_inds = np.array([hull_inds[points_in_lims_mask][ind1],
hull_inds[points_in_lims_mask][ind2]])
return entry, entry_inds
return entry
def run(self, frame=None, contour=None, method='longest_ray'):
'''
method = longest_ray or rectangle_approx, default is longest_ray
'''
if not self.reset(frame, contour):
return 0
if method == 'rectangle_approx':
self.run_rectangle_approx()
elif method == 'longest_ray':
return self.run_longest_ray()
else:
raise Exception(self.run.__doc__)
return 1
def reset(self, frame=None, contour=None):
'''
necessary function to be used before skeletonizing
'''
if frame is None:
if self.frame is not None:
frame = self.frame
else:
raise Exception('frame is needed')
else:
self.frame = frame
if contour is None:
raise Exception('contour is needed')
else:
self.contour = contour
if self.draw:
self.img = np.tile(frame[..., None] / float(np.max(frame)), (1, 1, 3))
if not self.positions_initiated:
(self.all_cart_positions,
self.all_polar_positions) = co.meas.construct_positions(frame,
polar=True)
hull_inds = cv2.convexHull(contour, returnPoints=False).squeeze()
hull_points = contour.squeeze()[hull_inds, :]
try:
self.entry, self.entry_inds = self.detect_entry_upgraded(
hull_points, hull_inds)
except:
return 0
self.armpoints = self.all_cart_positions[frame > 0]
pos = self.contour.squeeze()
self.polar = self.all_polar_positions[pos[:, 1], pos[:, 0], :]
self.surrounding_skel = []
self.skeleton_widths = []
self.init_point = np.mean(self.entry, axis=0)
self.skeleton = []
self.filter_mask = None
return 1
    def run_longest_ray(self, contour=None, new_init_point=None):
        '''
        function to be used to compute longest_ray method
        run self.reset first

        Grows the skeleton link by link, re-centering the polar contour on
        each new joint, until too few points remain or the link budget is
        spent.  Returns True on success, False when no link was found.
        '''
        if contour is not None:
            self.contour = contour
        if new_init_point is not None:
            self.init_point = new_init_point
        else:
            self.init_point = np.mean(self.entry, axis=0)
        polar = self.polar.copy()
        # move the polar origin to the arm entry midpoint
        polar = co.pol_oper.change_origin(polar, 0, [0, 0], self.init_point)
        count = 0
        while polar.shape[0] > self.min_coords_num and count < self.max_links:
            if not self.detect_longest_ray_inside_contour(
                    polar):
                break
            # keep only contour points not yet assigned to a link and
            # re-center on the newest skeleton joint
            polar = self.polar[self.filter_mask, :]
            polar = co.pol_oper.change_origin(polar, 0,
                                              [0, 0],
                                              self.skeleton[-1][1])
            count += 1
        if not self.surrounding_skel:
            return False
        # whatever remains of the contour belongs to the last link
        self.surrounding_skel[-1][self.filter_mask] = True
        self.approx_hand()
        return True
    def detect_longest_ray_inside_contour(self, polar=None,
                                          detect_width_end=True):
        '''
        polar has origin init_point
        detect_width_end is True if a rectangle approximation
        is needed, when computing the ray ends
        Run run_longest_ray to use this function

        Digitizes the polar contour into a radius x angle histogram, finds
        the farthest contour intersection reachable by a single ray from
        the current reference point, and appends the resulting joint to
        the skeleton.  Returns 1 on success, 0 when the detected link is
        degenerate.
        NOTE(review): uses Python 2 ``xrange``.
        '''
        if polar is None:
            polar = self.polar
        if self.skeleton:
            new_cart_ref_point = self.skeleton[-1][1]
        else:
            new_cart_ref_point = self.init_point
        # digitize initial polar coordinates
        r_bins_edges = np.arange(
            np.min(polar[:, 0]), np.max(polar[:, 0]) + 2, 2)
        p_bins_edges = np.linspace(np.min(polar[:, 1]), np.max(
            polar[:, 1]) + 0.01, self.angle_bin_num)
        # clamp digitized indices into the valid bin range
        r_d_ind = np.minimum(np.digitize(polar[:, 0],
                                         r_bins_edges), len(r_bins_edges) - 1)
        p_d_ind = np.minimum(np.digitize(polar[:, 1],
                                         p_bins_edges), len(p_bins_edges) - 1)
        r_d = r_bins_edges[r_d_ind]
        p_d = p_bins_edges[p_d_ind]
        # :digitized coordinates
        polar_d = np.concatenate((r_d[:, None], p_d[:, None]), axis=1)
        bins = (r_bins_edges, p_bins_edges)
        # put the digitized polar coordinates into a 2d histogram
        H, _, _ = np.histogram2d(r_d, p_d, bins=bins)
        # following might work in a future update of OpenCV (~-1ms)
        # H = cv2.calcHist([r_d.astype(np.float32),p_d.astype(np.float32)], [0,1], None, (len(r_bins_edges)-1,angle_bin_num-1),
        #                 (r_bins_edges, p_bins_edges))
        # the frequency in each bin doesn't matter (optimization might exist)
        H = H > 0
        # find the angles where a line with unique intersection does not exist
        a = np.sum(H, axis=0) > 2
        # create a matrix with zeroed all the columns refering to above angles
        s = H.copy()
        s[:, a] = 0
        if self.focus == 'speed':
            # find where the intersection happens for each non zeroed angle
            r_indices = np.argmax(s[:, :], axis=0)
            # find which intersection happens farthest. This is the result.
            y_ind = np.argmax(r_indices)
            x_ind = r_indices[y_ind]
            # find the winning point
            new_pol_ref_point = np.array(
                [[r_bins_edges[x_ind], p_bins_edges[y_ind]]])
        elif self.focus == 'accuracy':
            # slower variant: rasterize the contour as polylines in a
            # dense (radius, angle) grid before taking the argmax
            polpoints = polar.copy()
            #polpoints[:, 1] *= 10
            #polpoints = np.round(polpoints).astype(int)
            p_uni, polpoints[:, 1] = np.unique(polpoints[:, 1],
                                               return_inverse=True)
            r_uni, polpoints[:, 0] = np.unique(polpoints[:, 0],
                                               return_inverse=True)
            polpoints = (polpoints).astype(int)
            # break the contour where consecutive points are far apart
            dists = calculate_cart_dists(polpoints)
            segm_thres = polpoints.max() / float(20)
            segments_to_cut = dists < segm_thres
            segm_inds = find_trues_segments(segments_to_cut, iscircular=False)
            segments = [np.int32(polpoints[:, ::-1][start + 1:end]) for [start, end] in
                        segm_inds]
            s_new = np.zeros(tuple((polpoints.max(axis=0) + 1).tolist()))
            cv2.polylines(s_new, segments, False, 1)
            s_new[0, :] = 0
            #ref_p = np.min(polpoints, axis=0)
            #polpoints = polpoints
            #s_new[polpoints[:,0], polpoints[:,1]] = 1
            '''
            check = s>0
            h_points = find_nonzero(check.astype(np.uint8))
            d = np.abs(h_points - np.median(h_points,axis=0))
            mdev = np.median(d,axis=0)
            mdev[mdev==0] = 0.01
            rat = d / mdev.astype(float) <= 2
            h_points = h_points[np.prod(rat,axis=1).astype(bool)]
            print h_points
            s_new = np.zeros_like(s)
            s_new[h_points[:,0],h_points[:,1]] = s[h_points[:,0],h_points[:,1]]
            '''
            s = s_new
            # find where the intersection happens for each non zeroed angle
            r_indices = np.argmax(s[:, :], axis=0)
            # find which intersection happens farthest. This is the result.
            y_ind = np.argmax(r_indices)
            x_ind = r_indices[y_ind]
            # snap back to the closest original polar sample
            dists = calculate_cart_dists(polpoints,
                                         np.hstack((x_ind, y_ind)))
            new_pol_ref_point = polar[np.argmin(dists), :][None, :]
        if self.draw:
            # debug visualization of the intersection grid and winner
            import matplotlib.pyplot as plt
            fig = plt.figure()
            axes = fig.add_subplot(111)
            #im = (H[...,None]*np.array([[[1,0,0]]])+s[...,None]*np.array([[[0,1,0]]]))
            im = (s[..., None] * np.array([[[0, 1, 0]]]))
            im = im / np.max(im).astype(float)
            cv2.circle(im, (y_ind, x_ind), 3, [1, 1, 1], -1)
            axes.imshow(im)
            #plt.hist2d(p_d,r_d, bins=(p_bins_edges, r_bins_edges))
            plt.show()
        old_cart_ref_point = new_cart_ref_point.copy()
        new_cart_ref_point = co.pol_oper.polar_to_cart(
            new_pol_ref_point, old_cart_ref_point, 0).squeeze()
        if detect_width_end:
            # detecting the second point to define segment end
            mask1 = (np.abs(new_pol_ref_point[0, 0] - polar[:, 0])
                     <= 2)
            mask2 = (np.abs(new_pol_ref_point[0, 1] - polar[:, 1])
                     <= 2 * pi / float(self.angle_bin_num))
            link_end_1st_ind = (mask1 * mask2).argmax()
            if self.skeleton:
                ref_point = self.skeleton[-1][1]
            else:
                ref_point = self.init_point
            self.detect_link_end_by_distance(ref_point, 0, link_end_1st_ind,
                                             polar)
            # degenerate (near-zero width) link: report failure
            if calculate_cart_dists(np.array(self.skeleton_widths[-1][1])) < 10:
                return 0
        else:
            # no second windth end point is detected, link points are considered the ones
            # having already been rejected during ray computation
            # those are:
            # surr_points_r_ind = np.argmax(H[:x_ind,:],axis=0)
            # However, this result can not be considered safe choice as the
            # contour is not smooth and the angle bin size should get smaller,
            # thus the procedure will be lower. So detect_width_end is by
            # default true.
            H_part_argsort = np.argpartition(H[:x_ind, :], -2, axis=0)
            surr_points_r_ind = H_part_argsort[-2:, :]
            surr_points_p_ind = np.arange(H.shape[1])
            r_mask1 = r_d_ind[:, None] == surr_points_r_ind[0, :][None, :]
            p_mask = p_d_ind[:, None] == surr_points_p_ind[None, :]
            r_mask2 = r_d_ind[:, None] == surr_points_r_ind[1, :][None, :]
            link_points_mask = np.sum(np.logical_and(np.logical_or(
                r_mask1, r_mask2), p_mask), axis=1).astype(bool)
            closing_size = co.CONST['longest_ray_closing_size']
            # We perform a closing operation in 1d space, that might fix some
            # irregularities, although the results are again disputable
            # dilate mask to gather outliers
            for count in xrange(closing_size):
                link_points_mask += (np.roll(link_points_mask, 1) +
                                     np.roll(link_points_mask, -1))
            # erode mask to return it to the previous condition
            for count in xrange(closing_size):
                link_points_mask *= (np.roll(link_points_mask, 1) *
                                     np.roll(link_points_mask, -1))
            _, link_points_mask = find_largest_trues_segment(link_points_mask)
            if self.filter_mask is not None:
                # map the link mask back into full-contour coordinates
                surr_mask = self.filter_mask.copy()
                surr_mask[self.filter_mask > 0] = link_points_mask
                self.surrounding_skel.append(surr_mask)
                self.filter_mask[
                    self.filter_mask > 0] = np.logical_not(link_points_mask)
            else:
                self.surrounding_skel.append(link_points_mask)
                self.filter_mask = np.logical_not(link_points_mask)
        self.skeleton.append([np.int0(old_cart_ref_point),
                              np.int0(new_cart_ref_point)])
        return 1
def run_rectangle_approx(self):
'''
function to run rectangle_approx method
run self.reset first
'''
used_polar_size = self.polar.shape[0]
while (len(self.skeleton) <= self.max_links and
used_polar_size > 10):
if not self.rectangle_approx_single_link(self.polar):
break
used_polar_size = self.filter_mask.sum()
self.approx_hand()
def find_used_polar(self):
if self.filter_mask is None:
used_polar = self.polar.copy()
else:
used_polar = self.polar[self.filter_mask, :]
return used_polar
    def detect_link_end_by_distance(self, ref_point, ref_angle,
                                    link_end_1st_ind,
                                    used_polar,
                                    entry_segment=None,
                                    entry_inds=None):
        '''
        Locate the far end of the current link and split the contour.

        Given the contour index of one link-end point
        (``link_end_1st_ind``), finds its partner on the other contour
        side (assumed cocircular under the rectangle approximation),
        appends the resulting joint/width segment to the skeleton and
        updates ``surrounding_skel`` / ``filter_mask``.  Returns 1 on
        success, 0 when no cocircular partner exists.
        NOTE(review): uses Python 2 ``xrange``.
        '''
        # finding the direction (1 or -1) of the circular vector with the closest distance
        # from the width_segment point to the link_end_1st_ind
        # (=w_to_crit_direction)
        if entry_inds is None:
            if not self.skeleton:
                if self.entry is not None:
                    entry_segment = self.entry
                    entry_inds = self.entry_inds
                else:
                    raise Exception('entry_segment and entry_inds are needed')
            else:
                entry_inds = np.array([0, self.filter_mask.sum()])
        if entry_segment is None:
            if self.skeleton_widths:
                entry_segment = self.skeleton_widths[-1][1]
            else:
                raise Exception('entry_segment is needed')
        tmp_closest_to_1st_end_width_ind = co.circ_oper.diff(np.array([link_end_1st_ind]),
                                                             entry_inds,
                                                             used_polar.shape[
                                                                 0],
                                                             no_intersections=True)
        closest_to_1st_end_width_ind = entry_inds[
            tmp_closest_to_1st_end_width_ind]
        closest_to_2nd_end_width_ind = entry_inds[
            1 - tmp_closest_to_1st_end_width_ind]
        w_to_crit_direction, _ = co.circ_oper.find_min_dist_direction(
            closest_to_1st_end_width_ind,
            link_end_1st_ind,
            used_polar.shape[0])
        # from here are the changes
        link_end_1st = used_polar[link_end_1st_ind, :]
        # The other point that sets the end of the link is
        # assumed to be cocircular (rectangle approximation) with the
        # link_end_1st.
        length_radius = link_end_1st[0]
        cocircular_with_crit_mask = np.abs(
            used_polar[:, 0] - length_radius) < self.car_res
        if self.draw:
            cv2.circle(self.img, tuple(ref_point.astype(int)[::-1]), int(length_radius),
                       [0.5, 0.0, 0.0])
        try:
            cocircular_with_crit_inds = cv2.findNonZero(cocircular_with_crit_mask[:, None]
                                                        .astype(np.uint8))[:, 0, 1]
        except TypeError:
            # findNonZero returned None: no cocircular partner exists
            return 0
        # from the cocircular points we keep the one, which is closest to the
        # unused width point index ( not the one closer to link_end_1st_ind),
        # from the opposite direction of w_to_crit_direction
        #####folowing for should be optimized######
        dists = np.zeros_like(cocircular_with_crit_inds)
        for count in xrange(cocircular_with_crit_inds.size):
            dists[count] = co.circ_oper.find_single_direction_dist(
                closest_to_2nd_end_width_ind,
                cocircular_with_crit_inds[count],
                used_polar.shape[0], -w_to_crit_direction)
        link_end_2nd_ind = cocircular_with_crit_inds[dists.argmin()]
        link_end_2nd = used_polar[link_end_2nd_ind, :]
        # both link-end points, converted back to cartesian coordinates
        link_end_width_segment = np.zeros((2, 2))
        link_end_width_segment[0, :] = co.pol_oper.polar_to_cart(
            np.array([link_end_1st]), ref_point, ref_angle).squeeze()
        link_end_width_segment[1, :] = co.pol_oper.polar_to_cart(
            np.array([link_end_2nd]), ref_point, ref_angle).squeeze()
        # new skeleton joint = midpoint of the width segment
        skel_end = np.mean(link_end_width_segment, axis=0)
        if self.skeleton:
            link_length = calculate_cart_dists(
                np.atleast_2d(self.skeleton[-1][1]), skel_end)
        if self.skeleton:
            if link_length < self.link_width_thres:
                # too-short link: merge into the previous one
                self.skeleton[-1][1] = skel_end.astype(int)
                self.skeleton_widths[-1][
                    1] = link_end_width_segment.astype(int)
            else:
                self.skeleton.append([self.skeleton[-1][1],
                                      np.mean(link_end_width_segment, axis=0).astype(int)])
                self.skeleton_widths.append([self.skeleton_widths[-1][1],
                                             link_end_width_segment.astype(int)])
        else:
            # first link: anchor the skeleton on the entry segment midpoint
            self.skeleton.append([np.mean(entry_segment, axis=0).astype(int),
                                  np.mean(link_end_width_segment, axis=0).astype(int)])
            self.skeleton_widths.append([entry_segment.astype(int),
                                         link_end_width_segment.astype(int)])
        surr_points_mask = np.ones(used_polar.shape[0])
        # changes from here
        # contour stretch on each side of the link, between width point and
        # link-end point; their xor is the link's surrounding contour
        filter1 = co.circ_oper.filter(closest_to_1st_end_width_ind,
                                      link_end_1st_ind,
                                      used_polar.shape[0],
                                      w_to_crit_direction)
        filter2 = co.circ_oper.filter(closest_to_2nd_end_width_ind,
                                      link_end_2nd_ind,
                                      used_polar.shape[0],
                                      -w_to_crit_direction)
        surr_points_mask = np.logical_xor(filter1, filter2).astype(bool)
        if self.filter_mask is None:
            # entry_segment must be also taken care of
            dir3, _ = co.circ_oper.find_min_dist_direction(entry_inds[0],
                                                           entry_inds[1],
                                                           used_polar.shape[0])
            filter3 = co.circ_oper.filter(entry_inds[0], entry_inds[1],
                                          used_polar.shape[0], dir3)
            surr_points_mask = np.logical_or(surr_points_mask, filter3)
            self.surrounding_skel.append(surr_points_mask)
            self.filter_mask = np.logical_not(surr_points_mask)
        else:
            # map the link mask back into full-contour coordinates
            surr_mask = self.filter_mask.copy()
            surr_mask[self.filter_mask > 0] = surr_points_mask
            if link_length < self.link_width_thres:
                # merged link: extend the previous surrounding set
                self.surrounding_skel[-1] += surr_mask
                self.surrounding_skel[
                    -1] = self.surrounding_skel[-1].astype(bool)
            else:
                self.surrounding_skel.append(surr_mask.astype(bool))
            self.filter_mask[self.filter_mask >
                             0] = np.logical_not(surr_points_mask)
            self.filter_mask = self.filter_mask.astype(bool)
        return 1
    def rectangle_approx_single_link(self, polar, entry_inds=None, entry_segment=None,
                                     polar_ref_point=[0, 0], polar_ref_angle=0):
        '''
        Use rectangle approximation of the links to find the skeleton.

        One link of the arm is approximated by a rectangle whose near side is
        ``entry_segment``; the far side is found as the closest contour points
        that intersect the rectangle axis (cocircular in the re-referenced
        polar frame). Appends the discovered segment to ``self.skeleton`` /
        ``self.skeleton_widths``, records the surrounding contour points in
        ``self.surrounding_skel`` and shrinks ``self.filter_mask``.

        Returns 1 when a link was found, 0 when the end of the hand was
        reached (no intersection points left).

        NOTE(review): ``polar_ref_point=[0, 0]`` is a mutable default
        argument; it is only read here, but ``None`` + fallback would be
        safer.
        '''
        if entry_inds is None:
            if not self.skeleton:
                if self.entry is not None:
                    entry_segment = self.entry
                    entry_inds = self.entry_inds
                else:
                    raise Exception('entry_segment and entry_inds are needed')
            else:
                # Subsequent links: the remaining (unfiltered) points start at
                # index 0 and end at the count of surviving mask entries.
                entry_inds = np.array([0, self.filter_mask.sum()])
        if entry_segment is None:
            if self.skeleton_widths:
                # Continue from the far side of the previously found link.
                entry_segment = self.skeleton_widths[-1][1]
            else:
                raise Exception('entry_segment is needed')
        if self.filter_mask is None:
            used_polar = polar.copy()
        else:
            used_polar = polar[self.filter_mask, :]
        ##############################################
        # Making a guess of the rectangle orientation,
        # enclosing the first link, by examining the
        # points near the entry_segment.
        ##############################################
        new_ref_point = [(entry_segment[0][0] + entry_segment[1][0]) /
                         2.0, (entry_segment[0][1] + entry_segment[1][1]) / 2.0]
        used_polar = co.pol_oper.change_origin(
            used_polar, polar_ref_angle, polar_ref_point, new_ref_point)
        new_ref_radius = calculate_cart_dists(entry_segment) / 2.0
        width_vec = entry_segment[0, :] - entry_segment[1, :]
        width_vec_orient = np.arctan2(width_vec[0], width_vec[1])
        # The two candidate link directions are perpendicular to the width
        # segment, pointing to either side of it.
        angle1 = co.pol_oper.fix_angle(width_vec_orient + pi / 2)
        angle2 = co.pol_oper.fix_angle(width_vec_orient - pi / 2)
        try:
            (_, corrected_cart_point, _) = find_corrected_point(
                used_polar, polar_ref_angle, new_ref_point, new_ref_radius, [angle1, angle2])
            box, perp_to_segment_unit = find_segment_to_point_box(
                self.armpoints, entry_segment, np.array(corrected_cart_point))
            new_ref_angle, corrected_entry_segment = find_link_direction(
                box, entry_segment, perp_to_segment_unit, np.array(corrected_cart_point))
        except ValueError:
            # Fallback: pick whichever perpendicular direction is closer to
            # the angle of a nearby contour point.
            seg_len = calculate_cart_dists(entry_segment)[0]
            seg_angle = np.arctan2(width_vec[0], width_vec[1])
            angle1 = co.pol_oper.fix_angle(seg_angle - pi / 2)
            angle2 = co.pol_oper.fix_angle(seg_angle + pi / 2)
            # NOTE(review): np.argmin runs over the *masked* sub-array but its
            # result indexes the full used_polar — the row picked here is
            # likely not the intended one; verify against co.pol_oper usage.
            comp_angle = used_polar[
                np.argmin(used_polar[(used_polar[:, 0] - seg_len / 2.0) > 2, 0]), 1]
            angdiff1 = co.pol_oper.mod_diff(comp_angle, angle1)
            angdiff2 = co.pol_oper.mod_diff(comp_angle, angle2)
            if np.abs(angdiff1) < np.abs(angdiff2):
                new_ref_angle = angle1
            else:
                new_ref_angle = angle2
        # Rotate the polar frame so that the link axis lies at angle 0.
        used_polar[:, 1] -= (new_ref_angle)
        co.pol_oper.mod_correct(used_polar)
        intersection_mask = (np.abs(used_polar[:, 1]) <
                             co.CONST['angle_resolution'])
        if np.sum(intersection_mask) == 0:
            # No intersection found, must have reached the end of hand
            self.surrounding_skel[-1][self.filter_mask] = True
            return 0
        # finding closest to entry_segment intersection points
        cand_crit_points_inds = cv2.findNonZero(intersection_mask[:, None]
                                                .astype(np.uint8))[:, 0, 1]
        comp = co.circ_oper.diff(cand_crit_points_inds,
                                 entry_inds,
                                 intersection_mask.size)
        cand_ind, closest_to_cand_width_point_ind = np.unravel_index(
            np.argmin(comp), comp.shape)
        # saving closest one's position (=link_end_1st_ind)
        link_end_1st_ind = cand_crit_points_inds[cand_ind]
        # finding the direction (1 or -1) of the circular vector with the
        # closest distance from the width_segment point to the
        # link_end_1st_ind (=w_to_crit_direction)
        closest_to_1st_end_width_ind = entry_inds[
            closest_to_cand_width_point_ind]
        w_to_crit_direction, _ = co.circ_oper.find_min_dist_direction(
            closest_to_1st_end_width_ind,
            link_end_1st_ind,
            intersection_mask.size)
        link_end_1st = used_polar[link_end_1st_ind, :]
        # The other point that sets the end of the link is
        # assumed to be cocircular (rectangle approximation) with the
        # link_end_1st.
        length_radius = link_end_1st[0]
        cocircular_with_crit_mask = np.abs(
            used_polar[:, 0] - length_radius) < self.car_res
        cocircular_with_crit_inds = cv2.findNonZero(cocircular_with_crit_mask[:, None]
                                                    .astype(np.uint8))[:, 0, 1]
        # From the cocircular points we keep the ones whose vectors of minimum
        # distance from the width_segment point have a different direction
        # from the corresponding one from the width_segment_point to the
        # link_end_1st_ind.
        crit_inds_rev_dir_mask = np.zeros_like(
            cocircular_with_crit_inds)
        # NOTE(review): the following for-loop should be vectorized.
        dists = np.zeros_like(cocircular_with_crit_inds)
        for count in xrange(cocircular_with_crit_inds.size):
            (crit_inds_rev_dir_mask[count],
             dists[count]) = co.circ_oper.find_min_dist_direction(
                 entry_inds[1 - closest_to_cand_width_point_ind],
                 cocircular_with_crit_inds[count],
                 intersection_mask.size)
        crit_inds_rev_dir_mask = (
            crit_inds_rev_dir_mask != w_to_crit_direction)
        # If there are no such cocircular points then we keep the cocircular
        # point whose minimum distance is furthest from the
        # width_segment_points.
        if np.sum(crit_inds_rev_dir_mask) == 0:
            crit_inds_rev_dir_mask[dists.argmax()] = True
        # If there are more than one passing points, then keep the one
        # closest to the width_segment_points.
        comp = co.circ_oper.diff(cocircular_with_crit_inds[
            crit_inds_rev_dir_mask],
            entry_inds,
            intersection_mask.size)
        cand_ind, closest_to_cand_width_point_ind = np.unravel_index(
            np.argmin(comp), comp.shape)
        closest_to_2nd_end_width_ind = entry_inds[
            closest_to_cand_width_point_ind]
        # link_end_2nd_ind is the resulting second point
        link_end_2nd_ind = cocircular_with_crit_inds[
            crit_inds_rev_dir_mask][cand_ind]
        link_end_2nd = used_polar[link_end_2nd_ind, :]
        # Convert both link-end points back to cartesian coordinates.
        link_end_width_segment = np.zeros((2, 2))
        link_end_width_segment[0, :] = co.pol_oper.polar_to_cart(
            np.array([link_end_1st]), new_ref_point, new_ref_angle).squeeze()
        link_end_width_segment[1, :] = co.pol_oper.polar_to_cart(
            np.array([link_end_2nd]), new_ref_point, new_ref_angle).squeeze()
        if self.skeleton:
            self.skeleton.append([self.skeleton[-1][1],
                                  np.mean(link_end_width_segment, axis=0).astype(int)])
            self.skeleton_widths.append([self.skeleton_widths[-1][1],
                                         link_end_width_segment.astype(int)])
        else:
            self.skeleton.append([np.mean(entry_segment, axis=0).astype(int),
                                  np.mean(link_end_width_segment, axis=0).astype(int)])
            self.skeleton_widths.append([entry_segment.astype(int),
                                         link_end_width_segment.astype(int)])
        # Mark the contour points that belong to this link and remove them
        # from the active filter for the next iteration.
        surr_points_mask = np.ones(used_polar.shape[0])
        dir_1st, _ = co.circ_oper.find_min_dist_direction(
            closest_to_1st_end_width_ind, link_end_1st_ind, used_polar.shape[0])
        dir_2nd, _ = co.circ_oper.find_min_dist_direction(
            closest_to_2nd_end_width_ind, link_end_2nd_ind, used_polar.shape[0])
        filter1 = co.circ_oper.filter(closest_to_1st_end_width_ind, link_end_1st_ind,
                                      used_polar.shape[0], dir_1st)
        filter2 = co.circ_oper.filter(closest_to_2nd_end_width_ind, link_end_2nd_ind,
                                      used_polar.shape[0], dir_2nd)
        surr_points_mask = np.logical_or(filter1, filter2).astype(bool)
        if self.filter_mask is None:
            self.surrounding_skel.append(surr_points_mask)
            self.filter_mask = np.logical_not(surr_points_mask)
        else:
            # Re-embed the sub-mask into full-contour coordinates.
            surr_mask = self.filter_mask.copy()
            surr_mask[self.filter_mask > 0] = surr_points_mask
            self.surrounding_skel.append(surr_mask.astype(bool))
            self.filter_mask[self.filter_mask >
                             0] = np.logical_not(surr_points_mask)
            self.filter_mask = self.filter_mask.astype(bool)
        return 1
def draw_skeleton(self, frame, show=True):
'''
draws links and skeleton on frame
'''
if show:
from matplotlib import pyplot as plt
if not self.draw:
self.img = np.tile(frame[..., None] / float(np.max(frame)), (1, 1, 3))
c_copy = self.contour.squeeze()
for surr_points_mask in self.surrounding_skel:
colr = np.random.random(3)
surr_points = c_copy[surr_points_mask, :]
for count in range(surr_points.shape[0]):
cv2.circle(self.img, tuple(surr_points[count, :]),
1, colr.astype(tuple), -1)
for link in self.skeleton:
cv2.arrowedLine(self.img, tuple(
link[0][::-1]), tuple(link[1][::-1]), [0, 0, 1], 2)
if show:
plt.figure()
plt.imshow(self.img)
plt.show()
return (self.img * np.max(frame))
def main():
    '''Main Caller Function'''
    # Fetch a sample binary arm image the first time the demo is run.
    # NOTE(review): urllib.urlretrieve is the Python 2 API (moved to
    # urllib.request in Python 3); this whole file targets Python 2.
    if not os.path.exists('arm_example.png'):
        urllib.urlretrieve("https://www.packtpub.com/\
sites/default/files/Article-Images/B04521_02_04.png",
                           "arm_example.png")
    # binarm3d = cv2.imread('random.png')
    # binarm3d = cv2.imread('arm_example.png')
    #binarm = cv2.imread('random.png', -1)
    binarm = cv2.imread('arm_example.png', -1)
    # OpenCV 3 signature: findContours returns (image, contours, hierarchy).
    # NOTE(review): this `cnts` result is unused; it is recomputed inside the
    # loop below.
    _, cnts, _ = cv2.findContours(
        binarm.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    co.edges.load_calib_data(whole_im=True, img=binarm)
    #skel = FindArmSkeleton(binarm)
    # binarm3d_positions = np.transpose(np.nonzero(np.ones_like(
    #    binarm3d[:, :, 0]))).reshape(binarm3d.shape[:-1] + (2,))
    # co.edges.calib_edges = np.pad(np.zeros((binarm3d.shape[
    #    0] - 2, binarm3d.shape[1] - 2), np.uint8), ((1, 1), (1, 1)), 'constant', constant_values=1)
    # co.edges.find_non_convex_edges_lims(edge_tolerance=1)
    # print\
    #    timeit.timeit(lambda: main_process_upgraded(binarm3d.copy(),
    #                                                binarm3d_positions, 0), number=100) / 100
    # print\
    #    timeit.timeit(lambda: skel.run(binarm,cnts[0]), number=100) / 100
    # profile.runctx('main_process_upgraded(binarm3d,binarm3d_positions,0)',
    #               globals(), locals())
    #profile.runctx('skel.run(binarm, cnts[0])', globals(), locals())
    # Run the skeletonisation on four 90-degree rotations of the test image.
    for _ in range(4):
        # rows, cols, _ = binarm3d.shape
        rows, cols = binarm.shape
        rot_mat = cv2.getRotationMatrix2D(
            (np.floor(cols / 2.0), np.floor(rows / 2.0)), 90, 1)
        # Translate so the rotated image stays centred in the swapped frame.
        rot_mat[0, 2] += np.floor(rows / 2.0 - cols / 2.0)
        rot_mat[1, 2] += np.floor(cols / 2.0 - rows / 2.0)
        # binarm3d_positions = np.transpose(np.nonzero(np.ones_like(
        #    binarm3d[:, :, 0]))).reshape(binarm3d.shape[:-1] + (2,))
        skel = FindArmSkeleton(binarm, link_width_thres=0, draw=True,
                               focus='accuracy')
        _, cnts, _ = cv2.findContours(
            binarm.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        skel.run(binarm, cnts[0], 'longest_ray')
        skel.draw_skeleton(binarm)
        #_, _, res = main_process_upgraded(
        #    binarm3d.copy(), binarm3d_positions, 1)
        # if res is not None:
        #    cv2.imshow('res', res[0])
        #    cv2.waitKey(0)
        # binarm3d = 255 * \
        #    ((cv2.warpAffine(binarm3d, rot_mat, (rows, cols))) > 0).astype(np.uint8)
        # Rotate the binary image for the next iteration and re-binarise.
        binarm = 255 *\
            ((cv2.warpAffine(binarm, rot_mat, (rows, cols))) > 0).astype(np.uint8)
        co.meas.construct_positions(binarm,
                                    polar=True)
        # co.edges.calib_edges = np.pad(np.zeros((
        #    binarm3d.shape[0] - 2, binarm3d.shape[1] - 2), np.uint8), (
        #        (1, 1), (1, 1)), 'constant', constant_values=1)
        co.edges.load_calib_data(whole_im=True, img=binarm)
        # co.edges.find_non_convex_edges_lims(edge_tolerance=1)
        # cv2.destroyAllWindows
# Module-level logger with a terse funcName/lineno format.
# NOTE(review): the argument is the literal string '__name__', not the module
# dunder __name__ — every module doing this shares one logger named
# "__name__"; probably meant logging.getLogger(__name__).
LOG = logging.getLogger('__name__')
CH = logging.StreamHandler()
CH.setFormatter(logging.Formatter(
    '%(funcName)20s()(%(lineno)s)-%(levelname)s:%(message)s'))
# Replace any previously attached handlers so output is not duplicated.
LOG.handlers = []
LOG.addHandler(CH)
LOG.setLevel('INFO')
if __name__ == '__main__':
    main()
#####################################################################
# OBSOLETE: everything below predates the FindArmSkeleton class and #
# is kept only for backwards compatibility.                         #
#####################################################################
# Module-level singleton used by the legacy main_process* functions below.
obs_skeleton = FindArmSkeleton()
def detect_entry(bin_mask):
    '''
    Function to detect intersection limits of mask with calibration edges.
    Assuming non_convex calib edges.

    Returns the two entry points (as rows of image coordinates), an empty
    array when the detected entry is too short, or None when the mask does
    not touch the calibration edges at all.
    '''
    # entry_segments=positions[bin_mask*co.masks.calib_edges>0]
    entry_segments = co.edges.edges_positions[
        bin_mask[co.edges.edges_positions_indices] > 0]
    if entry_segments.shape[0] == 0:
        return None
    # Snap the edge points onto the rectangular non-convex limits so that the
    # convex hull below degenerates to the entry line.
    approx_entry_segments = entry_segments.copy()
    approx_entry_segments[
        entry_segments[:, 1] <
        co.edges.nonconvex_edges_lims[0], 1] = co.edges.nonconvex_edges_lims[0]
    approx_entry_segments[
        entry_segments[:, 0] <
        co.edges.nonconvex_edges_lims[1], 0] = co.edges.nonconvex_edges_lims[1]
    approx_entry_segments[
        entry_segments[:, 1] >
        co.edges.nonconvex_edges_lims[2], 1] = co.edges.nonconvex_edges_lims[2]
    approx_entry_segments[
        entry_segments[:, 0] >
        co.edges.nonconvex_edges_lims[3], 0] = co.edges.nonconvex_edges_lims[3]
    approx_entry_points = cv2.convexHull(approx_entry_segments).squeeze()
    not_approx = 0
    if approx_entry_points.size == 2:
        # Hull collapsed to a single point; fall back to the raw segments.
        not_approx = 1
        approx_entry_points = cv2.convexHull(entry_segments).squeeze()
    if approx_entry_points.shape[0] == 2:
        try:
            # Only two hull points: accept them directly if far enough apart.
            if calculate_cart_dists(approx_entry_points) > np.min(bin_mask.shape) / 10.0:
                return entry_segments[find_rows_in_array(
                    approx_entry_segments, approx_entry_points)]
            else:
                return np.array([])
        except:
            # NOTE(review): bare except used only to dump debug state before
            # re-raising; narrowing to Exception would be safer. Python 2
            # print statements.
            print 'lims', co.edges.nonconvex_edges_lims
            print 'hull', approx_entry_points
            print 'aprox segments', approx_entry_segments
            print 'segments', entry_segments
            raise
    approx_entry_orient = np.diff(approx_entry_points, axis=0)
    try:
        # Normalise each hull-edge direction to unit length.
        approx_entry_orient = (approx_entry_orient /
                               calculate_cart_dists(
                                   approx_entry_points)[:, None])
    except:
        print approx_entry_points
        raise
    # Rotate each edge direction by 90 degrees to get its normal.
    approx_entry_vert_orient = np.dot(
        approx_entry_orient, np.array([[0, -1], [1, 0]]))
    # Count edge points inside a thin box around each hull edge; the edge
    # supported by the most points is taken as the entry.
    num = []
    for count, orient in enumerate(approx_entry_vert_orient):
        if not_approx:
            pos = find_segment_to_point_box(entry_segments,
                                            np.array([approx_entry_points[count, :] + orient * 10,
                                                      approx_entry_points[count, :] - orient * 10]),
                                            approx_entry_points[count + 1, :])[0]
        else:
            pos = find_segment_to_point_box(approx_entry_segments,
                                            np.array([approx_entry_points[count, :] + orient * 10,
                                                      approx_entry_points[count, :] - orient * 10]),
                                            approx_entry_points[count + 1, :])[0]
        num.append(pos.shape[0])
    _argmax = np.argmax(num)
    if not_approx:
        entry_points = entry_segments[find_rows_in_array(
            entry_segments, approx_entry_points[_argmax:_argmax + 2, :])]
    else:
        entry_points = entry_segments[find_rows_in_array(
            approx_entry_segments, approx_entry_points[_argmax:_argmax + 2, :])]
    return entry_points
def main_process_upgraded(binarm3d, positions=None, display=0):
    '''
    Per-contour driver around main_process.

    Finds all sufficiently large contours in the binary mask, crops each to
    its bounding box and runs main_process on the crop, shifting the results
    back to full-image coordinates.

    Returns (hand_patches, hand_patches_pos, masks) lists, or
    (None, None, None) when nothing was found.
    '''
    if len(binarm3d.shape) == 3:
        binarm = binarm3d[:, :, 0].copy()
        if np.max(binarm3d) != 255:
            # Normalise display copy to full 8-bit range.
            binarm3d = (binarm3d / float(np.max(binarm3d))) * 255
            binarm3d = binarm3d.astype(np.uint8)
    else:
        binarm = binarm3d.copy()
        if display == 1:
            binarm3d = np.tile(binarm[:, :, None], (1, 1, 3))
            if np.issubdtype(binarm3d[0, 0, 0], np.uint8):
                if np.max(binarm3d) == 1:
                    binarm3d *= 255
            else:
                binarm3d = (255 * binarm3d).astype(np.uint8)
    if positions is None:
        if co.meas.cart_positions is None:
            co.meas.construct_positions(binarm, polar=True)
            obs_skeleton.positions_initiated = True
        positions = co.meas.cart_positions
    # OpenCV 3 signature: (image, contours, hierarchy).
    _, cnts, _ = cv2.findContours(
        binarm, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    hand_patches, hand_patches_pos, masks = (None, None, None)
    cnt_count = 0
    for cnt in cnts:
        if cv2.contourArea(cnt) < co.CONST['min_area']:
            continue
        else:
            cnt_count += 1
        if cnt_count > co.CONST['max_hsa_contours_num']:
            break
        hull_inds = cv2.convexHull(cnt, returnPoints=False)
        hull_pnts = cnt[hull_inds.squeeze()].squeeze()
        ref_col, ref_row, col_num, row_num = cv2.boundingRect(cnt)
        img = np.zeros((row_num, col_num), np.uint8)
        # Shift the contour into bounding-box coordinates (in place).
        cnt -= np.array([[ref_col, ref_row]])
        cv2.drawContours(img, [cnt], 0, 255, -1)
        obs_skeleton.detect_entry_upgraded(hull_pnts)
        entry = obs_skeleton.entry
        if entry is None:
            LOG.debug('No entry found')
            co.chhm.no_entry += 1
            if display == 1:
                tag_im(binarm3d, 'No entry found')
                co.im_results.images.append(binarm3d)
            return None, None, None
        # NOTE(review): in-place subtraction also mutates
        # obs_skeleton.entry, since `entry` aliases it.
        entry -= np.array([[ref_row, ref_col]])
        hand_patch, hand_patch_pos, mask = main_process(img, entry=entry,
                                                        positions=positions[ref_row:ref_row + row_num,
                                                                            ref_col:ref_col + col_num, :] -
                                                        np.array([[ref_row, ref_col]]), display=display)
        img = None
        if hand_patch is not None:
            hand_patch_pos += np.array([ref_row, ref_col])
            img = np.zeros(binarm.shape)
            img[hand_patch_pos[0]:hand_patch_pos[0] + hand_patch.shape[0],
                hand_patch_pos[1]:hand_patch_pos[1] + hand_patch.shape[1]] = hand_patch
            try:
                hand_patches.append(hand_patch)
            except:
                # NOTE(review): first iteration — hand_patches is still None,
                # so append raises and the lists are initialised here.
                # `except AttributeError:` would be the precise guard.
                hand_patches, hand_patches_pos, masks = ([], [], [])
                hand_patches.append(hand_patch)
            hand_patches_pos.append(hand_patch_pos)
            masks.append(img)
    return hand_patches, hand_patches_pos, masks
def main_process(binarm3d, positions=None, display=0, entry=None):
    '''Main processing function.

    Walks along the arm from its entry segment, approximating it link by
    link, until a width abnormality (the wrist) is detected; then delegates
    to find_hand to extract the hand patch.

    Returns (hand_patch, hand_patch_pos, full_res_mask) or
    (None, None, None) on any failure; with display==1 a tagged diagnostic
    image is appended to co.im_results.images.
    '''
    LOG.setLevel('INFO')
    if len(binarm3d.shape) == 3:
        binarm = binarm3d[:, :, 0].copy()
        if np.max(binarm3d) != 255:
            binarm3d = (binarm3d / float(np.max(binarm3d))) * 255
            binarm3d = binarm3d.astype(np.uint8)
    else:
        binarm = binarm3d.copy()
        if display == 1:
            binarm3d = np.tile(binarm[:, :, None], (1, 1, 3))
            if np.issubdtype(binarm3d[0, 0, 0], np.uint8):
                if np.max(binarm3d) == 1:
                    binarm3d *= 255
            else:
                binarm3d = (255 * binarm3d).astype(np.uint8)
    if positions is None:
        if co.meas.cart_positions is None:
            co.meas.construct_positions(binarm)
        positions = co.meas.cart_positions
    try:
        armpoints = find_nonzero(binarm)
    except AttributeError:  # binarm is []
        LOG.debug('No objects found')
        co.chhm.no_obj += 1
        if display == 1:
            tag_im(binarm3d, 'No object found')
            co.im_results.images.append(binarm3d)
        return None, None, None
    try:
        # Contour pixels via the laplacian of the binary mask.
        points = find_nonzero(with_laplacian(binarm))
    except AttributeError:
        return None, None, None
    # Pack (row, col) as complex numbers to get polar coordinates cheaply.
    points = points[:, 0] * 1j + points[:, 1]
    tmp = np.angle(points)
    tmp[tmp < -pi] += 2 * pi
    tmp[tmp > pi] -= 2 * pi
    new_polar = np.concatenate(
        (np.absolute(points)[:, None], tmp[:, None]), axis=1)
    if entry is None:
        entry = detect_entry(binarm)
    '''
    except IndexError:
        LOG.debug('No entry found')
        if display == 1:
            tag_im(binarm3d, 'No entry found')
            co.im_results.images.append(binarm3d)
        return None, None, None
    '''
    if entry is None:
        LOG.debug('No entry found')
        co.chhm.no_entry += 1
        if display == 1:
            tag_im(binarm3d, 'No entry found')
            co.im_results.images.append(binarm3d)
        return None, None, None
    if entry.shape[0] <= 1:
        LOG.debug('Arm in image corners or its entry is occluded' +
                  ', hand segmentation algorithm cannot function')
        co.chhm.in_im_corn += 1
        if display == 1:
            tag_im(binarm3d, 'Arm in image corners or its entry is occluded' +
                   ', hand segmentation algorithm cannot function')
            co.im_results.images.append(binarm3d)
        return None, None, None
    link_end_radius = 1 / 2.0 * calculate_cart_dists(entry)
    # NOTE(review): link_end_segment aliases `entry`; it is written in place
    # at the bottom of the loop, so `entry` is mutated across iterations.
    link_end_segment = entry
    new_ref_point = [0, 0]
    new_ref_angle = 0
    new_crit_ind = 0
    link_end_2nd = []
    resolution = np.sqrt(2) / 2.0
    # NOTE(review): for a numpy array, entry[:] is a *view*, not a copy.
    new_corrected_segment = entry[:]
    # for _count in range(3):
    link_count = 0
    while True:
        link_count += 1
        prev_ref_point = new_ref_point[:]
        prev_polar = new_polar[:]
        prev_ref_angle = new_ref_angle
        # NOTE(review): prev_ref_point is assigned twice (redundant).
        prev_ref_point = new_ref_point[:]
        if (new_crit_ind > new_polar.shape[0] - 10 or
                link_count > co.CONST['max_link_number']):
            LOG.debug('Reached Mask Limits')
            co.chhm.rchd_mlims += 1
            if display == 1:
                tag_im(binarm3d, 'Reached Mask Limits')
                co.im_results.images.append(binarm3d)
            return None, None, None
        # Re-reference the polar frame to the midpoint of the current width
        # segment.
        new_ref_point = [(link_end_segment[0][0] + link_end_segment[1][0]) /
                         2.0, (link_end_segment[0][1] + link_end_segment[1][1]) / 2.0]
        new_polar = co.pol_oper.change_origin(
            prev_polar.copy(), prev_ref_angle, prev_ref_point, new_ref_point)
        new_ref_radius = calculate_cart_dists(link_end_segment) / 2.0
        link_end_diff = link_end_segment[0, :] - link_end_segment[1, :]
        tmpangle = np.arctan2(link_end_diff[0], link_end_diff[1])
        angle1 = co.pol_oper.fix_angle(tmpangle + pi / 2)
        angle2 = co.pol_oper.fix_angle(tmpangle - pi / 2)
        try:
            (_, corrected_cart_point, _) = find_corrected_point(
                new_polar, 0, new_ref_point, new_ref_radius, [angle1, angle2])
            box, perp_to_segment_unit = find_segment_to_point_box(
                armpoints, link_end_segment, np.array(corrected_cart_point))
            if display == 1:
                binarm3d = picture_box(binarm3d, box)
            new_ref_angle, new_corrected_segment = find_link_direction(
                box, link_end_segment, perp_to_segment_unit, np.array(corrected_cart_point))
        except ValueError:
            # Fallback orientation guess, mirroring
            # FindArmSkeleton.rectangle_approx_single_link.
            seg_len = calculate_cart_dists(link_end_segment)[0]
            seg_angle = np.arctan2(link_end_diff[
                0], link_end_diff[1])
            angle1 = co.pol_oper.fix_angle(seg_angle - pi / 2)
            angle2 = co.pol_oper.fix_angle(seg_angle + pi / 2)
            try:
                comp_angle = new_polar[
                    np.argmin(new_polar[(new_polar[:, 0] - seg_len / 2.0) > 2, 0]), 1]
            except ValueError:
                return None, None, None
            angdiff1 = co.pol_oper.mod_diff(comp_angle, angle1)
            angdiff2 = co.pol_oper.mod_diff(comp_angle, angle2)
            if np.abs(angdiff1) < np.abs(angdiff2):
                new_ref_angle = angle1
            else:
                new_ref_angle = angle2
        # Rotate the frame so the link axis is at angle 0, keep only points
        # beyond the width radius, sorted by radius.
        new_polar[:, 1] -= (new_ref_angle)
        co.pol_oper.mod_correct(new_polar)
        new_polar = new_polar[new_polar[:, 0] >= new_ref_radius, :]
        new_polar = new_polar[new_polar[:, 0].argsort(), :]
        if display == 1:
            tmp = co.pol_oper.polar_to_cart(
                new_polar, new_ref_point, new_ref_angle)
            '''
            binarm3d[tuple(tmp[np.abs(np.sqrt((tmp[:, 0] - new_ref_point[0])**2
                                              + (tmp[:, 1] - new_ref_point[1])**2)
                                      - new_polar[new_crit_ind, 0]) <=
                               resolution].T)] = [255, 0, 0]
            '''
            binarm3d[link_end_segment[0][0],
                     link_end_segment[0][1]] = [0, 0, 255]
            binarm3d[link_end_segment[1][0],
                     link_end_segment[1][1]] = [0, 0, 255]
            cv2.line(binarm3d, (new_corrected_segment[0][1], new_corrected_segment[0][0]),
                     (new_corrected_segment[1][1], new_corrected_segment[1][0]), [0, 0, 255])
        # Points lying on the link axis (candidate link-end points).
        cand_crit_points = new_polar[np.abs(new_polar[:, 1]) < co.CONST[
            'angle_resolution'], :]
        if len(cand_crit_points) == 0:
            LOG.debug('No cocircular points found,' +
                      ' reached end of hand')
            co.chhm.no_cocirc += 1
            if display == 1:
                tag_im(binarm3d, 'No cocircular points found,' +
                       ' reached end of hand')
                co.im_results.images.append(binarm3d)
            return None, None, None
        # new_polar is sorted by radius, so row 0 is the closest axis point.
        _min = cand_crit_points[0, :]
        new_crit_ind = np.where(new_polar == _min)[0][0]
        cocircular_crit = co.pol_oper.find_cocircular_points(new_polar,
                                                             new_polar[
                                                                 new_crit_ind, 0],
                                                             resolution)
        cocircular_crit = cocircular_crit[cocircular_crit[:, 1].argsort(), :]
        crit_chords = calculate_chords_lengths(cocircular_crit)
        if display == 1:
            tmp = co.pol_oper.polar_to_cart(
                new_polar, new_ref_point, new_ref_angle)
            # binarm3d[tuple(tmp.T)] = [255, 255, 0]
            '''
            binarm3d[tuple(co.pol_oper.polar_to_cart(cocircular_crit, new_ref_point, new_ref_angle).T)] = [
                255, 255, 0]
            '''
            binarm3d[tuple(tmp[np.abs(np.sqrt((tmp[:, 0] - new_ref_point[0])**2
                                              + (tmp[:, 1] - new_ref_point[1])**2)
                                      - new_polar[new_crit_ind, 0]) <=
                               resolution].T)] = [255, 255, 0]
            binarm3d[tuple(co.pol_oper.polar_to_cart(new_polar[np.abs(new_polar[new_crit_ind, 0] -
                                                                      new_polar[:, 0]) < 0.1, :],
                                                     new_ref_point, new_ref_angle).T)] = [255, 0, 255]
            binarm3d[np.abs(np.sqrt((positions[:, :, 0] - new_ref_point[0])**2 + (positions[
                :, :, 1] - new_ref_point[1])**2) - new_ref_radius) <= resolution] = [255, 255, 0]
            binarm3d[int(new_ref_point[0]), int(
                new_ref_point[1])] = [255, 0, 0]
            cv2.arrowedLine(binarm3d, (int(new_ref_point[1]),
                                       int(new_ref_point[0])),
                            (int(new_ref_point[1] +
                                 new_polar[new_crit_ind, 0] *
                                 np.cos(new_ref_angle)),
                             int(new_ref_point[0] +
                                 new_polar[new_crit_ind, 0] *
                                 np.sin(new_ref_angle))), [0, 0, 255], 2, 1)
        # NOTE(review): comparing a numpy array to [] is deprecated and does
        # not test emptiness; `cocircular_crit.size == 0` is the robust form.
        if cocircular_crit == []:
            LOG.debug('Reached end of hand without finding abnormality')
            co.chhm.no_abnorm += 1
            if display == 1:
                tag_im(binarm3d, 'Reached end of hand without finding ' +
                       'abnormality')
                co.im_results.images.append(binarm3d)
            return None, None, None
        '''
        cv2.arrowedLine(binarm3d, (int(new_ref_point[1]), int(new_ref_point[0])), (
            int(new_ref_point[1] + new_ref_radius * np.cos(new_ref_angle)),
            int(new_ref_point[0] + new_ref_radius * np.sin(new_ref_angle))),
            [0, 0, 255], 2, 1)
        '''
        # An "abnormality" is a chord clearly narrower than the current link
        # width: the wrist.
        width_lo_thres = new_ref_radius * co.CONST['abnormality_tol']
        check_abnormality = ((crit_chords < width_lo_thres) *
                             (crit_chords > 1))
        reached_abnormality = np.sum(check_abnormality)
        if display == 1:
            interesting_points_ind = np.nonzero(reached_abnormality)[0]
            for ind in interesting_points_ind:
                cv2.line(binarm3d, tuple(co.pol_oper.polar_to_cart(
                    np.array([cocircular_crit[ind, :]]),
                    new_ref_point, new_ref_angle)[:, ::-1].flatten()),
                    tuple(co.pol_oper.polar_to_cart(
                        np.array([cocircular_crit[ind + 1, :]]),
                        new_ref_point, new_ref_angle)
                    [:, ::-1].flatten()), [0, 255, 0], 3)
        if reached_abnormality:
            hand_patch, hand_patch_pos, full_res_mask = find_hand(binarm, binarm3d, armpoints, display,
                                                                  new_polar,
                                                                  new_corrected_segment, new_ref_angle,
                                                                  new_crit_ind, new_ref_point, resolution)
        if display == 1:
            try:
                binarm3d[int(corrected_cart_point[0]),
                         int(corrected_cart_point[1])] = [255, 0, 255]
            except UnboundLocalError:
                # corrected_cart_point is unset when the ValueError fallback
                # ran this iteration.
                pass
            if __name__ == '__main__':
                cv2.imshow('test', binarm3d)
                cv2.waitKey(0)
        if reached_abnormality:
            if display == 1:
                co.im_results.images.append(binarm3d)
            return hand_patch, hand_patch_pos, full_res_mask
        # No abnormality yet: advance to the next link. The new width segment
        # is the closest axis point plus its most-distant cocircular partner.
        link_end_1st = new_polar[new_crit_ind, :]
        link_end_radius = link_end_1st[0]
        tmp = new_polar[
            np.abs(new_polar[:, 0] - link_end_radius) < resolution, :]
        link_end_2nd = tmp[np.argmax(np.abs(tmp[:, 1])), :]
        link_end_segment[0] = co.pol_oper.polar_to_cart(
            np.array([link_end_1st]), new_ref_point, new_ref_angle)
        link_end_segment[1] = co.pol_oper.polar_to_cart(
            np.array([link_end_2nd]), new_ref_point, new_ref_angle)
        # Drop the points already consumed by this link.
        new_polar = new_polar[new_crit_ind:, :]
def find_hand(*args):
    '''
    Find hand when abnormality reached.

    Positional args (in order): binarm, binarm3d, armpoints, display, polar,
    corrected_segment, ref_angle, crit_ind, ref_point, resolution.

    Locates the wrist as the pair of consecutive (radius-binned) contour
    points whose chord is closest to the last link width, then crops the
    contour points beyond the wrist radius as the hand.

    Returns (hand_patch, hand_patch_pos, full_res_mask) or (None, None, None).
    '''
    # binarm,polar,ref_angle,ref_point,crit_ind,corrected_segment,resolution,display,binarm3d
    [binarm, binarm3d, armpoints, display, polar, corrected_segment,
     ref_angle, crit_ind, ref_point, resolution] = args[0:10]
    separate_hand = 0
    ref_dist = calculate_cart_dists(corrected_segment)
    # Quantise radii into bins of width `resolution`.
    bins = np.arange(resolution, np.max(
        polar[:crit_ind + 1, 0]) + 2 * resolution, resolution)
    dig_rad = np.digitize(polar[crit_ind::-1, 0], bins)
    # Per-bin angular tolerance: half the segment width seen from each
    # radius, plus a constant slack.
    angles_bound = np.abs(np.arctan((ref_dist / 2.0) / (bins))) +\
        co.CONST['angle_tol']
    try:
        angles_bound = angles_bound[dig_rad]
    except IndexError as e:
        # Debug dump before re-raising (Python 2 print statements).
        print 'dig_rad', dig_rad
        print 'bins', bins
        print 'angles_bound', angles_bound
        raise(e)
    angles_thres = np.abs(polar[crit_ind::-1, 1]) < angles_bound
    # Compmat holds in every column elements of the same radius bin
    compmat = dig_rad[:, None] == np.arange(bins.shape[0])
    # angles_thres-1 gives -1 for elements outside bounds and 0 for elements
    # inside bounds
    # (angles_thres-1)+compmat == 1 is in every column True for elements of
    # the column radius bin that reside inside angles bounds
    # sameline is true if there are such elements in a column
    # and false when there are no such elements
    sameline = np.sum(
        (((angles_thres[:, None] - 1) + compmat) == 1), axis=0) == 1
    # compmat[:,sameline] holds the columns for which there is at least one
    # such element inside.
    # sum over axis=1 returns a column that holds all elements with this
    # criterion
    dig_rad_thres = np.sum(compmat[:, sameline], axis=1) > 0
    dig_rad_thres = dig_rad_thres.astype(int)
    # I make TRUE some solo FALSES, so that to create compact TRUEs segments
    # 'dilate'
    dig_rad_thres[1:-1] += (np.roll(dig_rad_thres, 1) +
                            np.roll(dig_rad_thres, -1))[1:-1]
    # 'erode'
    dig_rad_thres[1:-1] *= (np.roll(dig_rad_thres, 1)
                            * np.roll(dig_rad_thres, -1))[1:-1]
    if np.sum(dig_rad_thres) == 0:
        LOG.debug('Hand not found but reached abnormality')
        if display == 1:
            tag_im(binarm3d, 'Hand not found but reached abnormality')
        co.chhm.rchd_abnorm += 1
        return None, None, None
    _, dig_rad_thres = find_largest_trues_segment(dig_rad_thres > 0)
    used_polar = polar[crit_ind::-1, :][dig_rad_thres]
    if display == 1:
        binarm3d[tuple(co.pol_oper.polar_to_cart(
            used_polar,
            ref_point, ref_angle).T)] = [255, 0, 0]
    # sort_inds holds the indices that sort the used_polar, first by
    # corresponding bin radius and afterwards by angles
    sort_inds = np.lexsort(
        (used_polar[:, 1], bins[dig_rad[dig_rad_thres]]))[::-1]
    # used_polar now holds values at the above order
    used_polar = used_polar[sort_inds, :]
    # chords length between consecutive rows of used_polar is
    # calculated. In interchanging bins radii the distance is either too small,
    # so it does no bad, or big enough to get a working length that can help, or
    # too big, that is thrown away
    same_rad_dists = calculate_chords_lengths(used_polar)
    dist_threshold = ((np.abs(same_rad_dists)
                       <= co.CONST['dist_tol'] * np.abs(ref_dist)) *
                      (np.abs(same_rad_dists) >
                       2 * np.abs(ref_dist) / 3.0))
    if display == 1:
        binarm3d[tuple(co.pol_oper.polar_to_cart(
            used_polar[np.concatenate(
                (dist_threshold, [0]), axis=0).astype(bool)],
            ref_point, ref_angle).T)] = [0, 255, 0]
    # Rejected chords get a sentinel length so argmin below ignores them.
    same_rad_dists[np.logical_not(dist_threshold)] = 1000
    if display == 3:
        flag = 1
        tmp = co.pol_oper.polar_to_cart(
            used_polar, ref_point, ref_angle)[:, ::-1]
        for count1, (row1, row2) in enumerate(zip(tmp[:-1, :], tmp[1:, :])):
            if dist_threshold[count1] == 0:
                cv2.line(binarm3d, tuple(row1), tuple(row2), [255, 0, 0], 1)
            elif flag == 1:
                cv2.line(binarm3d, tuple(row1), tuple(row2), [0, 0, 255], 1)
                flag = 0
    try:
        # The surviving chord with minimum length is taken as the wrist.
        ind = np.argmin(same_rad_dists)
        chosen = used_polar[ind:ind + 2, :]
        if display == 2:
            import matplotlib.pyplot as plt
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
            ax1.scatter(bins[dig_rad[dig_rad_thres]][
                sort_inds], used_polar[:, 1])
            plt.draw()
            plt.pause(0.1)
            plt.waitforbuttonpress(timeout=-1)
            plt.close(fig)
        wristpoints = co.pol_oper.polar_to_cart(
            chosen, ref_point, ref_angle)
        wrist_radius = calculate_cart_dists(
            wristpoints) / 2.0
        # Everything farther than the wrist radius belongs to the hand.
        hand_edges = co.pol_oper.polar_to_cart(
            polar[polar[:, 0] > np.min(chosen[:, 0])], ref_point, ref_angle)
        if separate_hand:
            hand_box = binarm[np.min(hand_edges[:, 0]):
                              np.max(hand_edges[:, 0]),
                              np.min(hand_edges[:, 1]):
                              np.max(hand_edges[:, 1])]
            # perform top hat transform, with suggested
            # finger radius smaller than wrist radius
            struct_el = cv2.getStructuringElement(
                cv2.MORPH_ELLIPSE, tuple(2 * [int(0.9 * wrist_radius)]))
            palm = cv2.morphologyEx(
                hand_box, cv2.MORPH_OPEN, struct_el)
            fingers = hand_box - palm
            fingers = cv2.morphologyEx(fingers, cv2.MORPH_OPEN, np.ones(
                tuple(2 * [int(struct_el.shape[0] / 4)]), np.uint8))
        if display == 1:
            convexhand = cv2.convexHull(hand_edges).squeeze()
            cv2.drawContours(
                binarm3d, [np.fliplr(convexhand)], 0, [0, 0, 255], 2)
            binarm3d[tuple(co.pol_oper.polar_to_cart(np.array(
                [np.mean(polar[ind:, :],
                         axis=0)]), ref_point, ref_angle).
                astype(int).T)] = [255, 0, 255]
        LOG.debug('Hand found')
        if display == 1:
            tag_im(binarm3d, 'Hand found')
        co.chhm.found += 1
        if display == 2:
            if separate_hand:
                cv2.imshow('Fingers', fingers)
                cv2.imshow('Palm', palm)
            binarm3d[tuple(hand_edges.T)] = [255, 0, 0]
            binarm3d[tuple(wristpoints.T)] = [0, 0, 255]
        # binarm3d[tuple(co.pol_oper.polar_to_cart(polar[polar[:,1]>0],ref_point,ref_angle).T)]=[255,0,0]
        hand_patch = binarm[np.min(hand_edges[:, 0]):
                            np.max(hand_edges[:, 0]),
                            np.min(hand_edges[:, 1]):
                            np.max(hand_edges[:, 1])]
        full_res_mask = np.zeros(binarm.shape)
        full_res_mask[np.min(hand_edges[:, 0]):
                      np.max(hand_edges[:, 0]),
                      np.min(hand_edges[:, 1]):
                      np.max(hand_edges[:, 1])] = hand_patch
        hand_patch_pos = np.array(
            [hand_edges[:, 0].min(), hand_edges[:, 1].min()])
        # hand_patch has same values as input data.
        # hand_patch_pos denotes the hand_patch upper left corner absolute
        # location
        # full_res_mask is hand_patch in place
        return hand_patch, hand_patch_pos, full_res_mask
    except IndexError:
        LOG.debug('Hand not found but reached abnormality')
        if display == 1:
            tag_im(binarm3d, 'Hand not found but reached abnormality')
        co.chhm.rchd_abnorm += 1
        return None, None, None
def find_link_direction(
        xy_points, entry_segment, perp_to_segment_unit, point):
    '''
    Decide which endpoint of ``entry_segment`` anchors the link towards
    ``point``.

    For each segment endpoint, count how many of ``xy_points`` fall in the
    angular sector between that endpoint's direction to ``point`` and the
    segment's perpendicular; the endpoint with the denser sector wins.
    Returns the link angle (measured towards the *other* endpoint, via
    Thales' theorem) and the winning segment [endpoint, point].
    '''
    target = np.array(point)
    endpoint_vecs = target - np.array(entry_segment)
    endpoint_incls = np.arctan2(endpoint_vecs[:, 0], endpoint_vecs[:, 1])
    # Two opposite bounds along the perpendicular of the segment.
    bounds = [np.arctan2(perp_to_segment_unit[0], perp_to_segment_unit[1])]
    bounds.append(bounds[0] - pi if bounds[0] + pi > pi else bounds[0] + pi)
    counts = [0, 0]
    for idx in range(2):
        rel = ((xy_points[:, 0] - entry_segment[idx][0]) * 1j +
               (xy_points[:, 1] - entry_segment[idx][1]))
        rel_angles = np.angle(rel)
        # Pick whichever perpendicular bound lies closer (mod 2*pi) to this
        # endpoint's inclination towards the target point.
        pair = np.array([endpoint_incls[idx], endpoint_incls[idx]])
        chosen_bound = bounds[np.argmin(
            np.abs(co.pol_oper.mod_diff(np.array(bounds), pair)))]
        lo = min(endpoint_incls[idx], chosen_bound)
        hi = max(endpoint_incls[idx], chosen_bound)
        counts[idx] = np.sum(co.pol_oper.mod_between_vals(rel_angles, lo, hi))
    densest = np.argmax(counts)
    # Thales theorem:
    direction_vec = target - np.array(entry_segment[(densest + 1) % 2][:])
    return (np.arctan2(direction_vec[0], direction_vec[1]),
            [entry_segment[densest][:], point])
def picture_box(binarm3d, points):
    '''Paint the pixels listed in ``points`` green on ``binarm3d`` (in place)
    and return the image.'''
    mask = np.zeros_like(binarm3d[:, :, 0])
    mask[tuple(points.T)] = 255
    binarm3d[mask > 0] = [0, 255, 0]
    return binarm3d
def tag_im(img, text):
    '''
    Write ``text`` in red near the top-left corner of ``img`` (in place).
    '''
    cv2.putText(img, text, (0, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 255), 2)
| {
"content_hash": "b668e90cca8ac663e8f43f3f0e258ebf",
"timestamp": "",
"source": "github",
"line_count": 1748,
"max_line_length": 127,
"avg_line_length": 45.401029748283754,
"alnum_prop": 0.5311046987815173,
"repo_name": "VasLem/KinectPainting",
"id": "0492849d2c70e3fa31939323c73366d75d6920bc",
"size": "79361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hand_segmentation_alg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "130926"
},
{
"name": "Python",
"bytes": "763393"
}
],
"symlink_target": ""
} |
# Standard-library dependencies of the autoninja smoke tests.
import multiprocessing
import os
import os.path
import sys
import unittest
import unittest.mock
# Make the checkout root importable so the module under test can be imported
# directly, without installation.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import autoninja
class AutoninjaTest(unittest.TestCase):
    """Smoke tests for the autoninja command-line wrapper."""

    def test_autoninja(self):
        # The wrapper must be able to build a command line with no mocks.
        autoninja.main([])

    def test_autoninja_goma(self):
        """With goma enabled, -j must exceed the local core count."""
        patch_exists = unittest.mock.patch('os.path.exists',
                                           return_value=True)
        patch_open = unittest.mock.patch(
            'autoninja.open',
            unittest.mock.mock_open(read_data='use_goma=true'))
        patch_call = unittest.mock.patch('subprocess.call', return_value=0)
        with patch_exists as mock_exists, patch_open as mock_open, patch_call:
            args = autoninja.main([]).split()
        mock_exists.assert_called()
        mock_open.assert_called_once()
        self.assertIn('-j', args)
        parallel_j = int(args[args.index('-j') + 1])
        self.assertGreater(parallel_j, multiprocessing.cpu_count())
# Allow running this test module directly (python autoninja_test.py).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "602eb0ac318133598773b13622a7e894",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 27.257142857142856,
"alnum_prop": 0.6540880503144654,
"repo_name": "CoherentLabs/depot_tools",
"id": "55e6b5b17811a90d4b37e4099e849f210a43e98a",
"size": "1144",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/autoninja_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27896"
},
{
"name": "PowerShell",
"bytes": "5337"
},
{
"name": "Python",
"bytes": "2549026"
},
{
"name": "Roff",
"bytes": "5283"
},
{
"name": "Shell",
"bytes": "64165"
}
],
"symlink_target": ""
} |
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from nyse import *
from nn import *
from keras.optimizers import SGD
# import theano
# theano.compile.mode.Mode(linker='py', optimizer='fast_compile')
class RNN:
    """LSTM-based recurrent network wrapped around a Keras Sequential model."""

    def __init__(self, input_length, hidden_cnt, input_dim, output_dim):
        # Hyper-parameters; the compiled model is built immediately.
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.input_length = input_length
        self.hidden_cnt = hidden_cnt
        self.model = self._build_model()

    def _build_model(self):
        """Assemble and compile LSTM -> Dropout -> Dense(tanh) -> Dense(softmax)."""
        print('Build model...')
        net = Sequential()
        net.add(LSTM(output_dim=self.hidden_cnt,
                     input_dim=self.input_dim,
                     input_length=self.input_length,
                     return_sequences=False))
        net.add(Dropout(0.5))
        net.add(Dense(self.hidden_cnt, activation='tanh'))
        net.add(Dense(self.output_dim, activation='softmax'))
        print('Compile model...')
        optimizer = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        net.compile(loss='categorical_crossentropy', optimizer=optimizer)
        return net

    def change_input_dim(self, input_dim):
        """Rebuild the model for a new input dimensionality."""
        self.input_dim = input_dim
        self.model = self._build_model()

    def get_model(self):
        """Return the underlying compiled Keras model."""
        return self.model
def main():
    """Train, test, cross-validate and feature-select the RNN on NYSE data."""
    input_length = 100
    hidden_cnt = 50
    # NOTE(review): RNN.__init__ also takes input_dim/output_dim; this
    # two-argument call looks under-supplied -- confirm against nn module.
    network = NeuralNetwork(RNN(input_length, hidden_cnt))
    data = get_test_data(input_length)
    print("TRAIN")
    network.train(data)
    print("TEST")
    network.test(data)
    print("TRAIN WITH CROSS-VALIDATION")
    network.run_with_cross_validation(data, 2)
    print("FEATURE SELECTION")
    features = network.feature_selection(data)
    print("Selected features: {0}".format(features))
if __name__ == '__main__':
main() | {
"content_hash": "3f4e2e27b77fab34e3fc297562753925",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 72,
"avg_line_length": 31.016666666666666,
"alnum_prop": 0.6260075228371843,
"repo_name": "dzitkowskik/StockPredictionRNN",
"id": "d07c611b87616a45cb160623f12c7ca087201671",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nyse-rnn/rnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30872"
}
],
"symlink_target": ""
} |
import datetime
from django.db import connection
from django.db.models.query import QuerySet, Q
from django.utils.timezone import now as timezone_now
from sqlalchemy.sql import (
column,
literal,
func,
)
from zerver.lib.request import REQ
from zerver.models import (
Message,
Recipient,
UserMessage,
UserProfile,
)
from typing import Any, Dict, List, Optional, Tuple
# Only use these constants for events.
# (Event payloads still use the legacy "subject" wording on the wire.)
ORIG_TOPIC = "orig_subject"
TOPIC_NAME = "subject"
TOPIC_LINKS = "subject_links"
MATCH_TOPIC = "match_subject"
# This constant is actually embedded into
# the JSON data for message edit history,
# so we'll always need to handle legacy data
# unless we do a pretty tricky migration.
LEGACY_PREV_TOPIC = "prev_subject"
# This constant is pretty closely coupled to the
# database, but it's the JSON field.
EXPORT_TOPIC_NAME = "subject"
'''
The following functions are for user-facing APIs
where we'll want to support "subject" for a while.
'''
def get_topic_from_message_info(message_info: Dict[str, Any]) -> str:
    '''
    Extract the topic from a message-like dict.

    Use this where you are getting dicts that are based off of messages
    that may come from the outside world, especially from third party
    APIs and bots.

    We prefer 'topic' to 'subject' here. At least one of the two keys
    must be present (otherwise KeyError propagates to the caller).
    '''
    try:
        return message_info['topic']
    except KeyError:
        return message_info['subject']
def REQ_topic() -> Optional[str]:
    """Declare a request parameter for the topic, accepting legacy 'subject'.

    REQ handlers really return a REQ object; the Optional[str] return
    type is a lie to make the rest of the type matching work.
    """
    return REQ(
        whence='topic',
        aliases=['subject'],
        converter=lambda topic: topic.strip(),
        default=None,
    )
'''
TRY TO KEEP THIS DIVIDING LINE.
Below this line we want to make it so that functions are only
using "subject" in the DB sense, and nothing customer facing.
'''
# This is used in low-level message functions in
# zerver/lib/message.py, and it's not user facing.
DB_TOPIC_NAME = "subject"
# Django ORM lookup path for a related message's topic column
# (see the message__subject__iexact filters below).
MESSAGE__TOPIC = 'message__subject'
def topic_match_sa(topic_name: str) -> Any:
    """Case-insensitive SQLAlchemy equality condition on the topic column.

    (_sa is short for SQLAlchemy, which we use mostly for queries that
    search messages.)
    """
    return func.upper(column("subject")) == func.upper(literal(topic_name))
def topic_column_sa() -> Any:
    """SQLAlchemy column expression for the topic (DB column: "subject")."""
    return column("subject")
def filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:
    """Narrow `query` to rows whose subject exactly matches `message`'s topic."""
    return query.filter(subject=message.topic_name())
def filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:
    """Case-insensitively filter rows joined through `message` by topic name."""
    return query.filter(message__subject__iexact=topic_name)
def messages_for_topic(stream_id: int, topic_name: str) -> QuerySet:
    """All messages in the given stream's topic, matched case-insensitively."""
    return Message.objects.filter(
        recipient__type_id=stream_id,
        subject__iexact=topic_name,
    )
def save_message_for_edit_use_case(message: Message) -> None:
    """Persist only the fields that a message edit can change."""
    edited_fields = ["subject", "content", "rendered_content",
                     "rendered_content_version", "last_edit_time",
                     "edit_history"]
    message.save(update_fields=edited_fields)
def user_message_exists_for_topic(user_profile: UserProfile,
                                  recipient: Recipient,
                                  topic_name: str) -> bool:
    """Whether the user has any message in this recipient's topic
    (topic matched case-insensitively)."""
    matches = UserMessage.objects.filter(
        user_profile=user_profile,
        message__recipient=recipient,
        message__subject__iexact=topic_name,
    )
    return matches.exists()
def update_messages_for_topic_edit(message: Message,
                                   propagate_mode: str,
                                   orig_topic_name: str,
                                   topic_name: str) -> List[Message]:
    """Rename the topic on messages related to `message`.

    `propagate_mode` widens the scope: 'change_all' also edits other
    recent messages in the topic (excluding this one, up to 2 days back);
    'change_later' edits only messages after this one. Returns the
    pre-fetched ORM objects with their in-memory topic updated, so the
    caller can refresh caches.
    """
    propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)
    # We only change messages up to 2 days in the past, to avoid hammering our
    # DB by changing an unbounded amount of messages
    if propagate_mode == 'change_all':
        before_bound = timezone_now() - datetime.timedelta(days=2)
        propagate_query = (propagate_query & ~Q(id = message.id) &
                           Q(pub_date__range=(before_bound, timezone_now())))
    if propagate_mode == 'change_later':
        propagate_query = propagate_query & Q(id__gt = message.id)
    messages = Message.objects.filter(propagate_query).select_related()
    # Evaluate the query before running the update
    messages_list = list(messages)
    messages.update(subject=topic_name)
    for m in messages_list:
        # The cached ORM object is not changed by messages.update()
        # and the remote cache update requires the new value
        m.set_topic_name(topic_name)
    return messages_list
def generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:
    """Collapse (topic_name, max_message_id) rows into a topic history list.

    Topic names differing only in case are merged; the casing of the
    most recent row (largest max_message_id) wins. Results are sorted
    by max_id, newest first.
    """
    canonical_topic_names = {}  # type: Dict[str, Tuple[int, str]]
    # Process rows in ascending max_message_id order so that, for topics
    # with several casings, the most recent casing overwrites the rest.
    for topic_name, max_message_id in sorted(rows, key=lambda row: row[1]):
        canonical_topic_names[topic_name.lower()] = (max_message_id, topic_name)
    history = [
        dict(name=display_name, max_id=message_id)
        for message_id, display_name in canonical_topic_names.values()
    ]
    return sorted(history, key=lambda entry: -entry['max_id'])
def get_topic_history_for_stream(user_profile: UserProfile,
                                 recipient: Recipient,
                                 public_history: bool) -> List[Dict[str, Any]]:
    """Return [{'name': topic, 'max_id': message_id}, ...] for a stream.

    When public_history is False, only topics containing messages the
    user has received (joined through zerver_usermessage) are included.
    Topic casing/ordering is normalized by generate_topic_history_from_db_rows.
    """
    cursor = connection.cursor()
    if public_history:
        query = '''
        SELECT
            "zerver_message"."subject" as topic,
            max("zerver_message".id) as max_message_id
        FROM "zerver_message"
        WHERE (
            "zerver_message"."recipient_id" = %s
        )
        GROUP BY (
            "zerver_message"."subject"
        )
        ORDER BY max("zerver_message".id) DESC
    '''
        cursor.execute(query, [recipient.id])
    else:
        query = '''
        SELECT
            "zerver_message"."subject" as topic,
            max("zerver_message".id) as max_message_id
        FROM "zerver_message"
        INNER JOIN "zerver_usermessage" ON (
            "zerver_usermessage"."message_id" = "zerver_message"."id"
        )
        WHERE (
            "zerver_usermessage"."user_profile_id" = %s AND
            "zerver_message"."recipient_id" = %s
        )
        GROUP BY (
            "zerver_message"."subject"
        )
        ORDER BY max("zerver_message".id) DESC
    '''
        cursor.execute(query, [user_profile.id, recipient.id])
    rows = cursor.fetchall()
    cursor.close()
    return generate_topic_history_from_db_rows(rows)
def get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:
    """Topic history for a web-public stream.

    No user is involved, so the full message history of the recipient
    is always used (same shape as get_topic_history_for_stream).
    """
    cursor = connection.cursor()
    query = '''
    SELECT
        "zerver_message"."subject" as topic,
        max("zerver_message".id) as max_message_id
    FROM "zerver_message"
    WHERE (
        "zerver_message"."recipient_id" = %s
    )
    GROUP BY (
        "zerver_message"."subject"
    )
    ORDER BY max("zerver_message".id) DESC
    '''
    cursor.execute(query, [recipient.id])
    rows = cursor.fetchall()
    cursor.close()
    return generate_topic_history_from_db_rows(rows)
| {
"content_hash": "0a856d3d1a8f5c77a6c61d595c10d32e",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 93,
"avg_line_length": 32.93392070484582,
"alnum_prop": 0.6264044943820225,
"repo_name": "tommyip/zulip",
"id": "3cc05fb5f00c39b01ea6f636cb2e3a8c293f4297",
"size": "7476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/topic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400301"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "718599"
},
{
"name": "JavaScript",
"bytes": "3092201"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71123"
},
{
"name": "Python",
"bytes": "6889539"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
} |
__author__ = 'mwalker'
from django.conf.urls import *
# URL routes for the clinics app, resolved against 'clinics.views'
# (legacy Django patterns()/string-view-name syntax).
urlpatterns = patterns('clinics.views',
    url(r'^(?P<filter_term>[a-zA-Z]+)/$', 'filter_clinics', name='filter_clinics'),
    url(r'^redirect/(?P<clinic_id>\d+)/$', 'redirect', name='redirect'),
    url(r'^$', 'index', name='injury_clinics'),
)
| {
"content_hash": "de32cb76e29f8c5354b992a2c7b41d64",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 83,
"avg_line_length": 30.4,
"alnum_prop": 0.6085526315789473,
"repo_name": "marksweb/django-cms-app-examples",
"id": "e485e5289c669fabc959958c53c09dd9aa960e76",
"size": "304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clinics/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1126"
},
{
"name": "Python",
"bytes": "36302"
}
],
"symlink_target": ""
} |
import csv
from django.conf.urls import url
from django.contrib import admin
from django.urls import reverse
from django.http import HttpResponseRedirect
from huxley.core.models import PositionPaper
class PositionPaperAdmin(admin.ModelAdmin):
    """Admin page for position papers, searchable by committee or country name."""

    search_fields = (
        'assignment__committee__name',
        'assignment__country__name',
    )

    def get_urls(self):
        """No custom admin URLs yet; defer entirely to ModelAdmin."""
        urls = super(PositionPaperAdmin, self).get_urls()
        return urls
| {
"content_hash": "4be477a346712cb9798f8e968e5863b4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 57,
"avg_line_length": 22.842105263157894,
"alnum_prop": 0.7327188940092166,
"repo_name": "bmun/huxley",
"id": "6f386205222c0fbee0abf2ec7f2985ce0bdba27d",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huxley/core/admin/position_paper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13301"
},
{
"name": "JavaScript",
"bytes": "400597"
},
{
"name": "Less",
"bytes": "19215"
},
{
"name": "Python",
"bytes": "635783"
},
{
"name": "Shell",
"bytes": "2475"
}
],
"symlink_target": ""
} |
import ctypes
import os
import struct
import sys
# Each edit operation is assigned different cost, such as:
# 'w' means swap operation, the cost is 0;
# 's' means substitution operation, the cost is 2;
# 'a' means insertion operation, the cost is 1;
# 'd' means deletion operation, the cost is 3;
# The smaller cost results in the better similarity.
COST = {'w': 0, 's': 2, 'a': 1, 'd': 3}
def damerau_levenshtein(s1, s2, cost):
    """Weighted Damerau-Levenshtein distance between two strings.

    The distance is the minimum total cost of single-character edits
    (insertion 'a', deletion 'd', substitution 's', adjacent swap 'w',
    with per-operation costs taken from `cost`) needed to turn s1 into
    s2. Smaller cost means better similarity.

    Implemented with the classic dynamic-programming recurrence over
    prefixes, keeping only three rolling rows of the matrix:
    row0 (two rows back, needed for swaps), row1 (previous), row2 (current).

    More details:
    https://en.wikipedia.org/wiki/Levenshtein_distance
    https://github.com/git/git/commit/8af84dadb142f7321ff0ce8690385e99da8ede2f
    """
    if s1 == s2:
        return 0
    len1, len2 = len(s1), len(s2)
    if not len1:
        return len2 * cost['a']
    if not len2:
        return len1 * cost['d']
    row1 = [j * cost['a'] for j in range(len2 + 1)]
    row2 = list(row1)
    row0 = list(row1)
    for i, c1 in enumerate(s1):
        row2[0] = (i + 1) * cost['d']
        for j, c2 in enumerate(s2):
            sub_cost = row1[j] + (c1 != c2) * cost['s']   # substitution
            ins_cost = row2[j] + cost['a']                # insertion
            del_cost = row1[j + 1] + cost['d']            # deletion
            best = min(sub_cost, ins_cost, del_cost)
            # Transposition of two adjacent characters.
            if i and j and s1[i - 1] == c2 and c1 == s2[j - 1]:
                best = min(best, row0[j - 1] + cost['w'])
            row2[j + 1] = best
        row0, row1, row2 = row1, row2, row0
    return row1[-1]
def terminal_width(stdout):
    """Best-effort terminal column count for `stdout`, or None if unknown."""
    if hasattr(os, 'get_terminal_size'):
        # python 3.3 onwards has built-in support for getting terminal size
        try:
            return os.get_terminal_size().columns
        except OSError:
            return None
    if sys.platform == 'win32':
        return _get_terminal_width_windows(stdout)
    return _get_terminal_width_ioctl(stdout)
def _get_terminal_width_windows(stdout):
    """Query the Windows console API for the screen-buffer width.

    Returns None when `stdout` is not one of the three standard streams
    or when the console query fails.
    """
    STD_INPUT_HANDLE = -10
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12
    std_to_win_handle = {
        sys.stdin: STD_INPUT_HANDLE,
        sys.stdout: STD_OUTPUT_HANDLE,
        sys.stderr: STD_ERROR_HANDLE}
    std_handle = std_to_win_handle.get(stdout)
    if std_handle is None:
        return None
    handle = ctypes.windll.kernel32.GetStdHandle(std_handle)
    # CONSOLE_SCREEN_BUFFER_INFO is 22 bytes; its first short is the width.
    csbi = ctypes.create_string_buffer(22)
    if ctypes.windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi):
        return struct.unpack("hhhhHhhhhhh", csbi.raw)[0]
    return None
def _get_terminal_width_ioctl(stdout):
from fcntl import ioctl
import termios
try:
# winsize structure has 4 unsigned short fields
winsize = b'\0' * struct.calcsize('hhhh')
try:
winsize = ioctl(stdout, termios.TIOCGWINSZ, winsize)
except IOError:
return None
except TypeError:
# this is raised in unit tests as stdout is sometimes a StringIO
return None
winsize = struct.unpack('hhhh', winsize)
columns = winsize[1]
if not columns:
return None
return columns
except IOError:
return None
| {
"content_hash": "dd6253d138ca3ec1e8ec2893b701364a",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 82,
"avg_line_length": 29.845070422535212,
"alnum_prop": 0.5809344030202926,
"repo_name": "openstack/cliff",
"id": "50f3ab61218c7a3459fc596ffd4ee8bbfd638ea0",
"size": "4784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cliff/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249070"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import glyphsLib
from fontTools.designspaceLib import DesignSpaceDocument
from glyphsLib.builder.instances import apply_instance_data
import defcon
import pytest
import py.path
from ..test_helpers import write_designspace_and_UFOs
DATA = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
@pytest.mark.parametrize(
    "instance_names",
    [None, ["Extra Light"], ["Regular", "Bold"]],
    ids=["default", "include_1", "include_2"],
)
def test_apply_instance_data(tmpdir, instance_names):
    """apply_instance_data fills instance-UFO metadata for the selected subset.

    Parametrized over all instances (None) and explicit style-name subsets.
    Dummy empty UFOs stand in for interpolated instances, so only the
    metadata application itself is exercised.
    """
    font = glyphsLib.GSFont(os.path.join(DATA, "GlyphsUnitTestSans.glyphs"))
    instance_dir = "instances"
    designspace = glyphsLib.to_designspace(font, instance_dir=instance_dir)
    path = str(tmpdir / (font.familyName + ".designspace"))
    write_designspace_and_UFOs(designspace, path)
    test_designspace = DesignSpaceDocument()
    test_designspace.read(designspace.path)
    if instance_names is None:
        # Collect all instances.
        test_instances = [instance.filename for instance in test_designspace.instances]
    else:
        # Collect only selected instances.
        test_instances = [
            instance.filename
            for instance in test_designspace.instances
            if instance.styleName in instance_names
        ]
    # Generate dummy UFOs for collected instances so we don't actually need to
    # interpolate.
    tmpdir.mkdir(instance_dir)
    for instance in test_instances:
        ufo = defcon.Font()
        ufo.save(str(tmpdir / instance))
    ufos = apply_instance_data(designspace.path, include_filenames=test_instances)
    for filename in test_instances:
        assert os.path.isdir(str(tmpdir / filename))
    assert len(ufos) == len(test_instances)
    for ufo in ufos:
        # Weight/width classes must have been filled in from instance data.
        assert ufo.info.openTypeOS2WeightClass is not None
        assert ufo.info.openTypeOS2WidthClass is not None
def test_reexport_apply_instance_data():
    """glyphsLib.interpolation must re-export apply_instance_data unchanged.

    Compatibility shim for fontmake:
    https://github.com/googlei18n/fontmake/issues/451
    """
    from glyphsLib.interpolation import apply_instance_data as reexported

    assert reexported is apply_instance_data
def test_reencode_glyphs(tmpdir):
    """Instance UFOs get their codepoints rewritten per 'Reencode Glyphs' data."""
    source_dir = py.path.local(DATA)
    (source_dir / "TestReencode.designspace").copy(tmpdir)
    ufo_src = source_dir / "TestReencode-Regular.ufo"
    ufo_src.copy(tmpdir.ensure_dir("TestReencode-Regular.ufo"))
    instance_dir = tmpdir.ensure_dir("instance_ufo")
    # Both instances start from identical copies of the same UFO.
    ufo_src.copy(instance_dir.ensure_dir("TestReencode-Regular.ufo"))
    ufo_src.copy(instance_dir.ensure_dir("TestReencodeUI-Regular.ufo"))
    ufos = apply_instance_data(str(tmpdir / "TestReencode.designspace"))
    assert len(ufos) == 2
    # First instance: untouched, original codepoints.
    assert ufos[0]["A"].unicode == 0x0041
    assert ufos[0]["A.alt"].unicode is None
    assert ufos[0]["C"].unicode == 0x0043
    # Second instance applies "Reencode Glyphs: A.alt=0041, C=".
    assert ufos[1]["A"].unicode is None
    assert ufos[1]["A.alt"].unicode == 0x0041
    assert ufos[1]["C"].unicode is None
| {
"content_hash": "f8a83d5a2b7a0b983429ff1ee8e5f6c0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 87,
"avg_line_length": 34.82022471910113,
"alnum_prop": 0.7011939335269441,
"repo_name": "googlei18n/glyphsLib",
"id": "764e2c5d4300dd9ed6ac02b371f2665f2b04dc13",
"size": "3713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/builder/instances_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "715643"
}
],
"symlink_target": ""
} |
from flask import json
from util import *
from flask import Flask
from flask import jsonify
from flask import json
app = Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint: plain-text greeting."""
    return "Hello World!"
@app.route("/buoys")
def getAllBuoys():
    """List all known buoy identifiers as JSON."""
    buoys = ["buoy1", "buoy2"]
    return returnJson(buoys)
@app.route("/buoys/<int:buoyId>")
def getBuoy(buoyId):
    """Return the single buoy with the given numeric id as JSON."""
    name = "buoy%d" % buoyId
    return returnJson([name])
if __name__ == "__main__":
app.run(debug=True) | {
"content_hash": "d005ec8178ae0da2cddc6244cea6fd58",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 42,
"avg_line_length": 21.238095238095237,
"alnum_prop": 0.6300448430493274,
"repo_name": "Ilink/waveform",
"id": "c82cd59061c24aa9fa6c19c9753e0d9701c5eaff",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/waveform.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15203"
}
],
"symlink_target": ""
} |
"""py2/py3 metaclass utility, from jinja2 via Bokeh."""
def with_metaclass(meta, *bases):
    """Add metaclasses in both Python 2 and Python 3.
    Function from jinja2/_compat.py. License: BSD.
    Use it like this::
        class BaseForm(object):
            pass
        class FormType(type):
            pass
        class Form(with_metaclass(FormType, BaseForm)):
            pass
    This requires a bit of explanation: the basic idea is to make a
    dummy metaclass for one level of class instantiation that replaces
    itself with the actual metaclass.  Because of internal type checks
    we also need to make sure that we downgrade the custom metaclass
    for one level to something closer to type (that's why __call__ and
    __init__ comes back from type etc.).
    This has the advantage over six.with_metaclass of not introducing
    dummy classes into the final MRO.
    """
    # this is a hack to keep yapf from inserting a blank line that pep257 complains about
    empty = {}
    class metaclass(meta):
        # Behave like plain `type` for instantiation/initialization, so the
        # temporary class is as close to `type` as possible.
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                # Creating 'temporary_class' itself: make a plain class.
                return type.__new__(cls, name, (), d)
            # Subclassing 'temporary_class': swap in the real metaclass and
            # the real bases, so the dummy never appears in the final MRO.
            return meta(name, bases, d)
    return metaclass('temporary_class', None, empty)
| {
"content_hash": "6355793fd4f9400aa0afa4e7bc3d3c50",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 89,
"avg_line_length": 32.48780487804878,
"alnum_prop": 0.6313813813813813,
"repo_name": "conda/kapsel",
"id": "8e829ff8f3c237a2cc36120239c3b1733e46b208",
"size": "1663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_kapsel/internal/metaclass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "1285062"
},
{
"name": "Shell",
"bytes": "378"
}
],
"symlink_target": ""
} |
"""
This module contains a quadratic equation solver,
but uses an optional parameter so multiple root functions
do not need to be defined.
"""
import numpy as np
a = 2.1
b = 5.4
c = 1.2
# The following shows how to define optional parameters.
# The last input has a default value of True.
# If a user calls the function with only 3 inputs, the positive
# variable remains True. Users can also call it with a 4th input
# specifying whether positive should be true or false
def root(a, b, c, positive=True):
    """
    Return one root of the quadratic a*x**2 + b*x + c = 0.

    By default the '+' root, (-b + sqrt(b^2 - 4ac)) / (2a), is returned;
    call with positive=False to get the '-' root instead. Using an
    optional parameter avoids defining two separate root functions.
    """
    disc = np.sqrt(b**2 - 4*a*c)
    # Pick the sign of the discriminant term according to `positive`.
    numerator = -b + disc if positive else -b - disc
    return numerator / (2*a)
def evalQuad(a, b, c, x):
    """
    Evaluate the quadratic a * x**2 + b*x + c at the point x.
    """
    # Same expression/evaluation order as always, just reformatted.
    return a * x**2 + b * x + c
# There are 3 ways to get the positive root.
# Two of which are commented out
root1 = root(a,b,c)
#root1 = root(a,b,c,True)
#root1 = root(a,b,c,positive=True)
# There are 2 ways to get the negative root.
# The latter of which is commented out.
root2 = root(a,b,c,False)
#root2 = root(a,b,c,positive=False)
quad_at_root1 = evalQuad(a,b,c, root1)
quad_at_root2 = evalQuad(a,b,c, root2)
print( "The quadratic equation is:" )
print( "{:f} * x^2 + {:f} x + {:f}".format(a,b,c) )
print( " = {:.2f} * x^2 + {:.2f} x + {:.2f}".format(a,b,c) )# only printing 2 places after the decimal
print( )
print( "The positive root is {:f}".format(root1) )
print( "subsituting the positive root into the equation gives:\n {:f}".format(quad_at_root1) )
print( )
print( "The negative root is {:f}".format(root2) )
print( "subsituting the negative root into the equation gives:\n {:f}".format(quad_at_root2) )
| {
"content_hash": "22a5f4707b5bf9d2d6843e7633badca2",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 103,
"avg_line_length": 30.733333333333334,
"alnum_prop": 0.6568329718004339,
"repo_name": "solter/pyTutorial",
"id": "0671ad983342925d8717b2fe249fedb29ae296ea",
"size": "2329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/quadratic_eqn/single_root_function.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27038"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColumnValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the `indicator.domain.column` integer property (>= 0)."""

    def __init__(self, plotly_name="column", parent_name="indicator.domain", **kwargs):
        # Fill in defaults only when the caller has not overridden them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        super(ColumnValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "cd2216ac4706acadb8b6676ceb55b46f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.6085972850678733,
"repo_name": "plotly/plotly.py",
"id": "1112db9257a1f6e38a51ef288813fc71a61ebe0c",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/indicator/domain/_column.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Event parser and human readable log generator."""
from __future__ import annotations
from collections.abc import Callable
from typing import Any
import voluptuous as vol
from homeassistant.components import frontend
from homeassistant.components.recorder.const import DOMAIN as RECORDER_DOMAIN
from homeassistant.components.recorder.filters import (
extract_include_exclude_filter_conf,
merge_include_exclude_filters,
sqlalchemy_filter_from_include_exclude_conf,
)
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_ENTITY_ID,
ATTR_NAME,
EVENT_LOGBOOK_ENTRY,
)
from homeassistant.core import Context, Event, HomeAssistant, ServiceCall, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entityfilter import (
INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA,
convert_include_exclude_filter,
)
from homeassistant.helpers.integration_platform import (
async_process_integration_platforms,
)
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
from . import rest_api, websocket_api
from .const import (
ATTR_MESSAGE,
DOMAIN,
LOGBOOK_ENTITIES_FILTER,
LOGBOOK_ENTRY_DOMAIN,
LOGBOOK_ENTRY_ENTITY_ID,
LOGBOOK_ENTRY_MESSAGE,
LOGBOOK_ENTRY_NAME,
LOGBOOK_FILTERS,
)
from .models import LazyEventPartialState # noqa: F401
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA}, extra=vol.ALLOW_EXTRA
)
LOG_MESSAGE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
}
)
@bind_hass
def log_entry(
    hass: HomeAssistant,
    name: str,
    message: str,
    domain: str | None = None,
    entity_id: str | None = None,
    context: Context | None = None,
) -> None:
    """Add an entry to the logbook (thread-safe wrapper over async_log_entry)."""
    hass.add_job(async_log_entry, hass, name, message, domain, entity_id, context)
@callback
@bind_hass
def async_log_entry(
    hass: HomeAssistant,
    name: str,
    message: str,
    domain: str | None = None,
    entity_id: str | None = None,
    context: Context | None = None,
) -> None:
    """Add an entry to the logbook by firing an EVENT_LOGBOOK_ENTRY event."""
    data = {LOGBOOK_ENTRY_NAME: name, LOGBOOK_ENTRY_MESSAGE: message}
    # Omit unset optional fields rather than sending explicit Nones.
    for key, value in (
        (LOGBOOK_ENTRY_DOMAIN, domain),
        (LOGBOOK_ENTRY_ENTITY_ID, entity_id),
    ):
        if value is not None:
            data[key] = value
    hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data, context=context)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Logbook setup.

    Registers the frontend panel, builds the message filters from the
    merged recorder + logbook include/exclude config, wires up the
    websocket/REST APIs and the `logbook.log` service, and loads
    per-integration logbook platforms.
    """
    hass.data[DOMAIN] = {}
    @callback
    def log_message(service: ServiceCall) -> None:
        """Handle sending notification message service calls."""
        message = service.data[ATTR_MESSAGE]
        name = service.data[ATTR_NAME]
        domain = service.data.get(ATTR_DOMAIN)
        entity_id = service.data.get(ATTR_ENTITY_ID)
        if entity_id is None and domain is None:
            # If there is no entity_id or
            # domain, the event will get filtered
            # away so we use the "logbook" domain
            domain = DOMAIN
        # ATTR_MESSAGE is a template; render it before logging.
        message.hass = hass
        message = message.async_render(parse_result=False)
        async_log_entry(hass, name, message, domain, entity_id, service.context)
    frontend.async_register_built_in_panel(
        hass, "logbook", "logbook", "hass:format-list-bulleted-type"
    )
    # Combine recorder and logbook include/exclude filters so the logbook
    # never shows entities the recorder is not storing.
    recorder_conf = config.get(RECORDER_DOMAIN, {})
    logbook_conf = config.get(DOMAIN, {})
    recorder_filter = extract_include_exclude_filter_conf(recorder_conf)
    logbook_filter = extract_include_exclude_filter_conf(logbook_conf)
    merged_filter = merge_include_exclude_filters(recorder_filter, logbook_filter)
    possible_merged_entities_filter = convert_include_exclude_filter(merged_filter)
    if not possible_merged_entities_filter.empty_filter:
        filters = sqlalchemy_filter_from_include_exclude_conf(merged_filter)
        entities_filter = possible_merged_entities_filter
    else:
        # No filtering configured: skip the SQL-level filter entirely.
        filters = None
        entities_filter = None
    hass.data[LOGBOOK_FILTERS] = filters
    hass.data[LOGBOOK_ENTITIES_FILTER] = entities_filter
    websocket_api.async_setup(hass)
    rest_api.async_setup(hass, config, filters, entities_filter)
    hass.services.async_register(DOMAIN, "log", log_message, schema=LOG_MESSAGE_SCHEMA)
    await async_process_integration_platforms(hass, DOMAIN, _process_logbook_platform)
    return True
async def _process_logbook_platform(
    hass: HomeAssistant, domain: str, platform: Any
) -> None:
    """Process a logbook platform.

    Gives each integration's logbook platform a registration hook so it
    can declare how its custom event types are rendered.
    """
    @callback
    def _async_describe_event(
        domain: str,
        event_name: str,
        describe_callback: Callable[[Event], dict[str, Any]],
    ) -> None:
        """Teach logbook how to describe a new event."""
        # Describers are stored keyed by event type and looked up when
        # logbook entries are rendered.
        hass.data[DOMAIN][event_name] = (domain, describe_callback)
    platform.async_describe_events(hass, _async_describe_event)
| {
"content_hash": "c753948d7c650fc6cafd54923cb2a9e1",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 87,
"avg_line_length": 31.89937106918239,
"alnum_prop": 0.6910488958990536,
"repo_name": "toddeye/home-assistant",
"id": "1abfcaba6ff37761f0dd876059f5dca0ef251b5d",
"size": "5072",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/logbook/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the top10 app: creates the Top10 table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Top10',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length has no effect on TextField (it is
                # only enforced for CharField). Left unchanged because an
                # applied migration must not be edited.
                ('name', models.TextField(max_length=225)),
                ('address', models.TextField(max_length=225)),
                ('date', models.TextField(max_length=225)),
            ],
        ),
    ]
| {
"content_hash": "ee515b0622466a7b388b8a3a0d76aefa",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 26.08695652173913,
"alnum_prop": 0.56,
"repo_name": "okwow123/freebaram",
"id": "659fa3747d5b69ded3d9592024d2952c992452c6",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootcamp/top10/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10357"
},
{
"name": "HTML",
"bytes": "63691"
},
{
"name": "JavaScript",
"bytes": "101494"
},
{
"name": "Python",
"bytes": "99135"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_broadcast_domain
short_description: NetApp ONTAP manage broadcast domains.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Modify a ONTAP broadcast domain.
options:
state:
description:
- Whether the specified broadcast domain should exist or not.
choices: ['present', 'absent']
default: present
name:
description:
- Specify the broadcast domain name.
required: true
aliases:
- broadcast_domain
from_name:
description:
- Specify the broadcast domain name to be split into new broadcast domain.
version_added: "2.8"
mtu:
description:
- Specify the required mtu for the broadcast domain.
ipspace:
description:
- Specify the required ipspace for the broadcast domain.
- A domain ipspace can not be modified after the domain has been created.
ports:
description:
- Specify the ports associated with this broadcast domain. Should be comma separated.
- It represents the expected state of a list of ports at any time.
- Add a port if it is specified in expected state but not in current state.
- Delete a port if it is specified in current state but not in expected state.
- For split action, it represents the ports to be split from current broadcast domain and added to the new broadcast domain.
- if all ports are removed or splited from a broadcast domain, the broadcast domain will be deleted automatically.
'''
EXAMPLES = """
- name: create broadcast domain
na_ontap_broadcast_domain:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: ansible_domain
mtu: 1000
ipspace: Default
ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
- name: modify broadcast domain
na_ontap_broadcast_domain:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: ansible_domain
mtu: 1100
ipspace: Default
ports: ["khutton-vsim1:e0d-12", "khutton-vsim1:e0d-13"]
- name: split broadcast domain
na_ontap_broadcast_domain:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
from_name: ansible_domain
name: new_ansible_domain
mtu: 1200
ipspace: Default
ports: khutton-vsim1:e0d-12
- name: delete broadcast domain
na_ontap_broadcast_domain:
state: absent
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: ansible_domain
ipspace: Default
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
# True when the netapp-lib ZAPI SDK is importable; verified in __init__.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapBroadcastDomain(object):
    """
    Create, Modifies and Destroys a Broadcast domain
    """
    def __init__(self):
        """
        Initialize the ONTAP Broadcast Domain class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str', aliases=["broadcast_domain"]),
            ipspace=dict(required=False, type='str'),
            # NOTE(review): mtu is declared as a string; the ZAPI presumably
            # accepts it that way -- confirm before changing to int.
            mtu=dict(required=False, type='str'),
            ports=dict(required=False, type='list'),
            from_name=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Fail early if the NetApp ZAPI SDK is missing; otherwise open the
        # cluster connection used by all other methods.
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
        return

    def get_broadcast_domain(self, broadcast_domain=None):
        """
        Return details about the broadcast domain
        :param broadcast_domain: specific broadcast domain to get.
            Defaults to the 'name' module parameter when omitted.
        :return: Details about the broadcast domain. None if not found.
        :rtype: dict
        """
        if broadcast_domain is None:
            broadcast_domain = self.parameters['name']
        # Build a get-iter query filtered on the domain name.
        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
        broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(broadcast_domain_info)
        domain_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(domain_get_iter, True)
        domain_exists = None
        # check if broadcast_domain exists
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            domain_info = result.get_child_by_name('attributes-list').\
                get_child_by_name('net-port-broadcast-domain-info')
            domain_name = domain_info.get_child_content('broadcast-domain')
            domain_mtu = domain_info.get_child_content('mtu')
            domain_ipspace = domain_info.get_child_content('ipspace')
            domain_ports = domain_info.get_child_by_name('ports')
            if domain_ports is not None:
                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
            else:
                ports = []
            domain_exists = {
                'domain-name': domain_name,
                'mtu': domain_mtu,
                'ipspace': domain_ipspace,
                'ports': ports
            }
        return domain_exists

    def create_broadcast_domain(self):
        """
        Creates a new broadcast domain
        ipspace, mtu and ports are optional and only sent when supplied.
        """
        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-create')
        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
        if self.parameters.get('ipspace'):
            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
        if self.parameters.get('mtu'):
            domain_obj.add_new_child("mtu", self.parameters['mtu'])
        if self.parameters.get('ports'):
            ports_obj = netapp_utils.zapi.NaElement('ports')
            domain_obj.add_child_elem(ports_obj)
            for port in self.parameters['ports']:
                ports_obj.add_new_child('net-qualified-port-name', port)
        try:
            self.server.invoke_successfully(domain_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating broadcast domain %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_broadcast_domain(self, broadcast_domain=None):
        """
        Deletes a broadcast domain
        :param broadcast_domain: domain to destroy; defaults to the 'name'
            module parameter.
        """
        if broadcast_domain is None:
            broadcast_domain = self.parameters['name']
        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-destroy')
        domain_obj.add_new_child("broadcast-domain", broadcast_domain)
        if self.parameters.get('ipspace'):
            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
        try:
            self.server.invoke_successfully(domain_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting broadcast domain %s: %s' %
                                  (broadcast_domain, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_broadcast_domain(self):
        """
        Modifies ipspace and mtu options of a broadcast domain
        """
        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-modify')
        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
        if self.parameters.get('mtu'):
            domain_obj.add_new_child("mtu", self.parameters['mtu'])
        if self.parameters.get('ipspace'):
            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
        try:
            self.server.invoke_successfully(domain_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying broadcast domain %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def split_broadcast_domain(self):
        """
        split broadcast domain
        Moves the given ports from the 'from_name' domain into a new domain
        named by 'name'; deletes the source domain if it ends up empty.
        """
        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-split')
        domain_obj.add_new_child("broadcast-domain", self.parameters['from_name'])
        domain_obj.add_new_child("new-broadcast-domain", self.parameters['name'])
        if self.parameters.get('ports'):
            ports_obj = netapp_utils.zapi.NaElement('ports')
            domain_obj.add_child_elem(ports_obj)
            for port in self.parameters['ports']:
                ports_obj.add_new_child('net-qualified-port-name', port)
        if self.parameters.get('ipspace'):
            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
        try:
            self.server.invoke_successfully(domain_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # NOTE(review): the error message reports 'name' (the new domain)
            # although the domain being split is 'from_name'.
            self.module.fail_json(msg='Error splitting broadcast domain %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # If the split emptied the source domain, remove it as documented.
        if len(self.get_broadcast_domain_ports(self.parameters['from_name'])) == 0:
            self.delete_broadcast_domain(self.parameters['from_name'])

    def modify_redirect(self, modify):
        """
        Dispatch each modified attribute to its modification method.
        :param modify: modify attributes.
        """
        for attribute in modify.keys():
            if attribute == 'mtu':
                self.modify_broadcast_domain()
            if attribute == 'ports':
                self.modify_broadcast_domain_ports()

    def get_modify_attributes(self, current, split):
        """
        :param current: current state.
        :param split: True or False of split action.
        :return: list of modified attributes.
        """
        modify = None
        if self.parameters['state'] == 'present':
            # split already handled ipspace and ports.
            if self.parameters.get('from_name'):
                # Compare against the source domain when splitting/renaming.
                current = self.get_broadcast_domain(self.parameters['from_name'])
                if split:
                    modify = self.na_helper.get_modified_attributes(current, self.parameters)
                    if modify.get('ipspace'):
                        del modify['ipspace']
                    if modify.get('ports'):
                        del modify['ports']
            # ipspace can not be modified.
            else:
                modify = self.na_helper.get_modified_attributes(current, self.parameters)
                if modify.get('ipspace'):
                    self.module.fail_json(msg='A domain ipspace can not be modified after the domain has been created.',
                                          exception=traceback.format_exc())
        return modify

    def modify_broadcast_domain_ports(self):
        """
        compare current and desire ports. Call add or remove ports methods if needed.
        :return: None.
        """
        current_ports = self.get_broadcast_domain_ports()
        expect_ports = self.parameters['ports']
        # if want to remove all ports, simply delete the broadcast domain.
        if len(expect_ports) == 0:
            self.delete_broadcast_domain()
            return
        # Set difference in both directions gives the delta to apply.
        ports_to_remove = list(set(current_ports) - set(expect_ports))
        ports_to_add = list(set(expect_ports) - set(current_ports))
        if len(ports_to_add) > 0:
            self.add_broadcast_domain_ports(ports_to_add)
        if len(ports_to_remove) > 0:
            self.delete_broadcast_domain_ports(ports_to_remove)

    def add_broadcast_domain_ports(self, ports):
        """
        Creates new broadcast domain ports
        :param ports: list of qualified port names to add.
        :return: True on success (fail_json aborts on error).
        """
        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-add-ports')
        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
        if self.parameters.get('ipspace'):
            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
        if ports:
            ports_obj = netapp_utils.zapi.NaElement('ports')
            domain_obj.add_child_elem(ports_obj)
            for port in ports:
                ports_obj.add_new_child('net-qualified-port-name', port)
        try:
            self.server.invoke_successfully(domain_obj, True)
            return True
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating port for broadcast domain %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def delete_broadcast_domain_ports(self, ports):
        """
        Deletes broadcast domain ports
        :param: ports to be deleted.
        :return: True on success (fail_json aborts on error).
        """
        domain_obj = netapp_utils.zapi.NaElement('net-port-broadcast-domain-remove-ports')
        domain_obj.add_new_child("broadcast-domain", self.parameters['name'])
        if self.parameters.get('ipspace'):
            domain_obj.add_new_child("ipspace", self.parameters['ipspace'])
        if ports:
            ports_obj = netapp_utils.zapi.NaElement('ports')
            domain_obj.add_child_elem(ports_obj)
            for port in ports:
                ports_obj.add_new_child('net-qualified-port-name', port)
        try:
            self.server.invoke_successfully(domain_obj, True)
            return True
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting port for broadcast domain %s: %s' %
                                  (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())

    def get_broadcast_domain_ports(self, broadcast_domain=None):
        """
        Return details about the broadcast domain ports.
        :param broadcast_domain: domain to inspect; defaults to the 'name'
            module parameter.
        :return: Details about the broadcast domain ports. Empty list if not found.
        :rtype: list
        """
        if broadcast_domain is None:
            broadcast_domain = self.parameters['name']
        domain_get_iter = netapp_utils.zapi.NaElement('net-port-broadcast-domain-get-iter')
        broadcast_domain_info = netapp_utils.zapi.NaElement('net-port-broadcast-domain-info')
        broadcast_domain_info.add_new_child('broadcast-domain', broadcast_domain)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(broadcast_domain_info)
        domain_get_iter.add_child_elem(query)
        result = self.server.invoke_successfully(domain_get_iter, True)
        ports = []
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            domain_info = result.get_child_by_name('attributes-list').get_child_by_name('net-port-broadcast-domain-info')
            domain_ports = domain_info.get_child_by_name('ports')
            if domain_ports is not None:
                ports = [port.get_child_content('port') for port in domain_ports.get_children()]
        return ports

    def apply(self):
        """
        Run Module based on play book
        Decides between create, split, delete and modify from the current
        cluster state, honouring check mode.
        """
        self.asup_log_for_cserver("na_ontap_broadcast_domain")
        current = self.get_broadcast_domain()
        cd_action, split = None, None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action == 'create':
            # either create new domain or split domain.
            if self.parameters.get('from_name'):
                split = self.na_helper.is_rename_action(self.get_broadcast_domain(self.parameters['from_name']), current)
                if split is None:
                    self.module.fail_json(msg='A domain can not be split if it does not exist.',
                                          exception=traceback.format_exc())
                if split:
                    # Splitting replaces the create action.
                    cd_action = None
        modify = self.get_modify_attributes(current, split)
        if self.na_helper.changed:
            if self.module.check_mode:
                # Check mode: report the pending change without applying it.
                pass
            else:
                if split:
                    self.split_broadcast_domain()
                if cd_action == 'create':
                    self.create_broadcast_domain()
                elif cd_action == 'delete':
                    self.delete_broadcast_domain()
                elif modify:
                    self.modify_redirect(modify)
        self.module.exit_json(changed=self.na_helper.changed)

    def asup_log_for_cserver(self, event_name):
        """
        Fetch admin vserver for the given cluster
        Create and Autosupport log event with the given module name
        :param event_name: Name of the event log
        :return: None
        """
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event(event_name, cserver)
def main():
    """Entry point: build the broadcast-domain module object and apply it."""
    broadcast_domain = NetAppOntapBroadcastDomain()
    broadcast_domain.apply()


if __name__ == '__main__':
    main()
| {
"content_hash": "32570dae3366c33de7c6e37fd6c3ecbe",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 128,
"avg_line_length": 42.76798143851508,
"alnum_prop": 0.6002278522215592,
"repo_name": "SergeyCherepanov/ansible",
"id": "314402074a1b07aca64264671b69b857bc8dbc59",
"size": "18575",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/storage/netapp/na_ontap_broadcast_domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""
======================================================
Smoothing of timeSeries data using convolution filters
======================================================
How to smooth a TimeSeries using a convolution filter
kernel from `~astropy.convolution` and `~astropy.convolution.convolve`
function.
"""
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel
from sunpy.timeseries import TimeSeries
from sunpy.data.sample import NOAAINDICES_TIMESERIES as noaa_ind
###############################################################################
# Let's first create a TimeSeries from sample data
ts_noaa_ind = TimeSeries(noaa_ind, source='NOAAIndices')
###############################################################################
# Now we will extract data values from the TimeSeries and apply a BoxCar filter
# to get smooth data. Boxcar smoothing is equivalent to taking our signal and
# using it to make a new signal where each element is the average of w adjacent
# elements. Here we will use AstroPy’s convolve function with a “boxcar” kernel
# of width w = 10.
ts_noaa_ind.data['sunspot SWO Smoothed'] = convolve(
ts_noaa_ind.data['sunspot SWO'].values, kernel=Box1DKernel(10))
###############################################################################
# Plotting original and smoothed timeseries
plt.ylabel('Sunspot Number')
plt.xlabel('Time')
plt.title('Smoothing of Time Series')
plt.plot(ts_noaa_ind.data['sunspot SWO'])
plt.plot(ts_noaa_ind.data['sunspot SWO Smoothed'])
plt.legend()
plt.show()
| {
"content_hash": "c72884b404fc2596fb814ed2dcd33698",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 42.08108108108108,
"alnum_prop": 0.6017983301220295,
"repo_name": "dpshelio/sunpy",
"id": "ccf63c33330cf570fef84f9e51cc971ca3d065e2",
"size": "1563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/time_series/timeseries_convolution_filter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "73732"
},
{
"name": "IDL",
"bytes": "5746"
},
{
"name": "Python",
"bytes": "1922243"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
} |
from scraper import FacebookScraper
| {
"content_hash": "1ccf2bb1ddb2d4cb844796a8fb045ebe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8888888888888888,
"repo_name": "nprapps/graeae",
"id": "d5b58f2b1d8ad6443842e186cb6f795f75997fd3",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/facebook/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "122377"
},
{
"name": "HTML",
"bytes": "303664"
},
{
"name": "JavaScript",
"bytes": "486080"
},
{
"name": "Nginx",
"bytes": "136"
},
{
"name": "Python",
"bytes": "134356"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Switches DataValidation.date_entered to auto_now_add so the timestamp
    # is filled in automatically when a row is created.
    dependencies = [
        ('plea', '0015_datavalidation'),
    ]
    operations = [
        migrations.AlterField(
            model_name='datavalidation',
            name='date_entered',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| {
"content_hash": "89d9f36ddbddcb77b2230aac19f50d99",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 21.833333333333332,
"alnum_prop": 0.6055979643765903,
"repo_name": "ministryofjustice/manchester_traffic_offences_pleas",
"id": "6d19ac10e326ab949a28d465ec7593a6649347c3",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/plea/migrations/0016_auto_20151125_1601.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "867"
},
{
"name": "Gherkin",
"bytes": "10122"
},
{
"name": "HTML",
"bytes": "184454"
},
{
"name": "JavaScript",
"bytes": "52955"
},
{
"name": "Python",
"bytes": "792658"
},
{
"name": "SCSS",
"bytes": "43568"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
} |
import smtplib
from email.mime.text import MIMEText
from collections import OrderedDict
import simplejson
from datetime import date, timedelta
#def emailResults(folder, filename):
def email_send( msg_content ):
    """Send msg_content as the body of an e-mail (Python 2 code).

    Credentials are read from a JSON file; sender and recipient are the
    same account. Exits the process with status 1 if the file is unreadable.
    """
    yesterday = date.today() - timedelta(1)
    # print yesterday.strftime('%d-%m-%y')
    # print yesterday
    # Mesage body
    # doc = folder + filename + '.txt'
    # with open(doc, 'r') as readText:
    #     msg = MIMEText(readText.read())
    msg = MIMEText( msg_content )
    # NOTE: e-mail and pswd read from file
    # NOTE(review): this path is at the filesystem root; presumably
    # "~/.mqtt_logger" was intended -- confirm.
    filename = "/.mqtt_logger"
    f = open( filename )
    try:
        json_struct = simplejson.load(f)
    except Exception as e:
        # NOTE(review): on this error path the file handle is never closed
        # (exit(1) fires before f.close()).
        print "ERROR: Cannot load .json file: ", filename
        print "       ", e
        exit(1)
    f.close()
    # Settings
    FROM = json_struct['user']
    PSWD = json_struct['password']
    TO   = FROM
    # TO = 'h.n.nikolov@gmail.com'
    # Headers
    msg['To']      = TO
    msg['From']    = FROM
    msg['Subject'] = 'Home MQTT Logger, ' + str(yesterday)
    # print msg
    # SMTP: submission port with STARTTLS upgrade before login.
    send = smtplib.SMTP('smtp.mail.ru', 587)
    send.starttls()
    send.login   ( FROM, PSWD)
    send.sendmail( FROM, TO, msg.as_string() )
    send.quit()
if __name__ == '__main__':
    # Smoke test: build a fake 24-hour history and e-mail it as JSON.
    # NOTE(review): msg below is never used; email_send() receives the
    # serialized day_history instead.
    msg = "My test e-mail sent from python."
    # Init a day history ordered dict
    keys = []
    for i in range(1,25):
        topic = "power_meter/processed/" + str(i)
        keys.append(topic)
    items = [(key, None) for key in keys]
    day_history = OrderedDict(items)
    # Initialize data with dummy per-hour readings.
    for i in range(1,25):
        topic = "power_meter/processed/" + str(i)
        current_hour = { 'T': i, 'E': i, 'W': i, 'G': i }
        day_history[topic] = current_hour
    # Send e-mail
    email_send( simplejson.dumps(day_history) )
    # TODO: Create a class; Use simplejson
| {
"content_hash": "e5ae68acd11a19aecf26cdb43e93482d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 58,
"avg_line_length": 24.766233766233768,
"alnum_prop": 0.56790770844258,
"repo_name": "hnikolov/mqtt_meter_logger",
"id": "0bfd0643a194a589dc1cd78153274f6ce97458c3",
"size": "1907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/email_send.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12870"
}
],
"symlink_target": ""
} |
'''
Created on 14/8/2014
@author: victor
'''
import unittest
from hivprotmut.external.blast.blastOutputParser import BlastOutputParser
import hivprotmut.external.blast.test.data as test_data
import os
class Test(unittest.TestCase):
    # Tests for BlastOutputParser against the bundled "two_hits_example"
    # BLAST XML fixture.

    def test_parse(self):
        """
        Tests if it can read the xml alignments.
        """
        expected_alignments = [
            {
                'score': 436.0,
                'query_seq': 'PQVTLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMSLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
                'midline__': 'PQ+TLW+RPLVTI+IGGQLKEALLDTGADDTV+EE +LPG WKPK IGGI GFIKVRQYDQI +EI GHKAIGTVLVGPTPVNIIGRNLLTQIG TLNF',
                'hit_seq__': 'PQITLWKRPLVTIRIGGQLKEALLDTGADDTVIEEXNLPGXWKPKXIGGIXGFIKVRQYDQIPVEIXGHKAIGTVLVGPTPVNIIGRNLLTQIGXTLNF',
                'gaps': 0,
                'hit_chain': 'A',
                'pdb': {
                    'id': '3HZC'
                }
            }
        ]
        # Second argument presumably means "keep best hit only" -- with True
        # a fixture containing two hits yields a single alignment. Confirm.
        bo = BlastOutputParser(os.path.join(test_data.__path__[0], "two_hits_example"), True)
        self.assertEqual(len(bo.alignments), 1)
        self.assertDictEqual(expected_alignments[0], bo.alignments[0])

    def test_two_hits(self):
        """
        If an alignment has two hits, we only store the first.
        """
        expected_alignments = [{
            'score': 436.0,
            'gaps': 0,
            'query_seq': 'PQVTLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMSLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
            'midline__': 'PQ+TLW+RPLVTI+IGGQLKEALLDTGADDTV+EE +LPG WKPK IGGI GFIKVRQYDQI +EI GHKAIGTVLVGPTPVNIIGRNLLTQIG TLNF',
            'hit_seq__': 'PQITLWKRPLVTIRIGGQLKEALLDTGADDTVIEEXNLPGXWKPKXIGGIXGFIKVRQYDQIPVEIXGHKAIGTVLVGPTPVNIIGRNLLTQIGXTLNF',
            'hit_chain': 'A',
            'pdb': {
                'id': '3HZC'
            }
        },
            {
                'score': 407.0,
                'gaps': 0,
                'query_seq': 'PQVTLWQRPLVTIKIGGQLKEALLDTGADDTVLEEMSLPGRWKPKMIGGIGGFIKVRQYDQILIEICGHKAIGTVLVGPTPVNIIGRNLLTQIGCTLNF',
                'midline__': 'PQ+TLW+RPLVTI+IGGQLKEALLDTGADDTV+EE +LPG WKPK IGGIGGFIKVRQYDQI +EI GHKAIGTVLVGPTPVNIIGRNLLTQIG TLNF',
                'hit_seq__': 'PQITLWKRPLVTIRIGGQLKEALLDTGADDTVIEEXNLPGXWKPKXIGGIGGFIKVRQYDQIPVEIXGHKAIGTVLVGPTPVNIIGRNLLTQIGXTLNF',
                'hit_chain': 'A',
                'pdb': {
                    'id': '3HZC'
                }
            }]
        # With the flag False, both hits of the fixture are kept in order.
        bo = BlastOutputParser(os.path.join(test_data.__path__[0], "two_hits_example"), False)
        self.assertEqual(len(bo.alignments), 2)
        self.assertDictEqual(expected_alignments[0], bo.alignments[0])
        self.assertDictEqual(expected_alignments[1], bo.alignments[1])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_two_hits']
unittest.main() | {
"content_hash": "b530d957a801ddad7c9ef0b846ff97af",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 148,
"avg_line_length": 43.098591549295776,
"alnum_prop": 0.5666666666666667,
"repo_name": "victor-gil-sepulveda/PhD-HIVProteaseMutation",
"id": "9342ede187006ec7b22df6178a199e338135eccd",
"size": "3060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hivprotmut/external/blast/test/TestBlastOutputParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52485"
}
],
"symlink_target": ""
} |
import sys
import math
class Graph:
    """Adjacency-list graph, optionally directed.

    Each vertex maps to the list of vertexes it connects to; parallel
    edges are represented by repeated entries in that list.
    """

    def __init__(self, **kwargs):
        """Creates a Graph.

        Kwargs:
            graph (dictionary) : an already existing dictionary of vertex and edges.
            is_directed (bool) : is the graph directed (true) or not (false).

        Returns:
            An undirected empty Graph by default.
        """
        self.__data = kwargs.get('graph', {})
        self.__is_directed = kwargs.get('is_directed', False)

    def add_vertex(self, vertex):
        """Adds a vertex to the graph. If vertex already exists, does nothing.

        Args:
            vertex : a vertex to add.
        """
        if not self.contains_vertex(vertex):
            self.__data[vertex] = []

    def remove_vertex(self, vertex):
        """Removes a vertex from the graph. If the vertex is not in graph, does nothing.

        Every edge pointing at the vertex is removed as well, so no dangling
        edges are left behind.

        Args:
            vertex : a vertex to remove.
        """
        if not self.contains_vertex(vertex):
            return
        # Fix: the previous implementation called list.remove() without a
        # membership check (ValueError on one-way directed connections) and,
        # when the vertex had no outgoing edges, left dangling incoming
        # edges in other adjacency lists. Scan every list instead.
        for neighbours in self.__data.values():
            while vertex in neighbours:
                neighbours.remove(vertex)
        del self.__data[vertex]

    def connected_vertex(self, vertex):
        """Gets all the connected vertex of a given vertex.

        Returns:
            A list of vertex if the vertex exists.
            An empty list if the vertex doesn't exist.
        """
        return self.__data.get(vertex, [])

    def contains_vertex(self, vertex):
        """Checks if the graph has the specified vertex.

        Args:
            vertex : a vertex.

        Returns:
            True if the vertex is in the graph.
            False if the vertex is not in the graph.
        """
        return vertex in self.__data

    def add_edge(self, first, second):
        """Add an edge between two vertex. If a vertex is not valid or doesn't exist, does nothing.

        Args:
            first : a vertex.
            second : a vertex.
        """
        if self.contains_vertex(first) and self.contains_vertex(second):
            self.__data[first].append(second)
            # If undirected graph, don't forget to add edge in the other side too.
            if not self.__is_directed:
                self.__data[second].append(first)

    def remove_edge(self, first, second):
        """Removes an edge between two vertex. If a vertex is not valid or doesn't exist, does nothing.

        Only one occurrence is removed per side, mirroring list.remove().

        Args:
            first : a vertex.
            second : a vertex.
        """
        if second in self.connected_vertex(first):
            self.__data[first].remove(second)
            # If undirected graph, don't forget to delete in the other side too.
            if not self.__is_directed:
                if first in self.connected_vertex(second):
                    self.__data[second].remove(first)

    def has_edge(self, first, second):
        """Checks if there is an edge between two vertex.

        If one of the vertex in not in the graph or doesn't exist, return False.

        Args:
            first : a vertex.
            second : a vertex.

        Returns:
            False if one or both of the vertex are not set or not in graph or not connected.
            True:
                if is_directed : there is a connexion from first to second.
                else : there is a connexion from first to second and from second to first.
        """
        has_connexion = False
        if self.contains_vertex(first) and self.contains_vertex(second):
            if self.__is_directed:
                has_connexion = second in self.__data[first]
            else:
                has_connexion = first in self.__data[second] and second in self.__data[first]
        return has_connexion

    def interior_degree(self, vertex):
        """Returns the interior degree of a vertex, a.k.a the number of edges coming to the vertex.

        Args:
            vertex : An existing vertex

        Returns:
            An integer giving the interior degree of the vertex.
        """
        # Count every occurrence of the vertex across all adjacency lists.
        return sum(neighbours.count(vertex) for neighbours in self.__data.values())

    def exterior_degree(self, vertex):
        """Returns the exterior degree of a vertex, a.k.a the number of edges extending from the vertex.

        Args:
            vertex : An existing vertex

        Returns:
            An integer giving the exterior degree of the vertex.
        """
        return len(self.connected_vertex(vertex))

    def degree(self, vertex):
        """Returns the degree of a vertex, a.k.a the number of edges coming to and extending from the vertex.

        Args:
            vertex : An existing vertex

        Returns:
            An integer giving the (interior + exterior) degree of the vertex.
        """
        return self.interior_degree(vertex) + self.exterior_degree(vertex)

    def to_list(self):
        """Returns the graph in the form of an adjacent list using a dictionary.

        Returns:
            A dictionary representing the graph.
        """
        return self.__data

    def to_matrix(self):
        """Returns the graph as an adjacency matrix.

        Previously an unimplemented stub returning None.

        Returns:
            A tuple (vertices, matrix) where vertices lists the vertexes in
            insertion order and matrix[i][j] is the number of edges from
            vertices[i] to vertices[j].
        """
        vertices = list(self.__data)
        index = {v: i for i, v in enumerate(vertices)}
        matrix = [[0] * len(vertices) for _ in vertices]
        for vertex, neighbours in self.__data.items():
            for neighbour in neighbours:
                matrix[index[vertex]][index[neighbour]] += 1
        return vertices, matrix

    @staticmethod
    def bfs_paths(graph, start, goal):
        """Uses the Breadth First Search algorithms to find all paths from a vertex to another.

        Arguments:
            graph (Graph) : a non empty graph.
            start (vertex) : the vertex from where we start.
            goal (vertex) : the vertex to reach.

        Returns:
            A generator yielding paths in order of increasing length.
        """
        queue = [(start, [start])]
        while queue:
            (vertex, path) = queue.pop(0)
            # Skip vertexes already on the path to avoid cycles.
            for next in set(graph.to_list()[vertex]) - set(path):
                if next == goal:
                    yield path + [next]
                else:
                    queue.append((next, path + [next]))

    @staticmethod
    def bfs_shortest_path(graph, start, goal):
        """Gets the sortest path between two vertexes using the BFS algorithm.

        Arguments:
            graph (Graph) : a non empty graph.
            start (vertex) : the vertex from where we start.
            goal (vertex) : the vertex to reach.

        Returns:
            A list which is the shortest path, or None if goal is unreachable.
        """
        try:
            # BFS yields paths shortest-first, so the first one wins.
            return next(Graph.bfs_paths(graph, start, goal))
        except StopIteration:
            return None
graph = Graph()
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
# n: the total number of nodes in the level, including the gateways
# l: the number of links
# e: the number of exit gateways
n, l, e = [int(i) for i in input().split()]
for i in range(l):
    # n1: N1 and N2 defines a link between these nodes
    n1, n2 = [int(j) for j in input().split()]
    graph.add_vertex(n1)
    graph.add_vertex(n2)
    graph.add_edge(n1, n2)
gateways = []
for i in range(e):
    ei = int(input())  # the index of a gateway node
    gateways.append(ei)
# game loop
while True:
    si = int(input())  # The index of the node on which the Skynet agent is positioned this turn
    # The main loop is inspired by DeVoTeD, thanks to him !
    # Find, over all gateways, the shortest agent->gateway path;
    # range(1000) serves as an "infinitely long" sentinel path.
    path = range(1000)
    for gate in gateways:
        current = Graph.bfs_shortest_path(graph, si, gate)
        if current != None and len(current) < len(path):
            path = current
    # Sever the first link on the most threatening path.
    # NOTE(review): if no gateway is reachable, path stays the sentinel and
    # this prints "0 1" -- presumably unreachable in valid inputs; confirm.
    print(str(path[0]) + " " + str(path[1]))
| {
"content_hash": "782411f8a1e631ec10f0cf8bee1ec0b4",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 109,
"avg_line_length": 30.350597609561753,
"alnum_prop": 0.5707534786033079,
"repo_name": "informaticienzero/CodinGame",
"id": "d1f5691f4b8b29d48bf2027cd64a2629924aae3a",
"size": "7618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Moyens/Skynet Revolution - Episode 1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "3000"
},
{
"name": "C++",
"bytes": "19619"
},
{
"name": "Haskell",
"bytes": "483"
},
{
"name": "JavaScript",
"bytes": "929"
},
{
"name": "Python",
"bytes": "66324"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
} |
"""SCons.Tool.hpc++
Tool-specific initialization for c++ on HP/UX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpc++.py 2009/09/04 16:33:07 david"
import os.path
import string
import SCons.Util
# Import the generic C++ tool by its literal module name 'c++'; that name
# is not a valid Python identifier, hence __import__ instead of `import`.
cplusplus = __import__('c++', globals(), locals(), [])

# Full path to the HP aCC compiler front end, or None when not installed.
acc = None

# search for the acc compiler and linker front end
try:
    dirs = os.listdir('/opt')
except (IOError, OSError):
    # Not being able to read the directory because it doesn't exist
    # (IOError) or isn't readable (OSError) is okay.
    dirs = []

# Probe each /opt/<product>/bin directory for the aCC binary; first hit wins.
for dir in dirs:
    cc = '/opt/' + dir + '/bin/aCC'
    if os.path.exists(cc):
        acc = cc
        break
def generate(env):
    """Add Builders and construction variables for aCC to an Environment.

    Delegates to the generic C++ tool first, then overrides CXX and the
    shared-object compile flags when the HP aCC front end was found in
    /opt at module import time.  (The original docstring said "g++",
    which was a copy/paste error.)
    """
    cplusplus.generate(env)

    if acc:
        # `acc` is truthy inside this branch, so the previous
        # "acc or 'aCC'" fallback was dead code.
        env['CXX'] = acc
        env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
        # determine version of aCC
        line = os.popen(acc + ' -V 2>&1').readline().rstrip()
        # Use str methods instead of the string-module functions
        # (string.find/string.split) that were removed in Python 3;
        # behavior is identical on Python 2.
        if line.startswith('aCC: HP ANSI C++'):
            env['CXXVERSION'] = line.split()[-1]

        if env['PLATFORM'] == 'cygwin':
            env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
        else:
            env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
def exists(env):
    """Tool availability check: truthy (the aCC path) iff aCC was found."""
    return acc
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "09888ed9ece53c600a5f3c6522478e8d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 89,
"avg_line_length": 31.95294117647059,
"alnum_prop": 0.680780559646539,
"repo_name": "cournape/numscons",
"id": "a23bfe3fd581e2ec7590a1e8245e29ce47b1e718",
"size": "2716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numscons/scons-local/scons-local-1.2.0/SCons/Tool/hpc++.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1275"
},
{
"name": "FORTRAN",
"bytes": "146"
},
{
"name": "Python",
"bytes": "2033297"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
} |
import random
from maze_builder.mazes.maze import Route
class FeatureFactory(object):
    """Base class for castle feature generators sharing a density knob."""

    def _count(self, castle):
        # The number of features to attempt scales with floor area times
        # the configured density, truncated to an integer.
        area = castle.x * castle.y
        return int(area * self.density)
class Courtyards(FeatureFactory):
    """Places rectangular open courtyards on the castle's ground level."""

    def __init__(self, density, widthfun=lambda: random.randint(2, 5), lengthfun=None):
        self.density = density
        self.widthfun = widthfun
        # Fall back to the width distribution when no length distribution given.
        self.lengthfun = lengthfun if lengthfun is not None else widthfun

    def make(self, castle, count=None):
        count = count or self._count(castle)
        if castle.verbose >= 3:
            print('Generating <={} courtyards...'.format(count))
        for _ in range(count):
            dims = (self.widthfun(), self.lengthfun())
            castle.allocate_feature(self.finish, dims[0], dims[1], retries=2, data=dims)

    def finish(self, castle, pos, lower, upper, data):
        width, length = data
        castle.force_connect(lower)
        # Courtyards are open to the sky: block every upper-level room above.
        for room in upper:
            room.blocked = True
        castle.add_feature('courtyard', pos[0], pos[1], 0, width, length or width)
        if castle.verbose >= 4:
            print('Placed {}x{} courtyard at {}, {}'.format(width, length or width, pos[0], pos[1]))
class Stairs(FeatureFactory):
    """Places stairs connecting the lower level to one adjacent upper room."""

    def __init__(self, density):
        self.density = density

    def make(self, castle, count=None):
        count = count or self._count(castle)
        if castle.verbose >= 3:
            print('Generating <={} stairs...'.format(count))
        for _ in range(count):
            # Pick a random orientation for the 2x1 stair footprint.
            dims, uprel, dir = random.choice((
                # Dimensions, relative of upper room, direction
                [(2, 1), (1, 1), (0, 1)],
                [(1, 2), (1, 1), (1, 0)],
                [(2, 1), (1, 0), (0, -1)],
                [(1, 2), (0, 1), (-1, 0)]
            ))
            castle.allocate_feature(self.finish, *dims, retries=2, margin=1, data=(dims, uprel, dir))

    def finish(self, castle, pos, lower, upper, data):
        dims, uprel, dir = data
        # Join the lower rooms, removing any walls the new routes cross.
        for route in castle.force_connect(lower):
            if 'wall' in route.data:
                castle.walls.discard(route.data['wall'])
        # NOTE(review): 'upper' is recomputed from uprel here, shadowing the
        # argument supplied by allocate_feature -- presumably intentional,
        # since the stair connects to one specific upper room; confirm.
        upper = castle.upper_rooms[lower[0].x + uprel[0]][lower[0].y + uprel[1]]
        castle.topology.force(Route([upper] + lower))
        # Center of the two-cell footprint (may be a half coordinate).
        center = (lower[0].x + lower[1].x + 1) / 2, (lower[0].y + lower[1].y + 1) / 2
        if castle.verbose >= 4:
            print('Placed {}x{} stair at {}, {}'.format(dims[0], dims[1], pos[0], pos[1]))
        castle.add_feature('stair', center[0], center[1], 0, dir[0], dir[1])
class Towers(FeatureFactory):
    """Places solid towers that block rooms on both castle levels."""

    def __init__(self, density, widthfun=lambda: random.randint(1, 3), lengthfun=lambda: random.randint(1, 3)):
        self.density = density
        self.widthfun = widthfun
        self.lengthfun = lengthfun

    def make(self, castle, count=None):
        count = count or self._count(castle)
        if castle.verbose >= 3:
            print('Generating <={} towers...'.format(count))
        for _ in range(count):
            size = (self.widthfun(), self.lengthfun())
            castle.allocate_feature(self.finish, size[0], size[1], retries=2, upout=True, data=size)

    def finish(self, castle, pos, lower, upper, data):
        width, length = data
        # A tower is solid masonry: no room on either level may occupy it.
        for room in lower + upper:
            room.blocked = True
        castle.add_feature('tower', pos[0], pos[1], 0, width, length or width)
        if castle.verbose >= 4:
            print('Placed {}x{} tower at {}, {}'.format(width, length or width, pos[0], pos[1]))
class Spires(FeatureFactory):
    """Places decorative spires at the center of a 2x2 room footprint."""

    def __init__(self, density):
        self.density = density

    def make(self, castle, count=None):
        count = count or self._count(castle)
        if castle.verbose >= 3:
            print('Generating <={} spires...'.format(count))
        # Spires always claim a 2x2 footprint; one retry keeps placement cheap.
        for _ in range(count):
            castle.allocate_feature(self.finish, 2, 2, retries=1)

    def finish(self, castle, pos, lower, upper, data=None):
        castle.force_connect(lower)
        # The spire itself sits at the center point of the 2x2 footprint.
        castle.add_feature('spire', pos[0] + 1, pos[1] + 1, 0)
        if castle.verbose >= 4:
            print('Placed {}x{} spire at {}, {}'.format(2, 2, pos[0], pos[1]))
| {
"content_hash": "d5611c927de1652e954321794c9f9481",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 111,
"avg_line_length": 34.775,
"alnum_prop": 0.5669781931464174,
"repo_name": "kcsaff/maze-builder",
"id": "8c6201263ae84418323334a1694d6306446b6105",
"size": "4173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maze_builder/castles/features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "293"
},
{
"name": "Python",
"bytes": "227162"
}
],
"symlink_target": ""
} |
import numpy as np
class Register:
    """Dense state-vector simulator for a small quantum register.

    The state is a flat list of 2**size complex amplitudes, indexed by the
    integer value of the basis state; qubit ``bit`` corresponds to bit
    ``bit`` of that integer.
    """

    def __init__(self, size):
        """Create a ``size``-qubit register initialized to |0...0>.

        Raises:
            ValueError: if size > 20 (2**20 amplitudes is already over a
                million Python objects; larger registers are impractical
                to simulate this way).
        """
        if size > 20:
            # ValueError is more precise than the bare Exception raised
            # before, and remains backward compatible for callers
            # catching Exception.
            raise ValueError("Register size too large to simulate")
        self.size = size
        self.qubits = [0j] * (2 ** size)
        self.qubits[0] = 1 + 0j

    def __str__(self):
        # Show only basis states with non-zero amplitude, ket-style.
        return str([str(x) + " |" + str(i) + ">" for i, x in enumerate(self.qubits) if x != 0])

    def __eq__(self, other):
        """Two registers are equal iff their amplitude lists are equal."""
        if isinstance(other, self.__class__):
            return self.qubits == other.qubits
        return False

    def injectState(self, qubits):
        """Replace the state vector in place.

        Returns True on success, False if ``qubits`` has the wrong length.
        NOTE(review): normalization of the amplitudes is not checked.
        """
        if len(qubits) != 2 ** self.size:
            return False
        self.qubits = qubits
        return True

    def bit_is_set(self, num, bit):
        """Return True iff bit ``bit`` of integer ``num`` is 1."""
        return bool(num & (1 << bit))

    def updateNicely(self, reg, value, idx):
        """Accumulate ``value`` into ``reg[idx]`` in place and return ``reg``.

        The original zero-check branch was redundant: 0 + value == value.
        """
        reg[idx] = reg[idx] + value
        return reg

    def X(self, bit):
        """Pauli-X (NOT) gate: swap amplitudes of states differing in ``bit``."""
        mask = 1 << bit
        new_reg = [0] * (2 ** self.size)
        for i, amp in enumerate(self.qubits):
            new_reg[i ^ mask] = amp
        self.qubits = new_reg

    def Y(self, bit):
        """Pauli-Y gate: |0> -> i|1>, |1> -> -i|0> on the given qubit."""
        mask = 1 << bit
        new_reg = [0] * (2 ** self.size)
        for i, amp in enumerate(self.qubits):
            if self.bit_is_set(i, bit):
                new_reg[i ^ mask] = amp * (0 - 1j)
            else:
                new_reg[i ^ mask] = amp * (0 + 1j)
        self.qubits = new_reg

    def Z(self, bit):
        """Pauli-Z gate: negate amplitudes of states where ``bit`` is set."""
        new_reg = [0] * (2 ** self.size)
        for i, amp in enumerate(self.qubits):
            new_reg[i] = -amp if self.bit_is_set(i, bit) else amp
        self.qubits = new_reg

    def hadamard(self, bit):
        """Hadamard gate: |0> -> (|0>+|1>)/sqrt(2), |1> -> (|0>-|1>)/sqrt(2)."""
        mask = 1 << bit
        inv_sqrt2 = 1 / (2 ** 0.5)
        new_reg = [0] * (2 ** self.size)
        for i, amp in enumerate(self.qubits):
            flipped = i ^ mask
            if self.bit_is_set(i, bit):
                # |1> component: contributes +amp/sqrt2 to the flipped
                # state and -amp/sqrt2 back to itself.
                self.updateNicely(new_reg, amp * inv_sqrt2, flipped)
                self.updateNicely(new_reg, -amp * inv_sqrt2, i)
            else:
                self.updateNicely(new_reg, amp * inv_sqrt2, flipped)
                self.updateNicely(new_reg, amp * inv_sqrt2, i)
        self.qubits = new_reg

    def cnot(self, setBit, changeBit):
        """Controlled-NOT: flip ``changeBit`` on states where ``setBit`` is 1."""
        mask = 1 << changeBit
        new_reg = [0] * (2 ** self.size)
        for i, amp in enumerate(self.qubits):
            if self.bit_is_set(i, setBit):
                self.updateNicely(new_reg, amp, i ^ mask)
            else:
                self.updateNicely(new_reg, amp, i)
        self.qubits = new_reg
| {
"content_hash": "263f6b468320627a1adc4faa88c76e08",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 92,
"avg_line_length": 29.180851063829788,
"alnum_prop": 0.4939846882974845,
"repo_name": "byronwasti/Quantum-Computer-Simulation",
"id": "a1ee0add94cee184d9c9fd7e234c044bb12c144c",
"size": "2743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/quantum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "3344"
},
{
"name": "Python",
"bytes": "4588"
}
],
"symlink_target": ""
} |
import os
import conda.lock
# Propose as an enhancement to conda. This would be in order to be more specific with conda's lock files.
class Locked(conda.lock.Locked):
    def __init__(self, directory_to_lock):
        """
        Lock the given directory for use, unlike conda.lock.Lock which
        locks the directory passed, meaning you have to come up with another
        name for the directory to lock.
        """
        # NOTE(review): rstrip(os.pathsep) strips ':' (';' on Windows), not
        # trailing path separators -- os.sep may have been intended; confirm
        # with callers before changing.
        dirname, basename = os.path.split(directory_to_lock.rstrip(os.pathsep))
        # The lock file lives *next to* the target directory as a hidden
        # sibling, so the target keeps its own name.
        path = os.path.join(dirname, '.conda-lock_' + basename)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        # NOTE(review): returning the parent __init__ result is harmless
        # (it returns None) but unconventional for an initializer.
        return conda.lock.Locked.__init__(self, path)
| {
"content_hash": "5571489787cab6cf2cec5396c165b5de",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 105,
"avg_line_length": 36.63157894736842,
"alnum_prop": 0.6551724137931034,
"repo_name": "SciTools/conda-gitenv",
"id": "9ad5b7830348b284a232e12f25f808078927fb2c",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_gitenv/lock.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "133277"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class DocenteConfig(AppConfig):
    """Django application configuration for the 'docente' (teacher) app."""
    name = 'docente'
| {
"content_hash": "1c516905edb02199306550143b031094",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "Bleno/sisgestor-django",
"id": "714250e8a1609253e52016305407214702797946",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docente/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5967"
},
{
"name": "Python",
"bytes": "22758"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Sura(models.Model):
    """
    Model to hold the Quran Chapters "Sura"
    """
    # The chapter number doubles as the primary key.
    index = models.PositiveIntegerField(primary_key=True)
    name = models.CharField(max_length=20, unique=True, verbose_name=_('Sura'))

    def __str__(self):
        return self.name

    class Meta:
        # Default queryset order follows chapter number.
        ordering = ['index']
class AyahManager(models.Manager):
    """Query helpers for fetching ayat (verses) by sura and ayah number."""

    def get_sura_text(self, sura_id):
        """Get all sura ayat"""
        return self.filter(sura_id=sura_id)

    def get_sura_ayah(self, sura_id, ayah_num):
        """Get one ayah from sura"""
        # Returns a queryset (possibly empty), not a single instance.
        return self.get_sura_text(sura_id).filter(number=ayah_num)

    def get_sura_ayat_range(self, sura_id, from_ayah, to_ayah):
        """Get sura ayat from range (from ayah number to ayah number, inclusive)"""
        return self.get_sura_text(sura_id).filter(number__lte=to_ayah,
                                                  number__gte=from_ayah)
class Ayah(models.Model):
    """
    Model to hold a chapter's verses "Ayat"
    """
    # Verse number within its sura; unique together with the sura FK.
    number = models.PositiveIntegerField(verbose_name=_('Number'))
    sura = models.ForeignKey(Sura, related_name='ayat', on_delete=models.CASCADE)
    text = models.TextField()

    objects = AyahManager()

    def next_ayah(self):
        # NOTE(review): relies on primary keys being contiguous and in
        # recitation order across sura boundaries -- confirm the data is
        # always loaded that way.
        try:
            return Ayah.objects.get(pk=self.pk+1)
        except Ayah.DoesNotExist:
            # Last ayah of the Quran has no successor.
            return None

    def __str__(self):
        return '{ayah.sura.index} - {ayah.number}'.format(ayah=self)

    class Meta:
        unique_together = ['number', 'sura']
        ordering = ['sura', 'number']
| {
"content_hash": "cf5a897bddbfab6132cab71b9fcfe4e1",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 81,
"avg_line_length": 29.36842105263158,
"alnum_prop": 0.6123058542413381,
"repo_name": "EmadMokhtar/tafseer_api",
"id": "efd95655de3f3d163201eaa8a138d5e3b0a82bd1",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quran_text/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11952"
},
{
"name": "Dockerfile",
"bytes": "514"
},
{
"name": "Python",
"bytes": "37380"
}
],
"symlink_target": ""
} |
"""
WSGI config for learn_models project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django can locate the project settings before the app is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learn_models.settings")
# Module-level WSGI callable that servers (gunicorn/uWSGI/mod_wsgi) import.
application = get_wsgi_application()
| {
"content_hash": "eb2a21f5f2d3d0dde958bfcdcf478d21",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.0625,
"alnum_prop": 0.770573566084788,
"repo_name": "lichengshuang/createvhost",
"id": "e81acecd3b350d49f2782eeeba558a980e23a5fd",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/others/django/learn_models/learn_models/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "84170"
},
{
"name": "C",
"bytes": "25320"
},
{
"name": "CSS",
"bytes": "1323"
},
{
"name": "HTML",
"bytes": "26691"
},
{
"name": "JavaScript",
"bytes": "205981"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "915418"
},
{
"name": "Roff",
"bytes": "6734"
},
{
"name": "Shell",
"bytes": "1548839"
},
{
"name": "Vim script",
"bytes": "56257"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.http import QueryDict
from cms.api import add_plugin
from cms.utils.plugins import build_plugin_tree
from cmsplugin_cascade.models import CascadeElement
from cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,
BootstrapRowForm, BootstrapColumnPlugin, BS3_BREAKPOINT_KEYS)
from cmsplugin_cascade.generic.cms_plugins import HeadingPlugin
from .test_base import CascadeTestCase
from .utils import get_request_context
class SectionPluginTest(CascadeTestCase):
    """Integration tests for HeadingPlugin sections hosted inside a
    Bootstrap-3 Container > Row > Column plugin tree."""

    def setUp(self):
        """Build the Container > Row > Column tree used by every test."""
        super(SectionPluginTest, self).setUp()
        # add a Bootstrap Container Plugin
        container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',
                                     glossary={'breakpoints': BS3_BREAKPOINT_KEYS})
        self.assertIsInstance(container_model, CascadeElement)
        container_plugin = container_model.get_plugin_class_instance(self.admin_site)
        self.assertIsInstance(container_plugin, BootstrapContainerPlugin)
        ModelForm = container_plugin.get_form(self.request, container_model)
        post_data = QueryDict('', mutable=True)
        post_data.setlist('breakpoints', ['sm', 'md'])
        form = ModelForm(post_data, None, instance=container_model)
        html = form.as_p()
        # 'xs' must render unchecked and 'md' checked, per the POST data above.
        self.assertInHTML(
            '<input id="id_glossary_breakpoints_0" name="breakpoints" type="checkbox" value="xs" />',
            html)
        self.assertInHTML(
            '<input checked="checked" id="id_glossary_breakpoints_2" name="breakpoints" type="checkbox" value="md" />',
            html)
        self.assertInHTML('<input id="id_glossary_fluid" name="fluid" type="checkbox" />', html)
        container_plugin.save_model(self.request, container_model, form, False)
        self.assertListEqual(container_model.glossary['breakpoints'], ['sm', 'md'])
        self.assertTrue('fluid' in container_model.glossary)
        self.assertEqual(str(container_model), 'for tablets, laptops')
        # add a RowPlugin with 1 ColumnPlugin
        row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model)
        row_plugin = row_model.get_plugin_class_instance()
        row_change_form = BootstrapRowForm({'num_children': 1})
        row_change_form.full_clean()
        row_plugin.save_model(self.request, row_model, row_change_form, False)
        self.assertDictEqual(row_model.glossary, {})
        self.assertIsInstance(row_model, CascadeElement)
        # saving the row with num_children=1 auto-creates its child column
        column_models = CascadeElement.objects.filter(parent_id=row_model.id)
        self.assertEqual(column_models.count(), 1)
        # work with the ColumnPlugin
        self.column_model = column_models.first()
        self.assertIsInstance(self.column_model, CascadeElement)
        self.column_plugin = self.column_model.get_plugin_class_instance()
        self.assertIsInstance(self.column_plugin, BootstrapColumnPlugin)
        self.assertEqual(self.column_model.parent.id, row_model.id)
        self.plugin_list = [container_model, row_model, self.column_model]

    def test_section(self):
        """A heading gets a unique element_id; a duplicate id is rejected."""
        heading_model = add_plugin(self.placeholder, HeadingPlugin, 'en', target=self.column_model)
        self.assertIsInstance(heading_model, CascadeElement)
        heading_plugin = heading_model.get_plugin_class_instance(self.admin_site)
        self.assertIsInstance(heading_plugin, HeadingPlugin)
        ModelForm = heading_plugin.get_form(self.request, heading_model)
        post_data = QueryDict('', mutable=True)
        post_data.update(tag_type='h2', content="Hello", element_id='foo')
        form = ModelForm(post_data, None, instance=heading_model)
        html = form.as_p()
        needle = '<input id="id_glossary_element_id" name="element_id" type="text" value="foo" />'
        self.assertInHTML(needle, html)
        self.assertTrue(form.is_valid())
        heading_plugin.save_model(self.request, heading_model, form, False)
        # check identifier
        html = heading_plugin.get_identifier(heading_model)
        expected = '<code>h2</code>: Hello <code>id="foo"</code>'
        self.assertHTMLEqual(html, expected)
        # render the Container Plugin with the Heading Plugin as a child
        self.plugin_list.append(heading_model)
        build_plugin_tree(self.plugin_list)
        context = get_request_context(self.request)
        html = heading_model.render_plugin(context)
        expected = '<h2 id="foo">Hello</h2>'
        self.assertHTMLEqual(html, expected)
        # add another heading model with the same id
        heading_model = add_plugin(self.placeholder, HeadingPlugin, 'en', target=self.column_model)
        form = ModelForm(post_data, None, instance=heading_model)
        # the duplicate element_id must make validation fail
        self.assertFalse(form.is_valid())
        expected = '<ul class="errorlist"><li>glossary<ul class="errorlist"><li>The element ID `foo` is not unique for this page.</li></ul></li></ul>'
        # NOTE(review): leftover debug print; consider removing.
        print(str(form.errors))
        self.assertHTMLEqual(str(form.errors), expected)
| {
"content_hash": "1290416733f4efa86d86038c4a071212",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 150,
"avg_line_length": 53.189473684210526,
"alnum_prop": 0.6837522264001583,
"repo_name": "rfleschenberg/djangocms-cascade",
"id": "8e553b1a4f42e14ade527ccbbee112329ddb898b",
"size": "5077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_section.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3158"
},
{
"name": "HTML",
"bytes": "15968"
},
{
"name": "JavaScript",
"bytes": "89011"
},
{
"name": "Python",
"bytes": "270069"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the `uirevision` property of scatterpolar traces."""

    def __init__(self, plotly_name="uirevision", parent_name="scatterpolar", **kwargs):
        # Pull the edit type out first so remaining kwargs pass straight through.
        edit_type = kwargs.pop("edit_type", "none")
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
"content_hash": "e76317dc38a50ea4b23865da68994304",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 37.09090909090909,
"alnum_prop": 0.6323529411764706,
"repo_name": "plotly/plotly.py",
"id": "b73384f3e2da01ebec64f728c3d0ab4c76272f86",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolar/_uirevision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import logging.config
import os
import traceback
from os.path import join as pjoin
import unittest2 as unittest
from curwmysqladapter import MySQLAdapter
from observation.obs_raw_data.ObsRawData import \
get_dialog_timeseries, \
get_wu_timeseries, \
create_raw_timeseries
from cms_utils.UtilTimeseries import extract_single_variable_timeseries
class ObsRawDataTest(unittest.TestCase):
    """Integration tests for observation raw-data collection: pulls weather
    timeseries from Dialog IoT and Weather Underground and stores them via
    a MySQL-backed adapter.  Requires config files, network access and a
    running MySQL instance."""

    @classmethod
    def setUpClass(cls):
        try:
            cls.root_dir = os.path.dirname(os.path.realpath(__file__))
            cls.config = json.loads(open(pjoin(cls.root_dir, '../../config/CONFIG.json')).read())
            # Initialize Logger
            logging_config = json.loads(open(pjoin(cls.root_dir, '../../config/LOGGING_CONFIG.json')).read())
            logging.config.dictConfig(logging_config)
            cls.logger = logging.getLogger('MySQLAdapterTest')
            cls.logger.addHandler(logging.StreamHandler())
            cls.logger.info('setUpClass')
            # Defaults; overridden per-key when present in CONFIG.json.
            MYSQL_HOST = "localhost"
            MYSQL_USER = "root"
            MYSQL_DB = "curw"
            MYSQL_PASSWORD = ""
            if 'MYSQL_HOST' in cls.config:
                MYSQL_HOST = cls.config['MYSQL_HOST']
            if 'MYSQL_USER' in cls.config:
                MYSQL_USER = cls.config['MYSQL_USER']
            if 'MYSQL_DB' in cls.config:
                MYSQL_DB = cls.config['MYSQL_DB']
            if 'MYSQL_PASSWORD' in cls.config:
                MYSQL_PASSWORD = cls.config['MYSQL_PASSWORD']
            cls.adapter = MySQLAdapter(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB)
            cls.eventIds = []
        except Exception as e:
            # NOTE(review): swallowing setup failures means tests run with a
            # half-initialized class; errors surface later and less clearly.
            print(e)
            traceback.print_exc()

    @classmethod
    def tearDownClass(cls):
        cls.logger.info('tearDownClass')

    def setUp(self):
        self.logger.info('setUp')

    def tearDown(self):
        self.logger.info('tearDown')

    def test_createRawDataForLastHour(self):
        """End-to-end: pull raw station data for a window and store it."""
        self.logger.info('createRawDataForLastHour')
        OBS_CONFIG = pjoin(self.root_dir, "../../config/StationConfig.json")
        CON_DATA = json.loads(open(OBS_CONFIG).read())
        stations = CON_DATA['stations']
        self.logger.debug('stations %s', stations)
        start_date_time = datetime.datetime(2018, 1, 2, 12, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 2, 16, 0, 0)
        duration = dict(start_date_time=start_date_time, end_date_time=end_date_time)
        username = self.config['DIALOG_IOT_USERNAME'] if 'DIALOG_IOT_USERNAME' in self.config else None
        password = self.config['DIALOG_IOT_PASSWORD'] if 'DIALOG_IOT_PASSWORD' in self.config else None
        opts = dict(forceInsert=False, dialog_iot_username=username, dialog_iot_password=password)
        create_raw_timeseries(self.adapter, stations, duration, opts)

    def test_getDialogTimeseries(self):
        """With credentials, the Dialog IoT feed returns data."""
        self.logger.info('getDialogTimeseries')
        start_date_time = datetime.datetime(2018, 1, 3, 0, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 3, 1, 0, 0)
        username = self.config['DIALOG_IOT_USERNAME'] if 'DIALOG_IOT_USERNAME' in self.config else None
        password = self.config['DIALOG_IOT_PASSWORD'] if 'DIALOG_IOT_PASSWORD' in self.config else None
        opts = dict(dialog_iot_username=username, dialog_iot_password=password)
        station = {'stationId': '3674010756837033'}
        dialog_timeseries = get_dialog_timeseries(station, start_date_time, end_date_time, opts)
        print('Length:', len(dialog_timeseries))
        print(dialog_timeseries[10:])
        self.assertGreater(len(dialog_timeseries), 0)

    def test_getDialogTimeseriesWithoutAuth(self):
        """Without credentials, the Dialog IoT feed yields nothing."""
        self.logger.info('test_getDialogTimeseriesWithoutAuth')
        start_date_time = datetime.datetime(2018, 1, 3, 0, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 3, 1, 0, 0)
        dialog_timeseries = get_dialog_timeseries({'stationId': '3674010756837033'}, start_date_time, end_date_time)
        print('Length:', len(dialog_timeseries))
        print(dialog_timeseries)
        self.assertEqual(len(dialog_timeseries), 0)

    def test_getWUndergroundTimeseries(self):
        self.logger.info('getWUndergroundTimeseries')
        start_date_time = datetime.datetime(2018, 1, 1, 0, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 1, 23, 0, 0)
        wu_timeseries = get_wu_timeseries({'stationId': 'IBATTARA3'}, start_date_time, end_date_time)
        print(wu_timeseries)
        self.assertGreater(len(wu_timeseries), 0)

    def test_getWUndergroundTimeseriesYatiwawala(self):
        self.logger.info('getWUndergroundTimeseries')
        start_date_time = datetime.datetime(2018, 1, 1, 23, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 2, 1, 0, 0)
        wu_timeseries = get_wu_timeseries({'stationId': 'Yatiwawala', 'name': 'Yatiwawala'}, start_date_time, end_date_time)
        print('wu_timeseries', wu_timeseries)
        self.assertGreater(len(wu_timeseries), 0)

    def test_extractSinglePrecipitationDialogTimeseries(self):
        """Extract just the Precipitation variable from a Dialog feed."""
        self.logger.info('test_extractSinglePrecipitationDialogTimeseries')
        start_date_time = datetime.datetime(2018, 1, 2, 0, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 2, 23, 0, 0)
        dialog_timeseries = get_dialog_timeseries({'stationId': '3674010756837033'}, start_date_time, end_date_time)
        print(dialog_timeseries)
        self.assertGreater(len(dialog_timeseries), 0)
        extractedTimeseries = extract_single_variable_timeseries(dialog_timeseries, 'Precipitation')
        print(extractedTimeseries)
        # Each row of the extracted series is a [timestamp, value] pair.
        self.assertTrue(isinstance(extractedTimeseries[0], list))
        self.assertEqual(len(extractedTimeseries[0]), 2)

    def test_extractSinglePrecipitationWUndergroundTimeseries(self):
        """Extract just the Precipitation variable from a WU feed."""
        self.logger.info('test_extractSinglePrecipitationWUndergroundTimeseries')
        start_date_time = datetime.datetime(2018, 1, 1, 0, 0, 0)
        end_date_time = datetime.datetime(2018, 1, 1, 23, 0, 0)
        wu_timeseries = get_wu_timeseries({'stationId': 'IBATTARA3'}, start_date_time, end_date_time)
        print(wu_timeseries)
        self.assertGreater(len(wu_timeseries), 0)
        extractedTimeseries = extract_single_variable_timeseries(wu_timeseries, 'Precipitation')
        print(extractedTimeseries)
        # Each row of the extracted series is a [timestamp, value] pair.
        self.assertTrue(isinstance(extractedTimeseries[0], list))
        self.assertEqual(len(extractedTimeseries[0]), 2)
| {
"content_hash": "178a5c540f25e47688c902e55df2a747",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 124,
"avg_line_length": 47.21739130434783,
"alnum_prop": 0.6611418047882136,
"repo_name": "gihankarunarathne/cfcwm-cms",
"id": "0004dbd2799cb3ba62479c771d7ffbabd047ecbc",
"size": "6516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "observation/tests/obs_raw_data/ObsRawDataTest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Python",
"bytes": "136577"
}
],
"symlink_target": ""
} |
"""
This module contains the logic for collecting server group information from a
cloud and deploying server groups on a cloud.
Example:
src_handler = ServerGroupsHandler(src_cloud)
dst_handler = ServerGroupsHandler(dst_cloud)
dst_handler.deploy_server_groups(src_handler.get_server_groups())
"""
import copy
import pprint
from novaclient import exceptions as nova_exc
from cloudferrylib.base import compute
from cloudferrylib.os.identity import keystone
from cloudferrylib.utils import log
from cloudferrylib.utils import proxy_client
from cloudferrylib.utils import utils
LOG = log.getLogger(__name__)
# Raw SQL run against Nova's instance_groups tables via the mysql connector.
SQL_SELECT_ALL_GROUPS = ("SELECT user_id, project_id, uuid, name, id FROM "
                         "instance_groups WHERE deleted=0;")
SQL_SELECT_POLICY = ("SELECT policy FROM instance_group_policy WHERE "
                     "group_id=%s AND deleted=0;")
SQL_SELECT_GROUP_ID = "SELECT id FROM instance_groups WHERE uuid='{0}';"
SQL_DELETE_MEMBER = "DELETE FROM instance_group_member WHERE group_id={0};"
# BUG FIX: this previously deleted from instance_group_member (a copy/paste
# of SQL_DELETE_MEMBER), leaving stale policy rows behind on group deletion.
SQL_DELETE_POLICY = "DELETE FROM instance_group_policy WHERE group_id={0};"
# BUG FIX: this previously read "DELETE_FROM" (with an underscore), which is
# invalid SQL and would make group deletion fail at execution time.
SQL_DELETE_GROUP = "DELETE FROM instance_groups WHERE uuid='{0}';"
SQL_INSERT_GROUP = ("INSERT INTO instance_groups (uuid, name, project_id, "
                    "user_id, deleted) VALUES('{0}', '{1}', '{2}', '{3}', 0);")
SQL_INSERT_POLICY = ("INSERT INTO instance_group_policy (group_id, policy, "
                     "deleted) VALUES({0}, '{1}', 0)")
class ServerGroupsHandler(compute.Compute):
    """
    Handler for Nova Server/Instance Groups on specified cloud.

    Allows for collection, creation and duplicate detection.
    """

    def __init__(self, cloud):
        super(ServerGroupsHandler, self).__init__()
        self.cloud = cloud
        self.compute = self.cloud.resources[utils.COMPUTE_RESOURCE]
        self.identity = self.cloud.resources[utils.IDENTITY_RESOURCE]
        # Deep copy so per-call tenant overrides (see
        # get_server_group_id_by_vm) don't mutate the shared identity config.
        self.config = copy.deepcopy(self.identity.config)

    def _execute(self, sql):
        """
        Logs SQL statement and executes using mysql connector from
        COMPUTE_RESOURCE
        """
        LOG.debug("SQL statement: %s", sql)
        return self.compute.mysql_connector.execute(sql)

    @property
    def _nova_client(self):
        """
        Property that returns COMPUTE_RESOURCE nova client
        """
        return self.compute.nova_client

    def get_server_groups(self):
        """
        Return list of dictionaries containing server group details

        Returns:
            list: Empty if no server groups exist or server groups are not
            supported
            [
                {
                    "user": "<user name>",
                    "tenant": "<tenant name>",
                    "uuid": "<group uuid>",
                    "name": "<group name>",
                    "policies": [<policy_name>, ...]
                }
            ]
        """
        groups = []
        try:
            # Probe support first: clouds without the server-groups API
            # extension raise NotFound on the list call.
            with proxy_client.expect_exception(nova_exc.NotFound):
                self._nova_client.server_groups.list()
            for row in self._execute(SQL_SELECT_ALL_GROUPS).fetchall():
                LOG.debug("Resulting row: %s", row)
                # row layout: (user_id, project_id, uuid, name, id)
                sql = SQL_SELECT_POLICY % row[4]
                policies = []
                for policy in self._execute(sql).fetchall():
                    policies.append(policy[0])
                tenant_name = self.identity.try_get_tenant_name_by_id(row[1])
                if tenant_name is None:
                    LOG.info("Tenant '%s' does not exist on the SRC. Skipping "
                             "server group '%s'...", row[1], row[3])
                    continue
                groups.append(
                    {"user": self.identity.try_get_username_by_id(row[0]),
                     "tenant": tenant_name,
                     "uuid": row[2],
                     "name": row[3],
                     "policies": policies})
        except nova_exc.NotFound:
            LOG.info("Cloud does not support server_groups")
        return groups

    def _delete_server_group(self, server_group):
        """
        Uses the sql connector to do the following:
        Retrieves the appropriate group id using the groups UUID
        Removes associated members
        Removes associated policies
        Removes group itself
        """
        sql = SQL_SELECT_GROUP_ID.format(server_group['uuid'])
        gid = self._execute(sql).fetchone()[0]
        sql = SQL_DELETE_MEMBER.format(gid)
        self._execute(sql)
        sql = SQL_DELETE_POLICY.format(gid)
        self._execute(sql)
        sql = SQL_DELETE_GROUP.format(server_group['uuid'])
        self._execute(sql)

    def deploy_server_groups(self, src_groups):
        """
        For each server groups in source cloud, UUID is checked for existence
        in destination cloud.
        If not existing the server group is created.
        If UUID matches a comparison is made and if there is a difference the
        server group is deleted and recreated.
        If UUID matches a comparison is made and there is no differences the
        group is skipped.
        """
        dst_groups = self.get_server_groups()
        for src_group in src_groups:
            LOG.debug("Source HOST server_groups: %s",
                      pprint.pformat(src_group))
            dst_exists = False
            for dst_group in dst_groups:
                LOG.debug("Destination HOST server_groups: %s",
                          pprint.pformat(dst_group))
                if src_group['uuid'] == dst_group['uuid']:
                    if _compare_groups(src_group, dst_group):
                        LOG.info("skipping matching server_group in "
                                 "destination: %s", src_group['name'])
                        dst_exists = True
                    else:
                        # Same UUID but different attributes: replace the
                        # destination copy so it matches the source.
                        LOG.info("deleting server_group collision "
                                 "in destination: %s", dst_group['name'])
                        self._delete_server_group(dst_group)
            if not dst_exists:
                self._deploy_server_group(src_group)

    def _deploy_server_group(self, server_group):
        """
        Uses the sql connector to do the following:
        Inserts specified uuid, name, tenant uuid and user uuid into
        destination cloud.
        Retrieves internally incremented group id
        Inserts associated policies using group id
        """
        LOG.info("Deploying server_group for tenant %s to destination: %s",
                 server_group['tenant'], server_group['name'])
        try:
            with proxy_client.expect_exception(
                    keystone.ks_exceptions.NotFound):
                tenant_id = self.identity.get_tenant_id_by_name(
                    server_group["tenant"])
        except keystone.ks_exceptions.NotFound:
            LOG.info("Tenant '%s' does not exist on DST. Skipping server group"
                     " '%s' with id='%s'...",
                     server_group['tenant'],
                     server_group['name'],
                     server_group['uuid'])
            return
        sql = SQL_INSERT_GROUP.format(
            server_group['uuid'],
            server_group['name'],
            tenant_id,
            self.identity.try_get_user_by_name(
                server_group["user"], self.config.cloud.user).id,
        )
        self._execute(sql)
        # The insert auto-generates the numeric group id; read it back so
        # policies can reference it.
        sql = SQL_SELECT_GROUP_ID.format(server_group['uuid'])
        gid = self._execute(sql).fetchone()[0]
        for policy in server_group['policies']:
            sql = SQL_INSERT_POLICY.format(gid, policy)
            self._execute(sql)
        return server_group

    def get_server_group_id_by_vm(self, instance_id, instance_tenant):
        """
        Get Nova Server Group by it's member

        :param instance_id: VM's ID
        :param instance_tenant: VM's tenant name

        :return str: Nova Server Group ID
        """
        client_config = copy.deepcopy(self.config)
        client_config.cloud.tenant = instance_tenant
        # Temporarily grant the admin user membership in the VM's tenant so
        # the scoped nova client can see that tenant's server groups.
        with keystone.AddAdminUserToNonAdminTenant(
                self.identity.keystone_client,
                client_config.cloud.user,
                instance_tenant):
            nclient = self.compute.get_client(client_config)
            try:
                server_group_list = nclient.server_groups.list()
            except nova_exc.NotFound:
                LOG.info("Cloud does not support server_groups")
                return
        for server_group in server_group_list:
            if instance_id in server_group.members:
                return server_group.id
        # Implicitly returns None when the VM belongs to no group.
        LOG.debug("Instance '%s' is not a member of any server group...",
                  instance_id)
def _compare_groups(group_a, group_b):
"""
Compares server group_a with server_group b
Returns:
bool: True if specified values are equal, otherwise false
"""
return (group_a['policies'] == group_b['policies'] and
group_a['tenant'] == group_b['tenant'] and
group_a['name'] == group_b['name'] and
group_a['user'] == group_b['user'] and
group_a['policies'] == group_b['policies'])
| {
"content_hash": "41725f74310051d708ccfb90cac24f9b",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 36.81349206349206,
"alnum_prop": 0.5623585210736229,
"repo_name": "mgrygoriev/CloudFerry",
"id": "34ab67787233cee60b608751da62469dfefb2e5d",
"size": "9853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudferrylib/os/compute/server_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "775433"
},
{
"name": "Ruby",
"bytes": "5181"
},
{
"name": "Shell",
"bytes": "34787"
}
],
"symlink_target": ""
} |
import numpy as np
__all__ = ['gp_predict', 'gp_covariance']
def gp_kernel(x, y, kernel_type='gaussian', **kwargs):
    """
    Gaussian process kernel score between two points.

    Generalized: accepts points of any dimensionality (the original
    hard-coded 2-D coordinates via x[0]/x[1]); 2-D inputs give the same
    result as before.

    Parameters
    ------------
    x, y : array-like
        Coordinates of the two points.
    kernel_type : str
        Kernel family; only 'gaussian' is implemented.
    beta : float, optional (keyword)
        Length-scale parameter of the gaussian kernel, default 0.3.

    Returns
    ---------
    float : exp(-||x - y|| / (2 * beta**2))

    Raises
    -------
    NotImplementedError : for any kernel_type other than 'gaussian'
    """
    if kernel_type == 'gaussian':
        beta = kwargs.get('beta', 0.3)
        distance = np.linalg.norm(np.asarray(x, dtype=float) -
                                  np.asarray(y, dtype=float))
        return np.exp(-distance / (2 * beta ** 2))
    else:
        raise NotImplementedError('Kernel ({}) not implemented'
                                  .format(kernel_type))
def gp_covariance(x, y, kernel_type='gaussian', **kwargs):
    """
    Compute the Gram (covariance) matrix for GP regression.

    Entry [i][j] is gp_kernel(x[j], y[i]) — rows follow *y*, columns
    follow *x*.

    Fix: keyword arguments (e.g. ``beta``) are now forwarded to
    gp_kernel; previously they were accepted but silently dropped, so the
    kernel always used its default parameters.

    :param x: iterable of points (columns of the result)
    :param y: iterable of points (rows of the result)
    :param kernel_type: kernel family passed through to gp_kernel
    :return: numpy array of shape (len(y), len(x))
    """
    return np.array([[gp_kernel(xi, yi, kernel_type, **kwargs)
                      for xi in x] for yi in y])
def gp_predict(target, train_data, gram_matrix, train_labels):
    """
    Gaussian-process regression prediction for a single node from its
    sampled neighbors.

    Parameters
    ------------
    target : array-like, shape (2)
        [abs, ord] of target point
    train_data : array-like, shape (2 x N)
        training data, [abs, ord] of the points in a certain
        radius of the target point
    train_labels : array-like, shape (N)
        Values of the training data
    gram_matrix : array-like, shape (N x N)
        The Gram matrix

    Returns
    ---------
    y_pred : float
        Predicted value for the target
    sigma_new : float
        Variance of target point prediction
    """
    # kernel similarities between the target and each training sample
    kernel_row = [gp_kernel(target, sample) for sample in train_data]
    # pseudo-inverse tolerates a singular / ill-conditioned Gram matrix
    gram_pinv = np.linalg.pinv(gram_matrix)
    weights = np.dot(kernel_row, gram_pinv)
    y_pred = weights.dot(train_labels)  # y = K K^-1 y
    sigma_new = gp_kernel(target, target) - weights.dot(kernel_row)
    return y_pred, sigma_new
| {
"content_hash": "1ba6d91eaeaec1ace0f49f2c4def9304",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 30.48148148148148,
"alnum_prop": 0.5710814094775213,
"repo_name": "makokal/scalable-irl",
"id": "a0584b17005953e45389eaa36580d20eb996bd66",
"size": "1647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sirl/algorithms/function_approximation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "364"
},
{
"name": "Python",
"bytes": "137714"
}
],
"symlink_target": ""
} |
"""
GrantedByMe Cryptographic helper
.. moduleauthor:: GrantedByMe <info@grantedby.me>
"""
# -*- coding: utf-8 -*-
import base64
import json
import os
import random
import string
import hashlib
import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.fernet import Fernet
from enum import Enum
class JWEEncType(Enum):
    """JSON Web Encryption content-encryption ("enc") identifiers.

    Fix: ``A1256CBC_HS256`` is a historic misspelling of the RFC name
    ``A256CBC_HS256``. The misspelled member is kept as the canonical one
    (so existing ``.name`` comparisons keep working) and the correctly
    spelled name is added as an Enum alias for the same value.
    """
    A128CBC_HS256 = 1  # AES_128_CBC_HMAC_SHA_256
    A192CBC_HS256 = 2  # AES_192_CBC_HMAC_SHA_256
    A1256CBC_HS256 = 3  # AES_256_CBC_HMAC_SHA_256 (misspelled, kept for backward compat)
    A256CBC_HS256 = 3  # correctly-spelled alias of A1256CBC_HS256
    A128GCM = 4  # AES GCM using 128-bit key
    A192GCM = 5  # AES GCM using 192-bit key
    A256GCM = 6  # AES GCM using 256-bit key
class JWEAlgType(Enum):
    """JSON Web Encryption key-management ("alg") algorithm identifiers.

    Integer values are internal ordinals, not part of any JOSE spec.
    """
    RSA1_5 = 1  # RSAES-PKCS1-v1_5
    RSA_OAEP = 2  # RSAES OAEP using default parameters
    A128KW = 3  # AES Key Wrap with default initial value using 128-bit key
    A192KW = 4  # AES Key Wrap with default initial value using 192-bit key
    A256KW = 5  # AES Key Wrap with default initial value using 256-bit key
    A128GCMKW = 6  # AES GCM using 128-bit key
    A192GCMKW = 7  # AES GCM using 192-bit key
    A256GCMKW = 8  # AES GCM using 256-bit key
class JWSAlgType(Enum):
    """JSON Web Signature ("alg") algorithm identifiers.

    Integer values are internal ordinals, not part of any JOSE spec.
    ``PS512.name`` is this module's default signing algorithm and
    ``RS512.name`` selects PKCS1-v1_5 in the RSA helpers below.
    """
    HS256 = 1  # HMAC using SHA-256
    HS384 = 2  # HMAC using SHA-384
    HS512 = 3  # HMAC using SHA-512
    RS256 = 4  # RSASSA-PKCS1-v1_5 using SHA-256
    RS384 = 5  # RSASSA-PKCS1-v1_5 using SHA-384
    RS512 = 6  # RSASSA-PKCS1-v1_5 using SHA-512
    ES256 = 7  # ECDSA using P-256 and SHA-256
    ES384 = 8  # ECDSA using P-384 and SHA-384
    ES512 = 9  # ECDSA using P-521 and SHA-512
    PS256 = 10  # RSASSA-PSS using SHA-256 and MGF1 with SHA-256
    PS384 = 11  # RSASSA-PSS using SHA-384 and MGF1 with SHA-384
    PS512 = 12  # RSASSA-PSS using SHA-512 and MGF1 with SHA-512
class SecurityException(Exception):
    """Signals a failed security check: invalid input, bad signature, etc."""
class GBMCrypto(object):
    """Static collection of cryptographic helpers.

    Groups RSA (OAEP encryption, PSS/PKCS1v15 signatures), AES-CBC with
    HMAC-SHA256 authentication, and Fernet primitives behind classmethods,
    plus "compound" helpers that wrap AES payload keys inside a signed RSA
    envelope. The class is never instantiated.
    """

    def __init__(self):
        """Disabled: all functionality is exposed through classmethods."""
        raise Exception('Static class instantiation error')

    ########################################
    # HELPERS
    ########################################

    @classmethod
    def random_string(cls, length=128):
        """
        Return a random alphanumeric string, default length 128 chars.

        Uses random.SystemRandom (OS entropy), so the output is suitable
        for security-sensitive tokens.

        :param length: number of characters to produce
        :return: string of ASCII letters and digits
        """
        return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(length))

    @classmethod
    def sha512(cls, message):
        """
        Hash an input using SHA-512 and return the hex digest.

        String input first has its line endings normalized to LF so the
        same logical text hashes identically across unix / windows.

        :param message: str or bytes to hash
        :return: 128-character hexadecimal digest string
        """
        if isinstance(message, str):
            message = message.replace('\r\n', '\n')
            message = message.replace('\r', '\n')
            message = message.encode('utf-8')
        return hashlib.sha512(message).hexdigest()

    ########################################
    # ASYMMETRIC (RSA)
    ########################################

    @classmethod
    def generate_keypair(cls):
        """Generate a 2048-bit RSA private key with public exponent 65537."""
        return rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend()
        )

    @classmethod
    def serialize_key(cls, key):
        """
        Serialize an RSA key to PEM.

        Private keys are written as unencrypted PKCS#8, public keys as
        SubjectPublicKeyInfo. Returns None for any other key type.
        """
        if isinstance(key, rsa.RSAPrivateKey):
            return key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.PKCS8,
                encryption_algorithm=serialization.NoEncryption()
            )
        if isinstance(key, rsa.RSAPublicKey):
            return key.public_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PublicFormat.SubjectPublicKeyInfo
            )
        return None

    @classmethod
    def serialize_key_der(cls, key):
        """Serialize an RSA public key to base64-encoded PKCS#1 DER (None for other types)."""
        if isinstance(key, rsa.RSAPublicKey):
            return base64.b64encode(key.public_bytes(
                encoding=serialization.Encoding.DER,
                format=serialization.PublicFormat.PKCS1
            ))
        return None

    @classmethod
    def load_private_key(cls, private_key):
        """Load an unencrypted PEM RSA private key from bytes."""
        return serialization.load_pem_private_key(private_key, password=None, backend=default_backend())

    @classmethod
    def load_public_key(cls, public_key):
        """
        Load a public key from bytes, auto-detecting the format:
        PEM when the input starts with '-----', base64 DER otherwise.
        """
        if not public_key.decode('utf-8').startswith('-----'):
            return serialization.load_der_public_key(base64.b64decode(public_key), backend=default_backend())
        return serialization.load_pem_public_key(public_key, backend=default_backend())

    @classmethod
    def encrypt_rsa(cls, public_key, message_bytes):
        """RSA-encrypt bytes with OAEP (MGF1/SHA-1). Input size is limited by the key size."""
        return public_key.encrypt(message_bytes,
                                  padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(),
                                               label=None))

    @classmethod
    def decrypt_rsa(cls, private_key, cipher_bytes):
        """RSA-decrypt bytes encrypted with OAEP (MGF1/SHA-1)."""
        return private_key.decrypt(cipher_bytes,
                                   padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(),
                                                label=None))

    @classmethod
    def sign_rsa(cls, private_key, message_bytes):
        """
        Sign bytes with RSASSA-PSS (MGF1/SHA-512, 64-byte salt).

        NOTE(review): uses the deprecated ``signer()`` streaming API; newer
        ``cryptography`` releases expose ``private_key.sign()`` instead —
        confirm against the pinned library version before upgrading.
        """
        signer = private_key.signer(padding.PSS(mgf=padding.MGF1(hashes.SHA512()), salt_length=64), hashes.SHA512())
        signer.update(message_bytes)
        return signer.finalize()

    @classmethod
    def verify_rsa(cls, public_key, message_bytes, signature_bytes):
        """Verify an RSASSA-PSS signature; raises on mismatch (deprecated verifier API, see sign_rsa)."""
        verifier = public_key.verifier(signature_bytes, padding.PSS(mgf=padding.MGF1(hashes.SHA512()), salt_length=64),
                                       hashes.SHA512())
        verifier.update(message_bytes)
        return verifier.verify()

    @classmethod
    def encrypt_rsa_json(cls, public_pem, private_pem, source, algorithm=None):
        """
        Encrypt *source* (dict or string) with the peer's public key and
        sign the plaintext with our private key.

        :param public_pem: peer public key, PEM text
        :param private_pem: our private key, PEM text
        :param source: dict (JSON-serialized) or plain string payload
        :param algorithm: 'RS512' selects PKCS1v15/SHA-512 signatures;
            anything else uses the PSS default of sign_rsa
        :return: (base64 ciphertext, base64 signature) tuple of strings
        :raises SecurityException: when any input is None
        """
        if public_pem is None or private_pem is None or source is None:
            raise SecurityException('TypeError')
        if isinstance(source, dict):
            message_text = json.dumps(source)
        else:
            message_text = source
        message_bytes = message_text.encode('utf-8')
        public_key = GBMCrypto.load_public_key(public_pem.encode('utf-8'))
        private_key = GBMCrypto.load_private_key(private_pem.encode('utf-8'))
        payload = GBMCrypto.encrypt_rsa(public_key, message_bytes)
        if algorithm == JWSAlgType.RS512.name:
            signer = private_key.signer(padding.PKCS1v15(), hashes.SHA512())
            signer.update(message_bytes)
            signature = signer.finalize()
        else:
            signature = GBMCrypto.sign_rsa(private_key, message_bytes)
        return base64.b64encode(payload).decode('utf-8'), base64.b64encode(signature).decode('utf-8')

    @classmethod
    def decrypt_rsa_json(cls, public_pem, private_pem, payload, signature, algorithm=None):
        """
        Decrypt an RSA payload with our private key and verify the peer's
        signature over the recovered plaintext.

        :return: decoded JSON object
        :raises SecurityException: on None input or signature mismatch
        :raises Exception: when the plaintext is not valid JSON
        """
        if public_pem is None or private_pem is None or payload is None or signature is None:
            raise SecurityException('TypeError')
        public_key = GBMCrypto.load_public_key(public_pem.encode('utf-8'))
        private_key = GBMCrypto.load_private_key(private_pem.encode('utf-8'))
        message_bytes = GBMCrypto.decrypt_rsa(private_key, base64.b64decode(payload))
        if algorithm == JWSAlgType.RS512.name:
            try:
                verifier = public_key.verifier(base64.b64decode(signature), padding.PKCS1v15(), hashes.SHA512())
                verifier.update(message_bytes)
                verifier.verify()
            except Exception:
                raise SecurityException('Invalid signature')
        else:
            try:
                # PSS/MGF1/SHA512
                GBMCrypto.verify_rsa(public_key, message_bytes, base64.b64decode(signature))
            except Exception:
                raise SecurityException('Invalid signature')
        message_text = message_bytes.decode('utf-8')
        try:
            result = json.loads(message_text)
        except Exception:
            raise Exception('Invalid RSA-JSON: ' + str(message_text))
        return result

    ########################################
    # SYMMETRIC (AES+HMAC)
    ########################################

    @classmethod
    def encrypt_aes(cls, message, key=None, iv=None):
        """
        Encrypt an input with AES-CBC and PKCS7 padding.

        A random 32-byte key (AES-256) and 16-byte IV are generated when
        not supplied.

        :return: (ciphertext, key, iv) tuple of bytes
        :raises SecurityException: when message is None
        """
        if message is None:
            raise SecurityException('TypeError')
        if isinstance(message, str):
            message = message.encode('utf-8')
        padder = PKCS7(128).padder()
        message = padder.update(message) + padder.finalize()
        if key is None:
            key = os.urandom(32)
        elif isinstance(key, str):
            key = key.encode('utf-8')
        if iv is None:
            iv = os.urandom(16)
        elif isinstance(iv, str):
            iv = iv.encode('utf-8')
        cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
        encryptor = cipher.encryptor()
        result = encryptor.update(message) + encryptor.finalize()
        return result, key, iv

    @classmethod
    def decrypt_aes(cls, message, key, iv):
        """Decrypt AES-CBC ciphertext and strip PKCS7 padding; raises SecurityException on None input."""
        if message is None or key is None or iv is None:
            raise SecurityException('TypeError')
        if isinstance(key, str):
            key = key.encode('utf-8')
        if isinstance(iv, str):
            iv = iv.encode('utf-8')
        cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
        decryptor = cipher.decryptor()
        result = decryptor.update(message) + decryptor.finalize()
        unpadder = PKCS7(128).unpadder()
        result = unpadder.update(result) + unpadder.finalize()
        return result

    @classmethod
    def sign_aes(cls, message, key):
        """Return the HMAC-SHA256 tag of *message* under *key*; raises SecurityException on None input."""
        if message is None or key is None:
            raise SecurityException('TypeError')
        if isinstance(message, str):
            message = message.encode('utf-8')
        if isinstance(key, str):
            key = key.encode('utf-8')
        h = hmac.HMAC(key, hashes.SHA256(), backend=default_backend())
        h.update(message)
        return h.finalize()

    @classmethod
    def verify_aes(cls, message, key, signature):
        """Verify an HMAC-SHA256 tag; the underlying library raises on mismatch."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        if isinstance(key, str):
            key = key.encode('utf-8')
        if isinstance(signature, str):
            signature = signature.encode('utf-8')
        h = hmac.HMAC(key, hashes.SHA256(), backend=default_backend())
        h.update(message)
        h.verify(signature)

    @classmethod
    def encrypt_aes_json(cls, source):
        """
        Serialize, AES-encrypt and HMAC-sign a dict (or raw string).

        :return: (cipher, key, iv, signature) tuple, each base64 text
        """
        if isinstance(source, dict):
            message_text = json.dumps(source)
        else:
            message_text = source
        result = GBMCrypto.encrypt_aes(message_text.encode('utf-8'))
        # HMAC is computed over the plaintext with the freshly generated key
        signature_bytes = GBMCrypto.sign_aes(message_text.encode('utf-8'), result[1])
        cipher_b64 = base64.b64encode(result[0]).decode('utf-8')
        key_b64 = base64.b64encode(result[1]).decode('utf-8')
        iv_b64 = base64.b64encode(result[2]).decode('utf-8')
        signature_b64 = base64.b64encode(signature_bytes).decode('utf-8')
        return cipher_b64, key_b64, iv_b64, signature_b64

    @classmethod
    def decrypt_aes_json(cls, message, key, iv, signature):
        """
        Decode base64 inputs, AES-decrypt, verify the HMAC and parse JSON.

        Fix: *signature* is now included in the None-check, so a missing
        signature raises SecurityException instead of an unrelated
        TypeError from base64.b64decode(None).

        :return: decoded JSON object
        :raises SecurityException: when any input is None
        """
        if message is None or key is None or iv is None or signature is None:
            raise SecurityException('TypeError')
        message = base64.b64decode(message)
        key = base64.b64decode(key)
        iv = base64.b64decode(iv)
        message_bytes = GBMCrypto.decrypt_aes(message, key, iv)
        signature = base64.b64decode(signature)
        GBMCrypto.verify_aes(message_bytes, key, signature)
        message_text = message_bytes.decode('utf-8')
        try:
            result = json.loads(message_text)
        except Exception:
            raise Exception('Invalid AES-JSON: ' + str(message_text))
        return result

    ########################################
    # FERNET
    #
    # Fernet is built on top of a number of standard cryptographic primitives.
    #
    # Specifically it uses:
    # - AES in CBC mode with a 128-bit key for encryption; using PKCS7 padding.
    # - HMAC using SHA256 for authentication.
    # - Initialization vectors are generated using os.urandom().
    ########################################

    @classmethod
    def encrypt_fernet(cls, message, secret):
        """Fernet-encrypt str/bytes with the given Fernet secret; raises SecurityException on None input."""
        if message is None or secret is None:
            raise SecurityException('TypeError')
        if isinstance(message, str):
            message = message.encode('utf-8')
        if isinstance(secret, str):
            secret = secret.encode('utf-8')
        f = Fernet(secret)
        return f.encrypt(message)

    @classmethod
    def decrypt_fernet(cls, message, secret):
        """Fernet-decrypt a token produced by encrypt_fernet; raises SecurityException on None input."""
        if message is None or secret is None:
            raise SecurityException('TypeError')
        if isinstance(message, str):
            message = message.encode('utf-8')
        if isinstance(secret, str):
            secret = secret.encode('utf-8')
        f = Fernet(secret)
        return f.decrypt(message)

    @classmethod
    def encrypt_fernet_string(cls, message, secret):
        """Fernet-encrypt and wrap the token in an extra urlsafe-base64 text layer."""
        return base64.urlsafe_b64encode(GBMCrypto.encrypt_fernet(message, secret)).decode('utf-8')

    @classmethod
    def decrypt_fernet_string(cls, message, secret):
        """Reverse encrypt_fernet_string; returns None when the outer base64 layer is invalid or empty."""
        try:
            plain_message = base64.urlsafe_b64decode(message)
        except Exception:
            return None
        if not plain_message:
            return None
        return GBMCrypto.decrypt_fernet(plain_message, secret).decode('utf-8')

    ########################################
    # Compound Wrapper
    ########################################

    @classmethod
    def encrypt_compound(cls, data, public_key, private_key, algorithm=None, is_optional_compound=True):
        """
        Encrypt and sign *data*: RSA-only for short messages, otherwise
        AES for the payload with the AES key/iv/signature wrapped inside a
        signed RSA envelope.

        :param data: dict payload to protect
        :param public_key: peer public key, PEM text
        :param private_key: our private key, PEM text
        :param algorithm: JWS algorithm name; defaults to PS512
        :param is_optional_compound: allow the RSA-only fast path
        :return: dict with 'payload' and 'signature' keys, plus 'message'
            (the AES ciphertext) in the compound case
        """
        # use default RSA algorithm if none specified
        if not algorithm:
            algorithm = JWSAlgType.PS512.name
        # serialize dictionary to json string
        plain_text = json.dumps(data)
        # Message length is small enough to use signed RSA encryption only.
        # NOTE(review): the threshold counts characters, not utf-8 bytes; a
        # non-ASCII message near the limit could exceed RSA-OAEP capacity —
        # confirm inputs are ASCII-safe.
        if len(plain_text) < 215 and is_optional_compound:
            rsa_tuple = GBMCrypto.encrypt_rsa_json(public_key, private_key, plain_text, algorithm)
            return {'payload': rsa_tuple[0], 'signature': rsa_tuple[1]}
        # Use signed AES encryption using keys wrapped in signed RSA encryption
        aes_tuple = GBMCrypto.encrypt_aes_json(plain_text)
        aes_dict = {
            'cipher_key': aes_tuple[1],
            'cipher_iv': aes_tuple[2],
            'signature': aes_tuple[3],
            'timestamp': int(time.time())
        }
        rsa_tuple = GBMCrypto.encrypt_rsa_json(public_key, private_key, aes_dict, algorithm)
        return {'payload': rsa_tuple[0], 'signature': rsa_tuple[1], 'message': aes_tuple[0]}

    @classmethod
    def decrypt_compound(cls, data, public_key, private_key):
        """
        Decrypt and verify a dict produced by encrypt_compound.

        NOTE(review): the 'alg' default here is JWSAlgType.PS512.value (an
        int) while the encrypt side passes algorithm *names*;
        decrypt_rsa_json only special-cases the string 'RS512', so both
        fall through to PSS — verify against peers that set 'alg'
        explicitly.

        :raises SecurityException: on malformed input or bad signature
        """
        if not data or 'payload' not in data or 'signature' not in data:
            raise SecurityException('TypeError')
        if 'alg' not in data:
            data['alg'] = JWSAlgType.PS512.value
        cipher_json = GBMCrypto.decrypt_rsa_json(public_key,
                                                 private_key,
                                                 data['payload'],
                                                 data['signature'],
                                                 data['alg'])
        # return RSA decrypted object if not encrypted using AES (non-compound asymmetric)
        if 'message' not in data and 'cipher_key' not in cipher_json and 'cipher_iv' not in cipher_json and 'signature' not in cipher_json:
            return cipher_json
        return GBMCrypto.decrypt_aes_json(data['message'],
                                          cipher_json['cipher_key'],
                                          cipher_json['cipher_iv'],
                                          cipher_json['signature'])
| {
"content_hash": "5857f646a8100f2b155ea99f1e0ea845",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 139,
"avg_line_length": 40.67981438515081,
"alnum_prop": 0.5957907945017966,
"repo_name": "grantedbyme/grantedbyme-python-sdk",
"id": "b9853f099f736a830fa6495442ebc985acdfb196",
"size": "18654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grantedbyme/gbm_crypto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27894"
}
],
"symlink_target": ""
} |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """YAML loader backed by the libyaml CParser with base construction and resolution."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """YAML loader backed by the libyaml CParser that only constructs safe (plain Python) types."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """Full-featured YAML loader backed by the libyaml CParser (can construct arbitrary objects)."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """YAML dumper backed by the libyaml CEmitter with base representation and resolution."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): initializes Representer/Resolver even though the base
        # class list names BaseRepresenter/BaseResolver — this mirrors
        # upstream PyYAML's cyaml.py; confirm before "fixing".
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """YAML dumper backed by the libyaml CEmitter that only represents safe (plain Python) types."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Full-featured YAML dumper backed by the libyaml CEmitter."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # NOTE(review): Serializer.__init__ is never called even though it
        # appears in the base class list — matches upstream PyYAML's
        # cyaml.py (the CEmitter provides serialization); confirm before
        # changing.
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| {
"content_hash": "ca8cccb14c8f198a1cbd72b3855f551e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 39.705882352941174,
"alnum_prop": 0.6103703703703703,
"repo_name": "croxis/SpaceDrive",
"id": "e726baf13ec847a22be651cc53e8721f1be33b0b",
"size": "3375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacedrive/renderpipeline/rplibs/yaml/yaml_py2/cyaml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "21897"
},
{
"name": "C++",
"bytes": "165025"
},
{
"name": "GLSL",
"bytes": "741524"
},
{
"name": "Groff",
"bytes": "119"
},
{
"name": "Python",
"bytes": "1523574"
}
],
"symlink_target": ""
} |
"""
@file async_tcp_client.py
@author Woong Gyu La a.k.a Chris. <juhgiyo@gmail.com>
<http://github.com/juhgiyo/pyserver>
@date March 10, 2016
@brief AsyncTcpClient Interface
@version 0.1
@section LICENSE
The MIT License (MIT)
Copyright (c) 2016 Woong Gyu La <juhgiyo@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@section DESCRIPTION
AsyncTcpClient Class.
"""
import asyncore
import socket
from collections import deque
import threading
from .async_controller import AsyncController
from .callback_interface import *
from .server_conf import *
# noinspection PyDeprecation
from .preamble import *
import traceback
'''
Interfaces
variables
- hostname
- port
- addr = (hostname,port)
- callback
functions
- def send(data)
- def close() # close the socket
'''
class AsyncTcpClient(asyncore.dispatcher):
    """Asynchronous TCP client based on asyncore.

    Frames every payload with a size preamble and dispatches connection,
    receive, send and disconnect events to an ITcpSocketCallback instance.

    Fix: ``gethostbyname`` / ``gethostname`` previously called
    ``self.socket.gethostbyname(...)`` / ``self.socket.gethostname()`` —
    those are *module-level* functions of the ``socket`` module, not
    methods of socket objects, so both calls raised AttributeError. They
    now delegate to the socket module.
    """

    def __init__(self, hostname, port, callback, no_delay=True):
        """Create the socket, start connecting and register with AsyncController.

        :param hostname: remote host name or address
        :param port: remote TCP port
        :param callback: ITcpSocketCallback instance receiving events
        :param no_delay: disable Nagle's algorithm when True
        :raises Exception: when callback is None or the wrong type
        """
        asyncore.dispatcher.__init__(self)
        self.is_closing = False
        self.callback = None
        if callback is not None and isinstance(callback, ITcpSocketCallback):
            self.callback = callback
        else:
            raise Exception('callback is None or not an instance of ITcpSocketCallback class')
        self.hostname = hostname
        self.port = port
        self.addr = (hostname, port)
        self.send_queue = deque()  # thread-safe dequeue
        # receive state machine: start by expecting a SIZE preamble
        self.transport = {'packet': None, 'type': PacketType.SIZE, 'size': SIZE_PACKET_LENGTH, 'offset': 0}
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        if no_delay:
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.set_reuse_addr()
        err = None
        try:
            self.connect((hostname, port))
            AsyncController.instance().add(self)
        except Exception as e:
            err = e
        finally:
            # report the connection result from a separate thread so the
            # callback cannot re-enter the asyncore loop
            def callback_connection():
                if self.callback is not None:
                    self.callback.on_newconnection(self, err)
            thread = threading.Thread(target=callback_connection)
            thread.start()

    def handle_connect(self):
        """Connection completion is reported via the thread started in __init__."""
        pass

    def handle_read(self):
        """Drive the SIZE/DATA receive state machine and deliver complete packets."""
        try:
            data = self.recv(self.transport['size'])
            if data is None or len(data) == 0:
                return
            # accumulate into the partial packet buffer
            if self.transport['packet'] is None:
                self.transport['packet'] = data
            else:
                self.transport['packet'] += data
            read_size = len(data)
            if read_size < self.transport['size']:
                # still waiting for the remainder of the current frame
                self.transport['offset'] += read_size
                self.transport['size'] -= read_size
            else:
                if self.transport['type'] == PacketType.SIZE:
                    should_receive = Preamble.to_should_receive(self.transport['packet'])
                    if should_receive < 0:
                        # invalid preamble: resynchronize on the next
                        # preamble boundary within the buffered bytes
                        preamble_offset = Preamble.check_preamble(self.transport['packet'])
                        self.transport['offset'] = len(self.transport['packet']) - preamble_offset
                        self.transport['size'] = preamble_offset
                        self.transport['packet'] = self.transport['packet'][preamble_offset:]
                        return
                    # preamble complete: switch to reading the payload
                    self.transport = {'packet': None, 'type': PacketType.DATA, 'size': should_receive, 'offset': 0}
                else:
                    # payload complete: reset for the next preamble, then deliver
                    receive_packet = self.transport
                    self.transport = {'packet': None, 'type': PacketType.SIZE, 'size': SIZE_PACKET_LENGTH, 'offset': 0}
                    self.callback.on_received(self, receive_packet['packet'])
        except Exception as e:
            print(e)
            traceback.print_exc()

    def handle_write(self):
        """Send the head of the queue; requeue the remainder on a partial send."""
        if len(self.send_queue) != 0:
            send_obj = self.send_queue.popleft()
            state = State.SUCCESS
            try:
                sent = asyncore.dispatcher.send(self, send_obj['data'][send_obj['offset']:])
                if sent < len(send_obj['data']):
                    # partial send: remember progress and retry first next time
                    send_obj['offset'] = send_obj['offset'] + sent
                    self.send_queue.appendleft(send_obj)
                    return
            except Exception as e:
                print(e)
                traceback.print_exc()
                state = State.FAIL_SOCKET_ERROR
            try:
                if self.callback is not None:
                    # report the payload without its size preamble
                    self.callback.on_sent(self, state, send_obj['data'][SIZE_PACKET_LENGTH:])
            except Exception as e:
                print(e)
                traceback.print_exc()

    def close(self):
        """Public close; idempotent via the is_closing flag."""
        if not self.is_closing:
            self.handle_close()

    def handle_error(self):
        """Treat any asyncore-level error as a disconnect."""
        if not self.is_closing:
            self.handle_close()

    def handle_close(self):
        """Tear down the socket, deregister and notify the callback once."""
        try:
            self.is_closing = True
            asyncore.dispatcher.close(self)
            AsyncController.instance().discard(self)
            if self.callback is not None:
                self.callback.on_disconnect(self)
        except Exception as e:
            print(e)
            traceback.print_exc()

    def send(self, data):
        """Queue *data* for sending, prefixed with its size preamble."""
        self.send_queue.append({'data': Preamble.to_preamble_packet(len(data)) + data, 'offset': 0})

    def gethostbyname(self, arg):
        """Resolve *arg* to an IPv4 address (socket module function — see class docstring fix)."""
        return socket.gethostbyname(arg)

    def gethostname(self):
        """Return the local host name (socket module function — see class docstring fix)."""
        return socket.gethostname()
| {
"content_hash": "5b833f3b97706210a9768728d3d10aa0",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 119,
"avg_line_length": 36.861111111111114,
"alnum_prop": 0.60120572720422,
"repo_name": "juhgiyo/pyserver",
"id": "ca5e1b2cda1152c5013e4295be1ea1c9e917502b",
"size": "6653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyserver/network/async_tcp_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55969"
}
],
"symlink_target": ""
} |
from pants.goal.task_registrar import TaskRegistrar as task
from squarepants.plugins.thrift_linter.tasks.thrift_linter import ThriftLinterDummy
def register_goals():
    """Register the stand-in thrift-linter goal with pants."""
    linter = task(name='thrift-linter', action=ThriftLinterDummy)
    linter.install().with_description('Standin for thrift-linter options')
| {
"content_hash": "9f680cea2eb8d74ee4a1604c693124fc",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 118,
"avg_line_length": 47.833333333333336,
"alnum_prop": 0.818815331010453,
"repo_name": "ericzundel/mvn2pants",
"id": "5929ac28edf396e2604416c77268c74b74c8d54f",
"size": "333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/squarepants/plugins/thrift_linter/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "283"
},
{
"name": "Python",
"bytes": "641401"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
import os, sys, subprocess, difflib
from scripts.support import run_command, split_wast
print '[ processing and updating testcases... ]\n'

# Regenerate asm2wasm expectation files: each .asm.js test is converted in
# all four precise/opts combinations, and each result overwrites the
# corresponding test/<name>.fromasm[.imprecise][.no-opts] file.
for asm in sorted(os.listdir('test')):
    if asm.endswith('.asm.js'):
        for precise in [1, 0]:
            for opts in [1, 0]:
                cmd = [os.path.join('bin', 'asm2wasm'), os.path.join('test', asm)]
                wasm = asm.replace('.asm.js', '.fromasm')
                if not precise:
                    cmd += ['--imprecise']
                    wasm += '.imprecise'
                if not opts:
                    cmd += ['--no-opts']
                    wasm += '.no-opts'
                print '..', asm, wasm
                print '   ', ' '.join(cmd)
                actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
                # Overwrite the expectation file with the fresh output.
                with open(os.path.join('test', wasm), 'w') as o: o.write(actual)
# Regenerate s2wasm expectations for the .s assembly tests. Only the
# llvm_autogenerated directory gets the --allocate-stack flag, and tests
# named start_* additionally get --start.
for dot_s_dir in ['dot_s', 'llvm_autogenerated']:
    for s in sorted(os.listdir(os.path.join('test', dot_s_dir))):
        if not s.endswith('.s'): continue
        print '..', s
        wasm = s.replace('.s', '.wast')
        full = os.path.join('test', dot_s_dir, s)
        stack_alloc = ['--allocate-stack=1024'] if dot_s_dir == 'llvm_autogenerated' else []
        cmd = [os.path.join('bin', 's2wasm'), full, '--emscripten-glue'] + stack_alloc
        if s.startswith('start_'):
            cmd.append('--start')
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        # s2wasm is expected to produce no stderr output at all.
        assert err == '', 'bad err:' + err
        expected_file = os.path.join('test', dot_s_dir, wasm)
        with open(expected_file, 'w') as o: o.write(actual)
# NOTE: the wasm2asm round-trip below is intentionally disabled — it is a
# bare string literal (dead code) kept for reference.
'''
for wasm in ['address.wast']:#os.listdir(os.path.join('test', 'spec')):
  if wasm.endswith('.wast'):
    print '..', wasm
    asm = wasm.replace('.wast', '.2asm.js')
    proc = subprocess.Popen([os.path.join('bin', 'wasm2asm'), os.path.join('test', 'spec', wasm)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    actual, err = proc.communicate()
    assert proc.returncode == 0, err
    assert err == '', 'bad err:' + err
    expected_file = os.path.join('test', asm)
    open(expected_file, 'w').write(actual)
'''
# Refresh the wasm-shell --print and --print-minified expectation files
# for each .wast under test/print.
for t in sorted(os.listdir(os.path.join('test', 'print'))):
    if t.endswith('.wast'):
        print '..', t
        wasm = os.path.basename(t).replace('.wast', '')
        cmd = [os.path.join('bin', 'wasm-shell'), os.path.join('test', 'print', t), '--print']
        print '   ', ' '.join(cmd)
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        with open(os.path.join('test', 'print', wasm + '.txt'), 'w') as o: o.write(actual)
        cmd = [os.path.join('bin', 'wasm-shell'), os.path.join('test', 'print', t), '--print-minified']
        print '   ', ' '.join(cmd)
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        with open(os.path.join('test', 'print', wasm + '.minified.txt'), 'w') as o: o.write(actual)
# Refresh pass-test expectations. The pass list is encoded in the file name
# (underscore-separated), each module in the .wast is run through wasm-opt
# separately, and the printed outputs are concatenated into <passname>.txt.
for t in sorted(os.listdir(os.path.join('test', 'passes'))):
    if t.endswith('.wast'):
        print '..', t
        passname = os.path.basename(t).replace('.wast', '')
        opts = ['-O'] if passname == 'O' else ['--' + p for p in passname.split('_')]
        t = os.path.join('test', 'passes', t)
        actual = ''
        for module, asserts in split_wast(t):
            # Pass tests are expected to contain modules only, no asserts.
            assert len(asserts) == 0
            with open('split.wast', 'w') as o: o.write(module)
            cmd = [os.path.join('bin', 'wasm-opt')] + opts + ['split.wast', '--print']
            actual += run_command(cmd)
        with open(os.path.join('test', 'passes', passname + '.txt'), 'w') as o: o.write(actual)
print '\n[ checking wasm-opt -o notation... ]\n'

# Run wasm-opt with -o output and copy the result back over the input file.
wast = os.path.join('test', 'hello_world.wast')
cmd = [os.path.join('bin', 'wasm-opt'), wast, '-o', 'a.wast']
run_command(cmd)
open(wast, 'w').write(open('a.wast').read())
print '\n[ checking binary format testcases... ]\n'

# Round-trip each top-level .wast through wasm-as (text -> binary) and
# wasm-dis (binary -> text), refreshing the .fromBinary expectation files.
for wast in sorted(os.listdir('test')):
    if wast.endswith('.wast') and not wast in []: # blacklist some known failures
        cmd = [os.path.join('bin', 'wasm-as'), os.path.join('test', wast), '-o', 'a.wasm']
        print ' '.join(cmd)
        # Delete stale output first so the existence assert is meaningful.
        if os.path.exists('a.wasm'): os.unlink('a.wasm')
        subprocess.check_call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        assert os.path.exists('a.wasm')
        cmd = [os.path.join('bin', 'wasm-dis'), 'a.wasm', '-o', 'a.wast']
        print ' '.join(cmd)
        if os.path.exists('a.wast'): os.unlink('a.wast')
        subprocess.check_call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        assert os.path.exists('a.wast')
        actual = open('a.wast').read()
        with open(os.path.join('test', wast + '.fromBinary'), 'w') as o: o.write(actual)
print '\n[ checking example testcases... ]\n'
for t in sorted(os.listdir(os.path.join('test', 'example'))):
output_file = os.path.join('bin', 'example')
cmd = ['-Isrc', '-g', '-lasmjs', '-lsupport', '-Llib/.', '-pthread', '-o', output_file]
if t.endswith('.txt'):
# check if there is a trace in the file, if so, we should build it
out = subprocess.Popen([os.path.join('scripts', 'clean_c_api_trace.py'), os.path.join('test', 'example', t)], stdout=subprocess.PIPE).communicate()[0]
if len(out) == 0:
print ' (no trace in ', t, ')'
continue
print ' (will check trace in ', t, ')'
src = 'trace.cpp'
with open(src, 'w') as o: o.write(out)
expected = os.path.join('test', 'example', t + '.txt')
else:
src = os.path.join('test', 'example', t)
expected = os.path.join('test', 'example', '.'.join(t.split('.')[:-1]) + '.txt')
if src.endswith(('.c', '.cpp')):
# build the C file separately
extra = [os.environ.get('CC') or 'gcc',
src, '-c', '-o', 'example.o',
'-Isrc', '-g', '-Llib/.', '-pthread']
print 'build: ', ' '.join(extra)
subprocess.check_call(extra)
# Link against the binaryen C library DSO, using an executable-relative rpath
cmd = ['example.o', '-lbinaryen'] + cmd + ['-Wl,-rpath=$ORIGIN/../lib']
else:
continue
print ' ', t, src, expected
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
cmd.append(f)
cmd = [os.environ.get('CXX') or 'g++', '-std=c++11'] + cmd
try:
print 'link: ', ' '.join(cmd)
subprocess.check_call(cmd)
print 'run...', output_file
proc = subprocess.Popen([output_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
actual, err = proc.communicate()
assert proc.returncode == 0, [proc.returncode, actual, err]
with open(expected, 'w') as o: o.write(actual)
finally:
os.remove(output_file)
if sys.platform == 'darwin':
# Also removes debug directory produced on Mac OS
shutil.rmtree(output_file + '.dSYM')
print '\n[ checking wasm-opt testcases... ]\n'
for t in os.listdir('test'):
if t.endswith('.wast') and not t.startswith('spec'):
print '..', t
t = os.path.join('test', t)
cmd = [os.path.join('bin', 'wasm-opt'), t, '--print']
actual = run_command(cmd)
actual = actual.replace('printing before:\n', '')
open(t, 'w').write(actual)
print '\n[ success! ]'
| {
"content_hash": "d3b1f60d140ac9daa353f32fc2561b0b",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 154,
"avg_line_length": 42.707317073170735,
"alnum_prop": 0.5962307252998287,
"repo_name": "ddcc/binaryen",
"id": "1ecbb7a40d949aa374db7fe35c73846ede49f14d",
"size": "7027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto_update_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5520424"
},
{
"name": "C",
"bytes": "72784"
},
{
"name": "C++",
"bytes": "1405890"
},
{
"name": "CMake",
"bytes": "6678"
},
{
"name": "JavaScript",
"bytes": "3673309"
},
{
"name": "Python",
"bytes": "60669"
},
{
"name": "Shell",
"bytes": "1735"
}
],
"symlink_target": ""
} |
from flask import render_template
from flask.views import MethodView
from .exceptions import ImproperlyConfigured
class BaseView(MethodView):
    """Base class for all other views."""

    def __init__(self, **kwargs):
        """
        Construct the view.

        Assigns the keyword arguments passed to the view instance for
        convenience and flexibility.
        """
        # FIX: dict.iteritems() is Python-2-only and crashes on Python 3;
        # items() iterates the same pairs on both versions.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def dispatch_request(self, *args, **kwargs):
        # Stash the routing arguments so subclasses can access them as
        # attributes during request handling.
        self.args = args
        self.kwargs = kwargs
        return super(BaseView, self).dispatch_request(*args, **kwargs)
class TemplateView(BaseView):
    """Render a given template."""

    methods = ['GET']

    #: The name of the template to be rendered, or an iterable with template
    #: names the first one existing will be rendered.
    template = None
    context = {}

    def get_template(self):
        """Return the configured template, or raise if none was set."""
        if self.template:
            return self.template
        raise ImproperlyConfigured(
            'You must either specify a template, or override '
            '`get_template()` method.'
        )

    def get_context(self, **kwargs):
        """Build the template context: class-level context plus `params`."""
        merged = dict(self.context)
        merged['params'] = kwargs
        return merged

    def render(self, **kwargs):
        """Render the template with the computed context."""
        return render_template(self.get_template(), **self.get_context(**kwargs))

    def get(self, **kwargs):
        return self.render(**kwargs)
| {
"content_hash": "f3ca5b3fa111d31d5989593fb8d981a1",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 27.07017543859649,
"alnum_prop": 0.61049902786779,
"repo_name": "kvesteri/flask-generic-views",
"id": "bf36817dd421062ca898bb90285b943db1425583",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_generic_views/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "46760"
}
],
"symlink_target": ""
} |
import abc
import inspect
import sys
from wordinserter.operations import ChildlessOperation, IgnoredOperation, Group, Text
from wordinserter.exceptions import InsertError
import contextlib
from collections.abc import Iterable
def renders(*operations):
    """Decorator marking a renderer method as handling *operations*.

    Methods handling ChildlessOperation types are returned as-is; all
    other renderer methods are wrapped as context managers. Mixing the
    two kinds in one decorator call is rejected.
    """
    def _wrapper(func):
        func.renders_operations = operations
        childless_flags = [isinstance(op, ChildlessOperation) for op in operations]
        if any(childless_flags):
            if not all(childless_flags):
                raise Exception("Cannot mix ChildlessOperations and normal Operations")
            return func
        return contextlib.contextmanager(func)
    return _wrapper
class NewOperations(object):
    """Wrapper signalling that a renderer produced replacement operations
    that should be rendered instead of the original operation's children."""
    def __init__(self, ops):
        # The replacement operations, stored verbatim.
        self.ops = ops
class BaseRenderer(abc.ABC):
    """Dispatches wordinserter operations to per-type renderer methods.

    Subclasses tag renderer methods with @renders(OpType, ...); the
    constructor collects them into `render_methods`. Rendering walks the
    operation tree recursively, firing optional 'pre'/'post' hooks around
    each operation.
    """

    def __init__(self, debug=False, hooks=None):
        self.debug = debug
        self.render_methods = {}
        self.hooks = hooks or {}

        # Collect every bound method tagged by the @renders decorator.
        for name, method in inspect.getmembers(self, inspect.ismethod):
            if hasattr(method, "renders_operations"):
                for op in method.renders_operations:
                    if op in self.render_methods:
                        raise RuntimeError("{0} has multiple renderer functions defined!".format(op.__class__))
                    self.render_methods[op] = method

    def _call_hook(self, key, operation, *args):
        """Invoke the 'pre' or 'post' hooks registered for operation's class."""
        cls = operation.__class__
        if key in self.hooks and cls in self.hooks[key]:
            hooks = self.hooks[key][cls]
            # A single callable is allowed as shorthand for a list of one.
            if not isinstance(hooks, Iterable):
                hooks = [hooks]
            for hook in hooks:
                hook(operation, self, *args)

    def new_operations(self, operations):
        """Wrap replacement operations so render_operation renders them in
        place of the current operation's children."""
        return NewOperations(Group(operations))

    @contextlib.contextmanager
    def with_hooks(self, operation, *args):
        """Context manager running pre-hooks on entry and post-hooks on exit."""
        self._call_hook("pre", operation, *args)
        yield
        self._call_hook("post", operation, *args)

    @renders(IgnoredOperation, Group)
    def ignored_element(self, *args, **kwargs):
        # Ignored/grouping operations render nothing themselves; their
        # children are still visited.
        yield

    def render_operation(self, operation, args=None, indent=0, **kwargs):
        """Render one operation and, recursively, its children.

        :raises NotImplementedError: if no renderer is registered for the
            operation's class.
        """
        method = self.render_methods.get(operation.__class__, None)

        if method is None or not callable(method):  # The callable check isn't strictly needed, but Pycharm likes it
            raise NotImplementedError(
                "Operation {0} not supported by this renderer".format(operation.__class__.__name__))

        with self.with_hooks(operation):
            if isinstance(operation, ChildlessOperation):
                if self.debug:
                    output = operation.__class__.__name__ \
                        if not isinstance(operation, Text) else operation.short_text
                    output = output.encode(errors="replace")
                    print((" " * indent) + str(output))

                method(operation, *args or [])
            else:
                if self.debug:
                    method = DebugMethod(method, indent)

                with method(operation, *args or []) as new_args:
                    if isinstance(new_args, NewOperations):
                        self._render(new_args.ops, None, indent + 1, **kwargs)
                    else:
                        self._render(operation.children, new_args, indent + 1, **kwargs)

    def render(self, *args, **kwargs):
        # This is the entrypoint to rendering something. This is here so we can override this function,
        # which is the first one to be called when rendering. The _render is reentrant, so it gets called by
        # render_operation, so this is the place to do setup/teardown code in subclasses.
        return self._render(*args, **kwargs)

    def _render(self, operations, args=None, indent=0, **kwargs):
        for operation in operations:
            try:
                # FIX: forward the caller's args/indent instead of the previous
                # hard-coded `args=None, indent=0`, which silently discarded
                # the renderer context-manager results (new_args) and the
                # debug indentation on every recursive call.
                self.render_operation(operation, args=args, indent=indent, **kwargs)
            except InsertError:
                raise
            except Exception as e:
                raise InsertError(operation, sys.exc_info()) from e
class DebugMethod(object):
    """Wraps a renderer context-manager method, printing an indented
    open/close line (with the operation's format) around its execution."""

    def __init__(self, method, indent):
        self.method = method
        self.indent = indent
        self.operation = None

    def _label(self, prefix=""):
        fmt = self.operation.format
        suffix = str(fmt) if fmt is not None else ""
        return (" " * self.indent) + prefix + self.operation.__class__.__name__ + " " + suffix

    def __call__(self, operation, *args):
        self.operation = operation
        self.inner_manager = self.method(operation, *args)
        return self

    def __enter__(self):
        print(self._label())
        return self.inner_manager.__enter__()

    def __exit__(self, *args):
        print(self._label("/"))
        return self.inner_manager.__exit__(*args)
from .com import COMRenderer
| {
"content_hash": "420d43b18a82bdf680d67004fa1933f6",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 116,
"avg_line_length": 36.47761194029851,
"alnum_prop": 0.5963584288052373,
"repo_name": "orf/wordinserter",
"id": "95331f5471944919ad6ef923a08f2855353b7d9e",
"size": "4888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordinserter/renderers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14887"
},
{
"name": "Python",
"bytes": "79383"
}
],
"symlink_target": ""
} |
from LRUCacheTest import LRUCacheTest
from TaskMutexTest import TaskMutexTest
# Allow running this test package directly as a script.
if __name__ == "__main__":
    import unittest
    unittest.main()
| {
"content_hash": "a0ff56355cc9dd7725f8903ef3ed5a4c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7571428571428571,
"repo_name": "appleseedhq/gaffer",
"id": "5c3a8c7ec9cee0b06b1895a70730456f2380b221",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferTest/IECorePreviewTest/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
from time import ctime
from functools import partial
from signal import signal, SIGINT
from ..fdbus_h import *
from ..exceptions.exceptions import *
from ..fdobjects.fdobjects import FileDescriptorPool, FileDescriptor, FDBus
class Server(FDBus, Thread):
    """Unix-domain socket server that brokers messages and file
    descriptors between connected clients.

    Runs as a thread: `run` binds and listens on `self.path`, then polls
    for new connections and per-client messages until `running` is
    cleared (e.g. by the SIGINT handler).
    """

    def __init__(self, path):
        super(Server, self).__init__(path)
        # Maps accepted client fds to PyCClientWrapper objects.
        self.clients = ClientPool()
        self.server_event_poll = poll()
        self.running = True
        self.sock = self.socket()
        # Shut down cleanly on Ctrl-C.
        signal(SIGINT, self.server_interrupt)

    @property
    def listen(self):
        """Start listening on the server socket (returns libc result code)."""
        return libc.listen(self.sock, DEFAULT_CLIENTS)

    @property
    def bind(self):
        """Bind the socket to the AF_UNIX address at `self.path`.

        Also caches the sockaddr pointer for later accept() calls.
        Returns libc's result code (-1 on failure).
        """
        server_address = pointer(sockaddr_un(AF_UNIX, self.path))
        self.serv_sk_addr = cast(server_address, POINTER(sockaddr))
        server_size = sizeof(sockaddr_un)
        return libc.bind(self.sock, self.serv_sk_addr, server_size)

    def accept(self):
        """Accept one pending connection and register it with the poller.

        Raises AcceptError if libc accept(2) fails.
        """
        client_size = c_int(sizeof(sockaddr_un))
        client_size_ptr = pointer(client_size)
        client = libc.accept(self.sock, self.serv_sk_addr, client_size_ptr)
        if client == -1:
            error_msg = get_error_msg()
            raise AcceptError(error_msg)
        self.server_event_poll.register(client, EVENT_MASK)
        self.clients[client] = PyCClientWrapper(client)

    # some naming aspect of the messaging
    # have the server create an id, not just the fd of the client
    def client_ev(self, client, ev):
        """Handle a poll event for `client`.

        On hangup/invalid-fd events the client is closed and dropped.
        Otherwise one message is received; messages are colon-separated
        "<protocol>:<command>:..." strings, dispatched via `proto_funcs`.
        Raises RecvError / InvalidProtoError / InvalidCmdError on failure.
        """
        if ev & (POLLHUP | POLLNVAL):
            # set up array of functions to take point to which one occured
            libc.close(client)
            self.server_event_poll.unregister(client)
            self.clients.remove(client)
        else:
            client_req_buffer = cast(REQ_BUFFER(), c_void_p)
            ret = libc.recv(client, client_req_buffer, MSG_LEN, MSG_FLAGS)
            if ret == -1:
                error_msg = get_error_msg()
                raise RecvError(error_msg)
            msg_raw = cast(client_req_buffer, c_char_p).value
            msg = msg_raw.split(':')
            try:
                protocol = PROTOCOL_NUMBERS[msg[0]]
            except KeyError:
                raise InvalidProtoError(msg[0])
            try:
                cmd = COMMAND_NUMBERS[msg[1]]
            except KeyError:
                raise InvalidCmdError(msg[1])
            # Dispatch to the protocol handler table inherited from FDBus.
            self.proto_funcs[protocol](client, cmd, msg)

    def shutdown(self):
        """Unlink the socket path, close every client fd, then the server fd.

        Raises UnlinkError/CloseError if any libc call fails.
        """
        ret = libc.unlink(self.path)
        if ret == -1:
            error_msg = get_error_msg()
            raise UnlinkError(error_msg)
        if any(ret == -1 for ret in map(libc.close, self.clients)):
            error_msg = get_error_msg()
            raise CloseError(error_msg)
        ret = libc.close(self.sock)
        if ret == -1:
            error_msg = get_error_msg()
            raise CloseError(error_msg)
        self.close_pool()

    def server_interrupt(self, sig, frame):
        """SIGINT handler: stop the poll loop and tear the server down.

        NOTE(review): `run` also calls shutdown() when the loop exits, so a
        SIGINT appears to trigger shutdown twice (the second unlink would
        fail) — confirm whether this is intended.
        """
        self.running = False
        self.shutdown()

    def passfd(self, client, fd_name):
        """Send the descriptor registered under `fd_name` to `client`."""
        recepient = self.clients[int(client)].fd
        self.send_fd(fd_name, recepient)

    @property
    def current_clients(self):
        """The fds of all currently connected clients."""
        return self.clients.dump()

    def remove_client(self, client):
        """Forget `client` without closing its descriptor."""
        self.clients.remove(client)

    def client_peer_req(self, client):
        """Reply to `client` with the fds of all *other* connected clients.

        Raises SendError if libc send(2) fails.
        """
        peers = filter(partial(lambda c1, c2: c1 != c2, str(client)),
                       map(str, self.current_clients))
        peer_dump = self.build_msg(PASS, PASS_PEER, *peers)
        ret = libc.send(client, cast(peer_dump, c_void_p),
                        MSG_LEN, MSG_FLAGS)
        if ret == -1:
            error_msg = get_error_msg()
            raise SendError(error_msg)

    def run(self):
        """Thread main loop: bind, listen, then poll for connections and
        client messages until `running` is cleared."""
        # poll for incoming messages to shutdown
        if self.bind == -1:
            error_msg = get_error_msg()
            raise BindError(error_msg)
        if self.listen == -1:
            error_msg = get_error_msg()
            raise ListenError(errno)
        self.server_event_poll.register(self.sock, EVENT_MASK)
        while self.running:
            events = self.server_event_poll.poll(1)
            if events:
                # An event on the listening socket means a new connection;
                # anything else is traffic from an existing client.
                if events[0][0] == self.sock:
                    self.accept()
                else:
                    self.client_ev(*events[0])
        self.shutdown()
# have a clients name/id themselves (str's)
class ClientPool(object):
    """Mapping of connected client fds to their wrapper objects."""

    def __init__(self):
        self.fdpool = {}

    def remove(self, client):
        """Drop `client` from the pool (KeyError if absent)."""
        del self.fdpool[client]

    def dump(self):
        """Return the tracked client fds."""
        return self.fdpool.keys()

    def __len__(self):
        # FIX: __len__ was defined twice in this class; the duplicate
        # (identical) definition has been removed.
        return len(self.fdpool)

    def __iter__(self):
        for fd in self.fdpool:
            yield fd

    def __setitem__(self, item, value):
        self.fdpool[item] = value

    def __getitem__(self, item):
        try:
            client = self.fdpool[item]
        except KeyError:
            # Surface unknown fds with a domain-specific error.
            raise UnknownDescriptorError(item)
        return client

    def __str__(self):
        return str(self.fdpool)
class PyCClientWrapper(object):
    """Thin wrapper around a raw C-level client socket fd."""
    # specify / name -> each client ...? provide more detailed info on each
    # client. More decoupled client to fds
    def __init__(self, client_c_fd):
        # The fd as returned by libc accept().
        self.fd = client_c_fd
| {
"content_hash": "8506dece0e27ad2b14e5b928854599e7",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 76,
"avg_line_length": 32.26219512195122,
"alnum_prop": 0.5671895671895671,
"repo_name": "tijko/fdbus",
"id": "825ca05a609be702a58641885b5910d81b2d46ae",
"size": "5338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fdbus/server/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36941"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo.utils import encodeutils
from oslo.utils import strutils
import prettytable
import six
from six import moves
from cloudkittyclient.openstack.common._i18n import _
class MissingArgs(Exception):
    """Supplied arguments are not sufficient for calling a function."""

    def __init__(self, missing):
        # Keep the raw list so callers can inspect exactly what was absent.
        self.missing = missing
        joined = ", ".join(missing)
        super(MissingArgs, self).__init__(_("Missing arguments: %s") % joined)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
    ...
    MissingArgs: Missing arguments: a

    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
    ...
    MissingArgs: Missing arguments: b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required argument is neither supplied
        positionally nor by keyword
    """
    # NOTE(review): inspect.getargspec is deprecated and removed in
    # Python 3.11; getfullargspec is the modern equivalent.
    argspec = inspect.getargspec(fn)

    num_defaults = len(argspec.defaults or [])
    # Parameters without defaults are the required ones.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        return getattr(method, '__self__', None) is not None

    if isbound(fn):
        # Bound methods receive `self` implicitly; don't count it.
        required_args.pop(0)

    # Names satisfied by keyword are removed; the first len(args) of the
    # remainder are covered by the positional arguments.
    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def arg(*args, **kwargs):
    """Decorator attaching a CLI argument spec to a command function.

    Example:

    >>> @arg("name", help="Name of the new entity")
    ... def entity_create(args):
    ...     pass
    """
    def _bind(func):
        add_arg(func, *args, **kwargs)
        return func
    return _bind
def env(*args, **kwargs):
    """Return the value of the first non-empty environment variable.

    Falls back to the ``default`` keyword argument (or '') when none of
    the named variables has a truthy value.
    """
    for name in args:
        value = os.environ.get(name)
        if value:
            return value
    return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
    """Bind a CLI argument spec to a shell.py `do_foo` function."""
    specs = getattr(func, 'arguments', None)
    if specs is None:
        specs = []
        func.arguments = specs
    spec = (args, kwargs)
    # Skip duplicates that appear when the module is shared across tests.
    if spec not in specs:
        # Decorators compose bottom-up, so prepend to keep positional
        # options in their declared order.
        specs.insert(0, spec)
def unauthenticated(func):
    """Mark *func* as callable without authentication.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    setattr(func, 'unauthenticated', True)
    return func
def isunauthenticated(func):
    """Check whether *func* is marked with the `@unauthenticated` decorator.

    :returns: bool
    """
    try:
        return func.unauthenticated
    except AttributeError:
        return False
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list or objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting
    :param sortby_index: index of the field for sorting table rows
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if ``field_labels`` and ``fields`` differ in length
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # FIX: interpolate the mapping into the message. Previously it was
        # passed as a second ValueError argument (logging-style) and never
        # substituted into the text.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {'labels': field_labels, 'fields': fields})

    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = 'l'
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                # Translate the display label into the attribute name.
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)

    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0):
    """Print a `dict` as a table of two columns.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column
    """
    pt = prettytable.PrettyTable([dict_property, 'Value'])
    pt.align = 'l'
    for k, v in six.iteritems(dct):
        # convert dict to str to check length
        if isinstance(v, dict):
            v = six.text_type(v)
        if wrap > 0:
            v = textwrap.fill(six.text_type(v), wrap)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        # NOTE: r'\n' is a literal backslash-n sequence, not a newline
        # character — this splits on escaped newlines embedded in the text.
        if v and isinstance(v, six.string_types) and r'\n' in v:
            lines = v.strip().split(r'\n')
            col1 = k
            for line in lines:
                pt.add_row([col1, line])
                # Only label the first row of a multi-line value.
                col1 = ''
        else:
            pt.add_row([k, v])

    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string()).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
    """Read a password from the controlling TTY.

    Prompts up to *max_password_prompts* times; when OS_VERIFY_PASSWORD
    is truthy, the password must be entered twice identically. Returns
    None on EOF (Ctrl-D), when stdin is not a TTY, or when no non-empty
    matching password is entered.
    """
    verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    pw = None
    if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
        try:
            for _attempt in moves.range(max_password_prompts):
                first = getpass.getpass("OS Password: ")
                second = getpass.getpass("Please verify: ") if verify else first
                if first and first == second:
                    pw = first
                    break
        except EOFError:
            # Ctrl-D pressed: give up and fall through with pw = None.
            pass
    return pw
def service_type(stype):
    """Decorator adding a 'service_type' attribute to the wrapped function.

    Usage:

    .. code-block:: python

       @service_type('volume')
       def mymethod(f):
           ...
    """
    def _tag(func):
        func.service_type = stype
        return func
    return _tag
def get_service_type(f):
    """Retrieve the service type attached to *f*, or None if unset."""
    try:
        return f.service_type
    except AttributeError:
        return None
def pretty_choice_list(l):
    """Return the items of *l* quoted and comma-separated, e.g. "'a', 'b'"."""
    quoted = ["'%s'" % item for item in l]
    return ', '.join(quoted)
def exit(msg=''):
    """Print *msg* to stderr (when given) and terminate with status 1."""
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(1)
| {
"content_hash": "cde7cccc05f53273304822d23bba2f42",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 28.59288537549407,
"alnum_prop": 0.5948299695880565,
"repo_name": "FNST-OpenStack/python-cloudkittyclient",
"id": "368025ee9c6f1a746c25ef1c88672b9a738e2c79",
"size": "7952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkittyclient/openstack/common/cliutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "211706"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# URL namespace, e.g. reverse('feedback:feedback_process').
app_name = 'feedback'
urlpatterns = [
    # Root of the feedback app: the view that takes the feedback form.
    url(r'^$', views.feedback_process, name='feedback_process'),
]
| {
"content_hash": "3bcb4b18e20475b299c2ea6ef78c8fde",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 21.4,
"alnum_prop": 0.6962616822429907,
"repo_name": "Nikita1710/ANUFifty50-Online-Mentoring-Platform",
"id": "bee2c8e23923a2933dbdf02dbbcb52f58b8256b6",
"size": "214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/fifty_fifty/feedback/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "263138"
},
{
"name": "HTML",
"bytes": "520705"
},
{
"name": "JavaScript",
"bytes": "442204"
},
{
"name": "Python",
"bytes": "163730"
}
],
"symlink_target": ""
} |
import nose
import traces
from scipy import stats
import numpy
def test_quantiles():
    """Histogram.quantiles must match scipy's mquantiles reference."""
    data = [15, 15, 20, 20, 20, 35, 35, 40, 40, 50, 50]
    histogram = traces.Histogram(data)
    q_list = [0.05, 0.25, 0.5, 0.75, 0.95]
    computed = histogram.quantiles(q_list, alpha=0.5, smallest_count=1)
    expected = stats.mstats.mquantiles(data, prob=q_list, alphap=0.5,
                                       betap=0.5)
    for got, want in zip(computed, expected):
        assert got == want
def test_normalize():
    """A normalized histogram's values must sum to exactly 1."""
    histogram = traces.Histogram([15, 15, 20, 20, 20, 35, 35, 40, 40, 50, 50])
    assert sum(histogram.normalized().values()) == 1.0
def _test_statistics(normalized):
    """Check Histogram statistics against numpy/scipy references.

    :param normalized: when True, run the checks on the normalized
        histogram (total mass 1) instead of the raw counts.
    """
    data_list = [
        [1, 2, 3, 5, 6, 7],
        [1, 2, 3, 5, 6],
        [1, 1],
        [1, 1, 1, 1, 1, 1, 1, 2],
        [i + 0.25 for i in [1, 1, 1, 1, 1, 1, 1, 2]],
    ]
    for data in data_list:
        histogram = traces.Histogram(data)
        if normalized:
            histogram = histogram.normalized()
            n = 1
        else:
            n = len(data)

        nose.tools.assert_almost_equal(histogram.total(), n)
        nose.tools.assert_almost_equal(histogram.mean(), numpy.mean(data))
        nose.tools.assert_almost_equal(histogram.variance(), numpy.var(data))
        nose.tools.assert_almost_equal(
            histogram.standard_deviation(),
            numpy.std(data),
        )
        nose.tools.assert_almost_equal(histogram.max(), numpy.max(data))
        nose.tools.assert_almost_equal(histogram.min(), numpy.min(data))
        nose.tools.assert_almost_equal(
            histogram.quantile(0.5),
            numpy.median(data),
        )

        q_list = [0.001, 0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99, 0.999]

        # linear interpolation
        result = histogram.quantiles(q_list)
        reference = stats.mstats.mquantiles(
            data, prob=q_list, alphap=0.5, betap=0.5,
        )
        for i, j in zip(result, reference):
            nose.tools.assert_almost_equal(i, j)

        # quantile() must reject out-of-range values. FIX: the previous
        # try/except had no `else`, so a non-raising implementation passed
        # silently.
        try:
            histogram.quantile(-1)
        except ValueError:
            pass
        else:
            raise AssertionError('quantile(-1) should raise ValueError')
def test_statistics():
    # Raw-count statistics. FIX: the arguments were previously swapped
    # relative to the function names (this one called _test_statistics(True),
    # i.e. the normalized variant).
    return _test_statistics(False)


def test_normalized_statistics():
    # Normalized-histogram statistics.
    return _test_statistics(True)
def test_quantile_interpolation():
    """Check both quantile interpolation modes, raw and normalized."""
    data = [1, 1, 1, 2, 3, 5, 6, 7]
    histogram = traces.Histogram(data)
    normalized = histogram.normalized()
    q_list = [0.001, 0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99, 0.999]

    def check(result, answer):
        for got, want in zip(result, answer):
            nose.tools.assert_almost_equal(got, want)

    # alpha=0: plain inverse of the empirical CDF.
    step_answer = [1.0, 1.0, 1.0, 1.0, 2.5, 5.5, 7.0, 7.0, 7.0]
    check(histogram.quantiles(q_list, alpha=0, smallest_count=1), step_answer)
    # same thing with the normalized histogram
    check(normalized.quantiles(q_list, alpha=0,
                               smallest_count=1.0 / len(data)), step_answer)

    # alpha=0.5: linear interpolation, checked against scipy's reference.
    linear_answer = stats.mstats.mquantiles(
        data, prob=q_list, alphap=0.5, betap=0.5,
    )
    check(histogram.quantiles(q_list, alpha=0.5, smallest_count=1),
          linear_answer)
    # same thing with the normalized histogram
    check(normalized.quantiles(q_list, alpha=0.5,
                               smallest_count=1.0 / len(data)), linear_answer)
def test_addition():
    """Adding two histograms sums the per-value counts."""
    first = traces.Histogram([1, 1, 1, 2, 3, 5])
    second = traces.Histogram([0, 0, 1, 2, 2])
    combined = first.add(second)
    assert list(combined.items()) == [(0, 2), (1, 4), (2, 3), (3, 1), (5, 1)]
def test_minmax_with_zeros():
    """min()/max() must ignore values whose count is zero."""
    histogram = traces.Histogram()
    for value, count in [(0, 0), (1, 1), (2, 1), (3, 0)]:
        histogram[value] += count
    nose.tools.eq_(histogram.min(), 1)
    nose.tools.eq_(histogram.max(), 2)
def test_histogram_stats_with_nones():
    """Statistics of empty / None-only histograms are all None."""
    stat_names = ['mean', 'variance', 'standard_deviation', 'min', 'max',
                  'median']

    empty = traces.Histogram()
    for name in stat_names:
        nose.tools.eq_(getattr(empty, name)(), None)

    none_only = traces.Histogram.from_dict({None: 1}, key=hash)
    for name in stat_names:
        nose.tools.eq_(getattr(none_only, name)(), None)

    # A time series with None gaps: the distribution only weighs the
    # defined intervals.
    ts = traces.TimeSeries()
    for t, v in [(0, None), (1, 5), (2, 6), (3, None), (9, 7), (10, None)]:
        ts[t] = v
    histogram = ts.distribution(start=0, end=10)
    nose.tools.eq_(histogram.mean(), 6)
| {
"content_hash": "61e6b66e2736d5db75b765ce9ad624ce",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 77,
"avg_line_length": 27.6123595505618,
"alnum_prop": 0.5916581892166837,
"repo_name": "datascopeanalytics/traces",
"id": "e72a2e9fb26f23bd0fbcf48b147d813df4782b94",
"size": "4915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_histogram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2140"
},
{
"name": "Python",
"bytes": "100644"
}
],
"symlink_target": ""
} |
import fnmatch
import os
from os.path import dirname, exists, join
from setuptools import find_packages, setup, Extension
from setuptools.command.build_ext import build_ext
import subprocess
import sys
import setuptools
#
# pybind-specific compilation stuff
#
class get_pybind_include(object):
    """Helper class to determine the pybind11 include path

    The purpose of this class is to postpone importing pybind11
    until it is actually installed, so that the ``get_include()``
    method can be invoked. """

    def __init__(self, user=False):
        # Forwarded to pybind11.get_include(user) in __str__.
        self.user = user

    def __str__(self):
        # Imported lazily: pybind11 may not yet be installed when setup.py
        # is first evaluated.
        import pybind11
        return pybind11.get_include(self.user)
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.

    Compiles a trivial translation unit with the flag; a CompileError
    means the flag is not supported.
    """
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
        f.write('int main (int argc, char **argv) { return 0; }')
        # FIX: flush so the compiler sees the source on disk instead of an
        # empty (still-buffered) file.
        f.flush()
        try:
            compiler.compile([f.name], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
    return True
def cpp_flag(compiler):
    """Return the -std=c++[11/14] compiler flag.
    The c++14 is preferred over c++11 (when it is available).
    """
    # Probe the newest standard first; return the first one that works.
    for candidate in ('-std=c++14', '-std=c++11'):
        if has_flag(compiler, candidate):
            return candidate
    raise RuntimeError('Unsupported compiler -- at least C++11 support '
                       'is needed!')
class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""
    # Base options per compiler family; the macOS additions below are
    # applied once at class-creation time.
    c_opts = {
        'msvc': ['/EHsc'],
        'unix': [],
    }
    if sys.platform == 'darwin':
        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
    def build_extensions(self):
        # Pick the option set for the active compiler and append flags
        # that have to be probed at build time (C++ standard, visibility).
        ct = self.compiler.compiler_type
        opts = self.c_opts.get(ct, [])
        if ct == 'unix':
            opts.append('-s') # strip
            opts.append('-g0') # remove debug symbols
            opts.append(cpp_flag(self.compiler))
            if has_flag(self.compiler, '-fvisibility=hidden'):
                opts.append('-fvisibility=hidden')
        # Apply the same flags to every extension, then delegate to the
        # stock setuptools implementation.
        for ext in self.extensions:
            ext.extra_compile_args = opts
        build_ext.build_extensions(self)
# Single extension module: example.cpp plus the numpy<->cv::Mat converter,
# linked against the OpenCV core/highgui libraries.
ext_modules = [
    Extension(
        'example/_example',
        [
            'example.cpp',
            'ndarray_converter.cpp',
        ],
        include_dirs=[
            # Path to pybind11 headers
            get_pybind_include(),
            get_pybind_include(user=True),
        ],
        libraries=['opencv_core', 'opencv_highgui'],
        language='c++',
    ),
]
setup(
    name='pybind11-opencv-numpy-example',
    version='0.1',
    author='Dustin Spicuzza',
    author_email='dustin@virtualroadside.com',
    packages=find_packages(),
    ext_modules=ext_modules,
    # NOTE(review): install_requires=None means no declared requirements;
    # pybind11 is presumably expected to be pre-installed -- confirm.
    install_requires=None,
    cmdclass={'build_ext': BuildExt},
    zip_safe=False,
)
| {
"content_hash": "2f78b8ce54098cbfa984a895eae39903",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 76,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.6040462427745664,
"repo_name": "DTChuck/HSImage",
"id": "e7f462d30a991fb73f4ce696b48a9aa3b37bf262",
"size": "3192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybind11_opencv_numpy/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "240"
},
{
"name": "C++",
"bytes": "834454"
},
{
"name": "Makefile",
"bytes": "26405"
},
{
"name": "Python",
"bytes": "9143"
},
{
"name": "QMake",
"bytes": "2161"
},
{
"name": "Shell",
"bytes": "2082"
},
{
"name": "TeX",
"bytes": "2698"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
from .config import *
from .wrappers import *
logger = logging.getLogger(__name__)
class Transactions(ListWrapper):
    """List wrapper that materializes each raw resource as a Transaction."""

    def wrap(self, resource):
        # Bind the raw API resource to our client via a Transaction.
        wrapped = Transaction(resource, self.client)
        return wrapped
class Transaction(Wrapper):
    """Wrapper exposing a transaction resource's attributes as plain
    Python attributes, plus approve/cancel helpers."""

    @property
    def attributes(self):
        """Raw attribute dict of the underlying resource."""
        return self.resource.attributes

    def __getattr__(self, name):
        # Look the name up in the resource's attribute dict first and
        # fall back to the Wrapper implementation. The original used a
        # bare `except:`, which also swallowed unrelated errors such as
        # KeyboardInterrupt; only a failed lookup should fall through.
        try:
            return self.resource.attributes[name]
        except (KeyError, AttributeError):
            return super(Transaction, self).__getattr__(name)

    def approve(self, mfa_token=None):
        """Approve this transaction, optionally under an MFA token."""
        if mfa_token:
            return self.with_mfa(mfa_token).resource.approve({})
        return self.resource.approve({})

    @property
    def mfa_uri(self):
        """MFA URI from the resource (top level or nested under
        'attributes'), or None when absent."""
        try:
            return self.resource.__dict__['mfa_uri']
        except KeyError:
            pass
        try:
            return self.resource.__dict__['attributes']['mfa_uri']
        except KeyError:
            pass
        return None

    def cancel(self):
        """Best-effort cancel: failures are logged at debug level and
        yield None instead of raising."""
        try:
            return self.resource.cancel()
        except Exception as e:
            logger.debug(e)
| {
"content_hash": "8b96ce22ace343639e44492544fc7919",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 66,
"avg_line_length": 22.294117647058822,
"alnum_prop": 0.5875109938434476,
"repo_name": "GemHQ/round-py",
"id": "360f4bf32cc1d9b0feebeb4cb461d8fa7e451daa",
"size": "1227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "round/transactions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71021"
}
],
"symlink_target": ""
} |
from ._node import Node
class ConflictNode(Node):
    # Marker node type: distinguishes conflicts in the tree; adds no
    # behavior beyond Node.
    pass
| {
"content_hash": "a7e796fc8cae9909473c069fb5b625db",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 25,
"avg_line_length": 12.2,
"alnum_prop": 0.7049180327868853,
"repo_name": "artPlusPlus/dijon",
"id": "577b5e1be2a144abac8c816455de6c5b2bb12905",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dijon/_nodes/_conflict_node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36241"
}
],
"symlink_target": ""
} |
"""Averaging of model weights."""
# pylint: disable=missing-docstring
# pylint: disable=g-complex-comprehension
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class Averaged(object):
  """Maintains running averages of a set of tensors and can temporarily
  swap the tensors' values for their averages.

  Call take_sample() to fold current values into the averages, then
  either use switch_to_average()/restore() explicitly or use the
  instance as a context manager around evaluation.
  """

  def __init__(self, tensors):
    tensors = list(tensors)
    with tf.variable_scope('averaged'):
      # Number of samples folded into the running averages so far.
      self._num_samples = tf.Variable(0, name='num_samples', trainable=False)
      with tf.variable_scope('avg'):
        self._averages = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
      with tf.variable_scope('save'):
        # Scratch variables holding the originals while averages are
        # swapped in.
        self._saves = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
    self._tensors = tensors
    self._take_sample = self._make_take_sample()
    # Fixed typo: was self._make_swith_to_average().
    self._switch = self._make_switch_to_average()
    self._restore = self._make_restore()
    self._reset = self._make_reset()

  def take_sample(self):
    """Fold the tensors' current values into the running averages."""
    tf.get_default_session().run(self._take_sample)

  def switch_to_average(self):
    """Save current values, then overwrite tensors with their averages."""
    tf.get_default_session().run(self._switch)

  def restore(self):
    """Restore the values saved by switch_to_average()."""
    tf.get_default_session().run(self._restore)

  def reset(self):
    """Forget all accumulated samples."""
    tf.get_default_session().run(self._reset)

  def __enter__(self):
    self.switch_to_average()

  def __exit__(self, type_, value, traceback):
    self.restore()

  def _make_take_sample(self):
    # Incremental mean: avg += (x - avg) / (n + 1), then n += 1.
    assignments = []
    n = tf.cast(self._num_samples, tf.float32)
    mu = 1.0 / (1.0 + n)
    for tensor, average in zip(self._tensors, self._averages):
      assignments.append(tf.assign_add(average, (tensor-average)*mu))
    add_to_averages = tf.group(assignments)
    with tf.control_dependencies([add_to_averages]):
      incr_num_samples = tf.assign(self._num_samples, self._num_samples + 1)
    return incr_num_samples

  def _make_switch_to_average(self):
    # Renamed from _make_swith_to_average (typo); behavior unchanged.
    assignments = []
    for save, tensor, average in zip(
        self._saves, self._tensors, self._averages):
      with tf.control_dependencies([save.assign(tensor)]):
        assignments.append(tensor.assign(average))
    return tf.group(assignments)

  def _make_restore(self):
    assignments = []
    for save, tensor in zip(self._saves, self._tensors):
      assignments.append(tf.assign(tensor, save))
    return tf.group(assignments)

  def _make_reset(self):
    return tf.assign(self._num_samples, 0)
# TODO(melisgl): I think this works with ResourceVariables but not with normal
# Variables. Deferred until TF2.0.
def _swap(x, y):
  """Build an op that exchanges the values of variables `x` and `y`."""
  old_x = x.read_value()
  old_y = y.read_value()
  # Force both reads to happen before either write can clobber them.
  with tf.control_dependencies([old_x, old_y]):
    return tf.group(y.assign(old_x), x.assign(old_y))
| {
"content_hash": "9765cc9e91be82cfa096094bae27208a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 32.193548387096776,
"alnum_prop": 0.6352705410821643,
"repo_name": "deepmind/lamb",
"id": "28c0e4db39c1dc7c34bab0a867443ddc9294a581",
"size": "3689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lamb/averaged.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249602"
},
{
"name": "Shell",
"bytes": "84088"
}
],
"symlink_target": ""
} |
from chatterbot.adapters.storage import StorageAdapter
from chatterbot.conversation import Statement
import random
import twitter
class TwitterAdapter(StorageAdapter):
    """
    The TwitterAdapter allows ChatterBot to read tweets from twitter.
    """

    def __init__(self, **kwargs):
        super(TwitterAdapter, self).__init__(**kwargs)
        self.api = twitter.Api(
            consumer_key=kwargs["twitter_consumer_key"],
            consumer_secret=kwargs["twitter_consumer_secret"],
            access_token_key=kwargs["twitter_access_token_key"],
            access_token_secret=kwargs["twitter_access_token_secret"]
        )

    def count(self):
        # Twitter is a simulated store; report a constant non-zero count.
        return 1

    def find(self, statement_text):
        """Return one random tweet matching `statement_text` as a Statement."""
        tweets = self.api.GetSearch(term=statement_text, count=20)
        tweet = random.choice(tweets)
        return Statement(tweet.text)

    def filter(self, **kwargs):
        """
        Returns a list of statements in the database
        that match the parameters specified.
        """
        statement_text = kwargs.get('text')
        # if not statement_text:
        #     statement_text = kwargs.get('in_response_to__contains')
        # data['in_reply_to_status_id_str']
        # If no text parameter was given get a selection of recent tweets
        if not statement_text:
            statements = []
            for i in range(0, 20):
                statements.append(self.get_random())
            return statements
        tweets = self.api.GetSearch(term=statement_text)
        tweet = random.choice(tweets)
        statement = Statement(tweet.text)
        return [statement]

    def update(self, statement):
        # Read-only backend: updating is a no-op.
        return statement

    def choose_word(self, words):
        """
        Light weight search for a valid word if one exists.

        A valid word is purely alphabetic with a length from 4 to 9.
        """
        for word in words:
            # Fixed: the original tested `len(word) > 3 or len(word) <= 9`,
            # which is true for every word, so any alphabetic word was
            # accepted regardless of length. `and` implements the
            # documented 4..9 range.
            if word.isalpha() and 3 < len(word) <= 9:
                return word
        return None

    def get_random(self):
        """
        Returns a random statement from the api.
        To generate a random tweet, search twitter for recent tweets
        containing the term 'random'. Then randomly select one tweet
        from the current set of tweets. Randomly choose one word from
        the selected random tweet, and make a second search request.
        Return one random tweet selected from the search results.
        """
        tweets = self.api.GetSearch(term="random", count=5)
        tweet = random.choice(tweets)
        words = tweet.text.split()
        word = self.choose_word(words)
        # If a valid word is found, make a second search request
        if word:
            tweets = self.api.GetSearch(term=word, count=5)
            if tweets:
                tweet = random.choice(tweets)
        # TODO: Handle non-ascii characters properly
        cleaned_text = ''.join(
            [i if ord(i) < 128 else ' ' for i in tweet.text]
        )
        return Statement(cleaned_text)

    def drop(self):
        """
        Twitter is only a simulated data source in
        this case so it cannot be removed.
        """
        pass
| {
"content_hash": "ff483b2f5263d1a37b8c8ec599a0ea59",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 73,
"avg_line_length": 31.0873786407767,
"alnum_prop": 0.6005621486570893,
"repo_name": "imminent-tuba/thesis",
"id": "5cd68ab6a29aa59f73416905857be2c0afc48774",
"size": "3202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/chatterbot/chatterbot/adapters/storage/twitter_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "203080"
},
{
"name": "HTML",
"bytes": "642"
},
{
"name": "JavaScript",
"bytes": "47372"
},
{
"name": "Python",
"bytes": "71261"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from google.cloud.vision_helpers.decorators import add_single_feature_methods
from google.cloud.vision_helpers import VisionHelpers
import sys
import warnings
from google.cloud.vision_v1p2beta1 import types
from google.cloud.vision_v1p2beta1.gapic import enums
from google.cloud.vision_v1p2beta1.gapic import image_annotator_client as iac
# Warn Python 2.7 users about the upcoming support drop. The implicit
# string concatenation in the original was missing separating spaces,
# producing "...Python 2.7.More details...Librariescan be found...".
if sys.version_info[:2] == (2, 7):
    message = (
        "A future version of this library will drop support for Python 2.7. "
        "More details about Python 2 support for Google Cloud Client Libraries "
        "can be found at https://cloud.google.com/python/docs/python2-sunset/"
    )
    warnings.warn(message, DeprecationWarning)
@add_single_feature_methods
class ImageAnnotatorClient(VisionHelpers, iac.ImageAnnotatorClient):
    # Thin subclass of the generated GAPIC client: the mixin and the
    # decorator add convenience helpers; re-export the generated docs.
    __doc__ = iac.ImageAnnotatorClient.__doc__
    enums = enums
__all__ = ("enums", "types", "ImageAnnotatorClient")
| {
"content_hash": "cfc4b72a4c28a7677ff011e3086c8ab1",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 33.607142857142854,
"alnum_prop": 0.7492029755579172,
"repo_name": "tswast/google-cloud-python",
"id": "565100ef1ae812ad2b608f825900f2ec2c83d939",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vision/google/cloud/vision_v1p2beta1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import nnets.core.utils as nut
import theano
import theano.tensor as T
import numpy,timeit
import pdb
class network(nut.persistent_container):
    """Persistable base class for theano neural networks.

    Subclasses implement `grow`, which must set the `predictor`,
    `errors`, and `negative_log_likelihood` attributes on the instance.
    """
    #############################################
    def __call__(self,x):return self.predictor(x)
    #############################################
    '''
    grow should set the 'predictor' attribute for the instance
    grow should set the 'errors' attribute for the instance
    grow should set the 'negative_log_likelihood' attribute for the instance
    '''
    # Fixed: the original did `raise NotImplemented()` -- NotImplemented
    # is a non-callable sentinel, so calling grow raised TypeError
    # instead of the intended NotImplementedError.
    def grow(self,n_in,n_out,**kws):raise NotImplementedError()
    #############################################
    '''
    networks can be saved and loaded
    every network has a name, an input shape, and an output shape
    subclasses should overload the grow method to build specific network architecture
    '''
    def __init__(self,name,n_in,n_out,**kws):
        nut.persistent_container.__init__(self,'networks',name)
        self.features = n_in
        self.categories = n_out
        self.level = 0.0
        self.grow(n_in,n_out,**kws)
    #############################################
    def setuptraining(self,dsets,max_time = 60,
            learning_rate = 0.01,batch_size = 100,
            L1_reg = 0.00,L2_reg = 0.0001):
        """Compile theano train/valid/test functions for minibatch SGD."""
        train_x,train_y,valid_x,valid_y,test_x,test_y = dsets
        x,y,index = self.input,T.ivector('y'),T.lscalar()
        # the y labels are presented as 1D vector of [int] labels
        # index is for indexing a [mini]batch
        # sgd requires:
        #   self.negative_log_likelihood
        #   self.L1_reg,self.L1,self.L2_reg,self.L2_sqr
        #   self.errors
        # sgd creates:
        #   self.test_f,self.valid_f,self.train_f
        #   self.n_train_batches
        #   self.n_valid_batches
        #   self.n_test_batches
        # the cost we minimize during training is the negative log likelihood of
        # the model plus the regularization terms (L1 and L2);
        negloglike = self.negative_log_likelihood(y)
        cost = (negloglike+L1_reg*self.L1+L2_reg*self.L2_sqr)
        # compiling a Theano function that computes the mistakes
        # that are made by the model on a minibatch
        ips,ops = [index],self.errors(y)
        b1,b2 = index*batch_size,(index+1)*batch_size
        self.test_f = theano.function(ips,ops,
            givens = {x: test_x[b1:b2],y: test_y[b1:b2]})
        self.valid_f = theano.function(ips,ops,
            givens = {x: valid_x[b1:b2],y: valid_y[b1:b2]})
        # compute the gradient of cost with respect to theta (sorted in params)
        # the resulting gradients will be stored in a list gparams
        gparams = [T.grad(cost,param) for param in self.params]
        updates = [(p,p-learning_rate*gp) for p,gp in zip(self.params,gparams)]
        # compiling a Theano function `train_model` that returns the cost, but in the
        # same time updates the parameter of the model based
        # on the rules defined in `updates`
        self.train_f = theano.function(ips,cost,updates = updates,
            givens = {x: train_x[b1:b2],y: train_y[b1:b2]})
        # compute number of minibatches for training, validation and testing
        self.n_train_batches = train_x.get_value(borrow = True).shape[0]//batch_size
        self.n_valid_batches = valid_x.get_value(borrow = True).shape[0]//batch_size
        self.n_test_batches = test_x.get_value(borrow = True).shape[0]//batch_size
        assert self.n_train_batches > 0
        assert self.n_valid_batches > 0
        assert self.n_test_batches > 0
    #############################################
    # stochastic gradient descent optimization
    # early-stopping parameters
    #   patience -> look as this many examples regardless
    #   patience_increase -> wait this much longer when a new best is found
    #   improvement_threshold -> a relative improvement of this much is considered significant
    def trainingloop(self,learning_rate = 0.01,
            patience = 10000,patience_increase = 2,improvement_threshold = 0.995,
            n_epochs = 10000,batch_size = 100,start_time = None,max_time = 60.0):
        """Run the SGD loop with early stopping and a wall-clock timeout;
        sets self.level to 1 - final test error."""
        if start_time is None:start_time = timeit.default_timer()
        v_frequency = min(self.n_train_batches,patience//2)
        best_v_loss,best_i,test_score = numpy.inf,0,0.0
        epoch,done_looping = 0,False
        while (epoch < n_epochs) and (not done_looping):
            if timeit.default_timer()-start_time > max_time:
                print('... training session timed out ...')
                break
            epoch = epoch + 1
            for minibatch_i in range(self.n_train_batches):
                minibatch_avg_cost = self.train_f(minibatch_i)
                i = (epoch - 1) * self.n_train_batches + minibatch_i
                if (i + 1) % v_frequency == 0:
                    # compute zero-one loss on validation set
                    v_losses = [self.valid_f(i) for i in range(self.n_valid_batches)]
                    this_v_loss = numpy.mean(v_losses)
                    print('\tepoch %i, minibatch %i/%i, validation error %f %%' %
                        (epoch,minibatch_i+1,self.n_train_batches,this_v_loss*100.))
                    if this_v_loss < best_v_loss:
                        if (this_v_loss < best_v_loss*improvement_threshold):
                            patience = max(patience,i*patience_increase)
                        best_v_loss,best_i = this_v_loss,i
                        test_losses = [self.test_f(i) for i in range(self.n_test_batches)]
                        test_score = numpy.mean(test_losses)
                        print(('\tepoch %i, minibatch %i/%i, test error of best model %f %%') %
                            (epoch,minibatch_i+1,self.n_train_batches,test_score*100.))
                        if test_score < 0.001:
                            print('... training session ending early ...')
                            done_looping = True
                            break
                if patience <= i:
                    done_looping = True
                    break
        m = ('\toptimization complete...\n\tbest validation score of %3f %% '
            '\n\tobtained at iteration %i, with test performance %3f %%')
        print(m % (best_v_loss*100.,best_i+1,test_score*100.))
        test_losses = [self.test_f(i) for i in range(self.n_test_batches)]
        test_score = numpy.mean(test_losses)
        self.level = 1.0-test_score
    def train(self,dsets,level,max_time = 60,**tkws):
        """Train until self.level reaches `level` (skipping if already
        there), save the network, and return self for chaining."""
        if self.level < level:
            print('... training network \'%s\' for %i seconds ...' % (self.name,max_time))
            s_time = timeit.default_timer()
            self.setuptraining(nut.theanodatawrap(dsets),max_time = max_time,**tkws)
            self.trainingloop(start_time = s_time,max_time = max_time,**tkws)
            e_time = timeit.default_timer()
            d_time = e_time-s_time
            print('... trained network \'%s\' for %f seconds ...' % (self.name,d_time))
            self.save()
            print('... saved network \'%s\'; trained to level %f ...' % (self.name,self.level))
        return self
| {
"content_hash": "c28f12e3fd94e9e258732adf4f21aa17",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 95,
"avg_line_length": 42.67261904761905,
"alnum_prop": 0.5611661319570372,
"repo_name": "ctogle/nnets",
"id": "40af020197c339e77960b557705362762884320b",
"size": "7169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nnets/nnetworks/nnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120706"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
} |
import sys,os,optparse,time
from socket import gethostname
# Check if DISPLAY variable is set
try:
    os.environ["DISPLAY"]
    import pygtk,gtk
except (KeyError, ImportError):
    # KeyError: DISPLAY is unset; ImportError: pygtk/gtk unavailable.
    # The original bare `except:` also hid SystemExit/KeyboardInterrupt.
    print("Error: DISPLAY environment variable not set.")
    sys.exit(1)
# Global Configuration File
CONF_FILE = "/etc/classification-banner"
# Returns Username
def get_user():
    """Return the login name from the controlling terminal, or '' when
    there is none (e.g. launched by a display manager)."""
    try:
        user = os.getlogin()
    except OSError:
        # os.getlogin raises OSError without a controlling terminal;
        # the original bare except (plus a dead `pass`) hid everything.
        user = ''
    return user
# Returns Hostname
def get_host():
    """Return the machine's short hostname (domain part stripped)."""
    short_name, _, _ = gethostname().partition('.')
    return short_name
# Classification Banner Class
class Classification_Banner:
    """Class to create and refresh the actual banner."""
    def __init__(self, message="UNCLASSIFIED", fgcolor="#000000",
                 bgcolor="#00CC00", face="liberation-sans", size="small",
                 weight="bold", x=0, y=0, esc=True, opacity=0.75, sys_info=False):
        """Set up and display the main window
        Keyword arguments:
        message -- The classification level to display
        fgcolor -- Foreground color of the text to display
        bgcolor -- Background color of the banner the text is against
        face -- Font face to use for the displayed text
        size -- Size of font to use for text
        weight -- Bold or normal
        hres -- Horizontal Screen Resolution (int) [ requires vres ]
        vres -- Vertical Screen Resolution (int) [ requires hres ]
        opacity -- Opacity of window (float) [0 .. 1, default 0.75]
        esc -- Show the '(ESC to hide temporarily)' hint (bool)
        sys_info -- Show username and hostname labels (bool)
        """
        self.hres = x
        self.vres = y
        # Dynamic Resolution Scaling
        self.monitor = gtk.gdk.Screen()
        self.monitor.connect("size-changed", self.resize)
        # Newer versions of pygtk have this method
        try:
            self.monitor.connect("monitors-changed", self.resize)
        except:
            pass
        # Create Main Window: undecorated, always-on-top, sticky, and
        # hidden from the taskbar/pager so it behaves like an overlay.
        self.window = gtk.Window()
        self.window.set_position(gtk.WIN_POS_CENTER)
        self.window.connect("hide", self.restore)
        self.window.connect("key-press-event", self.keypress)
        self.window.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(bgcolor))
        self.window.set_property('skip-taskbar-hint', True)
        self.window.set_property('skip-pager-hint', True)
        self.window.set_property('destroy-with-parent', True)
        self.window.stick()
        self.window.set_decorated(False)
        self.window.set_keep_above(True)
        self.window.set_app_paintable(True)
        # Older window managers may not support opacity.
        try:
            self.window.set_opacity(opacity)
        except:
            pass
        # Set the default window size
        self.window.set_default_size(int(self.hres), 5)
        # Create Main Horizontal Box to Populate
        self.hbox = gtk.HBox()
        # Create the Center Vertical Box
        self.vbox_center = gtk.VBox()
        self.center_label = gtk.Label(
            "<span font_family='%s' weight='%s' foreground='%s' size='%s'>%s</span>" %
            (face, weight, fgcolor, size, message))
        self.center_label.set_use_markup(True)
        self.center_label.set_justify(gtk.JUSTIFY_CENTER)
        self.vbox_center.pack_start(self.center_label, True, True, 0)
        # Create the Right-Justified Vertical Box to Populate for hostname
        self.vbox_right = gtk.VBox()
        self.host_label = gtk.Label(
            "<span font_family='%s' weight='%s' foreground='%s' size='%s'>%s</span>" %
            (face, weight, fgcolor, size, get_host()))
        self.host_label.set_use_markup(True)
        self.host_label.set_justify(gtk.JUSTIFY_RIGHT)
        self.host_label.set_width_chars(20)
        # Create the Left-Justified Vertical Box to Populate for user
        self.vbox_left = gtk.VBox()
        self.user_label = gtk.Label(
            "<span font_family='%s' weight='%s' foreground='%s' size='%s'>%s</span>" %
            (face, weight, fgcolor, size, get_user()))
        self.user_label.set_use_markup(True)
        self.user_label.set_justify(gtk.JUSTIFY_LEFT)
        self.user_label.set_width_chars(20)
        # Create the Right-Justified Vertical Box to Populate for ESC message
        self.vbox_esc_right = gtk.VBox()
        self.esc_label = gtk.Label(
            "<span font_family='liberation-sans' weight='normal' foreground='%s' size='xx-small'> (ESC to hide temporarily) </span>" %
            (fgcolor))
        self.esc_label.set_use_markup(True)
        self.esc_label.set_justify(gtk.JUSTIFY_RIGHT)
        self.esc_label.set_width_chars(20)
        # Empty Label for formatting purposes
        self.vbox_empty = gtk.VBox()
        self.empty_label = gtk.Label(
            "<span font_family='liberation-sans' weight='normal'> </span>")
        self.empty_label.set_use_markup(True)
        self.empty_label.set_width_chars(20)
        # Layout: four variants depending on esc/sys_info flags.
        if not esc:
            if not sys_info:
                self.hbox.pack_start(self.vbox_center, True, True, 0)
            else:
                self.vbox_right.pack_start(self.host_label, True, True, 0)
                self.vbox_left.pack_start(self.user_label, True, True, 0)
                self.hbox.pack_start(self.vbox_right, False, True, 20)
                self.hbox.pack_start(self.vbox_center, True, True, 0)
                self.hbox.pack_start(self.vbox_left, False, True, 20)
        else:
            if esc and not sys_info:
                self.empty_label.set_justify(gtk.JUSTIFY_LEFT)
                self.vbox_empty.pack_start(self.empty_label, True, True, 0)
                self.vbox_esc_right.pack_start(self.esc_label, True, True, 0)
                self.hbox.pack_start(self.vbox_esc_right, False, True, 0)
                self.hbox.pack_start(self.vbox_center, True, True, 0)
                self.hbox.pack_start(self.vbox_empty, False, True, 0)
            if sys_info:
                self.vbox_right.pack_start(self.host_label, True, True, 0)
                self.vbox_left.pack_start(self.user_label, True, True, 0)
                self.hbox.pack_start(self.vbox_right, False, True, 20)
                self.hbox.pack_start(self.vbox_center, True, True, 0)
                self.hbox.pack_start(self.vbox_left, False, True, 20)
        self.window.add(self.hbox)
        self.window.show_all()
        self.width, self.height = self.window.get_size()
    # Restore Minimized Window
    def restore(self, widget, data=None):
        self.window.deiconify()
        self.window.present()
        return True
    # Destroy Classification Banner Window on Resize (Display Banner Will Relaunch)
    def resize(self, widget, data=None):
        self.window.destroy()
        return True
    # Press ESC to hide window for 15 seconds
    def keypress(self, widget, event=None):
        # 65307 is the GDK keyval for Escape.
        if event.keyval == 65307:
            if not gtk.events_pending():
                self.window.iconify()
                self.window.hide()
                # NOTE(review): time.sleep here blocks the GTK main loop
                # for the full 15 seconds -- presumably intentional, but
                # confirm; a gobject timeout would keep the UI responsive.
                time.sleep(15)
                self.window.show()
                self.window.deiconify()
                self.window.present()
        return True
class Display_Banner:
    """Display Classification Banner Message"""
    # NOTE(review): execfile() makes this Python 2 only.
    def __init__(self):
        # Dynamic Resolution Scaling
        self.monitor = gtk.gdk.Screen()
        self.monitor.connect("size-changed", self.resize)
        # Newer versions of pygtk have this method
        try:
            self.monitor.connect("monitors-changed", self.resize)
        except:
            pass
        # Launch Banner
        self.config, self.args = self.configure()
        self.execute(self.config)
    # Read Global configuration
    def configure(self):
        """Load CONF_FILE (if readable) and merge it with command-line
        options; returns (options, args) from optparse."""
        config = {}
        try:
            execfile(CONF_FILE, config)
        except:
            pass
        defaults = {}
        defaults["message"] = config.get("message", "UNCLASSIFIED")
        defaults["fgcolor"] = config.get("fgcolor", "#FFFFFF")
        defaults["bgcolor"] = config.get("bgcolor", "#007A33")
        defaults["face"] = config.get("face", "liberation-sans")
        defaults["size"] = config.get("size", "small")
        defaults["weight"] = config.get("weight", "bold")
        defaults["show_top"] = config.get("show_top", True)
        defaults["show_bottom"] = config.get("show_bottom", True)
        defaults["hres"] = config.get("hres", 0)
        defaults["vres"] = config.get("vres", 0)
        defaults["sys_info"] = config.get("sys_info", False)
        defaults["opacity"] = config.get("opacity", 0.75)
        defaults["esc"] = config.get("esc", True)
        defaults["spanning"] = config.get("spanning", False)
        # Use the global config to set defaults for command line options
        parser = optparse.OptionParser()
        parser.add_option("-m", "--message", default=defaults["message"],
                          help="Set the Classification message")
        parser.add_option("-f", "--fgcolor", default=defaults["fgcolor"],
                          help="Set the Foreground (text) color")
        parser.add_option("-b", "--bgcolor", default=defaults["bgcolor"],
                          help="Set the Background color")
        parser.add_option("-x", "--hres", default=defaults["hres"], type="int",
                          help="Set the Horizontal Screen Resolution")
        parser.add_option("-y", "--vres", default=defaults["vres"], type="int",
                          help="Set the Vertical Screen Resolution")
        parser.add_option("-o", "--opacity", default=defaults["opacity"],
                          type="float", dest="opacity",
                          help="Set the window opacity for composted window managers")
        parser.add_option("--face", default=defaults["face"], help="Font face")
        parser.add_option("--size", default=defaults["size"], help="Font size")
        parser.add_option("--weight", default=defaults["weight"],
                          help="Set the Font weight")
        parser.add_option("--disable-esc-msg", default=defaults["esc"],
                          dest="esc", action="store_false",
                          help="Disable the 'ESC to hide' message")
        parser.add_option("--hide-top", default=defaults["show_top"],
                          dest="show_top", action="store_false",
                          help="Disable the top banner")
        parser.add_option("--hide-bottom", default=defaults["show_bottom"],
                          dest="show_bottom", action="store_false",
                          help="Disable the bottom banner")
        parser.add_option("--system-info", default=defaults["sys_info"],
                          dest="sys_info", action="store_true",
                          help="Show user and hostname in the top banner")
        parser.add_option("--enable-spanning", default=defaults["spanning"],
                          dest="spanning", action="store_true",
                          help="Enable banner(s) to span across screens as a single banner")
        options, args = parser.parse_args()
        return options, args
    # Launch the Classification Banner Window(s)
    def execute(self, options):
        """Determine the screen resolution and create the banner(s)."""
        # NOTE(review): num_monitor is set to 0 and never updated, so
        # the per-monitor branch below can never run -- confirm whether
        # multi-monitor placement was ever wired up.
        self.num_monitor = 0
        if options.hres == 0 or options.vres == 0:
            # Try Xrandr to determine primary monitor resolution
            try:
                self.screen = os.popen("xrandr | grep ' current ' | awk '{ print $8$9$10+0 }'").readlines()[0]
                self.x = self.screen.split('x')[0]
                self.y = self.screen.split('x')[1].split('+')[0]
            except:
                try:
                    self.screen = os.popen("xrandr | grep ' connected ' | awk '{ print $3 }'").readlines()[0]
                    self.x = self.screen.split('x')[0]
                    self.y = self.screen.split('x')[1].split('+')[0]
                except:
                    self.screen = os.popen("xrandr | grep '^\*0' | awk '{ print $2$3$4 }'").readlines()[0]
                    self.x = self.screen.split('x')[0]
                    self.y = self.screen.split('x')[1].split('+')[0]
            else:
                # Fail back to GTK method
                self.display = gtk.gdk.display_get_default()
                self.screen = self.display.get_default_screen()
                self.x = self.screen.get_width()
                self.y = self.screen.get_height()
        else:
            # Resoultion Set Staticly
            self.x = options.hres
            self.y = options.vres
        if not options.spanning and self.num_monitor > 1:
            for monitor in range(self.num_monitor):
                mon_geo = self.screen.get_monitor_geometry(monitor)
                self.x_location, self.y_location, self.x, self.y = mon_geo
                self.banners(options)
        else:
            self.x_location = 0
            self.y_location = 0
            self.banners(options)
    def banners(self, options):
        """Create the top and/or bottom banner windows per options."""
        if options.show_top:
            top = Classification_Banner(
                options.message,
                options.fgcolor,
                options.bgcolor,
                options.face,
                options.size,
                options.weight,
                self.x,
                self.y,
                options.esc,
                options.opacity,
                options.sys_info)
            top.window.move(self.x_location, self.y_location)
        if options.show_bottom:
            bottom = Classification_Banner(
                options.message,
                options.fgcolor,
                options.bgcolor,
                options.face,
                options.size,
                options.weight,
                self.x,
                self.y,
                options.esc,
                options.opacity)
            bottom.window.move(self.x_location, int(bottom.vres))
    # Relaunch the Classification Banner on Screen Resize
    def resize(self, widget, data=None):
        self.config, self.args = self.configure()
        self.execute(self.config)
        return True
# Main Program Loop
if __name__ == "__main__":
run = Display_Banner()
gtk.main()
| {
"content_hash": "563903a4acd29612b99f43f589c7483d",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 136,
"avg_line_length": 40.31534090909091,
"alnum_prop": 0.5552815164540906,
"repo_name": "fcaviggia/hardening-script-el6-kickstart",
"id": "a3bfd78e0341d9f353ad64e934420a083d9dfcbb",
"size": "14494",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "config/hardening/classification-banner.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66241"
},
{
"name": "Shell",
"bytes": "19850"
}
],
"symlink_target": ""
} |
import vtk
def main():
    """Render the zero isosurface of x^2 + y^2 + z^2 - 0.4^2 (a sphere of
    radius 0.4) sampled on a 26^3 structured-points volume."""
    colors = vtk.vtkNamedColors()
    renderer = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # 26x26x26 samples spanning the cube [-0.5, 0.5]^3.
    vol = vtk.vtkStructuredPoints()
    vol.SetDimensions(26, 26, 26)
    vol.SetOrigin(-0.5, -0.5, -0.5)
    sp = 1.0 / 25.0
    vol.SetSpacing(sp, sp, sp)
    scalars = vtk.vtkDoubleArray()
    scalars.SetNumberOfComponents(1)
    scalars.SetNumberOfTuples(26 * 26 * 26)
    # Evaluate the implicit sphere function at each grid point, storing
    # values in x-fastest order (offset = i + j*26 + k*26*26).
    for k in range(0, 26):
        z = -0.5 + k * sp
        kOffset = k * 26 * 26
        for j in range(0, 26):
            y = -0.5 + j * sp
            jOffset = j * 26
            for i in range(0, 26):
                x = -0.5 + i * sp
                s = x * x + y * y + z * z - (0.4 * 0.4)
                offset = i + jOffset + kOffset
                scalars.InsertTuple1(offset, s)
    vol.GetPointData().SetScalars(scalars)
    # Extract the s == 0 contour (the sphere surface).
    contour = vtk.vtkContourFilter()
    contour.SetInputData(vol)
    contour.SetValue(0, 0.0)
    volMapper = vtk.vtkPolyDataMapper()
    volMapper.SetInputConnection(contour.GetOutputPort())
    volMapper.ScalarVisibilityOff()
    volActor = vtk.vtkActor()
    volActor.SetMapper(volMapper)
    volActor.GetProperty().EdgeVisibilityOn()
    volActor.GetProperty().SetColor(colors.GetColor3d("Salmon"))
    renderer.AddActor(volActor)
    renderer.SetBackground(colors.GetColor3d("SlateGray"))
    renWin.SetSize(512, 512)
    # Interact with the data.
    renWin.Render()
    iren.Start()
if __name__ == "__main__":
main()
| {
"content_hash": "ecf84e371aeb9ee024cdef18fac6cccb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 64,
"avg_line_length": 27.789473684210527,
"alnum_prop": 0.6029040404040404,
"repo_name": "lorensen/VTKExamples",
"id": "001753645257990a17ed7de81d4fa8957ff66e9c",
"size": "1607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/StructuredPoints/Vol.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
} |
import unittest, shutil, argparse, os
import scans
class TestCommandHelp(unittest.TestCase):
    """Smoke-test the argparse parser factory of every scans subcommand."""

    def test_help_parser_for_each_command(self):
        """Every command's parser must build and render non-empty help text."""
        for cmd_name, parser_fun in scans.__commands__:
            parser = parser_fun(argparse.ArgumentParser())
            helpstring = parser.format_help()
            # Previously helpstring was discarded; assert it so a parser that
            # renders empty help is reported, naming the offending command.
            self.assertTrue(helpstring, "empty help for command %r" % (cmd_name,))
class TestScans(unittest.TestCase):
    """Tests for the scans.Selection command object."""

    def setUp(self):
        # A fresh Selection instance is created for each test.
        self.cms_selection = scans.Selection()

    def tearDown(self):
        # Nothing to clean up; hook kept for symmetry with setUp.
        # Only runs when setUp succeeded.
        pass

    def test_selection_return_code(self):
        """Selection.main() should report success (return code 0)."""
        self.assertEqual(self.cms_selection.main(), 0)
| {
"content_hash": "3dc83f49cd14b8c043d1c371dc6484a2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 32.09090909090909,
"alnum_prop": 0.6728045325779037,
"repo_name": "broadinstitute/cms",
"id": "09cb727695d70087a8bd4fe85da6546369eaebc5",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/test/unit/test_scans.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "550664"
},
{
"name": "C++",
"bytes": "330"
},
{
"name": "Makefile",
"bytes": "5127"
},
{
"name": "Python",
"bytes": "1359651"
},
{
"name": "Shell",
"bytes": "3921"
}
],
"symlink_target": ""
} |
from _tree import Tree
from _walk import walk
from treebeard._at import set_at
from _path import Path
def structural_copy(tree):
    """Return a new Tree holding every (path, leaf) pair found in *tree*."""
    copy = Tree()
    target = Path(copy)
    for location, value in walk(tree):
        set_at(target, location, value)
    return copy
| {
"content_hash": "23a1443077ab4956498ca6e8d379cee4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 33,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.6653543307086615,
"repo_name": "dbew/treebeard",
"id": "28476903ee0eb8775e94bc0e4081efdbdc009955",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treebeard/_copy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "38072"
}
],
"symlink_target": ""
} |
"""Generated message classes for dataflow version v1b3.
Manages Google Cloud Dataflow projects on Google Cloud Platform.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
# protorpc package name shared by every message class in this module.
package = 'dataflow'
class ApproximateProgress(_messages.Message):
  r"""Obsolete in favor of ApproximateReportedProgress and
  ApproximateSplitRequest.

  Fields:
    percentComplete: Obsolete.
    position: Obsolete.
    remainingTime: Obsolete.
  """
  percentComplete = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  position = _messages.MessageField('Position', 2)
  remainingTime = _messages.StringField(3)
class ApproximateReportedProgress(_messages.Message):
  r"""A progress measurement of a WorkItem by a worker.

  Fields:
    consumedParallelism: Total amount of parallelism in the portion of input
      of this task that has already been consumed and is no longer active. In
      the first two examples above (see remaining_parallelism), the value
      should be 29 or 2 respectively. The sum of remaining_parallelism and
      consumed_parallelism should equal the total amount of parallelism in
      this work item. If specified, must be finite.
    fractionConsumed: Completion as fraction of the input consumed, from 0.0
      (beginning, nothing consumed), to 1.0 (end of the input, entire input
      consumed).
    position: A Position within the work to represent a progress.
    remainingParallelism: Total amount of parallelism in the input of this
      task that remains, (i.e. can be delegated to this task and any new tasks
      via dynamic splitting). Always at least 1 for non-finished work items
      and 0 for finished. "Amount of parallelism" refers to how many non-
      empty parts of the input can be read in parallel. This does not
      necessarily equal number of records. An input that can be read in
      parallel down to the individual records is called "perfectly
      splittable". An example of non-perfectly parallelizable input is a
      block-compressed file format where a block of records has to be read as
      a whole, but different blocks can be read in parallel. Examples: * If
      we are processing record #30 (starting at 1) out of 50 in a perfectly
      splittable 50-record input, this value should be 21 (20 remaining + 1
      current). * If we are reading through block 3 in a block-compressed file
      consisting of 5 blocks, this value should be 3 (since blocks 4 and 5
      can be processed in parallel by new tasks via dynamic splitting and
      the current task remains processing block 3). * If we are reading
      through the last block in a block-compressed file, or reading or
      processing the last record in a perfectly splittable input, this value
      should be 1, because apart from the current task, no additional
      remainder can be split off.
  """
  consumedParallelism = _messages.MessageField('ReportedParallelism', 1)
  fractionConsumed = _messages.FloatField(2)
  position = _messages.MessageField('Position', 3)
  remainingParallelism = _messages.MessageField('ReportedParallelism', 4)
class ApproximateSplitRequest(_messages.Message):
  r"""A suggestion by the service to the worker to dynamically split the
  WorkItem.

  Fields:
    fractionConsumed: A fraction at which to split the work item, from 0.0
      (beginning of the input) to 1.0 (end of the input).
    fractionOfRemainder: The fraction of the remainder of work to split the
      work item at, from 0.0 (split at the current position) to 1.0 (end of
      the input).
    position: A Position at which to split the work item.
  """
  fractionConsumed = _messages.FloatField(1)
  fractionOfRemainder = _messages.FloatField(2)
  position = _messages.MessageField('Position', 3)
class AutoscalingEvent(_messages.Message):
  r"""A structured message reporting an autoscaling decision made by the
  Dataflow service.

  Enums:
    EventTypeValueValuesEnum: The type of autoscaling event to report.

  Fields:
    currentNumWorkers: The current number of workers the job has.
    description: A message describing why the system decided to adjust the
      current number of workers, why it failed, or why the system decided to
      not make any changes to the number of workers.
    eventType: The type of autoscaling event to report.
    targetNumWorkers: The target number of workers the worker pool wants to
      resize to use.
    time: The time this event was emitted to indicate a new target or current
      num_workers value.
    workerPool: A short and friendly name for the worker pool this event
      refers to, populated from the value of
      PoolStageRelation::user_pool_name.
  """
  class EventTypeValueValuesEnum(_messages.Enum):
    r"""The type of autoscaling event to report.

    Values:
      TYPE_UNKNOWN: Default type for the enum. Value should never be
        returned.
      TARGET_NUM_WORKERS_CHANGED: The TARGET_NUM_WORKERS_CHANGED type should
        be used when the target worker pool size has changed at the start of
        an actuation. An event should always be specified as
        TARGET_NUM_WORKERS_CHANGED if it reflects a change in the
        target_num_workers.
      CURRENT_NUM_WORKERS_CHANGED: The CURRENT_NUM_WORKERS_CHANGED type should
        be used when actual worker pool size has been changed, but the
        target_num_workers has not changed.
      ACTUATION_FAILURE: The ACTUATION_FAILURE type should be used when we
        want to report an error to the user indicating why the current number
        of workers in the pool could not be changed. Displayed in the current
        status and history widgets.
      NO_CHANGE: Used when we want to report to the user a reason why we are
        not currently adjusting the number of workers. Should specify both
        target_num_workers, current_num_workers and a decision_message.
    """
    TYPE_UNKNOWN = 0
    TARGET_NUM_WORKERS_CHANGED = 1
    CURRENT_NUM_WORKERS_CHANGED = 2
    ACTUATION_FAILURE = 3
    NO_CHANGE = 4
  currentNumWorkers = _messages.IntegerField(1)
  description = _messages.MessageField('StructuredMessage', 2)
  eventType = _messages.EnumField('EventTypeValueValuesEnum', 3)
  targetNumWorkers = _messages.IntegerField(4)
  time = _messages.StringField(5)
  workerPool = _messages.StringField(6)
class AutoscalingSettings(_messages.Message):
  r"""Settings for WorkerPool autoscaling.

  Enums:
    AlgorithmValueValuesEnum: The algorithm to use for autoscaling.

  Fields:
    algorithm: The algorithm to use for autoscaling.
    maxNumWorkers: The maximum number of workers to cap scaling at.
  """
  class AlgorithmValueValuesEnum(_messages.Enum):
    r"""The algorithm to use for autoscaling.

    Values:
      AUTOSCALING_ALGORITHM_UNKNOWN: The algorithm is unknown, or unspecified.
      AUTOSCALING_ALGORITHM_NONE: Disable autoscaling.
      AUTOSCALING_ALGORITHM_BASIC: Increase worker count over time to reduce
        job execution time.
    """
    AUTOSCALING_ALGORITHM_UNKNOWN = 0
    AUTOSCALING_ALGORITHM_NONE = 1
    AUTOSCALING_ALGORITHM_BASIC = 2
  algorithm = _messages.EnumField('AlgorithmValueValuesEnum', 1)
  maxNumWorkers = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class BigQueryIODetails(_messages.Message):
  r"""Metadata for a BigQuery connector used by the job.

  Fields:
    dataset: Dataset accessed in the connection.
    projectId: Project accessed in the connection.
    query: Query used to access data in the connection.
    table: Table accessed in the connection.
  """
  dataset = _messages.StringField(1)
  projectId = _messages.StringField(2)
  query = _messages.StringField(3)
  table = _messages.StringField(4)
class BigTableIODetails(_messages.Message):
  r"""Metadata for a BigTable connector used by the job.

  Fields:
    instanceId: InstanceId accessed in the connection.
    projectId: ProjectId accessed in the connection.
    tableId: TableId accessed in the connection.
  """
  instanceId = _messages.StringField(1)
  projectId = _messages.StringField(2)
  tableId = _messages.StringField(3)
class CPUTime(_messages.Message):
  r"""Modeled after information exposed by /proc/stat.

  Fields:
    rate: Average CPU utilization rate (% non-idle cpu / second) since
      previous sample.
    timestamp: Timestamp of the measurement.
    totalMs: Total active CPU time across all cores (ie., non-idle) in
      milliseconds since start-up.
  """
  rate = _messages.FloatField(1)
  timestamp = _messages.StringField(2)
  totalMs = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class ComponentSource(_messages.Message):
  r"""Description of an interstitial value between transforms in an execution
  stage.

  Fields:
    name: Dataflow service generated name for this source.
    originalTransformOrCollection: User name for the original user transform
      or collection with which this source is most closely associated.
    userName: Human-readable name for this transform; may be user or system
      generated.
  """
  name = _messages.StringField(1)
  originalTransformOrCollection = _messages.StringField(2)
  userName = _messages.StringField(3)
class ComponentTransform(_messages.Message):
  r"""Description of a transform executed as part of an execution stage.

  Fields:
    name: Dataflow service generated name for this source.
    originalTransform: User name for the original user transform with which
      this transform is most closely associated.
    userName: Human-readable name for this transform; may be user or system
      generated.
  """
  name = _messages.StringField(1)
  originalTransform = _messages.StringField(2)
  userName = _messages.StringField(3)
class ComputationTopology(_messages.Message):
  r"""All configuration data for a particular Computation.

  Fields:
    computationId: The ID of the computation.
    inputs: The inputs to the computation.
    keyRanges: The key ranges processed by the computation.
    outputs: The outputs from the computation.
    stateFamilies: The state family values.
    systemStageName: The system stage name.
  """
  computationId = _messages.StringField(1)
  inputs = _messages.MessageField('StreamLocation', 2, repeated=True)
  keyRanges = _messages.MessageField('KeyRangeLocation', 3, repeated=True)
  outputs = _messages.MessageField('StreamLocation', 4, repeated=True)
  stateFamilies = _messages.MessageField('StateFamilyConfig', 5, repeated=True)
  systemStageName = _messages.StringField(6)
class ConcatPosition(_messages.Message):
  r"""A position that encapsulates an inner position and an index for the
  inner position. A ConcatPosition can be used by a reader of a source that
  encapsulates a set of other sources.

  Fields:
    index: Index of the inner source.
    position: Position within the inner source.
  """
  index = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  position = _messages.MessageField('Position', 2)
class CounterMetadata(_messages.Message):
  r"""CounterMetadata includes all static non-name non-value counter
  attributes.

  Enums:
    KindValueValuesEnum: Counter aggregation kind.
    StandardUnitsValueValuesEnum: System defined Units, see above enum.

  Fields:
    description: Human-readable description of the counter semantics.
    kind: Counter aggregation kind.
    otherUnits: A string referring to the unit type.
    standardUnits: System defined Units, see above enum.
  """
  class KindValueValuesEnum(_messages.Enum):
    r"""Counter aggregation kind.

    Values:
      INVALID: Counter aggregation kind was not set.
      SUM: Aggregated value is the sum of all contributed values.
      MAX: Aggregated value is the max of all contributed values.
      MIN: Aggregated value is the min of all contributed values.
      MEAN: Aggregated value is the mean of all contributed values.
      OR: Aggregated value represents the logical 'or' of all contributed
        values.
      AND: Aggregated value represents the logical 'and' of all contributed
        values.
      SET: Aggregated value is a set of unique contributed values.
      DISTRIBUTION: Aggregated value captures statistics about a distribution.
      LATEST_VALUE: Aggregated value tracks the latest value of a variable.
    """
    INVALID = 0
    SUM = 1
    MAX = 2
    MIN = 3
    MEAN = 4
    OR = 5
    AND = 6
    SET = 7
    DISTRIBUTION = 8
    LATEST_VALUE = 9
  class StandardUnitsValueValuesEnum(_messages.Enum):
    r"""System defined Units, see above enum.

    Values:
      BYTES: Counter returns a value in bytes.
      BYTES_PER_SEC: Counter returns a value in bytes per second.
      MILLISECONDS: Counter returns a value in milliseconds.
      MICROSECONDS: Counter returns a value in microseconds.
      NANOSECONDS: Counter returns a value in nanoseconds.
      TIMESTAMP_MSEC: Counter returns a timestamp in milliseconds.
      TIMESTAMP_USEC: Counter returns a timestamp in microseconds.
      TIMESTAMP_NSEC: Counter returns a timestamp in nanoseconds.
    """
    BYTES = 0
    BYTES_PER_SEC = 1
    MILLISECONDS = 2
    MICROSECONDS = 3
    NANOSECONDS = 4
    TIMESTAMP_MSEC = 5
    TIMESTAMP_USEC = 6
    TIMESTAMP_NSEC = 7
  description = _messages.StringField(1)
  kind = _messages.EnumField('KindValueValuesEnum', 2)
  otherUnits = _messages.StringField(3)
  standardUnits = _messages.EnumField('StandardUnitsValueValuesEnum', 4)
class CounterStructuredName(_messages.Message):
  r"""Identifies a counter within a per-job namespace. Counters whose
  structured names are the same get merged into a single value for the job.

  Enums:
    OriginValueValuesEnum: One of the standard Origins defined above.
    PortionValueValuesEnum: Portion of this counter, either key or value.

  Fields:
    componentStepName: Name of the optimized step being executed by the
      workers.
    executionStepName: Name of the stage. An execution step contains multiple
      component steps.
    inputIndex: Index of an input collection that's being read from/written to
      as a side input. The index identifies a step's side inputs starting by 1
      (e.g. the first side input has input_index 1, the third has input_index
      3). Side inputs are identified by a pair of (original_step_name,
      input_index). This field helps uniquely identify them.
    name: Counter name. Not necessarily globally-unique, but unique within the
      context of the other fields. Required.
    origin: One of the standard Origins defined above.
    originNamespace: A string containing a more specific namespace of the
      counter's origin.
    originalRequestingStepName: The step name requesting an operation, such as
      GBK. I.e. the ParDo causing a read/write from shuffle to occur, or a
      read from side inputs.
    originalStepName: System generated name of the original step in the user's
      graph, before optimization.
    portion: Portion of this counter, either key or value.
    workerId: ID of a particular worker.
  """
  class OriginValueValuesEnum(_messages.Enum):
    r"""One of the standard Origins defined above.

    Values:
      SYSTEM: Counter was created by the Dataflow system.
      USER: Counter was created by the user.
    """
    SYSTEM = 0
    USER = 1
  class PortionValueValuesEnum(_messages.Enum):
    r"""Portion of this counter, either key or value.

    Values:
      ALL: Counter portion has not been set.
      KEY: Counter reports a key.
      VALUE: Counter reports a value.
    """
    ALL = 0
    KEY = 1
    VALUE = 2
  componentStepName = _messages.StringField(1)
  executionStepName = _messages.StringField(2)
  inputIndex = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  name = _messages.StringField(4)
  origin = _messages.EnumField('OriginValueValuesEnum', 5)
  originNamespace = _messages.StringField(6)
  originalRequestingStepName = _messages.StringField(7)
  originalStepName = _messages.StringField(8)
  portion = _messages.EnumField('PortionValueValuesEnum', 9)
  workerId = _messages.StringField(10)
class CounterStructuredNameAndMetadata(_messages.Message):
  r"""A single message which encapsulates structured name and metadata for a
  given counter.

  Fields:
    metadata: Metadata associated with a counter
    name: Structured name of the counter.
  """
  metadata = _messages.MessageField('CounterMetadata', 1)
  name = _messages.MessageField('CounterStructuredName', 2)
class CounterUpdate(_messages.Message):
  r"""An update to a Counter sent from a worker.

  Fields:
    boolean: Boolean value for And, Or.
    cumulative: True if this counter is reported as the total cumulative
      aggregate value accumulated since the worker started working on this
      WorkItem. By default this is false, indicating that this counter is
      reported as a delta.
    distribution: Distribution data
    floatingPoint: Floating point value for Sum, Max, Min.
    floatingPointList: List of floating point numbers, for Set.
    floatingPointMean: Floating point mean aggregation value for Mean.
    integer: Integer value for Sum, Max, Min.
    integerGauge: Gauge data
    integerList: List of integers, for Set.
    integerMean: Integer mean aggregation value for Mean.
    internal: Value for internally-defined counters used by the Dataflow
      service.
    nameAndKind: Counter name and aggregation type.
    shortId: The service-generated short identifier for this counter. The
      short_id -> (name, metadata) mapping is constant for the lifetime of a
      job.
    stringList: List of strings, for Set.
    structuredNameAndMetadata: Counter structured name and metadata.
  """
  boolean = _messages.BooleanField(1)
  cumulative = _messages.BooleanField(2)
  distribution = _messages.MessageField('DistributionUpdate', 3)
  floatingPoint = _messages.FloatField(4)
  floatingPointList = _messages.MessageField('FloatingPointList', 5)
  floatingPointMean = _messages.MessageField('FloatingPointMean', 6)
  integer = _messages.MessageField('SplitInt64', 7)
  integerGauge = _messages.MessageField('IntegerGauge', 8)
  integerList = _messages.MessageField('IntegerList', 9)
  integerMean = _messages.MessageField('IntegerMean', 10)
  internal = _messages.MessageField('extra_types.JsonValue', 11)
  nameAndKind = _messages.MessageField('NameAndKind', 12)
  shortId = _messages.IntegerField(13)
  stringList = _messages.MessageField('StringList', 14)
  structuredNameAndMetadata = _messages.MessageField('CounterStructuredNameAndMetadata', 15)
class CreateJobFromTemplateRequest(_messages.Message):
  r"""A request to create a Cloud Dataflow job from a template.

  Messages:
    ParametersValue: The runtime parameters to pass to the job.

  Fields:
    environment: The runtime environment for the job.
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with
      `gs://`.
    jobName: Required. The job name to use for the created job.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    parameters: The runtime parameters to pass to the job.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParametersValue(_messages.Message):
    r"""The runtime parameters to pass to the job.

    Messages:
      AdditionalProperty: An additional property for a ParametersValue object.

    Fields:
      additionalProperties: Additional properties of type ParametersValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ParametersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  environment = _messages.MessageField('RuntimeEnvironment', 1)
  gcsPath = _messages.StringField(2)
  jobName = _messages.StringField(3)
  location = _messages.StringField(4)
  parameters = _messages.MessageField('ParametersValue', 5)
class CustomSourceLocation(_messages.Message):
  r"""Identifies the location of a custom source.

  Fields:
    stateful: Whether this source is stateful.
  """
  stateful = _messages.BooleanField(1)
class DataDiskAssignment(_messages.Message):
  r"""Data disk assignment for a given VM instance.

  Fields:
    dataDisks: Mounted data disks. The order is important: a data disk's
      0-based index in this list defines which persistent directory the disk
      is mounted to, for example the list of {
      "myproject-1014-104817-4c2-harness-0-disk-0" }, {
      "myproject-1014-104817-4c2-harness-0-disk-1" }.
    vmInstance: VM instance name the data disks mounted to, for example
      "myproject-1014-104817-4c2-harness-0".
  """
  dataDisks = _messages.StringField(1, repeated=True)
  vmInstance = _messages.StringField(2)
class DataflowProjectsJobsAggregatedRequest(_messages.Message):
  r"""A DataflowProjectsJobsAggregatedRequest object.

  Enums:
    FilterValueValuesEnum: The kind of filter to use.
    ViewValueValuesEnum: Level of information requested in response. Default
      is `JOB_VIEW_SUMMARY`.

  Fields:
    filter: The kind of filter to use.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    pageSize: If there are many jobs, limit response to at most this many. The
      actual number of jobs returned will be the lesser of max_responses and
      an unspecified server-defined limit.
    pageToken: Set this to the 'next_page_token' field of a previous response
      to request additional results in a long list.
    projectId: The project which owns the jobs.
    view: Level of information requested in response. Default is
      `JOB_VIEW_SUMMARY`.
  """
  class FilterValueValuesEnum(_messages.Enum):
    r"""The kind of filter to use.

    Values:
      UNKNOWN: <no description>
      ALL: <no description>
      TERMINATED: <no description>
      ACTIVE: <no description>
    """
    UNKNOWN = 0
    ALL = 1
    TERMINATED = 2
    ACTIVE = 3
  class ViewValueValuesEnum(_messages.Enum):
    r"""Level of information requested in response. Default is
    `JOB_VIEW_SUMMARY`.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3
  filter = _messages.EnumField('FilterValueValuesEnum', 1)
  location = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  projectId = _messages.StringField(5, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsJobsCreateRequest(_messages.Message):
  r"""A DataflowProjectsJobsCreateRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    job: A Job resource to be passed as the request body.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    replaceJobId: Deprecated. This field is now in the Job message.
    view: The level of information requested in response.
  """
  class ViewValueValuesEnum(_messages.Enum):
    r"""The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3
  job = _messages.MessageField('Job', 1)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  replaceJobId = _messages.StringField(4)
  view = _messages.EnumField('ViewValueValuesEnum', 5)
class DataflowProjectsJobsDebugGetConfigRequest(_messages.Message):
  r"""A DataflowProjectsJobsDebugGetConfigRequest object.

  Fields:
    getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
      the request body.
    jobId: The job id.
    projectId: The project id.
  """
  getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
  jobId = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
class DataflowProjectsJobsDebugSendCaptureRequest(_messages.Message):
  r"""A DataflowProjectsJobsDebugSendCaptureRequest object.

  Fields:
    jobId: The job id.
    projectId: The project id.
    sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
      as the request body.
  """
  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 3)
class DataflowProjectsJobsGetMetricsRequest(_messages.Message):
  r"""A DataflowProjectsJobsGetMetricsRequest object.

  Fields:
    jobId: The job to get messages for.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    projectId: A project id.
    startTime: Return only metric data that has changed since this time.
      Default is to return all information about all metrics for the job.
  """
  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  startTime = _messages.StringField(4)
class DataflowProjectsJobsGetRequest(_messages.Message):
  r"""A DataflowProjectsJobsGetRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    jobId: The job ID.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    view: The level of information requested in response.
  """
  class ViewValueValuesEnum(_messages.Enum):
    r"""The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3
  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsJobsListRequest(_messages.Message):
  r"""A DataflowProjectsJobsListRequest object.

  Enums:
    FilterValueValuesEnum: The kind of filter to use.
    ViewValueValuesEnum: Level of information requested in response. Default
      is `JOB_VIEW_SUMMARY`.

  Fields:
    filter: The kind of filter to use.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    pageSize: If there are many jobs, limit response to at most this many. The
      actual number of jobs returned will be the lesser of max_responses and
      an unspecified server-defined limit.
    pageToken: Set this to the 'next_page_token' field of a previous response
      to request additional results in a long list.
    projectId: The project which owns the jobs.
    view: Level of information requested in response. Default is
      `JOB_VIEW_SUMMARY`.
  """
  class FilterValueValuesEnum(_messages.Enum):
    r"""The kind of filter to use.

    Values:
      UNKNOWN: <no description>
      ALL: <no description>
      TERMINATED: <no description>
      ACTIVE: <no description>
    """
    UNKNOWN = 0
    ALL = 1
    TERMINATED = 2
    ACTIVE = 3
  class ViewValueValuesEnum(_messages.Enum):
    r"""Level of information requested in response. Default is
    `JOB_VIEW_SUMMARY`.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3
  filter = _messages.EnumField('FilterValueValuesEnum', 1)
  location = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  projectId = _messages.StringField(5, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsJobsMessagesListRequest(_messages.Message):
  r"""A DataflowProjectsJobsMessagesListRequest object.

  Enums:
    MinimumImportanceValueValuesEnum: Filter to only get messages with
      importance >= level

  Fields:
    endTime: Return only messages with timestamps < end_time. The default is
      now (i.e. return up to the latest messages available).
    jobId: The job to get messages about.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    minimumImportance: Filter to only get messages with importance >= level
    pageSize: If specified, determines the maximum number of messages to
      return. If unspecified, the service may choose an appropriate default,
      or may return an arbitrarily large number of results.
    pageToken: If supplied, this should be the value of next_page_token
      returned by an earlier call. This will cause the next page of results to
      be returned.
    projectId: A project id.
    startTime: If specified, return only messages with timestamps >=
      start_time. The default is the job creation time (i.e. beginning of
      messages).
  """
  class MinimumImportanceValueValuesEnum(_messages.Enum):
    r"""Filter to only get messages with importance >= level

    Values:
      JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
      JOB_MESSAGE_DEBUG: <no description>
      JOB_MESSAGE_DETAILED: <no description>
      JOB_MESSAGE_BASIC: <no description>
      JOB_MESSAGE_WARNING: <no description>
      JOB_MESSAGE_ERROR: <no description>
    """
    JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
    JOB_MESSAGE_DEBUG = 1
    JOB_MESSAGE_DETAILED = 2
    JOB_MESSAGE_BASIC = 3
    JOB_MESSAGE_WARNING = 4
    JOB_MESSAGE_ERROR = 5
  endTime = _messages.StringField(1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3)
  minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
  pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(6)
  projectId = _messages.StringField(7, required=True)
  startTime = _messages.StringField(8)
class DataflowProjectsJobsUpdateRequest(_messages.Message):
  r"""A DataflowProjectsJobsUpdateRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    jobId: The job ID.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
  """
  job = _messages.MessageField('Job', 1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3)
  projectId = _messages.StringField(4, required=True)
class DataflowProjectsJobsWorkItemsLeaseRequest(_messages.Message):
  r"""A DataflowProjectsJobsWorkItemsLeaseRequest object.

  Fields:
    jobId: Identifies the workflow job this worker belongs to.
    leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as the
      request body.
    projectId: Identifies the project this worker belongs to.
  """
  jobId = _messages.StringField(1, required=True)
  leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
  projectId = _messages.StringField(3, required=True)
class DataflowProjectsJobsWorkItemsReportStatusRequest(_messages.Message):
  r"""A DataflowProjectsJobsWorkItemsReportStatusRequest object.

  Fields:
    jobId: The job which the WorkItem is part of.
    projectId: The project which owns the WorkItem's job.
    reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to be
      passed as the request body.
  """

  # jobId and projectId are required identifiers; the request body travels in
  # reportWorkItemStatusRequest.
  jobId = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 3)
class DataflowProjectsLocationsJobsCreateRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsCreateRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    job: A Job resource to be passed as the request body.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    replaceJobId: Deprecated. This field is now in the Job message.
    view: The level of information requested in response.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  # Unlike the non-locations variant, location is a required path parameter
  # here (required=True).
  job = _messages.MessageField('Job', 1)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  replaceJobId = _messages.StringField(4)
  view = _messages.EnumField('ViewValueValuesEnum', 5)
class DataflowProjectsLocationsJobsDebugGetConfigRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsDebugGetConfigRequest object.

  Fields:
    getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
      the request body.
    jobId: The job id.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    projectId: The project id.
  """

  # jobId, location and projectId are required; the body travels in
  # getDebugConfigRequest.
  getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsDebugSendCaptureRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsDebugSendCaptureRequest object.

  Fields:
    jobId: The job id.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    projectId: The project id.
    sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
      as the request body.
  """

  # jobId, location and projectId are required; the body travels in
  # sendDebugCaptureRequest.
  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 4)
class DataflowProjectsLocationsJobsGetMetricsRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsGetMetricsRequest object.

  Fields:
    jobId: The job to get messages for.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    projectId: A project id.
    startTime: Return only metric data that has changed since this time.
      Default is to return all information about all metrics for the job.
  """

  # startTime is an optional timestamp string; all identifiers are required.
  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  startTime = _messages.StringField(4)
class DataflowProjectsLocationsJobsGetRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsGetRequest object.

  Enums:
    ViewValueValuesEnum: The level of information requested in response.

  Fields:
    jobId: The job ID.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    view: The level of information requested in response.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""The level of information requested in response.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  # view is optional; when unset the service chooses the response detail level.
  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsLocationsJobsListRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsListRequest object.

  Enums:
    FilterValueValuesEnum: The kind of filter to use.
    ViewValueValuesEnum: Level of information requested in response. Default
      is `JOB_VIEW_SUMMARY`.

  Fields:
    filter: The kind of filter to use.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    pageSize: If there are many jobs, limit response to at most this many. The
      actual number of jobs returned will be the lesser of max_responses and
      an unspecified server-defined limit.
    pageToken: Set this to the 'next_page_token' field of a previous response
      to request additional results in a long list.
    projectId: The project which owns the jobs.
    view: Level of information requested in response. Default is
      `JOB_VIEW_SUMMARY`.
  """

  class FilterValueValuesEnum(_messages.Enum):
    r"""The kind of filter to use.

    Values:
      UNKNOWN: <no description>
      ALL: <no description>
      TERMINATED: <no description>
      ACTIVE: <no description>
    """
    UNKNOWN = 0
    ALL = 1
    TERMINATED = 2
    ACTIVE = 3

  class ViewValueValuesEnum(_messages.Enum):
    r"""Level of information requested in response. Default is
    `JOB_VIEW_SUMMARY`.

    Values:
      JOB_VIEW_UNKNOWN: <no description>
      JOB_VIEW_SUMMARY: <no description>
      JOB_VIEW_ALL: <no description>
      JOB_VIEW_DESCRIPTION: <no description>
    """
    JOB_VIEW_UNKNOWN = 0
    JOB_VIEW_SUMMARY = 1
    JOB_VIEW_ALL = 2
    JOB_VIEW_DESCRIPTION = 3

  # pageSize/pageToken implement standard list pagination; filter and view are
  # optional enum-valued query parameters.
  filter = _messages.EnumField('FilterValueValuesEnum', 1)
  location = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  projectId = _messages.StringField(5, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsLocationsJobsMessagesListRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsMessagesListRequest object.

  Enums:
    MinimumImportanceValueValuesEnum: Filter to only get messages with
      importance >= level

  Fields:
    endTime: Return only messages with timestamps < end_time. The default is
      now (i.e. return up to the latest messages available).
    jobId: The job to get messages about.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    minimumImportance: Filter to only get messages with importance >= level
    pageSize: If specified, determines the maximum number of messages to
      return.  If unspecified, the service may choose an appropriate default,
      or may return an arbitrarily large number of results.
    pageToken: If supplied, this should be the value of next_page_token
      returned by an earlier call. This will cause the next page of results to
      be returned.
    projectId: A project id.
    startTime: If specified, return only messages with timestamps >=
      start_time. The default is the job creation time (i.e. beginning of
      messages).
  """

  class MinimumImportanceValueValuesEnum(_messages.Enum):
    r"""Filter to only get messages with importance >= level

    Values:
      JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
      JOB_MESSAGE_DEBUG: <no description>
      JOB_MESSAGE_DETAILED: <no description>
      JOB_MESSAGE_BASIC: <no description>
      JOB_MESSAGE_WARNING: <no description>
      JOB_MESSAGE_ERROR: <no description>
    """
    JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
    JOB_MESSAGE_DEBUG = 1
    JOB_MESSAGE_DETAILED = 2
    JOB_MESSAGE_BASIC = 3
    JOB_MESSAGE_WARNING = 4
    JOB_MESSAGE_ERROR = 5

  # endTime/startTime are timestamp strings bounding the message window;
  # location is required in this locations-scoped variant.
  endTime = _messages.StringField(1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3, required=True)
  minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
  pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(6)
  projectId = _messages.StringField(7, required=True)
  startTime = _messages.StringField(8)
class DataflowProjectsLocationsJobsUpdateRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsUpdateRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    jobId: The job ID.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    projectId: The ID of the Cloud Platform project that the job belongs to.
  """

  # Locations-scoped update: location is required here, unlike
  # DataflowProjectsJobsUpdateRequest where it is optional.
  job = _messages.MessageField('Job', 1)
  jobId = _messages.StringField(2, required=True)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsWorkItemsLeaseRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsWorkItemsLeaseRequest object.

  Fields:
    jobId: Identifies the workflow job this worker belongs to.
    leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as the
      request body.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the WorkItem's job.
    projectId: Identifies the project this worker belongs to.
  """

  # All identifiers are required; the request body travels in
  # leaseWorkItemRequest.
  jobId = _messages.StringField(1, required=True)
  leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
  location = _messages.StringField(3, required=True)
  projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsWorkItemsReportStatusRequest(_messages.Message):
  r"""A DataflowProjectsLocationsJobsWorkItemsReportStatusRequest object.

  Fields:
    jobId: The job which the WorkItem is part of.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the WorkItem's job.
    projectId: The project which owns the WorkItem's job.
    reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to be
      passed as the request body.
  """

  # All identifiers are required; the request body travels in
  # reportWorkItemStatusRequest.
  jobId = _messages.StringField(1, required=True)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 4)
class DataflowProjectsLocationsSqlValidateRequest(_messages.Message):
  r"""A DataflowProjectsLocationsSqlValidateRequest object.

  Fields:
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    query: The sql query to validate.
  """

  # query holds the SQL text to validate; location and projectId route the
  # request.
  location = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  query = _messages.StringField(3)
class DataflowProjectsLocationsTemplatesCreateRequest(_messages.Message):
  r"""A DataflowProjectsLocationsTemplatesCreateRequest object.

  Fields:
    createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
      be passed as the request body.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
  """

  # The request body travels in createJobFromTemplateRequest; location is
  # required in this locations-scoped variant.
  createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
class DataflowProjectsLocationsTemplatesGetRequest(_messages.Message):
  r"""A DataflowProjectsLocationsTemplatesGetRequest object.

  Enums:
    ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.

  Fields:
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    view: The view to retrieve. Defaults to METADATA_ONLY.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""The view to retrieve. Defaults to METADATA_ONLY.

    Values:
      METADATA_ONLY: <no description>
    """
    METADATA_ONLY = 0

  # NOTE(review): gcsPath is documented as required but declared without
  # required=True — the requirement is presumably enforced server-side.
  gcsPath = _messages.StringField(1)
  location = _messages.StringField(2, required=True)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsLocationsTemplatesLaunchRequest(_messages.Message):
  r"""A DataflowProjectsLocationsTemplatesLaunchRequest object.

  Fields:
    dynamicTemplate_gcsPath: Path to dynamic template spec file on GCS. The
      file must be a Json serialized DynamicTemplateFileSpec object.
    dynamicTemplate_stagingLocation: Cloud Storage path for staging
      dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.
    gcsPath: A Cloud Storage path to the template from which to create the
      job. Must be valid Cloud Storage URL, beginning with 'gs://'.
    launchTemplateParameters: A LaunchTemplateParameters resource to be passed
      as the request body.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    validateOnly: If true, the request is validated but not actually executed.
      Defaults to false.
  """

  # Either gcsPath or the dynamicTemplate_* pair identifies the template;
  # validateOnly turns the call into a dry run.
  dynamicTemplate_gcsPath = _messages.StringField(1)
  dynamicTemplate_stagingLocation = _messages.StringField(2)
  gcsPath = _messages.StringField(3)
  launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 4)
  location = _messages.StringField(5, required=True)
  projectId = _messages.StringField(6, required=True)
  validateOnly = _messages.BooleanField(7)
class DataflowProjectsLocationsWorkerMessagesRequest(_messages.Message):
  r"""A DataflowProjectsLocationsWorkerMessagesRequest object.

  Fields:
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job.
    projectId: The project to send the WorkerMessages to.
    sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
      passed as the request body.
  """

  # location and projectId route the request; the body travels in
  # sendWorkerMessagesRequest.
  location = _messages.StringField(1, required=True)
  projectId = _messages.StringField(2, required=True)
  sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 3)
class DataflowProjectsTemplatesCreateRequest(_messages.Message):
  r"""A DataflowProjectsTemplatesCreateRequest object.

  Fields:
    createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
      be passed as the request body.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
  """

  # Non-locations variant: no location field; the body travels in
  # createJobFromTemplateRequest.
  createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
  projectId = _messages.StringField(2, required=True)
class DataflowProjectsTemplatesGetRequest(_messages.Message):
  r"""A DataflowProjectsTemplatesGetRequest object.

  Enums:
    ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.

  Fields:
    gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    view: The view to retrieve. Defaults to METADATA_ONLY.
  """

  class ViewValueValuesEnum(_messages.Enum):
    r"""The view to retrieve. Defaults to METADATA_ONLY.

    Values:
      METADATA_ONLY: <no description>
    """
    METADATA_ONLY = 0

  # location is optional in this non-locations variant (contrast with
  # DataflowProjectsLocationsTemplatesGetRequest, where it is required).
  gcsPath = _messages.StringField(1)
  location = _messages.StringField(2)
  projectId = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsTemplatesLaunchRequest(_messages.Message):
  r"""A DataflowProjectsTemplatesLaunchRequest object.

  Fields:
    dynamicTemplate_gcsPath: Path to dynamic template spec file on GCS. The
      file must be a Json serialized DynamicTemplateFileSpec object.
    dynamicTemplate_stagingLocation: Cloud Storage path for staging
      dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.
    gcsPath: A Cloud Storage path to the template from which to create the
      job. Must be valid Cloud Storage URL, beginning with 'gs://'.
    launchTemplateParameters: A LaunchTemplateParameters resource to be passed
      as the request body.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
      which to direct the request.
    projectId: Required. The ID of the Cloud Platform project that the job
      belongs to.
    validateOnly: If true, the request is validated but not actually executed.
      Defaults to false.
  """

  # Same shape as the locations-scoped launch request, except location is
  # optional here.
  dynamicTemplate_gcsPath = _messages.StringField(1)
  dynamicTemplate_stagingLocation = _messages.StringField(2)
  gcsPath = _messages.StringField(3)
  launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 4)
  location = _messages.StringField(5)
  projectId = _messages.StringField(6, required=True)
  validateOnly = _messages.BooleanField(7)
class DataflowProjectsWorkerMessagesRequest(_messages.Message):
  r"""A DataflowProjectsWorkerMessagesRequest object.

  Fields:
    projectId: The project to send the WorkerMessages to.
    sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
      passed as the request body.
  """

  # Non-locations variant: only projectId routes the request.
  projectId = _messages.StringField(1, required=True)
  sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 2)
class DatastoreIODetails(_messages.Message):
  r"""Metadata for a Datastore connector used by the job.

  Fields:
    namespace: Namespace used in the connection.
    projectId: ProjectId accessed in the connection.
  """

  # Both fields are optional descriptive metadata, not request routing.
  namespace = _messages.StringField(1)
  projectId = _messages.StringField(2)
class DerivedSource(_messages.Message):
  r"""Specification of one of the bundles produced as a result of splitting a
  Source (e.g. when executing a SourceSplitRequest, or when splitting an
  active task using WorkItemStatus.dynamic_source_split), relative to the
  source being split.

  Enums:
    DerivationModeValueValuesEnum: What source to base the produced source on
      (if any).

  Fields:
    derivationMode: What source to base the produced source on (if any).
    source: Specification of the source.
  """

  class DerivationModeValueValuesEnum(_messages.Enum):
    r"""What source to base the produced source on (if any).

    Values:
      SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
        unspecified.
      SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
        Source with no base.
      SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
        Source being split.
      SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on the
        base of the Source being split.
    """
    SOURCE_DERIVATION_MODE_UNKNOWN = 0
    SOURCE_DERIVATION_MODE_INDEPENDENT = 1
    SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
    SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3

  # derivationMode says how `source` relates to the source being split.
  derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
  source = _messages.MessageField('Source', 2)
class Disk(_messages.Message):
  r"""Describes the data disk used by a workflow job.

  Fields:
    diskType: Disk storage type, as defined by Google Compute Engine.  This
      must be a disk type appropriate to the project and zone in which the
      workers will run.  If unknown or unspecified, the service will attempt
      to choose a reasonable default.  For example, the standard persistent
      disk type is a resource name typically ending in "pd-standard".  If SSD
      persistent disks are available, the resource name typically ends with
      "pd-ssd".  The actual valid values are defined the Google Compute Engine
      API, not by the Cloud Dataflow API; consult the Google Compute Engine
      documentation for more information about determining the set of
      available disk types for a particular project and zone. Google Compute
      Engine Disk types are local to a particular project in a particular
      zone, and so the resource name will typically look something like this:
      compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-
      standard
    mountPoint: Directory in a VM where disk is mounted.
    sizeGb: Size of disk in GB.  If zero or unspecified, the service will
      attempt to choose a reasonable default.
    """

  # sizeGb of 0/unset means "service default" (see docstring).
  diskType = _messages.StringField(1)
  mountPoint = _messages.StringField(2)
  sizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class DisplayData(_messages.Message):
  r"""Data provided with a pipeline or transform to provide descriptive info.

  Fields:
    boolValue: Contains value if the data is of a boolean type.
    durationValue: Contains value if the data is of duration type.
    floatValue: Contains value if the data is of float type.
    int64Value: Contains value if the data is of int64 type.
    javaClassValue: Contains value if the data is of java class type.
    key: The key identifying the display data. This is intended to be used as
      a label for the display data when viewed in a dax monitoring system.
    label: An optional label to display in a dax UI for the element.
    namespace: The namespace for the key. This is usually a class name or
      programming language namespace (i.e. python module) which defines the
      display data. This allows a dax monitoring system to specially handle
      the data and perform custom rendering.
    shortStrValue: A possible additional shorter value to display. For example
      a java_class_name_value of com.mypackage.MyDoFn will be stored with
      MyDoFn as the short_str_value and com.mypackage.MyDoFn as the
      java_class_name value. short_str_value can be displayed and
      java_class_name_value will be displayed as a tooltip.
    strValue: Contains value if the data is of string type.
    timestampValue: Contains value if the data is of timestamp type.
    url: An optional full URL.
  """

  # Exactly one of the *Value fields is expected to carry the payload,
  # selected by the value's type (see per-field docs above).
  boolValue = _messages.BooleanField(1)
  durationValue = _messages.StringField(2)
  floatValue = _messages.FloatField(3, variant=_messages.Variant.FLOAT)
  int64Value = _messages.IntegerField(4)
  javaClassValue = _messages.StringField(5)
  key = _messages.StringField(6)
  label = _messages.StringField(7)
  namespace = _messages.StringField(8)
  shortStrValue = _messages.StringField(9)
  strValue = _messages.StringField(10)
  timestampValue = _messages.StringField(11)
  url = _messages.StringField(12)
class DistributionUpdate(_messages.Message):
  r"""A metric value representing a distribution.

  Fields:
    count: The count of the number of elements present in the distribution.
    histogram: (Optional) Histogram of value counts for the distribution.
    max: The maximum value present in the distribution.
    min: The minimum value present in the distribution.
    sum: Use an int64 since we'd prefer the added precision. If overflow is a
      common problem we can detect it and use an additional int64 or a double.
    sumOfSquares: Use a double since the sum of squares is likely to overflow
      int64.
  """

  # count/max/min/sum are SplitInt64 messages (64-bit values split across two
  # 32-bit halves); sumOfSquares alone is a plain float.
  count = _messages.MessageField('SplitInt64', 1)
  histogram = _messages.MessageField('Histogram', 2)
  max = _messages.MessageField('SplitInt64', 3)
  min = _messages.MessageField('SplitInt64', 4)
  sum = _messages.MessageField('SplitInt64', 5)
  sumOfSquares = _messages.FloatField(6)
class DynamicSourceSplit(_messages.Message):
  r"""When a task splits using WorkItemStatus.dynamic_source_split, this
  message describes the two parts of the split relative to the description of
  the current task's input.

  Fields:
    primary: Primary part (continued to be processed by worker). Specified
      relative to the previously-current source. Becomes current.
    residual: Residual part (returned to the pool of work). Specified relative
      to the previously-current source.
  """

  # Both halves of the split are DerivedSource messages.
  primary = _messages.MessageField('DerivedSource', 1)
  residual = _messages.MessageField('DerivedSource', 2)
class Environment(_messages.Message):
  r"""Describes the environment in which a Dataflow Job runs.

  Enums:
    FlexResourceSchedulingGoalValueValuesEnum: Which Flexible Resource
      Scheduling mode to run in.

  Messages:
    InternalExperimentsValue: Experimental settings.
    SdkPipelineOptionsValue: The Cloud Dataflow SDK pipeline options specified
      by the user. These options are passed through the service and are used
      to recreate the SDK pipeline options on the worker in a language
      agnostic and platform independent way.
    UserAgentValue: A description of the process that generated the request.
    VersionValue: A structure describing which components and their versions
      of the service are required in order to run the job.

  Fields:
    clusterManagerApiService: The type of cluster manager API to use.  If
      unknown or unspecified, the service will attempt to choose a reasonable
      default.  This should be in the form of the API service name, e.g.
      "compute.googleapis.com".
    dataset: The dataset for the current project where various workflow
      related tables are stored.  The supported resource type is:  Google
      BigQuery: bigquery.googleapis.com/{dataset}
    experiments: The list of experiments to enable.
    flexResourceSchedulingGoal: Which Flexible Resource Scheduling mode to run
      in.
    internalExperiments: Experimental settings.
    sdkPipelineOptions: The Cloud Dataflow SDK pipeline options specified by
      the user. These options are passed through the service and are used to
      recreate the SDK pipeline options on the worker in a language agnostic
      and platform independent way.
    serviceAccountEmail: Identity to run virtual machines as. Defaults to the
      default account.
    serviceKmsKeyName: If set, contains the Cloud KMS key identifier used to
      encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK).
      Format:
      projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
    tempStoragePrefix: The prefix of the resources the system should use for
      temporary storage.  The system will append the suffix "/temp-{JOBNAME}"
      to this resource prefix, where {JOBNAME} is the value of the job_name
      field.  The resulting bucket and object prefix is used as the prefix of
      the resources used to store temporary data needed during the job
      execution.  NOTE: This will override the value in taskrunner_settings.
      The supported resource type is:  Google Cloud Storage:
      storage.googleapis.com/{bucket}/{object}
      bucket.storage.googleapis.com/{object}
    userAgent: A description of the process that generated the request.
    version: A structure describing which components and their versions of the
      service are required in order to run the job.
    workerPools: The worker pools. At least one "harness" worker pool must be
      specified in order for the job to have workers.
    workerRegion: The Compute Engine region
      (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
      which worker processing should occur, e.g. "us-west1". Mutually
      exclusive with worker_zone. If neither worker_region nor worker_zone is
      specified, default to the control plane's region.
    workerZone: The Compute Engine zone (https://cloud.google.com/compute/docs
      /regions-zones/regions-zones) in which worker processing should occur,
      e.g. "us-west1-a". Mutually exclusive with worker_region. If neither
      worker_region nor worker_zone is specified, a zone in the control
      plane's region is chosen based on available capacity.
  """

  class FlexResourceSchedulingGoalValueValuesEnum(_messages.Enum):
    r"""Which Flexible Resource Scheduling mode to run in.

    Values:
      FLEXRS_UNSPECIFIED: Run in the default mode.
      FLEXRS_SPEED_OPTIMIZED: Optimize for lower execution time.
      FLEXRS_COST_OPTIMIZED: Optimize for lower cost.
    """
    FLEXRS_UNSPECIFIED = 0
    FLEXRS_SPEED_OPTIMIZED = 1
    FLEXRS_COST_OPTIMIZED = 2

  # The four *Value classes below are open-ended JSON objects: unrecognized
  # keys are captured via MapUnrecognizedFields into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class InternalExperimentsValue(_messages.Message):
    r"""Experimental settings.

    Messages:
      AdditionalProperty: An additional property for a
        InternalExperimentsValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a InternalExperimentsValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class SdkPipelineOptionsValue(_messages.Message):
    r"""The Cloud Dataflow SDK pipeline options specified by the user. These
    options are passed through the service and are used to recreate the SDK
    pipeline options on the worker in a language agnostic and platform
    independent way.

    Messages:
      AdditionalProperty: An additional property for a SdkPipelineOptionsValue
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SdkPipelineOptionsValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserAgentValue(_messages.Message):
    r"""A description of the process that generated the request.

    Messages:
      AdditionalProperty: An additional property for a UserAgentValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserAgentValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class VersionValue(_messages.Message):
    r"""A structure describing which components and their versions of the
    service are required in order to run the job.

    Messages:
      AdditionalProperty: An additional property for a VersionValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a VersionValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Top-level environment fields; numeric indexes are wire-format tags and
  # must stay stable across regenerations.
  clusterManagerApiService = _messages.StringField(1)
  dataset = _messages.StringField(2)
  experiments = _messages.StringField(3, repeated=True)
  flexResourceSchedulingGoal = _messages.EnumField('FlexResourceSchedulingGoalValueValuesEnum', 4)
  internalExperiments = _messages.MessageField('InternalExperimentsValue', 5)
  sdkPipelineOptions = _messages.MessageField('SdkPipelineOptionsValue', 6)
  serviceAccountEmail = _messages.StringField(7)
  serviceKmsKeyName = _messages.StringField(8)
  tempStoragePrefix = _messages.StringField(9)
  userAgent = _messages.MessageField('UserAgentValue', 10)
  version = _messages.MessageField('VersionValue', 11)
  workerPools = _messages.MessageField('WorkerPool', 12, repeated=True)
  workerRegion = _messages.StringField(13)
  workerZone = _messages.StringField(14)
class ExecutionStageState(_messages.Message):
  r"""A message describing the state of a particular execution stage.

  Enums:
    ExecutionStageStateValueValuesEnum: Executions stage states allow the same
      set of values as JobState.

  Fields:
    currentStateTime: The time at which the stage transitioned to this state.
    executionStageName: The name of the execution stage.
    executionStageState: Executions stage states allow the same set of values
      as JobState.
  """

  class ExecutionStageStateValueValuesEnum(_messages.Enum):
    r"""Executions stage states allow the same set of values as JobState.

    Values:
      JOB_STATE_UNKNOWN: The job's run state isn't specified.
      JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
        yet started to run.
      JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
        currently running.
      JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
        completed. This is a terminal job state. This state may be set by the
        Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
        may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
        not yet reached a terminal state.
      JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
        This is a terminal job state. This state may only be set by the Cloud
        Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
      JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
        been explicitly cancelled. This is a terminal job state. This state
        may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
        job has not yet reached another terminal state.
      JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
        successfully updated, meaning that this job was stopped and another
        job was started, inheriting state from this one. This is a terminal
        job state. This state may only be set by the Cloud Dataflow service,
        and only as a transition from `JOB_STATE_RUNNING`.
      JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
        the process of draining. A draining job has stopped pulling from its
        input sources and is processing any data that remains in-flight. This
        state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
        transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
        transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
        `JOB_STATE_FAILED`.
      JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
        drained. A drained job terminated by stopping pulling from its input
        sources and processing any data that remained in-flight when draining
        was requested. This state is a terminal state, may only be set by the
        Cloud Dataflow service, and only as a transition from
        `JOB_STATE_DRAINING`.
      JOB_STATE_PENDING: `JOB_STATE_PENDING` indicates that the job has been
        created but is not yet running. Jobs that are pending may only
        transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
      JOB_STATE_CANCELLING: `JOB_STATE_CANCELLING` indicates that the job has
        been explicitly cancelled and is in the process of stopping. Jobs
        that are cancelling may only transition to `JOB_STATE_CANCELLED` or
        `JOB_STATE_FAILED`.
      JOB_STATE_QUEUED: `JOB_STATE_QUEUED` indicates that the job has been
        created but is being delayed until launch. Jobs that are queued may
        only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`.
    """
    JOB_STATE_UNKNOWN = 0
    JOB_STATE_STOPPED = 1
    JOB_STATE_RUNNING = 2
    JOB_STATE_DONE = 3
    JOB_STATE_FAILED = 4
    JOB_STATE_CANCELLED = 5
    JOB_STATE_UPDATED = 6
    JOB_STATE_DRAINING = 7
    JOB_STATE_DRAINED = 8
    JOB_STATE_PENDING = 9
    JOB_STATE_CANCELLING = 10
    JOB_STATE_QUEUED = 11

  currentStateTime = _messages.StringField(1)
  executionStageName = _messages.StringField(2)
  executionStageState = _messages.EnumField('ExecutionStageStateValueValuesEnum', 3)
class ExecutionStageSummary(_messages.Message):
  r"""Description of the composing transforms, names/ids, and input/outputs of
  a stage of execution. Some composing transforms and sources may have been
  generated by the Dataflow service during execution planning.

  Enums:
    KindValueValuesEnum: Type of transform this stage is executing.

  Fields:
    componentSource: Collections produced and consumed by component transforms
      of this stage.
    componentTransform: Transforms that comprise this execution stage.
    id: Dataflow service generated id for this stage.
    inputSource: Input sources for this stage.
    kind: Type of transform this stage is executing.
    name: Dataflow service generated name for this stage.
    outputSource: Output sources for this stage.
  """

  class KindValueValuesEnum(_messages.Enum):
    r"""Type of transform this stage is executing.

    Values:
      UNKNOWN_KIND: Unrecognized transform type.
      PAR_DO_KIND: ParDo transform.
      GROUP_BY_KEY_KIND: Group By Key transform.
      FLATTEN_KIND: Flatten transform.
      READ_KIND: Read transform.
      WRITE_KIND: Write transform.
      CONSTANT_KIND: Constructs from a constant value, such as with Create.of.
      SINGLETON_KIND: Creates a Singleton view of a collection.
      SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a
        GroupByKey.
    """
    UNKNOWN_KIND = 0
    PAR_DO_KIND = 1
    GROUP_BY_KEY_KIND = 2
    FLATTEN_KIND = 3
    READ_KIND = 4
    WRITE_KIND = 5
    CONSTANT_KIND = 6
    SINGLETON_KIND = 7
    SHUFFLE_KIND = 8

  componentSource = _messages.MessageField('ComponentSource', 1, repeated=True)
  componentTransform = _messages.MessageField('ComponentTransform', 2, repeated=True)
  id = _messages.StringField(3)
  inputSource = _messages.MessageField('StageSource', 4, repeated=True)
  kind = _messages.EnumField('KindValueValuesEnum', 5)
  name = _messages.StringField(6)
  outputSource = _messages.MessageField('StageSource', 7, repeated=True)
class FailedLocation(_messages.Message):
  r"""Indicates which [regional endpoint]
  (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) failed
  to respond to a request for data.

  Fields:
    name: The name of the [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that failed to respond.
  """

  name = _messages.StringField(1)
class FileIODetails(_messages.Message):
  r"""Metadata for a File connector used by the job.

  Fields:
    filePattern: File Pattern used to access files by the connector.
  """

  filePattern = _messages.StringField(1)
class FlattenInstruction(_messages.Message):
  r"""An instruction that copies its inputs (zero or more) to its (single)
  output.

  Fields:
    inputs: Describes the inputs to the flatten instruction.
  """

  inputs = _messages.MessageField('InstructionInput', 1, repeated=True)
class FloatingPointList(_messages.Message):
  r"""A metric value representing a list of floating point numbers.

  Fields:
    elements: Elements of the list.
  """

  elements = _messages.FloatField(1, repeated=True)
class FloatingPointMean(_messages.Message):
  r"""A representation of a floating point mean metric contribution.

  Fields:
    count: The number of values being aggregated.
    sum: The sum of all values being aggregated.
  """

  # count is a SplitInt64 so 64-bit counts survive JSON number limits.
  count = _messages.MessageField('SplitInt64', 1)
  sum = _messages.FloatField(2)
class GetDebugConfigRequest(_messages.Message):
  r"""Request to get updated debug configuration for component.

  Fields:
    componentId: The internal component id for which debug configuration is
      requested.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    workerId: The worker id, i.e., VM hostname.
  """

  componentId = _messages.StringField(1)
  location = _messages.StringField(2)
  workerId = _messages.StringField(3)
class GetDebugConfigResponse(_messages.Message):
  r"""Response to a get debug configuration request.

  Fields:
    config: The encoded debug configuration for the requested component.
  """

  config = _messages.StringField(1)
class GetTemplateResponse(_messages.Message):
  r"""The response to a GetTemplate request.

  Fields:
    metadata: The template metadata describing the template name, available
      parameters, etc.
    status: The status of the get template request. Any problems with the
      request will be indicated in the error_details.
  """

  metadata = _messages.MessageField('TemplateMetadata', 1)
  status = _messages.MessageField('Status', 2)
class Histogram(_messages.Message):
  r"""Histogram of value counts for a distribution. Buckets have an inclusive
  lower bound and exclusive upper bound and use "1,2,5 bucketing": The first
  bucket range is from [0,1) and all subsequent bucket boundaries are powers
  of ten multiplied by 1, 2, or 5. Thus, bucket boundaries are 0, 1, 2, 5, 10,
  20, 50, 100, 200, 500, 1000, ... Negative values are not supported.

  Fields:
    bucketCounts: Counts of values in each bucket. For efficiency, prefix and
      trailing buckets with count = 0 are elided. Buckets can store the full
      range of values of an unsigned long, with ULLONG_MAX falling into the
      59th bucket with range [1e19, 2e19).
    firstBucketOffset: Starting index of first stored bucket. The non-
      inclusive upper-bound of the ith bucket is given by:
      pow(10,(i-first_bucket_offset)/3) * (1,2,5)[(i-first_bucket_offset)%3]
  """

  bucketCounts = _messages.IntegerField(1, repeated=True)
  # INT32 variant: the offset is an index, not a 64-bit quantity.
  firstBucketOffset = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class HotKeyDetection(_messages.Message):
  r"""Proto describing a hot key detected on a given WorkItem.

  Fields:
    hotKeyAge: The age of the hot key measured from when it was first
      detected.
    systemName: System-defined name of the step containing this hot key.
      Unique across the workflow.
    userStepName: User-provided name of the step that contains this hot key.
  """

  hotKeyAge = _messages.StringField(1)
  systemName = _messages.StringField(2)
  userStepName = _messages.StringField(3)
class InstructionInput(_messages.Message):
  r"""An input of an instruction, as a reference to an output of a producer
  instruction.

  Fields:
    outputNum: The output index (origin zero) within the producer.
    producerInstructionIndex: The index (origin zero) of the parallel
      instruction that produces the output to be consumed by this input. This
      index is relative to the list of instructions in this input's
      instruction's containing MapTask.
  """

  outputNum = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  producerInstructionIndex = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class InstructionOutput(_messages.Message):
  r"""An output of an instruction.

  Messages:
    CodecValue: The codec to use to encode data being written via this output.

  Fields:
    codec: The codec to use to encode data being written via this output.
    name: The user-provided name of this output.
    onlyCountKeyBytes: For system-generated byte and mean byte metrics,
      certain instructions should only report the key size.
    onlyCountValueBytes: For system-generated byte and mean byte metrics,
      certain instructions should only report the value size.
    originalName: System-defined name for this output in the original workflow
      graph. Outputs that do not contribute to an original instruction do not
      set this.
    systemName: System-defined name of this output. Unique across the
      workflow.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class CodecValue(_messages.Message):
    r"""The codec to use to encode data being written via this output.

    Messages:
      AdditionalProperty: An additional property for a CodecValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a CodecValue object.

      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  codec = _messages.MessageField('CodecValue', 1)
  name = _messages.StringField(2)
  onlyCountKeyBytes = _messages.BooleanField(3)
  onlyCountValueBytes = _messages.BooleanField(4)
  originalName = _messages.StringField(5)
  systemName = _messages.StringField(6)
class IntegerGauge(_messages.Message):
  r"""A metric value representing temporal values of a variable.

  Fields:
    timestamp: The time at which this value was measured. Measured as msecs
      from epoch.
    value: The value of the variable represented by this gauge.
  """

  timestamp = _messages.StringField(1)
  value = _messages.MessageField('SplitInt64', 2)
class IntegerList(_messages.Message):
  r"""A metric value representing a list of integers.

  Fields:
    elements: Elements of the list.
  """

  elements = _messages.MessageField('SplitInt64', 1, repeated=True)
class IntegerMean(_messages.Message):
  r"""A representation of an integer mean metric contribution.

  Fields:
    count: The number of values being aggregated.
    sum: The sum of all values being aggregated.
  """

  count = _messages.MessageField('SplitInt64', 1)
  sum = _messages.MessageField('SplitInt64', 2)
class Job(_messages.Message):
  r"""Defines a job to be run by the Cloud Dataflow service.

  Enums:
    CurrentStateValueValuesEnum: The current state of the job. Jobs are
      created in the `JOB_STATE_STOPPED` state unless otherwise specified. A
      job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal
      state. After a job has reached a terminal state, no further state
      updates may be made. This field may be mutated by the Cloud Dataflow
      service; callers cannot mutate it.
    RequestedStateValueValuesEnum: The job's requested state. `UpdateJob` may
      be used to switch between the `JOB_STATE_STOPPED` and
      `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may
      also be used to directly set a job's requested state to
      `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
      job if it has not already reached a terminal state.
    TypeValueValuesEnum: The type of Cloud Dataflow job.

  Messages:
    LabelsValue: User-defined labels for this job. The labels map can contain
      no more than 64 entries. Entries of the labels map are UTF8 strings
      that comply with the following restrictions: * Keys must conform to
      regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
      [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
      constrained to be <= 128 bytes in size.
    TransformNameMappingValue: The map of transform name prefixes of the job
      to be replaced to the corresponding name prefixes of the new job.

  Fields:
    clientRequestId: The client's unique identifier of the job, re-used across
      retried attempts. If this field is set, the service will ensure its
      uniqueness. The request to create a job will fail if the service has
      knowledge of a previously submitted job with the same client's ID and
      job name. The caller may use this field to ensure idempotence of job
      creation across retried attempts to create a job. By default, the field
      is empty and, in that case, the service ignores it.
    createTime: The timestamp when the job was initially created. Immutable
      and set by the Cloud Dataflow service.
    createdFromSnapshotId: If this is specified, the job's initial state is
      populated from the given snapshot.
    currentState: The current state of the job. Jobs are created in the
      `JOB_STATE_STOPPED` state unless otherwise specified. A job in the
      `JOB_STATE_RUNNING` state may asynchronously enter a terminal state.
      After a job has reached a terminal state, no further state updates may
      be made. This field may be mutated by the Cloud Dataflow service;
      callers cannot mutate it.
    currentStateTime: The timestamp associated with the current state.
    environment: The environment for the job.
    executionInfo: Deprecated.
    id: The unique ID of this job. This field is set by the Cloud Dataflow
      service when the Job is created, and is immutable for the life of the
      job.
    jobMetadata: This field is populated by the Dataflow service to support
      filtering jobs by the metadata values provided here. Populated for
      ListJobs and all GetJob views SUMMARY and higher.
    labels: User-defined labels for this job. The labels map can contain no
      more than 64 entries. Entries of the labels map are UTF8 strings that
      comply with the following restrictions: * Keys must conform to regexp:
      \p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
      [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
      constrained to be <= 128 bytes in size.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains this job.
    name: The user-specified Cloud Dataflow job name. Only one Job with a
      given name may exist in a project at any given time. If a caller
      attempts to create a Job with the same name as an already-existing Job,
      the attempt returns the existing Job. The name must match the regular
      expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
    pipelineDescription: Preliminary field: The format of this data may change
      at any time. A description of the user pipeline and stages through which
      it is executed. Created by Cloud Dataflow service. Only retrieved with
      JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
    projectId: The ID of the Cloud Platform project that the job belongs to.
    replaceJobId: If this job is an update of an existing job, this field is
      the job ID of the job it replaced. When sending a `CreateJobRequest`,
      you can update a job by specifying it here. The job named here is
      stopped, and its intermediate state is transferred to this job.
    replacedByJobId: If another job is an update of this job (and thus, this
      job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
    requestedState: The job's requested state. `UpdateJob` may be used to
      switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states,
      by setting requested_state. `UpdateJob` may also be used to directly
      set a job's requested state to `JOB_STATE_CANCELLED` or
      `JOB_STATE_DONE`, irrevocably terminating the job if it has not already
      reached a terminal state.
    stageStates: This field may be mutated by the Cloud Dataflow service;
      callers cannot mutate it.
    startTime: The timestamp when the job was started (transitioned to
      JOB_STATE_PENDING). Flexible resource scheduling jobs are started with
      some delay after job creation, so start_time is unset before start and
      is updated when the job is started by the Cloud Dataflow service. For
      other jobs, start_time always equals to create_time and is immutable and
      set by the Cloud Dataflow service.
    steps: Exactly one of step or steps_location should be specified. The
      top-level steps that constitute the entire job.
    stepsLocation: The GCS location where the steps are stored.
    tempFiles: A set of files the system should be aware of that are used for
      temporary storage. These temporary files will be removed on job
      completion. No duplicates are allowed. No file patterns are supported.
      The supported files are: Google Cloud Storage:
      storage.googleapis.com/{bucket}/{object}
      bucket.storage.googleapis.com/{object}
    transformNameMapping: The map of transform name prefixes of the job to be
      replaced to the corresponding name prefixes of the new job.
    type: The type of Cloud Dataflow job.
  """

  class CurrentStateValueValuesEnum(_messages.Enum):
    r"""The current state of the job. Jobs are created in the
    `JOB_STATE_STOPPED` state unless otherwise specified. A job in the
    `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After
    a job has reached a terminal state, no further state updates may be made.
    This field may be mutated by the Cloud Dataflow service; callers cannot
    mutate it.

    Values:
      JOB_STATE_UNKNOWN: The job's run state isn't specified.
      JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
        yet started to run.
      JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
        currently running.
      JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
        completed. This is a terminal job state. This state may be set by the
        Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
        may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
        not yet reached a terminal state.
      JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
        This is a terminal job state. This state may only be set by the Cloud
        Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
      JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
        been explicitly cancelled. This is a terminal job state. This state
        may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
        job has not yet reached another terminal state.
      JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
        successfully updated, meaning that this job was stopped and another
        job was started, inheriting state from this one. This is a terminal
        job state. This state may only be set by the Cloud Dataflow service,
        and only as a transition from `JOB_STATE_RUNNING`.
      JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
        the process of draining. A draining job has stopped pulling from its
        input sources and is processing any data that remains in-flight. This
        state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
        transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
        transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
        `JOB_STATE_FAILED`.
      JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
        drained. A drained job terminated by stopping pulling from its input
        sources and processing any data that remained in-flight when draining
        was requested. This state is a terminal state, may only be set by the
        Cloud Dataflow service, and only as a transition from
        `JOB_STATE_DRAINING`.
      JOB_STATE_PENDING: `JOB_STATE_PENDING` indicates that the job has been
        created but is not yet running. Jobs that are pending may only
        transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
      JOB_STATE_CANCELLING: `JOB_STATE_CANCELLING` indicates that the job has
        been explicitly cancelled and is in the process of stopping. Jobs
        that are cancelling may only transition to `JOB_STATE_CANCELLED` or
        `JOB_STATE_FAILED`.
      JOB_STATE_QUEUED: `JOB_STATE_QUEUED` indicates that the job has been
        created but is being delayed until launch. Jobs that are queued may
        only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`.
    """
    JOB_STATE_UNKNOWN = 0
    JOB_STATE_STOPPED = 1
    JOB_STATE_RUNNING = 2
    JOB_STATE_DONE = 3
    JOB_STATE_FAILED = 4
    JOB_STATE_CANCELLED = 5
    JOB_STATE_UPDATED = 6
    JOB_STATE_DRAINING = 7
    JOB_STATE_DRAINED = 8
    JOB_STATE_PENDING = 9
    JOB_STATE_CANCELLING = 10
    JOB_STATE_QUEUED = 11

  class RequestedStateValueValuesEnum(_messages.Enum):
    r"""The job's requested state. `UpdateJob` may be used to switch between
    the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting
    requested_state. `UpdateJob` may also be used to directly set a job's
    requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably
    terminating the job if it has not already reached a terminal state.

    Values:
      JOB_STATE_UNKNOWN: The job's run state isn't specified.
      JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
        yet started to run.
      JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
        currently running.
      JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
        completed. This is a terminal job state. This state may be set by the
        Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
        may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
        not yet reached a terminal state.
      JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
        This is a terminal job state. This state may only be set by the Cloud
        Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
      JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
        been explicitly cancelled. This is a terminal job state. This state
        may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
        job has not yet reached another terminal state.
      JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
        successfully updated, meaning that this job was stopped and another
        job was started, inheriting state from this one. This is a terminal
        job state. This state may only be set by the Cloud Dataflow service,
        and only as a transition from `JOB_STATE_RUNNING`.
      JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
        the process of draining. A draining job has stopped pulling from its
        input sources and is processing any data that remains in-flight. This
        state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
        transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
        transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
        `JOB_STATE_FAILED`.
      JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
        drained. A drained job terminated by stopping pulling from its input
        sources and processing any data that remained in-flight when draining
        was requested. This state is a terminal state, may only be set by the
        Cloud Dataflow service, and only as a transition from
        `JOB_STATE_DRAINING`.
      JOB_STATE_PENDING: `JOB_STATE_PENDING` indicates that the job has been
        created but is not yet running. Jobs that are pending may only
        transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
      JOB_STATE_CANCELLING: `JOB_STATE_CANCELLING` indicates that the job has
        been explicitly cancelled and is in the process of stopping. Jobs
        that are cancelling may only transition to `JOB_STATE_CANCELLED` or
        `JOB_STATE_FAILED`.
      JOB_STATE_QUEUED: `JOB_STATE_QUEUED` indicates that the job has been
        created but is being delayed until launch. Jobs that are queued may
        only transition to `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`.
    """
    JOB_STATE_UNKNOWN = 0
    JOB_STATE_STOPPED = 1
    JOB_STATE_RUNNING = 2
    JOB_STATE_DONE = 3
    JOB_STATE_FAILED = 4
    JOB_STATE_CANCELLED = 5
    JOB_STATE_UPDATED = 6
    JOB_STATE_DRAINING = 7
    JOB_STATE_DRAINED = 8
    JOB_STATE_PENDING = 9
    JOB_STATE_CANCELLING = 10
    JOB_STATE_QUEUED = 11

  class TypeValueValuesEnum(_messages.Enum):
    r"""The type of Cloud Dataflow job.

    Values:
      JOB_TYPE_UNKNOWN: The type of the job is unspecified, or unknown.
      JOB_TYPE_BATCH: A batch job with a well-defined end point: data is read,
        data is processed, data is written, and the job is done.
      JOB_TYPE_STREAMING: A continuously streaming job with no end: data is
        read, processed, and written continuously.
    """
    JOB_TYPE_UNKNOWN = 0
    JOB_TYPE_BATCH = 1
    JOB_TYPE_STREAMING = 2

  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""User-defined labels for this job. The labels map can contain no more
    than 64 entries. Entries of the labels map are UTF8 strings that comply
    with the following restrictions: * Keys must conform to regexp:
    \p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
    [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
    constrained to be <= 128 bytes in size.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class TransformNameMappingValue(_messages.Message):
    r"""The map of transform name prefixes of the job to be replaced to the
    corresponding name prefixes of the new job.

    Messages:
      AdditionalProperty: An additional property for a
        TransformNameMappingValue object.

    Fields:
      additionalProperties: Additional properties of type
        TransformNameMappingValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a TransformNameMappingValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  clientRequestId = _messages.StringField(1)
  createTime = _messages.StringField(2)
  createdFromSnapshotId = _messages.StringField(3)
  currentState = _messages.EnumField('CurrentStateValueValuesEnum', 4)
  currentStateTime = _messages.StringField(5)
  environment = _messages.MessageField('Environment', 6)
  executionInfo = _messages.MessageField('JobExecutionInfo', 7)
  id = _messages.StringField(8)
  jobMetadata = _messages.MessageField('JobMetadata', 9)
  labels = _messages.MessageField('LabelsValue', 10)
  location = _messages.StringField(11)
  name = _messages.StringField(12)
  pipelineDescription = _messages.MessageField('PipelineDescription', 13)
  projectId = _messages.StringField(14)
  replaceJobId = _messages.StringField(15)
  replacedByJobId = _messages.StringField(16)
  requestedState = _messages.EnumField('RequestedStateValueValuesEnum', 17)
  stageStates = _messages.MessageField('ExecutionStageState', 18, repeated=True)
  startTime = _messages.StringField(19)
  steps = _messages.MessageField('Step', 20, repeated=True)
  stepsLocation = _messages.StringField(21)
  tempFiles = _messages.StringField(22, repeated=True)
  transformNameMapping = _messages.MessageField('TransformNameMappingValue', 23)
  type = _messages.EnumField('TypeValueValuesEnum', 24)
class JobExecutionInfo(_messages.Message):
  r"""Additional information about how a Cloud Dataflow job will be executed
  that isn't contained in the submitted job.

  Messages:
    StagesValue: A mapping from each stage to the information about that
      stage.

  Fields:
    stages: A mapping from each stage to the information about that stage.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class StagesValue(_messages.Message):
    r"""A mapping from each stage to the information about that stage.

    Messages:
      AdditionalProperty: An additional property for a StagesValue object.

    Fields:
      additionalProperties: Additional properties of type StagesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a StagesValue object.

      Fields:
        key: Name of the additional property.
        value: A JobExecutionStageInfo attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('JobExecutionStageInfo', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  stages = _messages.MessageField('StagesValue', 1)
class JobExecutionStageInfo(_messages.Message):
  r"""Contains information about how a particular google.dataflow.v1beta3.Step
  will be executed.

  Fields:
    stepName: The steps associated with the execution stage. Note that stages
      may have several steps, and that a given step might be run by more than
      one stage.
  """

  stepName = _messages.StringField(1, repeated=True)
class JobMessage(_messages.Message):
  r"""A particular message pertaining to a Dataflow job.

  Enums:
    MessageImportanceValueValuesEnum: Importance level of the message.

  Fields:
    id: Deprecated.
    messageImportance: Importance level of the message.
    messageText: The text of the message.
    time: The timestamp of the message.
  """

  class MessageImportanceValueValuesEnum(_messages.Enum):
    r"""Importance level of the message.

    Values:
      JOB_MESSAGE_IMPORTANCE_UNKNOWN: The message importance isn't specified,
        or is unknown.
      JOB_MESSAGE_DEBUG: The message is at the 'debug' level: typically only
        useful for software engineers working on the code the job is running.
        Typically, Dataflow pipeline runners do not display log messages at
        this level by default.
      JOB_MESSAGE_DETAILED: The message is at the 'detailed' level: somewhat
        verbose, but potentially useful to users. Typically, Dataflow
        pipeline runners do not display log messages at this level by default.
        These messages are displayed by default in the Dataflow monitoring UI.
      JOB_MESSAGE_BASIC: The message is at the 'basic' level: useful for
        keeping track of the execution of a Dataflow pipeline. Typically,
        Dataflow pipeline runners display log messages at this level by
        default, and these messages are displayed by default in the Dataflow
        monitoring UI.
      JOB_MESSAGE_WARNING: The message is at the 'warning' level: indicating a
        condition pertaining to a job which may require human intervention.
        Typically, Dataflow pipeline runners display log messages at this
        level by default, and these messages are displayed by default in the
        Dataflow monitoring UI.
      JOB_MESSAGE_ERROR: The message is at the 'error' level: indicating a
        condition preventing a job from succeeding. Typically, Dataflow
        pipeline runners display log messages at this level by default, and
        these messages are displayed by default in the Dataflow monitoring UI.
    """
    JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
    JOB_MESSAGE_DEBUG = 1
    JOB_MESSAGE_DETAILED = 2
    JOB_MESSAGE_BASIC = 3
    JOB_MESSAGE_WARNING = 4
    JOB_MESSAGE_ERROR = 5

  id = _messages.StringField(1)
  messageImportance = _messages.EnumField('MessageImportanceValueValuesEnum', 2)
  messageText = _messages.StringField(3)
  time = _messages.StringField(4)
class JobMetadata(_messages.Message):
  r"""Metadata available primarily for filtering jobs. Will be included in the
  ListJob response and Job SUMMARY view.
  Fields:
    bigTableDetails: Identification of a BigTable source used in the Dataflow
      job.
    bigqueryDetails: Identification of a BigQuery source used in the Dataflow
      job.
    datastoreDetails: Identification of a Datastore source used in the
      Dataflow job.
    fileDetails: Identification of a File source used in the Dataflow job.
    pubsubDetails: Identification of a PubSub source used in the Dataflow job.
    sdkVersion: The SDK version used to run the job.
    spannerDetails: Identification of a Spanner source used in the Dataflow
      job.
  """
  # Generated protorpc message; the integers below are proto field numbers
  # (wire tags) and must stay stable.
  bigTableDetails = _messages.MessageField('BigTableIODetails', 1, repeated=True)
  bigqueryDetails = _messages.MessageField('BigQueryIODetails', 2, repeated=True)
  datastoreDetails = _messages.MessageField('DatastoreIODetails', 3, repeated=True)
  fileDetails = _messages.MessageField('FileIODetails', 4, repeated=True)
  pubsubDetails = _messages.MessageField('PubSubIODetails', 5, repeated=True)
  sdkVersion = _messages.MessageField('SdkVersion', 6)
  spannerDetails = _messages.MessageField('SpannerIODetails', 7, repeated=True)
class JobMetrics(_messages.Message):
  r"""JobMetrics contains a collection of metrics describing the detailed
  progress of a Dataflow job. Metrics correspond to user-defined and system-
  defined metrics in the job. This resource captures only the most recent
  values of each metric; time-series data can be queried for them (under the
  same metric names) from Cloud Monitoring.
  Fields:
    metricTime: Timestamp as of which metric values are current.
    metrics: All metrics for this job.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  metricTime = _messages.StringField(1)
  metrics = _messages.MessageField('MetricUpdate', 2, repeated=True)
class KeyRangeDataDiskAssignment(_messages.Message):
  r"""Data disk assignment information for a specific key-range of a sharded
  computation. Currently we only support UTF-8 character splits to simplify
  encoding into JSON.
  Fields:
    dataDisk: The name of the data disk where data for this range is stored.
      This name is local to the Google Cloud Platform project and uniquely
      identifies the disk within that project, for example
      "myproject-1014-104817-4c2-harness-0-disk-1".
    end: The end (exclusive) of the key range.
    start: The start (inclusive) of the key range.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  dataDisk = _messages.StringField(1)
  end = _messages.StringField(2)
  start = _messages.StringField(3)
class KeyRangeLocation(_messages.Message):
  r"""Location information for a specific key-range of a sharded computation.
  Currently we only support UTF-8 character splits to simplify encoding into
  JSON.
  Fields:
    dataDisk: The name of the data disk where data for this range is stored.
      This name is local to the Google Cloud Platform project and uniquely
      identifies the disk within that project, for example
      "myproject-1014-104817-4c2-harness-0-disk-1".
    deliveryEndpoint: The physical location of this range assignment to be
      used for streaming computation cross-worker message delivery.
    deprecatedPersistentDirectory: DEPRECATED. The location of the persistent
      state for this range, as a persistent directory in the worker local
      filesystem.
    end: The end (exclusive) of the key range.
    start: The start (inclusive) of the key range.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  # deprecatedPersistentDirectory is retained only for wire compatibility.
  dataDisk = _messages.StringField(1)
  deliveryEndpoint = _messages.StringField(2)
  deprecatedPersistentDirectory = _messages.StringField(3)
  end = _messages.StringField(4)
  start = _messages.StringField(5)
class LaunchTemplateParameters(_messages.Message):
  r"""Parameters to provide to the template being launched.
  Messages:
    ParametersValue: The runtime parameters to pass to the job.
    TransformNameMappingValue: Only applicable when updating a pipeline. Map
      of transform name prefixes of the job to be replaced to the
      corresponding name prefixes of the new job.
  Fields:
    environment: The runtime environment for the job.
    jobName: Required. The job name to use for the created job.
    parameters: The runtime parameters to pass to the job.
    transformNameMapping: Only applicable when updating a pipeline. Map of
      transform name prefixes of the job to be replaced to the corresponding
      name prefixes of the new job.
    update: If set, replace the existing pipeline with the name specified by
      jobName with this pipeline, preserving state.
  """
  # The decorator lets apitools round-trip arbitrary JSON map keys through
  # the repeated AdditionalProperty list.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParametersValue(_messages.Message):
    r"""The runtime parameters to pass to the job.
    Messages:
      AdditionalProperty: An additional property for a ParametersValue object.
    Fields:
      additionalProperties: Additional properties of type ParametersValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ParametersValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class TransformNameMappingValue(_messages.Message):
    r"""Only applicable when updating a pipeline. Map of transform name
    prefixes of the job to be replaced to the corresponding name prefixes of
    the new job.
    Messages:
      AdditionalProperty: An additional property for a
        TransformNameMappingValue object.
    Fields:
      additionalProperties: Additional properties of type
        TransformNameMappingValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a TransformNameMappingValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  environment = _messages.MessageField('RuntimeEnvironment', 1)
  jobName = _messages.StringField(2)
  parameters = _messages.MessageField('ParametersValue', 3)
  transformNameMapping = _messages.MessageField('TransformNameMappingValue', 4)
  update = _messages.BooleanField(5)
class LaunchTemplateResponse(_messages.Message):
  r"""Response to the request to launch a template.
  Fields:
    job: The job that was launched, if the request was not a dry run and the
      job was successfully launched.
  """
  # Unset on dry runs or failed launches, per the field description above.
  job = _messages.MessageField('Job', 1)
class LeaseWorkItemRequest(_messages.Message):
  r"""Request to lease WorkItems.
  Messages:
    UnifiedWorkerRequestValue: Untranslated bag-of-bytes WorkRequest from
      UnifiedWorker.
  Fields:
    currentWorkerTime: The current timestamp at the worker.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the WorkItem's job.
    requestedLeaseDuration: The initial lease period.
    unifiedWorkerRequest: Untranslated bag-of-bytes WorkRequest from
      UnifiedWorker.
    workItemTypes: Filter for WorkItem type.
    workerCapabilities: Worker capabilities. WorkItems might be limited to
      workers with specific capabilities.
    workerId: Identifies the worker leasing work -- typically the ID of the
      virtual machine running the worker.
  """
  # Map-valued field: arbitrary JSON keys are carried as AdditionalProperty
  # entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UnifiedWorkerRequestValue(_messages.Message):
    r"""Untranslated bag-of-bytes WorkRequest from UnifiedWorker.
    Messages:
      AdditionalProperty: An additional property for a
        UnifiedWorkerRequestValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UnifiedWorkerRequestValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  currentWorkerTime = _messages.StringField(1)
  location = _messages.StringField(2)
  requestedLeaseDuration = _messages.StringField(3)
  unifiedWorkerRequest = _messages.MessageField('UnifiedWorkerRequestValue', 4)
  workItemTypes = _messages.StringField(5, repeated=True)
  workerCapabilities = _messages.StringField(6, repeated=True)
  workerId = _messages.StringField(7)
class LeaseWorkItemResponse(_messages.Message):
  r"""Response to a request to lease WorkItems.
  Messages:
    UnifiedWorkerResponseValue: Untranslated bag-of-bytes WorkResponse for
      UnifiedWorker.
  Fields:
    unifiedWorkerResponse: Untranslated bag-of-bytes WorkResponse for
      UnifiedWorker.
    workItems: A list of the leased WorkItems.
  """
  # Map-valued field: arbitrary JSON keys are carried as AdditionalProperty
  # entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UnifiedWorkerResponseValue(_messages.Message):
    r"""Untranslated bag-of-bytes WorkResponse for UnifiedWorker.
    Messages:
      AdditionalProperty: An additional property for a
        UnifiedWorkerResponseValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UnifiedWorkerResponseValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  unifiedWorkerResponse = _messages.MessageField('UnifiedWorkerResponseValue', 1)
  workItems = _messages.MessageField('WorkItem', 2, repeated=True)
class ListJobMessagesResponse(_messages.Message):
  r"""Response to a request to list job messages.
  Fields:
    autoscalingEvents: Autoscaling events in ascending timestamp order.
    jobMessages: Messages in ascending timestamp order.
    nextPageToken: The token to obtain the next page of results if there are
      more.
  """
  # Paged response: absence of nextPageToken means no further pages.
  autoscalingEvents = _messages.MessageField('AutoscalingEvent', 1, repeated=True)
  jobMessages = _messages.MessageField('JobMessage', 2, repeated=True)
  nextPageToken = _messages.StringField(3)
class ListJobsResponse(_messages.Message):
  r"""Response to a request to list Cloud Dataflow jobs. This may be a
  partial response, depending on the page size in the ListJobsRequest.
  Fields:
    failedLocation: Zero or more messages describing the [regional endpoints]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that failed to respond.
    jobs: A subset of the requested job information.
    nextPageToken: Set if there may be more results than fit in this response.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  failedLocation = _messages.MessageField('FailedLocation', 1, repeated=True)
  jobs = _messages.MessageField('Job', 2, repeated=True)
  nextPageToken = _messages.StringField(3)
class MapTask(_messages.Message):
  r"""MapTask consists of an ordered set of instructions, each of which
  describes one particular low-level operation for the worker to perform in
  order to accomplish the MapTask's WorkItem. Each instruction must appear in
  the list before any instructions which depend on its output.
  Fields:
    counterPrefix: Counter prefix that can be used to prefix counters. Not
      currently used in Dataflow.
    instructions: The instructions in the MapTask.
    stageName: System-defined name of the stage containing this MapTask.
      Unique across the workflow.
    systemName: System-defined name of this MapTask. Unique across the
      workflow.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  counterPrefix = _messages.StringField(1)
  instructions = _messages.MessageField('ParallelInstruction', 2, repeated=True)
  stageName = _messages.StringField(3)
  systemName = _messages.StringField(4)
class MetricShortId(_messages.Message):
  r"""The metric short id is returned to the user alongside an offset into
  ReportWorkItemStatusRequest
  Fields:
    metricIndex: The index of the corresponding metric in the
      ReportWorkItemStatusRequest. Required.
    shortId: The service-generated short identifier for the metric.
  """
  # metricIndex is a 32-bit int on the wire; shortId uses the default
  # (64-bit) integer variant.
  metricIndex = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  shortId = _messages.IntegerField(2)
class MetricStructuredName(_messages.Message):
  r"""Identifies a metric, by describing the source which generated the
  metric.
  Messages:
    ContextValue: Zero or more labeled fields which identify the part of the
      job this metric is associated with, such as the name of a step or
      collection. For example, built-in counters associated with steps will
      have context['step'] = <step-name>. Counters associated with
      PCollections in the SDK will have context['pcollection'] = <pcollection-
      name>.
  Fields:
    context: Zero or more labeled fields which identify the part of the job
      this metric is associated with, such as the name of a step or
      collection. For example, built-in counters associated with steps will
      have context['step'] = <step-name>. Counters associated with
      PCollections in the SDK will have context['pcollection'] = <pcollection-
      name>.
    name: Worker-defined metric name.
    origin: Origin (namespace) of metric name. May be blank for user-defined
      metrics; will be "dataflow" for metrics defined by the Dataflow service
      or SDK.
  """
  # Map-valued field: arbitrary JSON keys are carried as AdditionalProperty
  # entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ContextValue(_messages.Message):
    r"""Zero or more labeled fields which identify the part of the job this
    metric is associated with, such as the name of a step or collection. For
    example, built-in counters associated with steps will have context['step']
    = <step-name>. Counters associated with PCollections in the SDK will have
    context['pcollection'] = <pcollection-name>.
    Messages:
      AdditionalProperty: An additional property for a ContextValue object.
    Fields:
      additionalProperties: Additional properties of type ContextValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ContextValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  context = _messages.MessageField('ContextValue', 1)
  name = _messages.StringField(2)
  origin = _messages.StringField(3)
class MetricUpdate(_messages.Message):
  r"""Describes the state of a metric.
  Fields:
    cumulative: True if this metric is reported as the total cumulative
      aggregate value accumulated since the worker started working on this
      WorkItem. By default this is false, indicating that this metric is
      reported as a delta that is not associated with any WorkItem.
    distribution: A struct value describing properties of a distribution of
      numeric values.
    gauge: A struct value describing properties of a Gauge. Metrics of gauge
      type show the value of a metric across time, and is aggregated based on
      the newest value.
    internal: Worker-computed aggregate value for internal use by the Dataflow
      service.
    kind: Metric aggregation kind. The possible metric aggregation kinds are
      "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution". The
      specified aggregation kind is case-insensitive. If omitted, this is not
      an aggregated value but instead a single metric sample value.
    meanCount: Worker-computed aggregate value for the "Mean" aggregation
      kind. This holds the count of the aggregated values and is used in
      combination with mean_sum above to obtain the actual mean aggregate
      value. The only possible value type is Long.
    meanSum: Worker-computed aggregate value for the "Mean" aggregation kind.
      This holds the sum of the aggregated values and is used in combination
      with mean_count below to obtain the actual mean aggregate value. The
      only possible value types are Long and Double.
    name: Name of the metric.
    scalar: Worker-computed aggregate value for aggregation kinds "Sum",
      "Max", "Min", "And", and "Or". The possible value types are Long,
      Double, and Boolean.
    set: Worker-computed aggregate value for the "Set" aggregation kind. The
      only possible value type is a list of Values whose type can be Long,
      Double, or String, according to the metric's type. All Values in the
      list must be of the same type.
    updateTime: Timestamp associated with the metric value. Optional when
      workers are reporting work progress; it will be filled in responses from
      the metrics API.
  """
  # Polymorphic values (distribution, gauge, scalar, set, ...) are modeled as
  # untyped JSON via extra_types.JsonValue; field numbers are proto wire tags.
  cumulative = _messages.BooleanField(1)
  distribution = _messages.MessageField('extra_types.JsonValue', 2)
  gauge = _messages.MessageField('extra_types.JsonValue', 3)
  internal = _messages.MessageField('extra_types.JsonValue', 4)
  kind = _messages.StringField(5)
  meanCount = _messages.MessageField('extra_types.JsonValue', 6)
  meanSum = _messages.MessageField('extra_types.JsonValue', 7)
  name = _messages.MessageField('MetricStructuredName', 8)
  scalar = _messages.MessageField('extra_types.JsonValue', 9)
  set = _messages.MessageField('extra_types.JsonValue', 10)
  updateTime = _messages.StringField(11)
class MountedDataDisk(_messages.Message):
  r"""Describes mounted data disk.
  Fields:
    dataDisk: The name of the data disk. This name is local to the Google
      Cloud Platform project and uniquely identifies the disk within that
      project, for example "myproject-1014-104817-4c2-harness-0-disk-1".
  """
  # Generated protorpc message; the field number is a proto wire tag.
  dataDisk = _messages.StringField(1)
class MultiOutputInfo(_messages.Message):
  r"""Information about an output of a multi-output DoFn.
  Fields:
    tag: The id of the tag the user code will emit to this output by; this
      should correspond to the tag of some SideInputInfo.
  """
  # Generated protorpc message; the field number is a proto wire tag.
  tag = _messages.StringField(1)
class NameAndKind(_messages.Message):
  r"""Basic metadata about a counter.
  Enums:
    KindValueValuesEnum: Counter aggregation kind.
  Fields:
    kind: Counter aggregation kind.
    name: Name of the counter.
  """
  class KindValueValuesEnum(_messages.Enum):
    r"""Counter aggregation kind.
    Values:
      INVALID: Counter aggregation kind was not set.
      SUM: Aggregated value is the sum of all contributed values.
      MAX: Aggregated value is the max of all contributed values.
      MIN: Aggregated value is the min of all contributed values.
      MEAN: Aggregated value is the mean of all contributed values.
      OR: Aggregated value represents the logical 'or' of all contributed
        values.
      AND: Aggregated value represents the logical 'and' of all contributed
        values.
      SET: Aggregated value is a set of unique contributed values.
      DISTRIBUTION: Aggregated value captures statistics about a distribution.
      LATEST_VALUE: Aggregated value tracks the latest value of a variable.
    """
    # Numeric values are the wire representation of the service's enum;
    # do not renumber.
    INVALID = 0
    SUM = 1
    MAX = 2
    MIN = 3
    MEAN = 4
    OR = 5
    AND = 6
    SET = 7
    DISTRIBUTION = 8
    LATEST_VALUE = 9
  # Generated protorpc message; field numbers are proto wire tags.
  kind = _messages.EnumField('KindValueValuesEnum', 1)
  name = _messages.StringField(2)
class Package(_messages.Message):
  r"""The packages that must be installed in order for a worker to run the
  steps of the Cloud Dataflow job that will be assigned to its worker pool.
  This is the mechanism by which the Cloud Dataflow SDK causes code to be
  loaded onto the workers. For example, the Cloud Dataflow Java SDK might use
  this to install jars containing the user's code and all of the various
  dependencies (libraries, data files, etc.) required in order for that code
  to run.
  Fields:
    location: The resource to read the package from. The supported resource
      type is: Google Cloud Storage: storage.googleapis.com/{bucket}
      bucket.storage.googleapis.com/
    name: The name of the package.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  location = _messages.StringField(1)
  name = _messages.StringField(2)
class ParDoInstruction(_messages.Message):
  r"""An instruction that does a ParDo operation. Takes one main input and
  zero or more side inputs, and produces zero or more outputs. Runs user code.
  Messages:
    UserFnValue: The user function to invoke.
  Fields:
    input: The input.
    multiOutputInfos: Information about each of the outputs, if user_fn is a
      MultiDoFn.
    numOutputs: The number of outputs.
    sideInputs: Zero or more side inputs.
    userFn: The user function to invoke.
  """
  # Map-valued field: arbitrary JSON keys are carried as AdditionalProperty
  # entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserFnValue(_messages.Message):
    r"""The user function to invoke.
    Messages:
      AdditionalProperty: An additional property for a UserFnValue object.
    Fields:
      additionalProperties: Properties of the object.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserFnValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  input = _messages.MessageField('InstructionInput', 1)
  multiOutputInfos = _messages.MessageField('MultiOutputInfo', 2, repeated=True)
  numOutputs = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  sideInputs = _messages.MessageField('SideInputInfo', 4, repeated=True)
  userFn = _messages.MessageField('UserFnValue', 5)
class ParallelInstruction(_messages.Message):
  r"""Describes a particular operation comprising a MapTask.
  Fields:
    flatten: Additional information for Flatten instructions.
    name: User-provided name of this operation.
    originalName: System-defined name for the operation in the original
      workflow graph.
    outputs: Describes the outputs of the instruction.
    parDo: Additional information for ParDo instructions.
    partialGroupByKey: Additional information for PartialGroupByKey
      instructions.
    read: Additional information for Read instructions.
    systemName: System-defined name of this operation. Unique across the
      workflow.
    write: Additional information for Write instructions.
  """
  # One of flatten/parDo/partialGroupByKey/read/write is expected to carry
  # the instruction's type-specific payload (union-style message).
  flatten = _messages.MessageField('FlattenInstruction', 1)
  name = _messages.StringField(2)
  originalName = _messages.StringField(3)
  outputs = _messages.MessageField('InstructionOutput', 4, repeated=True)
  parDo = _messages.MessageField('ParDoInstruction', 5)
  partialGroupByKey = _messages.MessageField('PartialGroupByKeyInstruction', 6)
  read = _messages.MessageField('ReadInstruction', 7)
  systemName = _messages.StringField(8)
  write = _messages.MessageField('WriteInstruction', 9)
class Parameter(_messages.Message):
  r"""Structured data associated with this message.
  Fields:
    key: Key or name for this parameter.
    value: Value for this parameter.
  """
  # value is untyped JSON (extra_types.JsonValue); field numbers are wire tags.
  key = _messages.StringField(1)
  value = _messages.MessageField('extra_types.JsonValue', 2)
class ParameterMetadata(_messages.Message):
  r"""Metadata for a specific parameter.
  Fields:
    helpText: Required. The help text to display for the parameter.
    isOptional: Optional. Whether the parameter is optional. Defaults to
      false.
    label: Required. The label to display for the parameter.
    name: Required. The name of the parameter.
    regexes: Optional. Regexes that the parameter must match.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  helpText = _messages.StringField(1)
  isOptional = _messages.BooleanField(2)
  label = _messages.StringField(3)
  name = _messages.StringField(4)
  regexes = _messages.StringField(5, repeated=True)
class PartialGroupByKeyInstruction(_messages.Message):
  r"""An instruction that does a partial group-by-key. One input and one
  output.
  Messages:
    InputElementCodecValue: The codec to use for interpreting an element in
      the input PTable.
    ValueCombiningFnValue: The value combining function to invoke.
  Fields:
    input: Describes the input to the partial group-by-key instruction.
    inputElementCodec: The codec to use for interpreting an element in the
      input PTable.
    originalCombineValuesInputStoreName: If this instruction includes a
      combining function this is the name of the intermediate store between
      the GBK and the CombineValues.
    originalCombineValuesStepName: If this instruction includes a combining
      function, this is the name of the CombineValues instruction lifted into
      this instruction.
    sideInputs: Zero or more side inputs.
    valueCombiningFn: The value combining function to invoke.
  """
  # Map-valued fields below: arbitrary JSON keys are carried as
  # AdditionalProperty entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class InputElementCodecValue(_messages.Message):
    r"""The codec to use for interpreting an element in the input PTable.
    Messages:
      AdditionalProperty: An additional property for an InputElementCodecValue
        object.
    Fields:
      additionalProperties: Properties of the object.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for an InputElementCodecValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ValueCombiningFnValue(_messages.Message):
    r"""The value combining function to invoke.
    Messages:
      AdditionalProperty: An additional property for a ValueCombiningFnValue
        object.
    Fields:
      additionalProperties: Properties of the object.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ValueCombiningFnValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  input = _messages.MessageField('InstructionInput', 1)
  inputElementCodec = _messages.MessageField('InputElementCodecValue', 2)
  originalCombineValuesInputStoreName = _messages.StringField(3)
  originalCombineValuesStepName = _messages.StringField(4)
  sideInputs = _messages.MessageField('SideInputInfo', 5, repeated=True)
  valueCombiningFn = _messages.MessageField('ValueCombiningFnValue', 6)
class PipelineDescription(_messages.Message):
  r"""A descriptive representation of submitted pipeline as well as the
  executed form. This data is provided by the Dataflow service for ease of
  visualizing the pipeline and interpreting Dataflow provided metrics.
  Fields:
    displayData: Pipeline level display data.
    executionPipelineStage: Description of each stage of execution of the
      pipeline.
    originalPipelineTransform: Description of each transform in the pipeline
      and collections between them.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  displayData = _messages.MessageField('DisplayData', 1, repeated=True)
  executionPipelineStage = _messages.MessageField('ExecutionStageSummary', 2, repeated=True)
  originalPipelineTransform = _messages.MessageField('TransformSummary', 3, repeated=True)
class Position(_messages.Message):
  r"""Position defines a position within a collection of data. The value can
  be either the end position, a key (used with ordered collections), a byte
  offset, or a record index.
  Fields:
    byteOffset: Position is a byte offset.
    concatPosition: CloudPosition is a concat position.
    end: Position is past all other positions. Also useful for the end
      position of an unbounded range.
    key: Position is a string key, ordered lexicographically.
    recordIndex: Position is a record index.
    shufflePosition: CloudPosition is a base64 encoded BatchShufflePosition
      (with FIXED sharding).
  """
  # Union-style message: exactly one of the alternatives below is expected
  # to be set, per the docstring above.
  byteOffset = _messages.IntegerField(1)
  concatPosition = _messages.MessageField('ConcatPosition', 2)
  end = _messages.BooleanField(3)
  key = _messages.StringField(4)
  recordIndex = _messages.IntegerField(5)
  shufflePosition = _messages.StringField(6)
class PubSubIODetails(_messages.Message):
  r"""Metadata for a PubSub connector used by the job.
  Fields:
    subscription: Subscription used in the connection.
    topic: Topic accessed in the connection.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  subscription = _messages.StringField(1)
  topic = _messages.StringField(2)
class PubsubLocation(_messages.Message):
  r"""Identifies a pubsub location to use for transferring data into or out of
  a streaming Dataflow job.
  Fields:
    dropLateData: Indicates whether the pipeline allows late-arriving data.
    idLabel: If set, contains a pubsub label from which to extract record ids.
      If left empty, record deduplication will be strictly best effort.
    subscription: A pubsub subscription, in the form of
      "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
    timestampLabel: If set, contains a pubsub label from which to extract
      record timestamps. If left empty, record timestamps will be generated
      upon arrival.
    topic: A pubsub topic, in the form of "pubsub.googleapis.com/topics
      /<project-id>/<topic-name>"
    trackingSubscription: If set, specifies the pubsub subscription that will
      be used for tracking custom time timestamps for watermark estimation.
    withAttributes: If true, then the client has requested to get pubsub
      attributes.
  """
  # Generated protorpc message; field numbers are proto wire tags.
  dropLateData = _messages.BooleanField(1)
  idLabel = _messages.StringField(2)
  subscription = _messages.StringField(3)
  timestampLabel = _messages.StringField(4)
  topic = _messages.StringField(5)
  trackingSubscription = _messages.StringField(6)
  withAttributes = _messages.BooleanField(7)
class ReadInstruction(_messages.Message):
  r"""An instruction that reads records. Takes no inputs, produces one output.
  Fields:
    source: The source to read from.
  """
  # Generated protorpc message; the field number is a proto wire tag.
  source = _messages.MessageField('Source', 1)
class ReportWorkItemStatusRequest(_messages.Message):
  r"""Request to report the status of WorkItems.
  Messages:
    UnifiedWorkerRequestValue: Untranslated bag-of-bytes
      WorkProgressUpdateRequest from UnifiedWorker.
  Fields:
    currentWorkerTime: The current timestamp at the worker.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the WorkItem's job.
    unifiedWorkerRequest: Untranslated bag-of-bytes WorkProgressUpdateRequest
      from UnifiedWorker.
    workItemStatuses: The order is unimportant, except that the order of the
      WorkItemServiceState messages in the ReportWorkItemStatusResponse
      corresponds to the order of WorkItemStatus messages here.
    workerId: The ID of the worker reporting the WorkItem status. If this
      does not match the ID of the worker which the Dataflow service believes
      currently has the lease on the WorkItem, the report will be dropped
      (with an error response).
  """
  # Map-valued field: arbitrary JSON keys are carried as AdditionalProperty
  # entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UnifiedWorkerRequestValue(_messages.Message):
    r"""Untranslated bag-of-bytes WorkProgressUpdateRequest from
    UnifiedWorker.
    Messages:
      AdditionalProperty: An additional property for a
        UnifiedWorkerRequestValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UnifiedWorkerRequestValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  currentWorkerTime = _messages.StringField(1)
  location = _messages.StringField(2)
  unifiedWorkerRequest = _messages.MessageField('UnifiedWorkerRequestValue', 3)
  workItemStatuses = _messages.MessageField('WorkItemStatus', 4, repeated=True)
  workerId = _messages.StringField(5)
class ReportWorkItemStatusResponse(_messages.Message):
  r"""Response from a request to report the status of WorkItems.
  Messages:
    UnifiedWorkerResponseValue: Untranslated bag-of-bytes
      WorkProgressUpdateResponse for UnifiedWorker.
  Fields:
    unifiedWorkerResponse: Untranslated bag-of-bytes
      WorkProgressUpdateResponse for UnifiedWorker.
    workItemServiceStates: A set of messages indicating the service-side state
      for each WorkItem whose status was reported, in the same order as the
      WorkItemStatus messages in the ReportWorkItemStatusRequest which
      resulted in this response.
  """
  # Map-valued field: arbitrary JSON keys are carried as AdditionalProperty
  # entries via the MapUnrecognizedFields decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UnifiedWorkerResponseValue(_messages.Message):
    r"""Untranslated bag-of-bytes WorkProgressUpdateResponse for
    UnifiedWorker.
    Messages:
      AdditionalProperty: An additional property for a
        UnifiedWorkerResponseValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UnifiedWorkerResponseValue object.
      Fields:
        key: Name of the additional property.
        value: An extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Generated protorpc message; field numbers are proto wire tags.
  unifiedWorkerResponse = _messages.MessageField('UnifiedWorkerResponseValue', 1)
  workItemServiceStates = _messages.MessageField('WorkItemServiceState', 2, repeated=True)
class ReportedParallelism(_messages.Message):
  r"""Represents the level of parallelism in a WorkItem's input, reported by
  the worker.

  Fields:
    isInfinite: Specifies whether the parallelism is infinite. If true,
      "value" is ignored. Infinite parallelism means the service will assume
      that the work item can always be split into more non-empty work items by
      dynamic splitting. This is a work-around for lack of support for
      infinity by the current JSON-based Java RPC stack.
    value: Specifies the level of parallelism in case it is finite.
  """

  isInfinite = _messages.BooleanField(1)
  value = _messages.FloatField(2)
class ResourceUtilizationReport(_messages.Message):
  r"""Worker metrics exported from workers. This contains resource
  utilization metrics accumulated from a variety of sources. For more
  information, see go/df-resource-signals.

  Fields:
    cpuTime: CPU utilization samples.
  """

  cpuTime = _messages.MessageField('CPUTime', 1, repeated=True)
class ResourceUtilizationReportResponse(_messages.Message):
  r"""Service-side response to WorkerMessage reporting resource utilization.

  This message intentionally carries no fields; it only acknowledges receipt.
  """
class RuntimeEnvironment(_messages.Message):
  r"""The environment values to set at runtime.

  Enums:
    IpConfigurationValueValuesEnum: Configuration for VM IPs.

  Messages:
    AdditionalUserLabelsValue: Additional user labels to be specified for the
      job. Keys and values should follow the restrictions specified in the
      [labeling restrictions](https://cloud.google.com/compute/docs/labeling-
      resources#restrictions) page.

  Fields:
    additionalExperiments: Additional experiment flags for the job.
    additionalUserLabels: Additional user labels to be specified for the job.
      Keys and values should follow the restrictions specified in the
      [labeling restrictions](https://cloud.google.com/compute/docs/labeling-
      resources#restrictions) page.
    bypassTempDirValidation: Whether to bypass the safety checks for the job's
      temporary directory. Use with caution.
    ipConfiguration: Configuration for VM IPs.
    kmsKeyName: Optional. Name for the Cloud KMS key for the job. Key format
      is: projects/<project>/locations/<location>/keyRings/<keyring>/cryptoKey
      s/<key>
    machineType: The machine type to use for the job. Defaults to the value
      from the template if not specified.
    maxWorkers: The maximum number of Google Compute Engine instances to be
      made available to your pipeline during execution, from 1 to 1000.
    network: Network to which VMs will be assigned. If empty or unspecified,
      the service will use the network "default".
    numWorkers: The initial number of Google Compute Engine instances for the
      job.
    serviceAccountEmail: The email address of the service account to run the
      job as.
    subnetwork: Subnetwork to which VMs will be assigned, if desired.
      Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
    tempLocation: The Cloud Storage path to use for temporary files. Must be a
      valid Cloud Storage URL, beginning with `gs://`.
    workerRegion: The Compute Engine region
      (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
      which worker processing should occur, e.g. "us-west1". Mutually
      exclusive with worker_zone. If neither worker_region nor worker_zone is
      specified, default to the control plane's region.
    workerZone: The Compute Engine zone (https://cloud.google.com/compute/docs
      /regions-zones/regions-zones) in which worker processing should occur,
      e.g. "us-west1-a". Mutually exclusive with worker_region. If neither
      worker_region nor worker_zone is specified, a zone in the control
      plane's region is chosen based on available capacity. If both
      `worker_zone` and `zone` are set, `worker_zone` takes precedence.
    zone: The Compute Engine [availability
      zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
      for launching worker instances to run your pipeline. In the future,
      worker_zone will take precedence.
  """

  class IpConfigurationValueValuesEnum(_messages.Enum):
    r"""Configuration for VM IPs.

    Values:
      WORKER_IP_UNSPECIFIED: The configuration is unknown, or unspecified.
      WORKER_IP_PUBLIC: Workers should have public IP addresses.
      WORKER_IP_PRIVATE: Workers should have private IP addresses.
    """
    WORKER_IP_UNSPECIFIED = 0
    WORKER_IP_PUBLIC = 1
    WORKER_IP_PRIVATE = 2

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class AdditionalUserLabelsValue(_messages.Message):
    r"""Additional user labels to be specified for the job. Keys and values
    should follow the restrictions specified in the [labeling
    restrictions](https://cloud.google.com/compute/docs/labeling-
    resources#restrictions) page.

    Messages:
      AdditionalProperty: An additional property for a
        AdditionalUserLabelsValue object.

    Fields:
      additionalProperties: Additional properties of type
        AdditionalUserLabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AdditionalUserLabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  additionalExperiments = _messages.StringField(1, repeated=True)
  additionalUserLabels = _messages.MessageField('AdditionalUserLabelsValue', 2)
  bypassTempDirValidation = _messages.BooleanField(3)
  ipConfiguration = _messages.EnumField('IpConfigurationValueValuesEnum', 4)
  kmsKeyName = _messages.StringField(5)
  machineType = _messages.StringField(6)
  maxWorkers = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  network = _messages.StringField(8)
  numWorkers = _messages.IntegerField(9, variant=_messages.Variant.INT32)
  serviceAccountEmail = _messages.StringField(10)
  subnetwork = _messages.StringField(11)
  tempLocation = _messages.StringField(12)
  workerRegion = _messages.StringField(13)
  workerZone = _messages.StringField(14)
  zone = _messages.StringField(15)
class SdkVersion(_messages.Message):
  r"""The version of the SDK used to run the job.

  Enums:
    SdkSupportStatusValueValuesEnum: The support status for this SDK version.

  Fields:
    sdkSupportStatus: The support status for this SDK version.
    version: The version of the SDK used to run the job.
    versionDisplayName: A readable string describing the version of the SDK.
  """

  class SdkSupportStatusValueValuesEnum(_messages.Enum):
    r"""The support status for this SDK version.

    Values:
      UNKNOWN: Cloud Dataflow is unaware of this version.
      SUPPORTED: This is a known version of an SDK, and is supported.
      STALE: A newer version of the SDK family exists, and an update is
        recommended.
      DEPRECATED: This version of the SDK is deprecated and will eventually be
        no longer supported.
      UNSUPPORTED: Support for this SDK version has ended and it should no
        longer be used.
    """
    UNKNOWN = 0
    SUPPORTED = 1
    STALE = 2
    DEPRECATED = 3
    UNSUPPORTED = 4

  sdkSupportStatus = _messages.EnumField('SdkSupportStatusValueValuesEnum', 1)
  version = _messages.StringField(2)
  versionDisplayName = _messages.StringField(3)
class SendDebugCaptureRequest(_messages.Message):
  r"""Request to send encoded debug information.

  Fields:
    componentId: The internal component id for which debug information is
      sent.
    data: The encoded debug information.
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job specified by job_id.
    workerId: The worker id, i.e., VM hostname.
  """

  componentId = _messages.StringField(1)
  data = _messages.StringField(2)
  location = _messages.StringField(3)
  workerId = _messages.StringField(4)
class SendDebugCaptureResponse(_messages.Message):
  r"""Response to a send capture request.

  This message intentionally carries no fields; it only acknowledges receipt.
  """
class SendWorkerMessagesRequest(_messages.Message):
  r"""A request for sending worker messages to the service.

  Fields:
    location: The [regional endpoint]
      (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
      that contains the job.
    workerMessages: The WorkerMessages to send.
  """

  location = _messages.StringField(1)
  workerMessages = _messages.MessageField('WorkerMessage', 2, repeated=True)
class SendWorkerMessagesResponse(_messages.Message):
  r"""The response to the worker messages.

  Fields:
    workerMessageResponses: The server's response to the worker messages.
  """

  workerMessageResponses = _messages.MessageField('WorkerMessageResponse', 1, repeated=True)
class SeqMapTask(_messages.Message):
  r"""Describes a particular function to invoke.

  Messages:
    UserFnValue: The user function to invoke.

  Fields:
    inputs: Information about each of the inputs.
    name: The user-provided name of the SeqDo operation.
    outputInfos: Information about each of the outputs.
    stageName: System-defined name of the stage containing the SeqDo
      operation. Unique across the workflow.
    systemName: System-defined name of the SeqDo operation. Unique across the
      workflow.
    userFn: The user function to invoke.
  """

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserFnValue(_messages.Message):
    r"""The user function to invoke.

    Messages:
      AdditionalProperty: An additional property for a UserFnValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserFnValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  inputs = _messages.MessageField('SideInputInfo', 1, repeated=True)
  name = _messages.StringField(2)
  outputInfos = _messages.MessageField('SeqMapTaskOutputInfo', 3, repeated=True)
  stageName = _messages.StringField(4)
  systemName = _messages.StringField(5)
  userFn = _messages.MessageField('UserFnValue', 6)
class SeqMapTaskOutputInfo(_messages.Message):
  r"""Information about an output of a SeqMapTask.

  Fields:
    sink: The sink to write the output value to.
    tag: The id of the TupleTag the user code will tag the output value by.
  """

  sink = _messages.MessageField('Sink', 1)
  tag = _messages.StringField(2)
class ShellTask(_messages.Message):
  r"""A task which consists of a shell command for the worker to execute.

  Fields:
    command: The shell command to run.
    exitCode: Exit code for the task.
  """

  command = _messages.StringField(1)
  exitCode = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class SideInputInfo(_messages.Message):
  r"""Information about a side input of a DoFn or an input of a SeqDoFn.

  Messages:
    KindValue: How to interpret the source element(s) as a side input value.

  Fields:
    kind: How to interpret the source element(s) as a side input value.
    sources: The source(s) to read element(s) from to get the value of this
      side input. If more than one source, then the elements are taken from
      the sources, in the specified order if order matters. At least one
      source is required.
    tag: The id of the tag the user code will access this side input by; this
      should correspond to the tag of some MultiOutputInfo.
  """

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class KindValue(_messages.Message):
    r"""How to interpret the source element(s) as a side input value.

    Messages:
      AdditionalProperty: An additional property for a KindValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a KindValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  kind = _messages.MessageField('KindValue', 1)
  sources = _messages.MessageField('Source', 2, repeated=True)
  tag = _messages.StringField(3)
class Sink(_messages.Message):
  r"""A sink that records can be encoded and written to.

  Messages:
    CodecValue: The codec to use to encode data written to the sink.
    SpecValue: The sink to write to, plus its parameters.

  Fields:
    codec: The codec to use to encode data written to the sink.
    spec: The sink to write to, plus its parameters.
  """

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class CodecValue(_messages.Message):
    r"""The codec to use to encode data written to the sink.

    Messages:
      AdditionalProperty: An additional property for a CodecValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a CodecValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class SpecValue(_messages.Message):
    r"""The sink to write to, plus its parameters.

    Messages:
      AdditionalProperty: An additional property for a SpecValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SpecValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  codec = _messages.MessageField('CodecValue', 1)
  spec = _messages.MessageField('SpecValue', 2)
class Source(_messages.Message):
  r"""A source that records can be read and decoded from.

  Messages:
    BaseSpecsValueListEntry: A BaseSpecsValueListEntry object.
    CodecValue: The codec to use to decode data read from the source.
    SpecValue: The source to read from, plus its parameters.

  Fields:
    baseSpecs: While splitting, sources may specify the produced bundles as
      differences against another source, in order to save backend-side memory
      and allow bigger jobs. For details, see SourceSplitRequest. To support
      this use case, the full set of parameters of the source is logically
      obtained by taking the latest explicitly specified value of each
      parameter in the order: base_specs (later items win), spec (overrides
      anything in base_specs).
    codec: The codec to use to decode data read from the source.
    doesNotNeedSplitting: Setting this value to true hints to the framework
      that the source doesn't need splitting, and using SourceSplitRequest on
      it would yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter
      may set this to true when splitting a single file into a set of byte
      ranges of appropriate size, and set this to false when splitting a
      filepattern into individual files. However, for efficiency, a file
      splitter may decide to produce file subranges directly from the
      filepattern to avoid a splitting round-trip. See SourceSplitRequest for
      an overview of the splitting process. This field is meaningful only in
      the Source objects populated by the user (e.g. when filling in a
      DerivedSource). Source objects supplied by the framework to the user
      don't have this field populated.
    metadata: Optionally, metadata for this source can be supplied right away,
      avoiding a SourceGetMetadataOperation roundtrip (see
      SourceOperationRequest). This field is meaningful only in the Source
      objects populated by the user (e.g. when filling in a DerivedSource).
      Source objects supplied by the framework to the user don't have this
      field populated.
    spec: The source to read from, plus its parameters.
  """

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class BaseSpecsValueListEntry(_messages.Message):
    r"""A BaseSpecsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a BaseSpecsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a BaseSpecsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class CodecValue(_messages.Message):
    r"""The codec to use to decode data read from the source.

    Messages:
      AdditionalProperty: An additional property for a CodecValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a CodecValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class SpecValue(_messages.Message):
    r"""The source to read from, plus its parameters.

    Messages:
      AdditionalProperty: An additional property for a SpecValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a SpecValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  baseSpecs = _messages.MessageField('BaseSpecsValueListEntry', 1, repeated=True)
  codec = _messages.MessageField('CodecValue', 2)
  doesNotNeedSplitting = _messages.BooleanField(3)
  metadata = _messages.MessageField('SourceMetadata', 4)
  spec = _messages.MessageField('SpecValue', 5)
class SourceFork(_messages.Message):
  r"""DEPRECATED in favor of DynamicSourceSplit.

  Fields:
    primary: DEPRECATED
    primarySource: DEPRECATED
    residual: DEPRECATED
    residualSource: DEPRECATED
  """

  primary = _messages.MessageField('SourceSplitShard', 1)
  primarySource = _messages.MessageField('DerivedSource', 2)
  residual = _messages.MessageField('SourceSplitShard', 3)
  residualSource = _messages.MessageField('DerivedSource', 4)
class SourceGetMetadataRequest(_messages.Message):
  r"""A request to compute the SourceMetadata of a Source.

  Fields:
    source: Specification of the source whose metadata should be computed.
  """

  source = _messages.MessageField('Source', 1)
class SourceGetMetadataResponse(_messages.Message):
  r"""The result of a SourceGetMetadataOperation.

  Fields:
    metadata: The computed metadata.
  """

  metadata = _messages.MessageField('SourceMetadata', 1)
class SourceMetadata(_messages.Message):
  r"""Metadata about a Source useful for automatically optimizing and tuning
  the pipeline, etc.

  Fields:
    estimatedSizeBytes: An estimate of the total size (in bytes) of the data
      that would be read from this source. This estimate is in terms of
      external storage size, before any decompression or other processing done
      by the reader.
    infinite: Specifies that the size of this source is known to be infinite
      (this is a streaming source).
    producesSortedKeys: Whether this source is known to produce key/value
      pairs with the (encoded) keys in lexicographically sorted order.
  """

  estimatedSizeBytes = _messages.IntegerField(1)
  infinite = _messages.BooleanField(2)
  producesSortedKeys = _messages.BooleanField(3)
class SourceOperationRequest(_messages.Message):
  r"""A work item that represents the different operations that can be
  performed on a user-defined Source specification.

  Fields:
    getMetadata: Information about a request to get metadata about a source.
    name: User-provided name of the Read instruction for this source.
    originalName: System-defined name for the Read instruction for this source
      in the original workflow graph.
    split: Information about a request to split a source.
    stageName: System-defined name of the stage containing the source
      operation. Unique across the workflow.
    systemName: System-defined name of the Read instruction for this source.
      Unique across the workflow.
  """

  getMetadata = _messages.MessageField('SourceGetMetadataRequest', 1)
  name = _messages.StringField(2)
  originalName = _messages.StringField(3)
  split = _messages.MessageField('SourceSplitRequest', 4)
  stageName = _messages.StringField(5)
  systemName = _messages.StringField(6)
class SourceOperationResponse(_messages.Message):
  r"""The result of a SourceOperationRequest, specified in
  ReportWorkItemStatusRequest.source_operation when the work item is
  completed.

  Fields:
    getMetadata: A response to a request to get metadata about a source.
    split: A response to a request to split a source.
  """

  getMetadata = _messages.MessageField('SourceGetMetadataResponse', 1)
  split = _messages.MessageField('SourceSplitResponse', 2)
class SourceSplitOptions(_messages.Message):
  r"""Hints for splitting a Source into bundles (parts for parallel
  processing) using SourceSplitRequest.

  Fields:
    desiredBundleSizeBytes: The source should be split into a set of bundles
      where the estimated size of each is approximately this many bytes.
    desiredShardSizeBytes: DEPRECATED in favor of desired_bundle_size_bytes.
  """

  desiredBundleSizeBytes = _messages.IntegerField(1)
  desiredShardSizeBytes = _messages.IntegerField(2)
class SourceSplitRequest(_messages.Message):
  r"""Represents the operation to split a high-level Source specification
  into bundles (parts for parallel processing). At a high level, splitting of
  a source into bundles happens as follows: SourceSplitRequest is applied to
  the source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further
  splitting happens and the source is used "as is". Otherwise, splitting is
  applied recursively to each produced DerivedSource. As an optimization, for
  any Source, if its does_not_need_splitting is true, the framework assumes
  that splitting this source would return SOURCE_SPLIT_OUTCOME_USE_CURRENT,
  and doesn't initiate a SourceSplitRequest. This applies both to the initial
  source being split and to bundles produced from it.

  Fields:
    options: Hints for tuning the splitting process.
    source: Specification of the source to be split.
  """

  options = _messages.MessageField('SourceSplitOptions', 1)
  source = _messages.MessageField('Source', 2)
class SourceSplitResponse(_messages.Message):
  r"""The response to a SourceSplitRequest.

  Enums:
    OutcomeValueValuesEnum: Indicates whether splitting happened and produced
      a list of bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current
      source should be processed "as is" without splitting. "bundles" is
      ignored in this case. If this is SPLITTING_HAPPENED, then "bundles"
      contains a list of bundles into which the source was split.

  Fields:
    bundles: If outcome is SPLITTING_HAPPENED, then this is a list of bundles
      into which the source was split. Otherwise this field is ignored. This
      list can be empty, which means the source represents an empty input.
    outcome: Indicates whether splitting happened and produced a list of
      bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source should
      be processed "as is" without splitting. "bundles" is ignored in this
      case. If this is SPLITTING_HAPPENED, then "bundles" contains a list of
      bundles into which the source was split.
    shards: DEPRECATED in favor of bundles.
  """

  class OutcomeValueValuesEnum(_messages.Enum):
    r"""Indicates whether splitting happened and produced a list of bundles.
    If this is USE_CURRENT_SOURCE_AS_IS, the current source should be
    processed "as is" without splitting. "bundles" is ignored in this case. If
    this is SPLITTING_HAPPENED, then "bundles" contains a list of bundles into
    which the source was split.

    Values:
      SOURCE_SPLIT_OUTCOME_UNKNOWN: The source split outcome is unknown, or
        unspecified.
      SOURCE_SPLIT_OUTCOME_USE_CURRENT: The current source should be processed
        "as is" without splitting.
      SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED: Splitting produced a list of
        bundles.
    """
    SOURCE_SPLIT_OUTCOME_UNKNOWN = 0
    SOURCE_SPLIT_OUTCOME_USE_CURRENT = 1
    SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED = 2

  bundles = _messages.MessageField('DerivedSource', 1, repeated=True)
  outcome = _messages.EnumField('OutcomeValueValuesEnum', 2)
  shards = _messages.MessageField('SourceSplitShard', 3, repeated=True)
class SourceSplitShard(_messages.Message):
  r"""DEPRECATED in favor of DerivedSource.

  Enums:
    DerivationModeValueValuesEnum: DEPRECATED

  Fields:
    derivationMode: DEPRECATED
    source: DEPRECATED
  """

  class DerivationModeValueValuesEnum(_messages.Enum):
    r"""DEPRECATED

    Values:
      SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
        unspecified.
      SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
        Source with no base.
      SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
        Source being split.
      SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on the
        base of the Source being split.
    """
    SOURCE_DERIVATION_MODE_UNKNOWN = 0
    SOURCE_DERIVATION_MODE_INDEPENDENT = 1
    SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
    SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3

  derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
  source = _messages.MessageField('Source', 2)
class SpannerIODetails(_messages.Message):
  r"""Metadata for a Spanner connector used by the job.

  Fields:
    databaseId: DatabaseId accessed in the connection.
    instanceId: InstanceId accessed in the connection.
    projectId: ProjectId accessed in the connection.
  """

  databaseId = _messages.StringField(1)
  instanceId = _messages.StringField(2)
  projectId = _messages.StringField(3)
class SplitInt64(_messages.Message):
  r"""A representation of an int64, n, that is immune to precision loss when
  encoded in JSON.

  Fields:
    highBits: The high order bits, including the sign: n >> 32.
    lowBits: The low order bits: n & 0xffffffff.
  """

  highBits = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  lowBits = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
class StageSource(_messages.Message):
  r"""Description of an input or output of an execution stage.

  Fields:
    name: Dataflow service generated name for this source.
    originalTransformOrCollection: User name for the original user transform
      or collection with which this source is most closely associated.
    sizeBytes: Size of the source, if measurable.
    userName: Human-readable name for this source; may be user or system
      generated.
  """

  name = _messages.StringField(1)
  originalTransformOrCollection = _messages.StringField(2)
  sizeBytes = _messages.IntegerField(3)
  userName = _messages.StringField(4)
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)
class StateFamilyConfig(_messages.Message):
  r"""State family configuration.

  Fields:
    isRead: If true, this family corresponds to a read operation.
    stateFamily: The state family value.
  """

  isRead = _messages.BooleanField(1)
  stateFamily = _messages.StringField(2)
class Status(_messages.Message):
  r"""The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors).

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
class Step(_messages.Message):
  r"""Defines a particular step within a Cloud Dataflow job. A job consists
  of multiple steps, each of which performs some specific operation as part of
  the overall job. Data is typically passed from one step to another as part
  of the job. Here's an example of a sequence of steps which together
  implement a Map-Reduce job: * Read a collection of data from some source,
  parsing the collection's elements. * Validate the elements. * Apply a
  user-defined function to map each element to some value and extract an
  element-specific key value. * Group elements with the same key into a
  single element with that key, transforming a multiply-keyed collection into
  a uniquely-keyed collection. * Write the elements out to some data sink.
  Note that the Cloud Dataflow service may be used to run many different
  types of jobs, not just Map-Reduce.

  Messages:
    PropertiesValue: Named properties associated with the step. Each kind of
      predefined step has its own required set of properties. Must be provided
      on Create. Only retrieved with JOB_VIEW_ALL.

  Fields:
    kind: The kind of step in the Cloud Dataflow job.
    name: The name that identifies the step. This must be unique for each step
      with respect to all other steps in the Cloud Dataflow job.
    properties: Named properties associated with the step. Each kind of
      predefined step has its own required set of properties. Must be provided
      on Create. Only retrieved with JOB_VIEW_ALL.
  """

  # Unrecognized JSON keys are collected into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    r"""Named properties associated with the step. Each kind of predefined
    step has its own required set of properties. Must be provided on Create.
    Only retrieved with JOB_VIEW_ALL.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  kind = _messages.StringField(1)
  name = _messages.StringField(2)
  properties = _messages.MessageField('PropertiesValue', 3)
class StreamLocation(_messages.Message):
  r"""Describes a stream of data, either as input to be processed or as output
  of a streaming Dataflow job.

  Fields:
    customSourceLocation: The stream is a custom source.
    pubsubLocation: The stream is a pubsub stream.
    sideInputLocation: The stream is a streaming side input.
    streamingStageLocation: The stream is part of another computation within
      the current streaming Dataflow job.
  """

  # Exactly one of these locations is expected to be set per the API docs
  # (union-style message); presumably unset fields stay None — TODO confirm.
  customSourceLocation = _messages.MessageField('CustomSourceLocation', 1)
  pubsubLocation = _messages.MessageField('PubsubLocation', 2)
  sideInputLocation = _messages.MessageField('StreamingSideInputLocation', 3)
  streamingStageLocation = _messages.MessageField('StreamingStageLocation', 4)
class StreamingApplianceSnapshotConfig(_messages.Message):
  r"""Streaming appliance snapshot configuration.

  Fields:
    importStateEndpoint: Indicates which endpoint is used to import appliance
      state.
    snapshotId: If set, indicates the snapshot id for the snapshot being
      performed.
  """

  importStateEndpoint = _messages.StringField(1)
  snapshotId = _messages.StringField(2)
class StreamingComputationConfig(_messages.Message):
  r"""Configuration information for a single streaming computation.

  Messages:
    TransformUserNameToStateFamilyValue: Map from user name of stateful
      transforms in this stage to their state family.

  Fields:
    computationId: Unique identifier for this computation.
    instructions: Instructions that comprise the computation.
    stageName: Stage name of this computation.
    systemName: System defined name for this computation.
    transformUserNameToStateFamily: Map from user name of stateful transforms
      in this stage to their state family.
  """

  # Arbitrary string -> string map; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class TransformUserNameToStateFamilyValue(_messages.Message):
    r"""Map from user name of stateful transforms in this stage to their state
    family.

    Messages:
      AdditionalProperty: An additional property for a
        TransformUserNameToStateFamilyValue object.

    Fields:
      additionalProperties: Additional properties of type
        TransformUserNameToStateFamilyValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a TransformUserNameToStateFamilyValue
      object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # NOTE: generated code — field numbers are wire-format tags; do not renumber.
  computationId = _messages.StringField(1)
  instructions = _messages.MessageField('ParallelInstruction', 2, repeated=True)
  stageName = _messages.StringField(3)
  systemName = _messages.StringField(4)
  transformUserNameToStateFamily = _messages.MessageField('TransformUserNameToStateFamilyValue', 5)
class StreamingComputationRanges(_messages.Message):
  r"""Describes full or partial data disk assignment information of the
  computation ranges.

  Fields:
    computationId: The ID of the computation.
    rangeAssignments: Data disk assignments for ranges from this computation.
  """

  computationId = _messages.StringField(1)
  rangeAssignments = _messages.MessageField('KeyRangeDataDiskAssignment', 2, repeated=True)
class StreamingComputationTask(_messages.Message):
  r"""A task which describes what action should be performed for the specified
  streaming computation ranges.

  Enums:
    TaskTypeValueValuesEnum: A type of streaming computation task.

  Fields:
    computationRanges: Contains ranges of a streaming computation this task
      should apply to.
    dataDisks: Describes the set of data disks this task should apply to.
    taskType: A type of streaming computation task.
  """

  class TaskTypeValueValuesEnum(_messages.Enum):
    r"""A type of streaming computation task.

    Values:
      STREAMING_COMPUTATION_TASK_UNKNOWN: The streaming computation task is
        unknown, or unspecified.
      STREAMING_COMPUTATION_TASK_STOP: Stop processing specified streaming
        computation range(s).
      STREAMING_COMPUTATION_TASK_START: Start processing specified streaming
        computation range(s).
    """
    # Enum numbers mirror the API's proto values; do not reorder.
    STREAMING_COMPUTATION_TASK_UNKNOWN = 0
    STREAMING_COMPUTATION_TASK_STOP = 1
    STREAMING_COMPUTATION_TASK_START = 2

  computationRanges = _messages.MessageField('StreamingComputationRanges', 1, repeated=True)
  dataDisks = _messages.MessageField('MountedDataDisk', 2, repeated=True)
  taskType = _messages.EnumField('TaskTypeValueValuesEnum', 3)
class StreamingConfigTask(_messages.Message):
  r"""A task that carries configuration information for streaming
  computations.

  Messages:
    UserStepToStateFamilyNameMapValue: Map from user step names to state
      families.

  Fields:
    commitStreamChunkSizeBytes: Chunk size for commit streams from the harness
      to windmill.
    getDataStreamChunkSizeBytes: Chunk size for get data streams from the
      harness to windmill.
    maxWorkItemCommitBytes: Maximum size for work item commit supported
      windmill storage layer.
    streamingComputationConfigs: Set of computation configuration information.
    userStepToStateFamilyNameMap: Map from user step names to state families.
    windmillServiceEndpoint: If present, the worker must use this endpoint to
      communicate with Windmill Service dispatchers, otherwise the worker must
      continue to use whatever endpoint it had been using.
    windmillServicePort: If present, the worker must use this port to
      communicate with Windmill Service dispatchers. Only applicable when
      windmill_service_endpoint is specified.
  """

  # Arbitrary string -> string map; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserStepToStateFamilyNameMapValue(_messages.Message):
    r"""Map from user step names to state families.

    Messages:
      AdditionalProperty: An additional property for a
        UserStepToStateFamilyNameMapValue object.

    Fields:
      additionalProperties: Additional properties of type
        UserStepToStateFamilyNameMapValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserStepToStateFamilyNameMapValue
      object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # NOTE: generated code — field numbers are wire-format tags; do not renumber.
  commitStreamChunkSizeBytes = _messages.IntegerField(1)
  getDataStreamChunkSizeBytes = _messages.IntegerField(2)
  maxWorkItemCommitBytes = _messages.IntegerField(3)
  streamingComputationConfigs = _messages.MessageField('StreamingComputationConfig', 4, repeated=True)
  userStepToStateFamilyNameMap = _messages.MessageField('UserStepToStateFamilyNameMapValue', 5)
  windmillServiceEndpoint = _messages.StringField(6)
  windmillServicePort = _messages.IntegerField(7)
class StreamingSetupTask(_messages.Message):
  r"""A task which initializes part of a streaming Dataflow job.

  Fields:
    drain: The user has requested drain.
    receiveWorkPort: The TCP port on which the worker should listen for
      messages from other streaming computation workers.
    snapshotConfig: Configures streaming appliance snapshot.
    streamingComputationTopology: The global topology of the streaming
      Dataflow job.
    workerHarnessPort: The TCP port used by the worker to communicate with the
      Dataflow worker harness.
  """

  drain = _messages.BooleanField(1)
  # INT32 variant: ports fit in 32 bits (default IntegerField is int64).
  receiveWorkPort = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  snapshotConfig = _messages.MessageField('StreamingApplianceSnapshotConfig', 3)
  streamingComputationTopology = _messages.MessageField('TopologyConfig', 4)
  workerHarnessPort = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class StreamingSideInputLocation(_messages.Message):
  r"""Identifies the location of a streaming side input.

  Fields:
    stateFamily: Identifies the state family where this side input is stored.
    tag: Identifies the particular side input within the streaming Dataflow
      job.
  """

  stateFamily = _messages.StringField(1)
  tag = _messages.StringField(2)
class StreamingStageLocation(_messages.Message):
  r"""Identifies the location of a streaming computation stage, for stage-to-
  stage communication.

  Fields:
    streamId: Identifies the particular stream within the streaming Dataflow
      job.
  """

  streamId = _messages.StringField(1)
class StringList(_messages.Message):
  r"""A metric value representing a list of strings.

  Fields:
    elements: Elements of the list.
  """

  elements = _messages.StringField(1, repeated=True)
class StructuredMessage(_messages.Message):
  r"""A rich message format, including a human readable string, a key for
  identifying the message, and structured data associated with the message for
  programmatic consumption.

  Fields:
    messageKey: Identifier for this message type. Used by external systems to
      internationalize or personalize message.
    messageText: Human-readable version of message.
    parameters: The structured data associated with this message.
  """

  messageKey = _messages.StringField(1)
  messageText = _messages.StringField(2)
  parameters = _messages.MessageField('Parameter', 3, repeated=True)
class TaskRunnerSettings(_messages.Message):
  r"""Taskrunner configuration settings.

  Fields:
    alsologtostderr: Whether to also send taskrunner log info to stderr.
    baseTaskDir: The location on the worker for task-specific subdirectories.
    baseUrl: The base URL for the taskrunner to use when accessing Google
      Cloud APIs. When workers access Google Cloud APIs, they logically do so
      via relative URLs. If this field is specified, it supplies the base URL
      to use for resolving these relative URLs. The normative algorithm used
      is defined by RFC 1808, "Relative Uniform Resource Locators". If not
      specified, the default value is "http://www.googleapis.com/"
    commandlinesFileName: The file to store preprocessing commands in.
    continueOnException: Whether to continue taskrunner if an exception is
      hit.
    dataflowApiVersion: The API version of endpoint, e.g. "v1b3"
    harnessCommand: The command to launch the worker harness.
    languageHint: The suggested backend language.
    logDir: The directory on the VM to store logs.
    logToSerialconsole: Whether to send taskrunner log info to Google Compute
      Engine VM serial console.
    logUploadLocation: Indicates where to put logs. If this is not specified,
      the logs will not be uploaded. The supported resource type is: Google
      Cloud Storage: storage.googleapis.com/{bucket}/{object}
      bucket.storage.googleapis.com/{object}
    oauthScopes: The OAuth2 scopes to be requested by the taskrunner in order
      to access the Cloud Dataflow API.
    parallelWorkerSettings: The settings to pass to the parallel worker
      harness.
    streamingWorkerMainClass: The streaming worker main class name.
    taskGroup: The UNIX group ID on the worker VM to use for tasks launched by
      taskrunner; e.g. "wheel".
    taskUser: The UNIX user ID on the worker VM to use for tasks launched by
      taskrunner; e.g. "root".
    tempStoragePrefix: The prefix of the resources the taskrunner should use
      for temporary storage. The supported resource type is: Google Cloud
      Storage: storage.googleapis.com/{bucket}/{object}
      bucket.storage.googleapis.com/{object}
    vmId: The ID string of the VM.
    workflowFileName: The file to store the workflow in.
  """

  # NOTE: generated code — field numbers are wire-format tags; do not renumber.
  alsologtostderr = _messages.BooleanField(1)
  baseTaskDir = _messages.StringField(2)
  baseUrl = _messages.StringField(3)
  commandlinesFileName = _messages.StringField(4)
  continueOnException = _messages.BooleanField(5)
  dataflowApiVersion = _messages.StringField(6)
  harnessCommand = _messages.StringField(7)
  languageHint = _messages.StringField(8)
  logDir = _messages.StringField(9)
  logToSerialconsole = _messages.BooleanField(10)
  logUploadLocation = _messages.StringField(11)
  oauthScopes = _messages.StringField(12, repeated=True)
  parallelWorkerSettings = _messages.MessageField('WorkerSettings', 13)
  streamingWorkerMainClass = _messages.StringField(14)
  taskGroup = _messages.StringField(15)
  taskUser = _messages.StringField(16)
  tempStoragePrefix = _messages.StringField(17)
  vmId = _messages.StringField(18)
  workflowFileName = _messages.StringField(19)
class TemplateMetadata(_messages.Message):
  r"""Metadata describing a template.

  Fields:
    description: Optional. A description of the template.
    name: Required. The name of the template.
    parameters: The parameters for the template.
  """

  description = _messages.StringField(1)
  name = _messages.StringField(2)
  parameters = _messages.MessageField('ParameterMetadata', 3, repeated=True)
class TopologyConfig(_messages.Message):
  r"""Global topology of the streaming Dataflow job, including all
  computations and their sharded locations.

  Messages:
    UserStageToComputationNameMapValue: Maps user stage names to stable
      computation names.

  Fields:
    computations: The computations associated with a streaming Dataflow job.
    dataDiskAssignments: The disks assigned to a streaming Dataflow job.
    forwardingKeyBits: The size (in bits) of keys that will be assigned to
      source messages.
    persistentStateVersion: Version number for persistent state.
    userStageToComputationNameMap: Maps user stage names to stable computation
      names.
  """

  # Arbitrary string -> string map; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class UserStageToComputationNameMapValue(_messages.Message):
    r"""Maps user stage names to stable computation names.

    Messages:
      AdditionalProperty: An additional property for a
        UserStageToComputationNameMapValue object.

    Fields:
      additionalProperties: Additional properties of type
        UserStageToComputationNameMapValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a UserStageToComputationNameMapValue
      object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  computations = _messages.MessageField('ComputationTopology', 1, repeated=True)
  dataDiskAssignments = _messages.MessageField('DataDiskAssignment', 2, repeated=True)
  forwardingKeyBits = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  persistentStateVersion = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  userStageToComputationNameMap = _messages.MessageField('UserStageToComputationNameMapValue', 5)
class TransformSummary(_messages.Message):
  r"""Description of the type, names/ids, and input/outputs for a transform.

  Enums:
    KindValueValuesEnum: Type of transform.

  Fields:
    displayData: Transform-specific display data.
    id: SDK generated id of this transform instance.
    inputCollectionName: User names for all collection inputs to this
      transform.
    kind: Type of transform.
    name: User provided name for this transform instance.
    outputCollectionName: User names for all collection outputs to this
      transform.
  """

  class KindValueValuesEnum(_messages.Enum):
    r"""Type of transform.

    Values:
      UNKNOWN_KIND: Unrecognized transform type.
      PAR_DO_KIND: ParDo transform.
      GROUP_BY_KEY_KIND: Group By Key transform.
      FLATTEN_KIND: Flatten transform.
      READ_KIND: Read transform.
      WRITE_KIND: Write transform.
      CONSTANT_KIND: Constructs from a constant value, such as with Create.of.
      SINGLETON_KIND: Creates a Singleton view of a collection.
      SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a
        GroupByKey.
    """
    # Enum numbers mirror the API's proto values; do not reorder.
    UNKNOWN_KIND = 0
    PAR_DO_KIND = 1
    GROUP_BY_KEY_KIND = 2
    FLATTEN_KIND = 3
    READ_KIND = 4
    WRITE_KIND = 5
    CONSTANT_KIND = 6
    SINGLETON_KIND = 7
    SHUFFLE_KIND = 8

  displayData = _messages.MessageField('DisplayData', 1, repeated=True)
  id = _messages.StringField(2)
  inputCollectionName = _messages.StringField(3, repeated=True)
  kind = _messages.EnumField('KindValueValuesEnum', 4)
  name = _messages.StringField(5)
  outputCollectionName = _messages.StringField(6, repeated=True)
class ValidateResponse(_messages.Message):
  r"""Response to the validation request.

  Fields:
    errorMessage: Will be empty if validation succeeds.
  """

  errorMessage = _messages.StringField(1)
class WorkItem(_messages.Message):
  r"""WorkItem represents basic information about a WorkItem to be executed in
  the cloud.

  Fields:
    configuration: Work item-specific configuration as an opaque blob.
    id: Identifies this WorkItem.
    initialReportIndex: The initial index to use when reporting the status of
      the WorkItem.
    jobId: Identifies the workflow job this WorkItem belongs to.
    leaseExpireTime: Time when the lease on this Work will expire.
    mapTask: Additional information for MapTask WorkItems.
    packages: Any required packages that need to be fetched in order to
      execute this WorkItem.
    projectId: Identifies the cloud project this WorkItem belongs to.
    reportStatusInterval: Recommended reporting interval.
    seqMapTask: Additional information for SeqMapTask WorkItems.
    shellTask: Additional information for ShellTask WorkItems.
    sourceOperationTask: Additional information for source operation
      WorkItems.
    streamingComputationTask: Additional information for
      StreamingComputationTask WorkItems.
    streamingConfigTask: Additional information for StreamingConfigTask
      WorkItems.
    streamingSetupTask: Additional information for StreamingSetupTask
      WorkItems.
  """

  # NOTE: generated code — field numbers are wire-format tags; do not renumber.
  configuration = _messages.StringField(1)
  id = _messages.IntegerField(2)
  initialReportIndex = _messages.IntegerField(3)
  jobId = _messages.StringField(4)
  leaseExpireTime = _messages.StringField(5)
  mapTask = _messages.MessageField('MapTask', 6)
  packages = _messages.MessageField('Package', 7, repeated=True)
  projectId = _messages.StringField(8)
  reportStatusInterval = _messages.StringField(9)
  seqMapTask = _messages.MessageField('SeqMapTask', 10)
  shellTask = _messages.MessageField('ShellTask', 11)
  sourceOperationTask = _messages.MessageField('SourceOperationRequest', 12)
  streamingComputationTask = _messages.MessageField('StreamingComputationTask', 13)
  streamingConfigTask = _messages.MessageField('StreamingConfigTask', 14)
  streamingSetupTask = _messages.MessageField('StreamingSetupTask', 15)
class WorkItemServiceState(_messages.Message):
  r"""The Dataflow service's idea of the current state of a WorkItem being
  processed by a worker.

  Messages:
    HarnessDataValue: Other data returned by the service, specific to the
      particular worker harness.

  Fields:
    harnessData: Other data returned by the service, specific to the
      particular worker harness.
    hotKeyDetection: A hot key is a symptom of poor data distribution in which
      there are enough elements mapped to a single key to impact pipeline
      performance. When present, this field includes metadata associated with
      any hot key.
    leaseExpireTime: Time at which the current lease will expire.
    metricShortId: The short ids that workers should use in subsequent metric
      updates. Workers should strive to use short ids whenever possible, but
      it is ok to request the short_id again if a worker lost track of it
      (e.g. if the worker is recovering from a crash). NOTE: it is possible
      that the response may have short ids for a subset of the metrics.
    nextReportIndex: The index value to use for the next report sent by the
      worker. Note: If the report call fails for whatever reason, the worker
      should reuse this index for subsequent report attempts.
    reportStatusInterval: New recommended reporting interval.
    splitRequest: The progress point in the WorkItem where the Dataflow
      service suggests that the worker truncate the task.
    suggestedStopPoint: DEPRECATED in favor of split_request.
    suggestedStopPosition: Obsolete, always empty.
  """

  # Arbitrary string -> JsonValue map; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class HarnessDataValue(_messages.Message):
    r"""Other data returned by the service, specific to the particular worker
    harness.

    Messages:
      AdditionalProperty: An additional property for a HarnessDataValue
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HarnessDataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  harnessData = _messages.MessageField('HarnessDataValue', 1)
  hotKeyDetection = _messages.MessageField('HotKeyDetection', 2)
  leaseExpireTime = _messages.StringField(3)
  metricShortId = _messages.MessageField('MetricShortId', 4, repeated=True)
  nextReportIndex = _messages.IntegerField(5)
  reportStatusInterval = _messages.StringField(6)
  splitRequest = _messages.MessageField('ApproximateSplitRequest', 7)
  # Deprecated/obsolete fields kept for wire compatibility (see docstring).
  suggestedStopPoint = _messages.MessageField('ApproximateProgress', 8)
  suggestedStopPosition = _messages.MessageField('Position', 9)
class WorkItemStatus(_messages.Message):
  r"""Conveys a worker's progress through the work described by a WorkItem.

  Fields:
    completed: True if the WorkItem was completed (successfully or
      unsuccessfully).
    counterUpdates: Worker output counters for this WorkItem.
    dynamicSourceSplit: See documentation of stop_position.
    errors: Specifies errors which occurred during processing. If errors are
      provided, and completed = true, then the WorkItem is considered to have
      failed.
    metricUpdates: DEPRECATED in favor of counter_updates.
    progress: DEPRECATED in favor of reported_progress.
    reportIndex: The report index. When a WorkItem is leased, the lease will
      contain an initial report index. When a WorkItem's status is reported
      to the system, the report should be sent with that report index, and the
      response will contain the index the worker should use for the next
      report. Reports received with unexpected index values will be rejected
      by the service. In order to preserve idempotency, the worker should not
      alter the contents of a report, even if the worker must submit the same
      report multiple times before getting back a response. The worker should
      not submit a subsequent report until the response for the previous
      report had been received from the service.
    reportedProgress: The worker's progress through this WorkItem.
    requestedLeaseDuration: Amount of time the worker requests for its lease.
    sourceFork: DEPRECATED in favor of dynamic_source_split.
    sourceOperationResponse: If the work item represented a
      SourceOperationRequest, and the work is completed, contains the result
      of the operation.
    stopPosition: A worker may split an active map task in two parts,
      "primary" and "residual", continuing to process the primary part and
      returning the residual part into the pool of available work. This event
      is called a "dynamic split" and is critical to the dynamic work
      rebalancing feature. The two obtained sub-tasks are called "parts" of
      the split. The parts, if concatenated, must represent the same input as
      would be read by the current task if the split did not happen. The exact
      way in which the original task is decomposed into the two parts is
      specified either as a position demarcating them (stop_position), or
      explicitly as two DerivedSources, if this task consumes a user-defined
      source type (dynamic_source_split). The "current" task is adjusted as a
      result of the split: after a task with range [A, B) sends a
      stop_position update at C, its range is considered to be [A, C), e.g.: *
      Progress should be interpreted relative to the new range, e.g. "75%
      completed" means "75% of [A, C) completed" * The worker should interpret
      proposed_stop_position relative to the new range, e.g. "split at 68%"
      should be interpreted as "split at 68% of [A, C)". * If the worker
      chooses to split again using stop_position, only stop_positions in [A,
      C) will be accepted. * Etc. dynamic_source_split has similar semantics:
      e.g., if a task with source S splits using dynamic_source_split into {P,
      R} (where P and R must be together equivalent to S), then subsequent
      progress and proposed_stop_position should be interpreted relative to P,
      and in a potential subsequent dynamic_source_split into {P', R'}, P' and
      R' must be together equivalent to P, etc.
    totalThrottlerWaitTimeSeconds: Total time the worker spent being throttled
      by external systems.
    workItemId: Identifies the WorkItem.
  """

  # NOTE: generated code — field numbers are wire-format tags; do not renumber.
  completed = _messages.BooleanField(1)
  counterUpdates = _messages.MessageField('CounterUpdate', 2, repeated=True)
  dynamicSourceSplit = _messages.MessageField('DynamicSourceSplit', 3)
  errors = _messages.MessageField('Status', 4, repeated=True)
  metricUpdates = _messages.MessageField('MetricUpdate', 5, repeated=True)
  progress = _messages.MessageField('ApproximateProgress', 6)
  reportIndex = _messages.IntegerField(7)
  reportedProgress = _messages.MessageField('ApproximateReportedProgress', 8)
  requestedLeaseDuration = _messages.StringField(9)
  sourceFork = _messages.MessageField('SourceFork', 10)
  sourceOperationResponse = _messages.MessageField('SourceOperationResponse', 11)
  stopPosition = _messages.MessageField('Position', 12)
  totalThrottlerWaitTimeSeconds = _messages.FloatField(13)
  workItemId = _messages.StringField(14)
class WorkerHealthReport(_messages.Message):
  r"""WorkerHealthReport contains information about the health of a worker.
  The VM should be identified by the labels attached to the WorkerMessage that
  this health ping belongs to.

  Messages:
    PodsValueListEntry: A PodsValueListEntry object.

  Fields:
    msg: A message describing any unusual health reports.
    pods: The pods running on the worker. See: http://kubernetes.io/v1.1/docs
      /api-reference/v1/definitions.html#_v1_pod This field is used by the
      worker to send the status of the indvidual containers running on each
      worker.
    reportInterval: The interval at which the worker is sending health
      reports. The default value of 0 should be interpreted as the field is
      not being explicitly set by the worker.
    vmIsBroken: Whether the VM is in a permanently broken state. Broken VMs
      should be abandoned or deleted ASAP to avoid assigning or completing any
      work.
    vmIsHealthy: Whether the VM is currently healthy.
    vmStartupTime: The time the VM was booted.
  """

  # Arbitrary string -> JsonValue object; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class PodsValueListEntry(_messages.Message):
    r"""A PodsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a PodsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PodsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  msg = _messages.StringField(1)
  pods = _messages.MessageField('PodsValueListEntry', 2, repeated=True)
  reportInterval = _messages.StringField(3)
  vmIsBroken = _messages.BooleanField(4)
  vmIsHealthy = _messages.BooleanField(5)
  vmStartupTime = _messages.StringField(6)
class WorkerHealthReportResponse(_messages.Message):
  r"""WorkerHealthReportResponse contains information returned to the worker
  in response to a health ping.

  Fields:
    reportInterval: A positive value indicates the worker should change its
      reporting interval to the specified value. The default value of zero
      means no change in report rate is requested by the server.
  """

  reportInterval = _messages.StringField(1)
class WorkerLifecycleEvent(_messages.Message):
  r"""A report of an event in a worker's lifecycle. The proto contains one
  event, because the worker is expected to asynchronously send each message
  immediately after the event. Due to this asynchrony, messages may arrive out
  of order (or missing), and it is up to the consumer to interpret. The
  timestamp of the event is in the enclosing WorkerMessage proto.

  Enums:
    EventValueValuesEnum: The event being reported.

  Messages:
    MetadataValue: Other stats that can accompany an event. E.g. {
      "downloaded_bytes" : "123456" }

  Fields:
    containerStartTime: The start time of this container. All events will
      report this so that events can be grouped together across container/VM
      restarts.
    event: The event being reported.
    metadata: Other stats that can accompany an event. E.g. {
      "downloaded_bytes" : "123456" }
  """

  class EventValueValuesEnum(_messages.Enum):
    r"""The event being reported.

    Values:
      UNKNOWN_EVENT: Invalid event.
      OS_START: The time the VM started.
      CONTAINER_START: Our container code starts running. Multiple containers
        could be distinguished with WorkerMessage.labels if desired.
      NETWORK_UP: The worker has a functional external network connection.
      STAGING_FILES_DOWNLOAD_START: Started downloading staging files.
      STAGING_FILES_DOWNLOAD_FINISH: Finished downloading all staging files.
      SDK_INSTALL_START: For applicable SDKs, started installation of SDK and
        worker packages.
      SDK_INSTALL_FINISH: Finished installing SDK.
    """
    # Enum numbers mirror the API's proto values; do not reorder.
    UNKNOWN_EVENT = 0
    OS_START = 1
    CONTAINER_START = 2
    NETWORK_UP = 3
    STAGING_FILES_DOWNLOAD_START = 4
    STAGING_FILES_DOWNLOAD_FINISH = 5
    SDK_INSTALL_START = 6
    SDK_INSTALL_FINISH = 7

  # Arbitrary string -> string map; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Other stats that can accompany an event. E.g. { "downloaded_bytes" :
    "123456" }

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Additional properties of type MetadataValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  containerStartTime = _messages.StringField(1)
  event = _messages.EnumField('EventValueValuesEnum', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
class WorkerMessage(_messages.Message):
  r"""WorkerMessage provides information to the backend about a worker.

  Messages:
    LabelsValue: Labels are used to group WorkerMessages. For example, a
      worker_message about a particular container might have the labels: {
      "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015..."
      "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags
      typically correspond to Label enum values. However, for ease of
      development other strings can be used as tags. LABEL_UNSPECIFIED should
      not be used here.

  Fields:
    labels: Labels are used to group WorkerMessages. For example, a
      worker_message about a particular container might have the labels: {
      "JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015..."
      "CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags
      typically correspond to Label enum values. However, for ease of
      development other strings can be used as tags. LABEL_UNSPECIFIED should
      not be used here.
    time: The timestamp of the worker_message.
    workerHealthReport: The health of a worker.
    workerLifecycleEvent: Record of worker lifecycle events.
    workerMessageCode: A worker message code.
    workerMetrics: Resource metrics reported by workers.
    workerShutdownNotice: Shutdown notice by workers.
  """

  # Arbitrary string -> string map; unknown JSON keys land in
  # `additionalProperties` via the decorator.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Labels are used to group WorkerMessages. For example, a worker_message
    about a particular container might have the labels: { "JOB_ID":
    "2015-04-22", "WORKER_ID": "wordcount-vm-2015..." "CONTAINER_TYPE":
    "worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond
    to Label enum values. However, for ease of development other strings can
    be used as tags. LABEL_UNSPECIFIED should not be used here.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  labels = _messages.MessageField('LabelsValue', 1)
  time = _messages.StringField(2)
  workerHealthReport = _messages.MessageField('WorkerHealthReport', 3)
  workerLifecycleEvent = _messages.MessageField('WorkerLifecycleEvent', 4)
  workerMessageCode = _messages.MessageField('WorkerMessageCode', 5)
  workerMetrics = _messages.MessageField('ResourceUtilizationReport', 6)
  workerShutdownNotice = _messages.MessageField('WorkerShutdownNotice', 7)
class WorkerMessageCode(_messages.Message):
  r"""A message code is used to report status and error messages to the
  service. The message codes are intended to be machine readable. The service
  will take care of translating these into user understandable messages if
  necessary. Example use cases: 1. Worker processes reporting successful
  startup. 2. Worker processes reporting specific errors (e.g. package
  staging failure).

  Messages:
    ParametersValue: Parameters contains specific information about the code.
      This is a struct to allow parameters of different types. Examples: 1.
      For a "HARNESS_STARTED" message parameters might provide the name of
      the worker and additional data like timing information. 2. For a
      "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
      objects being downloaded and fields containing errors. In general
      complex data structures should be avoided. If a worker needs to send a
      specific and complicated data structure then please consider defining a
      new proto and adding it to the data oneof in WorkerMessageResponse.
      Conventions: Parameters should only be used for information that isn't
      typically passed as a label. hostname and other worker identifiers
      should almost always be passed as labels since they will be included on
      most messages.

  Fields:
    code: The code is a string intended for consumption by a machine that
      identifies the type of message being sent. Examples: 1.
      "HARNESS_STARTED" might be used to indicate the worker harness has
      started. 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error
      downloading a GCS file as part of the boot process of one of the
      worker containers. This is a string and not an enum to make it easy to
      add new codes without waiting for an API change.
    parameters: Parameters contains specific information about the code. This
      is a struct to allow parameters of different types. Examples: 1. For a
      "HARNESS_STARTED" message parameters might provide the name of the
      worker and additional data like timing information. 2. For a
      "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
      objects being downloaded and fields containing errors. In general
      complex data structures should be avoided. If a worker needs to send a
      specific and complicated data structure then please consider defining a
      new proto and adding it to the data oneof in WorkerMessageResponse.
      Conventions: Parameters should only be used for information that isn't
      typically passed as a label. hostname and other worker identifiers
      should almost always be passed as labels since they will be included on
      most messages.
  """

  # NOTE(review): auto-generated apitools message class — regenerate rather
  # than hand-edit; field numbers are protobuf wire tags.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParametersValue(_messages.Message):
    r"""Parameters contains specific information about the code. This is a
    struct to allow parameters of different types. Examples: 1. For a
    "HARNESS_STARTED" message parameters might provide the name of the
    worker and additional data like timing information. 2. For a
    "GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
    objects being downloaded and fields containing errors. In general complex
    data structures should be avoided. If a worker needs to send a specific
    and complicated data structure then please consider defining a new proto
    and adding it to the data oneof in WorkerMessageResponse. Conventions:
    Parameters should only be used for information that isn't typically passed
    as a label. hostname and other worker identifiers should almost always be
    passed as labels since they will be included on most messages.

    Messages:
      AdditionalProperty: An additional property for a ParametersValue object.

    Fields:
      additionalProperties: Properties of the object.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ParametersValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.StringField(1)
  parameters = _messages.MessageField('ParametersValue', 2)
class WorkerMessageResponse(_messages.Message):
  r"""A worker_message response allows the server to pass information to the
  sender.

  Fields:
    workerHealthReportResponse: The service's response to a worker's health
      report.
    workerMetricsResponse: Service's response to reporting worker metrics
      (currently empty).
    workerShutdownNoticeResponse: Service's response to shutdown notice
      (currently empty).
  """

  # Field numbers are protobuf wire tags — do not renumber.
  workerHealthReportResponse = _messages.MessageField('WorkerHealthReportResponse', 1)
  workerMetricsResponse = _messages.MessageField('ResourceUtilizationReportResponse', 2)
  workerShutdownNoticeResponse = _messages.MessageField('WorkerShutdownNoticeResponse', 3)
class WorkerPool(_messages.Message):
  r"""Describes one particular pool of Cloud Dataflow workers to be
  instantiated by the Cloud Dataflow service in order to perform the
  computations required by a job. Note that a workflow job may use multiple
  pools, in order to match the various computational requirements of the
  various stages of the job.

  Enums:
    DefaultPackageSetValueValuesEnum: The default package set to install.
      This allows the service to select a default set of packages which are
      useful to worker harnesses written in a particular language.
    IpConfigurationValueValuesEnum: Configuration for VM IPs.
    TeardownPolicyValueValuesEnum: Sets the policy for determining when to
      turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`,
      `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means
      workers are always torn down regardless of whether the job succeeds.
      `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds.
      `TEARDOWN_NEVER` means the workers are never torn down. If the workers
      are not torn down by the service, they will continue to run and use
      Google Compute Engine VM resources in the user's project until they are
      explicitly terminated by the user. Because of this, Google recommends
      using the `TEARDOWN_ALWAYS` policy except for small, manually supervised
      test jobs. If unknown or unspecified, the service will attempt to
      choose a reasonable default.

  Messages:
    MetadataValue: Metadata to set on the Google Compute Engine VMs.
    PoolArgsValue: Extra arguments for this worker pool.

  Fields:
    autoscalingSettings: Settings for autoscaling of this WorkerPool.
    dataDisks: Data disks that are used by a VM in this workflow.
    defaultPackageSet: The default package set to install. This allows the
      service to select a default set of packages which are useful to worker
      harnesses written in a particular language.
    diskSizeGb: Size of root disk for VMs, in GB. If zero or unspecified, the
      service will attempt to choose a reasonable default.
    diskSourceImage: Fully qualified source image for disks.
    diskType: Type of root disk for VMs. If empty or unspecified, the service
      will attempt to choose a reasonable default.
    ipConfiguration: Configuration for VM IPs.
    kind: The kind of the worker pool; currently only `harness` and `shuffle`
      are supported.
    machineType: Machine type (e.g. "n1-standard-1"). If empty or
      unspecified, the service will attempt to choose a reasonable default.
    metadata: Metadata to set on the Google Compute Engine VMs.
    network: Network to which VMs will be assigned. If empty or unspecified,
      the service will use the network "default".
    numThreadsPerWorker: The number of threads per worker harness. If empty or
      unspecified, the service will choose a number of threads (according to
      the number of cores on the selected machine type for batch, or 1 by
      convention for streaming).
    numWorkers: Number of Google Compute Engine workers in this pool needed to
      execute the job. If zero or unspecified, the service will attempt to
      choose a reasonable default.
    onHostMaintenance: The action to take on host maintenance, as defined by
      the Google Compute Engine API.
    packages: Packages to be installed on workers.
    poolArgs: Extra arguments for this worker pool.
    subnetwork: Subnetwork to which VMs will be assigned, if desired.
      Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
    taskrunnerSettings: Settings passed through to Google Compute Engine
      workers when using the standard Dataflow task runner. Users should
      ignore this field.
    teardownPolicy: Sets the policy for determining when to turndown worker
      pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
      `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down
      regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means
      workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the
      workers are never torn down. If the workers are not torn down by the
      service, they will continue to run and use Google Compute Engine VM
      resources in the user's project until they are explicitly terminated by
      the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS`
      policy except for small, manually supervised test jobs. If unknown or
      unspecified, the service will attempt to choose a reasonable default.
    workerHarnessContainerImage: Required. Docker container image that
      executes the Cloud Dataflow worker harness, residing in Google Container
      Registry.
    zone: Zone to run the worker pools in. If empty or unspecified, the
      service will attempt to choose a reasonable default.
  """

  # NOTE(review): auto-generated apitools message class — regenerate rather
  # than hand-edit; field numbers are protobuf wire tags.
  class DefaultPackageSetValueValuesEnum(_messages.Enum):
    r"""The default package set to install. This allows the service to select
    a default set of packages which are useful to worker harnesses written in
    a particular language.

    Values:
      DEFAULT_PACKAGE_SET_UNKNOWN: The default set of packages to stage is
        unknown, or unspecified.
      DEFAULT_PACKAGE_SET_NONE: Indicates that no packages should be staged at
        the worker unless explicitly specified by the job.
      DEFAULT_PACKAGE_SET_JAVA: Stage packages typically useful to workers
        written in Java.
      DEFAULT_PACKAGE_SET_PYTHON: Stage packages typically useful to workers
        written in Python.
    """
    DEFAULT_PACKAGE_SET_UNKNOWN = 0
    DEFAULT_PACKAGE_SET_NONE = 1
    DEFAULT_PACKAGE_SET_JAVA = 2
    DEFAULT_PACKAGE_SET_PYTHON = 3

  class IpConfigurationValueValuesEnum(_messages.Enum):
    r"""Configuration for VM IPs.

    Values:
      WORKER_IP_UNSPECIFIED: The configuration is unknown, or unspecified.
      WORKER_IP_PUBLIC: Workers should have public IP addresses.
      WORKER_IP_PRIVATE: Workers should have private IP addresses.
    """
    WORKER_IP_UNSPECIFIED = 0
    WORKER_IP_PUBLIC = 1
    WORKER_IP_PRIVATE = 2

  class TeardownPolicyValueValuesEnum(_messages.Enum):
    r"""Sets the policy for determining when to turndown worker pool. Allowed
    values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
    `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down
    regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means
    workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the
    workers are never torn down. If the workers are not torn down by the
    service, they will continue to run and use Google Compute Engine VM
    resources in the user's project until they are explicitly terminated by
    the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS`
    policy except for small, manually supervised test jobs. If unknown or
    unspecified, the service will attempt to choose a reasonable default.

    Values:
      TEARDOWN_POLICY_UNKNOWN: The teardown policy isn't specified, or is
        unknown.
      TEARDOWN_ALWAYS: Always teardown the resource.
      TEARDOWN_ON_SUCCESS: Teardown the resource on success. This is useful
        for debugging failures.
      TEARDOWN_NEVER: Never teardown the resource. This is useful for
        debugging and development.
    """
    TEARDOWN_POLICY_UNKNOWN = 0
    TEARDOWN_ALWAYS = 1
    TEARDOWN_ON_SUCCESS = 2
    TEARDOWN_NEVER = 3

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Metadata to set on the Google Compute Engine VMs.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Additional properties of type MetadataValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class PoolArgsValue(_messages.Message):
    r"""Extra arguments for this worker pool.

    Messages:
      AdditionalProperty: An additional property for a PoolArgsValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a PoolArgsValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  autoscalingSettings = _messages.MessageField('AutoscalingSettings', 1)
  dataDisks = _messages.MessageField('Disk', 2, repeated=True)
  defaultPackageSet = _messages.EnumField('DefaultPackageSetValueValuesEnum', 3)
  diskSizeGb = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  diskSourceImage = _messages.StringField(5)
  diskType = _messages.StringField(6)
  ipConfiguration = _messages.EnumField('IpConfigurationValueValuesEnum', 7)
  kind = _messages.StringField(8)
  machineType = _messages.StringField(9)
  metadata = _messages.MessageField('MetadataValue', 10)
  network = _messages.StringField(11)
  numThreadsPerWorker = _messages.IntegerField(12, variant=_messages.Variant.INT32)
  numWorkers = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  onHostMaintenance = _messages.StringField(14)
  packages = _messages.MessageField('Package', 15, repeated=True)
  poolArgs = _messages.MessageField('PoolArgsValue', 16)
  subnetwork = _messages.StringField(17)
  taskrunnerSettings = _messages.MessageField('TaskRunnerSettings', 18)
  teardownPolicy = _messages.EnumField('TeardownPolicyValueValuesEnum', 19)
  workerHarnessContainerImage = _messages.StringField(20)
  zone = _messages.StringField(21)
class WorkerSettings(_messages.Message):
  r"""Provides data to pass through to the worker harness.

  Fields:
    baseUrl: The base URL for accessing Google Cloud APIs. When workers
      access Google Cloud APIs, they logically do so via relative URLs. If
      this field is specified, it supplies the base URL to use for resolving
      these relative URLs. The normative algorithm used is defined by RFC
      1808, "Relative Uniform Resource Locators". If not specified, the
      default value is "http://www.googleapis.com/"
    reportingEnabled: Whether to send work progress updates to the service.
    servicePath: The Cloud Dataflow service path relative to the root URL, for
      example, "dataflow/v1b3/projects".
    shuffleServicePath: The Shuffle service path relative to the root URL, for
      example, "shuffle/v1beta1".
    tempStoragePrefix: The prefix of the resources the system should use for
      temporary storage. The supported resource type is: Google Cloud
      Storage: storage.googleapis.com/{bucket}/{object}
      bucket.storage.googleapis.com/{object}
    workerId: The ID of the worker running this pipeline.
  """

  # Field numbers are protobuf wire tags — do not renumber.
  baseUrl = _messages.StringField(1)
  reportingEnabled = _messages.BooleanField(2)
  servicePath = _messages.StringField(3)
  shuffleServicePath = _messages.StringField(4)
  tempStoragePrefix = _messages.StringField(5)
  workerId = _messages.StringField(6)
class WorkerShutdownNotice(_messages.Message):
  r"""Shutdown notification from workers. This is to be sent by the shutdown
  script of the worker VM so that the backend knows that the VM is being shut
  down.

  Fields:
    reason: The reason for the worker shutdown. Current possible values are:
      "UNKNOWN": shutdown reason is unknown. "PREEMPTION": shutdown reason
      is preemption. Other possible reasons may be added in the future.
  """

  # Free-form string (not an enum) so new reasons can be added server-side.
  reason = _messages.StringField(1)
class WorkerShutdownNoticeResponse(_messages.Message):
  r"""Service-side response to WorkerMessage issuing shutdown notice."""
  # Intentionally empty message: the response carries no payload.
class WriteInstruction(_messages.Message):
  r"""An instruction that writes records. Takes one input, produces no
  outputs.

  Fields:
    input: The input.
    sink: The sink to write to.
  """

  input = _messages.MessageField('InstructionInput', 1)
  sink = _messages.MessageField('Sink', 2)
# Map the reserved JSON query parameter "$.xgafv" and its numeric enum values
# ("1"/"2" are not valid Python identifiers) onto the mangled Python names.
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| {
"content_hash": "c7871b2c62baf79170d2dfdccdc631ab",
"timestamp": "",
"source": "github",
"line_count": 5710,
"max_line_length": 102,
"avg_line_length": 39.65464098073555,
"alnum_prop": 0.7327627325242461,
"repo_name": "RyanSkraba/beam",
"id": "9a76e8d1c8bb5ff64dd3205b2ca4b396c37cd15c",
"size": "227213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
from typing import Optional
from appium.options.common.supports_capabilities import SupportsCapabilities
# Wire name of the capability managed by this mixin.
KEYCHAIN_PASSWORD = 'keychainPassword'


class KeychainPasswordOption(SupportsCapabilities):
    """Mixin exposing the ``keychainPassword`` capability as a typed property."""

    @property
    def keychain_password(self) -> Optional[str]:
        """
        Custom keychain password.

        Returns None when the capability has not been set.
        """
        return self.get_capability(KEYCHAIN_PASSWORD)

    @keychain_password.setter
    def keychain_password(self, value: str) -> None:
        """
        Custom keychain password. The keychain is expected to
        contain the private development key.
        """
        self.set_capability(KEYCHAIN_PASSWORD, value)
| {
"content_hash": "1add367d0486b9a8957912481e059028",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 29.318181818181817,
"alnum_prop": 0.6914728682170542,
"repo_name": "appium/python-client",
"id": "5514519f09804ee2588d15581d2c4e3b3568abf2",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appium/options/ios/xcuitest/wda/keychain_password_option.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "835"
},
{
"name": "Python",
"bytes": "801497"
},
{
"name": "Shell",
"bytes": "3195"
}
],
"symlink_target": ""
} |
# :Author: a Pygments author|contributor; Felix Wiemann; Guenter Milde
# :Date: $Date: 2012-02-28 21:07:21 -0300 (Tue, 28 Feb 2012) $
# :Copyright: This module has been placed in the public domain.
#
# This is a merge of `Using Pygments in ReST documents`_ from the pygments_
# documentation, and a `proof of concept`_ by Felix Wiemann.
#
# ========== ===========================================================
# 2007-06-01 Removed redundancy from class values.
# 2007-06-04 Merge of successive tokens of same type
# (code taken from pygments.formatters.others).
# 2007-06-05 Separate docutils formatter script
# Use pygments' CSS class names (like the html formatter)
# allowing the use of pygments-produced style sheets.
# 2007-06-07 Merge in the formatting of the parsed tokens
# (misnamed as docutils_formatter) as class DocutilsInterface
# 2007-06-08 Failsafe implementation (fallback to a standard literal block
# if pygments not found)
# ========== ===========================================================
#
# ::
"""Define and register a code-block directive using pygments"""
# Requirements
# ------------
# ::
import codecs

from docutils import nodes
from docutils.parsers.rst import directives

# Pygments is optional: the module must still import without it, and the
# directive is intended to degrade to a plain literal block (see the
# "Failsave implementation" note in the file header).
try:
    import pygments
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters.html import _get_ttype_class
except ImportError:
    pass

from log import log  # project-local logger

# Customisation
# -------------
#
# Do not insert inline nodes for the following tokens.
# (You could add e.g. Token.Punctuation like ``['', 'p']``.) ::

unstyled_tokens = ['']
# DocutilsInterface
# -----------------
#
# This interface class combines code from
# pygments.formatters.html and pygments.formatters.others.
#
# It does not require anything of docutils and could also become a part of
# pygments::
class DocutilsInterface(object):
    """Parse `code` string and yield "classified" tokens.

    Arguments

      code        -- string of source code to parse
      language    -- formal language the code is written in
      custom_args -- extra keyword arguments forwarded to the pygments lexer

    Merge subsequent tokens of the same token-type.

    Yields the tokens as ``(ttype_class, value)`` tuples,
    where ttype_class is taken from pygments.token.STANDARD_TYPES and
    corresponds to the class argument used in pygments html output.
    """

    def __init__(self, code, language, custom_args=None):
        # Fix: ``custom_args`` previously defaulted to a shared mutable dict
        # ({}), so all instances created without the argument aliased one
        # object.  Use None as the sentinel instead.
        self.code = code
        self.language = language
        self.custom_args = custom_args if custom_args is not None else {}

    def lex(self):
        """Return the raw pygments token stream for ``self.code``."""
        # Get lexer for language (use text as fallback).
        # str() replaces the Python-2-only unicode() call; language names are
        # plain ASCII identifiers, so behaviour is unchanged.
        try:
            if self.language and str(self.language).lower() != 'none':
                lexer = get_lexer_by_name(self.language.lower(),
                                          **self.custom_args)
            else:
                lexer = get_lexer_by_name('text', **self.custom_args)
        except ValueError:
            log.info("no pygments lexer for %s, using 'text'"
                     % self.language)
            lexer = get_lexer_by_name('text')
        return pygments.lex(self.code, lexer)

    def join(self, tokens):
        """Join subsequent tokens of same token-type."""
        tokens = iter(tokens)
        try:
            # next() replaces the Python-2-only tokens.next().
            (lasttype, lastval) = next(tokens)
        except StopIteration:
            # Empty stream: yield nothing instead of leaking StopIteration
            # out of a generator (a RuntimeError under PEP 479).
            return
        for ttype, value in tokens:
            if ttype is lasttype:
                lastval += value
            else:
                yield (lasttype, lastval)
                (lasttype, lastval) = (ttype, value)
        yield (lasttype, lastval)

    def __iter__(self):
        """Parse code string and yield "classified" tokens."""
        try:
            tokens = self.lex()
        except (IOError, NameError):
            # NameError is raised when the optional pygments import failed
            # (get_lexer_by_name is then undefined) — fall back to emitting
            # the whole code as one unstyled token, as the module header
            # promises.  Previously only IOError was caught, so the
            # documented fallback never triggered.
            log.info("Pygments lexer not found, using fallback")
            yield ('', self.code)
            return
        for ttype, value in self.join(tokens):
            yield (_get_ttype_class(ttype), value)
# code_block_directive
# --------------------
# ::
def code_block_directive(name, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    """Parse and classify content of a code_block.

    Function-style directive: returns a list with one ``literal_block`` node
    whose children are ``inline`` nodes classed by pygments token type, plus
    optional line-number ``inline`` nodes.
    """
    # Fix: initialise up front so the "linenos_offset" check below can never
    # hit an unbound local when the content comes from the directive body
    # instead of an included file (previously a latent NameError).
    line_offset = 0
    if 'include' in options:
        try:
            if 'encoding' in options:
                encoding = options['encoding']
            else:
                encoding = 'utf-8'
            # Fix: close the file deterministically instead of leaking the
            # handle returned by codecs.open().
            fp = codecs.open(options['include'], 'r', encoding)
            try:
                content = fp.read().rstrip()
            finally:
                fp.close()
        except (IOError, UnicodeError):  # no file or problem finding/reading it
            log.error('Error reading file: "%s" L %s' % (options['include'], lineno))
            content = u''
        if content:
            # here we define the start-at and end-at options
            # so that limit is included in extraction
            # this is different than the start-after directive of docutils
            # (docutils/parsers/rst/directives/misc.py L73+)
            # which excludes the beginning
            # the reason is we want to be able to define a start-at like
            #       def mymethod(self)
            # and have such a definition included
            # NOTE(review): reporter.severe() appears to return a node rather
            # than an exception; raising it looks suspect — confirm against
            # the docutils version in use.
            after_text = options.get('start-at', None)
            if after_text:
                # skip content in include_text before *and NOT incl.* a matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe('Problem with "start-at" option of "%s" '
                        'code-block directive:\nText not found.' % options['start-at'])
                # Fix: compute the offset from the *original* text before
                # truncating it — the old order sliced the already-truncated
                # string, yielding a wrong line offset.
                line_offset = len(content[:after_index].splitlines())
                content = content[after_index:]
            after_text = options.get('start-after', None)
            if after_text:
                # skip content in include_text before *and incl.* a matching text
                after_index = content.find(after_text)
                if after_index < 0:
                    raise state_machine.reporter.severe('Problem with "start-after" option of "%s" '
                        'code-block directive:\nText not found.' % options['start-after'])
                line_offset = len(content[:after_index + len(after_text)].splitlines())
                content = content[after_index + len(after_text):]
            # same changes here for the same reason
            before_text = options.get('end-at', None)
            if before_text:
                # skip content in include_text after *and incl.* a matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe('Problem with "end-at" option of "%s" '
                        'code-block directive:\nText not found.' % options['end-at'])
                content = content[:before_index + len(before_text)]
            before_text = options.get('end-before', None)
            if before_text:
                # skip content in include_text after *and NOT incl.* a matching text
                before_index = content.find(before_text)
                if before_index < 0:
                    raise state_machine.reporter.severe('Problem with "end-before" option of "%s" '
                        'code-block directive:\nText not found.' % options['end-before'])
                content = content[:before_index]
    else:
        content = u'\n'.join(content)
    if 'tabsize' in options:
        tabw = options['tabsize']
    else:
        tabw = int(options.get('tab-width', 8))
    content = content.replace('\t', ' ' * tabw)
    withln = "linenos" in options
    if not "linenos_offset" in options:
        line_offset = 0
    language = arguments[0]
    # create a literal block element and set class argument
    code_block = nodes.literal_block(classes=["code", language])
    if withln:
        lineno = 1 + line_offset
        total_lines = content.count('\n') + 1 + line_offset
        lnwidth = len(str(total_lines))
        fstr = "\n%%%dd " % lnwidth
        code_block += nodes.inline(fstr[1:] % lineno, fstr[1:] % lineno, classes=['linenumber'])
    # parse content with pygments and add to code_block element
    for cls, value in DocutilsInterface(content, language, options):
        if withln and "\n" in value:
            # Split on the "\n"s
            values = value.split("\n")
            # The first piece, pass as-is
            code_block += nodes.Text(values[0], values[0])
            # On the second and later pieces, insert \n and linenos.
            # Fix: list(...) around zip keeps the [1:] slice working on
            # Python 3, where zip() returns a non-subscriptable iterator.
            linenos = range(lineno, lineno + len(values))
            for chunk, ln in list(zip(values, linenos))[1:]:
                if ln <= total_lines:
                    code_block += nodes.inline(fstr % ln, fstr % ln, classes=['linenumber'])
                code_block += nodes.Text(chunk, chunk)
            lineno += len(values) - 1
        elif cls in unstyled_tokens:
            # insert as Text to decrease the verbosity of the output.
            code_block += nodes.Text(value, value)
        else:
            code_block += nodes.inline(value, value, classes=["pygments-" + cls])
    return [code_block]
# Custom argument validators
# --------------------------
# ::
#
# Move to a separate module??
def string_list(argument):
    """
    Convert a space- or comma-separated string of values into a python list
    of strings.

    (Directive option conversion function.)
    Based on positive_int_list of docutils.parsers.rst.directives.
    """
    # split(None) splits on whitespace runs, matching the original
    # two-branch implementation; items are not stripped either way.
    separator = ',' if ',' in argument else None
    return argument.split(separator)
def string_bool(argument):
    """
    Convert the strings "True"/"False" (case-insensitive) to python booleans.

    Raises ValueError for None or for any other string.
    """
    if argument is None:
        msg = 'argument required but none supplied; choose from "True" or "False"'
        raise ValueError(msg)
    lowered = argument.lower()
    table = {'true': True, 'false': False}
    if lowered in table:
        return table[lowered]
    raise ValueError('"%s" unknown; choose from "True" or "False"'
                     % argument)
def csharp_unicodelevel(argument):
    # Directive option validator: C# lexer unicode level.
    return directives.choice(argument, ('none', 'basic', 'full'))

def lhs_litstyle(argument):
    # Directive option validator: literate-Haskell style.
    return directives.choice(argument, ('bird', 'latex'))

def raw_compress(argument):
    # Directive option validator: raw-lexer compression scheme.
    return directives.choice(argument, ('gz', 'bz2'))
# Register Directive
# ------------------
# ::
# Function-style directive registration attributes:
# (required args, optional args, final arg may contain whitespace).
code_block_directive.arguments = (1, 0, 1)
code_block_directive.content = 1
# Option names below (after the generic group) are forwarded verbatim to the
# pygments lexer as custom_args, so they must match the lexer option names.
code_block_directive.options = {'include': directives.unchanged_required,
                                'start-at': directives.unchanged_required,
                                'end-at': directives.unchanged_required,
                                'start-after': directives.unchanged_required,
                                'end-before': directives.unchanged_required,
                                'linenos': directives.unchanged,
                                'linenos_offset': directives.unchanged,
                                'tab-width': directives.unchanged,
                                # generic
                                'stripnl' : string_bool,
                                'stripall': string_bool,
                                'ensurenl': string_bool,
                                'tabsize' : directives.positive_int,
                                'encoding': directives.encoding,
                                # Lua
                                # NOTE(review): spelling 'hightlighting' looks
                                # like a typo for the pygments Lua option
                                # 'func_name_highlighting' — confirm before
                                # changing (it is a user-facing option name).
                                'func_name_hightlighting':string_bool,
                                'disabled_modules': string_list,
                                # Python Console
                                'python3': string_bool,
                                # Delphi
                                'turbopascal':string_bool,
                                'delphi' :string_bool,
                                'freepascal': string_bool,
                                'units': string_list,
                                # Modula2
                                'pim' : string_bool,
                                'iso' : string_bool,
                                'objm2' : string_bool,
                                'gm2ext': string_bool,
                                # CSharp
                                'unicodelevel' : csharp_unicodelevel,
                                # Literate haskell
                                'litstyle' : lhs_litstyle,
                                # Raw
                                'compress': raw_compress,
                                # Rst
                                'handlecodeblocks': string_bool,
                                # Php
                                'startinline': string_bool,
                                'funcnamehighlighting': string_bool,
                                'disabledmodules': string_list,
                                }
# .. _docutils: http://docutils.sf.net/
# .. _pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
# .. _proof of concept:
# http://article.gmane.org/gmane.text.docutils.user/3689
#
# Test output
# -----------
#
# If called from the command line, call the docutils publisher to render the
# input::
if __name__ == '__main__':
    # Manual test harness: register the directive and run the docutils
    # command-line publisher so rst input on stdin is rendered as HTML.
    from docutils.core import publish_cmdline, default_description
    from docutils.parsers.rst import directives
    directives.register_directive('code-block', code_block_directive)
    description = "code-block directive test output" + default_description
    # Best-effort locale setup; failure is harmless for the test output.
    try:
        import locale
        locale.setlocale(locale.LC_ALL, '')
    except Exception:
        pass
    publish_cmdline(writer_name='html', description=description)
| {
"content_hash": "45ca847c23b932d3eccb475986321b3a",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 104,
"avg_line_length": 39.18681318681319,
"alnum_prop": 0.5271312394840157,
"repo_name": "ddd332/presto",
"id": "7fd5bcab9e96f7a73687a142e091e6ab725de852",
"size": "14461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presto-docs/target/sphinx/rst2pdf/pygments_code_block_directive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "130017"
},
{
"name": "GAP",
"bytes": "41169"
},
{
"name": "Java",
"bytes": "6836515"
},
{
"name": "JavaScript",
"bytes": "135954"
},
{
"name": "Python",
"bytes": "8056702"
},
{
"name": "TeX",
"bytes": "55016"
}
],
"symlink_target": ""
} |
"""
circleci module
~~~~~~~~~~~~~~~
This module provides a wrapper around the CircleCI API.
:copyright: (c) 2017-2019 by Lev Lazinskiy
:license: MIT, see LICENSE for more details.
"""
| {
"content_hash": "4a57453577a7e0ac5ccd456a88d084bb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 22,
"alnum_prop": 0.6363636363636364,
"repo_name": "levlaz/circleci.py",
"id": "7edf4697df849239592cc9b289b347981cd8f002",
"size": "222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "circleci/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "39153"
}
],
"symlink_target": ""
} |
"""Pytest configuration for REANA client."""
from __future__ import absolute_import, print_function
import pytest
from typing import Dict
@pytest.fixture()
def create_yaml_workflow_schema() -> str:
    """Return dummy YAML workflow schema."""
    # NOTE: the indentation inside this literal is significant YAML —
    # keep the nesting intact when editing.
    reana_yaml_schema = """
        version: 0.7.2
        inputs:
          files:
            - code/helloworld.py
            - inputs/names.txt
          parameters:
            sleeptime: 2
            inputfile: inputs/names.txt
            helloworld: code/helloworld.py
            outputfile: outputs/greetings.txt
        outputs:
          files:
            - outputs/greetings.txt
        workflow:
          type: serial
          specification:
            steps:
              - environment: 'python:2.7'
                commands:
                  - python "${helloworld}" --sleeptime ${sleeptime} \
                    --inputfile "${inputfile}" --outputfile "${outputfile}"
        """
    return reana_yaml_schema
@pytest.fixture()
def create_yaml_workflow_schema_with_workspace(create_yaml_workflow_schema: str) -> str:
    """Return dummy YAML workflow schema with `/var/reana` workspace."""
    # Embeds the base schema fixture and appends a workspace section.
    reana_yaml_schema = f"""
    {create_yaml_workflow_schema}
    workspace:
      root_path: /var/reana
    """
    return reana_yaml_schema
@pytest.fixture()
def get_workflow_specification_with_directory() -> Dict:
    """Return dummy workflow specification with "data" directory listed in inputs."""
    # Single serial step; the "data" input directory is the interesting part.
    step = {"commands": ["echo hello"], "environment": "python:2.7-slim"}
    reana_yaml_schema = {
        "inputs": {
            "directories": ["data"],
        },
        "version": "0.3.0",
        "workflow": {
            "specification": {"steps": [step]},
            "type": "serial",
        },
    }
    return {"specification": reana_yaml_schema}
@pytest.fixture()
def create_cwl_yaml_workflow_schema() -> str:
    """Return dummy CWL workflow schema."""
    # Minimal reana.yaml pointing at an external CWL file.
    reana_cwl_yaml_schema = """
        version: 0.7.2
        workflow:
          type: cwl
          file: main.cwl
        outputs:
          files:
            - foo/bar
    """
    return reana_cwl_yaml_schema
@pytest.fixture()
def cwl_workflow_spec_step() -> str:
    """Return dummy CWL workflow loaded spec step."""
    # A one-step CWL Workflow whose step runs "test.tool".
    cwl_workflow_spec_step = """
        #!/usr/bin/env cwl-runner
        cwlVersion: v1.0
        class: Workflow
        inputs:
          outputfile:
            type: string
            inputBinding:
              prefix: --outputfile
        outputs:
          result:
            type: File
            outputSource: first/result
        steps:
          first:
            run: test.tool
            in:
              outputfile: outputfile
            out: [result]
    """
    return cwl_workflow_spec_step
@pytest.fixture()
def cwl_workflow_spec_correct_input_param() -> str:
    """Return correct dummy CWL workflow loaded spec."""
    # CommandLineTool whose output glob references the declared
    # "outputfile" input parameter (valid counterpart of the fixture below).
    cwl_workflow_spec = """
        #!/usr/bin/env cwl-runner
        cwlVersion: v1.0
        class: CommandLineTool
        baseCommand: python
        inputs:
          outputfile:
            type: string
            inputBinding:
              prefix: --outputfile
        outputs:
          result:
            type: File
            outputBinding:
              glob: $(inputs.outputfile)
    """
    return cwl_workflow_spec
@pytest.fixture()
def cwl_workflow_spec_wrong_input_param() -> str:
    """Return wrong dummy CWL workflow loaded spec."""
    # Deliberately broken: the input is named "xoutputfile" while the output
    # glob still references inputs.outputfile.
    cwl_workflow_spec = """
        #!/usr/bin/env cwl-runner
        cwlVersion: v1.0
        class: CommandLineTool
        baseCommand: python
        inputs:
          xoutputfile: # wrong input param
            type: string
            inputBinding:
              prefix: --outputfile
        outputs:
          result:
            type: File
            outputBinding:
              glob: $(inputs.outputfile)
    """
    return cwl_workflow_spec
@pytest.fixture()
def cwl_workflow_spec_loaded() -> Dict:
    """Return dummy CWL workflow loaded spec."""
    # Pre-parsed ("loaded") CWL document: a $graph holding the main Workflow
    # plus the CommandLineTool ("#test.tool") that its single step runs.
    cwl_workflow_spec = {
        "workflow": {
            "type": "cwl",
            "specification": {
                "$graph": [
                    {
                        "class": "Workflow",
                        "inputs": [
                            {
                                "type": "string",
                                "inputBinding": {"prefix": "--outputfile"},
                                "id": "#main/outputfile",
                            }
                        ],
                        "outputs": [
                            {
                                "type": "File",
                                "outputSource": "#main/first/result",
                                "id": "#main/result",
                            }
                        ],
                        "steps": [
                            {
                                "run": "#test.tool",
                                "in": [
                                    {
                                        "source": "#main/outputfile",
                                        "id": "#main/first/outputfile",
                                    }
                                ],
                                "out": ["#main/first/result"],
                                "id": "#main/first",
                            }
                        ],
                        "id": "#main",
                    },
                    {
                        "class": "CommandLineTool",
                        "baseCommand": "python",
                        "inputs": [
                            {
                                "type": "string",
                                "inputBinding": {"prefix": "--outputfile"},
                                "id": "#test.tool/outputfile",
                            }
                        ],
                        "outputs": [
                            {
                                "type": "File",
                                "outputBinding": {"glob": "$(inputs.outputfile)"},
                                "id": "#test.tool/result",
                            }
                        ],
                        "id": "#test.tool",
                    },
                ]
            },
        }
    }
    return cwl_workflow_spec
| {
"content_hash": "76336d87284d10b596c8bd5fcebbf7a9",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 88,
"avg_line_length": 28.223684210526315,
"alnum_prop": 0.4111888111888112,
"repo_name": "reanahub/reana-client",
"id": "369beb3d5f9d1e6c3f9ca347418d339bb7577117",
"size": "6685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285830"
},
{
"name": "Shell",
"bytes": "5244"
}
],
"symlink_target": ""
} |
"""Provides data related to hardware."""
from typing import Final
from mimesis.data import (
CPU,
CPU_CODENAMES,
CPU_MODEL_CODES,
GENERATION,
GRAPHICS,
HDD_SSD,
MANUFACTURERS,
PHONE_MODELS,
RAM_SIZES,
RAM_TYPES,
RESOLUTIONS,
SCREEN_SIZES,
)
from mimesis.providers.base import BaseProvider
__all__ = ["Hardware"]
class Hardware(BaseProvider):
    """Provides data related to computer hardware."""

    class Meta:
        """Provider metadata."""

        name: Final[str] = "hardware"

    def resolution(self) -> str:
        """Return a random screen resolution, e.g. ``1280x720``."""
        return self.random.choice(RESOLUTIONS)

    def screen_size(self) -> str:
        """Return a random screen diagonal in inches, e.g. ``13″``."""
        return self.random.choice(SCREEN_SIZES)

    def cpu(self) -> str:
        """Return a random CPU name, e.g. ``Intel® Core i7``."""
        return self.random.choice(CPU)

    def cpu_frequency(self) -> str:
        """Return a random CPU frequency string, e.g. ``4.0GHz``."""
        freq = self.random.uniform(a=1.5, b=4.3, precision=1)
        return f"{freq}GHz"

    def generation(self) -> str:
        """Return a random generation label, e.g. ``6th Generation``."""
        return self.random.choice(GENERATION)

    def cpu_model_code(self) -> str:
        """Return a random CPU model code."""
        return self.random.choice(CPU_MODEL_CODES)

    def cpu_codename(self) -> str:
        """Return a random CPU codename, e.g. ``Cannonlake``."""
        return self.random.choice(CPU_CODENAMES)

    def ram_type(self) -> str:
        """Return a random RAM type, e.g. ``DDR3``."""
        return self.random.choice(RAM_TYPES)

    def ram_size(self) -> str:
        """Return a random RAM size, e.g. ``16GB``."""
        return self.random.choice(RAM_SIZES)

    def ssd_or_hdd(self) -> str:
        """Return a random storage descriptor, e.g. ``512GB SSD``."""
        return self.random.choice(HDD_SSD)

    def graphics(self) -> str:
        """Return a random graphics adapter name."""
        return self.random.choice(GRAPHICS)

    def manufacturer(self) -> str:
        """Return a random hardware manufacturer, e.g. ``Dell``."""
        return self.random.choice(MANUFACTURERS)

    def phone_model(self) -> str:
        """Return a random phone model, e.g. ``Nokia Lumia 920``."""
        return self.random.choice(PHONE_MODELS)
| {
"content_hash": "148b9b92547aecfc7f2bc1fcc903398a",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 66,
"avg_line_length": 20.56050955414013,
"alnum_prop": 0.5322180916976456,
"repo_name": "lk-geimfari/church",
"id": "4b79cde5507a19ae1734aafaf608656d02a15d31",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mimesis/providers/hardware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135889"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from cloudpulse.openstack.common._i18n import _LI
# Help text shared between the config option below and the error raised
# when the option cannot be parsed.
help_for_backdoor_port = (
    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
    "in listening on a random tcp port number; <port> results in listening "
    "on the specified port number (and not enabling backdoor if that port "
    "is in use); and <start>:<end> results in listening on the smallest "
    "unused port number within the specified range of port numbers. The "
    "chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
# Register the backdoor option on the global config object at import time.
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
    """Entry point for oslo-config-generator.
    """
    # Deep-copy so the generator cannot mutate the module-level option list.
    opts_copy = copy.deepcopy(eventlet_backdoor_opts)
    return [(None, opts_copy)]
class EventletBackdoorConfigValueError(Exception):
    """Raised when the backdoor_port option value cannot be parsed."""

    def __init__(self, port_range, help_msg, ex):
        # Same message layout as before, composed with str.format.
        msg = ('Invalid backdoor_port configuration {0}: {1}. '
               '{2}'.format(port_range, ex, help_msg))
        super(EventletBackdoorConfigValueError, self).__init__(msg)
        # Keep the offending value for callers that want to inspect it.
        self.port_range = port_range
def _dont_use_this():
    """Stub bound to exit/quit in the backdoor shell; tells users to disconnect."""
    warning = "Don't use this, just disconnect instead"
    print(warning)
def _find_objects(t):
    """Return every gc-tracked object that is an instance of type *t*."""
    matches = []
    for candidate in gc.get_objects():
        if isinstance(candidate, t):
            matches.append(candidate)
    return matches
def _print_greenthreads():
    """Print index, repr and stack of every live greenlet."""
    for index, green_thread in enumerate(_find_objects(greenlet.greenlet)):
        print(index, green_thread)
        traceback.print_stack(green_thread.gr_frame)
        print()
def _print_nativethreads():
    """Print id and current stack of every native (OS) thread."""
    frames = sys._current_frames()
    for thread_id, frame in frames.items():
        print(thread_id)
        traceback.print_stack(frame)
        print()
def _parse_port_range(port_range):
    """Parse a ``<port>`` or ``<start>:<end>`` string into an int tuple.

    :param port_range: string with a single port or a colon-separated range.
    :returns: ``(start, end)`` ints; for a single port both values are equal.
    :raises EventletBackdoorConfigValueError: if a bound is not an integer
        or the end of the range is smaller than the start.
    """
    if ':' not in port_range:
        start, end = port_range, port_range
    else:
        start, end = port_range.split(':', 1)
    try:
        start, end = int(start), int(end)
        if end < start:
            raise ValueError
        return start, end
    except ValueError as ex:
        # BUG FIX: the constructor signature is (port_range, help_msg, ex);
        # the previous call passed the exception and the help text swapped,
        # producing a garbled error message.
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
def _listen(host, start_port, end_port, listen_func):
    """Bind via *listen_func* on the first free port in [start_port, end_port].

    Re-raises immediately for any error other than "address in use", or once
    the range is exhausted.
    """
    port = start_port
    while True:
        try:
            return listen_func((host, port))
        except socket.error as exc:
            busy = exc.errno == errno.EADDRINUSE
            if not busy or port >= end_port:
                raise
            port += 1
def initialize_if_enabled():
    """Start the eventlet backdoor server if backdoor_port is configured.

    Returns the port actually bound, or None when the backdoor is disabled.
    Side effects: replaces sys.displayhook and spawns a greenthread.
    """
    # Names made available inside the interactive backdoor shell.
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }
    if CONF.backdoor_port is None:
        return None
    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook
    sock = _listen('localhost', start_port, end_port, eventlet.listen)
    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
        {'port': port, 'pid': os.getpid()}
    )
    # Serve the backdoor in a green thread; this call does not block.
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
| {
"content_hash": "7f2276794a71214c2e4eab468d69b76f",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 77,
"avg_line_length": 30.74074074074074,
"alnum_prop": 0.6281927710843374,
"repo_name": "anand1712/cloudpulse",
"id": "cd4f35776cbd6cc4d4f0c20fd65706fcb97607fe",
"size": "4861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudpulse/openstack/common/eventlet_backdoor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "377316"
}
],
"symlink_target": ""
} |
import zeit.cms.type
from zeit.cms.i18n import MessageFactory as _
import lxml.objectify
import zeit.cms.connector
import zeit.cms.content.adapter
import zeit.cms.content.dav
import zeit.cms.content.metadata
import zeit.cms.interfaces
import zeit.content.quiz.container
import grokcore.component
import zeit.content.quiz.interfaces
import zeit.wysiwyg.interfaces
import zope.component
import zope.interface
# Minimal XML skeleton used as the body of newly created quiz documents.
QUIZ_TEMPLATE = u"""\
<quiz xmlns:py="http://codespeak.net/lxml/objectify/pytype">
</quiz>"""
class Quiz(zeit.content.quiz.container.Container,
           zeit.cms.content.metadata.CommonMetadata):
    """Quiz"""
    zope.interface.implements(zeit.content.quiz.interfaces.IQuiz,
                              zeit.cms.interfaces.IEditorialContent)
    # Stored as the WebDAV property "comments" in the document schema.
    commentsAllowed = zeit.cms.content.dav.DAVProperty(
        zeit.content.quiz.interfaces.IQuiz['commentsAllowed'],
        zeit.cms.interfaces.DOCUMENT_SCHEMA_NS, 'comments')
    # XML body used when a new quiz document is created.
    default_template = QUIZ_TEMPLATE
    def _iter_xml_children(self):
        # Only <question> children count as quiz content nodes.
        for child in self.xml.getchildren():
            if child.tag == 'question':
                yield child
    def _get_persistent_container(self):
        # The quiz document itself is the persistent root container.
        return self
class QuizType(zeit.cms.type.XMLContentTypeDeclaration):
    """CMS content-type declaration for Quiz documents."""
    factory = Quiz
    interface = zeit.content.quiz.interfaces.IQuiz
    title = _('Quiz')
    type = 'quiz'
    # Quizzes are not created through the generic CMS add form.
    addform = zeit.cms.type.SKIP_ADD
@zope.component.adapter(zeit.content.quiz.interfaces.IQuizContent)
@zope.interface.implementer(zeit.content.quiz.interfaces.IQuiz)
def quiz_for_content(context):
    """Adapt any quiz content object to the quiz containing it.

    Walks up the ``__parent__`` chain until an IQuiz provider is found.
    """
    quiz_iface = zeit.content.quiz.interfaces.IQuiz
    current = context
    while not quiz_iface.providedBy(current):
        current = current.__parent__
    return current
class ContentBase(object):
    """Base class for questions and answers."""

    # The title lives directly on the <title> node of the XML document.
    title = zeit.cms.content.property.ObjectPathProperty('.title')

    def get_node(self, name):
        """Return the child node *name*, creating an empty one if missing."""
        try:
            return self.xml[name]
        except AttributeError:
            new_node = lxml.objectify.Element(name)
            self.xml.append(new_node)
            return new_node

    @property
    def convert(self):
        """HTML converter adapter for this content object."""
        return zeit.wysiwyg.interfaces.IHTMLConverter(self)
class SearchableText(grokcore.component.Adapter):
    """SearchableText for a quiz."""
    grokcore.component.context(zeit.content.quiz.interfaces.IQuiz)
    # NOTE(review): zope.index is referenced here but only zope.interface /
    # zope.component are visibly imported above — presumably pulled in
    # transitively; confirm, otherwise this raises AttributeError at import.
    grokcore.component.implements(zope.index.text.interfaces.ISearchableText)
    def getSearchableText(self):
        # Collect non-empty <p> texts below <question> nodes (py2 unicode).
        main_text = []
        for p in self.context.xml.xpath("//question//p"):
            text = unicode(p).strip()
            if text:
                main_text.append(text)
        return main_text
| {
"content_hash": "ee1b4082ce897363c3ec6323742c6c8f",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 77,
"avg_line_length": 28.73913043478261,
"alnum_prop": 0.6898638426626323,
"repo_name": "ZeitOnline/zeit.content.quiz",
"id": "3e0d3a5a4c7278a750e2410e797dd5e4422a2a32",
"size": "2644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/content/quiz/quiz.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "350"
},
{
"name": "JavaScript",
"bytes": "1084"
},
{
"name": "Python",
"bytes": "33629"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2
from collections import deque
class Preprocessor:
    """Frame preprocessor implementing the phi step from the Nature DQN paper.

    Keeps only the two most recent raw frames; preprocess() takes their
    pixel-wise maximum, grayscales the result and resizes it to 84x84.
    """

    def __init__(self):
        # Bounded deque: appending a third frame drops the oldest one.
        self.preprocess_stack = deque([], 2)

    def add(self, aleRGB):
        """Push one raw RGB frame onto the two-frame buffer."""
        self.preprocess_stack.append(aleRGB)

    def preprocess(self):
        """Return the 84x84 grayscale max-pool of the two buffered frames."""
        assert len(self.preprocess_stack) == 2
        older, newer = self.preprocess_stack
        fused = np.maximum(older, newer)
        return self.resize(self.grayscale(fused))

    def grayscale(self, img):
        """Convert an RGB image to grayscale."""
        return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    def resize(self, image):
        """Resize an image to 84x84 pixels."""
        return cv2.resize(image, (84, 84))
"content_hash": "24ad76a6c97edc7d710f0d7e0786c247",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 23.4,
"alnum_prop": 0.7155067155067155,
"repo_name": "dsbrown1331/CoRL2019-DREX",
"id": "cffd4e8dc4671fcc2e4a4f75067496a277d1bfac",
"size": "819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drex-atari/preprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "591968"
},
{
"name": "Jupyter Notebook",
"bytes": "1160596"
},
{
"name": "Python",
"bytes": "1438389"
}
],
"symlink_target": ""
} |
import matplotlib as mpl
# Select a non-interactive backend *before* pyplot is imported so the
# script can run headless (e.g. on a cluster node).
mpl.use('Agg')
import sys
import matplotlib.pyplot as plt
import numpy as np

# Domain extents; unused in the plots below but kept as importable constants.
Lx = 1.
Ly = 1.


def _plot_single(x, y, ylabel, filename, log=False):
    """Plot one aggregate curve against MPI ranks and save it to *filename*."""
    fig, ax = plt.subplots(1, 1)
    (ax.loglog if log else ax.plot)(x, y)
    ax.set_xlabel("MPI Ranks")
    ax.set_ylabel(ylabel)
    fig.savefig(filename)


def _plot_detail(x, series, ylabel, filename, log=False):
    """Plot the per-phase (Init/Assembly/Solution) curves with a legend."""
    fig, ax = plt.subplots(1, 1)
    plot = ax.loglog if log else ax.plot
    for label, y in series:
        plot(x, y, label=label)
    ax.set_xlabel("MPI Ranks")
    ax.set_ylabel(ylabel)
    ax.legend(loc='best')
    fig.savefig(filename)


# Problem size: selects which timings file to read and names the outputs.
n = int(sys.argv[1])
nodes, comm_size, init_time, factor_time, solve_time, total_time = \
    np.loadtxt("./timings%i.dat" % n, unpack=True)

# Aggregate scaling curves (speedup/efficiency relative to the first entry).
_plot_single(comm_size, total_time, "CPU Time (s)", "timing%i.png" % n, log=True)
speedup = total_time[0] / total_time
_plot_single(comm_size, speedup, "Speedup", "speedup%i.png" % n)
efficiency = speedup / comm_size
_plot_single(comm_size, efficiency, "Parallel Efficiency", "efficiency%i.png" % n)

# Per-phase breakdowns of the same three views.
_plot_detail(
    comm_size,
    [("Init", init_time), ("Assembly", factor_time), ("Solution", solve_time)],
    "CPU Time (s)", "timing%i_detail.png" % n, log=True)
init_speedup = init_time[0] / init_time
factor_speedup = factor_time[0] / factor_time
solve_speedup = solve_time[0] / solve_time
_plot_detail(
    comm_size,
    [("Init", init_speedup), ("Assembly", factor_speedup),
     ("Solution", solve_speedup)],
    "Speedup", "speedup%i_detail.png" % n)
_plot_detail(
    comm_size,
    [("Init", init_speedup / comm_size), ("Assembly", factor_speedup / comm_size),
     ("Solution", solve_speedup / comm_size)],
    "Parallel Efficiency", "efficiency%i_detail.png" % n)
| {
"content_hash": "29b74250b9a487194c8266c74984847d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 70,
"avg_line_length": 31.726027397260275,
"alnum_prop": 0.7240932642487047,
"repo_name": "denera/CSCI6360",
"id": "50e319b68b5fd02dd85aab148af38949569e8f9e",
"size": "2316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/plot_scaling.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "77887"
},
{
"name": "C++",
"bytes": "5284"
},
{
"name": "Makefile",
"bytes": "754"
},
{
"name": "Python",
"bytes": "28693"
},
{
"name": "Shell",
"bytes": "1101"
},
{
"name": "TeX",
"bytes": "28697"
}
],
"symlink_target": ""
} |
import unittest
import common.utils as utils
# TODO - add more tests
class TestUtilsMethods(unittest.TestCase):
    """Unit tests for the helpers in common.utils."""
    def test_strip_string(self):
        # strip_string removes surrounding symbol noise; trailing spaces are
        # trimmed here before the comparison.
        self.assertEqual(utils.strip_string("^&#*#*#*# foo &#&#&#&#&#").strip(" "), "foo")
    def test_consolidate_spelling_errors(self):
        # TODO: not yet implemented.
        pass
    def test_spelling_group_list_from_hash(self):
        # TODO: not yet implemented.
        pass
    def test_split_line(self):
        # TODO: not yet implemented.
        pass
    def test_convert(self):
        # TODO: not yet implemented.
        pass
    def test_update_file(self):
        # TODO: not yet implemented.
        pass
    def test_remove_non_utf8(self):
        # TODO: not yet implemented.
        pass
    def test_is_two_part_word(self):
        # Words splittable into two dictionary words are detected.
        self.assertTrue(utils.is_two_part_word("twopart"))
        self.assertTrue(utils.is_two_part_word("morewords"))
        self.assertFalse(utils.is_two_part_word("word"))
    def test_is_spelling_error(self):
        self.assertFalse(utils.is_spelling_error("word"))
        self.assertTrue(utils.is_spelling_error("wo23io234as;dfjd"))
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| {
"content_hash": "99a0f02c60eb976c108f14a04d586516",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 94,
"avg_line_length": 23.975609756097562,
"alnum_prop": 0.6215666327568667,
"repo_name": "seales/spellchecker",
"id": "1b72dc334185f01b2c487bc016c791f16d278b43",
"size": "984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/TestUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23834"
}
],
"symlink_target": ""
} |
import os
import spyral
from .sprites import sprite
from . import collision
# Screen geometry.
WIDTH = 1200
HEIGHT = 900
WHITE = (255, 255, 255)
SIZE = (WIDTH, HEIGHT)
# Colour palette (RGB tuples).
GREEN = (60, 179, 113)
RED = (255, 0, 0)
BLACKBLUE = (19, 15, 48)
BG_COLOR = BLACKBLUE
# Alien grid layout values; MOVEX/MOVEY presumably are the per-step fleet
# movement in pixels — confirm against the sprite module.
ENEMYGAP = 30
XMARGIN = 175
YMARGIN = 100
MOVEX = 15
MOVEY = 20
ENEMYSIDE = 50
# Path to the star-field background image.
BACKGROUND = os.path.join("game", "graphics", "spacebackground.png")
class Level1(spyral.Scene):
    """First level: star background, player ship and a grid of aliens."""

    def __init__(self):
        spyral.Scene.__init__(self, SIZE)
        # Scale the star background image up to the scene size.
        self.space = spyral.Image(filename=BACKGROUND)
        self.background = self.space.scale((1200, 900))
        # Collision handling is wired up before any sprites are created.
        self.collision_handler = collision.CollisionHandler(self)
        self.player = sprite.Player(self, 'left', self.collision_handler)
        self.alien_list = self.make_aliens(6, 3)
        self.collision_handler.add_player(self.player)
        self.collision_handler.add_aliens(self.alien_list)
        # Scene event bindings ('q' also quits the level).
        spyral.event.register("system.quit", spyral.director.pop)
        spyral.event.register("director.update", self.update)
        spyral.event.register("input.keyboard.down.q", spyral.director.pop)

    def update(self, delta):
        """Per-frame hook; nothing to do for this level."""
        pass

    def make_aliens(self, columns, rows):
        """
        Make aliens and send them to collision handler.
        """
        return [
            sprite.Alien(self, row, column)
            for column in range(columns)
            for row in range(rows)
        ]
| {
"content_hash": "615c0acc15620065d34d9dd3e9da19f7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 26.803571428571427,
"alnum_prop": 0.6322451698867422,
"repo_name": "justinmeister/spaceinvaders-spyral",
"id": "b29037245789e34b25cd2afadd5df6c3f2737676",
"size": "1501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/level.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "95"
},
{
"name": "Perl",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "368084"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HoutcategoriesSolveRefChannelCategoryATOMCategory(Himesis):
    # NOTE(review): auto-generated DSLTrans rule graph — node indices and
    # edge tuples are positional and significant; do not reorder statements.
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule outcategoriesSolveRefChannelCategoryATOMCategory.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HoutcategoriesSolveRefChannelCategoryATOMCategory, self).__init__(name='HoutcategoriesSolveRefChannelCategoryATOMCategory', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """outcategoriesSolveRefChannelCategoryATOMCategory"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'outcategoriesSolveRefChannelCategoryATOMCategory')
        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["attr1"] = """outcategoriesSolveRefChannelCategoryATOMCategory"""
        # match class Channel(5.0.m.0Channel) node
        self.add_node()
        self.vs[3]["mm__"] = """Channel"""
        self.vs[3]["attr1"] = """+"""
        # match class Category(5.0.m.1Category) node
        self.add_node()
        self.vs[4]["mm__"] = """Category"""
        self.vs[4]["attr1"] = """+"""
        # apply class ATOM(5.0.a.0ATOM) node
        self.add_node()
        self.vs[5]["mm__"] = """ATOM"""
        self.vs[5]["attr1"] = """1"""
        # apply class Category(5.0.a.1Category) node
        self.add_node()
        self.vs[6]["mm__"] = """Category"""
        self.vs[6]["attr1"] = """1"""
        # match association Channel--category-->Category node
        self.add_node()
        self.vs[7]["attr1"] = """category"""
        self.vs[7]["mm__"] = """directLink_S"""
        # apply association ATOM--categories-->Category node
        self.add_node()
        self.vs[8]["attr1"] = """categories"""
        self.vs[8]["mm__"] = """directLink_T"""
        # backward association ATOM-->Channelnode
        self.add_node()
        self.vs[9]["mm__"] = """backward_link"""
        # backward association Category-->Categorynode
        self.add_node()
        self.vs[10]["mm__"] = """backward_link"""
        # Add the edges
        self.add_edges([
            (0,3),  # matchmodel -> match_class Channel(5.0.m.0Channel)
            (0,4),  # matchmodel -> match_class Category(5.0.m.1Category)
            (1,5),  # applymodel -> apply_classATOM(5.0.a.0ATOM)
            (1,6),  # applymodel -> apply_classCategory(5.0.a.1Category)
            (3,7),  # match classChannel(5.0.m.0Channel) -> association category
            (7,4),  # associationcategory -> match_classChannel(5.0.m.1Category)
            (5,8),  # apply class ATOM(5.0.a.0ATOM) -> association categories
            (8,6),  # associationcategories -> apply_classCategory(5.0.a.1Category)
            (5,9),  # apply class ATOM(5.0.m.0Channel) -> backward_association
            (9,3),  # backward_associationChannel -> match_class Channel(5.0.m.0Channel)
            (6,10),  # apply class Category(5.0.m.1Category) -> backward_association
            (10,4),  # backward_associationCategory -> match_class Category(5.0.m.1Category)
            (0,2),  # matchmodel -> pairedwith
            (2,1)  # pairedwith -> applyModel
        ])
        # No attribute equations for this rule.
        self["equations"] = []
| {
"content_hash": "76a6355d2ba8a48c1451e17e98dda9e7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 154,
"avg_line_length": 35.61363636363637,
"alnum_prop": 0.6569878749202297,
"repo_name": "levilucio/SyVOLT",
"id": "4a0837f9090356b0adbd2cf1b6d57eb72b5557b8",
"size": "3134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RSS2ATOM/transformation/HoutcategoriesSolveRefChannelCategoryATOMCategory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
----------------------------------------------------------------------
Authors: Jan-Justin van Tonder
----------------------------------------------------------------------
Unit tests for the ID context module.
----------------------------------------------------------------------
"""
import pytest
from hutts_verification.id_contexts.sa_id_card import SAIDCard
def test_get_id_info_ignore_fields():
    """
    Test the case in which an ID number was found by get_id_info and whether it is used to extract other information
    such as date of birth, status and sex.
    """
    sa_id_card = SAIDCard()
    # Raw OCR-style text, one field label/value per line.
    lines = [
        'Identity Number',
        '7101135111011',
        'Surname',
        'Doe',
        'Names',
        'John-Michael',
        'Robert',
        'Nationality',
        'RSA',
        'Country of Birth',
        'RSA',
        'Status',
        'Citizen',
        'Sex',
        'M',
        'Date of Birth',
        '13 Jan 1971',
    ]
    in_str = '\n'.join(lines)
    # Nationality and status must be absent because they are ignored.
    expected = {
        'identity_number': '7101135111011',
        'surname': 'Doe',
        'names': 'John-Michael Robert',
        'sex': 'M',
        'date_of_birth': '1971-01-13',
        'country_of_birth': 'RSA'
    }
    assert sa_id_card.get_id_info(in_str, ignore_fields=['nationality', 'status']) == expected
def test_get_id_info_invalid_arg_in_str():
    """
    Test to see if get_id_info raises the correct exception when an incorrect type for the in_str arg is passed.
    """
    # Legacy callable form of pytest.raises.
    pytest.raises(TypeError, SAIDCard().get_id_info, ['not legit'])
def test_get_id_info_invalid_arg_barcode_data():
    """
    Test to see if get_id_info raises the correct exception when an incorrect type for the barcode_data arg is passed.
    """
    # Legacy callable form of pytest.raises; kwargs are forwarded.
    pytest.raises(TypeError, SAIDCard().get_id_info, 'seems legit', barcode_data='nope')
def test_get_id_info_invalid_arg_ignore_fields():
    """
    Test to see if get_id_info raises the correct exception when an incorrect type for the ignore_fields arg is passed.
    """
    # ignore_fields must be a list, not a string.
    pytest.raises(TypeError, SAIDCard().get_id_info, 'seems legit', ignore_fields='nah fam')
def test_get_id_info_invalid_arg_min_fuzzy_ratio():
    """
    Test to see if get_id_info raises the correct exception when an incorrect type for the min_fuzzy_ratio arg is passed.
    """
    # fuzzy_min_ratio must be numeric, not a string.
    pytest.raises(TypeError, SAIDCard().get_id_info, 'good so far...', fuzzy_min_ratio='...fail')
def test_get_id_info_invalid_arg_max_multi_line():
    """
    Test to see if get_id_info raises the correct exception when an incorrect type for the max_multi_line arg is passed.
    """
    # max_multi_line must be an int, not a list.
    pytest.raises(TypeError, SAIDCard().get_id_info, 'good so far...', max_multi_line=['...nevermind'])
| {
"content_hash": "f2dbb83bcb9ca27d1116f3daab90d92b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 121,
"avg_line_length": 31.9,
"alnum_prop": 0.5694879832810867,
"repo_name": "javaTheHutts/Java-the-Hutts",
"id": "e753e9f9b3de97499a3bf7e013f7ead306b5c7c4",
"size": "2871",
"binary": false,
"copies": "1",
"ref": "refs/heads/Develop",
"path": "src/unittest/python/test_id_context.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "604"
},
{
"name": "Makefile",
"bytes": "410"
},
{
"name": "Python",
"bytes": "250676"
}
],
"symlink_target": ""
} |
from django.db import models
from wevote_functions.functions import positive_value_exists
import wevote_functions.admin
# Module-level logger configured through the We Vote logging helpers.
logger = wevote_functions.admin.get_logger(__name__)
# See also WeVoteServer/twitter/models.py for routines that manage internal twitter data
# https://dev.twitter.com/overview/api/users
# https://dev.twitter.com/overview/general/user-profile-images-and-banners
# Variant Dimensions Example URL
# normal 48px by 48px http://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png
# https://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png
# bigger 73px by 73px http://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_bigger.png
# https://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_bigger.png
# mini 24px by 24px http://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_mini.png
# https://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_mini.png
# original original http://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3.png
# https://pbs.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3.png
# Omit the underscore and variant to retrieve the original image. The images can be very large.
class TwitterAuthResponse(models.Model):
    """
    This is the authResponse data from a Twitter authentication
    """
    # One row per voter device; the device id is the unique lookup key.
    voter_device_id = models.CharField(
        verbose_name="voter_device_id initiating Twitter Auth", max_length=255, null=False, blank=False, unique=True)
    # auto_now=True: refreshed on every save, i.e. the latest auth time.
    datetime_of_authorization = models.DateTimeField(verbose_name='date and time of action', null=False, auto_now=True)
    # Twitter session information
    twitter_id = models.BigIntegerField(verbose_name="twitter big integer id", null=True, blank=True)
    twitter_screen_name = models.CharField(verbose_name='twitter screen name / handle',
                                           max_length=255, null=True, unique=False)
    twitter_name = models.CharField(verbose_name="display name from twitter", max_length=255, null=True, blank=True)
    twitter_profile_image_url_https = models.URLField(verbose_name='url of logo from twitter', blank=True, null=True)
    twitter_profile_banner_url_https = models.URLField(verbose_name='url of banner from twitter', blank=True, null=True)
    # OAuth request/access token pairs from the Twitter sign-in flow.
    twitter_request_token = models.TextField(verbose_name='twitter request token', null=True, blank=True)
    twitter_request_secret = models.TextField(verbose_name='twitter request secret', null=True, blank=True)
    twitter_access_token = models.TextField(verbose_name='twitter access token', null=True, blank=True)
    twitter_access_secret = models.TextField(verbose_name='twitter access secret', null=True, blank=True)
class TwitterAuthManager(models.Manager):
    def __unicode__(self):
        # Python 2 string representation used by Django admin/logging.
        return "TwitterAuthManager"
def update_or_create_twitter_auth_response(self, voter_device_id, twitter_id=0, twitter_screen_name='',
twitter_profile_image_url_https='', twitter_request_token='',
twitter_request_secret='', twitter_access_token='',
twitter_access_secret=''):
defaults = {
"voter_device_id": voter_device_id,
}
if positive_value_exists(twitter_id):
defaults["twitter_id"] = twitter_id
if positive_value_exists(twitter_screen_name):
defaults["twitter_screen_name"] = twitter_screen_name
if positive_value_exists(twitter_profile_image_url_https):
defaults["twitter_profile_image_url_https"] = twitter_profile_image_url_https
if positive_value_exists(twitter_request_token):
defaults["twitter_request_token"] = twitter_request_token
if positive_value_exists(twitter_request_secret):
defaults["twitter_request_secret"] = twitter_request_secret
if positive_value_exists(twitter_access_token):
defaults["twitter_access_token"] = twitter_access_token
if positive_value_exists(twitter_access_secret):
defaults["twitter_access_secret"] = twitter_access_secret
try:
twitter_auth_response, created = TwitterAuthResponse.objects.update_or_create(
voter_device_id__iexact=voter_device_id,
defaults=defaults,
)
twitter_auth_response_saved = True
success = True
status = "TWITTER_AUTH_RESPONSE_UPDATED_OR_CREATED"
except Exception as e:
twitter_auth_response_saved = False
twitter_auth_response = TwitterAuthResponse()
success = False
created = False
status = "TWITTER_AUTH_RESPONSE_NOT_UPDATED_OR_CREATED"
logger.error("update_or_create_twitter_auth_response threw " + str(e))
results = {
'success': success,
'status': status,
'twitter_auth_response_saved': twitter_auth_response_saved,
'twitter_auth_response_created': created,
'twitter_auth_response': twitter_auth_response,
}
return results
def retrieve_twitter_auth_response(self, voter_device_id):
"""
:param voter_device_id:
:return:
"""
twitter_auth_response = TwitterAuthResponse()
twitter_auth_response_id = 0
try:
if positive_value_exists(voter_device_id):
twitter_auth_response = TwitterAuthResponse.objects.get(
voter_device_id__iexact=voter_device_id,
)
twitter_auth_response_id = twitter_auth_response.id
twitter_auth_response_found = True
success = True
status = "RETRIEVE_TWITTER_AUTH_RESPONSE_FOUND_BY_VOTER_DEVICE_ID"
else:
twitter_auth_response_found = False
success = False
status = "RETRIEVE_TWITTER_AUTH_RESPONSE_VARIABLES_MISSING"
except TwitterAuthResponse.DoesNotExist:
twitter_auth_response_found = False
success = True
status = "RETRIEVE_TWITTER_AUTH_RESPONSE_NOT_FOUND"
except Exception as e:
twitter_auth_response_found = False
success = False
status = 'FAILED retrieve_twitter_auth_response'
results = {
'success': success,
'status': status,
'twitter_auth_response_found': twitter_auth_response_found,
'twitter_auth_response_id': twitter_auth_response_id,
'twitter_auth_response': twitter_auth_response,
}
return results
@staticmethod
def save_twitter_auth_values(twitter_auth_response, twitter_user_object):
"""
This is used to store the cached values in the TwitterAuthResponse record during authentication.
Please also see voter/models.py VoterManager->save_twitter_user_values
:param twitter_auth_response:
:param twitter_user_object:
:return:
"""
status = ""
try:
twitter_auth_value_to_save = False
if hasattr(twitter_user_object, "id") and positive_value_exists(twitter_user_object.id):
twitter_auth_response.twitter_id = twitter_user_object.id
twitter_auth_value_to_save = True
# 'id_str': '132728535',
# 'utc_offset': 32400,
# 'description': "Cars, Musics, Games, Electronics, toys, food, etc... I'm just a typical boy!",
# 'profile_image_url': 'http://a1.twimg.com/profile_images/1213351752/_2_2__normal.jpg',
if hasattr(twitter_user_object, "profile_image_url_https") and \
positive_value_exists(twitter_user_object.profile_image_url_https):
twitter_auth_response.twitter_profile_image_url_https = twitter_user_object.profile_image_url_https
twitter_auth_value_to_save = True
# 'profile_background_image_url': 'http://a2.twimg.com/a/1294785484/images/themes/theme15/bg.png',
if hasattr(twitter_user_object, "screen_name") and positive_value_exists(twitter_user_object.screen_name):
twitter_auth_response.twitter_screen_name = twitter_user_object.screen_name
twitter_auth_value_to_save = True
# 'lang': 'en',
if hasattr(twitter_user_object, "name") and positive_value_exists(twitter_user_object.name):
twitter_auth_response.twitter_name = twitter_user_object.name
twitter_auth_value_to_save = True
# 'url': 'http://www.carbonize.co.kr',
# 'time_zone': 'Seoul',
if hasattr(twitter_user_object, "profile_banner_url") and \
positive_value_exists(twitter_user_object.profile_banner_url):
twitter_auth_response.twitter_profile_banner_url_https = twitter_user_object.profile_banner_url
twitter_auth_value_to_save = True
if twitter_auth_value_to_save:
twitter_auth_response.save()
success = True
status += "SAVED_TWITTER_AUTH_VALUES "
except Exception as e:
status += "UNABLE_TO_SAVE_TWITTER_AUTH_VALUES "
logger.error("save_twitter_auth_values threw " + str(e))
success = False
results = {
'status': status,
'success': success,
'twitter_auth_response': twitter_auth_response,
}
return results
| {
"content_hash": "d2d81f3d2f9f073e2ae0c93beec32974",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 120,
"avg_line_length": 50.114583333333336,
"alnum_prop": 0.6370816877987945,
"repo_name": "wevote/WeVoteServer",
"id": "97164eaf559c383d46f0f73360d9ed17a68b1af2",
"size": "9719",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "import_export_twitter/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3612"
},
{
"name": "HTML",
"bytes": "1559624"
},
{
"name": "JavaScript",
"bytes": "26822"
},
{
"name": "Procfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "11943600"
},
{
"name": "Shell",
"bytes": "587"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: removes the Druzina model entirely."""

    dependencies = [
        ('taborniki', '0007_druzina'),
    ]

    operations = [
        # The 'druzina' field is removed first — presumably a relation whose
        # presence would block deleting the model (TODO confirm against 0007).
        migrations.RemoveField(
            model_name='druzina',
            name='druzina',
        ),
        migrations.DeleteModel(
            name='Druzina',
        ),
    ]
| {
"content_hash": "6ce61f42ee16915f1b3b01e58f2f78ee",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 40,
"avg_line_length": 19.6,
"alnum_prop": 0.5612244897959183,
"repo_name": "markbaltic/TaborniskaBaza",
"id": "135519932a11b57b222917dd24d3f70a2d2c6233",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taborniki/migrations/0008_auto_20170615_2033.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "357660"
},
{
"name": "HTML",
"bytes": "39221"
},
{
"name": "JavaScript",
"bytes": "145859"
},
{
"name": "Python",
"bytes": "196035"
}
],
"symlink_target": ""
} |
# Django settings used only by the test suite.
from __future__ import absolute_import, division, print_function, unicode_literals

import os

import django

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

DEBUG = True
TEMPLATE_DEBUG = DEBUG  # legacy setting, kept for the older Django versions tested

SECRET_KEY = 'NOTASECRET'  # tests only — never used in production

if django.VERSION[:2] == (1, 8):
    # Use a wrapper that includes Django commit 4f6a7663bcddffb114f2647f9928cbf1fdd8e4b5
    # so that full SQL queries from sqlite come through
    engine = 'django18_sqlite3_backend'
else:
    engine = 'django.db.backends.sqlite3'

# Three in-memory sqlite databases; 'replica' mirrors 'default' during tests.
DATABASES = {
    'default': {
        'ENGINE': engine,
        'NAME': ':memory:',
    },
    'replica': {
        'ENGINE': engine,
        'NAME': ':memory:',
        'TEST': {
            'MIRROR': 'default',
        }
    },
    'second': {
        'ENGINE': engine,
        'NAME': ':memory:',
    },
}

# Two independent local-memory caches so tests can exercise multi-cache behavior.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    'second': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
}

ALLOWED_HOSTS = []

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'testapp',
)

# MIDDLEWARE_CLASSES (not MIDDLEWARE) — required by the older Django versions tested
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'urls'

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
| {
"content_hash": "9bd2638273686b142ba9b81757cb16a0",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 24.11627906976744,
"alnum_prop": 0.6142719382835101,
"repo_name": "moumoutte/django-perf-rec",
"id": "b02bc1a47820b237a8d7b40d4328777c0b62eaf7",
"size": "2097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53075"
}
],
"symlink_target": ""
} |
"""Build script for the PositionWeightMatrix C extension module."""
from distutils.core import setup, Extension

# Single C translation unit compiled into one extension module.
pwm_extension = Extension(
    "PositionWeightMatrix",
    ["PositionWeightMatrix.c"],
)

setup(
    name="PositionWeightMatrix",
    version="1.0",
    ext_modules=[pwm_extension],
)
| {
"content_hash": "299302f854f7d71adbcfbe17076637c7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 63,
"avg_line_length": 36,
"alnum_prop": 0.75,
"repo_name": "tbepler/MESS",
"id": "b00d8288394bb5874f736153772716418351557b",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18867"
},
{
"name": "Python",
"bytes": "18395"
},
{
"name": "Shell",
"bytes": "616"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.