text stringlengths 4 1.02M | meta dict |
|---|---|
from functools import wraps
import zipline.api
from zipline.utils.algo_instance import get_algo_instance, set_algo_instance
class ZiplineAPI(object):
    """
    Scoped context manager that exposes an algorithm instance to the
    zipline API functions for the duration of a ``with`` block, then
    restores whatever instance was active before.
    """

    def __init__(self, algo_instance):
        self.algo_instance = algo_instance

    def __enter__(self):
        """
        Remember the currently-active instance, then install ours.
        """
        self.old_algo_instance = get_algo_instance()
        set_algo_instance(self.algo_instance)

    def __exit__(self, _type, _value, _tb):
        """
        Reinstate the instance that was active before __enter__ ran.
        """
        set_algo_instance(self.old_algo_instance)
def api_method(f):
    """Register a TradingAlgorithm method as a module-level zipline.api function.

    The published wrapper resolves the currently-active algorithm instance at
    call time and forwards every argument to the instance method of the same
    name. The original function is returned unchanged, tagged with
    ``is_api_method = True``.
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        # Look up the active algorithm; outside a simulation there is none.
        algo = get_algo_instance()
        if algo is None:
            raise RuntimeError(
                'zipline api method %s must be called during a simulation.'
                % f.__name__
            )
        return getattr(algo, f.__name__)(*args, **kwargs)

    # Publish the wrapper on the zipline.api module and its __all__ list.
    setattr(zipline.api, f.__name__, wrapped)
    zipline.api.__all__.append(f.__name__)
    f.is_api_method = True
    return f
def require_not_initialized(exception):
    """
    Build a decorator restricting an API method to before/during
    TradingAlgorithm.initialize. `exception` is raised if the wrapped
    method is invoked once ``self.initialized`` is true.

    Usage
    -----
    @require_not_initialized(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that should only be allowed during initialize.
    """
    def guard(method):
        @wraps(method)
        def guarded(self, *args, **kwargs):
            if self.initialized:
                raise exception
            return method(self, *args, **kwargs)
        return guarded
    return guard
def require_initialized(exception):
    """
    Build a decorator restricting an API method to after
    TradingAlgorithm.initialize has completed. `exception` is raised if
    the wrapped method is invoked while ``self.initialized`` is false.

    Usage
    -----
    @require_initialized(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that should only be allowed after initialize.
    """
    def guard(method):
        @wraps(method)
        def guarded(self, *args, **kwargs):
            if not self.initialized:
                raise exception
            return method(self, *args, **kwargs)
        return guarded
    return guard
def disallowed_in_before_trading_start(exception):
    """
    Build a decorator forbidding an API method inside
    TradingAlgorithm.before_trading_start. `exception` is raised if the
    wrapped method runs while ``self._in_before_trading_start`` is true.

    Usage
    -----
    @disallowed_in_before_trading_start(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that is not allowed inside before_trading_start.
    """
    def guard(method):
        @wraps(method)
        def guarded(self, *args, **kwargs):
            if self._in_before_trading_start:
                raise exception
            return method(self, *args, **kwargs)
        return guarded
    return guard
def allowed_only_in_before_trading_start(exception):
    """
    Build a decorator restricting an API method to inside
    TradingAlgorithm.before_trading_start. `exception` is raised if the
    wrapped method runs while ``self._in_before_trading_start`` is false.

    Usage
    -----
    @allowed_only_in_before_trading_start(SomeException("Don't do that!"))
    def method(self):
        # Do stuff that is only allowed inside before_trading_start.
    """
    def guard(method):
        @wraps(method)
        def guarded(self, *args, **kwargs):
            if not self._in_before_trading_start:
                raise exception
            return method(self, *args, **kwargs)
        return guarded
    return guard
| {
"content_hash": "d2270149fb47c93bb5572caee2e24b48",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 78,
"avg_line_length": 31.59259259259259,
"alnum_prop": 0.6318874560375146,
"repo_name": "florentchandelier/zipline",
"id": "6757f66bf19c013a63282326b46961cf28fc427d",
"size": "4848",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "zipline/utils/api_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7014"
},
{
"name": "Dockerfile",
"bytes": "2480"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "162383"
},
{
"name": "PowerShell",
"bytes": "3269"
},
{
"name": "Python",
"bytes": "3677457"
},
{
"name": "Shell",
"bytes": "7420"
}
],
"symlink_target": ""
} |
import testsuite
# Bootstrap the testsuite
testsuite.setup()
from catalogService.rest.models import cloud_types
class CloudsTest(testsuite.TestCase):
    """Round-trip tests for the cloud_types model XML serialization."""

    def testFreezeThaw(self):
        """Freeze a CloudType to XML, thaw it back, and compare fields.

        Also checks serialization of a CloudTypes collection node.
        """
        # failUnlessEqual is a long-deprecated unittest alias (removed in
        # Python 3.12); use assertEqual instead.
        hndlr = cloud_types.Handler()
        cloudTypeId = "cId"
        cloudType = "ec4"
        ctype = cloud_types.CloudType(id = cloudTypeId,
                                      cloudTypeName = cloudType)
        ctype.setCloudInstances(cloud_types.CloudInstances(href = 'aaa'))
        ctype.setDescriptorCredentials(cloud_types.DescriptorCredentials(href = 'bbb'))
        ctype.setDescriptorInstanceConfiguration(cloud_types.DescriptorInstanceConfiguration(href = 'ccc'))
        self.assertEqual(ctype.getId(), cloudTypeId)
        self.assertEqual(ctype.getCloudTypeName(), cloudType)
        self.assertEqual(ctype.getCloudInstances().getHref(), 'aaa')
        self.assertEqual(ctype.getDescriptorCredentials().getHref(), 'bbb')
        self.assertEqual(ctype.getDescriptorInstanceConfiguration().
                         getHref(), 'ccc')
        xmlContents = """<cloudType id="cId"><cloudInstances href="aaa"/><cloudTypeName>ec4</cloudTypeName><descriptorCredentials href="bbb"/><descriptorInstanceConfiguration href="ccc"/></cloudType>"""
        # Freeze: serialize to XML and compare against the expected document.
        ret = hndlr.toXml(ctype, prettyPrint = False)
        self.assertEqual(ret,
                         "<?xml version='1.0' encoding='UTF-8'?>\n" + xmlContents)
        # Thaw: parse the XML back and verify every accessor again.
        ctype = hndlr.parseString(ret)
        self.assertEqual(ctype.getId(), cloudTypeId)
        self.assertEqual(ctype.getCloudTypeName(), cloudType)
        self.assertEqual(ctype.getCloudInstances().getHref(), 'aaa')
        self.assertEqual(ctype.getDescriptorCredentials().getHref(), 'bbb')
        self.assertEqual(ctype.getDescriptorInstanceConfiguration().
                         getHref(), 'ccc')
        # Multiple nodes
        nodes = cloud_types.CloudTypes()
        nodes.append(ctype)
        ret = hndlr.toXml(nodes, prettyPrint = False)
        self.assertEqual(ret, "<?xml version='1.0' encoding='UTF-8'?>\n<cloudTypes>%s</cloudTypes>" % xmlContents)
if __name__ == "__main__":
testsuite.main()
| {
"content_hash": "4a8d5d77e553c4195ecf6f178041fcec",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 202,
"avg_line_length": 42.44,
"alnum_prop": 0.6828463713477851,
"repo_name": "sassoftware/catalog-service",
"id": "334bbecad90fdcd513ec9a8716a694255521c3fc",
"size": "2727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalogService_test/cloud_types_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9398"
},
{
"name": "Makefile",
"bytes": "26985"
},
{
"name": "Python",
"bytes": "8543839"
},
{
"name": "Shell",
"bytes": "329"
}
],
"symlink_target": ""
} |
def Setup(Settings, DefaultModel):
    """Configure the 'number of FC blocks' experiment settings.

    Mutates *Settings* in place and returns it: names the experiment,
    selects combined graphing, and points the first model at the
    1200x markable dataset for a single epoch. The three-model sweep
    (1-3 FC blocks at 150 epochs) below is intentionally left disabled
    inside the string literal, exactly as in the original.
    """
    first_model = Settings["models"][0]
    Settings["experiment_name"] = "Number_of_FC_blocks_test"
    Settings["graph_histories"] = ['together']  # e.g. ['all','together',[],[1,0],[0,0,0],[]]
    first_model["dataset_name"] = "1200x_markable_299x299"
    first_model["number_of_images"] = None
    first_model["epochs"] = 1
    '''
    Settings["models"][0]["epochs"] = 150
    Settings["models"][0]["unique_id"] = '1fc'
    Settings["models"][0]["top_repeat_FC_block"] = 1
    Settings["models"].append(DefaultModel.copy())
    Settings["models"][1]["dataset_pointer"] = 0 # 0 - reuse the first dataset
    Settings["models"][1]["dataset_name"] = "1200x_markable_299x299"
    Settings["models"][1]["number_of_images"] = None
    Settings["models"][1]["epochs"] = 150
    Settings["models"][1]["unique_id"] = '2fc'
    Settings["models"][1]["top_repeat_FC_block"] = 2
    Settings["models"].append(DefaultModel.copy())
    Settings["models"][2]["dataset_pointer"] = 0 # 0 - reuse the first dataset
    Settings["models"][2]["dataset_name"] = "1200x_markable_299x299"
    Settings["models"][2]["number_of_images"] = None
    Settings["models"][2]["epochs"] = 150
    Settings["models"][2]["unique_id"] = '3fc'
    Settings["models"][2]["top_repeat_FC_block"] = 3
    '''
    return Settings
| {
"content_hash": "cac1566f7f9b1e45735900a5a3d23dc3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 39.90909090909091,
"alnum_prop": 0.6142748671222475,
"repo_name": "previtus/MGR-Project-Code",
"id": "f36f0ffffa03dc6ae047a0fcadf82d06b71a2420",
"size": "1317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Settings/independent_experiments/individual_model_types_and_competition/top_number_of_fc_blocks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "247470"
},
{
"name": "Python",
"bytes": "505284"
},
{
"name": "Shell",
"bytes": "1109"
}
],
"symlink_target": ""
} |
import uuid as __uuid
import argparse
import requests
import json
import os
import sys
from vnc_api.vnc_api import *
from vnc_api.gen.resource_xsd import *
from cfgm_common.exceptions import *
EP_DELIM=','
PUBSUB_DELIM=' '
DEFAULT_HEADERS = {'Content-type': 'application/json; charset="UTF-8"'}
def show_usage():
print 'A rule string must be specified for this operation'
print '<publisher-spec> <subscriber-spec>'
print 'publisher-spec := <prefix>,<type>,<id>,<version>'
print 'subscriber-spec := <prefix>,<type>,<id>,<version>'
def parse_pubsub_ep(pubsub_str):
    """Split a comma-separated endpoint spec, padding to at least 4 fields.

    Missing trailing fields become empty strings; extra fields are kept.
    """
    fields = pubsub_str.split(EP_DELIM)
    fields.extend([''] * (4 - len(fields)))
    return fields
# '1.1.1.1/24' or '1.1.1.1'
def prefix_str_to_obj(prefix_str):
    """Convert an 'ip' or 'ip/len' string to a SubnetType (default /32).

    Returns None when the string does not split into exactly ip and length.
    """
    if '/' not in prefix_str:
        prefix_str += '/32'
    parts = prefix_str.split('/')
    if len(parts) != 2:
        return None
    ip, prefix_len = parts
    return SubnetType(ip, int(prefix_len))
def build_dsa_rule_entry(rule_str):
    """Parse '<publisher-spec> <subscriber-spec>' into a rule entry.

    Each spec is '<prefix>,<type>,<id>,<version>' (see show_usage).
    Returns a DiscoveryServiceAssignmentType, or None when the string is
    missing, has fewer than two specs, or either prefix is malformed.
    """
    # Bug fix: the original first called parse_pubsub_ep(rule_str) and
    # immediately discarded the result; that dead statement is removed.
    r = rule_str.split(PUBSUB_DELIM) if rule_str else []
    if len(r) < 2:
        return None
    # [0] is publisher-spec, [1] is subscriber-spec
    pubspec = parse_pubsub_ep(r[0])
    subspec = parse_pubsub_ep(r[1])
    pfx_pub = prefix_str_to_obj(pubspec[0])
    pfx_sub = prefix_str_to_obj(subspec[0])
    # Bug fix: the original tested `pfx_sub is None` twice and never
    # checked pfx_pub, letting a malformed publisher prefix through.
    if pfx_pub is None or pfx_sub is None:
        return None
    publisher = DiscoveryPubSubEndPointType(ep_prefix = pfx_pub,
                    ep_type = pubspec[1], ep_id = pubspec[2],
                    ep_version = pubspec[3])
    subscriber = [DiscoveryPubSubEndPointType(ep_prefix = pfx_sub,
                      ep_type = subspec[1], ep_id = subspec[2],
                      ep_version = subspec[3])]
    dsa_rule_entry = DiscoveryServiceAssignmentType(publisher, subscriber)
    return dsa_rule_entry
#end
def match_pubsub_ep(ep1, ep2):
    """Field-by-field equality of two pub/sub endpoint objects.

    Compares prefix (address and length), type, id and version.
    """
    return (ep1.ep_prefix.ip_prefix == ep2.ep_prefix.ip_prefix and
            ep1.ep_prefix.ip_prefix_len == ep2.ep_prefix.ip_prefix_len and
            ep1.ep_type == ep2.ep_type and
            ep1.ep_id == ep2.ep_id and
            ep1.ep_version == ep2.ep_version)
# match two rules (type DiscoveryServiceAssignmentType)
def match_rule_entry(r1, r2):
    """True when two rules have equal publishers and pairwise-equal,
    same-length subscriber lists (order matters)."""
    if not match_pubsub_ep(r1.get_publisher(), r2.get_publisher()):
        return False
    subs1 = r1.get_subscriber()
    subs2 = r2.get_subscriber()
    if len(subs1) != len(subs2):
        return False
    return all(match_pubsub_ep(s1, s2) for s1, s2 in zip(subs1, subs2))
# end
# Locate in_rule among the DSA rule references; returns the matching
# DsaRule object (not an index, despite the original comment), or None.
def find_rule(dsa_rules, in_rule):
    """Return the DsaRule object whose entry matches in_rule, else None.

    NOTE(review): the loop scans the whole list without breaking, so when
    several rules match, the *last* one wins — confirm that is intended.
    Uses the module-level `vnc` client.
    """
    match = None
    for rule_ref in dsa_rules:
        rule_obj = vnc_read_obj(vnc, 'dsa-rule', rule_ref['to'])
        if match_rule_entry(rule_obj.get_dsa_rule_entry(), in_rule):
            match = rule_obj
    return match
# end
def print_dsa_rule_entry(entry, prefix = ''):
pub = entry.get_publisher()
sub = entry.get_subscriber()[0]
pub_str = '%s/%d,%s,%s,%s' % \
(pub.ep_prefix.ip_prefix, pub.ep_prefix.ip_prefix_len,
pub.ep_type, pub.ep_id, pub.ep_version)
sub_str = '%s/%d,%s,%s,%s' % \
(sub.ep_prefix.ip_prefix, sub.ep_prefix.ip_prefix_len,
sub.ep_type, sub.ep_id, sub.ep_version)
print '%s %s %s' % (prefix, pub_str, sub_str)
"""
[
{
u'to': [u'default-discovery-service-assignment', u'default-dsa-rule'],
u'href': u'http://127.0.0.1:8082/dsa-rule/b241e9e7-2085-4a8b-8e4b-375ebf4a6dba',
u'uuid': u'b241e9e7-2085-4a8b-8e4b-375ebf4a6dba'
}
]
"""
def show_dsa_rules(vnc, dsa_rules):
if dsa_rules is None:
print 'Empty DSA group!'
return
print 'Rules (%d):' % len(dsa_rules)
print '----------'
idx = 1
for rule in dsa_rules:
dsa_rule = vnc_read_obj(vnc, 'dsa-rule', rule['to'])
entry = dsa_rule.get_dsa_rule_entry()
# entry is empty by default in a DSA rule object
if entry:
print_dsa_rule_entry(entry, prefix = '%d)' % idx)
idx += 1
print ''
# end
def vnc_read_obj(vnc, obj_type, fq_name):
method_name = obj_type.replace('-', '_')
method = getattr(vnc, "%s_read" % (method_name))
try:
return method(fq_name=fq_name)
except NoIdError:
print '%s %s not found!' % (obj_type, fq_name)
return None
# end
def parse_args():
    """Build the argparse parser for this CLI and return the parsed options.

    Returns an argparse.Namespace carrying server addresses, the requested
    operation (--op), rule/uuid/name selectors and optional keystone
    credential overrides (which may also come from OS_* env vars; see
    get_ks_var).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--server', help="Discovery server address in the form IP:Port",
        default = '127.0.0.1:5998')
    parser.add_argument(
        '--api-server', help="API server address in the form IP:Port",
        default = '127.0.0.1:8082')
    parser.add_argument(
        '--load-balance', help="Load balance type",
        choices = ["partial", "full"], default = "partial")
    parser.add_argument(
        '--admin-state', choices = ['up', 'down'],
        help="Set administrative state of a service")
    parser.add_argument(
        '--service-id', help="Service id")
    parser.add_argument(
        '--service-type', help="Service type")
    valid_ops = ['read', 'add-rule', 'del-rule', 'create', 'delete', 'load-balance']
    parser.add_argument(
        '--op', choices = valid_ops, help="Operation to perform")
    parser.add_argument(
        '--name', help="FQN of discovery-service-assignment object",
        default = 'default-discovery-service-assignment')
    parser.add_argument('--rule', help="Rule to add or delete")
    parser.add_argument('--uuid', help="object UUID")
    parser.add_argument(
        '--os-username', help="Keystone User Name", default=None)
    parser.add_argument(
        '--os-password', help="Keystone User Password", default=None)
    parser.add_argument(
        '--os-tenant-name', help="Keystone Tenant Name", default=None)
    args = parser.parse_args()
    return args
# end parse_args
def get_ks_var(args, name):
    """Resolve keystone setting *name* from CLI args or the OS_<NAME> env var.

    Precedence: a truthy --os-<name> argument wins; otherwise the OS_<NAME>
    environment variable is used (empty string counts as unset).

    Returns (value, error_message); error_message is '' on success and a
    usage hint when the value could not be found anywhere.
    """
    opts = vars(args)
    cli_value = opts['os_%s' % (name)]
    if cli_value:
        return (cli_value, '')
    env_value = os.environ.get('OS_' + name.upper())
    if env_value == '':
        env_value = None
    if env_value is not None:
        return (env_value, '')
    hint = 'You must provide a %s via either --os-%s or env[OS_%s]' % (
        name, '-'.join(name.split('_')), name.upper())
    return (None, hint)
# end
# ---- script entry: parse CLI options, validate addresses/credentials,
# ---- then dispatch on --admin-state / --op.
args = parse_args()

# Validate Discovery server information
server = args.server.split(':')
if len(server) != 2:
    print 'Discovery server address must be of the form ip:port, '\
        'for example 127.0.0.1:5998'
    sys.exit(1)
server_ip = server[0]
server_port = server[1]

# Validate API server information
api_server = args.api_server.split(':')
if len(api_server) != 2:
    print 'API server address must be of the form ip:port, '\
        'for example 127.0.0.1:8082'
    sys.exit(1)
api_server_ip = api_server[0]
api_server_port = api_server[1]

# Validate keystone credentials (CLI flag or OS_* env var; see get_ks_var)
conf = {}
for name in ['username', 'password', 'tenant_name']:
    val, rsp = get_ks_var(args, name)
    if val is None:
        print rsp
        sys.exit(1)
    conf[name] = val
username = conf['username']
password = conf['password']
tenant_name = conf['tenant_name']

print 'API Server = ', args.api_server
print 'Discovery Server = ', args.server
print 'Username = ', username
print 'Tenant = ', tenant_name
print ''

# Connect to the VNC API server; all rule operations go through this client.
try:
    vnc = VncApi(username, password, tenant_name,
        api_server[0], api_server[1])
except Exception as e:
    print 'Exception: %s' % str(e)
    sys.exit(1)

# Raw HTTP requests to the discovery server reuse the VNC auth token.
headers = DEFAULT_HEADERS.copy()
headers['X-AUTH-TOKEN'] = vnc.get_auth_token()

# --admin-state: PUT the new state straight to the discovery server and exit.
if args.admin_state:
    if not args.service_id or not args.service_type:
        print 'Please specify service type and ID'
        sys.exit(1)
    print 'Service type %s, id %s' % (args.service_type, args.service_id)
    data = {
        "service-type": args.service_type,
    }
    if args.admin_state:
        data['admin-state'] = args.admin_state
    url = "http://%s:%s/service/%s" % (server_ip, server_port, args.service_id)
    r = requests.put(url, data=json.dumps(data), headers=headers)
    if r.status_code != 200:
        print "Operation status %d" % r.status_code
    sys.exit(0)
# --op load-balance: POST a rebalance request for the whole service type.
elif args.op == 'load-balance':
    if not args.service_type:
        print 'Please specify service type'
        sys.exit(1)
    if args.service_id:
        print 'Specific service id %s ignored for this operation' % args.service_id
    url = "http://%s:%s/load-balance/%s" % (server_ip, server_port, args.service_type)
    payload = { 'type': args.load_balance }
    r = requests.post(url, headers=headers, data=json.dumps(payload))
    if r.status_code != 200:
        print "Operation status %d" % r.status_code
    sys.exit(0)

uuid = args.uuid
# transform uuid if needed (accept the 32-hex form without dashes)
if uuid and '-' not in uuid:
    uuid = str(__uuid.UUID(uuid))
# Resolve the target DSA object: by UUID when given, else by --name FQN.
fq_name = vnc.id_to_fq_name(uuid) if uuid else args.name.split(':')

print ''
print 'Oper = ', args.op
print 'Name = %s' % fq_name
print 'UUID = %s' % uuid

# --op add-rule: show current rules, confirm interactively, then create.
if args.op == 'add-rule':
    if not args.rule:
        print 'Error: missing rule'
        sys.exit(1)
    rule_entry = build_dsa_rule_entry(args.rule)
    if rule_entry is None:
        show_usage()
        sys.exit(1)
    # name is of discovery-service-assignment object
    # which consists of one or more rules
    dsa = vnc.discovery_service_assignment_read(fq_name = fq_name)
    dsa_rules = dsa.get_dsa_rules()
    show_dsa_rules(vnc, dsa_rules)
    print ''
    print_dsa_rule_entry(rule_entry)
    ans = raw_input("Confirm (y/n): ")
    if not ans or ans[0].lower() != 'y':
        sys.exit(0)
    # New rule objects are named by a fresh UUID under the DSA parent.
    rule_uuid = __uuid.uuid4()
    dsa_rule = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
    dsa_rule.set_uuid(str(rule_uuid))
    vnc.dsa_rule_create(dsa_rule)
# --op read: display the rules of the selected DSA object.
elif args.op == 'read':
    dsa = vnc_read_obj(vnc, 'discovery-service-assignment', fq_name)
    if dsa == None:
        sys.exit(1)
    dsa_rules = dsa.get_dsa_rules()
    show_dsa_rules(vnc, dsa_rules)
# --op del-rule: locate the matching rule, confirm interactively, delete.
elif args.op == 'del-rule':
    if args.rule is None:
        print 'Error: missing rule'
        sys.exit(1)
    rule = build_dsa_rule_entry(args.rule)
    if rule is None:
        show_usage()
        sys.exit(1)
    dsa = vnc.discovery_service_assignment_read(fq_name = fq_name)
    dsa_rules = dsa.get_dsa_rules()
    if dsa_rules is None:
        print 'Empty DSA group!'
        sys.exit(1)
    show_dsa_rules(vnc, dsa_rules)
    obj = find_rule(dsa_rules, rule)
    if not obj:
        print 'Rule not found. Unchanged'
        sys.exit(1)
    else:
        print 'Rule found!'
    ans = raw_input("Confirm (y/n): ")
    if not ans or ans[0].lower() != 'y':
        sys.exit(0)
    vnc.dsa_rule_delete(id = obj.uuid)
| {
"content_hash": "2610f01197b3c8af62fde5951369b4d4",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 92,
"avg_line_length": 30.558333333333334,
"alnum_prop": 0.6014907735660394,
"repo_name": "tcpcloud/contrail-controller",
"id": "3632e8f51ec81845166252e4b859b7beae189adf",
"size": "11070",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/config/utils/discovery_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88309"
},
{
"name": "C++",
"bytes": "20774234"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "Groff",
"bytes": "41295"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "19459"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "6129"
},
{
"name": "Python",
"bytes": "5701059"
},
{
"name": "Shell",
"bytes": "52859"
},
{
"name": "Thrift",
"bytes": "8382"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
} |
from collections import Counter
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
class KBreak(object):
    """ A generic object that can represent any k-break ( k>= 2)

    A k-break (see http://home.gwu.edu/~maxal/ap_tcs08.pdf) removes k edges
    of a single multicolor from a BreakpointGraph and replaces them with k
    new edges of the same multicolor over the same vertex set, preserving
    every vertex degree. Construction validates the supplied data against
    this definition.

    Attributes:

    * :attr:`KBreak.start_edges`: vertex pairs for the edges to be removed
    * :attr:`KBreak.result_edges`: vertex pairs for the edges to be created
    * :attr:`KBreak.multicolor`: a :class:`bg.multicolor.Multicolor` shared by all affected edges
    * :attr:`KBreak.data`: auxiliary metadata dict (defaults to ``{"origin": None}``)
    """

    def __init__(self, start_edges, result_edges, multicolor, data=None):
        """ Validate and store a k-break description.

        :param start_edges: pairs of vertices whose edges are removed
        :type start_edges: ``list(tuple(vertex, vertex), ...)``
        :param result_edges: pairs of vertices whose edges are created
        :type result_edges: ``list(tuple(vertex, vertex), ...)``
        :param multicolor: multicolor of all removed / created edges
        :type multicolor: :class:`bg.multicolor.Multicolor`
        :param data: optional metadata dict; defaults via :meth:`create_default_data_dict`
        :raises: ``ValueError`` when an entry is not a vertex pair or the
            vertex degrees would change
        """
        self.start_edges = start_edges
        self.result_edges = result_edges
        self.multicolor = multicolor
        self.data = self.create_default_data_dict() if data is None else data
        # Every entry in both edge lists must be a pair of vertices.
        # (Start edges are checked first, matching the original order.)
        for label, edges in (("start", self.start_edges),
                             ("result", self.result_edges)):
            for vertex_pair in edges:
                if len(vertex_pair) != 2:
                    raise ValueError("Expected edges in a form of pairs of vertices.\n "
                                     "Not a pair of vertices ({issue}) in {label} edges."
                                     "".format(issue=str(vertex_pair), label=label))
        if not KBreak.valid_kbreak_matchings(start_edges=self.start_edges,
                                             result_edges=self.result_edges):
            raise ValueError("Supplied sets of start and result edges do not correspond to "
                             "correct k-break operation (either the set of vertices is not consistent, or "
                             "the degrees of vertices change)")

    @property
    def is_a_two_break(self):
        # A classical 2-break replaces exactly two edges.
        return len(self.start_edges) == 2

    @property
    def is_a_fusion(self):
        # A fusion is a 2-break producing at least one edge whose vertices
        # are all irregular.
        return self.is_a_two_break and any(
            all(vertex.is_irregular_vertex for vertex in vertex_set)
            for vertex_set in self.result_edges)

    @classmethod
    def create_default_data_dict(cls):
        """Fresh default metadata dict for a k-break."""
        return {
            "origin": None
        }

    @staticmethod
    def valid_kbreak_matchings(start_edges, result_edges):
        """ Check that start and result edges induce identical vertex degrees.

        A k-break must leave every vertex degree unchanged; counting how
        often each vertex appears across the edge pairs gives its degree.

        :param start_edges: pairs of vertices whose edges are removed
        :type start_edges: ``list(tuple(vertex, vertex), ...)``
        :param result_edges: pairs of vertices whose edges are created
        :type result_edges: ``list(tuple(vertex, vertex), ...)``
        :return: whether both edge sets induce the same vertex degrees
        :rtype: ``Boolean``
        """
        degrees_before = Counter(vertex for pair in start_edges for vertex in pair)
        degrees_after = Counter(vertex for pair in result_edges for vertex in pair)
        return degrees_before == degrees_after
"content_hash": "3d2c00ecf31ececf77424f436aa59321",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 225,
"avg_line_length": 57.865979381443296,
"alnum_prop": 0.6638161411010155,
"repo_name": "aganezov/bg",
"id": "04da680c07bc034bb53917bcea8a1dcb393edf8d",
"size": "5637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bg/kbreak.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629658"
},
{
"name": "Shell",
"bytes": "1355"
}
],
"symlink_target": ""
} |
import os
import os.path
import shutil
import time
from dtest import Tester
from tools import sslkeygen
from tools.decorators import since
_LOG_ERR_SIG = "^javax.net.ssl.SSLHandshakeException: sun.security.validator.ValidatorException: Certificate signature validation failed$"
_LOG_ERR_IP = "^javax.net.ssl.SSLHandshakeException: java.security.cert.CertificateException: No subject alternative names matching IP address [0-9.]+ found$"
_LOG_ERR_HOST = "^javax.net.ssl.SSLHandshakeException: java.security.cert.CertificateException: No name matching \S+ found$"
_LOG_ERR_CERT = "^javax.net.ssl.SSLHandshakeException: Received fatal alert: certificate_unknown$"
@since('3.6')
class TestNodeToNodeSSLEncryption(Tester):
    """Internode (node-to-node) TLS encryption tests.

    Each test generates per-node keystores/truststores via sslkeygen,
    installs them through server_encryption_options, and then checks either
    that the cluster starts (and accepts a CQL connection) or that the
    expected TLS handshake failure appears in the node logs.
    """

    def ssl_enabled_test(self):
        """Cluster should start when both nodes' certs share a common CA."""
        credNode1 = sslkeygen.generate_credentials("127.0.0.1")
        credNode2 = sslkeygen.generate_credentials("127.0.0.2", credNode1.cakeystore, credNode1.cacert)
        self.setup_nodes(credNode1, credNode2)
        self.cluster.start()
        self.cql_connection(self.node1)

    def ssl_correct_hostname_with_validation_test(self):
        """Cluster should start when certs match hostnames and endpoint verification is on."""
        credNode1 = sslkeygen.generate_credentials("127.0.0.1")
        credNode2 = sslkeygen.generate_credentials("127.0.0.2", credNode1.cakeystore, credNode1.cacert)
        self.setup_nodes(credNode1, credNode2, endpointVerification=True)
        self.allow_log_errors = False
        self.cluster.start()
        time.sleep(2)
        self.cql_connection(self.node1)

    def ssl_wrong_hostname_no_validation_test(self):
        """Cluster should start despite cert/hostname mismatch when endpoint verification is off."""
        credNode1 = sslkeygen.generate_credentials("127.0.0.80")
        credNode2 = sslkeygen.generate_credentials("127.0.0.81", credNode1.cakeystore, credNode1.cacert)
        self.setup_nodes(credNode1, credNode2, endpointVerification=False)
        self.cluster.start()
        time.sleep(2)
        self.cql_connection(self.node1)

    def ssl_wrong_hostname_with_validation_test(self):
        """Startup should fail when certs don't match hostnames and endpoint verification is on."""
        credNode1 = sslkeygen.generate_credentials("127.0.0.80")
        credNode2 = sslkeygen.generate_credentials("127.0.0.81", credNode1.cakeystore, credNode1.cacert)
        self.setup_nodes(credNode1, credNode2, endpointVerification=True)
        self.allow_log_errors = True
        self.cluster.start(no_wait=True)
        # Both nodes should log a hostname/IP verification failure.
        found = self._grep_msg(self.node1, _LOG_ERR_IP, _LOG_ERR_HOST)
        self.assertTrue(found)
        found = self._grep_msg(self.node2, _LOG_ERR_IP, _LOG_ERR_HOST)
        self.assertTrue(found)
        self.cluster.stop()
        self.assertTrue(found)

    def ssl_client_auth_required_fail_test(self):
        """Peers need to perform mutual auth (client auth required), but do not supply the local cert."""
        credNode1 = sslkeygen.generate_credentials("127.0.0.1")
        credNode2 = sslkeygen.generate_credentials("127.0.0.2")
        self.setup_nodes(credNode1, credNode2, client_auth=True)
        self.allow_log_errors = True
        self.cluster.start(no_wait=True)
        time.sleep(2)
        found = self._grep_msg(self.node1, _LOG_ERR_CERT)
        self.assertTrue(found)
        found = self._grep_msg(self.node2, _LOG_ERR_CERT)
        self.assertTrue(found)
        self.cluster.stop()
        self.assertTrue(found)

    def ssl_client_auth_required_succeed_test(self):
        """Peers perform mutual auth (client auth required) with mutually imported CA certs."""
        credNode1 = sslkeygen.generate_credentials("127.0.0.1")
        credNode2 = sslkeygen.generate_credentials("127.0.0.2", credNode1.cakeystore, credNode1.cacert)
        # Cross-import each node's CA cert so both sides trust each other.
        sslkeygen.import_cert(credNode1.basedir, 'ca127.0.0.2', credNode2.cacert, credNode1.cakeystore)
        sslkeygen.import_cert(credNode2.basedir, 'ca127.0.0.1', credNode1.cacert, credNode2.cakeystore)
        self.setup_nodes(credNode1, credNode2, client_auth=True)
        self.cluster.start()
        self.cql_connection(self.node1)

    def ca_mismatch_test(self):
        """CA mismatch should cause nodes to fail to connect"""
        credNode1 = sslkeygen.generate_credentials("127.0.0.1")
        credNode2 = sslkeygen.generate_credentials("127.0.0.2")  # mismatching CA!
        self.setup_nodes(credNode1, credNode2)
        self.allow_log_errors = True
        self.cluster.start(no_wait=True)
        found = self._grep_msg(self.node1, _LOG_ERR_SIG)
        self.cluster.stop()
        self.assertTrue(found)

    def _grep_msg(self, node, *patterns):
        """Poll the node's log for up to ~30s; True when any regex matches.

        Renamed the varargs from the misleading `*kwargs` to `*patterns`
        (they are positional regex strings, not keyword arguments).
        """
        tries = 30
        while tries > 0:
            try:
                print("Checking logs for error")
                for err in patterns:
                    m = node.grep_log(err)
                    if m:
                        print("Found log message: {}".format(m[0]))
                        return True
            except IOError:
                pass  # log does not exist yet
            time.sleep(1)
            tries -= 1
        return False

    def setup_nodes(self, credentials1, credentials2, endpointVerification=False, client_auth=False):
        """Populate a two-node cluster and install per-node SSL credentials."""
        cluster = self.cluster

        def copy_cred(credentials, node):
            # `dir` shadowed the builtin in the original; renamed to conf_dir.
            conf_dir = node.get_conf_dir()
            print("Copying credentials to node %s" % conf_dir)
            kspath = os.path.join(conf_dir, 'keystore.jks')
            tspath = os.path.join(conf_dir, 'truststore.jks')
            shutil.copyfile(credentials.keystore, kspath)
            shutil.copyfile(credentials.cakeystore, tspath)
            node.set_configuration_options(values={
                'server_encryption_options': {
                    'internode_encryption': 'all',
                    'keystore': kspath,
                    'keystore_password': 'cassandra',
                    'truststore': tspath,
                    'truststore_password': 'cassandra',
                    'require_endpoint_verification': endpointVerification,
                    'require_client_auth': client_auth
                }
            })

        cluster = cluster.populate(2)
        self.node1 = cluster.nodelist()[0]
        copy_cred(credentials1, self.node1)
        self.node2 = cluster.nodelist()[1]
        copy_cred(credentials2, self.node2)
| {
"content_hash": "2b1b4226f8086bbaf3a3e1b986d66e11",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 158,
"avg_line_length": 37.93413173652694,
"alnum_prop": 0.6374112075769535,
"repo_name": "riptano/cassandra-dtest",
"id": "a11a3f4c91875d3bf0eda1be5b6bdf99fb0cb3af",
"size": "6335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sslnodetonode_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2350477"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
from ctypes import *
import unittest, sys
from test import support
from ctypes.test import is_resource_enabled, xfail
################################################################
# This section should be moved into ctypes\__init__.py, when it's ready.
from _ctypes import PyObj_FromPtr
################################################################
# getrefcount is only needed for the refcount-gated tests below.
if is_resource_enabled("refcount"):
    from sys import getrefcount as grc

# Pick the ctypes type matching the interpreter's Py_ssize_t.
if sys.version_info > (2, 4):
    c_py_ssize_t = c_size_t
else:
    c_py_ssize_t = c_int
class PythonAPITestCase(unittest.TestCase):
    """Call CPython C-API functions through ctypes.pythonapi.

    NOTE(review): several tests assert exact reference counts, which
    depend on the statements being written exactly as they are; do not
    restructure them casually.
    """

    @xfail
    def test_PyBytes_FromStringAndSize(self):
        # Declare the C signature, then call into the interpreter's own API.
        PyBytes_FromStringAndSize = pythonapi.PyBytes_FromStringAndSize
        PyBytes_FromStringAndSize.restype = py_object
        PyBytes_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t
        self.assertEqual(PyBytes_FromStringAndSize(b"abcdefghi", 3), b"abc")

    @support.refcount_test
    def test_PyString_FromString(self):
        pythonapi.PyBytes_FromString.restype = py_object
        pythonapi.PyBytes_FromString.argtypes = (c_char_p,)

        s = b"abc"
        refcnt = grc(s)
        # Conversion must not leak a reference on the source object.
        pyob = pythonapi.PyBytes_FromString(s)
        self.assertEqual(grc(s), refcnt)
        self.assertEqual(s, pyob)
        del pyob
        self.assertEqual(grc(s), refcnt)

    if is_resource_enabled("refcount"):
        # This test is unreliable, because it is possible that code in
        # unittest changes the refcount of the '42' integer. So, it
        # is disabled by default.
        def test_PyLong_Long(self):
            ref42 = grc(42)
            pythonapi.PyLong_FromLong.restype = py_object
            self.assertEqual(pythonapi.PyLong_FromLong(42), 42)

            self.assertEqual(grc(42), ref42)

            pythonapi.PyLong_AsLong.argtypes = (py_object,)
            pythonapi.PyLong_AsLong.restype = c_long

            res = pythonapi.PyLong_AsLong(42)
            # NOTE(review): the +1 assumes `res` is the same cached int
            # object as 42, so holding `res` adds a reference — confirm.
            self.assertEqual(grc(res), ref42 + 1)
            del res
            self.assertEqual(grc(42), ref42)

    @support.refcount_test
    def test_PyObj_FromPtr(self):
        s = "abc def ghi jkl"
        ref = grc(s)
        # id(python-object) is the address
        pyobj = PyObj_FromPtr(id(s))
        self.assertIs(s, pyobj)

        # PyObj_FromPtr acquires its own reference to s.
        self.assertEqual(grc(s), ref + 1)
        del pyobj
        self.assertEqual(grc(s), ref)

    @xfail
    def test_PyOS_snprintf(self):
        PyOS_snprintf = pythonapi.PyOS_snprintf
        PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p

        buf = c_buffer(256)
        PyOS_snprintf(buf, sizeof(buf), b"Hello from %s", b"ctypes")
        self.assertEqual(buf.value, b"Hello from ctypes")

        PyOS_snprintf(buf, sizeof(buf), b"Hello from %s (%d, %d, %d)", b"ctypes", 1, 2, 3)
        self.assertEqual(buf.value, b"Hello from ctypes (1, 2, 3)")

        # not enough arguments
        self.assertRaises(TypeError, PyOS_snprintf, buf)

    @xfail
    def test_pyobject_repr(self):
        self.assertEqual(repr(py_object()), "py_object(<NULL>)")
        self.assertEqual(repr(py_object(42)), "py_object(42)")
        self.assertEqual(repr(py_object(object)), "py_object(%r)" % object)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "1743d231efdfb1ea723738971540b199",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 90,
"avg_line_length": 33.20618556701031,
"alnum_prop": 0.6022974231605092,
"repo_name": "timm/timmnix",
"id": "22cbc9157079ffda7768a84905b8d0e8fce680db",
"size": "3221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypy3-v5.5.0-linux64/lib-python/3/ctypes/test/test_python_api.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1641"
},
{
"name": "Batchfile",
"bytes": "1234"
},
{
"name": "C",
"bytes": "436685"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Common Lisp",
"bytes": "4"
},
{
"name": "Emacs Lisp",
"bytes": "290698"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1540"
},
{
"name": "Prolog",
"bytes": "14301"
},
{
"name": "Python",
"bytes": "21267592"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "27687"
},
{
"name": "TeX",
"bytes": "3052861"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class TopologyAssociation(Model):
    """A resource that has an association with a parent topology resource.

    :param name: Name of the resource associated with the parent resource.
    :type name: str
    :param resource_id: ID of the resource associated with the parent
     resource.
    :type resource_id: str
    :param association_type: How the child resource relates to the parent.
     Possible values include: 'Associated', 'Contains'
    :type association_type: str or
     ~azure.mgmt.network.v2017_09_01.models.AssociationType
    """

    # Maps Python attribute names to the wire (JSON) keys used by msrest.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'association_type': {'key': 'associationType', 'type': 'str'},
    }

    def __init__(self, name=None, resource_id=None, association_type=None):
        super(TopologyAssociation, self).__init__()
        self.association_type = association_type
        self.resource_id = resource_id
        self.name = name
| {
"content_hash": "40118ee1b68ddab4c9d5fe093218c6c7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 37.44827586206897,
"alnum_prop": 0.6593001841620626,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "89b3e60f48f3c7b010e012a2cd328bdca0be2ca7",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/topology_association.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""Canonicalizes continue statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class _Continue(object):
def __init__(self):
self.used = False
self.control_var_name = None
def __repr__(self):
return '<_Continue(used: {}, var: {})>'.format(self.used,
self.control_var_name)
class _Block(object):
def __init__(self):
self.guard_created = False
self.create_guard = False
class ContinueCanonicalizationTransformer(converter.Base):
  """Canonicalizes continue statements into additional conditionals."""
  def visit_Continue(self, node):
    # Mark the enclosing loop as using `continue`, then replace the
    # statement itself with `control_var = True`.
    self.state[_Continue].used = True
    template = """
      var_name = True
    """
    return templates.replace(
        template, var_name=self.state[_Continue].control_var_name)
  def _postprocess_statement(self, node):
    # Example of how the state machine below works:
    #
    #   1| stmt           # State: Continue_.used = False
    #    |                #   Action: none
    #   2| if cond:
    #   3|   continue     # State: Continue_.used = True,
    #    |                #        Continue_.guard_created = False,
    #    |                #        Continue_.create_guard = False
    #    |                #   Action: Continue_.create_guard = True
    #   4| stmt           # State: Continue_.used = True,
    #    |                #        Continue_.guard_created = False,
    #    |                #        Continue_.create_guard = True
    #    |                #   Action: create `if not continue_used`,
    #    |                #           set Continue_.guard_created = True
    #   5| stmt           # State: Continue_.used = True,
    #    |                #        Continue_.guard_created = True
    #    |                #   Action: none (will be wrapped under previously
    #    |                #           created if node)
    if self.state[_Continue].used:
      if self.state[_Block].guard_created:
        return node, None
      elif not self.state[_Block].create_guard:
        self.state[_Block].create_guard = True
        return node, None
      else:
        self.state[_Block].guard_created = True
        template = """
          if ag__.not_(var_name):
            original_node
        """
        # All statements after this one get re-parented under the guard via
        # the (cond, cond.body) return contract of visit_block's after_visit.
        cond, = templates.replace(
            template,
            var_name=self.state[_Continue].control_var_name,
            original_node=node)
        return cond, cond.body
    return node, None
  def _visit_loop_body(self, node, nodes):
    # Each loop level gets its own _Continue and _Block state frames.
    self.state[_Continue].enter()
    self.state[_Block].enter()
    scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    # Fresh symbol for the control flag, avoiding names already referenced
    # in the loop body's scope.
    continue_var = self.ctx.namer.new_symbol('continue_', scope.referenced)
    self.state[_Continue].control_var_name = continue_var
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    if self.state[_Continue].used:
      # Initialize the control flag to False at the top of the loop body.
      template = """
        var_name = False
      """
      control_var_init = templates.replace(template, var_name=continue_var)
      nodes = control_var_init + nodes
    self.state[_Block].exit()
    self.state[_Continue].exit()
    return nodes
  def _visit_non_loop_body(self, nodes):
    # Non-loop blocks share the enclosing loop's _Continue frame but get a
    # fresh _Block frame, so guards are created per block.
    self.state[_Block].enter()
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    self.state[_Block].exit()
    return nodes
  def visit_While(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_loop_body(node, node.body)
    # A continue in the else clause applies to the containing scope.
    node.orelse = self._visit_non_loop_body(node.orelse)
    return node
  def visit_For(self, node):
    node.target = self.generic_visit(node.target)
    node.iter = self.generic_visit(node.iter)
    node.body = self._visit_loop_body(node, node.body)
    # A continue in the else clause applies to the containing scope.
    node.orelse = self._visit_non_loop_body(node.orelse)
    return node
  def visit_If(self, node):
    node.body = self.visit_block(node.body)
    node.orelse = self._visit_non_loop_body(node.orelse)
    return node
  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body = self._visit_non_loop_body(node.body)
    return node
  def visit_Try(self, node):
    node.body = self._visit_non_loop_body(node.body)
    node.orelse = self._visit_non_loop_body(node.orelse)
    # In Python 3.8 and later continue is allowed in finally blocks
    node.finalbody = self._visit_non_loop_body(node.finalbody)
    node.handlers = self.visit_block(node.handlers)
    return node
  def visit_ExceptHandler(self, node):
    node.body = self._visit_non_loop_body(node.body)
    return node
def transform(node, ctx):
  """Rewrite `continue` statements in `node` into control-flag conditionals."""
  transformer = ContinueCanonicalizationTransformer(ctx)
  return transformer.visit(node)
| {
"content_hash": "e164423ae752cddca7b95f6c2605c03e",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 78,
"avg_line_length": 34.1,
"alnum_prop": 0.617008797653959,
"repo_name": "theflofly/tensorflow",
"id": "780f837fa3966c68383ab0ba4acdfcb7b221d005",
"size": "5804",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/converters/continue_statements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644154"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59546729"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1507157"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46310564"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481712"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
"""Config flow to configure the FRITZ!Box Tools integration."""
from __future__ import annotations
import logging
from typing import Any
from urllib.parse import ParseResult, urlparse
from fritzconnection.core.exceptions import FritzConnectionException, FritzSecurityError
import voluptuous as vol
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
)
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.typing import DiscoveryInfoType
from .common import FritzBoxTools
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DOMAIN,
ERROR_AUTH_INVALID,
ERROR_CANNOT_CONNECT,
ERROR_UNKNOWN,
)
_LOGGER = logging.getLogger(__name__)
class FritzBoxToolsFlowHandler(ConfigFlow, domain=DOMAIN):
    """Handle a FRITZ!Box Tools config flow."""
    VERSION = 1
    @staticmethod
    @callback
    def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
        """Get the options flow for this handler."""
        return FritzBoxToolsOptionsFlowHandler(config_entry)
    def __init__(self) -> None:
        """Initialize FRITZ!Box Tools flow."""
        # Attributes declared without a value are filled in by the flow
        # steps before their first use.
        self._host: str | None = None
        self._entry: ConfigEntry
        self._name: str
        self._password: str
        self._port: int | None = None
        self._username: str
        self.fritz_tools: FritzBoxTools
    async def fritz_tools_init(self) -> str | None:
        """Initialize FRITZ!Box Tools class.

        Returns an ERROR_* key on failure, or None when setup succeeded
        (also None when host/port are not yet known).
        """
        if not self._host or not self._port:
            return None
        self.fritz_tools = FritzBoxTools(
            hass=self.hass,
            host=self._host,
            port=self._port,
            username=self._username,
            password=self._password,
        )
        try:
            await self.fritz_tools.async_setup()
        except FritzSecurityError:
            return ERROR_AUTH_INVALID
        except FritzConnectionException:
            return ERROR_CANNOT_CONNECT
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            return ERROR_UNKNOWN
        return None
    async def async_check_configured_entry(self) -> ConfigEntry | None:
        """Check if entry is configured."""
        # Matches entries by host only; ignored entries are excluded.
        for entry in self._async_current_entries(include_ignore=False):
            if entry.data[CONF_HOST] == self._host:
                return entry
        return None
    @callback
    def _async_create_entry(self) -> FlowResult:
        """Async create flow handler entry."""
        return self.async_create_entry(
            title=self._name,
            data={
                CONF_HOST: self.fritz_tools.host,
                CONF_PASSWORD: self.fritz_tools.password,
                CONF_PORT: self.fritz_tools.port,
                CONF_USERNAME: self.fritz_tools.username,
            },
            options={
                CONF_CONSIDER_HOME: DEFAULT_CONSIDER_HOME.total_seconds(),
            },
        )
    async def async_step_ssdp(self, discovery_info: DiscoveryInfoType) -> FlowResult:
        """Handle a flow initialized by discovery."""
        ssdp_location: ParseResult = urlparse(discovery_info[ATTR_SSDP_LOCATION])
        self._host = ssdp_location.hostname
        self._port = ssdp_location.port
        # NOTE(review): self.fritz_tools has not been initialized at this
        # point in the flow; if the SSDP advertisement carries no friendly
        # name, the fallback attribute access raises AttributeError --
        # confirm against the fritz_tools lifecycle.
        self._name = (
            discovery_info.get(ATTR_UPNP_FRIENDLY_NAME) or self.fritz_tools.model
        )
        self.context[CONF_HOST] = self._host
        if uuid := discovery_info.get(ATTR_UPNP_UDN):
            # Strip the "uuid:" scheme prefix before using the UDN as
            # the config entry's unique_id.
            if uuid.startswith("uuid:"):
                uuid = uuid[5:]
            await self.async_set_unique_id(uuid)
            self._abort_if_unique_id_configured({CONF_HOST: self._host})
        # Abort when another in-progress flow already targets this host.
        for progress in self._async_in_progress():
            if progress.get("context", {}).get(CONF_HOST) == self._host:
                return self.async_abort(reason="already_in_progress")
        if entry := await self.async_check_configured_entry():
            # Backfill the unique_id on an existing entry that predates it.
            if uuid and not entry.unique_id:
                self.hass.config_entries.async_update_entry(entry, unique_id=uuid)
            return self.async_abort(reason="already_configured")
        self.context["title_placeholders"] = {
            "name": self._name.replace("FRITZ!Box ", "")
        }
        return await self.async_step_confirm()
    async def async_step_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle user-confirmation of discovered node."""
        if user_input is None:
            return self._show_setup_form_confirm()
        errors = {}
        self._username = user_input[CONF_USERNAME]
        self._password = user_input[CONF_PASSWORD]
        error = await self.fritz_tools_init()
        if error:
            errors["base"] = error
            return self._show_setup_form_confirm(errors)
        return self._async_create_entry()
    def _show_setup_form_init(self, errors: dict[str, str] | None = None) -> FlowResult:
        """Show the setup form to the user."""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Optional(CONF_HOST, default=DEFAULT_HOST): str,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): vol.Coerce(int),
                    vol.Required(CONF_USERNAME): str,
                    vol.Required(CONF_PASSWORD): str,
                }
            ),
            errors=errors or {},
        )
    def _show_setup_form_confirm(
        self, errors: dict[str, str] | None = None
    ) -> FlowResult:
        """Show the setup form to the user."""
        return self.async_show_form(
            step_id="confirm",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_USERNAME): str,
                    vol.Required(CONF_PASSWORD): str,
                }
            ),
            description_placeholders={"name": self._name},
            errors=errors or {},
        )
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initiated by the user."""
        if user_input is None:
            return self._show_setup_form_init()
        self._host = user_input[CONF_HOST]
        self._port = user_input[CONF_PORT]
        self._username = user_input[CONF_USERNAME]
        self._password = user_input[CONF_PASSWORD]
        if not (error := await self.fritz_tools_init()):
            self._name = self.fritz_tools.model
            if await self.async_check_configured_entry():
                error = "already_configured"
        if error:
            return self._show_setup_form_init({"base": error})
        return self._async_create_entry()
    async def async_step_reauth(self, data: dict[str, Any]) -> FlowResult:
        """Handle flow upon an API authentication error."""
        if cfg_entry := self.hass.config_entries.async_get_entry(
            self.context["entry_id"]
        ):
            self._entry = cfg_entry
        self._host = data[CONF_HOST]
        self._port = data[CONF_PORT]
        self._username = data[CONF_USERNAME]
        self._password = data[CONF_PASSWORD]
        return await self.async_step_reauth_confirm()
    def _show_setup_form_reauth_confirm(
        self, user_input: dict[str, Any], errors: dict[str, str] | None = None
    ) -> FlowResult:
        """Show the reauth form to the user."""
        default_username = user_input.get(CONF_USERNAME)
        return self.async_show_form(
            step_id="reauth_confirm",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_USERNAME, default=default_username): str,
                    vol.Required(CONF_PASSWORD): str,
                }
            ),
            description_placeholders={"host": self._host},
            errors=errors or {},
        )
    async def async_step_reauth_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Dialog that informs the user that reauth is required."""
        if user_input is None:
            return self._show_setup_form_reauth_confirm(
                user_input={CONF_USERNAME: self._username}
            )
        self._username = user_input[CONF_USERNAME]
        self._password = user_input[CONF_PASSWORD]
        if error := await self.fritz_tools_init():
            return self._show_setup_form_reauth_confirm(
                user_input=user_input, errors={"base": error}
            )
        # Credentials verified: persist them and reload the entry.
        self.hass.config_entries.async_update_entry(
            self._entry,
            data={
                CONF_HOST: self._host,
                CONF_PASSWORD: self._password,
                CONF_PORT: self._port,
                CONF_USERNAME: self._username,
            },
        )
        await self.hass.config_entries.async_reload(self._entry.entry_id)
        return self.async_abort(reason="reauth_successful")
    async def async_step_import(self, import_config: dict[str, Any]) -> FlowResult:
        """Import a config entry from configuration.yaml."""
        return await self.async_step_user(
            {
                CONF_HOST: import_config[CONF_HOST],
                CONF_USERNAME: import_config[CONF_USERNAME],
                CONF_PASSWORD: import_config.get(CONF_PASSWORD),
                CONF_PORT: import_config.get(CONF_PORT, DEFAULT_PORT),
            }
        )
class FritzBoxToolsOptionsFlowHandler(OptionsFlow):
    """Options flow: lets the user tune the consider-home interval."""

    def __init__(self, config_entry: ConfigEntry) -> None:
        """Store the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Show the options form, or persist the submitted options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        # Pre-fill with the currently stored value, falling back to the
        # device-tracker default.
        current = self.config_entry.options.get(
            CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
        )
        consider_home_key = vol.Optional(CONF_CONSIDER_HOME, default=current)
        consider_home_validator = vol.All(
            vol.Coerce(int), vol.Clamp(min=0, max=900)
        )
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema({consider_home_key: consider_home_validator}),
        )
| {
"content_hash": "e598eb38052e11a1cf57c419ba2c7db6",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 88,
"avg_line_length": 34.95114006514658,
"alnum_prop": 0.5820130475302889,
"repo_name": "sander76/home-assistant",
"id": "5ca351cdec15790f2a6c77d0067cc4a56f7cc922",
"size": "10730",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/fritz/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import datetime as date
from com.hhj.pystock.snakecoin.block import Block
def create_genesis_block():
    """Manually construct block zero of the chain.

    The genesis block has no predecessor, so its index and previous hash
    are chosen by hand.
    """
    genesis_time = date.datetime.now()
    return Block(0, genesis_time, "Genesis Block", "0")
def next_block(last_block):
    """Derive the successor of `last_block`, chained to its hash."""
    new_index = last_block.index + 1
    new_data = "Hey! I'm block " + str(new_index)
    stamp = date.datetime.now()
    return Block(new_index, stamp, new_data, last_block.hash)
# Seed the chain with the genesis block.
blockchain = [create_genesis_block()]
previous_block = blockchain[0]
# Number of blocks to append after the genesis block.
num_of_blocks_to_add = 20
for _ in range(num_of_blocks_to_add):
    new_block = next_block(previous_block)
    blockchain.append(new_block)
    previous_block = new_block
    # Announce each block as it is chained on.
    print("Block #{} has been added to the blockchain!".format(new_block.index))
    print("Block data :{} !".format(new_block.data))
    print("Hash: {}\n".format(new_block.hash))
| {
"content_hash": "113d4b681c59d7d40f830b5da0520835",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 83,
"avg_line_length": 30.675675675675677,
"alnum_prop": 0.6951541850220264,
"repo_name": "hhj0325/pystock",
"id": "6d74fae17eba0aa0998a7dfb6d48373f0e4177b8",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com/hhj/pystock/snakecoin/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "87050"
}
],
"symlink_target": ""
} |
import sys
import unittest
import pymysql
_mysql = pymysql
from pymysql.constants import FIELD_TYPE
from pymysql.tests import base
class TestDBAPISet(unittest.TestCase):
    """DB-API type-set objects: equality with themselves and member types."""

    def test_set_equality(self):
        string_set = pymysql.STRING
        self.assertTrue(string_set == pymysql.STRING)

    def test_set_inequality(self):
        number_set = pymysql.NUMBER
        self.assertTrue(pymysql.STRING != number_set)

    def test_set_equality_membership(self):
        # A member field type compares equal to the whole set.
        self.assertTrue(FIELD_TYPE.VAR_STRING == pymysql.STRING)

    def test_set_inequality_membership(self):
        # A non-member field type compares unequal.
        self.assertTrue(FIELD_TYPE.DATE != pymysql.STRING)
class CoreModule(unittest.TestCase):
    """Core _mysql module features."""

    def test_NULL(self):
        """Should have a NULL constant."""
        self.assertEqual(_mysql.NULL, "NULL")

    def test_version(self):
        """Version information sanity."""
        # assertIsInstance reports the offending value and type on failure,
        # unlike assertTrue(isinstance(...)) which only prints "False".
        self.assertIsInstance(_mysql.__version__, str)
        self.assertIsInstance(_mysql.version_info, tuple)
        self.assertEqual(len(_mysql.version_info), 5)

    def test_client_info(self):
        """get_client_info() should return the client version string."""
        self.assertIsInstance(_mysql.get_client_info(), str)

    def test_thread_safe(self):
        """thread_safe() should report an integer flag."""
        self.assertIsInstance(_mysql.thread_safe(), int)
class CoreAPI(unittest.TestCase):
    """Test _mysql interaction internals."""

    def setUp(self):
        # Connect using the first configured test database.
        kwargs = base.PyMySQLTestCase.databases[0].copy()
        kwargs["read_default_file"] = "~/.my.cnf"
        self.conn = _mysql.connect(**kwargs)

    def tearDown(self):
        self.conn.close()

    def test_thread_id(self):
        tid = self.conn.thread_id()
        self.assertIsInstance(
            tid, int, "thread_id didn't return an integral value."
        )
        # The original passed ("evil",) AND the explanatory string as extra
        # positional arguments to thread_id() via assertRaises; the context
        # manager form makes the intended call explicit.
        with self.assertRaises(TypeError, msg="thread_id shouldn't accept arguments."):
            self.conn.thread_id(("evil",))

    def test_affected_rows(self):
        self.assertEqual(
            self.conn.affected_rows(), 0, "Should return 0 before we do anything."
        )

    # def test_debug(self):
    ## FIXME Only actually tests if you lack SUPER
    # self.assertRaises(pymysql.OperationalError,
    # self.conn.dump_debug_info)

    def test_charset_name(self):
        # assertIsInstance gives clearer failure output than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(
            self.conn.character_set_name(), str, "Should return a string."
        )

    def test_host_info(self):
        self.assertIsInstance(
            self.conn.get_host_info(), str, "should return a string"
        )

    def test_proto_info(self):
        self.assertIsInstance(
            self.conn.get_proto_info(), int, "Should return an int."
        )

    def test_server_info(self):
        self.assertIsInstance(
            self.conn.get_server_info(), str, "Should return an str."
        )
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "a29fb5b6c76d16035cd32ce4b0900c05",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 86,
"avg_line_length": 28.08,
"alnum_prop": 0.6257122507122507,
"repo_name": "PyMySQL/PyMySQL",
"id": "b8d4bb1e6e60c243055e7336f4c77619b2ecdd69",
"size": "2808",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "pymysql/tests/thirdparty/test_MySQLdb/test_MySQLdb_nonstandard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "225744"
}
],
"symlink_target": ""
} |
import logging
from .apidriver import APIDriver
log = logging.getLogger('vmaas.main')
class MAASException(Exception):
    """Base error raised for MAAS-related failures."""
class MAASDriverException(Exception):
    """Error raised by the underlying MAAS API driver."""
class MAASClient(object):
    """
    A wrapper for the python maas client which makes using the API a bit
    more user friendly.
    """

    def __init__(self, api_url, api_key, **kwargs):
        self.driver = self._get_driver(api_url, api_key, **kwargs)

    def _get_driver(self, api_url, api_key, **kwargs):
        # Indirection point so tests can substitute a fake driver.
        return APIDriver(api_url, api_key)

    def _validate_maas(self):
        """Probe the MAAS API with the configured credentials.

        :returns: True when validation succeeds, False otherwise.
        """
        try:
            self.driver.validate_maas()
            # Use the module logger ('vmaas.main') defined at the top of this
            # module instead of the root logger, so messages are attributed
            # consistently with the rest of the package.
            log.info("Validated MAAS API")
            return True
        except Exception as e:
            log.error("MAAS API validation has failed. "
                      "Check maas_url and maas_credentials. Error: {}"
                      "".format(e))
            return False

    ###########################################################################
    # DNS API - http://maas.ubuntu.com/docs2.0/api.html#dnsresource
    ###########################################################################
    def get_dnsresources(self):
        """
        Get a listing of DNS resources which are currently defined.

        :returns: a list of DNS objects
        DNS object is a dictionary of the form:
        {'fqdn': 'keystone.maas',
         'resource_records': [],
         'address_ttl': None,
         'resource_uri': '/MAAS/api/2.0/dnsresources/1/',
         'ip_addresses': [],
         'id': 1}
        """
        resp = self.driver.get_dnsresources()
        # Return an empty list (not None) on failure so callers can iterate.
        return resp.data if resp.ok else []

    def update_dnsresource(self, rid, fqdn, ip_address):
        """
        Updates a DNS resource with a new ip_address

        :param rid: The dnsresource_id i.e.
                    /api/2.0/dnsresources/{dnsresource_id}/
        :param fqdn: The fqdn address to update
        :param ip_address: The ip address to update the A record to point to
        :returns: True if the DNS object was updated, False otherwise.
        """
        resp = self.driver.update_dnsresource(rid, fqdn, ip_address)
        # bool() keeps the exact True/False contract of the original code.
        return bool(resp.ok)

    def create_dnsresource(self, fqdn, ip_address, address_ttl=None):
        """
        Creates a new DNS resource

        :param fqdn: The fqdn address to create
        :param ip_address: The ip address the A record points to
        :param address_ttl: DNS time to live
        :returns: True if the DNS object was created, False otherwise.
        """
        resp = self.driver.create_dnsresource(fqdn, ip_address, address_ttl)
        return bool(resp.ok)

    ###########################################################################
    # IP API - http://maas.ubuntu.com/docs2.0/api.html#ip-address
    ###########################################################################
    def get_ipaddresses(self):
        """
        Get a list of ip addresses

        :returns: a list of ip address dictionaries
        """
        resp = self.driver.get_ipaddresses()
        return resp.data if resp.ok else []

    def create_ipaddress(self, ip_address, hostname=None):
        """
        Creates a new IP resource

        :param ip_address: The ip address to register
        :param hostname: the hostname to register at the same time
        :returns: True if the IP object was created, False otherwise.
        """
        resp = self.driver.create_ipaddress(ip_address, hostname)
        return bool(resp.ok)
| {
"content_hash": "f678fd485d5afae09aa885e7bc388007",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 32.39473684210526,
"alnum_prop": 0.5353371242891958,
"repo_name": "CanonicalBootStack/charm-hacluster",
"id": "70d017a6ba9abf2a70b50da738ba903a42c1f96d",
"size": "4268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocf/maas/maasclient/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "488"
},
{
"name": "Perl",
"bytes": "9735"
},
{
"name": "Python",
"bytes": "586050"
},
{
"name": "Shell",
"bytes": "15354"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # The customer and product tables must exist first: Order holds
    # foreign keys into both apps.
    dependencies = [
        ('customer', '0001_initial'),
        ('product', '0004_auto_20151026_1435'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                # Auto-generated surrogate primary key.
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('quantity', models.IntegerField(default=1)),
                # NOTE(review): the meaning of status codes is not visible
                # here -- presumably defined on the Order model; confirm.
                ('status', models.SmallIntegerField()),
                ('created_at', models.DateTimeField()),
                ('customer', models.ForeignKey(to='customer.Customer')),
                ('product', models.ForeignKey(to='product.Product')),
            ],
        ),
    ]
| {
"content_hash": "4c02d1873b63417a56dd014567ea98c7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 32,
"alnum_prop": 0.55875,
"repo_name": "MahdiZareie/PyShop",
"id": "4f65818de3bbbe46e93537a5469b55e0e64447a2",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31673"
},
{
"name": "HTML",
"bytes": "14687"
},
{
"name": "Python",
"bytes": "26480"
}
],
"symlink_target": ""
} |
# Prefer setuptools (provides find_packages); fall back to plain distutils.
try:
    from setuptools import setup, find_packages
except ImportError:
    # NOTE(review): this fallback imports only `setup`; the find_packages()
    # call below would then raise NameError -- confirm whether the distutils
    # fallback is still needed or should define its own package list.
    from distutils.core import setup
setup(
    name = 'nimblenet',
    packages = find_packages(),
    version = '0.2.1',
    description = 'Efficient python (NumPy) neural network library.',
    long_description = 'This is an efficient implementation of a fully connected neural network in NumPy. The network can be trained by a variety of learning algorithms: backpropagation, resilient backpropagation and scaled conjugate gradient learning. The network has been developed with PYPY in mind.',
    author = 'Jorgen Grimnes',
    author_email = 'jorgenkg@yahoo.no',
    url = 'https://jorgenkg.github.io/python-neural-network/',
    download_url = 'https://github.com/jorgenkg/python-neural-network/tarball/0.2.1',
    keywords = ["python", "numpy", "neuralnetwork", "neural", "network", "efficient", "lightweight"],
    install_requires = [ 'numpy' ],
    # Optional extras: scipy speeds up the sigmoid and enables training via
    # scipy.optimize.minimize.
    extras_require = {
        'efficient_sigmoid' : ["scipy"],
        'training_with_scipy_minimize' : ["scipy"]
    },
    classifiers = [
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
) | {
"content_hash": "ccd6bdcd0650d42cfd4fae2fcef40f7b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 302,
"avg_line_length": 47.370370370370374,
"alnum_prop": 0.6387802971071149,
"repo_name": "jorgenkg/python-neural-network",
"id": "454ec9be6e65938460dce7754f07eead73897c77",
"size": "1279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "56648"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
from ..exception import TigrisException
# import urllib.parse
class Enrollment(object):
""" Tigris Enrollment object """
def __init__(self, enrollment_obj, session):
"""
:param module_obj:
A `dict` of the module data
:type module_obj:
`dict`
:param session:
The client session
:type session:
:class:`TigrisSession`
"""
self.BASE_ENDPOINT = 'users/{0}/enrollments'.format(
enrollment_obj['user_id']
)
self._session = session
self._populate(enrollment_obj)
@property
def id(self):
return self._id
@property
def user_id(self):
return self._user_id
@property
def course_id(self):
return self._course_id
@property
def registered_on(self):
return self._registered_on
@property
def completed_on(self):
return self._completed_on
def _populate(self, enrollment_obj):
try:
self._id = enrollment_obj['id']
except KeyError:
self._id = False
try:
self._course_id = enrollment_obj['course_id']
except KeyError:
self._course_id = None
try:
self._user_id = enrollment_obj['user_id']
except KeyError:
self._user_id = None
try:
self.progress = enrollment_obj['progress']
except KeyError:
self.progress = None
try:
self._registered_on = enrollment_obj['registered_on']
except KeyError:
self._registered_on = None
try:
self._completed_on = enrollment_obj['completed_on']
except KeyError:
self._completed_on = None
try:
self.is_enrolled = enrollment_obj['is_enrolled']
except KeyError:
self.is_enrolled = None
def get(self):
"""
Retrieves the enrollment
:rtype:
:class:`Enrollment`
"""
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
content, status_code, headers = self._session._get(url)
self._populate(content)
return self
def save(self, new=False):
"""
Upserts the Module object to the DB.
:param new:
Determines whether or not this Module is to be inserted or updated.
:type new:
`bool`
:rtype:
`dict`
"""
enrollment_obj = dict(vars(self))
del enrollment_obj['_id']
del enrollment_obj['registered_on']
del enrollment_obj['_session']
del enrollment_obj['BASE_ENDPOINT']
if new:
del enrollment_obj['completed_on']
url = self.BASE_ENDPOINT
data = {'fields': enrollment_obj}
content, status_code, headers = self._session._post(url, data=data)
if 'error' in content:
raise TigrisException(content['error'])
self._populate(content['result'])
else:
del enrollment_obj['course_id']
del enrollment_obj['user_id']
url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
data = {'fields': enrollment_obj}
content, status_code, headers = self._session._patch(url, data=data)
if 'error' in content:
raise TigrisException(content['error'])
self.get()
return self
def destroy(self):
    """Issue a DELETE for this enrollment against the API."""
    endpoint = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
    self._session._delete(endpoint)
| {
"content_hash": "1a7c3c43236c560d666f1ff77df1ba4d",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 80,
"avg_line_length": 29.228346456692915,
"alnum_prop": 0.5363685344827587,
"repo_name": "jogral/tigris-python-sdk",
"id": "50e73f66c35f78e0ff5f0143df3ae30d62d499d2",
"size": "3730",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tigrissdk/platform/enrollment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72569"
}
],
"symlink_target": ""
} |
import unittest
import reactivex
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
    """Marker exception raised on purpose inside test helpers."""
# Helper function for raising exceptions within lambdas
def _raise(error: str):
    """Raise :class:`RxException` carrying *error* as its message."""
    raise RxException(error)
class TestConcat(unittest.TestCase):
    """Marble tests for the ``concat`` operator.

    Naming convention: ``test_concat_<a>_<b>`` concatenates a source of kind
    ``<a>`` with one of kind ``<b>``, where the kinds are empty (completes
    only), never (emits nothing), throw / on_error (errors), and return /
    some_data (emits values then completes).  The sources are hot
    observables, so events at t=150 fire before the test subscription and
    are never observed in ``results.messages``.
    """

    def test_concat_empty_empty(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        msgs2 = [on_next(150, 1), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        # Only the terminal completion of the second source is observed.
        assert results.messages == [on_completed(250)]

    def test_concat_empty_never(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = reactivex.never()

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == []

    def test_concat_never_empty(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = reactivex.never()

        def create():
            return e2.pipe(ops.concat(e1))

        results = scheduler.start(create)
        assert results.messages == []

    def test_concat_never_never(self):
        scheduler = TestScheduler()
        e1 = reactivex.never()
        e2 = reactivex.never()

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == []

    def test_concat_empty_on_error(self):
        ex = "ex"
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        msgs2 = [on_next(150, 1), on_error(250, ex)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_error(250, ex)]

    def test_concat_throw_empty(self):
        ex = "ex"
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        # The first source's error terminates the stream; e2 is never reached.
        assert results.messages == [on_error(230, ex)]

    def test_concat_throw_on_error(self):
        ex = "ex"
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_error(250, "ex2")]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_error(230, ex)]

    def test_concat_return_empty(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
        msgs2 = [on_next(150, 1), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_next(210, 2), on_completed(250)]

    def test_concat_empty_return(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_completed(230)]
        msgs2 = [on_next(150, 1), on_next(240, 2), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_next(240, 2), on_completed(250)]

    def test_concat_return_never(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = reactivex.never()

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        # e2 never completes, so no terminal notification appears.
        assert results.messages == [on_next(210, 2)]

    def test_concat_never_return(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = reactivex.never()

        def create():
            return e2.pipe(ops.concat(e1))

        results = scheduler.start(create)
        assert results.messages == []

    def test_concat_return_return(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(220, 2), on_completed(230)]
        msgs2 = [on_next(150, 1), on_next(240, 3), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_next(220, 2), on_next(240, 3), on_completed(250)]

    def test_concat_throw_return(self):
        ex = "ex"
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_next(240, 2), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_error(230, ex)]

    def test_concat_return_on_error(self):
        ex = "ex"
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(220, 2), on_completed(230)]
        msgs2 = [on_next(150, 1), on_error(250, ex)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [on_next(220, 2), on_error(250, ex)]

    def test_concat_some_data_some_data(self):
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_completed(225)]
        msgs2 = [on_next(150, 1), on_next(230, 4), on_next(240, 5), on_completed(250)]
        e1 = scheduler.create_hot_observable(msgs1)
        e2 = scheduler.create_hot_observable(msgs2)

        def create():
            return e1.pipe(ops.concat(e2))

        results = scheduler.start(create)
        assert results.messages == [
            on_next(210, 2),
            on_next(220, 3),
            on_next(230, 4),
            on_next(240, 5),
            on_completed(250),
        ]

    def test_concat_forward_scheduler(self):
        """The scheduler given at subscribe time is forwarded to both sources."""
        scheduler = TestScheduler()
        subscribe_schedulers = {"e1": "unknown", "e2": "unknown"}

        def subscribe_e1(observer, scheduler="not_set"):
            subscribe_schedulers["e1"] = scheduler
            observer.on_completed()

        def subscribe_e2(observer, scheduler="not_set"):
            subscribe_schedulers["e2"] = scheduler
            observer.on_completed()

        e1 = reactivex.create(subscribe_e1)
        e2 = reactivex.create(subscribe_e2)
        stream = e1.pipe(ops.concat(e2))
        stream.subscribe(scheduler=scheduler)
        scheduler.advance_to(1000)
        assert subscribe_schedulers["e1"] is scheduler
        assert subscribe_schedulers["e2"] is scheduler

    def test_concat_forward_none_scheduler(self):
        """With no scheduler at subscribe time, None is forwarded to sources."""
        subscribe_schedulers = {"e1": "unknown", "e2": "unknown"}

        def subscribe_e1(observer, scheduler="not_set"):
            subscribe_schedulers["e1"] = scheduler
            observer.on_completed()

        def subscribe_e2(observer, scheduler="not_set"):
            subscribe_schedulers["e2"] = scheduler
            observer.on_completed()

        e1 = reactivex.create(subscribe_e1)
        e2 = reactivex.create(subscribe_e2)
        stream = e1.pipe(ops.concat(e2))
        stream.subscribe()
        assert subscribe_schedulers["e1"] is None
        assert subscribe_schedulers["e2"] is None
| {
"content_hash": "c14cc5ea618690c5405f8f99853b298b",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 88,
"avg_line_length": 33.21969696969697,
"alnum_prop": 0.6023945267958951,
"repo_name": "ReactiveX/RxPY",
"id": "b7d1b063cc830cc84a77bbf1a2f53b43aaac08c6",
"size": "8770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_observable/test_concat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1503"
},
{
"name": "Jupyter Notebook",
"bytes": "347338"
},
{
"name": "Python",
"bytes": "1726895"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_database_account_get_metric_definitions.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List Cosmos DB metric definitions for account 'ddb1' and print them."""
    credential = DefaultAzureCredential()
    client = CosmosDBManagementClient(
        credential=credential,
        subscription_id="subid",
    )
    metric_definitions = client.database_accounts.list_metric_definitions(
        resource_group_name="rg1",
        account_name="ddb1",
    )
    for metric_definition in metric_definitions:
        print(metric_definition)
# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2022-08-15-preview/examples/CosmosDBDatabaseAccountGetMetricDefinitions.json
if __name__ == "__main__":
main()
| {
"content_hash": "57c45d4625208d21dfe6c0c0fb146530",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 168,
"avg_line_length": 34.14705882352941,
"alnum_prop": 0.73557278208441,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a438667d24ca04df91ba68a5d424f455e4b627a6",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_database_account_get_metric_definitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""This example gets all creative sets.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Page through and print every creative set.

    :param client: an initialized ``ad_manager.AdManagerClient``.
    """
    # Initialize appropriate service.
    creative_set_service = client.GetService(
        'CreativeSetService', version='v201811')
    # Create a statement to select creative sets.
    statement = ad_manager.StatementBuilder(version='v201811')
    # Retrieve a small amount of creative sets at a time, paging
    # through until all creative sets have been retrieved.
    while True:
        response = creative_set_service.getCreativeSetsByStatement(
            statement.ToStatement())
        if 'results' in response and len(response['results']):
            for creative_set in response['results']:
                # Print out some information for each creative set.
                print('Creative set with ID "%d" and name "%s" was found.\n' %
                      (creative_set['id'], creative_set['name']))
            statement.offset += statement.limit
        else:
            break
    # Fixed: this was a Python 2 print *statement* (a SyntaxError on Python 3)
    # while the rest of the function already used print() calls.
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| {
"content_hash": "0c5f70184f334b37b09e27572ab06735",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 33.5,
"alnum_prop": 0.697346600331675,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "51bdb6c20f29bb4e114034fff096c6725f2d104f",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201811/creative_set_service/get_all_creative_sets.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
} |
from common_fixtures import * # NOQA
import subprocess
from subprocess import Popen
from os import path
import os
import sys
import pytest
import cattle
import ConfigParser
PROJECTS = []
CERT = '''-----BEGIN CERTIFICATE-----
MIIDJjCCAg4CCQDLCSjwGXM72TANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJB
VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMQ4wDAYDVQQDEwVhbGVuYTAeFw0xNTA3MjMwMzUzMDdaFw0xNjA3
MjIwMzUzMDdaMFUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEw
HwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDjAMBgNVBAMTBWFsZW5h
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxdVIDGlAySQmighbfNqb
TtqetENPXjNNq1JasIjGGZdOsmFvNciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg
1FECgW7oo6DOET74swUywtq/2IOeik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFm
fP5gDgthrWBWlEPTPY1tmPjI2Hepu2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqT
uo6M2QCgSX3E1kXLnipRT6jUh0HokhFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKX
EVP1Tlw0y1ext2ppS1NR9Sg46GP4+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4
LQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA45V0bnGPhIIkb54Gzjt9jyPJxPVTW
mwTCP+0jtfLxAor5tFuCERVs8+cLw1wASfu4vH/yHJ/N/CW92yYmtqoGLuTsywJt
u1+amECJaLyq0pZ5EjHqLjeys9yW728IifDxbQDX0cj7bBjYYzzUXp0DB/dtWb/U
KdBmT1zYeKWmSxkXDFFSpL/SGKoqx3YLTdcIbgNHwKNMfTgD+wTZ/fvk0CLxye4P
n/1ZWdSeZPAgjkha5MTUw3o1hjo/0H0ekI4erZFrZnG2N3lDaqDPR8djR+x7Gv6E
vloANkUoc1pvzvxKoz2HIHUKf+xFT50xppx6wsQZ01pNMSNF0qgc1vvH
-----END CERTIFICATE-----
'''
KEY = '''-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAxdVIDGlAySQmighbfNqbTtqetENPXjNNq1JasIjGGZdOsmFv
NciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg1FECgW7oo6DOET74swUywtq/2IOe
ik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFmfP5gDgthrWBWlEPTPY1tmPjI2Hep
u2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqTuo6M2QCgSX3E1kXLnipRT6jUh0Ho
khFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKXEVP1Tlw0y1ext2ppS1NR9Sg46GP4
+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4LQIDAQABAoIBAEKeWL29L9DL+KJg
wBYiM0xxeCHxzKdHFW+Msvdhh3wUpK6S+vUclxb3NHA96RnhU8EH3jeMokDADkTr
Us1eiy2T/gkCBRscymeqUetO49IUAahyYg/nU1X7pg7eQmNkSnHmvQhE3UDjQNdJ
zJYkrROIQWZZVNIib+VLlbXTi0WIYcoukS+Jy2lfABLZbYVFMOEOv5IfRvXTjcgc
jiHUbamYM9ADR/mtupFTShyVV2UBoI8cuWSPJnWNHZ39TN61owNoVycxfagBlheO
Jb07cY0DSSx9968RYRzX9YGMUCpnoleWG5Qg29ySaLDJWqpEkNXdeJlJ+0RzErFr
TrnlXMECgYEA6OTUpfRHu8m1yhqF9HK0+aiOPVLBOkFc55Ja/dBaSApaYtcU5ZYe
IlCgGRM1+3G3bzwrwunbAdGVKdd+SiXLY5+p08HW0sFSgebdkRtcTmbq1Gvns+Fx
ZUX9QBxZq7jiQjHde68y1kpSqJfjeHktZ1voueZ0JUZwx9c7YDC/+V0CgYEA2XX1
W9f7b4Om740opDwgSLIEgIpBqSrSoJQQNzcOAWbY2CTY5xUqM9WbitlgbJ9Bo0Zo
jyHmsp3CLGz8onv7wlR67WJSqriedIBJLQD2DnmQpb3j61rNLruhcxTC5phtBheN
0ZQrO0SmfCjevLefc3jmB0Uu9qfvkoZoJPXAfRECgYEAvxbK+CPYG9fkhiB/GtRn
c5V+qAhXrUHmRceLS0iCWyvLf9/0MHCc5xD6W7isiVSD6wwW6AXTgcmCN2OuJo6e
NG7T/IDGkAS5ewZ/c8lcUqQVOBgVdD2dOjhUFB9u3/yCAUhC73IQJ02yRszhgn8C
5xS9fpL9Z3xFm2MZP9KgIa0CgYBksg1ygPmp8pF7fabjHgBpCR2yk9LBzdWIi+dS
Wgj/NyuUMsPJhXBsXi5PRkczJS+Utoa2OKGF9i0yuyjk6Hp0yv+9KnlTGngtRDYe
Q8Ksgzgqt1px4jL+v92L14JEmzJozsFZ2b2HDUv2VEqHopOQOdxyY2PSzYLPG7Pf
4XhHsQKBgEfRPtokHpt+dJ6RhdUTEQAoT2jDVGhZLaYbtGh5Jtf2F5mhQR3UlvVi
FH/0iMK8IRo8XhFw0lrmZvY0rC0ycFGewvdW5oSvZvStATObGRMHUYNdbMEAMu86
dkOGpBSMzSXoZ2d0rKcetwRWZqUadDJnakNfZkjIY64sbd5Vo4ev
-----END RSA PRIVATE KEY-----
'''
class Compose(object):
    """Thin wrapper for driving the rancher-compose binary in tests."""

    def __init__(self, client, compose_bin):
        self.compose_bin = compose_bin
        self.client = client

    def check_retcode(self, input, check_retcode, *args, **kw):
        """Run the binary and assert it exits with *check_retcode*."""
        proc = self.call(*args, **kw)
        captured = proc.communicate(input=input)
        assert check_retcode == proc.wait()
        return captured

    def check_call(self, input, *args):
        """Run the binary and assert a zero exit status."""
        proc = self.call(*args)
        captured = proc.communicate(input=input)
        assert 0 == proc.wait()
        return captured

    def call(self, *args, **kw):
        """Spawn rancher-compose with credentials taken from the API client."""
        env = {
            'RANCHER_CLIENT_DEBUG': 'true',
            'RANCHER_ACCESS_KEY': self.client._access_key,
            'RANCHER_SECRET_KEY': self.client._secret_key,
            'RANCHER_URL': self.client._url,
        }
        kw_args = {
            'env': env,
            'stdin': subprocess.PIPE,
            'stdout': sys.stdout,
            'stderr': sys.stderr,
            'cwd': _base(),
        }
        kw_args.update(kw)
        return Popen([self.compose_bin] + list(args), **kw_args)
@pytest.fixture(scope='session')
def client(admin_user_client, request):
    """Session-wide cattle API client.

    Resolution order: RANCHER_* environment variables, then the [rancher]
    section of tox.ini, then a client from a freshly created context.
    """
    try:
        return cattle.from_env(url=os.environ['RANCHER_URL'],
                               access_key=os.environ['RANCHER_ACCESS_KEY'],
                               secret_key=os.environ['RANCHER_SECRET_KEY'])
    except KeyError:
        # Env vars not set; fall through to tox.ini.
        pass
    try:
        config = ConfigParser.ConfigParser()
        config.read(path.join(_base(), '../../tox.ini'))
        return cattle.from_env(url=config.get('rancher', 'url'),
                               access_key=config.get('rancher', 'access-key'),
                               secret_key=config.get('rancher', 'secret-key'))
    except ConfigParser.NoOptionError:
        # tox.ini has no [rancher] credentials; fall through to a new context.
        pass
    return new_context(admin_user_client, request).client
def _file(f):
    """Resolve *f* relative to the repository root (four levels up)."""
    relative = '../../../../{}'.format(f)
    return path.join(_base(), relative)
def _base():
return path.dirname(__file__)
@pytest.fixture(scope='session')
def compose_bin():
    """Path to the built rancher-compose binary; fails fast if missing."""
    c = _file('bin/rancher-compose')
    assert path.exists(c)
    return c
def _clean_all(client):
    """Delete every project recorded in the module-level PROJECTS list."""
    for name in PROJECTS:
        client.delete(name)
@pytest.fixture(scope='session')
def compose(client, compose_bin, request):
    """Session-wide Compose wrapper with project cleanup on teardown."""
    return new_compose(client, compose_bin, request)
def new_compose(client, compose_bin, request):
    """Build a Compose wrapper, registering cleanup of created projects."""
    request.addfinalizer(lambda: _clean_all(client))
    return Compose(client, compose_bin)
def create_project(compose, operation='create', project_name=None, file=None,
                   input=None):
    """Run ``rancher-compose <operation>`` for a template and track the project.

    Provide either *file* (path to a compose file) or *input* (template text
    fed on stdin).  NOTE: if both are None no command is run, but the
    generated name is still appended to PROJECTS.

    :returns: the (possibly generated) project name.
    """
    if project_name is None:
        project_name = random_str()
    if file is not None:
        # TODO
        compose.check_call(None, '-f', file, '-p', project_name,
                           operation)
    elif input is not None:
        # TODO
        compose.check_call(input, '-f', '-', '-p', project_name,
                           operation)
    PROJECTS.append(project_name)
    return project_name
def _convert_instance(instance):
d = instance.__dict__
del d['type']
return d
@pytest.mark.skipif('True')
def test_build(client, compose):
    """Dockerfile build config is parsed from a compose file (skipped)."""
    project_name = create_project(compose, file='assets/build/test.yml')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'fromfile'
    assert service.launchConfig.build.dockerfile == 'subdir/Dockerfile'
    assert service.launchConfig.build.remote is None
    assert service.launchConfig.build.context.startswith('https://')
def test_args(client, compose):
    """Every supported launchConfig field survives the v1/v2 compose parse.

    Creates four stacks (build/image, compose v1/v2) and asserts the same
    full set of launchConfig fields on each.
    """
    project_name = create_project(compose, file='assets/full-with-build.yml')
    project_with_build = find_one(client.list_stack, name=project_name)
    service = find_one(project_with_build.services)
    assert _convert_instance(service.launchConfig.build) == {
        'dockerfile': 'something/other',
        'remote': 'github.com/ibuildthecloud/tiny-build',
    }
    project_name = create_project(compose,
                                  file='assets/full-with-build-v2.yml')
    project_with_build_v2 = find_one(client.list_stack,
                                     name=project_name)
    service = find_one(project_with_build_v2.services)
    assert _convert_instance(service.launchConfig.build) == {
        'dockerfile': 'something/other',
        'remote': 'github.com/ibuildthecloud/tiny-build',
    }
    project_name = create_project(compose, file='assets/full-with-image.yml')
    project_with_image = find_one(client.list_stack, name=project_name)
    service = find_one(project_with_image.services)
    assert service.launchConfig.imageUuid == 'docker:nginx'
    project_name = create_project(compose,
                                  file='assets/full-with-image-v2.yml')
    project_with_image_v2 = find_one(client.list_stack,
                                     name=project_name)
    service = find_one(project_with_image_v2.services)
    assert service.launchConfig.imageUuid == 'docker:nginx'
    # All four variants must produce an identical launch config.
    for project in (project_with_build, project_with_build_v2,
                    project_with_image, project_with_image_v2):
        service = find_one(project.services)
        assert service.name == 'web'
        launch_config = service.launchConfig
        assert launch_config.command == ['/bin/sh', '-c']
        assert len(launch_config.ports) == 2
        for p in launch_config.ports:
            assert p == '80:81/tcp' or p.endswith(':123/tcp')
        assert launch_config.dataVolumes == ['/tmp/foo', '/tmp/x:/tmp/y']
        assert launch_config.environment == {'foo': 'bar', 'a': 'b'}
        assert launch_config.dns == ['8.8.8.8', '1.1.1.1']
        assert launch_config.capAdd == ['ALL', 'SYS_ADMIN']
        assert launch_config.capDrop == ['NET_ADMIN', 'SYS_ADMIN']
        assert launch_config.dnsSearch == ['foo.com', 'bar.com']
        assert launch_config.entryPoint == ['/bin/foo', 'bar']
        assert launch_config.workingDir == '/somewhere'
        assert launch_config.user == 'somebody'
        assert launch_config.hostname == 'myhostname'
        assert launch_config.domainName == 'example.com'
        assert launch_config.memory == 100
        assert launch_config.memorySwap == 101
        assert launch_config.privileged
        assert launch_config.stdinOpen
        assert launch_config.tty
        assert 'name' not in launch_config
        assert launch_config.cpuShares == 42
        assert launch_config.cpuSet == '1,2'
        assert launch_config.devices == ['/dev/sda:/dev/a:rwm',
                                         '/dev/sdb:/dev/c:ro']
        s = 'io.rancher.service.selector.'
        # The service hash label is generated, so only assert presence.
        assert launch_config.labels['io.rancher.service.hash'] is not None
        del launch_config.labels['io.rancher.service.hash']
        assert launch_config.labels == {'a': 'b',
                                        s + 'link': 'bar in (a,b)',
                                        s + 'container': 'foo',
                                        'c': 'd'}
        assert service.selectorLink == 'bar in (a,b)'
        assert service.selectorContainer == 'foo'
        assert launch_config.securityOpt == ['label:foo', 'label:bar']
        assert launch_config.pidMode == 'host'
        assert _convert_instance(launch_config.logConfig) == {
            'driver': 'syslog',
            'config': {
                'tag': 'foo',
            }
        }
        assert launch_config.extraHosts == ['host:1.1.1.1', 'host:2.2.2.2']
        assert launch_config.networkMode == 'host'
        assert launch_config.volumeDriver == 'foo'
        devNull = launch_config.blkioDeviceOptions['/dev/null']
        devVda = launch_config.blkioDeviceOptions['/dev/vda']
        assert _convert_instance(devNull) == {
            'readBps': 4000,
            'readIops': None,
            'weight': None,
            'writeBps': 200,
            'writeIops': None
        }
        assert _convert_instance(devVda) == {
            'readBps': None,
            'readIops': 2000,
            'weight': None,
            'writeBps': None,
            'writeIops': 3000
        }
        assert launch_config.groupAdd == ['root']
        assert launch_config.cpuQuota == 20000
        assert launch_config.readOnly
        assert launch_config.oomScoreAdj == 100
        assert launch_config.shmSize == 1024
        assert launch_config.cgroupParent == 'abcd'
        assert launch_config.blkioWeight == 1000
        assert launch_config.stopSignal == 'SIGTERM'
        assert launch_config.dnsOpt == ['abc']
        assert launch_config.cpuPeriod == 10000
        assert launch_config.memorySwappiness == 100
        assert launch_config.oomKillDisable
        assert launch_config.ipcMode == 'host'
        # TODO: test isolation
        # Not supported
        # assert launch_config.externalLinks == ['foo', 'bar']
def test_git_build(client, compose):
    """A bare git URL as ``build`` becomes a remote build config."""
    template = '''
nginx:
  build: github.com/ibuildthecloud/tiny-build
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert _convert_instance(service.launchConfig.build) == {
        'remote': 'github.com/ibuildthecloud/tiny-build',
    }
    # The image uuid is generated from project and service names.
    assert service.launchConfig.imageUuid is not None
    prefix = 'docker:{}_nginx_'.format(project_name)
    assert service.launchConfig.imageUuid.startswith(prefix)
def test_circular_sidekick(client, compose):
    """A primary may reference its own sidekick via volumes_from."""
    template = '''
primary:
  stdin_open: true
  image: busybox
  command: cat
  labels:
    io.rancher.sidekicks: secondary
  volumes_from:
  - secondary
secondary:
  stdin_open: true
  image: busybox
  command: cat
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.dataVolumesFromLaunchConfigs == ['secondary']
    # NOTE: Python 2 semantics -- filter() returns a list, so len() works.
    secondary = filter(lambda x: x.name == 'secondary',
                       service.secondaryLaunchConfigs)
    assert len(secondary) == 1
def test_delete(client, compose):
    """``rm --force`` removes a service that is currently active."""
    template = '''
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.state == 'inactive'
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')
    service = client.wait_success(service)
    assert service.state == 'active'
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'rm', '--force')
    service = client.wait_success(service)
    assert service.state == 'removed'
def test_bindings(client, compose):
    """Labels and ports from a --bindings-file are merged into the service."""
    template = '''
prometheus:
  image: busybox
  command: cat
  labels:
    labels_prom: value_prom
'''
    project_name = random_str()
    compose.check_call(template, '--bindings-file', 'assets/bindings.json',
                       '--verbose', '-p', project_name, '-f', '-', 'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    service = client.wait_success(service)
    dict_label = {"labels_prom_binding": "value_prom_binding"}
    ports_array = ["8081"]
    # NOTE: viewitems() is Python 2; this is a dict-subset check.
    assert dict_label.viewitems() <= service.launchConfig.labels.viewitems()
    assert service.launchConfig.ports[0].find(ports_array[0]) != -1
def test_delete_while_stopped(client, compose):
    """``rm <service>`` removes a service that was never activated."""
    template = '''
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.state == 'inactive'
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'rm', 'web')
    service = client.wait_success(service)
    assert service.state == 'removed'
def test_network_bridge(client, compose):
    """``net: bridge`` maps to networkMode 'bridge'."""
    template = '''
web:
  net: bridge
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.networkMode == 'bridge'
def test_network_none(client, compose):
    """``net: none`` maps to networkMode 'none'."""
    template = '''
web:
  net: none
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.networkMode == 'none'
def test_network_container(compose, client):
    """``net: container:<name>`` on a sidekick joins that container's network."""
    template = '''
foo:
  labels:
    io.rancher.sidekicks: web
  image: nginx
web:
  net: container:foo
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.networkMode == 'managed'
    assert service.secondaryLaunchConfigs[0].networkMode == 'container'
    assert service.secondaryLaunchConfigs[0].networkLaunchConfig == 'foo'
def test_network_managed(client, compose):
    """``net: managed`` maps to networkMode 'managed'."""
    template = '''
web:
  net: managed
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.networkMode == 'managed'
def test_network_default(client, compose):
    """With no ``net`` key the default networkMode is 'managed'."""
    template = '''
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.networkMode == 'managed'
def test_env_file(client, compose):
    """Environment variables from an env_file are merged into the service."""
    project_name = create_project(compose, file='assets/base.yml')
    project = find_one(client.list_stack, name=project_name)
    assert project.name == project_name
    second = _get_service(project.services(), 'base')
    assert second.launchConfig.environment == {
        'bar': 'baz',
        'd': 'e',
        'env': '2',
        'foo': 'bar',
        'a': 'b',
    }
def test_extends(client, compose):
    """``extends`` merges image, ports and environment from the base service."""
    project_name = create_project(compose, file='assets/base.yml')
    project = find_one(client.list_stack, name=project_name)
    assert project.name == project_name
    base = _get_service(project.services(), 'base')
    local = _get_service(project.services(), 'local')
    other_base = _get_service(project.services(), 'other-base')
    assert base.launchConfig.imageUuid == 'docker:second'
    assert local.launchConfig.imageUuid == 'docker:local'
    assert local.launchConfig.ports == ['80:80/tcp']
    assert local.launchConfig.environment == {'key': 'value'}
    assert other_base.launchConfig.ports == ['80:80/tcp', '81:81/tcp']
    assert other_base.launchConfig.imageUuid == 'docker:other'
    assert other_base.launchConfig.environment == {'key': 'value',
                                                   'key2': 'value2'}
def test_extends_1556(client, compose):
    """Regression for issue 1556: extends across files keeps service links."""
    project_name = create_project(compose,
                                  file='assets/extends/docker-compose.yml')
    project = find_one(client.list_stack, name=project_name)
    assert project.name == project_name
    web = _get_service(project.services(), 'web')
    db = _get_service(project.services(), 'db')
    assert web.launchConfig.imageUuid == 'docker:ubuntu:14.04'
    assert db.launchConfig.imageUuid == 'docker:ubuntu:14.04'
    # db links to web.
    web = find_one(db.consumedservices)
    assert web.name == 'web'
def test_extends_1556_2(compose):
    """An invalid extends configuration must make the compose call fail."""
    with pytest.raises(AssertionError):
        create_project(compose, file='assets/extends_2/docker-compose.yml')
def test_lb_basic(client, compose):
    """lb_config port_rules are resolved to target service IDs."""
    template = '''
lb:
  image: rancher/lb-service-haproxy
  ports:
  - 80
  lb_config:
    port_rules:
    - source_port: 80
      target_port: 80
      service: web
    - source_port: 80
      target_port: 80
      service: web2
web:
  image: nginx
web2:
  image: nginx'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 3
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    web2 = _get_service(project.services(), 'web2')
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 2
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
    assert lb.lbConfig.portRules[1].serviceId == web2.id
    assert lb.lbConfig.portRules[1].sourcePort == 80
    assert lb.lbConfig.portRules[1].targetPort == 80
def test_lb_private(client, compose):
    """A balancer without published ports keeps its rules internal-only."""
    template = '''
lb:
  image: rancher/lb-service-haproxy
  lb_config:
    port_rules:
    - source_port: 111
      target_port: 222
      service: web
    - source_port: 222
      target_port: 333
      protocol: tcp
      service: web
web:
  image: nginx'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.lbConfig is not None
    # No ports/expose on the launch config -- nothing is published.
    assert 'ports' not in lb.launchConfig
    assert 'expose' not in lb.launchConfig
    assert len(lb.lbConfig.portRules) == 2
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 111
    assert lb.lbConfig.portRules[0].targetPort == 222
    assert lb.lbConfig.portRules[1].serviceId == web.id
    assert lb.lbConfig.portRules[1].sourcePort == 222
    assert lb.lbConfig.portRules[1].targetPort == 333
    assert lb.lbConfig.portRules[1].protocol == 'tcp'
def test_lb_hostname_and_path(client, compose):
    """hostname and path routing attributes carry through to port rules."""
    template = '''
lb:
  image: rancher/lb-service-haproxy
  ports:
  - 80
  lb_config:
    port_rules:
    - source_port: 80
      target_port: 80
      service: web
      hostname: hostname
      path: /path1
    - source_port: 80
      target_port: 80
      service: web
      hostname: hostname
      path: /path2
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 2
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
    assert lb.lbConfig.portRules[0].hostname == 'hostname'
    assert lb.lbConfig.portRules[0].path == '/path1'
    assert lb.lbConfig.portRules[1].serviceId == web.id
    assert lb.lbConfig.portRules[1].sourcePort == 80
    assert lb.lbConfig.portRules[1].targetPort == 80
    assert lb.lbConfig.portRules[1].hostname == 'hostname'
    assert lb.lbConfig.portRules[1].path == '/path2'
def test_lb_full_config(client, compose):
    """A full lb_config (custom haproxy config, https rule) parses intact."""
    project_name = create_project(compose, file='assets/lb/docker-compose.yml')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 1
    assert lb.lbConfig.config == 'global\n    foo bar\n'
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
    assert lb.lbConfig.portRules[0].protocol == 'https'
    assert lb.lbConfig.portRules[0].hostname == 'hostname'
    assert lb.lbConfig.portRules[0].path == '/path'
def test_legacy_lb_private(client, compose):
    """Legacy LB `expose` entries become internal port rules and are stripped from launchConfig."""
    template = '''
lb:
  expose:
  - 111:222
  - 222:333/tcp
  image: rancher/load-balancer-service
  ports:
  - 80
  links:
  - web
web:
  image: nginx'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.lbConfig is not None
    # `expose` must be consumed by conversion, not kept on the container config.
    assert 'expose' not in lb.launchConfig
    assert len(lb.lbConfig.portRules) == 3
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
    assert lb.lbConfig.portRules[1].serviceId == web.id
    assert lb.lbConfig.portRules[1].sourcePort == 111
    assert lb.lbConfig.portRules[1].targetPort == 222
    assert lb.lbConfig.portRules[2].serviceId == web.id
    assert lb.lbConfig.portRules[2].sourcePort == 222
    assert lb.lbConfig.portRules[2].targetPort == 333
def test_legacy_lb_basic(client, compose):
    """Legacy LB links (bare and aliased) each produce a port rule for the linked service."""
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 80:80
  links:
  - web
  - web2:web2
web:
  image: nginx
web2:
  image: nginx'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 3
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    web2 = _get_service(project.services(), 'web2')
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 2
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
    assert lb.lbConfig.portRules[1].serviceId == web2.id
    assert lb.lbConfig.portRules[1].sourcePort == 80
    assert lb.lbConfig.portRules[1].targetPort == 80
def test_legacy_lb_ssl(client, compose):
    """The legacy ssl.ports label switches matching rules to https and is removed from labels."""
    template = '''
lb:
  image: rancher/load-balancer-service
  labels:
    io.rancher.loadbalancer.ssl.ports: '80'
  ports:
  - 80:80
  links:
  - web
  - web2
web:
  image: nginx
web2:
  image: nginx'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 3
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    web2 = _get_service(project.services(), 'web2')
    # Conversion consumes the label rather than passing it to the container.
    assert 'io.rancher.loadbalancer.ssl.ports' not in lb.launchConfig.labels
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 2
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].protocol == 'https'
    assert lb.lbConfig.portRules[1].serviceId == web2.id
    assert lb.lbConfig.portRules[1].protocol == 'https'
def test_legacy_lb_default_port_http(client, compose):
    """A mapped tcp port (7900:80/tcp) exposes the source port and targets the mapped port."""
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 7900:80/tcp
  links:
  - web
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    # The LB container listens on the source port only.
    assert lb.launchConfig.ports == ['7900:7900/tcp']
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 1
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 7900
    assert lb.lbConfig.portRules[0].targetPort == 80
def test_legacy_lb_default_port_with_mapped_tcp(client, compose):
    """80:8080/tcp maps LB port 80 to backend port 8080."""
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 80:8080/tcp
  links:
  - web
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.launchConfig.ports == ['80:80/tcp']
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 1
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 8080
def test_legacy_lb_default_port_with_tcp(client, compose):
    """An unmapped port (80/tcp) defaults to identical source and target ports."""
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 80/tcp
  links:
  - web
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.launchConfig.ports == ['80:80/tcp']
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 1
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
def test_legacy_lb_label_basic(client, compose):
    """A target label "hostname:80/path=9090" overrides hostname, path and target port."""
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 80:8080
  labels:
    io.rancher.loadbalancer.target.web: "hostname:80/path=9090"
  links:
  - web
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.launchConfig.ports == ['80:80/tcp']
    # Target labels are translated into rules, not kept on the container.
    assert 'io.rancher.loadbalancer.target.web' not in lb.launchConfig.labels
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 1
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 9090
    assert lb.lbConfig.portRules[0].hostname == 'hostname'
    assert lb.lbConfig.portRules[0].path == '/path'
def test_legacy_lb_path_name(client, compose):
    """A target label on one link coexists with the default rule for the other link."""
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 6000:8080
  labels:
    io.rancher.loadbalancer.target.web: hostname:6000/path=7000
  links:
  - web
  - web2
web:
  image: nginx
web2:
  image: nginx'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 3
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    web2 = _get_service(project.services(), 'web2')
    assert lb.launchConfig.ports == ['6000:6000/tcp']
    assert 'io.rancher.loadbalancer.target.web' not in lb.launchConfig.labels
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 2
    # web: overridden by its target label (port 7000, hostname/path routing).
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 6000
    assert lb.lbConfig.portRules[0].targetPort == 7000
    assert lb.lbConfig.portRules[0].hostname == 'hostname'
    assert lb.lbConfig.portRules[0].path == '/path'
    # web2: falls back to the port mapping's target (8080).
    assert lb.lbConfig.portRules[1].serviceId == web2.id
    assert lb.lbConfig.portRules[1].sourcePort == 6000
    assert lb.lbConfig.portRules[1].targetPort == 8080
def test_legacy_lb_label_override(client, compose):
    """Multi-entry target labels across two services expand to one rule per host/port pair."""
    label1 = 'www.abc1.com:1008/service1.html,www.abc2.com:1009/service2.html'
    label2 = 'www.abc3.com:1008/service3.html,www.abc4.com:1009/service4.html'
    template = '''
lb:
  image: rancher/load-balancer-service
  ports:
  - 1008:80
  - 1009:81
  labels:
    io.rancher.loadbalancer.target.web: %s
    io.rancher.loadbalancer.target.web2: %s
  links:
  - web
  - web2
web:
  image: nginx
web2:
  image: nginx''' % (label1, label2)
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 3
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    web2 = _get_service(project.services(), 'web2')
    assert lb.launchConfig.ports == ['1008:1008/tcp', '1009:1009/tcp']
    assert 'io.rancher.loadbalancer.target.web' not in lb.launchConfig.labels
    assert 'io.rancher.loadbalancer.target.web2' not in lb.launchConfig.labels
    assert lb.lbConfig is not None
    # 2 services x 2 source ports = 4 rules, ordered by source port then service.
    assert len(lb.lbConfig.portRules) == 4
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 1008
    assert lb.lbConfig.portRules[0].targetPort == 80
    assert lb.lbConfig.portRules[0].hostname == 'www.abc1.com'
    assert lb.lbConfig.portRules[0].path == '/service1.html'
    assert lb.lbConfig.portRules[1].serviceId == web2.id
    assert lb.lbConfig.portRules[1].sourcePort == 1008
    assert lb.lbConfig.portRules[1].targetPort == 80
    assert lb.lbConfig.portRules[1].hostname == 'www.abc3.com'
    assert lb.lbConfig.portRules[1].path == '/service3.html'
    assert lb.lbConfig.portRules[2].serviceId == web.id
    assert lb.lbConfig.portRules[2].sourcePort == 1009
    assert lb.lbConfig.portRules[2].targetPort == 81
    assert lb.lbConfig.portRules[2].hostname == 'www.abc2.com'
    assert lb.lbConfig.portRules[2].path == '/service2.html'
    assert lb.lbConfig.portRules[3].serviceId == web2.id
    assert lb.lbConfig.portRules[3].sourcePort == 1009
    assert lb.lbConfig.portRules[3].targetPort == 81
    assert lb.lbConfig.portRules[3].hostname == 'www.abc4.com'
    assert lb.lbConfig.portRules[3].path == '/service4.html'
def test_legacy_lb_full_config(client, compose):
    """Legacy custom haproxy config sections are merged into lbConfig.config."""
    project_name = create_project(compose,
                                  file='assets/lb-legacy/docker-compose.yml')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    web = _get_service(project.services(), 'web')
    assert lb.launchConfig.ports == ['80:80/tcp']
    label = 'io.rancher.loadbalancer.proxy-protocol.ports'
    assert label not in lb.launchConfig.labels
    assert lb.lbConfig is not None
    assert len(lb.lbConfig.portRules) == 1
    assert lb.lbConfig.portRules[0].serviceId == web.id
    assert lb.lbConfig.portRules[0].sourcePort == 80
    assert lb.lbConfig.portRules[0].targetPort == 80
    # Generated config: global + defaults sections plus a frontend stanza
    # produced from the proxy-protocol label.
    conf = 'global\n  foo bar\n  \ndefaults\n  def 1\n  '
    conf += '\nfrontend 80\n  accept-proxy'
    assert lb.lbConfig.config == conf
def test_links(client, compose):
    """`links` entries become consumed-service relationships, one per link."""
    template = '''
web:
  image: nginx
db:
  image: mysql
  links:
  - web
other:
  image: foo
  links:
  - web
  - db
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    web = _get_service(project.services(), 'web')
    db = _get_service(project.services(), 'db')
    other = _get_service(project.services(), 'other')
    assert len(web.consumedservices()) == 0
    db_consumed = db.consumedservices()
    assert len(db_consumed) == 1
    assert db_consumed[0].name == 'web'
    other_consumed = other.consumedservices()
    assert len(other_consumed) == 2
    names = {i.name for i in other_consumed}
    assert names == {'web', 'db'}
def test_volumes_from(client, compose):
    """`volumes_from` between a sidekick and its primary maps to dataVolumesFromLaunchConfigs."""
    template = '''
web:
  labels:
    io.rancher.sidekicks: db
  image: nginx
db:
  image: mysql
  volumes_from:
  - web
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.secondaryLaunchConfigs[0].dataVolumesFromLaunchConfigs == \
        ['web']
def test_sidekick_simple(client, compose):
    """A sidekick folds into its primary's secondaryLaunchConfigs; unrelated services stay separate."""
    template = '''
web:
  labels:
    io.rancher.sidekicks: log
  image: nginx
log:
  image: mysql
log2:
  image: bar
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    services = project.services()
    service = _get_service(services, 'web')
    log2 = _get_service(services, 'log2')
    # 'log' is absorbed into 'web', so only two top-level services remain.
    assert len(services) == 2
    assert service.name == 'web'
    assert service.launchConfig.imageUuid == 'docker:nginx'
    assert service.launchConfig.networkMode == 'managed'
    assert len(service.secondaryLaunchConfigs) == 1
    assert service.secondaryLaunchConfigs[0].name == 'log'
    assert service.secondaryLaunchConfigs[0].imageUuid == 'docker:mysql'
    assert service.secondaryLaunchConfigs[0].networkMode == 'managed'
    assert log2.name == 'log2'
    assert log2.launchConfig.imageUuid == 'docker:bar'
def test_sidekick_container_network(client, compose):
    """`net: container:web` on a sidekick yields container network mode bound to the primary."""
    template = '''
web:
  labels:
    io.rancher.sidekicks: log
  image: nginx
log:
  net: container:web
  image: mysql
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'web'
    assert service.launchConfig.imageUuid == 'docker:nginx'
    assert len(service.secondaryLaunchConfigs) == 1
    assert service.secondaryLaunchConfigs[0].name == 'log'
    assert service.secondaryLaunchConfigs[0].imageUuid == 'docker:mysql'
    assert service.secondaryLaunchConfigs[0].networkMode == 'container'
    assert service.secondaryLaunchConfigs[0].networkLaunchConfig == 'web'
def test_not_external_service_hostname(client, compose):
    """A `hostname` key with an image is a regular service, not an externalService."""
    template = '''
web:
  hostname: foo
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'web'
    assert service.type == 'service'
    assert service.launchConfig.hostname == 'foo'
def test_external_service_hostname(client, compose):
    """The hostname asset file creates an externalService pointing at example.com."""
    project_name = create_project(compose, file='assets/hostname/test.yml')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'web'
    assert service.type == 'externalService'
    assert service.hostname == 'example.com'
def test_external_ip(client, compose):
    """External IPs plus a health check from the asset file map onto an externalService."""
    project_name = create_project(compose, file='assets/externalip/test.yml')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'web'
    assert service.type == 'externalService'
    assert service.externalIpAddresses == ['1.1.1.1', '2.2.2.2']
    assert service.healthCheck.healthyThreshold == 2
def test_service_inplace_rollback(client, compose):
    """After an in-place upgrade (`up -u`), `up -r` rolls the service back to the old image."""
    project_name = random_str()
    template = '''
web:
  image: nginx
'''
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    s = find_one(project.services)
    assert s.state == 'active'
    template = '''
web:
  image: nginx:1.9.5
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
                       '-d')
    s2 = find_one(project.services)
    # The service hash changes when the config changes.
    assert s.launchConfig.labels['io.rancher.service.hash'] != \
        s2.launchConfig.labels['io.rancher.service.hash']
    assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
    assert s2.state == 'upgraded'
    # -r requests a rollback instead of confirming the upgrade.
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-r',
                       '-d')
    s2 = find_one(project.services)
    assert s2.state == 'active'
    assert s2.launchConfig.imageUuid == 'docker:nginx'
def test_service_inplace_upgrade_inactive(client, compose):
    """An in-place upgrade works on a service that was only created (inactive), then -c confirms."""
    project_name = random_str()
    template = '''
web:
  image: nginx
'''
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'create')
    project = find_one(client.list_stack, name=project_name)
    s = find_one(project.services)
    assert s.state == 'inactive'
    template = '''
web:
  image: nginx:1.9.5
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
                       '-d')
    s2 = find_one(project.services)
    assert s.launchConfig.labels['io.rancher.service.hash'] != \
        s2.launchConfig.labels['io.rancher.service.hash']
    assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
    assert s2.state == 'upgraded'
    # -c confirms the pending upgrade, moving the service back to active.
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-c',
                       '-d')
    s2 = find_one(project.services)
    assert s2.state == 'active'
def test_service_inplace_upgrade(client, compose):
    """An in-place upgrade of an active service changes image and hash; -c confirms it."""
    project_name = random_str()
    template = '''
web:
  image: nginx
'''
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    s = find_one(project.services)
    assert s.state == 'active'
    template = '''
web:
  image: nginx:1.9.5
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
                       '-d')
    s2 = find_one(project.services)
    assert s.launchConfig.labels['io.rancher.service.hash'] != \
        s2.launchConfig.labels['io.rancher.service.hash']
    assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
    assert s2.state == 'upgraded'
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-c',
                       '-d')
    s2 = find_one(project.services)
    assert s2.state == 'active'
def test_upgrade_add_sidekick(client, compose):
    """Upgrading with an added sidekick results in a second child container."""
    project_name = random_str()
    template = '''
parent:
  image: nginx
  labels:
    io.rancher.sidekicks: child1
child1:
  image: nginx
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 1
    parent = _get_service(project.services(), 'parent')
    instances = parent.instances()
    child_id = [x.id for x in instances if 'child' in x.name]
    assert len(child_id) == 1
    template = '''
parent:
  image: nginx
  labels:
    io.rancher.sidekicks: child1, child2
child1:
  image: nginx
child2:
  image: nginx
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up',
                       '--upgrade', '-c', '-d')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 1
    parent = _get_service(project.services(), 'parent')
    instances = parent.instances()
    child_id = [x.id for x in instances if 'child' in x.name]
    assert len(child_id) == 2
def test_upgrade_remove_sidekick(client, compose):
    """Upgrading with a removed sidekick drops the corresponding child container."""
    project_name = random_str()
    template = '''
parent:
  image: nginx
  labels:
    io.rancher.sidekicks: child1, child2
child1:
  image: nginx
child2:
  image: nginx
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 1
    parent = _get_service(project.services(), 'parent')
    instances = parent.instances()
    child_id = [x.id for x in instances if 'child' in x.name]
    assert len(child_id) == 2
    template = '''
parent:
  image: nginx
  labels:
    io.rancher.sidekicks: child1
child1:
  image: nginx
'''
    compose.check_call(template, '-p', project_name, '-f', '-', 'up',
                       '--upgrade', '-c', '-d')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 1
    parent = _get_service(project.services(), 'parent')
    instances = parent.instances()
    child_id = [x.id for x in instances if 'child' in x.name]
    assert len(child_id) == 1
def test_service_hash_with_rancher(client, compose):
    """Adding rancher-compose config changes the computed service hash."""
    project_name = create_project(compose,
                                  file='assets/hash-no-rancher/test.yml')
    project = find_one(client.list_stack, name=project_name)
    s = find_one(project.services)
    project_name = create_project(compose,
                                  file='assets/hash-with-rancher/test.yml')
    project = find_one(client.list_stack, name=project_name)
    s2 = find_one(project.services)
    assert s.metadata['io.rancher.service.hash'] is not None
    assert s2.metadata['io.rancher.service.hash'] is not None
    # Different effective configs must hash differently.
    assert s.metadata['io.rancher.service.hash'] != \
        s2.metadata['io.rancher.service.hash']
def test_service_hash_no_change(client, compose):
    """Creating the same template twice yields identical service hashes (hash is deterministic)."""
    template = '''
web1:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    web = find_one(project.services)
    assert web.metadata['io.rancher.service.hash'] is not None
    assert web.launchConfig.labels['io.rancher.service.hash'] is not None
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    web2 = find_one(project.services)
    assert web2.metadata['io.rancher.service.hash'] is not None
    assert web2.launchConfig.labels['io.rancher.service.hash'] is not None
    assert web.launchConfig.labels['io.rancher.service.hash'] == \
        web2.launchConfig.labels['io.rancher.service.hash']
    assert web.metadata['io.rancher.service.hash'] == \
        web2.metadata['io.rancher.service.hash']
def test_dns_service(client, compose):
    """The rancher/dns-service image creates a dnsService consuming its links."""
    template = '''
web1:
  image: nginx
web2:
  image: nginx
web:
  image: rancher/dns-service
  links:
  - web1
  - web2
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    services = project.services()
    assert len(services) == 3
    web = _get_service(services, 'web')
    assert web.type == 'dnsService'
    consumed = web.consumedservices()
    assert len(consumed) == 2
    names = {x.name for x in consumed}
    assert names == {'web1', 'web2'}
def test_up_relink(client, compose):
    """Re-running `up` with different links replaces the consumed-service set."""
    template = '''
lb:
  image: nginx
  ports:
  - 80
  links:
  - web
  labels:
    a: b
    c: d
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    lb = _get_service(project.services(), 'lb')
    consumed = lb.consumedservices()
    assert len(consumed) == 1
    assert consumed[0].name == 'web'
    # Drop the injected hash label so the remaining labels can be compared.
    del lb.launchConfig.labels['io.rancher.service.hash']
    assert lb.launchConfig.labels == {
        'a': 'b',
        'c': 'd',
    }
    template2 = '''
lb:
  image: nginx
  ports:
  - 80
  links:
  - web2
web2:
  image: nginx
'''
    compose.check_call(template2, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')

    # Relinking is asynchronous; poll until exactly one consumed service shows.
    def check():
        x = lb.consumedservices()
        if len(x) == 1:
            return x
    consumed = wait_for(check, timeout=5)
    assert len(consumed) == 1
    assert consumed[0].name == 'web2'
def test_service_upgrade_from_nil(client, compose):
    """Upgrading to a service that does not exist in the source stack must fail (retcode 1)."""
    template = '''
foo:
  image: nginx
web2:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    upgrade = '''
foo:
  image: nginx
web:
  image: nginx
web2:
  image: nginx
'''
    compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
                          '-', 'upgrade', 'web', 'web2')
def test_service_upgrade_no_global_on_src(client, compose):
    """Upgrading FROM a global service must fail and leave the stack unchanged.

    The source service carries the global-scheduler label, which the upgrade
    command rejects with retcode 1 and an explanatory message.
    """
    template = '''
web:
  image: nginx
  labels:
    io.rancher.scheduler.global: "true"
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 1
    upgrade = '''
web2:
  image: nginx
'''
    out, err = compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
                                     '-', 'upgrade', 'web', 'web2',
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    # BUGFIX: was `assert out.find(...)`, which is always truthy because
    # str.find returns -1 (not False) on a miss — the assert could never fail.
    assert 'Upgrade is not supported for global services' in out
    # No new service may have been created by the failed upgrade.
    assert len(project.services()) == 1
def test_service_upgrade_no_global_on_dest(client, compose):
    """Upgrading TO a global service must fail with retcode 1.

    Here the destination service carries the global-scheduler label, which is
    equally unsupported by the upgrade command.
    """
    template = '''
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    upgrade = '''
web2:
  image: nginx
  labels:
    io.rancher.scheduler.global: true
'''
    out, err = compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
                                     '-', 'upgrade', 'web', 'web2',
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    # BUGFIX: was `assert out.find(...)`, which is always truthy because
    # str.find returns -1 (not False) on a miss — the assert could never fail.
    assert 'Upgrade is not supported for global services' in out
def test_service_map_syntax(client, compose):
    """A `service:alias` link stores the alias as the consume-map name."""
    template = '''
foo:
  image: nginx
  links:
  - web:alias
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    foo = _get_service(project.services(), 'foo')
    maps = client.list_serviceConsumeMap(serviceId=foo.id)
    assert len(maps) == 1
    assert maps[0].name == 'alias'
def test_cross_stack_link(client, compose):
    """`external_links` with a `stack/service` reference links across stacks."""
    template = '''
dest:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    dest = _get_service(project.services(), 'dest')
    template = '''
src:
  external_links:
  - {}/dest
  image: nginx
'''.format(project_name)
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    src = _get_service(project.services(), 'src')
    services = src.consumedservices()
    assert len(services) == 1
    assert services[0].id == dest.id
def test_up_deletes_links(client, compose):
    """Re-running `up` with the link removed deletes the consumed-service relationship."""
    template = '''
dest:
  image: busybox
  command: cat
  stdin_open: true
  tty: true
src:
  image: busybox
  command: cat
  stdin_open: true
  tty: true
  links:
  - dest
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    src = _get_service(project.services(), 'src')
    services = src.consumedservices()
    assert len(services) == 1
    template = '''
src:
  image: nginx
'''
    compose.check_call(template, '-f', '-', '-p', project_name, 'up', '-d')
    services = src.consumedservices()
    assert len(services) == 0
def test_upgrade_no_source(client, compose):
    """Upgrading in a fresh stack with no source service fails and creates nothing."""
    project_name = random_str()
    compose.check_retcode(None, 1, '-p', project_name, '-f',
                          'assets/upgrade-ignore-scale/docker-compose.yml',
                          'upgrade', '--interval', '1000',
                          '--scale=2', 'from', 'to')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 0
def test_upgrade_ignore_scale(client, compose):
    """`upgrade --scale=2` drains the source to 0 and brings the target to scale 2."""
    project_name = create_project(compose, file='assets/upgrade-ignore-scale/'
                                                'docker-compose-source.yml')
    compose.check_call(None, '--verbose', '-f', 'assets/upgrade-ignore-scale/'
                                                'docker-compose-source.yml',
                       '-p', project_name, 'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    compose.check_call(None, '-p', project_name, '-f',
                       'assets/upgrade-ignore-scale/docker-compose.yml',
                       'upgrade', '--pull', '--interval', '1000',
                       '--scale=2', 'from', 'to')
    f = _get_service(project.services(), 'from')
    to = _get_service(project.services(), 'to')
    # Mid-upgrade the target may not have reached full scale yet.
    assert to.scale <= 2
    f = client.wait_success(f)
    to = client.wait_success(to)
    assert f.scale == 0
    assert to.scale == 2
    assert to.state == 'active'
def test_service_link_with_space(client, compose):
    """Whitespace after the colon in a `service: alias` link is tolerated."""
    template = '''
foo:
  image: nginx
  links:
  - "web: alias"
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    foo = _get_service(project.services(), 'foo')
    maps = client.list_serviceConsumeMap(serviceId=foo.id)
    assert len(maps) == 1
    assert maps[0].name == 'alias'
def test_circle_simple(client, compose):
    """Two services linking to each other (a 2-cycle) can be created."""
    template = '''
foo:
  image: nginx
  links:
  - web
web:
  image: nginx
  links:
  - foo
'''
    project_name = random_str()
    compose.check_call(template, '-p', project_name, '-f',
                       '-', 'create')
    project = find_one(client.list_stack, name=project_name)
    foo = _get_service(project.services(), 'foo')
    web = _get_service(project.services(), 'web')
    s = find_one(foo.consumedservices)
    assert s.name == 'web'
    s = find_one(web.consumedservices)
    assert s.name == 'foo'
def test_one_circle(client, compose):
    """A service may link to itself (self-loop)."""
    template = '''
foo:
  image: nginx
  links:
  - foo
'''
    project_name = random_str()
    compose.check_call(template, '-p', project_name, '-f',
                       '-', 'create')
    project = find_one(client.list_stack, name=project_name)
    foo = _get_service(project.services(), 'foo')
    s = find_one(foo.consumedservices)
    assert s.name == 'foo'
def test_circle_madness(client, compose):
    """A fully-connected link graph (every service links to all three, incl. itself) works."""
    template = '''
foo:
  image: nginx
  links:
  - foo
  - foo2
  - foo3
foo2:
  image: nginx
  links:
  - foo
  - foo2
  - foo3
foo3:
  image: nginx
  links:
  - foo
  - foo2
  - foo3
'''
    project_name = random_str()
    compose.check_call(template, '-p', project_name, '-f',
                       '-', 'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    foo = _get_service(project.services(), 'foo')
    foo2 = _get_service(project.services(), 'foo2')
    foo3 = _get_service(project.services(), 'foo3')
    assert len(foo.consumedservices()) == 3
    assert len(foo2.consumedservices()) == 3
    assert len(foo3.consumedservices()) == 3
def test_variables(client, compose):
    """Variables from --env-file are substituted into image, labels and metadata."""
    project_name = random_str()
    compose.check_call(None, '--env-file', 'assets/env-file/env-file',
                       '--verbose', '-f', 'assets/env-file/docker-compose.yml',
                       '-p', project_name, 'create')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.launchConfig.imageUuid == 'docker:nginx'
    assert service.launchConfig.labels['var'] == 'nginx'
    assert service.metadata.var == 'E'
    # A variable absent from the env file substitutes to empty string.
    assert service.metadata.var2 == ''
def test_metadata_on_service(client, compose):
    """Nested metadata structures (lists, maps, lists-of-maps) survive round-trip intact."""
    project_name = create_project(compose, file='assets/metadata/test.yml')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'web'
    assert service.metadata.test1[0] == 'one'
    assert service.metadata.test1[1] == 'two'
    assert service.metadata.test2.name == "t2name"
    assert service.metadata.test2.value == "t2value"
    assert service.metadata.test3
    assert service.metadata.test4[0].test5.name == "t5name"
    assert service.metadata.test4[1].test6.name == "t6name"
    assert service.metadata.test4[1].test6.value == "t6value"
    assert service.metadata.test7.test7nest.test7nestofnest[0].test7dot1.name \
        == "test7dot1name"
    assert service.metadata.test7.test7nest.test7nestofnest[1].test7dot2.name \
        == "test7dot2name"
    assert service.metadata.test8[0].test8a[0].name == "test8a"
    assert service.metadata.test8[0].test8a[0].value == "test8avalue"
    assert service.metadata.test8[0].test8a[1].name == "test8ab"
    assert service.metadata.test8[0].test8a[1].value == "test8abvalue"
    assert service.metadata.test8[1].test8b[0].name == "test8ba"
    assert service.metadata.test8[1].test8b[0].value == "test8bavalue"
def test_healthchecks(client, compose):
    """Health-check settings from the asset file land on launchConfig.healthCheck."""
    project_name = create_project(compose, file='assets/health/test.yml')
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'web'
    assert service.launchConfig.healthCheck.port == 80
    assert service.launchConfig.healthCheck.interval == 2000
    assert service.launchConfig.healthCheck.unhealthyThreshold == 3
    assert service.launchConfig.healthCheck.requestLine == \
        "OPTIONS /ping HTTP/1.1\r\nHost:\\ www.example.com"
def _get_service(services, name):
service = None
for i in services:
if i.name == name:
service = i
break
assert service is not None
return service
def test_restart_no(client, compose):
    """`restart: "no"` is accepted on create and on a subsequent `up -d`."""
    template = '''
web:
  image: nginx
  restart: "no"
'''
    project_name = create_project(compose, input=template)
    find_one(client.list_stack, name=project_name)
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')
    p = find_one(client.list_stack, name=project_name)
    find_one(p.services)
def test_stack_case(client, compose):
    """`up` with a differently-cased project name reuses the existing stack."""
    template = '''
web:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    find_one(client.list_stack, name=project_name)
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')
    compose.check_call(template, '--verbose', '-f', '-', '-p',
                       project_name.upper(), 'up', '-d')
    # Still exactly one stack, under the original name.
    find_one(client.list_stack, name=project_name)
@pytest.mark.skipif('True')  # disabled unconditionally — kept for reference
def test_certs(new_context, compose_bin, request):
    """An LB referencing certificates by name resolves default and extra cert IDs."""
    client = new_context.client
    compose = new_compose(client, compose_bin, request)
    cert = client.create_certificate(name='cert1',
                                     cert=CERT,
                                     certChain=CERT,
                                     key=KEY)
    cert2 = client.create_certificate(name='cert2',
                                      cert=CERT,
                                      certChain=CERT,
                                      key=KEY)
    cert = client.wait_success(cert)
    cert2 = client.wait_success(cert2)
    assert cert.state == 'active'
    assert cert2.state == 'active'
    project_name = create_project(compose,
                                  file='assets/ssl/docker-compose.yml')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    lb = _get_service(project.services(), 'lb')
    assert lb.defaultCertificateId == cert.id
    assert lb.certificateIds == [cert.id, cert2.id]
def test_cert_not_found(new_context, compose_bin, request):
    """Referencing a certificate that does not exist fails create with retcode 1."""
    compose = new_compose(new_context.client, compose_bin, request)
    compose.check_retcode(None, 1, '-p', random_str(), '-f',
                          'assets/ssl/docker-compose.yml', 'create')
def test_project_name(client, compose):
    """`create` against a pre-existing, mixed-case stack adds services to it."""
    project_name = 'FooBar23-' + random_str()
    stack = client.create_stack(name=project_name)
    stack = client.wait_success(stack)
    assert stack.state == 'active'
    template = '''
web:
  image: nginx
'''
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 0
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'create')
    assert len(project.services()) == 1
def test_project_name_case_insensitive(client, compose):
    """Project-name matching is case-insensitive: a re-cased -p hits the same stack."""
    project_name = 'FooBar23-' + random_str()
    stack = client.create_stack(name=project_name)
    stack = client.wait_success(stack)
    assert stack.state == 'active'
    template = '''
web:
  image: nginx
'''
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 0
    project_name = project_name.replace('FooBar', 'fOoBaR')
    assert project_name.startswith('fOoBaR')
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'create')
    assert len(project.services()) == 1
def test_project_name_with_dots(client, compose):
    """Dots in a directory-derived project name are sanitized to dashes."""
    project_name = 'something-with-dashes-v0-2-6'
    bad_project_name = 'something-with-dashes-v0.2.6'
    ret = client.list_stack(name=project_name)
    assert len(ret) == 0
    # No -p given: the name comes from the compose file's directory.
    compose.check_call(None, '--verbose', '-f',
                       'assets/{}/docker-compose.yml'.format(bad_project_name),
                       'create')
    ret = client.list_stack(name=project_name)
    assert len(ret) == 1
def test_create_then_up_on_circle(client, compose):
    """`up` after create on a cyclic link graph leaves all links intact."""
    template = '''
etcd-lb:
  stdin_open: true
  image: busybox
  command: cat
  links:
  - etcd0
  - etcd1
  - etcd2
etcd0:
  stdin_open: true
  image: busybox
  command: cat
  links:
  - etcd1
  - etcd2
etcd1:
  stdin_open: true
  image: busybox
  command: cat
  links:
  - etcd0
  - etcd2
etcd2:
  stdin_open: true
  image: busybox
  command: cat
  links:
  - etcd0
  - etcd1
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    etcd_lb = _get_service(project.services(), 'etcd-lb')
    etcd0 = _get_service(project.services(), 'etcd0')
    etcd1 = _get_service(project.services(), 'etcd1')
    etcd2 = _get_service(project.services(), 'etcd2')
    assert len(etcd_lb.consumedservices()) == 3
    assert len(etcd0.consumedservices()) == 2
    assert len(etcd1.consumedservices()) == 2
    assert len(etcd2.consumedservices()) == 2
    assert len(etcd_lb.consumedservices()) == 3
    # Re-running up must not disturb the existing link graph.
    compose.check_call(template, '-f', '-', '-p', project_name, 'up', '-d')
    assert len(etcd_lb.consumedservices()) == 3
    assert len(etcd0.consumedservices()) == 2
    assert len(etcd1.consumedservices()) == 2
    assert len(etcd2.consumedservices()) == 2
def test_expose_port_ignore(client, compose):
    """`expose` entries must not end up as ports in the launchConfig."""
    template = '''
foo:
  image: nginx
  expose:
  - 1234
  links:
  - foo
'''
    project_name = random_str()
    compose.check_call(template, '-p', project_name, '-f',
                       '-', 'create')
    project = find_one(client.list_stack, name=project_name)
    foo = _get_service(project.services(), 'foo')
    assert 'ports' not in foo.launchConfig
def test_create_no_update_links(client, compose):
    """A second `create` must not modify links of existing services."""
    template = '''
foo:
  image: nginx
  links:
  - foo2
foo2:
  image: tianon/true
foo3:
  image: tianon/true
'''
    project_name = random_str()
    compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
                       'up', '-d')
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 3
    foo = _get_service(project.services(), 'foo')
    foo2 = find_one(foo.consumedservices)
    assert foo2.name == 'foo2'
    template2 = '''
foo:
  image: tianon/true
  links:
  - foo3
foo2:
  image: tianon/true
foo3:
  image: tianon/true
'''
    # `create` on the changed template is a no-op for existing services,
    # so foo must still link to foo2 (not foo3).
    compose.check_call(template2, '-p', project_name, '-f', '-', 'create')
    foo2 = find_one(foo.consumedservices)
    assert foo2.name == 'foo2'
def test_pull_sidekick(client, compose):
    """`pull` reports primary and sidekick images and creates no services."""
    template = '''
foo:
  labels:
    io.rancher.sidekicks: foo2
  image: nginx
foo2:
  image: tianon/true
'''
    project_name = random_str()
    out, err = compose.check_retcode(template, 0, '-p', project_name, '-f',
                                     '-', 'pull', stdout=subprocess.PIPE)
    project = find_one(client.list_stack, name=project_name)
    # pull must not create any services in the stack
    assert len(project.services()) == 0
    assert 'nginx' in out
    assert 'tianon/true' in out
def test_retain_ip(client, compose):
    """The compose-file retain-ip flag maps onto the service's retainIp."""
    name = create_project(compose,
                          file='assets/retain-ip/docker-compose.yml')
    stack = find_one(client.list_stack, name=name)
    services = stack.services()
    assert _get_service(services, 'retain').retainIp
    assert not _get_service(services, 'not-retain').retainIp
def test_no_update_selector_link(client, compose):
    """Selector-based links survive an `up` restricted to the parent."""
    template = '''
parent:
  labels:
    io.rancher.service.selector.link: foo=bar
  image: tianon/true
child:
  labels:
    foo: bar
  image: tianon/true
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 2
    parent = _get_service(project.services(), 'parent')
    # find_one asserts exactly one consumed service (the selector link)
    find_one(parent.consumedservices)
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d',
                       'parent')
    parent = _get_service(project.services(), 'parent')
    find_one(parent.consumedservices)
def test_sidekick_build_remote(client, compose):
    """Remote build URL/dockerfile map into primary and secondary
    launch configs of a sidekick pair."""
    template = '''
parent:
  labels:
    io.rancher.sidekicks: child
  build: http://parent
  dockerfile: parent-file
child:
  build: http://child
  dockerfile: child-file
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    # sidekicks collapse into a single service
    assert len(project.services()) == 1
    parent = _get_service(project.services(), 'parent')
    assert parent.launchConfig.build.remote == 'http://parent'
    assert parent.launchConfig.build.dockerfile == 'parent-file'
    assert len(parent.secondaryLaunchConfigs) == 1
    assert parent.secondaryLaunchConfigs[0].build.remote == 'http://child'
    assert parent.secondaryLaunchConfigs[0].build.dockerfile == 'child-file'
def test_sidekick_healthcheck(client, compose):
    """Health checks apply to primary and sidekick launch configs alike."""
    name = create_project(compose,
                          file='assets/sidekick-health/docker-compose.yml')
    stack = find_one(client.list_stack, name=name)
    services = stack.services()
    assert len(services) == 1
    parent = _get_service(services, 'parent')
    assert parent.launchConfig.healthCheck.port == 80
    assert parent.secondaryLaunchConfigs[0].healthCheck.port == 81
def test_force_upgrade_primary(client, compose):
    """`up --force-upgrade` limited to the primary must keep sidekicks.

    Upgrading only 'parent' recreates the primary containers while the
    'child' sidekick containers keep their ids; a subsequent full
    force-upgrade then recreates everything.
    """
    template = '''
parent:
  labels:
    io.rancher.sidekicks: child
  image: nginx
child:
  image: nginx
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    assert len(project.services()) == 1
    compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d')
    parent = _get_service(project.services(), 'parent')
    instances = parent.instances()
    child_prefix = project_name + '_child'
    child_id = [x.id for x in instances if x.name.startswith(child_prefix)]
    assert len(instances) == 2
    compose.check_call(template, '-p', project_name, '-f', '-', 'up',
                       '--force-upgrade', '-d', 'parent')
    new_instances = parent.instances()
    # BUG FIX: the post-upgrade child ids must come from new_instances;
    # the previous code re-read the stale pre-upgrade `instances` list,
    # which made the following assertion a tautology.
    new_child_id = [x.id for x in new_instances
                    if x.name.startswith(child_prefix)]
    assert child_id == new_child_id
    # 2 old + 2 new instances sharing one retained child -> 3 unique ids.
    ids = {x.id for x in instances}.union({x.id for x in new_instances})
    assert len(ids) == 3
    compose.check_call(template, '-p', project_name, '-f', '-', 'up',
                       '-c', '-d')
    # A full force-upgrade recreates parent and child alike.
    compose.check_call(template, '-p', project_name, '-f', '-', 'up',
                       '--force-upgrade', '-d')
    ids = ids.union({x.id for x in parent.instances()})
    assert len(ids) == 5
def test_virtual_machine(client, compose):
    """virtualMachine services carry vcpu, memory, userdata and disks."""
    template = '''
vm:
  type: virtualMachine
  image: nginx
  vcpu: 2
  memory: 1024
  userdata: |
    #cloud-config
    foo
  disks:
  - name: foo
    size: 1g
    opts:
      foo: bar
  - name: foo2
    size: 2g
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    vm = find_one(project.services)
    assert vm.launchConfig.kind == 'virtualMachine'
    assert vm.launchConfig.vcpu == 2
    # YAML block scalar is passed through with its trailing newline.
    assert vm.launchConfig.userdata == '#cloud-config\nfoo\n'
    assert vm.launchConfig.memoryMb == 1024
    assert _convert_instance(vm.launchConfig.disks[0]) == {
        'name': 'foo', 'size': '1g',
        'opts': {'foo': 'bar'},
    }
    assert _convert_instance(vm.launchConfig.disks[1]) == {
        'name': 'foo2', 'size': '2g',
    }
def test_cyclic_link_dependency(client, compose):
    """Regression test: cyclic link dependencies must neither error nor
    hang during project creation."""
    compose_file = 'assets/cyclic-link-dependency/docker-compose.yml'
    create_project(compose, file=compose_file)
def test_yaml_corner_cases(client, compose):
    """A compose file full of tricky YAML constructs can be created."""
    compose_file = 'assets/yaml-corner-cases/docker-compose.yml'
    create_project(compose, file=compose_file)
def test_environment_variables(client, compose):
    """Both mapping- and list-style env syntax work; variables without a
    value are dropped from the launch config."""
    template = '''
env-test1:
  image: nginx
  environment:
    ENV1: ENV1
    ENV2:
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'env-test1'
    launch_config = service.launchConfig
    # ENV2 has no value and is not forwarded.
    assert launch_config.environment == {'ENV1': 'ENV1'}
    template = '''
env-test2:
  image: nginx
  environment:
  - ENV1=ENV1
  - ENV2
'''
    project_name = create_project(compose, input=template)
    project = find_one(client.list_stack, name=project_name)
    service = find_one(project.services)
    assert service.name == 'env-test2'
    launch_config = service.launchConfig
    assert launch_config.environment == {'ENV1': 'ENV1'}
| {
"content_hash": "de0d8c9bc03f509d9811a5b403d27bf8",
"timestamp": "",
"source": "github",
"line_count": 2296,
"max_line_length": 79,
"avg_line_length": 30.83493031358885,
"alnum_prop": 0.622060256790542,
"repo_name": "rancherio/rancher-compose",
"id": "76b0879b1d116786730e0b4b06b7b6527d94d5ec",
"size": "70797",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration/cattletest/core/test_compose.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "17"
},
{
"name": "Go",
"bytes": "151318"
},
{
"name": "Makefile",
"bytes": "411"
},
{
"name": "Python",
"bytes": "96512"
},
{
"name": "Shell",
"bytes": "6362"
}
],
"symlink_target": ""
} |
from flask import Blueprint

# Central blueprint for the "main" section of the app; the application
# factory registers it on the Flask instance.
main = Blueprint('main', __name__)

# Imported at the bottom (after `main` exists) to avoid a circular
# import: routes and events themselves import `main` from this package.
from . import routes, events  # noqa: E402,F401
| {
"content_hash": "de3460e603c8477b7422ed2fc26bf711",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 18.8,
"alnum_prop": 0.7127659574468085,
"repo_name": "Yprolic/LinkDragon",
"id": "33ca9f517a2bb124770303bb3e21676d18dbcef5",
"size": "94",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "app/main/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2530"
},
{
"name": "Python",
"bytes": "5243"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Intangible object for this chemistry draft schematic.

    Args:
        kernel: SWGPy kernel instance (unused here; required by the
            template loader's `create` convention).

    Returns:
        Intangible: the configured draft-schematic object.
    """
    result = Intangible()
    # IFF template path identifying this draft schematic.
    result.template = "object/draft_schematic/chemistry/shared_medpack_enhance_constitution_b.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
"content_hash": "fbbd56b9aa022a1de9d55a6acecbaf35",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 24.923076923076923,
"alnum_prop": 0.7067901234567902,
"repo_name": "obi-two/Rebelion",
"id": "bbed192db14baca378f03f6246375fd85657634b",
"size": "469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_enhance_constitution_b.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import utils
common = utils.utils
class tm1_loading(common):
    """REST client for managing and running TM1 TurboIntegrator processes
    and chores, built on the shared `utils.utils` request helper."""

    def __init__(self, tm1Base='http://localhost:8000/api/v1/',
                 tm1AdminName='admin', tm1AdminPW='apple', debugLevel=0):
        """Store connection settings via the parent REST helper.

        Args:
            tm1Base: base URL of the TM1 REST API (trailing slash).
            tm1AdminName: admin user name.
            tm1AdminPW: admin password.
            debugLevel: verbosity level forwarded to the parent class.
        """
        # initialize Parent Class
        common.__init__(self, tm1Base, tm1AdminName, tm1AdminPW, debugLevel)

    def getListOf_Processes(self):
        """Return the list of TurboIntegrator processes on the server."""
        return common.createListOfObjects(self, "Processes")

    def getListOf_Chores(self):
        """Return the list of chores on the server."""
        return common.createListOfObjects(self, "Chores")

    def createProcess(self, tm1ProcessName, tm1ProcessProlog=""):
        """Create a TI process with the given name and prolog procedure."""
        import json  # local import: module-level dependencies unchanged
        taskname = "CREATE Process - " + tm1ProcessName
        restCall = self.tm1Base + "Processes"
        # BUG FIX: serialize with json.dumps so names or prolog code
        # containing quotes/backslashes are escaped correctly; the previous
        # string concatenation produced invalid JSON for such inputs.
        body = json.dumps({"Name": tm1ProcessName,
                           "PrologProcedure": tm1ProcessProlog})
        print(body)
        common.tm1Post(self, restCall, body, taskname)

    def deleteProcess(self, tm1ProcessName):
        '''Deletes a Process'''
        taskname = "DELETE Process - " + tm1ProcessName
        restCall = self.tm1Base + "Processes('" + tm1ProcessName + "')"
        common.tm1Delete(self, restCall, taskname)

    def executeProcess(self, tm1ProcessName):
        '''Executes a Process (without parameters)'''
        taskname = "EXECUTE Process - " + tm1ProcessName
        restCall = self.tm1Base + "Processes('" + tm1ProcessName + "')/tm1.Execute"
        body = '{"Parameters": \
            [\
            ]\
        }'
        common.tm1Post(self, restCall, body, taskname)
if __name__ == '__main__':
    # Smoke test only; this module is intended to be imported.
    print("ATTENTION - This is not intended for direct use.")
    tm1Base = 'https://txtm1.tablonautix.com/api/v1/'
    tm1AdminName = 'admin'
    tm1AdminPW = 'apple'
    debugLevel = 5
    # initialize script
    # BUG FIX: the previous code instantiated the undefined name
    # `tm1_basicstructureinformation` (NameError); this module defines
    # `tm1_loading`.
    tm1 = tm1_loading(tm1Base, tm1AdminName, tm1AdminPW, debugLevel)
    print(tm1.getListOf_Processes())
    print(tm1.getListOf_Chores())
| {
"content_hash": "a734d3cc6f38c9dda15988008395a847",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 120,
"avg_line_length": 27.205128205128204,
"alnum_prop": 0.6046182846371347,
"repo_name": "scrumthing/pytm1",
"id": "099ce760ecb9657811ee40ded178232f51e642dc",
"size": "2138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytm1/tm1_loading.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19415"
}
],
"symlink_target": ""
} |
import json
import asyncio
from statistics import median
from numbers import Number
from electrum.network import filter_protocol, Network
from electrum.util import create_and_start_event_loop, log_exceptions
# Spin up a private event loop thread plus a Network instance so this
# standalone script can talk to Electrum servers without GUI/daemon.
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network()
network.start()

@log_exceptions
async def f():
    """Ask all connected peers for a 2-block fee estimate, print the raw
    answers and the median of the numeric ones."""
    try:
        peers = await network.get_peers()
        peers = filter_protocol(peers)
        results = await network.send_multiple_requests(peers, 'blockchain.estimatefee', [2])
        print(json.dumps(results, indent=4))
        # Some servers answer with errors; keep only numeric estimates.
        feerate_estimates = filter(lambda x: isinstance(x, Number), results.values())
        print(f"median feerate: {median(feerate_estimates)}")
    finally:
        # Resolve the future so the event loop (and the script) shuts
        # down even when the requests fail.
        stopping_fut.set_result(1)

# Schedule the coroutine on the background loop from the main thread.
asyncio.run_coroutine_threadsafe(f(), loop)
| {
"content_hash": "e28eec176fe7ae78176b8eab41ec03b7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 92,
"avg_line_length": 31.23076923076923,
"alnum_prop": 0.7142857142857143,
"repo_name": "fujicoin/electrum-fjc",
"id": "76bcc55b54cfd5b6a55eef3f50c4c7e0dbaadbd4",
"size": "835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "electrum/scripts/estimate_fee.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7756"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "877"
},
{
"name": "NSIS",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "2346736"
},
{
"name": "Shell",
"bytes": "30493"
}
],
"symlink_target": ""
} |
"""
Pygal - A python svg graph plotting library
"""
__version__ = '0.13.0'
import sys
from pygal.config import Config
from pygal.ghost import Ghost
from pygal.graph import CHARTS_NAMES
CHARTS = []
CHARTS_BY_NAME = {}

# Dynamically create one Ghost-backed class per chart name and expose it
# as a module attribute so e.g. `pygal.Line` works, while also recording
# it in the CHARTS list and the CHARTS_BY_NAME lookup table.
for NAME in CHARTS_NAMES:
    _CHART = type(NAME, (Ghost,), {})
    CHARTS.append(_CHART)
    CHARTS_BY_NAME[NAME] = _CHART
    setattr(sys.modules[__name__], NAME, _CHART)

# Public API: every chart name plus Config and the two registries.
__all__ = CHARTS_NAMES + [Config.__name__, 'CHARTS', 'CHARTS_BY_NAME']
| {
"content_hash": "610014070222ad3db4616686d3286b20",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 21.272727272727273,
"alnum_prop": 0.6495726495726496,
"repo_name": "vineethguna/heroku-buildpack-libsandbox",
"id": "9ec4e4ce6d3eee2b3ba7b88ef9506bfa60386461",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pygal-0.13.0/pygal/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "1393"
},
{
"name": "Python",
"bytes": "20214"
},
{
"name": "Ruby",
"bytes": "0"
},
{
"name": "Shell",
"bytes": "13182"
}
],
"symlink_target": ""
} |
import time
import logging
import numpy as np
from copy import deepcopy
from functools import partial
import qcodes.utils.validators as vals
from qcodes.instrument.parameter import ManualParameter
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \
ZI_HDAWG_core import ZI_HDAWG_core
except Exception:
ZI_HDAWG_core = type(None)
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments. \
ZI_base_instrument import merge_waveforms
except Exception:
pass
from .pulsar import PulsarAWGInterface
from .zi_pulsar_mixin import ZIPulsarMixin, ZIMultiCoreCompilerMixin
from .zi_pulsar_mixin import ZIGeneratorModule
log = logging.getLogger(__name__)
class HDAWG8Pulsar(PulsarAWGInterface, ZIPulsarMixin, ZIMultiCoreCompilerMixin):
    """ZI HDAWG8 specific functionality for the Pulsar class."""

    AWG_CLASSES = [ZI_HDAWG_core]
    # Waveform length granularity in samples.
    GRANULARITY = 16
    # Timing constants in seconds, derived from the 2.4 GSa/s sample clock.
    ELEMENT_START_GRANULARITY = 8 / 2.4e9
    MIN_LENGTH = 16 / 2.4e9
    # TODO: Check if other values commented out should be removed
    INTER_ELEMENT_DEADTIME = 8 / 2.4e9  # 80 / 2.4e9 # 0 / 2.4e9
    # Allowed amplitude / offset ranges per channel type (V).
    CHANNEL_AMPLITUDE_BOUNDS = {
        "analog": (0.01, 5.0),
        "marker": (0.01, 5.0),
    }
    CHANNEL_OFFSET_BOUNDS = {
        "analog": tuple(),  # TODO: Check if there are indeed no bounds for the offset
        "marker": tuple(),  # TODO: Check if there are indeed no bounds for the offset
    }
    IMPLEMENTED_ACCESSORS = ["offset", "amp", "amplitude_scaling"]

    # seqc program skeleton: waveform and codeword-table definitions
    # followed by an infinite playback loop.
    _hdawg_sequence_string_template = (
        "{wave_definitions}\n"
        "\n"
        "{codeword_table_defs}\n"
        "\n"
        "while (1) {{\n"
        "  {playback_string}\n"
        "}}\n"
    )
    def __init__(self, pulsar, awg):
        """Create the pulsar interface and per-core generator modules.

        Args:
            pulsar: the Pulsar instrument this interface belongs to.
            awg: the ZI_HDAWG_core-based instrument to control.
        """
        super().__init__(pulsar, awg)

        try:
            # Here we instantiate a zhinst.qcodes-based HDAWG in addition to
            # the one based on the ZI_base_instrument because the parallel
            # uploading of elf files is only supported by the qcodes driver
            from pycqed.instrument_drivers.physical_instruments. \
                ZurichInstruments.zhinst_qcodes_wrappers import HDAWG8
            self._awg_mcc = HDAWG8(awg.devname, name=awg.name + '_mcc',
                                   host='localhost', interface=awg.interface,
                                   server=awg.server)
            if getattr(self.awg.daq, 'server', None) == 'emulator':
                # This is a hack for virtual setups to make sure that the
                # ready node is in sync between the two mock DAQ servers.
                for i in range(4):
                    path = f'/{self.awg.devname}/awgs/{i}/ready'
                    self._awg_mcc._session.daq_server.nodes[
                        path] = self.awg.daq.nodes[path]
        except ImportError as e:
            log.debug(f'Error importing zhinst-qcodes: {e}.')
            log.debug(f'Parallel elf compilation will not be available for '
                      f'{awg.name} ({awg.devname}).')
            self._awg_mcc = None

        self._init_mcc()
        # One HDAWGGeneratorModule (channel pair) per AWG core.
        self._awg_modules = []
        for awg_nr in self._hdawg_active_awgs():
            channel_pair = HDAWGGeneratorModule(
                awg=self.awg,
                awg_interface=self,
                awg_nr=awg_nr
            )
            self._awg_modules.append(channel_pair)
def _get_awgs_mcc(self) -> list:
if self._awg_mcc is not None:
return list(self._awg_mcc.awgs)
else:
return []
    def create_awg_parameters(self, channel_name_map):
        """Create pulsar parameters for this HDAWG and its 16 channels.

        Args:
            channel_name_map: mapping from channel ids ('ch1'...'ch8',
                'ch1m'...'ch8m') to user-defined channel names.
        """
        super().create_awg_parameters(channel_name_map)

        pulsar = self.pulsar
        name = self.awg.name

        # Override _min_length parameter created in base class
        # TODO: Check if this makes sense, it is a constant for the other AWGs
        # Furthermore, it does not really make sense to manually set the minimum
        # length which is a property of the instrument...
        del pulsar.parameters[f"{name}_min_length"]
        pulsar.add_parameter(f"{name}_min_length",
                             initial_value=self.MIN_LENGTH,
                             parameter_class=ManualParameter)
        pulsar.add_parameter(f"{name}_use_placeholder_waves",
                             initial_value=False, vals=vals.Bool(),
                             parameter_class=ManualParameter)
        pulsar.add_parameter(f"{name}_trigger_source",
                             initial_value="Dig1",
                             vals=vals.Enum("Dig1", "DIO", "ZSync"),
                             parameter_class=ManualParameter,
                             docstring="Defines for which trigger source the "
                                       "AWG should wait, before playing the "
                                       "next waveform. Allowed values are: "
                                       "'Dig1', 'DIO', 'ZSync'.")
        pulsar.add_parameter(f"{name}_prepend_zeros",
                             initial_value=None,
                             vals=vals.MultiType(vals.Enum(None), vals.Ints(),
                                                 vals.Lists(vals.Ints())),
                             parameter_class=ManualParameter)

        # Create per-channel parameters for the 8 analog + 8 marker channels.
        group = []
        for ch_nr in range(8):
            id = f"ch{ch_nr + 1}"
            ch_name = channel_name_map.get(id, f"{name}_{id}")
            self.create_channel_parameters(id, ch_name, "analog")
            pulsar.channels.add(ch_name)
            group.append(ch_name)
            id = f"ch{ch_nr + 1}m"
            ch_name = channel_name_map.get(id, f"{name}_{id}")
            self.create_channel_parameters(id, ch_name, "marker")
            pulsar.channels.add(ch_name)
            group.append(ch_name)
            # channel pairs plus the corresponding marker channels are
            # considered as groups
            if (ch_nr + 1) % 2 == 0:
                for ch_name in group:
                    pulsar.channel_groups.update({ch_name: group})
                group = []
    def create_channel_parameters(self, id:str, ch_name:str, ch_type:str):
        """Create pulsar parameters for a single channel.

        For analog channels this adds amplitude scaling, internal
        modulation and modulation-mode parameters; the first channel of
        each pair additionally gets (direct) modulation-frequency and
        direct-output-amplitude parameters.

        Args:
            id: channel id, e.g. 'ch5' (analog) or 'ch5m' (marker).
            ch_name: user-facing channel name.
            ch_type: "analog" or "marker".
        """
        super().create_channel_parameters(id, ch_name, ch_type)

        pulsar = self.pulsar

        if ch_type == "analog":

            pulsar.add_parameter(
                f"{ch_name}_amplitude_scaling",
                set_cmd=partial(self.awg_setter, id, "amplitude_scaling"),
                get_cmd=partial(self.awg_getter, id, "amplitude_scaling"),
                vals=vals.Numbers(min_value=-1.0, max_value=1.0),
                initial_value=1.0,
                docstring=f"Scales the AWG output of channel by a given factor."
            )
            pulsar.add_parameter(f"{ch_name}_internal_modulation",
                                 initial_value=False, vals=vals.Bool(),
                                 parameter_class=ManualParameter)
            # Map channel id to AWG core index and output index within core.
            awg_nr = (int(id[2:]) - 1) // 2
            output_nr = (int(id[2:]) - 1) % 2
            pulsar.add_parameter(
                '{}_modulation_mode'.format(ch_name),
                vals=vals.Enum('Modulation Off', 'Sine 1', 'Sine 2', 'FG 1' ,
                               'FG 2', 'Advanced', 'off', 'direct',
                               0, 1, 2, 3, 4, 5),
                initial_value='Modulation Off',
                set_cmd=self._hdawg_mod_mode_setter(awg_nr, output_nr),
                get_cmd=self._hdawg_mod_mode_getter(awg_nr, output_nr),
                docstring=f"Modulation mode of channel {ch_name}."
            )

            # first channel of a pair
            if (int(id[2:]) - 1) % 2 == 0:
                param_name = f"{ch_name}_mod_freq"
                pulsar.add_parameter(
                    param_name,
                    unit='Hz',
                    initial_value=None,
                    set_cmd=self._hdawg_mod_freq_setter(awg_nr),
                    get_cmd=self._hdawg_mod_freq_getter(awg_nr),
                    docstring="Carrier frequency of internal modulation for "
                              "a channel pair. Positive (negative) sign "
                              "corresponds to upper (lower) side band. Setting "
                              "it to None disables internal modulation."
                )
                # qcodes will not set the initial value if it is None, so we set
                # it manually here to ensure that internal modulation gets
                # switched off in the init.
                pulsar.set(param_name, None)

                param_name = '{}_direct_mod_freq'.format(ch_name)
                pulsar.add_parameter(
                    param_name,
                    unit='Hz',
                    initial_value=None,
                    set_cmd=self._hdawg_mod_freq_setter(awg_nr, direct=True),
                    get_cmd=self._hdawg_mod_freq_getter(awg_nr, direct=True),
                    docstring=f"Directly output I and Q signals for the "
                              f"channel pair starting with {ch_name}. The output is "
                              f"not modulated according to the uploaded waveform. "
                              f"Positive (negative) sign corresponds to upper "
                              f"(lower) side band. Setting the frequency to "
                              f"None disables the output."
                )
                # qcodes will not set the initial value if it is None, so we set
                # it manually here to ensure that internal modulation gets
                # switched off in the init.
                pulsar.set(param_name, None)

                param_name = '{}_direct_output_amp'.format(ch_name)
                pulsar.add_parameter(
                    param_name,
                    unit='V',
                    initial_value=0,
                    set_cmd=self._hdawg_direct_output_amp_setter(awg_nr),
                    get_cmd=self._hdawg_direct_output_amp_getter(awg_nr),
                    docstring=f"Amplitude of the sine generator output used in "
                              f"direct output mode."
                )

        else: # ch_type == "marker"
            # So far no additional parameters specific to marker channels
            pass
    def awg_setter(self, id:str, param:str, value):
        """Write a channel accessor to the instrument.

        Args:
            id: channel id, e.g. 'ch3' (analog) or 'ch3m' (marker).
            param: one of "offset", "amp", "amplitude_scaling".
            value: value to write (amp is half the peak-to-peak range).
        """

        # Sanity checks
        super().awg_setter(id, param, value)

        channel_type = "analog" if id[-1] != "m" else "marker"
        ch = int(id[2]) - 1

        if param == "offset":
            if channel_type == "analog":
                self.awg.set(f"sigouts_{ch}_offset", value)
            else:
                pass # raise NotImplementedError("Cannot set offset on marker channels.")
        elif param == "amp":
            if channel_type == "analog":
                # The instrument stores the full range, i.e. 2x the amplitude.
                self.awg.set(f"sigouts_{ch}_range", 2 * value)
            else:
                pass # raise NotImplementedError("Cannot set amp on marker channels.")
        elif param == "amplitude_scaling" and channel_type == "analog":
            # ch1/ch2 are on sub-awg 0, ch3/ch4 are on sub-awg 1, etc.
            awg = (int(id[2:]) - 1) // 2
            # ch1/ch3/... are output 0, ch2/ch4/... are output 0,
            output = (int(id[2:]) - 1) - 2 * awg
            self.awg.set(f"awgs_{awg}_outputs_{output}_amplitude", value)
    def awg_getter(self, id:str, param:str):
        """Read a channel accessor from the instrument.

        Args:
            id: channel id, e.g. 'ch3' (analog) or 'ch3m' (marker).
            param: one of "offset", "amp", "amplitude_scaling".

        Returns:
            Current value; marker channels report offset 0 and amp 1.
        """

        # Sanity checks
        super().awg_getter(id, param)

        channel_type = "analog" if id[-1] != "m" else "marker"
        ch = int(id[2]) - 1

        if param == "offset":
            if channel_type == "analog":
                return self.awg.get(f"sigouts_{ch}_offset")
            else:
                return 0
        elif param == "amp":
            if channel_type == "analog":
                # Prefer the cached value while pulsar has prequeried all
                # instrument settings.
                if self.pulsar.awgs_prequeried:
                    return self.awg.parameters[f"sigouts_{ch}_range"].get_latest() / 2
                else:
                    return self.awg.get(f"sigouts_{ch}_range") / 2
            else:
                return 1
        elif param == "amplitude_scaling" and channel_type == "analog":
            # ch1/ch2 are on sub-awg 0, ch3/ch4 are on sub-awg 1, etc.
            awg = (int(id[2:]) - 1) // 2
            # ch1/ch3/... are output 0, ch2/ch4/... are output 0,
            output = (int(id[2:]) - 1) - 2 * awg
            return self.awg.get(f"awgs_{awg}_outputs_{output}_amplitude")
def _hdawg_direct_output_amp_setter(self, awg_nr):
def s(val):
self.awg.set(f'sines_{awg_nr * 2}_amplitudes_0', val)
self.awg.set(f'sines_{awg_nr * 2 + 1}_amplitudes_1', val)
return s
def _hdawg_direct_output_amp_getter(self, awg_nr):
def g():
amp0 = self.awg.get(f'sines_{awg_nr * 2}_amplitudes_0')
amp1 = self.awg.get(f'sines_{awg_nr * 2 + 1}_amplitudes_1')
if amp0 != amp1:
log.warning(f"Amplitude of sine generator 0 on awg {awg_nr * 2}"
f"is {amp0} V and not equal to the amplitude of "
f"sine generator 1 on awg {awg_nr * 2 + 1} which is"
f" {amp1} V.")
return amp0
return g
def _hdawg_mod_freq_setter(self, awg_nr, direct=False, amp=0.0):
def s(val):
log.debug(f'{self.awg.name}_awgs_{awg_nr} modulation freq: {val}')
if val == None:
self.awg.set(f'awgs_{awg_nr}_outputs_0_modulation_mode', 0)
self.awg.set(f'awgs_{awg_nr}_outputs_1_modulation_mode', 0)
self.awg.set(f'sines_{awg_nr * 2}_enables_0', 0)
self.awg.set(f'sines_{awg_nr * 2}_enables_1', 0)
self.awg.set(f'sines_{awg_nr * 2 + 1}_enables_0', 0)
self.awg.set(f'sines_{awg_nr * 2 + 1}_enables_1', 0)
else:
# FIXME: this currently only works for real-valued baseband
# signals (zero Q component), and it assumes that the the I
# component gets programmed to both channels, see the case
# of mod_frequency=None in
# pulse_library.SSB_DRAG_pulse.chan_wf.
# In the future, we should extended this to support general
# IQ modulation and adapt the pulse library accordingly.
# Also note that we here assume that the I (Q) channel is the
# first (second) channel of a pair.
sideband = np.sign(val)
freq = np.abs(val)
# For the oscillator, we can use any index, as long as the
# respective osc is not needed for anything else. Since we
# currently use oscs only here, the following index
# calculated from awg_nr can ensure that a unique osc is
# used for every channel pair for which we configure
# internal modulation.
osc_nr = awg_nr * 4
# configure the oscillator frequency
self.awg.set(f'oscs_{osc_nr}_freq', freq)
# set up the two sines of the channel pair with the same
# oscillator and with 90 phase shift
self.awg.set(f'sines_{awg_nr * 2}_oscselect', osc_nr)
self.awg.set(f'sines_{awg_nr * 2 + 1}_oscselect', osc_nr)
self.awg.set(f'sines_{awg_nr * 2}_phaseshift', 0)
# positive (negative) phase shift is needed for upper (
# lower) sideband
self.awg.set(f'sines_{awg_nr * 2 + 1}_phaseshift', sideband * 90)
# see pycqed\instrument_drivers\physical_instruments\
# ZurichInstruments\zi_parameter_files\node_doc_HDAWG8.json
# for description of the nodes used below.
# awg_nr: ch1/ch2 are on sub-awg 0, ch3/ch4 are on sub-awg 1,
# etc. Mode 1 (2) means that the AWG Output is multiplied with
# Sine Generator signal 0 (1) of this sub-awg
if direct:
self.awg.set(f'sines_{awg_nr * 2}_enables_0', 1)
self.awg.set(f'sines_{awg_nr * 2}_enables_1', 0)
self.awg.set(f'sines_{awg_nr * 2 + 1}_enables_0', 0)
self.awg.set(f'sines_{awg_nr * 2 + 1}_enables_1', 1)
self.awg.set(f'awgs_{awg_nr}_outputs_0_modulation_mode', 0)
self.awg.set(f'awgs_{awg_nr}_outputs_1_modulation_mode', 0)
else:
self.awg.set(f'sines_{awg_nr * 2}_enables_0', 0)
self.awg.set(f'sines_{awg_nr * 2}_enables_1', 0)
self.awg.set(f'sines_{awg_nr * 2 + 1}_enables_0', 0)
self.awg.set(f'sines_{awg_nr * 2 + 1}_enables_1', 0)
self.awg.set(f'awgs_{awg_nr}_outputs_0_modulation_mode', 1)
self.awg.set(f'awgs_{awg_nr}_outputs_1_modulation_mode', 2)
return s
    def _hdawg_mod_freq_getter(self, awg_nr, direct=False):
        """Return a getter inferring the modulation frequency of one AWG
        core from the current node settings (None when modulation is off
        or the configuration was not made by _hdawg_mod_freq_setter)."""
        def g():
            modes = [
                self.awg.get(f'awgs_{awg_nr}_outputs_0_modulation_mode'),
                self.awg.get(f'awgs_{awg_nr}_outputs_1_modulation_mode')
            ]
            if direct:
                enables = [
                    self.awg.get(f'sines_{awg_nr * 2}_enables_0'),
                    self.awg.get(f'sines_{awg_nr * 2}_enables_1'),
                    self.awg.get(f'sines_{awg_nr * 2 + 1}_enables_0'),
                    self.awg.get(f'sines_{awg_nr * 2 + 1}_enables_1')
                ]
            # NOTE: `enables` is only defined when direct=True; both uses
            # below are protected by short-circuit evaluation of the
            # `direct` / `modes` checks.
            if modes == [0, 0] and (not direct or enables != [1, 0, 0, 1]):
                # If modulation mode is 0 for both outputs, internal
                # modulation is switched off (indicated by a modulation
                # frequency set to None).
                return None
            elif (modes == [1, 2] and not direct) or \
                    (modes == [0, 0] and enables == [1, 0, 0, 1]):
                # these calcuations invert the calculations in
                # _hdawg_mod_freq_setter, see therein for explaining comments
                osc0 = self.awg.get(f'sines_{awg_nr * 2}_oscselect')
                osc1 = self.awg.get(f'sines_{awg_nr * 2 + 1}_oscselect')
                if osc0 == osc1:
                    sideband = np.sign(self.awg.get(
                        f'sines_{awg_nr * 2 + 1}_phaseshift'))
                    return sideband * self.awg.get(f'oscs_{osc0}_freq')
            # If we have not returned a result at this point, the current
            # AWG settings do not correspond to a configuration made by
            # _hdawg_mod_freq_setter.
            log.warning('The current modulation configuration is not '
                        'supported by pulsar. Cannot retrieve modulation '
                        'frequency.')
            return None
        return g
    def _hdawg_mod_mode_setter(self, awg_nr, output_nr):
        """Return a setter translating symbolic modulation modes into the
        integer node value of the given output of one AWG core."""
        def s(val):
            # see pycqed\instrument_drivers\physical_instruments\
            # ZurichInstruments\zi_parameter_files\node_doc_HDAWG8.json
            # for description of the nodes used below.
            mod_mode_dict = {'Modulation Off': 0, 'Sine 1': 1, 'Sine 2': 2,
                             'FG 1': 3, 'FG 2': 4, 'Advanced': 5, 'off':0,
                             'direct':5}
            if isinstance(val, str):
                mode = mod_mode_dict[val]
            else:
                # Integer values are written through unchanged.
                mode = val
            log.debug(f'{self.awg.name}_awgs_{awg_nr} modulation mod: {val} ({mode})')
            self.awg.set(f'awgs_{awg_nr}_outputs_{output_nr}_modulation_mode', mode)
        return s
def _hdawg_mod_mode_getter(self, awg_nr, output_nr):
def g():
return self.awg.get(f'awgs_{awg_nr}_outputs_{output_nr}_modulation_mode')
return g
def get_divisor(self, chid, awg):
"""Divisor is 2 for modulated non-marker channels, 1 for other cases."""
name = self.pulsar._id_channel(chid, awg)
if chid[-1]!='m' and self.pulsar.get(f"{name}_internal_modulation"):
return 2
else:
return 1
def program_awg(self, awg_sequence, waveforms, repeat_pattern=None,
channels_to_upload="all", channels_to_program="all"):
self._zi_program_generator_awg(
awg_sequence=awg_sequence,
waveforms=waveforms,
repeat_pattern=repeat_pattern,
channels_to_upload=channels_to_upload,
channels_to_program=channels_to_program,
)
def is_awg_running(self):
return all([self.awg.get('awgs_{}_enable'.format(awg_nr))
for awg_nr in self._hdawg_active_awgs()
if self.awg._awg_program[awg_nr] is not None])
def clock(self):
return self.awg.clock_freq()
def _hdawg_active_awgs(self):
return [0,1,2,3]
def get_segment_filter_userregs(self, include_inactive=False):
return [(f'awgs_{i}_userregs_{self.awg.USER_REG_FIRST_SEGMENT}',
f'awgs_{i}_userregs_{self.awg.USER_REG_LAST_SEGMENT}')
for i in range(4) if include_inactive or
self.awg._awg_program[i] is not None]
def sigout_on(self, ch, on=True):
chid = self.pulsar.get(ch + '_id')
if chid[-1] != 'm': # not a marker channel
self.awg.set('sigouts_{}_on'.format(int(chid[-1]) - 1), on)
def upload_waveforms(self, awg_nr, wave_idx, waveforms, wave_hashes):
# This wrapper method is needed because 'finalize_upload_after_mcc'
# method in 'MultiCoreCompilerQudevZI' class calls 'upload_waveforms'
# method from device interfaces instead of from channel interfaces.
self._awg_modules[awg_nr].upload_waveforms(
wave_idx=wave_idx,
waveforms=waveforms,
wave_hashes=wave_hashes
)
class HDAWGGeneratorModule(ZIGeneratorModule):
"""Pulsar interface for ZI HDAWG AWG modules. Each AWG module consists of
two analog channels and two marker channels. Please refer to ZI user manual
https://docs.zhinst.com/hdawg_user_manual/overview.html
for more details."""
    def __init__(self, *args, **kwargs):
        """Forward construction to ZIGeneratorModule and set HDAWG defaults."""
        super().__init__(*args, **kwargs)
        # Whether internal modulation is enabled for this channel pair.
        self._hdawg_internal_mod = False
        # TODO: this attribute is used only when using the old internal
        # modulation implementation on HDAWG. Remove this attribute once the
        # new implementation is deployed.
        """Flag that indicates whether internal modulation is turned on for
        this device."""

        self._device_type = 'hdawg'
        """Device type of the generator."""
def _generate_channel_ids(
self,
awg_nr
):
ch1id = 'ch{}'.format(awg_nr * 2 + 1)
ch1mid = 'ch{}m'.format(awg_nr * 2 + 1)
ch2id = 'ch{}'.format(awg_nr * 2 + 2)
ch2mid = 'ch{}m'.format(awg_nr * 2 + 2)
self.channel_ids = [ch1id, ch1mid, ch2id, ch2mid]
self.analog_channel_ids = [ch1id, ch2id]
self.marker_channel_ids = [ch1mid, ch2mid]
self._upload_idx = awg_nr
def _generate_divisor(self):
"""Generate divisors for all channels. Divisor is 2 for non-modulated
marker channels, 1 for every other channel."""
for chid in self.channel_ids:
self._divisor[chid] = self._awg_interface.get_divisor(
chid=chid,
awg=self._awg.name,
)
def _update_internal_mod_config(
self,
awg_sequence,
):
"""Updates self._hdawg_internal_modulation flag according to the
setting specified in pulsar.
Args:
awg_sequence: A list of elements. Each element consists of a
waveform-hash for each codeword and each channel.
"""
channels = [self.pulsar._id_channel(chid, self._awg.name)
for chid in self.analog_channel_ids]
if all([self.pulsar.get(f"{chan}_internal_modulation")
for chan in channels]):
self._hdawg_internal_mod = True
elif not any([self.pulsar.get(f"{chan}_internal_modulation")
for chan in channels]):
self._hdawg_internal_mod = False
else:
raise NotImplementedError('Internal modulation can only be'
'specified per sub AWG!')
def _update_waveforms(self, wave_idx, wave_hashes, waveforms):
awg_nr = self._awg_nr
if self.pulsar.use_sequence_cache():
if wave_hashes == self.waveform_cache.get(wave_idx, None):
log.debug(
f'{self._awg.name} awgs{awg_nr}: {wave_idx} same as in '
f'cache')
return
log.debug(
f'{self._awg.name} awgs{awg_nr}: {wave_idx} needs to be uploaded')
a1, m1, a2, m2 = [waveforms.get(h, None) for h in wave_hashes]
n = max([len(w) for w in [a1, m1, a2, m2] if w is not None])
if m1 is not None and a1 is None:
a1 = np.zeros(n)
if m1 is None and a1 is None and (m2 is not None or a2 is not None):
# Hack needed to work around an HDAWG bug where programming only
# m2 channel does not work. Remove once bug is fixed.
a1 = np.zeros(n)
if m2 is not None and a2 is None:
a2 = np.zeros(n)
if m1 is not None or m2 is not None:
m1 = np.zeros(n) if m1 is None else np.pad(m1, n - m1.size)
m2 = np.zeros(n) if m2 is None else np.pad(m2, n - m2.size)
if a1 is None:
mc = m2
else:
mc = m1 + 4*m2
else:
mc = None
a1 = None if a1 is None else np.pad(a1, n - a1.size)
a2 = None if a2 is None else np.pad(a2, n - a2.size)
wf_raw_combined = merge_waveforms(a1, a2, mc)
if self.pulsar.use_mcc() and len(self._awg_interface.awgs_mcc) > 0:
# Parallel seqc compilation is used, which must take place before
# waveform upload. Waveforms are added to self.wfms_to_upload and
# will be uploaded to device in pulsar._program_awgs.
self._awg_interface.wfms_to_upload[(awg_nr, wave_idx)] = \
(wf_raw_combined, wave_hashes)
else:
self.upload_waveforms(wave_idx, wf_raw_combined, wave_hashes)
def upload_waveforms(self, wave_idx, waveforms, wave_hashes):
"""
Upload waveforms to an awg core (awg_nr).
Args:
wave_idx (int): index of wave upload (0 or 1)
waveforms (array): waveforms to upload
wave_hashes: waveforms hashes
"""
# Upload waveforms to awg
self._awg.setv(f'awgs/{self._awg_nr}/waveform/waves/{wave_idx}',
waveforms)
# Save hashes in the cache memory after a successful waveform upload.
self._save_hashes(wave_idx, wave_hashes)
def _save_hashes(self, wave_idx, wave_hashes):
"""
Save hashes in the cache memory after a successful waveform upload.
Args:
wave_idx (int): index of wave upload (0 or 1)
wave_hashes: waveforms hashes
"""
if self.pulsar.use_sequence_cache():
self.waveform_cache[wave_idx] = wave_hashes
def _update_awg_instrument_status(self):
# tell ZI_base_instrument that it should not compile a program on
# this sub AWG (because we already do it here)
self._awg._awg_needs_configuration[self._awg_nr] = False
# tell ZI_base_instrument.start() to start this sub AWG (The base
# class will start sub AWGs for which _awg_program is not None. Since
# we set _awg_needs_configuration to False, we do not need to put the
# actual program here, but anything different from None is sufficient.)
self._awg._awg_program[self._awg_nr] = True
def _generate_playback_string(
self,
wave,
codeword,
use_placeholder_waves,
metadata,
first_element_of_segment
):
if not self._hdawg_internal_mod:
if first_element_of_segment:
prepend_zeros = self.pulsar.parameters[
f"{self._awg.name}_prepend_zeros"]()
if prepend_zeros is None:
prepend_zeros = self.pulsar.prepend_zeros()
elif isinstance(prepend_zeros, list):
prepend_zeros = prepend_zeros[self._awg_nr]
else:
prepend_zeros = 0
self._playback_strings += self._awg_interface.zi_playback_string(
name=self._awg.name,
device='hdawg',
wave=wave,
codeword=codeword,
prepend_zeros=prepend_zeros,
placeholder_wave=use_placeholder_waves,
allow_filter=metadata.get('allow_filter', False)
)
elif not use_placeholder_waves:
pb_string, interleave_string = \
self._awg_interface._zi_interleaved_playback_string(
name=self._awg.name,
device='hdawg',
counter=self._counter,
wave=wave,
codeword=codeword
)
self._counter += 1
self._playback_strings += pb_string
self._interleaves += interleave_string
else:
raise NotImplementedError("Placeholder waves in "
"combination with internal "
"modulation not implemented.")
def _configure_awg_str(
self,
awg_str
):
self._awg.configure_awg_from_string(
self._awg_nr,
program_string=awg_str,
timeout=600
)
def _set_signal_output_status(self):
if self.pulsar.sigouts_on_after_programming():
for ch in range(8):
self._awg.set('sigouts_{}_on'.format(ch), True)
| {
"content_hash": "fea73d4899f24410ecd651bc3a3fbdd5",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 89,
"avg_line_length": 44.5887573964497,
"alnum_prop": 0.5333421803463606,
"repo_name": "QudevETH/PycQED_py3",
"id": "3a6a34016dfd5e782a4ef08c5b2a5ce79953434e",
"size": "30142",
"binary": false,
"copies": "1",
"ref": "refs/heads/qudev_master",
"path": "pycqed/measurement/waveform_control/pulsar/hdawg8_pulsar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5431925"
}
],
"symlink_target": ""
} |
import argparse
import codecs
import sys
from tree import DepTree
from collections import defaultdict
from itertools import izip
# Command-line interface: data-set prefixes plus the file suffixes that
# distinguish the lemma and word-form corpora.
parser = argparse.ArgumentParser(description="Inflect a lemmatized corpus")
parser.add_argument("-t", type=str, default="data/train", help="training data prefix")
parser.add_argument("-d", type=str, default="data/dtest", help="test data prefix")
parser.add_argument("-l", type=str, default="lemma", help="lemma file suffix")
parser.add_argument("-w", type=str, default="form", help="word file suffix")
args = parser.parse_args()

# Wrap stdin/stdout so console I/O is transparently UTF-8 (Python 2 idiom;
# the corpus files are UTF-8 encoded).
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
def inflections(lemma, postag, deptree, model=None):
    """Return candidate inflections for *lemma*, most frequent first.

    Backs off through progressively coarser conditioning contexts:
    (lemma, POS tag, dependency site) -> (lemma, tag) -> (lemma, site)
    -> lemma alone.  If the lemma was never observed in training, the
    lemma itself is returned as the only candidate.

    ``model`` defaults to the global LEMMAS count table built in
    __main__; it can be passed explicitly for testing.
    """
    # Deprecated dict.has_key() and cmp-based sorted() replaced with `in`
    # and a key function (descending count); ordering is unchanged because
    # both sorts are stable.
    counts_table = LEMMAS if model is None else model
    for key in ((lemma, postag, deptree),
                (lemma, postag),
                (lemma, deptree),
                lemma):
        if key in counts_table:
            counts = counts_table[key]
            return sorted(counts, key=counts.get, reverse=True)
    return [lemma]
def best_inflection(lemma, postag, tree):
    """Return the single most probable inflection for the given context."""
    candidates = inflections(lemma, postag, tree)
    return candidates[0]
if __name__ == '__main__':
    # Build a simple unigram model on the training data
    LEMMAS = defaultdict(defaultdict)
    if args.t:
        def combine(a, b):
            # Join a data-set prefix and file suffix, e.g. 'data/train.tree'.
            return '%s.%s' % (a, b)

        def utf8read(file):
            # Open a corpus file as a UTF-8 decoded stream.
            return codecs.open(file, 'r', 'utf-8')
        # read the 2 deptree and POStag files
        trees_training = utf8read(combine(args.t, "tree"))
        trees_testing = utf8read(combine(args.d, "tree"))
        tags_training = utf8read(combine(args.t, "tag"))
        tags_testing = utf8read(combine(args.d, "tag"))

        # At this point let's make a vivified hash map.
        # This map will hash the lemma entries to inflections with a third
        # value which is the number of counts.

        # TRAIN
        for words, lemmas, postags, deptree in izip(utf8read(combine(args.t, args.w)),
                                                    utf8read(combine(args.t, args.l)),
                                                    tags_training,
                                                    trees_training):
            # Remove preceding and trailing spaces, convert to lowercase and split
            words_cleaned = words.rstrip().lstrip().lower().split()
            lemmas_cleaned = lemmas.rstrip().lstrip().lower().split()
            tags_cleaned = postags.rstrip().lstrip().lower().split()
            trees_cleaned = DepTree(deptree)
            for word, lemma, postag, tree_site in izip(words_cleaned, lemmas_cleaned, tags_cleaned, trees_cleaned):
                # Count each word form under every back-off context used by
                # inflections(): lemma alone, (lemma, tag), (lemma, site)
                # and (lemma, tag, site).
                dep_tree = (tree_site.parent_index(), tree_site.label())
                LEMMAS[lemma][word] = LEMMAS[lemma].get(word, 0) + 1
                LEMMAS[(lemma, postag)][word] = LEMMAS[(lemma, postag)].get(word, 0) + 1
                LEMMAS[(lemma, dep_tree)][word] = LEMMAS[(lemma, dep_tree)].get(word, 0) + 1
                LEMMAS[(lemma, postag, dep_tree)][word] = LEMMAS[(lemma, postag, dep_tree)].get(word, 0) + 1

        # Training in the block above has now built up our LEMMA vivified hash
        # Now best inflections can be retrieved from the map

        # TEST
        for lemmas, postags, deptree in izip(utf8read(combine(args.d, args.l)), tags_testing, trees_testing):
            # Remove preceding and trailing spaces, convert to lowercase and split
            lemmas_cleaned = lemmas.rstrip().lstrip().lower().split()
            tags_cleaned = postags.rstrip().lstrip().lower().split()
            trees_cleaned = DepTree(deptree)
            # Emit the most frequent inflection for every lemma in the line
            # (Python 2 print statement).
            print ' '.join([best_inflection(l, p, (t.parent_index(), t.label())) for l, p, t in
                            izip(lemmas_cleaned, tags_cleaned, trees_cleaned)])
| {
"content_hash": "5fb96c85067bbe8189ba7957095f9c5b",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 135,
"avg_line_length": 48.59090909090909,
"alnum_prop": 0.622778297474275,
"repo_name": "abhambh1/mt_spring_2016",
"id": "a963e894ca42c70aa6d1db3955c0567affa5219a",
"size": "4299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inflect/scripts/POS_Deptree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Eiffel",
"bytes": "9638562"
},
{
"name": "FORTRAN",
"bytes": "11607801"
},
{
"name": "Python",
"bytes": "42849"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
def set_receipt_types(apps, schema_editor):
    """Backfill ``receipt_type`` on existing contributions.

    External contributions get 'external-receipt'; everything else gets
    'mayapur-receipt'. Rows that already carry a receipt type are left
    untouched.
    """
    Contribution = apps.get_model("contributions", "Contribution")
    for contribution in Contribution.objects.all():
        if contribution.receipt_type:
            continue
        contribution.receipt_type = (
            'external-receipt' if contribution.is_external
            else 'mayapur-receipt')
        contribution.save()
class Migration(migrations.Migration):
    """Data migration: populate ``receipt_type`` on pre-existing rows."""

    dependencies = [
        ('contributions', '0049_contribution_receipt_type'),
    ]

    operations = [
        # NOTE: no reverse function is supplied, so this migration cannot
        # be unapplied.
        migrations.RunPython(set_receipt_types),
    ]
| {
"content_hash": "feecba81dc7d9cd24fd84c60fb0a8a67",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 28.08,
"alnum_prop": 0.6509971509971509,
"repo_name": "mayapurmedia/tovp",
"id": "874178fd17c24fb4f815fe03355d4fcb06a28c83",
"size": "726",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/contributions/migrations/0050_auto_20160411_1418.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190169"
},
{
"name": "HTML",
"bytes": "281143"
},
{
"name": "JavaScript",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "504316"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponsePermanentRedirect, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from ..cart.utils import set_cart_cookie
from ..core.utils import serialize_decimal
from .models import Category
from .utils import (products_with_details, products_for_cart,
handle_cart_form, get_availability,
get_product_images, get_variant_picker_data,
get_product_attributes_data, product_json_ld)
def product_details(request, slug, product_id, form=None):
    """Render the product details page.

    The following variables are available to the template:

    product:
        The Product instance itself.

    is_visible:
        Whether the product is visible to regular users (for cases when an
        admin is previewing a product before publishing).

    form:
        The add-to-cart form.

    price_range:
        The PriceRange for the product including all discounts.

    undiscounted_price_range:
        The PriceRange excluding all discounts.

    discount:
        Either a Price instance equal to the discount value or None if no
        discount was available.

    local_price_range:
        The same PriceRange from price_range represented in user's local
        currency. The value will be None if exchange rate is not available or
        the local currency is the same as site's default currency.
    """
    queryset = products_with_details(user=request.user)
    product = get_object_or_404(queryset, id=product_id)
    # Enforce the canonical slug in the URL.
    if product.get_slug() != slug:
        return HttpResponsePermanentRedirect(product.get_absolute_url())
    is_visible = (product.available_on is None or
                  product.available_on <= datetime.date.today())
    if form is None:
        form = handle_cart_form(request, product, create_cart=False)[0]
    availability = get_availability(product, discounts=request.discounts,
                                    local_currency=request.currency)
    # Prefer a product-class specific template; fall back to the generic one.
    templates = [
        'product/details_%s.html' % (type(product).__name__.lower(),),
        'product/details.html',
    ]
    product_images = get_product_images(product)
    variant_picker_data = get_variant_picker_data(
        product, request.discounts, request.currency)
    product_attributes = get_product_attributes_data(product)
    # The variant picker only works when every variant carries attributes.
    show_variant_picker = all(v.attributes for v in product.variants.all())
    json_ld_data = product_json_ld(product, availability, product_attributes)
    context = {
        'is_visible': is_visible,
        'form': form,
        'availability': availability,
        'product': product,
        'product_attributes': product_attributes,
        'product_images': product_images,
        'show_variant_picker': show_variant_picker,
        'variant_picker_data': json.dumps(
            variant_picker_data, default=serialize_decimal),
        'json_ld_product_data': json.dumps(
            json_ld_data, default=serialize_decimal),
    }
    return TemplateResponse(request, templates, context)
def product_add_to_cart(request, slug, product_id):
    """Handle the add-to-cart form submission for a product.

    Non-POST requests are redirected back to the product details page.
    On a valid form the client is sent to the cart (as a JSON ``next``
    URL for AJAX requests); on an invalid form the errors are returned
    as JSON (AJAX) or the details page is re-rendered with the form.
    """
    if request.method != 'POST':
        return redirect(reverse(
            'product:details',
            kwargs={'product_id': product_id, 'slug': slug}))
    products = products_for_cart(user=request.user)
    product = get_object_or_404(products, pk=product_id)
    form, cart = handle_cart_form(request, product, create_cart=True)
    if form.is_valid():
        form.save()
        if request.is_ajax():
            response = JsonResponse({'next': reverse('cart:index')}, status=200)
        else:
            response = redirect('cart:index')
    elif request.is_ajax():
        response = JsonResponse({'error': form.errors}, status=400)
    else:
        response = product_details(request, slug, product_id, form)
    if not request.user.is_authenticated():
        # Anonymous users carry their cart in a cookie.
        set_cart_cookie(cart, response)
    return response
def category_index(request, path, category_id):
    """Render a category page, enforcing the canonical URL path."""
    category = get_object_or_404(Category, id=category_id)
    canonical_path = category.get_full_path()
    if canonical_path != path:
        # Permanent redirect keeps old category URLs working after renames.
        return redirect('product:category', permanent=True,
                        path=canonical_path, category_id=category_id)
    return TemplateResponse(request, 'category/index.html',
                            {'category': category})
| {
"content_hash": "4cce7e17a2de7806f307691284c9b82e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 38.563025210084035,
"alnum_prop": 0.659185007626934,
"repo_name": "itbabu/saleor",
"id": "42f4372046f3bb496cb3962d21f5123c8e6c4079",
"size": "4589",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "saleor/product/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "63640"
},
{
"name": "HTML",
"bytes": "381272"
},
{
"name": "JavaScript",
"bytes": "58958"
},
{
"name": "Python",
"bytes": "651031"
}
],
"symlink_target": ""
} |
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a model instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.
    """
    from django.db import models
    opts = instance._meta

    cleaned_data = form.cleaned_data
    deferred_file_fields = []
    for field in opts.fields:
        if (not field.editable or isinstance(field, models.AutoField)
                or field.name not in cleaned_data):
            continue
        if fields is not None and field.name not in fields:
            continue
        if exclude and field.name in exclude:
            continue
        # Leave defaults for fields that aren't in POST data, except for
        # checkbox inputs because they don't appear in POST data if not checked.
        if (field.has_default() and
                form.add_prefix(field.name) not in form.data and
                not getattr(form[field.name].field.widget,
                            'dont_use_model_field_default_for_empty_data',
                            False)):
            continue
        # Defer saving file-type fields until after the other fields, so a
        # callable upload_to can use the values from other fields.
        if isinstance(field, models.FileField):
            deferred_file_fields.append(field)
        else:
            field.save_form_data(instance, cleaned_data[field.name])

    for field in deferred_file_fields:
        field.save_form_data(instance, cleaned_data[field.name])

    return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict containing the data in ``instance`` suitable for passing as
    a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned dict.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned dict, even if they are listed in
    the ``fields`` argument.
    """
    opts = instance._meta
    candidate_fields = chain(
        opts.concrete_fields, opts.private_fields, opts.many_to_many)
    data = {}
    for field in candidate_fields:
        # Non-editable fields (e.g. auto-created ones) never contribute
        # initial form data.
        if not getattr(field, 'editable', False):
            continue
        if fields and field.name not in fields:
            continue
        if exclude and field.name in exclude:
            continue
        data[field.name] = field.value_from_object(instance)
    return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
                     formfield_callback=None, localized_fields=None,
                     labels=None, help_texts=None, error_messages=None,
                     field_classes=None):
    """
    Returns a ``OrderedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.

    ``localized_fields`` is a list of names of fields which should be localized.

    ``labels`` is a dictionary of model field names mapped to a label.

    ``help_texts`` is a dictionary of model field names mapped to a help text.

    ``error_messages`` is a dictionary of model field names mapped to a
    dictionary of error messages.

    ``field_classes`` is a dictionary of model field names mapped to a form
    field class.
    """
    field_list = []
    ignored = []
    opts = model._meta
    # Avoid circular import
    from django.db.models.fields import Field as ModelField
    sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)]
    for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)):
        if not getattr(f, 'editable', False):
            # Explicitly requesting a non-editable field is a caller error.
            if (fields is not None and f.name in fields and
                    (exclude is None or f.name not in exclude)):
                raise FieldError(
                    "'%s' cannot be specified for %s model form as it is a non-editable field" % (
                        f.name, model.__name__)
                )
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue

        # Collect the per-field customisations into formfield() kwargs.
        kwargs = {}
        if widgets and f.name in widgets:
            kwargs['widget'] = widgets[f.name]
        if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
            kwargs['localize'] = True
        if labels and f.name in labels:
            kwargs['label'] = labels[f.name]
        if help_texts and f.name in help_texts:
            kwargs['help_text'] = help_texts[f.name]
        if error_messages and f.name in error_messages:
            kwargs['error_messages'] = error_messages[f.name]
        if field_classes and f.name in field_classes:
            kwargs['form_class'] = field_classes[f.name]

        if formfield_callback is None:
            formfield = f.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(f, **kwargs)

        if formfield:
            field_list.append((f.name, formfield))
        else:
            # formfield() returned a falsy value: the model field opted out
            # of having a form field; remember it so it is dropped below.
            ignored.append(f.name)
    field_dict = OrderedDict(field_list)
    if fields:
        # Re-order to match the requested ``fields`` ordering, dropping
        # excluded and ignored names.
        field_dict = OrderedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
class ModelFormOptions(object):
    """Container for the options declared on a ModelForm's inner ``Meta``.

    Every supported Meta attribute is copied onto the instance, defaulting
    to None when the attribute is absent (or when no options object was
    supplied at all).
    """

    # All Meta attributes recognised by the ModelForm machinery.
    _OPTION_NAMES = ('model', 'fields', 'exclude', 'widgets',
                     'localized_fields', 'labels', 'help_texts',
                     'error_messages', 'field_classes')

    def __init__(self, options=None):
        for name in self._OPTION_NAMES:
            setattr(self, name, getattr(options, name, None))
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
    """Metaclass that builds ``base_fields`` for ModelForm subclasses from
    the model declared on the inner ``Meta`` class."""

    def __new__(mcs, name, bases, attrs):
        # A formfield_callback declared on a base class's Meta is inherited
        # unless overridden by a class attribute on this class.
        base_formfield_callback = None
        for b in bases:
            if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'):
                base_formfield_callback = b.Meta.formfield_callback
                break

        formfield_callback = attrs.pop('formfield_callback', base_formfield_callback)

        new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)

        # BaseModelForm's direct subclass ModelForm itself has no Meta to
        # process; return it unchanged.
        if bases == (BaseModelForm,):
            return new_class

        opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))

        # We check if a string was passed to `fields` or `exclude`,
        # which is likely to be a mistake where the user typed ('foo') instead
        # of ('foo',)
        for opt in ['fields', 'exclude', 'localized_fields']:
            value = getattr(opts, opt)
            if isinstance(value, six.string_types) and value != ALL_FIELDS:
                msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
                       "Did you mean to type: ('%(value)s',)?" % {
                           'model': new_class.__name__,
                           'opt': opt,
                           'value': value,
                       })
                raise TypeError(msg)

        if opts.model:
            # If a model is defined, extract form fields from it.
            if opts.fields is None and opts.exclude is None:
                raise ImproperlyConfigured(
                    "Creating a ModelForm without either the 'fields' attribute "
                    "or the 'exclude' attribute is prohibited; form %s "
                    "needs updating." % name
                )

            if opts.fields == ALL_FIELDS:
                # Sentinel for fields_for_model to indicate "get the list of
                # fields from the model"
                opts.fields = None

            fields = fields_for_model(opts.model, opts.fields, opts.exclude,
                                      opts.widgets, formfield_callback,
                                      opts.localized_fields, opts.labels,
                                      opts.help_texts, opts.error_messages,
                                      opts.field_classes)

            # make sure opts.fields doesn't specify an invalid field
            none_model_fields = [k for k, v in six.iteritems(fields) if not v]
            missing_fields = (set(none_model_fields) -
                              set(new_class.declared_fields.keys()))
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(new_class.declared_fields)
        else:
            fields = new_class.declared_fields

        new_class.base_fields = fields

        return new_class
class BaseModelForm(BaseForm):
    """A Form bound to a model instance: it validates against the model's
    constraints and can persist the instance via ``save()``."""

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=None,
                 empty_permitted=False, instance=None, use_required_attribute=None):
        opts = self._meta
        if opts.model is None:
            raise ValueError('ModelForm has no model class specified.')
        if instance is None:
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.model()
            object_data = {}
        else:
            self.instance = instance
            object_data = model_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        # self._validate_unique will be set to True by BaseModelForm.clean().
        # It is False by default so overriding self.clean() and failing to call
        # super will stop validate_unique from being called.
        self._validate_unique = False
        super(BaseModelForm, self).__init__(
            data, files, auto_id, prefix, object_data, error_class,
            label_suffix, empty_permitted, use_required_attribute=use_required_attribute,
        )
        # Apply ``limit_choices_to`` to each field.
        for field_name in self.fields:
            formfield = self.fields[field_name]
            if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
                limit_choices_to = formfield.get_limit_choices_to()
                if limit_choices_to is not None:
                    formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)

    def _get_validation_exclusions(self):
        """
        For backwards-compatibility, several types of fields need to be
        excluded from model validation. See the following tickets for
        details: #12507, #12521, #12553
        """
        exclude = []
        # Build up a list of fields that should be excluded from model field
        # validation and unique checks.
        for f in self.instance._meta.fields:
            field = f.name
            # Exclude fields that aren't on the form. The developer may be
            # adding these values to the model after form validation.
            if field not in self.fields:
                exclude.append(f.name)

            # Don't perform model validation on fields that were defined
            # manually on the form and excluded via the ModelForm's Meta
            # class. See #12901.
            elif self._meta.fields and field not in self._meta.fields:
                exclude.append(f.name)
            elif self._meta.exclude and field in self._meta.exclude:
                exclude.append(f.name)

            # Exclude fields that failed form validation. There's no need for
            # the model fields to validate them as well.
            elif field in self._errors.keys():
                exclude.append(f.name)

            # Exclude empty fields that are not required by the form, if the
            # underlying model field is required. This keeps the model field
            # from raising a required error. Note: don't exclude the field from
            # validation if the model field allows blanks. If it does, the blank
            # value may be included in a unique check, so cannot be excluded
            # from validation.
            else:
                form_field = self.fields[field]
                field_value = self.cleaned_data.get(field)
                if not f.blank and not form_field.required and field_value in form_field.empty_values:
                    exclude.append(f.name)
        return exclude

    def clean(self):
        # Flag that validate_unique() should run in _post_clean(); see the
        # comment in __init__ about overriding clean() without super().
        self._validate_unique = True
        return self.cleaned_data

    def _update_errors(self, errors):
        # Override any validation error messages defined at the model level
        # with those defined at the form level.
        opts = self._meta

        # Allow the model generated by construct_instance() to raise
        # ValidationError and have them handled in the same way as others.
        if hasattr(errors, 'error_dict'):
            error_dict = errors.error_dict
        else:
            error_dict = {NON_FIELD_ERRORS: errors}

        for field, messages in error_dict.items():
            if (field == NON_FIELD_ERRORS and opts.error_messages and
                    NON_FIELD_ERRORS in opts.error_messages):
                error_messages = opts.error_messages[NON_FIELD_ERRORS]
            elif field in self.fields:
                error_messages = self.fields[field].error_messages
            else:
                continue

            for message in messages:
                if (isinstance(message, ValidationError) and
                        message.code in error_messages):
                    message.message = error_messages[message.code]

        self.add_error(None, errors)

    def _post_clean(self):
        opts = self._meta

        exclude = self._get_validation_exclusions()

        # Foreign Keys being used to represent inline relationships
        # are excluded from basic field value validation. This is for two
        # reasons: firstly, the value may not be supplied (#12507; the
        # case of providing new values to the admin); secondly the
        # object being referred to may not yet fully exist (#12749).
        # However, these fields *must* be included in uniqueness checks,
        # so this can't be part of _get_validation_exclusions().
        for name, field in self.fields.items():
            if isinstance(field, InlineForeignKeyField):
                exclude.append(name)

        try:
            self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
        except ValidationError as e:
            self._update_errors(e)

        try:
            self.instance.full_clean(exclude=exclude, validate_unique=False)
        except ValidationError as e:
            self._update_errors(e)

        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()

    def validate_unique(self):
        """
        Calls the instance's validate_unique() method and updates the form's
        validation errors if any were raised.
        """
        exclude = self._get_validation_exclusions()
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError as e:
            self._update_errors(e)

    def _save_m2m(self):
        """
        Save the many-to-many fields and generic relations for this form.
        """
        cleaned_data = self.cleaned_data
        exclude = self._meta.exclude
        fields = self._meta.fields
        opts = self.instance._meta
        # Note that for historical reasons we want to include also
        # private_fields here. (GenericRelation was previously a fake
        # m2m field).
        for f in chain(opts.many_to_many, opts.private_fields):
            if not hasattr(f, 'save_form_data'):
                continue
            if fields and f.name not in fields:
                continue
            if exclude and f.name in exclude:
                continue
            if f.name in cleaned_data:
                f.save_form_data(self.instance, cleaned_data[f.name])

    def save(self, commit=True):
        """
        Save this form's self.instance object if commit=True. Otherwise, add
        a save_m2m() method to the form which can be called after the instance
        is saved manually at a later time. Return the model instance.
        """
        if self.errors:
            raise ValueError(
                "The %s could not be %s because the data didn't validate." % (
                    self.instance._meta.object_name,
                    'created' if self.instance._state.adding else 'changed',
                )
            )
        if commit:
            # If committing, save the instance and the m2m data immediately.
            self.instance.save()
            self._save_m2m()
        else:
            # If not committing, add a method to the form to allow deferred
            # saving of m2m data.
            self.save_m2m = self._save_m2m
        return self.instance

    save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
    """Concrete ModelForm base class; subclasses declare a model on ``Meta``
    and the metaclass builds the form fields from it."""
    pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
                      formfield_callback=None, widgets=None, localized_fields=None,
                      labels=None, help_texts=None, error_messages=None,
                      field_classes=None):
    """
    Build and return a ModelForm subclass for the given model.

    ``fields`` is an optional list of field names; if given (or '__all__'),
    only those fields are included. ``exclude`` removes the named fields even
    when they appear in ``fields``. ``widgets``, ``labels``, ``help_texts``,
    ``error_messages`` and ``field_classes`` map model field names to the
    corresponding form-field option. ``localized_fields`` lists fields to
    localize. ``formfield_callback`` is a callable taking a model field and
    returning a form field.

    Raises ImproperlyConfigured when neither ``fields`` nor ``exclude`` is
    defined here or on the parent form's Meta.
    """
    # Collect every Meta option the caller actually supplied. FIXME: ideally
    # a ModelForm could be constructed without a throwaway inner Meta class.
    meta_attrs = {'model': model}
    optional_meta = (
        ('fields', fields),
        ('exclude', exclude),
        ('widgets', widgets),
        ('localized_fields', localized_fields),
        ('labels', labels),
        ('help_texts', help_texts),
        ('error_messages', error_messages),
        ('field_classes', field_classes),
    )
    meta_attrs.update(
        (name, value) for name, value in optional_meta if value is not None
    )
    # Inherit from the parent form's inner Meta (when present) so options
    # declared there are preserved.
    bases = (form.Meta, object) if hasattr(form, 'Meta') else (object,)
    Meta = type(str('Meta'), bases, meta_attrs)
    if formfield_callback:
        Meta.formfield_callback = staticmethod(formfield_callback)
    # Give the generated form class a readable name, e.g. "ArticleForm".
    class_name = model.__name__ + str('Form')
    form_class_attrs = {
        'Meta': Meta,
        'formfield_callback': formfield_callback
    }
    if (getattr(Meta, 'fields', None) is None and
            getattr(Meta, 'exclude', None) is None):
        raise ImproperlyConfigured(
            "Calling modelform_factory without defining 'fields' or "
            "'exclude' explicitly is prohibited."
        )
    # Instantiate type(form) so a custom metaclass on the parent is honored.
    return type(form)(class_name, (form,), form_class_attrs)
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """
    model = None
    # Set of fields that must be unique among forms of this set.
    unique_fields = set()
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        # 'initial' applies to the *extra* forms only; existing forms take
        # their data from the queryset.
        self.queryset = queryset
        self.initial_extra = kwargs.pop('initial', None)
        defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
        defaults.update(kwargs)
        super(BaseModelFormSet, self).__init__(**defaults)
    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if not (self.data or self.files):
            return len(self.get_queryset())
        return super(BaseModelFormSet, self).initial_form_count()
    def _existing_object(self, pk):
        """Return the queryset object with the given pk, caching by pk."""
        if not hasattr(self, '_object_dict'):
            self._object_dict = {o.pk: o for o in self.get_queryset()}
        return self._object_dict.get(pk)
    def _get_to_python(self, field):
        """
        If the field is a related field, fetch the concrete field's (that
        is, the ultimate pointed-to field's) to_python.
        """
        while field.remote_field is not None:
            field = field.remote_field.get_related_field()
        return field.to_python
    def _construct_form(self, i, **kwargs):
        # For bound initial forms, resolve the submitted pk back to the
        # existing model instance so the form edits it in place.
        if self.is_bound and i < self.initial_form_count():
            pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
            pk = self.data[pk_key]
            pk_field = self.model._meta.pk
            to_python = self._get_to_python(pk_field)
            pk = to_python(pk)
            kwargs['instance'] = self._existing_object(pk)
        if i < self.initial_form_count() and 'instance' not in kwargs:
            kwargs['instance'] = self.get_queryset()[i]
        if i >= self.initial_form_count() and self.initial_extra:
            # Set initial values for extra forms
            try:
                kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
            except IndexError:
                pass
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
    def get_queryset(self):
        """Return (and cache) the ordered queryset backing this formset."""
        if not hasattr(self, '_queryset'):
            if self.queryset is not None:
                qs = self.queryset
            else:
                qs = self.model._default_manager.get_queryset()
            # If the queryset isn't already ordered we need to add an
            # artificial ordering here to make sure that all formsets
            # constructed from this queryset have the same form order.
            if not qs.ordered:
                qs = qs.order_by(self.model._meta.pk.name)
            # Removed queryset limiting here. As per discussion re: #13023
            # on django-dev, max_num should not prevent existing
            # related objects/inlines from being displayed.
            self._queryset = qs
        return self._queryset
    def save_new(self, form, commit=True):
        """Saves and returns a new model instance for the given form."""
        return form.save(commit=commit)
    def save_existing(self, form, instance, commit=True):
        """Saves and returns an existing model instance for the given form."""
        return form.save(commit=commit)
    def delete_existing(self, obj, commit=True):
        """Deletes an existing model instance."""
        if commit:
            obj.delete()
    def save(self, commit=True):
        """Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        if not commit:
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)
    save.alters_data = True
    def clean(self):
        """Hook into formset cleaning to run cross-form uniqueness checks."""
        self.validate_unique()
    def validate_unique(self):
        """Check unique and unique-for-date constraints across all forms."""
        # Collect unique_checks and date_checks to run from all the forms.
        all_unique_checks = set()
        all_date_checks = set()
        forms_to_delete = self.deleted_forms
        valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
        for form in valid_forms:
            exclude = form._get_validation_exclusions()
            unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
            all_unique_checks = all_unique_checks.union(set(unique_checks))
            all_date_checks = all_date_checks.union(set(date_checks))
        errors = []
        # Do each of the unique checks (unique and unique_together)
        for uclass, unique_check in all_unique_checks:
            seen_data = set()
            for form in valid_forms:
                # Get the data for the set of fields that must be unique among the forms.
                row_data = (
                    field if field in self.unique_fields else form.cleaned_data[field]
                    for field in unique_check if field in form.cleaned_data
                )
                # Reduce Model instances to their primary key values
                row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
                                 for d in row_data)
                if row_data and None not in row_data:
                    # if we've already seen it then we have a uniqueness failure
                    if row_data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_unique_error_message(unique_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        for field in unique_check:
                            if field in form.cleaned_data:
                                del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(row_data)
        # iterate over each of the date checks now
        for date_check in all_date_checks:
            seen_data = set()
            uclass, lookup, field, unique_for = date_check
            for form in valid_forms:
                # see if we have data for both fields
                if (form.cleaned_data and form.cleaned_data[field] is not None and
                        form.cleaned_data[unique_for] is not None):
                    # if it's a date lookup we need to get the data for all the fields
                    if lookup == 'date':
                        date = form.cleaned_data[unique_for]
                        date_data = (date.year, date.month, date.day)
                    # otherwise it's just the attribute on the date/datetime
                    # object
                    else:
                        date_data = (getattr(form.cleaned_data[unique_for], lookup),)
                    data = (form.cleaned_data[field],) + date_data
                    # if we've already seen it then we have a uniqueness failure
                    if data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_date_error_message(date_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(data)
        if errors:
            raise ValidationError(errors)
    def get_unique_error_message(self, unique_check):
        """Return a user-facing message for a failed uniqueness check."""
        if len(unique_check) == 1:
            return ugettext("Please correct the duplicate data for %(field)s.") % {
                "field": unique_check[0],
            }
        else:
            return ugettext("Please correct the duplicate data for %(field)s, which must be unique.") % {
                "field": get_text_list(unique_check, six.text_type(_("and"))),
            }
    def get_date_error_message(self, date_check):
        """Return a user-facing message for a failed unique-for-date check."""
        return ugettext(
            "Please correct the duplicate data for %(field_name)s "
            "which must be unique for the %(lookup)s in %(date_field)s."
        ) % {
            'field_name': date_check[2],
            'date_field': date_check[3],
            'lookup': six.text_type(date_check[1]),
        }
    def get_form_error(self):
        """Return the generic per-form duplicate-data error message."""
        return ugettext("Please correct the duplicate values below.")
    def save_existing_objects(self, commit=True):
        """Save changed initial forms and delete forms marked for deletion."""
        self.changed_objects = []
        self.deleted_objects = []
        if not self.initial_forms:
            return []
        saved_instances = []
        forms_to_delete = self.deleted_forms
        for form in self.initial_forms:
            obj = form.instance
            if form in forms_to_delete:
                # If the pk is None, it means that the object can't be
                # deleted again. Possible reason for this is that the
                # object was already deleted from the DB. Refs #14877.
                if obj.pk is None:
                    continue
                self.deleted_objects.append(obj)
                self.delete_existing(obj, commit=commit)
            elif form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances
    def save_new_objects(self, commit=True):
        """Save extra forms that contain data and aren't marked for deletion."""
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and self._should_delete_form(form):
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects
    def add_fields(self, form, index):
        """Add a hidden field for the object's primary key."""
        from django.db.models import AutoField, OneToOneField, ForeignKey
        self._pk_field = pk = self.model._meta.pk
        # If a pk isn't editable, then it won't be on the form, so we need to
        # add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoField's editable attribute is
        # True, so check for that as well.
        def pk_is_not_editable(pk):
            return (
                (not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or (
                    pk.remote_field and pk.remote_field.parent_link and
                    pk_is_not_editable(pk.remote_field.model._meta.pk)
                )
            )
        if pk_is_not_editable(pk) or pk.name not in form.fields:
            if form.is_bound:
                # If we're adding the related instance, ignore its primary key
                # as it could be an auto-generated default which isn't actually
                # in the database.
                pk_value = None if form.instance._state.adding else form.instance.pk
            else:
                try:
                    if index is not None:
                        pk_value = self.get_queryset()[index].pk
                    else:
                        pk_value = None
                except IndexError:
                    pk_value = None
            if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
                qs = pk.remote_field.model._default_manager.get_queryset()
            else:
                qs = self.model._default_manager.get_queryset()
            qs = qs.using(form.instance._state.db)
            if form._meta.widgets:
                widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
            else:
                widget = HiddenInput
            form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
        super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
                         formset=BaseModelFormSet, extra=1, can_delete=False,
                         can_order=False, max_num=None, fields=None, exclude=None,
                         widgets=None, validate_max=False, localized_fields=None,
                         labels=None, help_texts=None, error_messages=None,
                         min_num=None, validate_min=False, field_classes=None):
    """
    Returns a FormSet class for the given Django model class.

    Raises ImproperlyConfigured when neither ``fields`` nor ``exclude`` is
    supplied, either as an argument or on the form's Meta.
    """
    meta = getattr(form, 'Meta', None)
    fields_defined = getattr(meta, 'fields', fields) is not None
    exclude_defined = getattr(meta, 'exclude', exclude) is not None
    if not fields_defined and not exclude_defined:
        raise ImproperlyConfigured(
            "Calling modelformset_factory without defining 'fields' or "
            "'exclude' explicitly is prohibited."
        )
    # Build the per-row ModelForm first, then wrap it in a formset class.
    model_form = modelform_factory(
        model, form=form, fields=fields, exclude=exclude,
        formfield_callback=formfield_callback, widgets=widgets,
        localized_fields=localized_fields, labels=labels,
        help_texts=help_texts, error_messages=error_messages,
        field_classes=field_classes,
    )
    formset_class = formset_factory(
        model_form, formset, extra=extra, min_num=min_num, max_num=max_num,
        can_order=can_order, can_delete=can_delete,
        validate_min=validate_min, validate_max=validate_max,
    )
    formset_class.model = model
    return formset_class
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
    """A formset for child objects related to a parent."""
    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        # self.fk is the ForeignKey from self.model to the parent model,
        # assigned on the class by inlineformset_factory().
        if instance is None:
            self.instance = self.fk.remote_field.model()
        else:
            self.instance = instance
        self.save_as_new = save_as_new
        if queryset is None:
            queryset = self.model._default_manager
        # Limit the formset to children of this parent; an unsaved parent
        # (pk is None) can have no existing children.
        if self.instance.pk is not None:
            qs = queryset.filter(**{self.fk.name: self.instance})
        else:
            qs = queryset.none()
        self.unique_fields = {self.fk.name}
        super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
                                                queryset=qs, **kwargs)
        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
            if isinstance(self.form._meta.fields, tuple):
                self.form._meta.fields = list(self.form._meta.fields)
            self.form._meta.fields.append(self.fk.name)
    def initial_form_count(self):
        # When saving as new, every form is treated as an extra (add) form.
        if self.save_as_new:
            return 0
        return super(BaseInlineFormSet, self).initial_form_count()
    def _construct_form(self, i, **kwargs):
        form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None
            # Remove the foreign key from the form's data
            form.data[form.add_prefix(self.fk.name)] = None
        # Set the fk value here so that the form can do its validation.
        fk_value = self.instance.pk
        if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
            fk_value = getattr(self.instance, self.fk.remote_field.field_name)
            fk_value = getattr(fk_value, 'pk', fk_value)
        setattr(form.instance, self.fk.get_attname(), fk_value)
        return form
    @classmethod
    def get_default_prefix(cls):
        # Derive the prefix from the reverse accessor name of the FK.
        return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
    def save_new(self, form, commit=True):
        # Ensure the latest copy of the related instance is present on each
        # form (it may have been saved after the formset was originally
        # instantiated).
        setattr(form.instance, self.fk.name, self.instance)
        # Use commit=False so we can assign the parent key afterwards, then
        # save the object.
        obj = form.save(commit=False)
        pk_value = getattr(self.instance, self.fk.remote_field.field_name)
        setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
        if commit:
            obj.save()
        # form.save_m2m() can be called via the formset later on if commit=False
        if commit and hasattr(form, 'save_m2m'):
            form.save_m2m()
        return obj
    def add_fields(self, form, index):
        """Replace/add the parent-link field as an InlineForeignKeyField."""
        super(BaseInlineFormSet, self).add_fields(form, index)
        if self._pk_field == self.fk:
            name = self._pk_field.name
            kwargs = {'pk_field': True}
        else:
            # The foreign key field might not be on the form, so we poke at the
            # Model field to get the label, since we need that for error messages.
            name = self.fk.name
            kwargs = {
                'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
            }
            if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
                kwargs['to_field'] = self.fk.remote_field.field_name
        # If we're adding a new object, ignore a parent's auto-generated key
        # as it will be regenerated on the save request.
        if self.instance._state.adding:
            if kwargs.get('to_field') is not None:
                to_field = self.instance._meta.get_field(kwargs['to_field'])
            else:
                to_field = self.instance._meta.pk
            if to_field.has_default():
                setattr(self.instance, to_field.attname, None)
        form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
    def get_unique_error_message(self, unique_check):
        # The fk value is constant for the whole formset, so drop it from the
        # set of fields reported in uniqueness errors.
        unique_check = [field for field in unique_check if field != self.fk.name]
        return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
    """
    Finds and returns the ForeignKey from model to parent if there is one
    (returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
    True, an exception is raised if there is no ForeignKey from model to
    parent_model.
    """
    # avoid circular import
    from django.db.models import ForeignKey
    opts = model._meta
    if fk_name:
        # Look up the named field; field names are unique so at most one hit.
        candidates = [f for f in opts.fields if f.name == fk_name]
        num_candidates = len(candidates)
        if num_candidates == 1:
            fk = candidates[0]
            points_at_parent = (
                isinstance(fk, ForeignKey) and (
                    fk.remote_field.model == parent_model or
                    fk.remote_field.model in parent_model._meta.get_parent_list()
                )
            )
            if not points_at_parent:
                raise ValueError(
                    "fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
                )
        elif num_candidates == 0:
            raise ValueError(
                "'%s' has no field named '%s'." % (model._meta.label, fk_name)
            )
    else:
        # Try to discover what the ForeignKey from model to parent_model is
        candidates = [
            f for f in opts.fields
            if isinstance(f, ForeignKey) and (
                f.remote_field.model == parent_model or
                f.remote_field.model in parent_model._meta.get_parent_list()
            )
        ]
        num_candidates = len(candidates)
        if num_candidates == 1:
            fk = candidates[0]
        elif num_candidates == 0:
            if can_fail:
                return
            raise ValueError(
                "'%s' has no ForeignKey to '%s'." % (
                    model._meta.label,
                    parent_model._meta.label,
                )
            )
        else:
            raise ValueError(
                "'%s' has more than one ForeignKey to '%s'." % (
                    model._meta.label,
                    parent_model._meta.label,
                )
            )
    return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None, extra=3, can_order=False,
                          can_delete=True, max_num=None, formfield_callback=None,
                          widgets=None, validate_max=False, localized_fields=None,
                          labels=None, help_texts=None, error_messages=None,
                          min_num=None, validate_min=False, field_classes=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.
    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    if fk.unique:
        # A unique FK to the parent means at most one child per parent.
        max_num = 1
    FormSet = modelformset_factory(
        model,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        min_num=min_num,
        max_num=max_num,
        widgets=widgets,
        validate_min=validate_min,
        validate_max=validate_max,
        localized_fields=localized_fields,
        labels=labels,
        help_texts=help_texts,
        error_messages=error_messages,
        field_classes=field_classes,
    )
    FormSet.fk = fk
    return FormSet
# Fields #####################################################################
class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating that a submitted value
    matches the given parent instance in an inline.
    """
    widget = HiddenInput
    default_error_messages = {
        'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
    }
    def __init__(self, parent_instance, *args, **kwargs):
        self.parent_instance = parent_instance
        self.pk_field = kwargs.pop("pk_field", False)
        self.to_field = kwargs.pop("to_field", None)
        if self.parent_instance is not None:
            # Pre-fill with the parent's link value (pk or the to_field).
            kwargs["initial"] = (
                getattr(self.parent_instance, self.to_field)
                if self.to_field else self.parent_instance.pk
            )
        # The field is hidden and derived, so it is never required input.
        kwargs["required"] = False
        super(InlineForeignKeyField, self).__init__(*args, **kwargs)
    def clean(self, value):
        if value in self.empty_values:
            # Empty submission: a pk field cleans to None; otherwise fall
            # back to the parent instance itself.
            return None if self.pk_field else self.parent_instance
        expected = (
            getattr(self.parent_instance, self.to_field)
            if self.to_field else self.parent_instance.pk
        )
        # Compare as text so differing types (e.g. int vs str) still match.
        if force_text(value) != force_text(expected):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return self.parent_instance
    def has_changed(self, initial, data):
        # The parent link is fixed for the form, so it never counts as changed.
        return False
class ModelChoiceIterator(object):
    """Iterable that lazily renders a field's queryset as (value, label) pairs."""
    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset
    def __iter__(self):
        if self.field.empty_label is not None:
            yield ("", self.field.empty_label)
        queryset = self.queryset.all()
        # iterator() can't be combined with prefetch_related(), so only use
        # it when no prefetch lookups are configured on the queryset.
        if not queryset._prefetch_related_lookups:
            queryset = queryset.iterator()
        for obj in queryset:
            yield self.choice(obj)
    def __len__(self):
        extra = 1 if self.field.empty_label is not None else 0
        return len(self.queryset) + extra
    def choice(self, obj):
        field = self.field
        return (field.prepare_value(obj), field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. That choice is not one of'
                            ' the available choices.'),
    }
    iterator = ModelChoiceIterator
    def __init__(self, queryset, empty_label="---------",
                 required=True, widget=None, label=None, initial=None,
                 help_text='', to_field_name=None, limit_choices_to=None,
                 *args, **kwargs):
        # A required field with an initial value never needs the empty
        # choice, so drop the empty label in that case.
        if required and (initial is not None):
            self.empty_label = None
        else:
            self.empty_label = empty_label
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(self, required, widget, label, initial, help_text,
                       *args, **kwargs)
        self.queryset = queryset
        self.limit_choices_to = limit_choices_to   # limit the queryset later.
        self.to_field_name = to_field_name
    def get_limit_choices_to(self):
        """
        Returns ``limit_choices_to`` for this form field.
        If it is a callable, it will be invoked and the result will be
        returned.
        """
        if callable(self.limit_choices_to):
            return self.limit_choices_to()
        return self.limit_choices_to
    def __deepcopy__(self, memo):
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        # (re-assigning through the property refreshes widget.choices).
        result.queryset = result.queryset
        return result
    def _get_queryset(self):
        return self._queryset
    def _set_queryset(self, queryset):
        self._queryset = queryset
        # Keep the widget's choices in sync with the new queryset.
        self.widget.choices = self.choices
    queryset = property(_get_queryset, _set_queryset)
    # this method will be used to create object labels by the QuerySetIterator.
    # Override it to customize the label.
    def label_from_instance(self, obj):
        """
        This method is used to convert objects into strings; it's used to
        generate the labels for the choices presented by this object. Subclasses
        can override this method to customize the display of the choices.
        """
        return force_text(obj)
    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices
        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return self.iterator(self)
    choices = property(_get_choices, ChoiceField._set_choices)
    def prepare_value(self, value):
        # Model instances (anything with _meta) are reduced to their pk or
        # the configured to_field value.
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super(ModelChoiceField, self).prepare_value(value)
    def to_python(self, value):
        if value in self.empty_values:
            return None
        try:
            key = self.to_field_name or 'pk'
            value = self.queryset.get(**{key: value})
        except (ValueError, TypeError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value
    def validate(self, value):
        # Skip ChoiceField.validate(): membership was already checked by
        # the queryset lookup in to_python().
        return Field.validate(self, value)
    def has_changed(self, initial, data):
        initial_value = initial if initial is not None else ''
        data_value = data if data is not None else ''
        return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _('Enter a list of values.'),
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
                            ' available choices.'),
        'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
    }
    def __init__(self, queryset, required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        # Pass empty_label=None: a multi-select has no empty choice.
        super(ModelMultipleChoiceField, self).__init__(
            queryset, None, required, widget, label, initial, help_text,
            *args, **kwargs
        )
    def to_python(self, value):
        if not value:
            return []
        return list(self._check_values(value))
    def clean(self, value):
        value = self.prepare_value(value)
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')
        qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(value)
        return qs
    def _check_values(self, value):
        """
        Given a list of possible PK values, returns a QuerySet of the
        corresponding objects. Raises a ValidationError if a given value is
        invalid (not a valid PK, not in the queryset, etc.)
        """
        key = self.to_field_name or 'pk'
        # deduplicate given values to avoid creating many querysets or
        # requiring the database backend deduplicate efficiently.
        try:
            value = frozenset(value)
        except TypeError:
            # list of lists isn't hashable, for example
            raise ValidationError(
                self.error_messages['list'],
                code='list',
            )
        for pk in value:
            # The filter() call is used only to surface malformed pk values;
            # its result is discarded.
            try:
                self.queryset.filter(**{key: pk})
            except (ValueError, TypeError):
                raise ValidationError(
                    self.error_messages['invalid_pk_value'],
                    code='invalid_pk_value',
                    params={'pk': pk},
                )
        qs = self.queryset.filter(**{'%s__in' % key: value})
        # Any submitted value missing from the result set was not a valid
        # choice; compare as text so types need not match exactly.
        pks = set(force_text(getattr(o, key)) for o in qs)
        for val in value:
            if force_text(val) not in pks:
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': val},
                )
        return qs
    def prepare_value(self, value):
        # Map prepare_value over any non-string, non-model iterable.
        if (hasattr(value, '__iter__') and
                not isinstance(value, six.text_type) and
                not hasattr(value, '_meta')):
            return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
        return super(ModelMultipleChoiceField, self).prepare_value(value)
    def has_changed(self, initial, data):
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        # Order-insensitive comparison on the text form of each value.
        initial_set = set(force_text(value) for value in self.prepare_value(initial))
        data_set = set(force_text(value) for value in data)
        return data_set != initial_set
def modelform_defines_fields(form_class):
    """
    Return True when ``form_class`` is a ModelForm class whose Meta options
    explicitly declare either ``fields`` or ``exclude``.
    """
    if form_class is None or not hasattr(form_class, '_meta'):
        return False
    opts = form_class._meta
    return opts.fields is not None or opts.exclude is not None
| {
"content_hash": "47982524d91a2dd15d0450e78d670910",
"timestamp": "",
"source": "github",
"line_count": 1333,
"max_line_length": 116,
"avg_line_length": 41.90772693173293,
"alnum_prop": 0.5882784669638222,
"repo_name": "jarshwah/django",
"id": "f44ce33f6531f118dd48fe1f1b78d237003e6535",
"size": "55863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/forms/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53023"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448123"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12112516"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Layer serialization/deserialization functions."""
import threading
import tensorflow.compat.v2 as tf
from keras.engine import base_layer
from keras.engine import input_layer
from keras.engine import input_spec
from keras.layers import activation
from keras.layers import attention
from keras.layers import convolutional
from keras.layers import core
from keras.layers import locally_connected
from keras.layers import merging
from keras.layers import pooling
from keras.layers import regularization
from keras.layers import reshaping
from keras.layers import rnn
from keras.layers.normalization import batch_normalization
from keras.layers.normalization import batch_normalization_v1
from keras.layers.normalization import group_normalization
from keras.layers.normalization import layer_normalization
from keras.layers.normalization import unit_normalization
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import discretization
from keras.layers.preprocessing import hashed_crossing
from keras.layers.preprocessing import hashing
from keras.layers.preprocessing import image_preprocessing
from keras.layers.preprocessing import integer_lookup
from keras.layers.preprocessing import (
normalization as preprocessing_normalization,
)
from keras.layers.preprocessing import string_lookup
from keras.layers.preprocessing import text_vectorization
from keras.layers.rnn import cell_wrappers
from keras.layers.rnn import gru
from keras.layers.rnn import lstm
from keras.saving.legacy import serialization
from keras.saving.legacy.saved_model import json_utils
from keras.utils import generic_utils
from keras.utils import tf_inspect as inspect
# isort: off
from tensorflow.python.util.tf_export import keras_export
# Modules scanned for Layer subclasses when building the layer
# deserialization table (see populate_deserializable_objects below).
ALL_MODULES = (
    base_layer,
    input_layer,
    activation,
    attention,
    convolutional,
    core,
    locally_connected,
    merging,
    batch_normalization_v1,
    group_normalization,
    layer_normalization,
    unit_normalization,
    pooling,
    image_preprocessing,
    regularization,
    reshaping,
    rnn,
    hashing,
    hashed_crossing,
    category_encoding,
    discretization,
    integer_lookup,
    preprocessing_normalization,
    string_lookup,
    text_vectorization,
)
# Modules whose classes overwrite same-named V1 entries when TF2 behavior
# is enabled.
ALL_V2_MODULES = (
    batch_normalization,
    layer_normalization,
    cell_wrappers,
    gru,
    lstm,
)
# ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
    """Populates dict ALL_OBJECTS with every built-in layer.

    The registry is kept in the thread-local ``LOCAL`` and is rebuilt
    whenever the TF1/TF2 mode has changed since the last population.
    """
    global LOCAL
    # First call on this thread: create an empty registry.
    if not hasattr(LOCAL, "ALL_OBJECTS"):
        LOCAL.ALL_OBJECTS = {}
        LOCAL.GENERATED_WITH_V2 = None
    if (
        LOCAL.ALL_OBJECTS
        and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled()
    ):
        # Objects dict is already generated for the proper TF version:
        # do nothing.
        return
    LOCAL.ALL_OBJECTS = {}
    LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()
    # Only collect classes that subclass the base Layer.
    base_cls = base_layer.Layer
    generic_utils.populate_dict_with_module_objects(
        LOCAL.ALL_OBJECTS,
        ALL_MODULES,
        obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls),
    )
    # Overwrite certain V1 objects with V2 versions
    if tf.__internal__.tf2.enabled():
        generic_utils.populate_dict_with_module_objects(
            LOCAL.ALL_OBJECTS,
            ALL_V2_MODULES,
            obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls),
        )
    # These deserialization aliases are added for backward compatibility,
    # as in TF 1.13, "BatchNormalizationV1" and "BatchNormalizationV2"
    # were used as class name for v1 and v2 version of BatchNormalization,
    # respectively. Here we explicitly convert them to their canonical names.
    LOCAL.ALL_OBJECTS[
        "BatchNormalizationV1"
    ] = batch_normalization_v1.BatchNormalization
    LOCAL.ALL_OBJECTS[
        "BatchNormalizationV2"
    ] = batch_normalization.BatchNormalization
    # Prevent circular dependencies.
    from keras import models
    from keras.feature_column.sequence_feature_column import (
        SequenceFeatures,
    )
    from keras.premade_models.linear import (
        LinearModel,
    )
    from keras.premade_models.wide_deep import (
        WideDeepModel,
    )
    # Non-Layer objects that must nevertheless be deserializable.
    LOCAL.ALL_OBJECTS["Input"] = input_layer.Input
    LOCAL.ALL_OBJECTS["InputSpec"] = input_spec.InputSpec
    LOCAL.ALL_OBJECTS["Functional"] = models.Functional
    LOCAL.ALL_OBJECTS["Model"] = models.Model
    LOCAL.ALL_OBJECTS["SequenceFeatures"] = SequenceFeatures
    LOCAL.ALL_OBJECTS["Sequential"] = models.Sequential
    LOCAL.ALL_OBJECTS["LinearModel"] = LinearModel
    LOCAL.ALL_OBJECTS["WideDeepModel"] = WideDeepModel
    # DenseFeatures has distinct V1/V2 implementations; pick by TF mode.
    if tf.__internal__.tf2.enabled():
        from keras.feature_column.dense_features_v2 import (
            DenseFeatures,
        )
        LOCAL.ALL_OBJECTS["DenseFeatures"] = DenseFeatures
    else:
        from keras.feature_column.dense_features import (
            DenseFeatures,
        )
        LOCAL.ALL_OBJECTS["DenseFeatures"] = DenseFeatures
    # Merging layers, function versions.
    LOCAL.ALL_OBJECTS["add"] = merging.add
    LOCAL.ALL_OBJECTS["subtract"] = merging.subtract
    LOCAL.ALL_OBJECTS["multiply"] = merging.multiply
    LOCAL.ALL_OBJECTS["average"] = merging.average
    LOCAL.ALL_OBJECTS["maximum"] = merging.maximum
    LOCAL.ALL_OBJECTS["minimum"] = merging.minimum
    LOCAL.ALL_OBJECTS["concatenate"] = merging.concatenate
    LOCAL.ALL_OBJECTS["dot"] = merging.dot
@keras_export("keras.layers.serialize")
def serialize(layer):
"""Serializes a `Layer` object into a JSON-compatible representation.
Args:
layer: The `Layer` object to serialize.
Returns:
A JSON-serializable dict representing the object's config.
Example:
```python
from pprint import pprint
model = tf.keras.models.Sequential()
model.add(tf.keras.Input(shape=(16,)))
model.add(tf.keras.layers.Dense(32, activation='relu'))
pprint(tf.keras.layers.serialize(model))
# prints the configuration of the model, as a dict.
"""
return serialization.serialize_keras_object(layer)
@keras_export("keras.layers.deserialize")
def deserialize(config, custom_objects=None):
"""Instantiates a layer from a config dictionary.
Args:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names) of custom
(non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Network, Layer...)
Example:
```python
# Configuration of Dense(32, activation='relu')
config = {
'class_name': 'Dense',
'config': {
'activation': 'relu',
'activity_regularizer': None,
'bias_constraint': None,
'bias_initializer': {'class_name': 'Zeros', 'config': {}},
'bias_regularizer': None,
'dtype': 'float32',
'kernel_constraint': None,
'kernel_initializer': {'class_name': 'GlorotUniform',
'config': {'seed': None}},
'kernel_regularizer': None,
'name': 'dense',
'trainable': True,
'units': 32,
'use_bias': True
}
}
dense_layer = tf.keras.layers.deserialize(config)
```
"""
populate_deserializable_objects()
return serialization.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name="layer",
)
def get_builtin_layer(class_name):
    """Returns class if `class_name` is registered, else returns None."""
    # Lazily build the registry on first use for this thread.
    if not hasattr(LOCAL, "ALL_OBJECTS"):
        populate_deserializable_objects()
    return LOCAL.ALL_OBJECTS.get(class_name)
def deserialize_from_json(json_string, custom_objects=None):
    """Instantiates a layer from a JSON string."""
    populate_deserializable_objects()
    # Decode the JSON into a config dict (resolving any custom-object
    # markers), then build the layer from that config.
    config = json_utils.decode_and_deserialize(
        json_string,
        module_objects=LOCAL.ALL_OBJECTS,
        custom_objects=custom_objects,
    )
    return deserialize(config, custom_objects)
| {
"content_hash": "de489003d81888461cdc562260bee7b5",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 31.75095785440613,
"alnum_prop": 0.6947025461566309,
"repo_name": "keras-team/keras",
"id": "27b928454fd9884ddc02162439248f88a4824366",
"size": "8976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/layers/serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Sets Team model options: custom permission and verbose names.

    Adds the ``can_use_teams`` permission and the "team"/"teams"
    verbose names via AlterModelOptions (metadata only, no schema change).
    """
    dependencies = [
        ("team", "0001_initial"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="team",
            options={
                "permissions": (("can_use_teams", "Can use teams"),),
                "verbose_name": "team",
                "verbose_name_plural": "teams",
            },
        ),
    ]
| {
"content_hash": "1c4083456793e803d1861aac63804760",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 22.476190476190474,
"alnum_prop": 0.510593220338983,
"repo_name": "fin/froide",
"id": "2cd066d81b88459b9503a8ef83efb2497e183900",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/team/migrations/0002_auto_20180111_1347.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
} |
from twisted.protocols import basic
from twisted.internet import protocol, reactor
class HTTPEchoProtocol(basic.LineReceiver):
    """Collects incoming request lines and echoes them back.

    The response is sent as soon as the blank line that terminates the
    HTTP request header block is received.
    """
    def __init__(self):
        self.lines = []
    def lineReceived(self, line):
        # An empty line marks the end of the request headers.
        self.lines.append(line)
        if line:
            return
        self.sendResponse()
    def sendResponse(self):
        # Minimal status line, blank separator, then the echoed request.
        self.sendLine("HTTP/1.1 200 OK")
        self.sendLine("")
        body = "You said:\r\n\r\n" + "\r\n".join(self.lines)
        self.transport.write(body)
        self.transport.loseConnection()
class HTTPEchoFactory(protocol.ServerFactory):
    """Builds one HTTPEchoProtocol per incoming connection.

    Renamed from ``HTTPEchoProtocol``: the original factory reused the
    protocol's class name, shadowing it, so ``buildProtocol`` constructed
    a new *factory* instead of a protocol instance.
    """
    def buildProtocol(self, addr):
        # Now resolves to the LineReceiver subclass defined above.
        return HTTPEchoProtocol()
reactor.listenTCP(8000, HTTPEchoFactory())
reactor.run()
| {
"content_hash": "6f22ffee7443aa543398bc11355637ab",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 28.84,
"alnum_prop": 0.6643550624133149,
"repo_name": "r2k0/py-apps",
"id": "bb4cf874d379a18b28b4ac10c53391ee420240f8",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netstuff/apps/echo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7202"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds per-article date-visibility flags.

    Introduces ``show_day``/``show_month``/``show_year`` booleans
    (default True) and re-declares ``show_featured_image`` with the
    same default.
    """
    dependencies = [
        ('article', '0007_location_non_mandatory'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='show_day',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='article',
            name='show_month',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='article',
            name='show_year',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='article',
            name='show_featured_image',
            field=models.BooleanField(default=True),
        ),
    ]
| {
"content_hash": "43ee648e4752f9c0d8e444945828f475",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 52,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.5546605293440736,
"repo_name": "PARINetwork/pari",
"id": "ce1af95c9bd40856b859d8e01c7e1dd41215668f",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "article/migrations/0008_made_date_fields_selectable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "94103"
},
{
"name": "HTML",
"bytes": "452629"
},
{
"name": "JavaScript",
"bytes": "124537"
},
{
"name": "Less",
"bytes": "229040"
},
{
"name": "Python",
"bytes": "479247"
},
{
"name": "Shell",
"bytes": "3919"
}
],
"symlink_target": ""
} |
"""Set category of ALV document revisions.
Revision ID: 22710e6fd2b1
Revises: 4a3debf40b72
Create Date: 2018-03-14 15:35:30.841219
"""
from alembic import op
import sqlalchemy as sa
from app.models.base_model import BaseEntity
from app.enums import FileCategory
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# revision identifiers, used by Alembic.
revision = '22710e6fd2b1'
down_revision = '4a3debf40b72'
# Standalone declarative base: the migration defines its own minimal
# model classes below instead of importing the application's live models.
Base = declarative_base()
# NOTE(review): `db` aliases the sqlalchemy *module* and attributes are
# attached to it so the model definitions below can use the familiar
# flask-sqlalchemy style `db.Model` / `db.relationship` — confirm this
# intentional module mutation is acceptable here.
db = sa
db.Model = Base
db.relationship = relationship
class File(db.Model, BaseEntity):
    """Minimal mapping of the `file` table as needed by this migration."""
    __tablename__ = 'file'
    hash = db.Column(db.String(200), nullable=False)
    extension = db.Column(db.String(20), nullable=False)
    category = db.Column(db.Enum(FileCategory, name='file_category'),
                         nullable=False)
    display_name = db.Column(db.String(200))
class Alv(db.Model, BaseEntity):
    """Minimal mapping of the `alv` table (only the minutes-file link)."""
    __tablename__ = 'alv'
    minutes_file_id = db.Column(db.Integer, db.ForeignKey('file.id'),
                                nullable=True)
    minutes_file = db.relationship('File', foreign_keys=[minutes_file_id])
class AlvDocumentVersion(db.Model, BaseEntity):
    """Minimal mapping of the `alv_document_version` table."""
    __tablename__ = 'alv_document_version'
    file_id = db.Column(db.Integer(), db.ForeignKey('file.id'))
    file = db.relationship('File')
def upgrade():
    """Re-categorizes files referenced by ALVs.

    Every file that is either an ALV minutes file or an ALV document
    version gets category ALV_DOCUMENT and its display_name cleared.
    """
    connection = op.get_bind()
    Session = sa.orm.sessionmaker()
    session = Session(bind=connection)
    db.session = session
    # Union of files reachable as minutes and as document versions.
    minutes = db.session.query(File).join(Alv.minutes_file)
    documents = db.session.query(File).join(AlvDocumentVersion.file)
    files = documents.union(minutes).all()
    for f in files:
        f.category = FileCategory.ALV_DOCUMENT
        f.display_name = None
    db.session.commit()
def downgrade():
    """No-op: the original categories/display names are not retained."""
    pass
| {
"content_hash": "8c9245255a5b8dd8824a44eb502f1421",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 25.450704225352112,
"alnum_prop": 0.6878804648588821,
"repo_name": "viaict/viaduct",
"id": "ff8d7bf08b192dc00b0746f3126a6cddb14d4674",
"size": "1807",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "migrations/versions/2018_03_14_22710e6fd2b1_set_category_of_alv_document_revisions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1583078"
},
{
"name": "Dockerfile",
"bytes": "1131"
},
{
"name": "HTML",
"bytes": "227955"
},
{
"name": "JavaScript",
"bytes": "63026"
},
{
"name": "Makefile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "770976"
},
{
"name": "Shell",
"bytes": "3004"
},
{
"name": "TypeScript",
"bytes": "3288"
},
{
"name": "Vue",
"bytes": "27869"
}
],
"symlink_target": ""
} |
import collections
import http
import logging
import sys
import traceback
import uuid
import flask
import flask_restful
import marshmallow
from tuxedo_mask import repositories, services, resources
app = flask.Flask(__name__)
# NOTE(review): presumably set so raised exceptions reach the
# @app.errorhandler functions below rather than Flask-RESTful's own
# error handling — confirm.
app.config['PROPAGATE_EXCEPTIONS'] = True
api = flask_restful.Api(app=app)
# Versioned REST routes.
api.add_resource(resources.ApplicationsCollectionResource, '/v1/applications/')
api.add_resource(resources.ApplicationsResource, '/v1/applications/<string:applications_sid>')
api.add_resource(resources.LoginAttemptsResource, '/v1/applications/<string:applications_sid>/login_attempts/')
api.add_resource(resources.UsersCollectionResource, '/v1/applications/<string:applications_sid>/users/')
@app.before_request
def do_before_request():
    """Initializes per-request state on ``flask.g`` and logs pool status."""
    # Flask-RESTful constructs a new Resource on every request. Should
    # these be global objects?
    flask.g.logger = logging.getLogger(name='tuxedomask')
    flask.g.service = services.TuxedoMaskService.from_configuration()
    # Correlation id included with every log record for this request.
    flask.g.message = {'topic': '', 'requests_uuid': str(uuid.uuid4())}
    flask.g.body = collections.OrderedDict()
    flask.g.http_status_code = None
    flask.g.headers = dict()
    # Resources fill body/status/headers and call this to build the
    # (body, status, headers) tuple Flask expects.
    def get_response():
        return flask.g.body, flask.g.http_status_code, flask.g.headers
    flask.g.get_response = get_response
    extra = {'topic': 'ConnectionPooling'}
    extra.update({'requests_uuid': flask.g.message['requests_uuid']})
    extra.update(services.TuxedoMaskService.get_connection_pool_status())
    flask.g.logger.info('', extra=extra)
@app.after_request
def do_after_request(response):
    """Releases the request's service resources and logs pool status."""
    flask.g.service.dispose()
    extra = {'topic': 'ConnectionPooling'}
    extra.update({'requests_uuid': flask.g.message['requests_uuid']})
    extra.update(services.TuxedoMaskService.get_connection_pool_status())
    flask.g.logger.info('', extra=extra)
    return response
@app.errorhandler(Exception)
def handle_exception(e):
    """Catch-all handler: logs the error and returns an empty 500 body."""
    message = ''.join(traceback.format_exception_only(*sys.exc_info()[:2]))
    response = flask.jsonify(dict())
    response.status_code = http.HTTPStatus.INTERNAL_SERVER_ERROR
    # is_internal_e=True makes log_e use logger.exception (full traceback).
    log_e(message=message, e=e, is_internal_e=True)
    return response
@app.errorhandler(repositories.EntityConflict)
def handle_entity_conflict(e):
    """Maps duplicate-entity errors to 409 with an endpoint-specific message."""
    # NOTE(review): an EntityConflict raised from any endpoint other than
    # the two below would raise KeyError here — confirm these are the only
    # possible sources.
    message_mapping = {
        'applicationscollectionresource': (
            """There is already an existing application with this name."""),
        'userscollectionresource': (
            """There is already an existing user with this username.""")}
    message = message_mapping[flask.request.endpoint]
    response = flask.jsonify(message)
    response.status_code = http.HTTPStatus.CONFLICT
    log_e(message=message, e=e)
    return response
@app.errorhandler(marshmallow.exceptions.ValidationError)
def handle_validation_error(e):
    """Returns marshmallow's per-field error messages with a 400 status."""
    response = flask.jsonify(e.messages)
    response.status_code = http.HTTPStatus.BAD_REQUEST
    log_e(message='There was an error when trying to validate the data.', e=e)
    return response
def log_e(message, e, is_internal_e=False):
    """Logs an error together with request context.

    Args:
        message: Human-readable summary to log.
        e: The exception instance being reported.
        is_internal_e: When True, log via ``logger.exception`` (includes
            the traceback); otherwise log at ERROR level.
    """
    extra = flask.g.message.copy()
    # Hoist the repeated environ lookup; build the context with a dict
    # literal instead of dict() over a list of 2-tuples.
    environ = flask.request.environ
    extra.update({
        'error_type': '.'.join(
            [e.__class__.__module__, e.__class__.__name__]),
        'error_message': str(e),
        'users_sid': '',
        'user_username': '',
        'request_client_ip_address': environ.get('REMOTE_ADDR'),
        'request_client_port_number': environ.get('REMOTE_PORT'),
        'request_method': environ.get('REQUEST_METHOD'),
        'request_url': flask.request.url,
        'request_authorization': environ.get('HTTP_AUTHORIZATION'),
        # NOTE(review): assumes the request body is valid UTF-8 — a binary
        # body would raise UnicodeDecodeError here; confirm.
        'request_body': flask.request.get_data().decode('utf-8'),
        'request_host': flask.request.host,
        'request_endpoint_name': flask.request.endpoint,
        'state': 'Complete',
    })
    # flask.g.user is only present for authenticated requests.
    try:
        extra['users_sid'] = flask.g.user.users_sid
        extra['user_username'] = flask.g.user.username
    except AttributeError:
        pass
    if is_internal_e:
        flask.g.logger.exception(message, extra=extra)
    else:
        flask.g.logger.error(message, extra=extra)
if __name__ == '__main__':
    # Development server only (debug enabled, localhost binding).
    app.run(host='127.0.0.1', port=5000, debug=True)
| {
"content_hash": "26c91b631805c22c88f1d61b778791cd",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 111,
"avg_line_length": 32.274809160305345,
"alnum_prop": 0.6866130558183539,
"repo_name": "dnguyen0304/tuxedo-mask",
"id": "ff64e6131692741235aaf3617e639b4275fc84a8",
"size": "4275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuxedo_mask/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52980"
},
{
"name": "Shell",
"bytes": "6402"
}
],
"symlink_target": ""
} |
"""This API defines FeatureColumn abstraction.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. For example "country:US" is a feature which is in "country" feature
column and has a feature value ("US").
Supported feature types are:
* _SparseColumn: also known as categorical features.
* _RealValuedColumn: also known as continuous features.
Supported transformations on above features are:
* Bucketization: also known as binning.
* Crossing: also known as composition or union.
* Embedding.
Typical usage example:
```python
# Define features and transformations
country = sparse_column_with_keys(column_name="native_country",
keys=["US", "BRA", ...])
country_emb = embedding_column(sparse_id_column=country, dimension=3,
combiner="sum")
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
occupation_x_country = crossed_column(columns=[occupation, country],
hash_bucket_size=10000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
my_features = [occupation_emb, age_buckets, country_emb]
# Building model via layers
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=my_features)
second_layer = fully_connected(first_layer, ...)
# Building model via tf.learn.estimators
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=my_wide_features,
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.lookup import lookup_ops as contrib_lookup_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
class _FeatureColumn(object):
  """Represents a feature column abstraction.

  To distinguish the concept of a feature family and a specific binary feature
  within a family, we refer to a feature family like "country" as a feature
  column. For example "country:US" is a feature which is in "country" feature
  column and has a feature value ("US").

  This class is an abstract class. User should not create one instance of this.
  Following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
  instances.
  """
  # NOTE(review): `__metaclass__` is Python 2 syntax; under Python 3 this
  # attribute is ignored, so the abstract methods are not enforced there.
  __metaclass__ = abc.ABCMeta
  @abc.abstractproperty
  def name(self):
    """Returns the name of column or transformed column."""
    pass
  @abc.abstractproperty
  def config(self):
    """Returns configuration of the base feature for `tf.parse_example`."""
    pass
  @abc.abstractproperty
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    pass
  @abc.abstractmethod
  def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    raise NotImplementedError("Transform is not implemented for {}.".format(
        self))
  @abc.abstractmethod
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collection=None,
                         trainable=True):
    """Returns a Tensor as an input to the first layer of neural network."""
    raise ValueError("Calling an abstract method.")
  @abc.abstractmethod
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    raise ValueError("Calling an abstract method.")
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(_FeatureColumn,
                    collections.namedtuple("_SparseColumn",
                                           ["column_name", "is_integerized",
                                            "bucket_size", "lookup_config",
                                            "combiner", "dtype"])):
  """Represents a sparse feature column also known as categorical features.

  Instances of this class are immutable. A sparse column means features are
  sparse and dictionary returned by InputBuilder contains a
  ("column_name", SparseTensor) pair.
  One and only one of bucket_size or lookup_config should be set. If
  is_integerized is True then bucket_size should be set.

  Attributes:
    column_name: A string defining sparse column name.
    is_integerized: A bool if True means type of feature is an integer.
      Integerized means we can use the feature itself as id.
    bucket_size: An int that is > 1. The number of buckets.
    lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
      configuration
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features, such as `tf.string` or `tf.int64`.

  Raises:
    TypeError: if lookup_config is not a _SparseIdLookupConfig.
    ValueError: if above expectations about input fails.
  """
  def __new__(cls,
              column_name,
              is_integerized=False,
              bucket_size=None,
              lookup_config=None,
              combiner="sum",
              dtype=dtypes.string):
    # Validate the mutually-exclusive bucket_size / lookup_config contract
    # before constructing the immutable namedtuple.
    if is_integerized and bucket_size is None:
      raise ValueError("bucket_size must be set if is_integerized is True. "
                       "column_name: {}".format(column_name))
    if is_integerized and not dtype.is_integer:
      raise ValueError("dtype must be an integer if is_integerized is True. "
                       "dtype: {}, column_name: {}.".format(dtype, column_name))
    if bucket_size is None and lookup_config is None:
      raise ValueError("one of bucket_size or lookup_config must be set. "
                       "column_name: {}".format(column_name))
    if bucket_size is not None and lookup_config:
      raise ValueError("one and only one of bucket_size or lookup_config "
                       "must be set. column_name: {}".format(column_name))
    if bucket_size is not None and bucket_size < 2:
      raise ValueError("bucket_size must be at least 2. "
                       "bucket_size: {}, column_name: {}".format(bucket_size,
                                                                 column_name))
    if ((lookup_config) and
        (not isinstance(lookup_config, _SparseIdLookupConfig))):
      raise TypeError(
          "lookup_config must be an instance of _SparseIdLookupConfig. "
          "Given one is in type {} for column_name {}".format(
              type(lookup_config), column_name))
    if (lookup_config and lookup_config.vocabulary_file and
        lookup_config.vocab_size is None):
      raise ValueError("vocab_size must be defined. "
                       "column_name: {}".format(column_name))
    return super(_SparseColumn, cls).__new__(cls, column_name, is_integerized,
                                             bucket_size, lookup_config,
                                             combiner, dtype)
  @property
  def name(self):
    return self.column_name
  @property
  def length(self):
    """Returns vocabulary or hash_bucket size."""
    if self.bucket_size is not None:
      return self.bucket_size
    return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
  @property
  def config(self):
    return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)
  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor
  # pylint: disable=unused-argument
  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    # A plain sparse column carries no per-id weights; compare
    # _WeightedSparseColumn, which returns a real weight tensor.
    return None
  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    raise ValueError("SparseColumn is not supported in DNN. "
                     "Please use embedding_column. column: {}".format(self))
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    # The linear prediction is an embedding lookup in which the
    # [length, num_outputs] table acts as the weight matrix; zeros
    # initializer keeps initial predictions at 0.
    return _create_embedding_lookup(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner,
        trainable=trainable)
class _SparseColumnIntegerized(_SparseColumn):
  """See `sparse_column_with_integerized_feature`."""
  def __new__(cls,
              column_name,
              bucket_size,
              combiner="sum",
              dtype=dtypes.int64):
    if not dtype.is_integer:
      raise ValueError("dtype must be an integer. "
                       "dtype: {}, column_name: {}".format(dtype, column_name))
    return super(_SparseColumnIntegerized, cls).__new__(cls,
                                                        column_name,
                                                        is_integerized=True,
                                                        bucket_size=bucket_size,
                                                        combiner=combiner,
                                                        dtype=dtype)
  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    # Ids are taken modulo bucket_size so every value lands in range.
    sparse_id_values = math_ops.mod(columns_to_tensors[self.name].values,
                                    self.bucket_size, name="mod")
    columns_to_tensors[self] = ops.SparseTensor(
        columns_to_tensors[self.name].indices, sparse_id_values,
        columns_to_tensors[self.name].shape)
def sparse_column_with_integerized_feature(column_name,
                                           bucket_size,
                                           combiner="sum",
                                           dtype=dtypes.int64):
  """Creates an integerized _SparseColumn.

  Use this when your features are already pre-integerized into int64 IDs.
  output_id = input_feature

  Args:
    column_name: A string defining sparse column name.
    bucket_size: An int that is > 1. The number of buckets. It should be bigger
      than maximum feature. In other words features in this column should be an
      int64 in range [0, bucket_size)
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features. It should be an integer type. Default value is
      dtypes.int64.

  Returns:
    An integerized _SparseColumn definition.

  Raises:
    ValueError: bucket_size is not greater than 1.
    ValueError: dtype is not integer.
  """
  # All argument validation happens in _SparseColumnIntegerized.__new__.
  return _SparseColumnIntegerized(column_name,
                                  bucket_size,
                                  combiner=combiner,
                                  dtype=dtype)
class _SparseColumnHashed(_SparseColumn):
  """See `sparse_column_with_hash_bucket`."""
  def __new__(cls, column_name, hash_bucket_size, combiner="sum"):
    return super(_SparseColumnHashed, cls).__new__(cls,
                                                   column_name,
                                                   bucket_size=hash_bucket_size,
                                                   combiner=combiner,
                                                   dtype=dtypes.string)
  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    # Fingerprint each string value into [0, bucket_size).
    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        columns_to_tensors[self.name].values,
        self.bucket_size,
        name="lookup")
    columns_to_tensors[self] = ops.SparseTensor(
        columns_to_tensors[self.name].indices, sparse_id_values,
        columns_to_tensors[self.name].shape)
def sparse_column_with_hash_bucket(column_name,
                                   hash_bucket_size,
                                   combiner="sum"):
  """Creates a _SparseColumn whose string features are hashed into buckets.

  Choose this variant when features arrive as raw strings and no vocabulary
  file mapping each string to an integer ID is available.
  output_id = Hash(input_feature_string) % bucket_size

  Args:
    column_name: A string defining sparse column name.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.

  Returns:
    A _SparseColumn with hashed bucket configuration

  Raises:
    ValueError: hash_bucket_size is not greater than 2.
  """
  return _SparseColumnHashed(column_name=column_name,
                             hash_bucket_size=hash_bucket_size,
                             combiner=combiner)
class _SparseColumnKeys(_SparseColumn):
  """See `sparse_column_with_keys`."""
  def __new__(cls,
              column_name,
              keys,
              default_value=-1,
              combiner="sum"):
    return super(_SparseColumnKeys, cls).__new__(
        cls,
        column_name,
        combiner=combiner,
        lookup_config=_SparseIdLookupConfig(keys=keys,
                                            vocab_size=len(keys),
                                            default_value=default_value),
        dtype=dtypes.string)
  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    # Map each string to its index in `keys`; out-of-vocabulary strings
    # map to `default_value`.
    columns_to_tensors[self] = contrib_lookup_ops.string_to_index(
        tensor=columns_to_tensors[self.name],
        mapping=list(self.lookup_config.keys),
        default_value=self.lookup_config.default_value,
        name="lookup")
def sparse_column_with_keys(column_name,
                            keys,
                            default_value=-1,
                            combiner="sum"):
  """Creates a _SparseColumn with keys.

  Look up logic is as follows:
  lookup_id = index_of_feature_in_keys if feature in keys else default_value

  Args:
    column_name: A string defining sparse column name.
    keys: a string list defining vocabulary.
    default_value: The value to use for out-of-vocabulary feature values.
      Default is -1.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.

  Returns:
    A _SparseColumnKeys with keys configuration.
  """
  # keys is stored as an (immutable) tuple inside the column definition.
  return _SparseColumnKeys(column_name,
                           tuple(keys),
                           default_value=default_value,
                           combiner=combiner)
class _WeightedSparseColumn(_FeatureColumn, collections.namedtuple(
    "_WeightedSparseColumn",
    ["sparse_id_column", "weight_column_name", "dtype"])):
  """See `weighted_sparse_column`."""
  def __new__(cls, sparse_id_column, weight_column_name, dtype):
    return super(_WeightedSparseColumn, cls).__new__(
        cls, sparse_id_column, weight_column_name, dtype)
  @property
  def name(self):
    return "{}_weighted_by_{}".format(self.sparse_id_column.name,
                                      self.weight_column_name)
  @property
  def length(self):
    """Returns id size."""
    return self.sparse_id_column.length
  @property
  def config(self):
    # Parse configuration covers both the id column and the weight column.
    config = _get_feature_config(self.sparse_id_column)
    config.update(
        {self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
    return config
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)
  def insert_transformed_feature(self, columns_to_tensors):
    """Inserts a tuple with the id and weight tensors."""
    if self.sparse_id_column not in columns_to_tensors:
      self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    # Transformed value is (id_tensor, weight_tensor); see
    # id_tensor()/weight_tensor() below for the accessors.
    columns_to_tensors[self] = tuple([
        columns_to_tensors[self.sparse_id_column],
        columns_to_tensors[self.weight_column_name]])
  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor[0]
  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    return input_tensor[1]
  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    raise ValueError("WeightedSparseColumn is not supported in DNN. "
                     "Please use embedding_column. column: {}".format(self))
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    return _create_embedding_lookup(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.sparse_id_column.combiner,
        trainable=trainable)
def weighted_sparse_column(sparse_id_column,
                           weight_column_name,
                           dtype=dtypes.float32):
  """Creates a _SparseColumn by combining sparse_id_column with a weight column.

  The returned column reads two sparse features: the ids produced by
  `sparse_id_column` and a parallel sparse feature named
  `weight_column_name` carrying one weight (value) per id.

  Example:

  ```python
  words = sparse_column_with_hash_bucket("words", 1000)
  tfidf_weighted_words = weighted_sparse_column(words, "tfidf_score")
  ```

  This assumes the model's input dictionary contains:
  * (key="words", value=word_tensor), a SparseTensor, and
  * (key="tfidf_score", value=tfidf_score_tensor), a SparseTensor,
  where word_tensor and tfidf_score_tensor share indices and shape.

  Args:
    sparse_id_column: A _SparseColumn created by a `sparse_column_with_*`
      function.
    weight_column_name: A string naming the sparse feature that holds the
      weight or value of the corresponding sparse id feature.
    dtype: Type of weights, such as `tf.float32`.

  Returns:
    A _WeightedSparseColumn composed of two sparse features: one represents
    id, the other represents weight (value) of the id feature in that example.

  Raises:
    ValueError: if dtype is not convertible to float.
  """
  # Weights must be numeric so they can participate in the weighted sum.
  dtype_is_numeric = dtype.is_integer or dtype.is_floating
  if not dtype_is_numeric:
    raise ValueError("dtype is not convertible to float. Given {}".format(
        dtype))
  return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _EmbeddingColumn(_FeatureColumn, collections.namedtuple(
    "_EmbeddingColumn",
    ["sparse_id_column", "dimension", "combiner", "initializer",
     "ckpt_to_load_from", "tensor_name_in_ckpt"])):
  """Represents an embedding column.
  Args:
    sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
      or `weighted_sparse_column` functions.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of this can be thought as example level normalizations on the column:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
  Raises:
    ValueError: if `initializer` is specified and is not callable. Also,
      if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
  """
  def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="mean",
              initializer=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "Embedding of column_name: {}".format(
                           sparse_id_column.name))
    # Checkpoint source and tensor name must be given together (or not at all).
    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    if initializer is None:
      # Default stddev scales inversely with vocab size.
      stddev = 1 / math.sqrt(sparse_id_column.length)
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(mean=0.0,
                                                          stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer, ckpt_to_load_from,
                                                tensor_name_in_ckpt)
  @property
  def name(self):
    # Derived name, e.g. "words_embedding".
    return "{}_embedding".format(self.sparse_id_column.name)
  @property
  def length(self):
    """Returns id size."""
    return self.sparse_id_column.length
  @property
  def config(self):
    # Parsing config is delegated to the underlying sparse id column.
    return _get_feature_config(self.sparse_id_column)
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    fields_values = []
    # pylint: disable=protected-access
    for k, v in self._asdict().items():
      if k == "initializer":
        # Excludes initializer from the key since we don't support allowing
        # users to specify different initializers for the same embedding column.
        # Special treatment is needed since the default str form of a
        # function contains its address, which could introduce non-determinism
        # in sorting.
        continue
      fields_values.append("{}={}".format(k, v))
    # pylint: enable=protected-access
    # This is effectively the same format as str(self), except with our special
    # treatment.
    return "%s(%s)" % (type(self).__name__, ", ".join(fields_values))
  def insert_transformed_feature(self, columns_to_tensors):
    # The embedding itself is built lazily in to_dnn_input_layer; only the
    # underlying sparse ids are materialized here.
    self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    """Returns the dense embedding lookup of the sparse ids."""
    output, embedding_weights = _create_embedding_lookup(
        input_tensor=self.sparse_id_column.id_tensor(input_tensor),
        weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
        vocab_size=self.length,
        dimension=self.dimension,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=self.initializer,
        combiner=self.combiner,
        trainable=trainable)
    if self.ckpt_to_load_from is not None:
      # A single (unpartitioned) embedding variable is restored as the
      # variable itself rather than as a one-element list.
      weights_to_restore = embedding_weights
      if len(embedding_weights) == 1:
        weights_to_restore = embedding_weights[0]
      checkpoint_utils.init_from_checkpoint(
          self.ckpt_to_load_from,
          {self.tensor_name_in_ckpt: weights_to_restore})
    return output
  # pylint: disable=unused-argument
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    # Embeddings are DNN-only; linear models should use the sparse column.
    raise ValueError("EmbeddingColumn is not supported in linear models. "
                     "Please use sparse_column. column: {}".format(self))
def embedding_column(sparse_id_column,
                     dimension,
                     combiner="mean",
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None):
  """Creates an _EmbeddingColumn.

  Args:
    sparse_id_column: A _SparseColumn created by a `sparse_column_with_*`
      or crossed_column function. Note that the `combiner` defined in
      `sparse_id_column` is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: How to reduce multiple entries in a single row; one of "mean"
      (l1 normalization), "sqrtn" (l2 normalization) or "sum" (no
      normalization). For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function for the embedding variable.
      If not specified, defaults to `tf.truncated_normal_initializer` with
      mean 0.0 and standard deviation 1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint
      name/pattern from which to restore the column weights. Required if
      `tensor_name_in_ckpt` is not None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.

  Returns:
    An _EmbeddingColumn.
  """
  return _EmbeddingColumn(sparse_id_column,
                          dimension,
                          combiner,
                          initializer,
                          ckpt_to_load_from,
                          tensor_name_in_ckpt)
class _HashedEmbeddingColumn(collections.namedtuple(
    "_HashedEmbeddingColumn", ["column_name", "size", "dimension", "combiner",
                               "initializer"]), _EmbeddingColumn):
  """See `hashed_embedding_column`.

  Embedding column whose parameters are shared via feature hashing: the
  i-th component of a value's embedding is looked up at a fingerprint of
  (value, i) in a single parameter vector of length `size`.
  """
  def __new__(cls,
              column_name,
              size,
              dimension,
              combiner="mean",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "column_name: {}".format(column_name))
    if initializer is None:
      # Fixed default stddev (unlike _EmbeddingColumn, which scales it by
      # vocabulary size).
      stddev = 0.1
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(mean=0.0,
                                                          stddev=stddev)
    return super(_HashedEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                      dimension, combiner,
                                                      initializer)
  @property
  def name(self):
    # Derived name, e.g. "words_hashed_embedding".
    return "{}_hashed_embedding".format(self.column_name)
  @property
  def config(self):
    return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}
  def insert_transformed_feature(self, columns_to_tensors):
    # The raw sparse string tensor is used directly; hashing happens inside
    # the lookup op in to_dnn_input_layer.
    columns_to_tensors[self] = columns_to_tensors[self.column_name]
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    """Returns the hashed embedding lookup as a dense DNN input layer."""
    embeddings = _create_embeddings(
        shape=[self.size],
        initializer=self.initializer,
        dtype=dtypes.float32,
        trainable=trainable,
        weight_collections=_add_variable_collection(weight_collections))
    # NOTE(review): `self.combiner` is not passed to this lookup — presumably
    # the op applies its own default reduction; confirm against the op's
    # signature if combiner behavior matters here.
    return embedding_ops.hashed_embedding_lookup_sparse(
        embeddings, input_tensor, self.dimension, name="lookup")
def hashed_embedding_column(column_name,
                            size,
                            dimension,
                            combiner="mean",
                            initializer=None):
  """Creates an embedding column of a sparse feature using parameter hashing.

  The i-th embedding component of a value v is found by retrieving an
  embedding weight whose index is a fingerprint of the pair (v, i).

  Args:
    column_name: A string defining sparse column name.
    size: An integer specifying the number of parameters in the embedding
      layer.
    dimension: An integer specifying dimension of the embedding.
    combiner: How to reduce multiple entries in a single row; one of "mean"
      (l1 normalization), "sqrtn" (l2 normalization) or "sum" (no
      normalization). For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function for the embedding variable.
      If not specified, defaults to `tf.truncated_normal_initializer` with
      mean 0 and standard deviation 0.1.

  Returns:
    A _HashedEmbeddingColumn.

  Raises:
    ValueError: if dimension or size is not a positive integer; or if
      combiner is not supported.
  """
  # Validate sizes before the combiner so the error messages match the
  # order in which a caller most likely made the mistake.
  if dimension < 1 or size < 1:
    raise ValueError("Dimension and size must be greater than 0. "
                     "dimension: {}, size: {}, column_name: {}".format(
                         dimension, size, column_name))
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
                     "combiner: {}, column_name: {}".format(
                         combiner, column_name))
  return _HashedEmbeddingColumn(column_name, size, dimension, combiner,
                                initializer)
class _RealValuedColumn(_FeatureColumn, collections.namedtuple(
    "_RealValuedColumn",
    ["column_name", "dimension", "default_value", "dtype"])):
  """Represents a real valued feature column also known as continuous features.
  Instances of this class are immutable. A real valued column means features are
  dense. It means dictionary returned by InputBuilder contains a
  ("column_name", Tensor) pair. Tensor shape should be (batch_size, 1).
  """
  def __new__(cls, column_name, dimension, default_value, dtype):
    # Store default_value as a tuple so the namedtuple stays hashable.
    if default_value is not None:
      default_value = tuple(default_value)
    return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
                                                 default_value, dtype)
  @property
  def name(self):
    return self.column_name
  @property
  def config(self):
    # FixedLenFeature expects a list default; convert back from the stored
    # tuple.
    default_value = self.default_value
    if default_value is not None:
      default_value = list(default_value)
    return {self.column_name: parsing_ops.FixedLenFeature(
        [self.dimension], self.dtype, default_value)}
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)
  def insert_transformed_feature(self, columns_to_tensors):
    # No transformation is needed for _RealValuedColumn except reshaping.
    input_tensor = columns_to_tensors[self.name]
    batch_size = input_tensor.get_shape().as_list()[0]
    # -1 lets reshape infer the batch dimension when it is not statically
    # known (as_list() yields None in that case).
    batch_size = int(batch_size) if batch_size else -1
    flattened_shape = [batch_size, self.dimension]
    columns_to_tensors[self] = array_ops.reshape(
        math_ops.to_float(input_tensor),
        flattened_shape,
        name="reshape")
  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    # Dense features feed the DNN directly; no further transformation needed.
    return input_tensor
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    def _weight(name):
      # Zero-initialized weight matrix used for the linear contribution.
      return variable_scope.get_variable(
          name,
          shape=[self.dimension, num_outputs],
          initializer=array_ops.zeros_initializer,
          collections=_add_variable_collection(weight_collections))
    if self.name:
      weight = _weight("weight")
    else:
      # Old behavior to support a subset of old checkpoints.
      weight = _weight("_weight")
    # The _RealValuedColumn has the shape of [batch_size, column.dimension].
    log_odds_by_dim = math_ops.matmul(input_tensor, weight, name="matmul")
    return log_odds_by_dim, [weight]
def real_valued_column(column_name,
                       dimension=1,
                       default_value=None,
                       dtype=dtypes.float32):
  """Creates a _RealValuedColumn.

  Args:
    column_name: A string defining real valued column name.
    dimension: An integer specifying dimension of the real valued column.
      The default is 1. The Tensor representing the _RealValuedColumn will
      have the shape of [batch_size, dimension].
    default_value: A single value compatible with dtype or a list of values
      compatible with dtype which the column takes on if data is missing.
      If None, then tf.parse_example will fail if an example does not
      contain this column. A single value is broadcast to every dimension;
      a list must have exactly `dimension` elements.
    dtype: defines the type of values. Default value is tf.float32.

  Returns:
    A _RealValuedColumn.

  Raises:
    TypeError: if dimension is not an int
    ValueError: if dimension is not a positive integer
    TypeError: if default_value is a list but its length is not equal to the
      value of `dimension`.
    TypeError: if default_value is not compatible with dtype.
    ValueError: if dtype is not convertable to tf.float32.
  """
  def _column(value):
    # Every successful exit constructs the same immutable column type.
    return _RealValuedColumn(column_name, dimension, value, dtype)

  if not isinstance(dimension, int):
    raise TypeError("dimension must be an integer. "
                    "dimension: {}, column_name: {}".format(dimension,
                                                            column_name))
  if dimension < 1:
    raise ValueError("dimension must be greater than 0. "
                     "dimension: {}, column_name: {}".format(dimension,
                                                             column_name))
  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError("dtype must be convertible to float. "
                     "dtype: {}, column_name: {}".format(dtype, column_name))

  if default_value is None:
    return _column(None)
  if isinstance(default_value, int):
    # Broadcast the scalar to every dimension; coerce to float when the
    # column's dtype is floating point.
    if dtype.is_integer:
      return _column([default_value] * dimension)
    if dtype.is_floating:
      return _column([float(default_value)] * dimension)
  if isinstance(default_value, float):
    if dtype.is_floating and (not dtype.is_integer):
      return _column([default_value] * dimension)
  if isinstance(default_value, list):
    if len(default_value) != dimension:
      raise ValueError(
          "The length of default_value must be equal to dimension. "
          "default_value: {}, dimension: {}, column_name: {}".format(
              default_value, dimension, column_name))
    # Classify the list: all-int lists may keep integer dtype; lists mixing
    # ints and floats are only valid for floating dtypes.
    all_int = all(isinstance(v, int) for v in default_value)
    all_float = all(isinstance(v, (int, float)) for v in default_value)
    if all_int:
      if dtype.is_integer:
        return _column(default_value)
      elif dtype.is_floating:
        return _column([float(v) for v in default_value])
    if all_float:
      if dtype.is_floating and (not dtype.is_integer):
        return _column([float(v) for v in default_value])
  # Any combination not handled above is a dtype/default_value mismatch.
  raise TypeError("default_value must be compatible with dtype. "
                  "default_value: {}, dtype: {}, column_name: {}".format(
                      default_value, dtype, column_name))
class _BucketizedColumn(_FeatureColumn, collections.namedtuple(
    "_BucketizedColumn", ["source_column", "boundaries"])):
  """Represents a bucketization transformation also known as binning.
  Instances of this class are immutable. Values in `source_column` will be
  bucketized based on `boundaries`.
  For example, if the inputs are:
      boundaries = [0, 10, 100]
      source_column = [[-5], [150], [10], [0], [4], [19]]
  then the bucketized feature will be:
      output = [[0], [3], [2], [1], [1], [2]]
  Attributes:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A list of floats specifying the boundaries. It has to be sorted.
      [a, b, c] defines following buckets: (-inf., a), [a, b), [b, c), [c, inf.)
  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """
  def __new__(cls, source_column, boundaries):
    if not isinstance(source_column, _RealValuedColumn):
      raise TypeError(
          "source_column must be an instance of _RealValuedColumn. "
          "source_column: {}".format(source_column))
    if not isinstance(boundaries, list) or not boundaries:
      raise ValueError("boundaries must be a non-empty list. "
                       "boundaries: {}".format(boundaries))
    # We allow bucket boundaries to be monotonically increasing
    # (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
    # de-duplicate.
    sanitized_boundaries = []
    for i in range(len(boundaries) - 1):
      if boundaries[i] == boundaries[i + 1]:
        continue
      elif boundaries[i] < boundaries[i + 1]:
        sanitized_boundaries.append(boundaries[i])
      else:
        raise ValueError("boundaries must be a sorted list. "
                         "boundaries: {}".format(boundaries))
    sanitized_boundaries.append(boundaries[len(boundaries) - 1])
    # Stored as a tuple so the namedtuple remains hashable.
    return super(_BucketizedColumn, cls).__new__(cls, source_column,
                                                 tuple(sanitized_boundaries))
  @property
  def name(self):
    # Derived name, e.g. "age_bucketized".
    return "{}_bucketized".format(self.source_column.name)
  @property
  def length(self):
    """Returns total number of buckets."""
    # n boundaries partition the line into n + 1 buckets.
    return len(self.boundaries) + 1
  @property
  def config(self):
    # Parsing config is delegated to the dense source column.
    return self.source_column.config
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)
  def insert_transformed_feature(self, columns_to_tensors):
    # Bucketize the source column.
    if self.source_column not in columns_to_tensors:
      self.source_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = bucketization_op.bucketize(
        columns_to_tensors[self.source_column],
        boundaries=list(self.boundaries),
        name="bucketize")
  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    # One-hot encode each bucket index, then flatten all source dimensions
    # into a single [batch_size, length * dimension] dense layer.
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length, 1., 0.,
            name="one_hot"),
        [-1, self.length * self.source_column.dimension],
        name="reshape")
  def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]
    if dimension > 1:
      # i1: row index repeated `dimension` times; i2: 0..dimension-1 tiled
      # per row — together they enumerate every (row, dim) coordinate.
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size),
                  1,
                  name="expand_dims"),
              [1, dimension],
              name="tile"),
          [-1],
          # NOTE(review): "rehsape" is a typo for "reshape"; left untouched
          # because it is an op name and renaming ops can affect graph and
          # checkpoint compatibility.
          name="rehsape")
      i2 = array_ops.tile(math_ops.range(0, dimension),
                          [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
    indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
    shape = math_ops.to_int64(array_ops.pack([batch_size, dimension]))
    sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
    return sparse_id_values
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    # Each (dimension, bucket) pair acts as a distinct sparse id, so every
    # bucket in every source dimension gets its own linear weight.
    return _create_embedding_lookup(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        vocab_size=self.length * self.source_column.dimension,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner="sum",
        trainable=trainable)
def bucketized_column(source_column, boundaries):
  """Creates a _BucketizedColumn.

  The values of `source_column` are binned by `boundaries`: boundaries
  [a, b, c] yield buckets (-inf, a), [a, b), [b, c), [c, inf).

  Args:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A list of floats specifying the boundaries. It has to be
      sorted.

  Returns:
    A _BucketizedColumn.

  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """
  # All validation and boundary de-duplication happens in the constructor.
  return _BucketizedColumn(source_column, boundaries)
class _CrossedColumn(_FeatureColumn, collections.namedtuple(
    "_CrossedColumn", ["columns", "hash_bucket_size", "combiner",
                       "ckpt_to_load_from", "tensor_name_in_ckpt"])):
  """Represents a cross transformation also known as composition or union.
  Instances of this class are immutable. It crosses given `columns`. Crossed
  column output will be hashed to hash_bucket_size.
  Conceptually, transformation can be thought as:
    Hash(cartesian product of features in columns) % `hash_bucket_size`
  For example, if the columns are
      SparseTensor referred by first column: shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"
      SparseTensor referred by second column: : shape = [2, 1]
      [0, 0]: "d"
      [1, 1]: "e"
  then crossed feature will look like:
      shape = [2, 2]
      [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
      [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
      [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
  Attributes:
    columns: An iterable of _FeatureColumn. Items can be an instance of
      _SparseColumn, _CrossedColumn, or _BucketizedColumn.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of this can be thought as example level normalizations on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
  Raises:
    TypeError: if all items in columns are not an instance of _SparseColumn,
      _CrossedColumn, or _BucketizedColumn or
      hash_bucket_size is not an int.
    ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
      if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
  """
  @staticmethod
  def _is_crossable(column):
    # Only sparse-like column types can participate in a cross.
    return isinstance(column,
                      (_SparseColumn, _CrossedColumn, _BucketizedColumn))
  def __new__(cls, columns, hash_bucket_size, combiner="sum",
              ckpt_to_load_from=None, tensor_name_in_ckpt=None):
    for column in columns:
      if not _CrossedColumn._is_crossable(column):
        raise TypeError("columns must be a set of _SparseColumn, "
                        "_CrossedColumn, or _BucketizedColumn instances. "
                        "column: {}".format(column))
    if len(columns) < 2:
      raise ValueError("columns must contain at least 2 elements. "
                       "columns: {}".format(columns))
    if not isinstance(hash_bucket_size, int):
      raise TypeError("hash_bucket_size must be an int. "
                      "hash_bucket_size: {}".format(hash_bucket_size))
    if hash_bucket_size < 2:
      raise ValueError("hash_bucket_size must be at least 2. "
                       "hash_bucket_size: {}".format(hash_bucket_size))
    # Checkpoint source and tensor name must be given together (or not at all).
    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    # Sort by name so that logically-equal crosses compare and hash equal
    # regardless of the order in which columns were supplied.
    sorted_columns = sorted([column for column in columns],
                            key=lambda column: column.name)
    return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
                                              hash_bucket_size, combiner,
                                              ckpt_to_load_from,
                                              tensor_name_in_ckpt)
  @property
  def name(self):
    # Derived name, e.g. "country_X_language" for a two-way cross.
    sorted_names = sorted([column.name for column in self.columns])
    return "_X_".join(sorted_names)
  @property
  def config(self):
    # Union of the parsing configs of all constituent columns.
    config = {}
    for column in self.columns:
      config.update(_get_feature_config(column))
    return config
  @property
  def length(self):
    """Returns total number of buckets."""
    return self.hash_bucket_size
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)
  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor
  # pylint: disable=unused-argument
  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    # Crossed columns carry no per-id weights.
    return None
  def insert_transformed_feature(self, columns_to_tensors):
    """Handles cross transformation."""
    def _collect_leaf_level_columns(cross):
      """Collects base columns contained in the cross."""
      # Nested crosses are flattened recursively into their base columns.
      leaf_level_columns = []
      for c in cross.columns:
        if isinstance(c, _CrossedColumn):
          leaf_level_columns.extend(_collect_leaf_level_columns(c))
        else:
          leaf_level_columns.append(c)
      return leaf_level_columns
    feature_tensors = []
    for c in _collect_leaf_level_columns(self):
      if isinstance(c, _SparseColumn):
        feature_tensors.append(columns_to_tensors[c.name])
      else:
        if c not in columns_to_tensors:
          c.insert_transformed_feature(columns_to_tensors)
        if isinstance(c, _BucketizedColumn):
          # Bucketized output is dense; convert to sparse ids for crossing.
          feature_tensors.append(c.to_sparse_tensor(columns_to_tensors[c]))
        else:
          feature_tensors.append(columns_to_tensors[c])
    columns_to_tensors[self] = sparse_feature_cross_op.sparse_feature_cross(
        feature_tensors,
        hashed_output=True,
        num_buckets=self.hash_bucket_size,
        name="cross")
  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    # Crossed ids have no dense representation; DNN users must wrap this
    # column in an embedding_column instead.
    raise ValueError("CrossedColumn is not supported in DNN. "
                     "Please use embedding_column. column: {}".format(self))
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and the created weight list."""
    output, embedding_weights = _create_embedding_lookup(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner,
        trainable=trainable)
    if self.ckpt_to_load_from is not None:
      # A single (unpartitioned) weight variable is restored as the variable
      # itself rather than as a one-element list.
      weights_to_restore = embedding_weights
      if len(embedding_weights) == 1:
        weights_to_restore = embedding_weights[0]
      checkpoint_utils.init_from_checkpoint(
          self.ckpt_to_load_from,
          {self.tensor_name_in_ckpt: weights_to_restore})
    return output, embedding_weights
def crossed_column(columns, hash_bucket_size, combiner="sum",
                   ckpt_to_load_from=None,
                   tensor_name_in_ckpt=None):
  """Creates a _CrossedColumn.

  Args:
    columns: An iterable of _FeatureColumn. Items can be an instance of
      _SparseColumn, _CrossedColumn, or _BucketizedColumn.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A combiner string, supports sum, mean, sqrtn.
    ckpt_to_load_from: (Optional). String representing checkpoint
      name/pattern from which to restore the column weights. Required if
      `tensor_name_in_ckpt` is not None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.

  Returns:
    A _CrossedColumn.

  Raises:
    TypeError: if any item in columns is not an instance of _SparseColumn,
      _CrossedColumn, or _BucketizedColumn, or hash_bucket_size is not an
      int.
    ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1.
  """
  # All argument validation happens inside _CrossedColumn.__new__.
  return _CrossedColumn(columns,
                        hash_bucket_size,
                        combiner=combiner,
                        ckpt_to_load_from=ckpt_to_load_from,
                        tensor_name_in_ckpt=tensor_name_in_ckpt)
class DataFrameColumn(_FeatureColumn,
                      collections.namedtuple("DataFrameColumn",
                                             ["column_name", "series"])):
  """Represents a feature column produced from a `DataFrame`.

  Instances of this class are immutable. A `DataFrame` column may be dense or
  sparse, and may have any shape, with the constraint that dimension 0 is
  batch_size.

  Args:
    column_name: a name for this column
    series: a `Series` to be wrapped, which has already had its base features
      substituted with `PredefinedSeries`.
  """
  def __new__(cls, column_name, series):
    return super(DataFrameColumn, cls).__new__(cls, column_name, series)
  @property
  def name(self):
    return self.column_name
  @property
  def config(self):
    # Parsing config is delegated to the wrapped series.
    return self.series.required_base_features()
  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return self.name
  def insert_transformed_feature(self, columns_to_tensors):
    # The cache must already contain mappings from the expected base feature
    # names to Tensors.
    # Passing columns_to_tensors as the cache here means that multiple outputs
    # of the transform will be cached, keyed by the repr of their associated
    # TransformedSeries.
    # The specific requested output ends up in columns_to_tensors twice: once
    # keyed by the TransformedSeries repr, and once keyed by this
    # DataFrameColumn instance.
    columns_to_tensors[self] = self.series.build(columns_to_tensors)
  # pylint: disable=unused-argument
  def to_dnn_input_layer(self,
                         input_tensor,
                         weight_collections=None,
                         trainable=True):
    """Returns input_tensor with rank >= 2, as required by Estimator."""
    # DataFrame typically provides Tensors of shape [batch_size],
    # but Estimator requires shape [batch_size, 1]
    dims = input_tensor.get_shape().ndims
    if dims == 0:
      raise ValueError(
          "Can't build input layer from tensor of shape (): {}".format(
              self.column_name))
    elif dims == 1:
      return array_ops.expand_dims(input_tensor, 1, name="expand_dims")
    else:
      return input_tensor
  # TODO(soergel): This mirrors RealValuedColumn for now, but should become
  # better abstracted with less code duplication when we add other kinds.
  def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    # Fix: the original read `self.dimension`, but this namedtuple only has
    # fields ["column_name", "series"], so every call raised AttributeError.
    # Derive the feature dimension from the input tensor's static shape
    # instead (assumes shape [batch_size, dimension], as produced by
    # to_dnn_input_layer — TODO confirm for higher-rank series).
    input_dimension = input_tensor.get_shape().as_list()[1]
    def _weight(name):
      # Zero-initialized weight matrix used for the linear contribution.
      return variable_scope.get_variable(
          name,
          shape=[input_dimension, num_outputs],
          initializer=array_ops.zeros_initializer,
          collections=_add_variable_collection(weight_collections))
    if self.name:
      weight = _weight("weight")
    else:
      # Old behavior to support a subset of old checkpoints.
      weight = _weight("_weight")
    log_odds_by_dim = math_ops.matmul(input_tensor, weight, name="matmul")
    return log_odds_by_dim, [weight]
  def __eq__(self, other):
    # Fix: the original compared `self.__dict__`, which is empty for
    # namedtuple-backed instances (all state lives in the tuple), so any two
    # DataFrameColumns compared equal. Compare the namedtuple fields instead,
    # which is also consistent with the inherited tuple __hash__.
    if isinstance(other, self.__class__):
      return tuple(self) == tuple(other)
    else:
      return False
  def __ne__(self, other):
    return not self.__eq__(other)
def _get_feature_config(feature_column):
  """Returns configuration for the base feature defined in feature_column."""
  if not isinstance(feature_column, _FeatureColumn):
    raise TypeError(
        "feature_columns should only contain instances of _FeatureColumn. "
        "Given column is {}".format(feature_column))
  # Only concrete column types that expose a `config` property are supported.
  supported_column_types = (_SparseColumn, _WeightedSparseColumn,
                            _EmbeddingColumn, _RealValuedColumn,
                            _BucketizedColumn, _CrossedColumn)
  if isinstance(feature_column, supported_column_types):
    return feature_column.config
  raise TypeError("Not supported _FeatureColumn type. "
                  "Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
    """Helper that prepares features config from input feature_columns.

    The returned feature config can be used as arg 'features' in
    tf.parse_example.

    Typical usage example:

    ```python
    country = sparse_column_with_vocabulary_file("country", VOCAB_FILE)
    age = real_valued_column("age")
    click_bucket = bucketized_column(
        real_valued_column("historical_click_ratio"),
        boundaries=[i/10. for i in range(10)])
    country_x_click = crossed_column([country, click_bucket], 10)
    feature_columns = set([age, click_bucket, country_x_click])
    batch_examples = tf.parse_example(
        serialized_examples,
        create_feature_spec_for_parsing(feature_columns))
    ```

    Args:
      feature_columns: An iterable containing all the feature columns. All
        items should be instances of classes derived from _FeatureColumn.

    Returns:
      A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
    """
    # Later columns overwrite earlier ones on key collisions, exactly as the
    # dict.update-in-a-loop formulation would.
    return {key: value
            for column in feature_columns
            for key, value in _get_feature_config(column).items()}
def make_place_holder_tensors_for_base_features(feature_columns):
    """Returns placeholder tensors for inference.

    Args:
      feature_columns: An iterable containing all the feature columns. All
        items should be instances of classes derived from _FeatureColumn.

    Returns:
      A dict mapping feature keys to SparseTensors (sparse columns) or
      placeholder Tensors (dense columns).
    """
    # Derive the parse spec first; its entries tell us which placeholder
    # flavor (sparse vs dense) each base feature needs.
    parse_spec = create_feature_spec_for_parsing(feature_columns)
    placeholders = {}
    for feature_name, parse_info in parse_spec.items():
        placeholder_name = "Placeholder_{}".format(feature_name)
        if isinstance(parse_info, parsing_ops.VarLenFeature):
            # Sparse placeholder for sparse tensors.
            placeholders[feature_name] = array_ops.sparse_placeholder(
                parse_info.dtype, name=placeholder_name)
        else:
            # Simple placeholder for dense tensors.
            placeholders[feature_name] = array_ops.placeholder(
                parse_info.dtype,
                shape=(None, parse_info.shape[0]),
                name=placeholder_name)
    return placeholders
class _SparseIdLookupConfig(
    collections.namedtuple("_SparseIdLookupConfig",
                           ["vocabulary_file", "keys", "num_oov_buckets",
                            "vocab_size", "default_value"])):
    """Defines lookup configuration for a sparse feature.

    An immutable object defines lookup table configuration used by
    tf.feature_to_id_v2.

    Attributes:
      vocabulary_file: The vocabulary filename. vocabulary_file cannot be
        combined with keys.
      keys: A 1-D string iterable that specifies the mapping of strings to
        indices. It means a feature in keys will map to it's index in keys.
      num_oov_buckets: The number of out-of-vocabulary buckets. If zero all
        out of vocabulary features will be ignored.
      vocab_size: Number of the elements in the vocabulary.
      default_value: The value to use for out-of-vocabulary feature values.
        Defaults to -1.
    """

    def __new__(cls,
                vocabulary_file=None,
                keys=None,
                num_oov_buckets=0,
                vocab_size=None,
                default_value=-1):
        # This override exists only to supply keyword defaults, which a plain
        # namedtuple cannot express.
        return super(_SparseIdLookupConfig, cls).__new__(
            cls, vocabulary_file, keys, num_oov_buckets, vocab_size,
            default_value)
def _add_variable_collection(weight_collections):
    """Return `weight_collections` with GraphKeys.VARIABLES added (deduped)."""
    if not weight_collections:
        # Falsy inputs (None or empty) are passed through unchanged.
        return weight_collections
    merged = set(weight_collections)
    merged.add(ops.GraphKeys.VARIABLES)
    return list(merged)
def _create_embeddings(shape, dtype, initializer, trainable, weight_collections,
                       name="weights"):
    """Creates embedding variable.

    If called within the scope of a partitioner, will partition the variable
    and return a list of `tf.Variable`. If no partitioner is specified,
    returns a list with just one variable.

    Args:
      shape: shape of the embeddding. Note this is not the shape of
        partitioned variables.
      dtype: type of the embedding. Also the shape of each partitioned
        variable.
      initializer: A variable initializer function to be used in embedding
        variable initialization.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      weight_collections: List of graph collections to which embedding
        variables are added.
      name: A string. The name of the embedding variable.

    Returns:
      A list of `tf.Variable` containing the partitioned embeddings.

    Raises:
      ValueError: If initializer is None or not callable.
    """
    if not initializer:
        raise ValueError("initializer must be defined.")
    if not callable(initializer):
        raise ValueError("initializer must be callable.")
    var = contrib_variables.model_variable(name=name,
                                           shape=shape,
                                           dtype=dtype,
                                           initializer=initializer,
                                           trainable=trainable,
                                           collections=weight_collections)
    # model_variable returns a single Variable unless a partitioner is in
    # scope, in which case it returns a `_PartitionedVariable`.
    if not isinstance(var, variables.Variable):
        return var._get_variable_list()  # pylint: disable=protected-access
    return [var]
def _create_embedding_lookup(input_tensor, weight_tensor, vocab_size, dimension,
                             weight_collections, initializer, combiner,
                             trainable, name="weights"):
    """Creates embedding variable and does a lookup.

    Args:
      input_tensor: A `SparseTensor` which should contain sparse id to look
        up.
      weight_tensor: A `SparseTensor` with the same shape and indices as
        `input_tensor`, which contains the float weights corresponding to
        each sparse id, or None if all weights are assumed to be 1.0.
      vocab_size: An integer specifying the vocabulary size.
      dimension: An integer specifying the embedding vector dimension.
      weight_collections: List of graph collections to which weights are
        added.
      initializer: A variable initializer function to be used in embedding
        variable initialization.
      combiner: A string specifying how to reduce if the sparse column is
        multivalent. Currently "mean", "sqrtn" and "sum" are supported:
          * "sum": do not normalize features in the column
          * "mean": do l1 normalization on features in the column
          * "sqrtn": do l2 normalization on features in the column
        For more information: `tf.embedding_lookup_sparse`.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      name: A string specifying the name of the embedding variable.

    Returns:
      A Tensor with shape [batch_size, dimension] and embedding Variable.
    """
    embeddings = _create_embeddings(name=name,
                                    shape=[vocab_size, dimension],
                                    dtype=dtypes.float32,
                                    initializer=initializer,
                                    trainable=trainable,
                                    weight_collections=weight_collections)
    # default_id=0 maps ids with no embedding (and empty rows) to row 0.
    looked_up = embedding_ops.safe_embedding_lookup_sparse(
        embeddings,
        input_tensor,
        sparse_weights=weight_tensor,
        default_id=0,
        combiner=combiner,
        name=name)
    return looked_up, embeddings
| {
"content_hash": "d12f9366e58112898a95cf713ea8ed99",
"timestamp": "",
"source": "github",
"line_count": 1653,
"max_line_length": 82,
"avg_line_length": 40.1826981246219,
"alnum_prop": 0.6317033512992684,
"repo_name": "rew4332/tensorflow",
"id": "176757d017b0b9eb8f404365528d431543dae0e1",
"size": "67111",
"binary": false,
"copies": "1",
"ref": "refs/heads/rew4332-patch-1",
"path": "tensorflow/contrib/layers/python/layers/feature_column.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "177722"
},
{
"name": "C++",
"bytes": "11146864"
},
{
"name": "CMake",
"bytes": "36462"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "918222"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10844"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "21265"
},
{
"name": "Objective-C",
"bytes": "6942"
},
{
"name": "Objective-C++",
"bytes": "61636"
},
{
"name": "Protocol Buffer",
"bytes": "122032"
},
{
"name": "Python",
"bytes": "9661955"
},
{
"name": "Shell",
"bytes": "242722"
},
{
"name": "TypeScript",
"bytes": "429603"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
from future.builtins import int, input
from optparse import make_option
from socket import gethostname
from django import VERSION
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.management.commands import migrate
from django.db import connection
from mezzanine.conf import settings
from mezzanine.utils.tests import copy_test_to_media
# Credentials for the superuser auto-created by create_user() — only used
# when settings.DEBUG is on and no users exist yet.
DEFAULT_USERNAME = "admin"
DEFAULT_EMAIL = "example@example.com"
DEFAULT_PASSWORD = "default"
class Command(BaseCommand):
    # One-shot management command: migrates the schema, then seeds the site,
    # superuser, pages, shop data, and translation fields as applicable.
    help = "Performs initial Mezzanine database setup."
    can_import_settings = True

    def __init__(self, *args, **kwargs):
        """
        Adds extra command options (executed only by Django <= 1.7).
        """
        super(Command, self).__init__(*args, **kwargs)
        if VERSION[0] == 1 and VERSION[1] <= 7:
            # Pre-1.8 Django still uses optparse-style option_list; extend
            # the migrate command's options with our --nodata flag.
            self.option_list = migrate.Command.option_list + (
                make_option("--nodata", action="store_true", dest="nodata",
                            default=False, help="Do not add demo data."),)

    def add_arguments(self, parser):
        """
        Adds extra command options (executed only by Django >= 1.8).
        """
        parser.add_argument("--nodata", action="store_true", dest="nodata",
                            default=False, help="Do not add demo data.")
        parser.add_argument("--noinput", action="store_false",
                            dest="interactive", default=True, help="Do not prompt the user "
                            "for input of any kind.")

    def handle(self, **options):
        """
        Run migrations, then each setup step whose required apps are installed.
        """
        # If a Mezzanine table already exists, setup has already run once.
        if "conf_setting" in connection.introspection.table_names():
            raise CommandError("Database already created, you probably "
                               "want the migrate command")
        self.verbosity = int(options.get("verbosity", 0))
        self.interactive = int(options.get("interactive", 0))
        self.no_data = int(options.get("nodata", 0))
        call_command("migrate", verbosity=self.verbosity,
                     interactive=self.interactive)
        # Each (step, required apps) pair below runs only when all of its
        # apps are present in INSTALLED_APPS.
        mapping = [
            [self.create_site, ["django.contrib.sites"]],
            [self.create_user, ["django.contrib.auth"]],
            [self.translation_fields, ["modeltranslation"]],
            [self.create_pages, ["mezzanine.pages", "mezzanine.forms",
                                 "mezzanine.blog", "mezzanine.galleries"]],
            [self.create_shop, ["cartridge.shop"]],
        ]
        for func, apps in mapping:
            if set(apps).issubset(set(settings.INSTALLED_APPS)):
                func()

    def confirm(self, prompt):
        """
        Ask a yes/no question; non-interactive runs always answer yes.
        """
        if not self.interactive:
            return True
        confirm = input(prompt)
        while confirm not in ("yes", "no"):
            confirm = input("Please enter either 'yes' or 'no': ")
        return confirm == "yes"

    def create_site(self):
        """
        Create (or update) the single Site record, prompting for the domain.
        """
        domain = "127.0.0.1:8000" if settings.DEBUG else gethostname()
        if self.interactive:
            entered = input("\nA site record is required.\nPlease "
                            "enter the domain and optional port in "
                            "the format 'domain:port'.\nFor example "
                            "'localhost:8000' or 'www.example.com'. "
                            "\nHit enter to use the default (%s): " %
                            domain)
            if entered:
                entered = entered.strip("': ")
                domain = entered
        if self.verbosity >= 1:
            print("\nCreating default site record: %s ...\n" % domain)
        try:
            # Site.objects.get() assumes at most one Site row at this point.
            site = Site.objects.get()
        except Site.DoesNotExist:
            site = Site()
        site.name = "Default"
        site.domain = domain
        site.save()

    def create_user(self):
        """
        Create a default superuser, but only on DEBUG installs with no users.
        """
        User = get_user_model()
        if not settings.DEBUG or User.objects.count() > 0:
            return
        if self.verbosity >= 1:
            print("\nCreating default account (username: %s / password: %s) "
                  "...\n" % (DEFAULT_USERNAME, DEFAULT_PASSWORD))
        args = (DEFAULT_USERNAME, DEFAULT_EMAIL, DEFAULT_PASSWORD)
        User.objects.create_superuser(*args)

    def create_pages(self):
        """
        Load required page fixtures, optionally followed by demo pages.
        """
        call_command("loaddata", "mezzanine_required.json")
        install_optional = not self.no_data and self.confirm(
            "\nWould you like to install some initial "
            "demo pages?\nEg: About us, Contact form, "
            "Gallery. (yes/no): ")
        if install_optional:
            if self.verbosity >= 1:
                print("\nCreating demo pages: About us, Contact form, "
                      "Gallery ...\n")
            # Import here so the command works when galleries isn't installed
            # (handle() only calls this when mezzanine.galleries is present).
            from mezzanine.galleries.models import Gallery
            call_command("loaddata", "mezzanine_optional.json")
            # Saving with zip_import set triggers extraction of the demo
            # images into the gallery.
            zip_name = "gallery.zip"
            copy_test_to_media("mezzanine.core", zip_name)
            gallery = Gallery.objects.get()
            gallery.zip_import = zip_name
            gallery.save()

    def create_shop(self):
        """
        Load required Cartridge fixtures, optionally followed by demo data.
        """
        call_command("loaddata", "cartridge_required.json")
        install_optional = not self.no_data and self.confirm(
            "\nWould you like to install an initial "
            "demo product and sale? (yes/no): ")
        if install_optional:
            if self.verbosity >= 1:
                print("\nCreating demo product and sale ...\n")
            call_command("loaddata", "cartridge_optional.json")
            copy_test_to_media("cartridge.shop", "product")

    def translation_fields(self):
        """
        Optionally sync and populate django-modeltranslation fields.
        """
        try:
            from modeltranslation.management.commands \
                import (update_translation_fields as update_fields,
                        sync_translation_fields as create_fields)
        except ImportError:
            return
        update = self.confirm(
            "\nDjango-modeltranslation is installed for "
            "this project and you have specified to use "
            "i18n.\nWould you like to update translation "
            "fields from the default ones? (yes/no): ")
        if update:
            # First create any missing columns, then copy default-language
            # values into them.
            create_fields.Command().execute(
                verbosity=self.verbosity, interactive=False)
            update_fields.Command().execute(verbosity=self.verbosity)
| {
"content_hash": "6ab4bf2473a1e63ea2e4622b1308f7a6",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 77,
"avg_line_length": 40.24683544303797,
"alnum_prop": 0.5793363736436546,
"repo_name": "SoLoHiC/mezzanine",
"id": "c064c980007e294fe7b1c059e1c0efef78848525",
"size": "6359",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mezzanine/core/management/commands/createdb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "59846"
},
{
"name": "HTML",
"bytes": "88672"
},
{
"name": "JavaScript",
"bytes": "446953"
},
{
"name": "Python",
"bytes": "646674"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SharedCostList(ListResource):
    """List of shared-cost AvailablePhoneNumber resources for one country."""

    def __init__(self, version, account_sid, country_code):
        """
        Initialize the SharedCostList

        :param Version version: Version that contains the resource
        :param account_sid: The account_sid
        :param country_code: The ISO-3166-1 country code of the country.

        :returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostList
        :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostList
        """
        super(SharedCostList, self).__init__(version)

        # Path Solution
        self._solution = {'account_sid': account_sid, 'country_code': country_code, }
        self._uri = '/Accounts/{account_sid}/AvailablePhoneNumbers/{country_code}/SharedCost.json'.format(**self._solution)

    def stream(self, area_code=values.unset, contains=values.unset,
               sms_enabled=values.unset, mms_enabled=values.unset,
               voice_enabled=values.unset,
               exclude_all_address_required=values.unset,
               exclude_local_address_required=values.unset,
               exclude_foreign_address_required=values.unset, beta=values.unset,
               near_number=values.unset, near_lat_long=values.unset,
               distance=values.unset, in_postal_code=values.unset,
               in_region=values.unset, in_rate_center=values.unset,
               in_lata=values.unset, in_locality=values.unset,
               fax_enabled=values.unset, limit=None, page_size=None):
        """
        Streams SharedCostInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param unicode area_code: The area code of the phone numbers to read
        :param unicode contains: The pattern on which to match phone numbers
        :param bool sms_enabled: Whether the phone numbers can receive text messages
        :param bool mms_enabled: Whether the phone numbers can receive MMS messages
        :param bool voice_enabled: Whether the phone numbers can receive calls.
        :param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
        :param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
        :param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
        :param bool beta: Whether to read phone numbers new to the Twilio platform
        :param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
        :param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
        :param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
        :param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
        :param unicode in_region: Limit results to a particular region. (US/Canada only)
        :param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
        :param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
        :param unicode in_locality: Limit results to a particular locality
        :param bool fax_enabled: Whether the phone numbers can receive faxes
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance]
        """
        # read_limits reconciles limit/page_size into per-request page sizes.
        limits = self._version.read_limits(limit, page_size)

        # Only the first page is fetched eagerly; the rest stream lazily.
        page = self.page(
            area_code=area_code,
            contains=contains,
            sms_enabled=sms_enabled,
            mms_enabled=mms_enabled,
            voice_enabled=voice_enabled,
            exclude_all_address_required=exclude_all_address_required,
            exclude_local_address_required=exclude_local_address_required,
            exclude_foreign_address_required=exclude_foreign_address_required,
            beta=beta,
            near_number=near_number,
            near_lat_long=near_lat_long,
            distance=distance,
            in_postal_code=in_postal_code,
            in_region=in_region,
            in_rate_center=in_rate_center,
            in_lata=in_lata,
            in_locality=in_locality,
            fax_enabled=fax_enabled,
            page_size=limits['page_size'],
        )

        return self._version.stream(page, limits['limit'], limits['page_limit'])

    def list(self, area_code=values.unset, contains=values.unset,
             sms_enabled=values.unset, mms_enabled=values.unset,
             voice_enabled=values.unset, exclude_all_address_required=values.unset,
             exclude_local_address_required=values.unset,
             exclude_foreign_address_required=values.unset, beta=values.unset,
             near_number=values.unset, near_lat_long=values.unset,
             distance=values.unset, in_postal_code=values.unset,
             in_region=values.unset, in_rate_center=values.unset,
             in_lata=values.unset, in_locality=values.unset,
             fax_enabled=values.unset, limit=None, page_size=None):
        """
        Lists SharedCostInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param unicode area_code: The area code of the phone numbers to read
        :param unicode contains: The pattern on which to match phone numbers
        :param bool sms_enabled: Whether the phone numbers can receive text messages
        :param bool mms_enabled: Whether the phone numbers can receive MMS messages
        :param bool voice_enabled: Whether the phone numbers can receive calls.
        :param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
        :param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
        :param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
        :param bool beta: Whether to read phone numbers new to the Twilio platform
        :param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
        :param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
        :param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
        :param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
        :param unicode in_region: Limit results to a particular region. (US/Canada only)
        :param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
        :param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
        :param unicode in_locality: Limit results to a particular locality
        :param bool fax_enabled: Whether the phone numbers can receive faxes
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance]
        """
        # Eagerly drain the stream() generator into a list.
        return list(self.stream(
            area_code=area_code,
            contains=contains,
            sms_enabled=sms_enabled,
            mms_enabled=mms_enabled,
            voice_enabled=voice_enabled,
            exclude_all_address_required=exclude_all_address_required,
            exclude_local_address_required=exclude_local_address_required,
            exclude_foreign_address_required=exclude_foreign_address_required,
            beta=beta,
            near_number=near_number,
            near_lat_long=near_lat_long,
            distance=distance,
            in_postal_code=in_postal_code,
            in_region=in_region,
            in_rate_center=in_rate_center,
            in_lata=in_lata,
            in_locality=in_locality,
            fax_enabled=fax_enabled,
            limit=limit,
            page_size=page_size,
        ))

    def page(self, area_code=values.unset, contains=values.unset,
             sms_enabled=values.unset, mms_enabled=values.unset,
             voice_enabled=values.unset, exclude_all_address_required=values.unset,
             exclude_local_address_required=values.unset,
             exclude_foreign_address_required=values.unset, beta=values.unset,
             near_number=values.unset, near_lat_long=values.unset,
             distance=values.unset, in_postal_code=values.unset,
             in_region=values.unset, in_rate_center=values.unset,
             in_lata=values.unset, in_locality=values.unset,
             fax_enabled=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """
        Retrieve a single page of SharedCostInstance records from the API.
        Request is executed immediately

        :param unicode area_code: The area code of the phone numbers to read
        :param unicode contains: The pattern on which to match phone numbers
        :param bool sms_enabled: Whether the phone numbers can receive text messages
        :param bool mms_enabled: Whether the phone numbers can receive MMS messages
        :param bool voice_enabled: Whether the phone numbers can receive calls.
        :param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
        :param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
        :param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
        :param bool beta: Whether to read phone numbers new to the Twilio platform
        :param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
        :param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
        :param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
        :param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
        :param unicode in_region: Limit results to a particular region. (US/Canada only)
        :param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
        :param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
        :param unicode in_locality: Limit results to a particular locality
        :param bool fax_enabled: Whether the phone numbers can receive faxes
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of SharedCostInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
        """
        # values.of drops unset entries so only supplied filters hit the wire.
        params = values.of({
            'AreaCode': area_code,
            'Contains': contains,
            'SmsEnabled': sms_enabled,
            'MmsEnabled': mms_enabled,
            'VoiceEnabled': voice_enabled,
            'ExcludeAllAddressRequired': exclude_all_address_required,
            'ExcludeLocalAddressRequired': exclude_local_address_required,
            'ExcludeForeignAddressRequired': exclude_foreign_address_required,
            'Beta': beta,
            'NearNumber': near_number,
            'NearLatLong': near_lat_long,
            'Distance': distance,
            'InPostalCode': in_postal_code,
            'InRegion': in_region,
            'InRateCenter': in_rate_center,
            'InLata': in_lata,
            'InLocality': in_locality,
            'FaxEnabled': fax_enabled,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(
            'GET',
            self._uri,
            params=params,
        )

        return SharedCostPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of SharedCostInstance records from the API.
        Request is executed immediately

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of SharedCostInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return SharedCostPage(self._version, response, self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.SharedCostList>'
class SharedCostPage(Page):
    """One page of shared-cost AvailablePhoneNumber results."""

    def __init__(self, version, response, solution):
        """
        Initialize the SharedCostPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param account_sid: The account_sid
        :param country_code: The ISO-3166-1 country code of the country.

        :returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
        :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
        """
        super(SharedCostPage, self).__init__(version, response)
        # Path solution carried over from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of SharedCostInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
        """
        path = self._solution
        return SharedCostInstance(self._version, payload,
                                  account_sid=path['account_sid'],
                                  country_code=path['country_code'])

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.SharedCostPage>'
class SharedCostInstance(InstanceResource):
    """A single shared-cost AvailablePhoneNumber record."""

    def __init__(self, version, payload, account_sid, country_code):
        """
        Initialize the SharedCostInstance

        :returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
        """
        super(SharedCostInstance, self).__init__(version)

        # Marshaled Properties
        # Missing payload keys become None via dict.get.
        self._properties = {
            'friendly_name': payload.get('friendly_name'),
            'phone_number': payload.get('phone_number'),
            'lata': payload.get('lata'),
            'locality': payload.get('locality'),
            'rate_center': payload.get('rate_center'),
            'latitude': deserialize.decimal(payload.get('latitude')),
            'longitude': deserialize.decimal(payload.get('longitude')),
            'region': payload.get('region'),
            'postal_code': payload.get('postal_code'),
            'iso_country': payload.get('iso_country'),
            'address_requirements': payload.get('address_requirements'),
            'beta': payload.get('beta'),
            'capabilities': payload.get('capabilities'),
        }

        # Context
        # This resource is read-only, so no context object is ever created.
        self._context = None
        self._solution = {'account_sid': account_sid, 'country_code': country_code, }

    @property
    def friendly_name(self):
        """
        :returns: A formatted version of the phone number
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def phone_number(self):
        """
        :returns: The phone number in E.164 format
        :rtype: unicode
        """
        return self._properties['phone_number']

    @property
    def lata(self):
        """
        :returns: The LATA of this phone number
        :rtype: unicode
        """
        return self._properties['lata']

    @property
    def locality(self):
        """
        :returns: The locality or city of this phone number's location
        :rtype: unicode
        """
        return self._properties['locality']

    @property
    def rate_center(self):
        """
        :returns: The rate center of this phone number
        :rtype: unicode
        """
        return self._properties['rate_center']

    @property
    def latitude(self):
        """
        :returns: The latitude of this phone number's location
        :rtype: unicode
        """
        return self._properties['latitude']

    @property
    def longitude(self):
        """
        :returns: The longitude of this phone number's location
        :rtype: unicode
        """
        return self._properties['longitude']

    @property
    def region(self):
        """
        :returns: The two-letter state or province abbreviation of this phone number's location
        :rtype: unicode
        """
        return self._properties['region']

    @property
    def postal_code(self):
        """
        :returns: The postal or ZIP code of this phone number's location
        :rtype: unicode
        """
        return self._properties['postal_code']

    @property
    def iso_country(self):
        """
        :returns: The ISO country code of this phone number
        :rtype: unicode
        """
        return self._properties['iso_country']

    @property
    def address_requirements(self):
        """
        :returns: The type of Address resource the phone number requires
        :rtype: unicode
        """
        return self._properties['address_requirements']

    @property
    def beta(self):
        """
        :returns: Whether the phone number is new to the Twilio platform
        :rtype: bool
        """
        return self._properties['beta']

    @property
    def capabilities(self):
        """
        :returns: Whether a phone number can receive calls or messages
        :rtype: unicode
        """
        return self._properties['capabilities']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.SharedCostInstance>'
| {
"content_hash": "583efaf75836f3bc5903ba60f16dfa62",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 171,
"avg_line_length": 44.88503253796095,
"alnum_prop": 0.6418905857336169,
"repo_name": "tysonholub/twilio-python",
"id": "9ca37a65a53e3299b1d852b6be553f03bdb21c5f",
"size": "20707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twilio/rest/api/v2010/account/available_phone_number/shared_cost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "173"
},
{
"name": "Makefile",
"bytes": "2081"
},
{
"name": "Python",
"bytes": "8063586"
}
],
"symlink_target": ""
} |
import logging
import uuid
import pytest
import sdk_auth
import sdk_cmd
import sdk_hosts
import sdk_install
import sdk_marathon
import sdk_security
import sdk_utils
from tests import auth
from tests import config
from tests import test_utils
# Module-level logger for these tests.
log = logging.getLogger(__name__)

# Kerberos is an enterprise-only feature, so skip this whole module on
# open DC/OS clusters.
pytestmark = pytest.mark.skipif(sdk_utils.is_open_dcos(),
                                reason='Feature only supported in DC/OS EE')
@pytest.fixture(scope='module', autouse=True)
def service_account(configure_security):
    """
    Creates service account and yields the name.
    """
    name = config.SERVICE_NAME
    sdk_security.create_service_account(
        service_account_name=name, service_account_secret=name)
    # TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475
    sdk_cmd.run_cli(
        "security org groups add_user superusers {name}".format(name=name))
    yield name
    # Teardown: remove the account and its secret when the module finishes.
    sdk_security.delete_service_account(
        service_account_name=name, service_account_secret=name)
@pytest.fixture(scope='module', autouse=True)
def kafka_principals():
    """Yield the Kerberos principals needed by the brokers and test clients."""
    fqdn = "{service_name}.{host_suffix}".format(
        service_name=config.SERVICE_NAME,
        host_suffix=sdk_hosts.AUTOIP_HOST_SUFFIX)

    broker_tasks = ("kafka-0-broker", "kafka-1-broker", "kafka-2-broker")
    principals = [
        "kafka/{instance}.{domain}@{realm}".format(
            instance=task, domain=fqdn, realm=sdk_auth.REALM)
        for task in broker_tasks
    ]

    client_names = ("client", "authorized", "unauthorized", "super")
    principals.extend(
        "{client}@{realm}".format(client=name, realm=sdk_auth.REALM)
        for name in client_names)

    yield principals
@pytest.fixture(scope='module', autouse=True)
def kerberos(configure_security, kafka_principals):
    """Provision a Kerberos environment holding the Kafka principals.

    Yields the ``KerberosEnvironment`` instance and always tears it down.
    """
    kerberos_env = None
    try:
        kerberos_env = sdk_auth.KerberosEnvironment()
        # Copy the principal list so the fixture value is not mutated.
        kerberos_env.add_principals(list(kafka_principals))
        kerberos_env.finalize()
        yield kerberos_env
    finally:
        # Bug fix: previously `kerberos_env` was referenced unconditionally
        # here, raising NameError (masking the real error) whenever
        # KerberosEnvironment() itself failed. Only clean up if it exists.
        if kerberos_env is not None:
            kerberos_env.cleanup()
@pytest.fixture(scope='module', autouse=True)
def kafka_server(kerberos, service_account):
    """
    A pytest fixture that installs a Kerberized kafka service.

    On teardown, the service is uninstalled.
    """
    kerberos_options = {
        "enabled": True,
        "kdc": {
            "hostname": kerberos.get_host(),
            "port": int(kerberos.get_port())
        },
        "keytab_secret": kerberos.get_keytab_path(),
    }
    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "service_account": service_account,
            "service_account_secret": service_account,
            "security": {
                "kerberos": kerberos_options,
                "transport_encryption": {
                    "enabled": True
                }
            }
        }
    }

    # Make sure no stale instance is left over from a previous run.
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60)
        yield dict(service_kerberos_options,
                   package_name=config.PACKAGE_NAME)
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope='module', autouse=True)
def kafka_client(kerberos, kafka_server):
    """Install a Kerberized Kafka client as a Marathon app.

    Yields the app definition plus the broker hostnames; the app is
    destroyed on teardown.
    """
    # Resolve the broker TLS endpoints of the freshly-installed service.
    brokers = sdk_cmd.svc_cli(
        kafka_server["package_name"],
        kafka_server["service"]["name"],
        "endpoint broker-tls", json=True)["dns"]

    try:
        client_id = "kafka-client"
        # Marathon app definition: a client container with the Kerberos
        # keytab mounted from the DC/OS secret store.
        client = {
            "id": client_id,
            "mem": 512,
            "user": "nobody",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
                "volumes": [
                    {
                        "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                        "secret": "kafka_keytab"
                    }
                ]
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path(),
                }
            },
            "networks": [
                {
                    "mode": "host"
                }
            ],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ",".join(brokers)
            }
        }

        sdk_marathon.install_app(client)
        # Provision TLS certificates for the "client" principal in the task.
        auth.create_tls_artifacts(
            cn="client",
            task=client_id)

        # Strip the port from each broker endpoint; tests only need hostnames.
        yield {**client, **{"brokers": list(map(lambda x: x.split(':')[0], brokers))}}
    finally:
        sdk_marathon.destroy_app(client_id)
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_client_can_read_and_write(kafka_client, kafka_server):
    """End-to-end check: an authenticated client can produce and consume."""
    client_id = kafka_client["id"]
    auth.wait_for_brokers(client_id, kafka_client["brokers"])

    package_name = kafka_server["package_name"]
    service_name = kafka_server["service"]["name"]
    topic_name = "authn.test"
    sdk_cmd.svc_cli(package_name, service_name,
                    "topic create {}".format(topic_name),
                    json=True)
    test_utils.wait_for_topic(package_name, service_name, topic_name)

    message = str(uuid.uuid4())
    assert write_to_topic("client", client_id, topic_name, message)
    assert message in read_from_topic("client", client_id, topic_name, 1)
def get_client_properties(cn: str) -> list:
    """Assemble Kerberos and SSL client properties for common name *cn*.

    Returns a list of ``key=value`` property lines. (Bug fix: the return
    annotation previously claimed ``str`` although a list is returned.)
    """
    client_properties_lines = []
    client_properties_lines.extend(auth.get_kerberos_client_properties(ssl_enabled=True))
    client_properties_lines.extend(auth.get_ssl_client_properties(cn, True))
    return client_properties_lines
def write_to_topic(cn: str, task: str, topic: str, message: str) -> bool:
    """Produce *message* to *topic* as principal *cn* from task *task*."""
    properties = get_client_properties(cn)
    env = auth.setup_env(cn, task)
    return auth.write_to_topic(cn, task, topic, message, properties,
                               environment=env)
def read_from_topic(cn: str, task: str, topic: str, messages: int, cmd: str=None) -> str:
    """Consume *messages* messages from *topic* as principal *cn*.

    NOTE(review): ``cmd`` is accepted but never used; kept for caller
    compatibility.
    """
    properties = get_client_properties(cn)
    env = auth.setup_env(cn, task)
    return auth.read_from_topic(cn, task, topic, messages, properties,
                                environment=env)
| {
"content_hash": "83fb3b34ac4e9debcfe60d02f7f3ed93",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 104,
"avg_line_length": 29.399122807017545,
"alnum_prop": 0.5552737580187975,
"repo_name": "vishnu2kmohan/dcos-commons",
"id": "ab7296562565a615157e13fdec7fc8ac5d669969",
"size": "6703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frameworks/kafka/tests/test_ssl_kerberos_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "168256"
},
{
"name": "HTML",
"bytes": "99573"
},
{
"name": "Java",
"bytes": "2770769"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "457961"
},
{
"name": "Shell",
"bytes": "46736"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import shutil
from ..utils import in_temporary_directory
from ..utils import assert_cmd_success, assert_cmd_failure
from ..utils import assert_files_exist
from ..workspace_assertions import assert_workspace_initialized
from ..workspace_assertions import assert_no_warnings
TEST_DIR = os.path.dirname(__file__)
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources')
@in_temporary_directory
def test_build_no_src():
    """`catkin build` must fail when there is no source space at all."""
    assert_cmd_failure(['catkin', 'build'])
@in_temporary_directory
def test_build_auto_init_no_pkgs():
    """Building an empty source space fails but still initializes the workspace."""
    source_space = os.path.join(os.getcwd(), 'src')
    print("Creating source directory: %s" % source_space)
    os.mkdir(source_space)

    output = assert_cmd_failure(['catkin', 'build', '--no-notify'])

    assert_no_warnings(output)
    assert_workspace_initialized('.')
@in_temporary_directory
def test_build_auto_init_one_pkg():
    """A workspace with one package builds cleanly and is auto-initialized."""
    source_space = os.path.join(os.getcwd(), 'src')
    print("Creating source directory: %s" % source_space)
    os.mkdir(source_space)
    assert_cmd_success(['catkin', 'create', 'pkg', '--rosdistro', 'hydro',
                        '-p', source_space, 'pkg_a'])

    output = assert_cmd_success(['catkin', 'build', '--no-notify',
                                 '--no-status', '--verbose'])

    assert_no_warnings(output)
    assert_workspace_initialized('.')
@in_temporary_directory
def test_build_eclipse():
    """Building with the Eclipse CDT4 generator emits .project/.cproject files."""
    cwd = os.getcwd()
    source_space = os.path.join(cwd, 'src')
    print("Creating source directory: %s" % source_space)
    os.mkdir(source_space)
    assert_cmd_success(['catkin', 'create', 'pkg', '--rosdistro', 'hydro',
                        '-p', source_space, 'pkg_a'])

    output = assert_cmd_success(['catkin', 'build', '--no-notify',
                                 '--no-status', '--verbose', '--cmake-args',
                                 '-GEclipse CDT4 - Unix Makefiles'])

    assert_no_warnings(output)
    assert_workspace_initialized('.')
    assert_files_exist(os.path.join(cwd, 'build', 'pkg_a'),
                       ['.project', '.cproject'])
@in_temporary_directory
def test_build_pkg_unit_tests():
    """run_tests passes for a healthy package and fails for a broken one."""
    source_space = os.path.join(os.getcwd(), 'src')
    print("Creating source directory: %s" % source_space)
    shutil.copytree(RESOURCES_DIR, source_space)

    assert_cmd_success(['catkin', 'build', '--no-notify', '--no-status',
                        '--verbose', '--no-deps', 'pkg_with_test',
                        '--make-args', 'run_tests'])
    assert_cmd_success(['catkin_test_results', 'build/pkg_with_test'])

    assert_cmd_success(['catkin', 'build', '--no-notify', '--no-status',
                        '--verbose', '--no-deps', 'pkg_with_broken_test',
                        '--make-args', 'run_tests'])
    assert_cmd_failure(['catkin_test_results', 'build/pkg_with_broken_test'])
@in_temporary_directory
def test_build_pkg_unit_tests_alias():
    """The `catkin run_tests` alias mirrors build + run_tests behavior."""
    source_space = os.path.join(os.getcwd(), 'src')
    print("Creating source directory: %s" % source_space)
    shutil.copytree(RESOURCES_DIR, source_space)

    assert_cmd_success(['catkin', 'run_tests', 'pkg_with_test', '--no-deps',
                        '--no-notify', '--no-status'])
    assert_cmd_success(['catkin_test_results', 'build/pkg_with_test'])

    assert_cmd_success(['catkin', 'run_tests', 'pkg_with_broken_test',
                        '--no-deps', '--no-notify', '--no-status'])
    assert_cmd_failure(['catkin_test_results', 'build/pkg_with_broken_test'])
@in_temporary_directory
def test_build_pkg_cmake_args():
    """pkg_with_cmake_args requires all three VAR definitions to build."""
    source_space = os.path.join(os.getcwd(), 'src')
    print("Creating source directory: %s" % source_space)
    shutil.copytree(RESOURCES_DIR, source_space)

    base_cmd = ['catkin', 'build', 'pkg_with_cmake_args', '--no-deps',
                '--no-notify', '--no-status', '--force-cmake',
                '--cmake-args']

    # Incomplete variable sets must fail ...
    assert_cmd_failure(base_cmd + ['-DVAR1=VAL1'])
    assert_cmd_failure(base_cmd + ['-DVAR1=VAL1', '-DVAR2=VAL2'])
    # ... while the full set succeeds, with or without a trailing '--'.
    assert_cmd_success(base_cmd + ['-DVAR1=VAL1', '-DVAR2=VAL2',
                                   '-DVAR3=VAL3'])
    assert_cmd_success(base_cmd + ['-DVAR1=VAL1', '-DVAR2=VAL2',
                                   '-DVAR3=VAL3', '--'])
| {
"content_hash": "6bc5d2371ad062bdb20f168b61ef2f3a",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 78,
"avg_line_length": 39.858333333333334,
"alnum_prop": 0.567844449090529,
"repo_name": "davetcoleman/catkin_tools",
"id": "e51a455e643aa7c7c035d9ed15a9dc5c2e611b4e",
"size": "4784",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integrated/test_build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "679"
},
{
"name": "Objective-C",
"bytes": "3354"
},
{
"name": "Python",
"bytes": "275993"
}
],
"symlink_target": ""
} |
import io
import re
from setuptools import setup
# Extract __version__ from the package without importing it.
with io.open('./xero/__init__.py', encoding='utf8') as version_source:
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      version_source.read(), re.M)

if match:
    version = match.group(1)
else:
    raise RuntimeError("Unable to find version string.")

# The README doubles as the long description on PyPI.
with io.open('README.md', encoding='utf8') as readme_file:
    long_description = readme_file.read()

setup(
    name='pyxero',
    version=version,
    description='Python API for accessing the REST API of the Xero accounting tool.',
    long_description=long_description,
    author='Russell Keith-Magee',
    author_email='russell@keith-magee.com',
    url='http://github.com/freakboy3742/pyxero',
    packages=['xero', ],
    install_requires=[
        'six>=1.8.0',
        'requests>=1.1.0',
        'requests-oauthlib>=0.3.0',
        'python-dateutil>=2.1',
        'pycrypto>=2.6.1'
    ],
    tests_require=[
        'mock',
    ],
    license='New BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Financial and Insurance Industry',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Office/Business :: Financial :: Accounting',
    ],
    test_suite="tests",
)
| {
"content_hash": "3dc1199fed6949f254ba18f69ac9e82f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 95,
"avg_line_length": 30.865384615384617,
"alnum_prop": 0.5950155763239875,
"repo_name": "jaymcconnell/pyxero",
"id": "cae8d5620751fe34bd97f43416141415c781322e",
"size": "1626",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "76160"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.template import Template, Context
from django.http import HttpRequest
from simple_pagination.utils import(
normalize_page_number,
get_querystring_for_page,
get_page_numbers,
)
from simple_pagination.models import EndlessPage, PageList, ShowItems
from django.http import QueryDict
from django.core.paginator import Paginator
class PaginateAndShowPageItems(TestCase):
    """Render the pagination template tags and check output is produced."""

    def test_addition(self):
        template = Template(
            "{% load paginate %}{% paginate entities %}.{% show_pageitems %} {% paginate 20 entities %} {% show_pages %}")
        request = HttpRequest()
        context = Context({"entities": range(100), 'request': request})
        rendered = template.render(context)
        self.assertTrue(bool(rendered))
class NormalizePageNumber(TestCase):
    """Tests for utils.normalize_page_number."""

    def test_normalize_page_number(self):
        page_number = 1
        # In-range page number yields a truthy result ...
        self.assertTrue(bool(normalize_page_number(page_number, range(2))))
        # ... while an out-of-range one normalizes to a falsy value.
        self.assertFalse(bool(normalize_page_number(page_number, range(1))))
class GetQuerystringForPage(TestCase):
    """Tests for utils.get_querystring_for_page."""

    def test_get_querystring_for_page(self):
        # (Fixed: removed a dead `request = self` assignment that was
        # immediately overwritten, and renamed `dict` to stop shadowing
        # the builtin.)
        request = HttpRequest()
        params = {u"querystring_key": 1,
                  u"key": 2,
                  u"page": 3}
        qdict = QueryDict('', mutable=True)
        qdict.update(params)
        request.GET = qdict
        val = get_querystring_for_page(request=request,
                                       page_number=1,
                                       querystring_key="key",
                                       default_number=1)
        self.assertTrue(bool(val))
        # With no GET parameters the querystring comes back empty.
        request.GET = {}
        val = get_querystring_for_page(request=request,
                                       page_number=1,
                                       querystring_key="key",
                                       default_number=1)
        self.assertFalse(bool(val))
class GetPageNumbers(TestCase):
    """Tests for utils.get_page_numbers."""

    def test_get_page_numbers(self):
        for current, total in ((2, 10), (9, 10), (1, 3)):
            self.assertTrue(get_page_numbers(current_page=current,
                                             num_pages=total))
class TestEndlessPage(TestCase):
    """Smoke test for the EndlessPage model wrapper."""

    def test_endless_page(self):
        page = EndlessPage(request=HttpRequest(),
                           number=2,
                           current_number=2,
                           total_number=10,
                           querystring_key='page')
        self.assertTrue(page)
class TestPageList(TestCase):
    # Smoke tests for PageList and ShowItems against a small Paginator.
    def test_page_list(self):
        request = HttpRequest()
        paginator = Paginator(['john', 'paul', 'george', 'ringo'], 3)
        page = paginator.page(1)
        # NOTE(review): `number` is first replaced with a lambda and then
        # immediately overwritten with 2 -- the lambda assignment looks
        # redundant; confirm before removing.
        page.number = lambda: None
        setattr(page, 'number', 2)
        setattr(page, 'paginator', paginator)
        # NOTE(review): PageList is constructed twice; only the second
        # instance (with default_number) is exercised below.
        page_list = PageList(request=request, page=page, querystring_key="page")
        page_list = PageList(request=request, page=page, querystring_key="page", default_number=1)
        page_list._endless_page(number=1)
        page_list._endless_page(number=3)
        self.assertTrue(page_list[1])
        page_list.next()
        self.assertTrue(page_list)
        si = ShowItems(request=request, page=page, querystring_key="page")
        self.assertTrue(si)
| {
"content_hash": "909fab17c101d1c971ac2ebbbd51bd17",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 122,
"avg_line_length": 34.92857142857143,
"alnum_prop": 0.5848670756646217,
"repo_name": "MicroPyramid/django-simple-pagination",
"id": "ab66de2716b2f87e96ea2e5d518f2126072e274d",
"size": "3423",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simple_pagination/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1204"
},
{
"name": "Python",
"bytes": "70923"
}
],
"symlink_target": ""
} |
"""Fichier contenant la fonction noms."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
    """Return a list of names for several kinds of information.

    The type of information wanted must be given as a parameter to this
    function. For example:

      identifiants = noms("salle")
      # identifiants now holds the list of room identifiers
    """

    @classmethod
    def init_types(cls):
        # Register the single supported signature: noms(str).
        cls.ajouter_types(cls.noms, "str")

    @staticmethod
    def noms(type):
        """Return the names of the requested pieces of information.

        Parameter to provide:

          * type: the kind of information (see below).

        Possible types:

          * "joueur": returns the names of all players;
          * "salle": returns the identifiers of all rooms;
          * "prototype de PNJ": returns the keys of the NPC prototypes.

        Usage examples:

          noms = noms("joueur")
          # noms contains the list of all player names
          identifiants = noms("salle")
        """
        # NOTE: the parameter name `type` shadows the builtin; kept for
        # scripting-API compatibility.
        type = type.lower()
        if type == "joueur":
            return [j.nom for j in importeur.joueur.joueurs.values()]
        elif type == "salle":
            return [s.ident for s in importeur.salle._salles.values()]
        elif type == "prototype de pnj":
            return [p.cle for p in importeur.pnj._prototypes.values()]
        else:
            raise ErreurExecution("Type inconnu {}".format(repr(type)))
| {
"content_hash": "8d653aff540a3150bd3e1ba9b45b50d8",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 73,
"avg_line_length": 30.823529411764707,
"alnum_prop": 0.6265903307888041,
"repo_name": "vlegoff/tsunami",
"id": "8c2181472c98a505f3d18689449ed00e24c8b4fd",
"size": "3149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/scripting/fonctions/noms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""
Manage communication with child zones and keep state for them.
"""
import datetime
import traceback
from eventlet import greenpool
from novaclient import v1_1 as novaclient
from nova import db
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
zone_manager_opts = [
cfg.IntOpt('zone_db_check_interval',
default=60,
help='Seconds between getting fresh zone info from db.'),
cfg.IntOpt('zone_failures_to_offline',
default=3,
help='Number of consecutive errors before offlining a zone'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(zone_manager_opts)
LOG = logging.getLogger(__name__)
class ZoneState(object):
    """Holds state for a particular zone."""

    def __init__(self):
        # Assume the zone is reachable until polling proves otherwise.
        self.is_active = True
        self.capabilities = {}
        # Consecutive failed polls; reset to 0 on a successful poll.
        self.attempt = 0
        self.last_seen = datetime.datetime.min
        self.last_exception = None
        self.last_exception_time = None
        # Raw zone row from the DB (credentials, api_url, weights, ...).
        self.zone_info = {}

    def update_zone_info(self, zone):
        """Update zone credentials from db"""
        self.zone_info = dict(zone.iteritems())

    def update_metadata(self, zone_metadata):
        """Update zone metadata after successful communications with
        child zone."""
        self.last_seen = utils.utcnow()
        self.attempt = 0
        # Everything except the 'name' key is treated as a capability.
        self.capabilities = dict(
            [(k, v) for k, v in zone_metadata.iteritems() if k != 'name'])
        self.is_active = True

    def get_zone_info(self):
        # Public view of the zone: activity flag, capabilities, and a
        # whitelist of DB fields (credentials deliberately excluded).
        db_fields_to_return = ['api_url', 'id', 'weight_scale',
                               'weight_offset']
        zone_info = dict(is_active=self.is_active,
                         capabilities=self.capabilities)
        for field in db_fields_to_return:
            zone_info[field] = self.zone_info[field]
        return zone_info

    def log_error(self, exception):
        """Something went wrong. Check to see if zone should be
        marked as offline."""
        self.last_exception = exception
        self.last_exception_time = utils.utcnow()
        api_url = self.zone_info['api_url']
        LOG.warning(_("'%(exception)s' error talking to "
                      "zone %(api_url)s") % locals())

        # Offline the zone after too many consecutive failures.
        max_errors = FLAGS.zone_failures_to_offline
        self.attempt += 1
        if self.attempt >= max_errors:
            self.is_active = False
            LOG.error(_("No answer from zone %(api_url)s "
                        "after %(max_errors)d "
                        "attempts. Marking inactive.") % locals())

    def call_novaclient(self):
        """Call novaclient. Broken out for testing purposes. Note that
        we have to use the admin credentials for this since there is no
        available context."""
        username = self.zone_info['username']
        password = self.zone_info['password']
        api_url = self.zone_info['api_url']
        region_name = self.zone_info['name']
        client = novaclient.Client(username, password, None, api_url,
                                   region_name)
        return client.zones.info()._info

    def poll(self):
        """Eventlet worker to poll a self."""
        # Zones without an API URL cannot be polled.
        if 'api_url' not in self.zone_info:
            return
        name = self.zone_info['name']
        api_url = self.zone_info['api_url']
        LOG.debug(_("Polling zone: %(name)s @ %(api_url)s") % locals())
        try:
            self.update_metadata(self.call_novaclient())
        except Exception, e:
            # Record the failure; repeated failures mark the zone inactive.
            self.log_error(traceback.format_exc())
class ZoneManager(object):
    """Keeps the zone states updated."""

    def __init__(self):
        # datetime.min forces a DB refresh on the first update() call.
        self.last_zone_db_check = datetime.datetime.min
        self.zone_states = {}  # { <zone_id> : ZoneState }
        self.green_pool = greenpool.GreenPool()

    def get_zone_list(self):
        """Return the list of zones we know about."""
        return [zone.get_zone_info() for zone in self.zone_states.values()]

    def _refresh_from_db(self, context):
        """Make our zone state map match the db."""
        # Add/update existing zones ...
        zones = db.zone_get_all(context)
        existing = self.zone_states.keys()
        db_keys = []
        for zone in zones:
            zone_id = zone['id']
            db_keys.append(zone_id)
            if zone_id not in existing:
                self.zone_states[zone_id] = ZoneState()
            self.zone_states[zone_id].update_zone_info(zone)

        # Cleanup zones removed from db ...
        keys = self.zone_states.keys()  # since we're deleting
        for zone_id in keys:
            if zone_id not in db_keys:
                del self.zone_states[zone_id]

    def _poll_zones(self):
        """Try to connect to each child zone and get update."""
        def _worker(zone_state):
            zone_state.poll()

        # Fan the polls out over the eventlet green-thread pool.
        self.green_pool.imap(_worker, self.zone_states.values())

    def update(self, context):
        """Update status for all zones. This should be called
        periodically to refresh the zone states.
        """
        # Only hit the DB every zone_db_check_interval seconds; polling
        # happens on every call.
        diff = utils.utcnow() - self.last_zone_db_check
        if diff.seconds >= FLAGS.zone_db_check_interval:
            LOG.debug(_("Updating zone cache from db."))
            self.last_zone_db_check = utils.utcnow()
            self._refresh_from_db(context)
        self._poll_zones()
| {
"content_hash": "edea67846f59bfc78d7e591361d020b5",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 78,
"avg_line_length": 35.08496732026144,
"alnum_prop": 0.5860655737704918,
"repo_name": "russellb/nova",
"id": "b9d021c7561c914e0cea3462449bccfc317c1ae7",
"size": "6003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/scheduler/zone_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4974"
},
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5611148"
},
{
"name": "Shell",
"bytes": "25380"
}
],
"symlink_target": ""
} |
# Auto-generated setuptools egg bootstrap: locates the bundled win32lz.pyd
# next to this stub and loads it in place of this pure-Python module.
def __bootstrap__():
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Resolve the real C extension shipped inside the egg.
    __file__ = pkg_resources.resource_filename(__name__,'win32lz.pyd')
    __loader__ = None; del __bootstrap__, __loader__
    # Replace this module's contents with the loaded extension.
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
| {
"content_hash": "31d64d1ef4f0dc6760fb1d400f9437b6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 39.714285714285715,
"alnum_prop": 0.5971223021582733,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "c737d1b6c0d5344ae020b575339b049bf60f62ae",
"size": "278",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pywin32-219-py2.7-win32.egg/win32lz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
} |
__all__ = [
"release"
]
__doc__ = "Utilities for restarting the optimization"
from Optizelle.Utility import *
from Optizelle.Properties import *
import Optizelle.EqualityConstrained.State
class X_Vectors(list):
    """Restart information for vectors living in the vector space X."""
class Y_Vectors(list):
    """Restart information for vectors living in the vector space Y."""
class Reals(list):
    """Restart information for real numbers."""
class Naturals(list):
    """Restart information for natural numbers."""
class Params(list):
    """Restart information for parameters."""
def release(X,Y,state,xs,ys,reals,nats,params):
    """Release the data into structures controlled by the user"""

    # Validate the arguments before handing them to the extension call.
    checkVectorSpace("X", X)
    checkVectorSpace("Y", Y)
    Optizelle.EqualityConstrained.State.checkT("state", state)

    # Release the state's information into the supplied lists; they are
    # modified in place, so there is nothing to return.
    EqualityConstrainedRestartRelease(X, Y, state, xs, ys, reals, nats, params)
    return None
def capture(X,Y,state,xs,ys,reals,nats,params):
    """Capture data from structures controlled by the user."""

    # Validate every argument, including the restart containers.
    checkVectorSpace("X", X)
    checkVectorSpace("Y", Y)
    Optizelle.EqualityConstrained.State.checkT("state", state)
    checkVectors('xs', xs)
    checkVectors('ys', ys)
    checkReals('reals', reals)
    checkNaturals('nats', nats)
    checkParams('params', params)

    # Load the restart information into the state (modified in place).
    EqualityConstrainedRestartCapture(X, Y, state, xs, ys, reals, nats, params)
    return None
| {
"content_hash": "ef9b5380081a4b723d8b181c82e8c2b7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 29.20689655172414,
"alnum_prop": 0.7095631641086186,
"repo_name": "OptimoJoe/Optizelle",
"id": "6af712faaf729197b58cac2550a0020f627d176c",
"size": "1694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/Optizelle/EqualityConstrained/Restart.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "212"
},
{
"name": "C++",
"bytes": "1497824"
},
{
"name": "CMake",
"bytes": "113521"
},
{
"name": "Mathematica",
"bytes": "11313"
},
{
"name": "Matlab",
"bytes": "177318"
},
{
"name": "Python",
"bytes": "121166"
},
{
"name": "TeX",
"bytes": "247884"
}
],
"symlink_target": ""
} |
"""functions related to modeling"""
import pymel.internal.factories as _factories
import pymel.internal.pmcmds as cmds
import general as _general
def pointPosition(*args, **kwargs):
    """Wrap cmds.pointPosition, returning a datatypes.Point."""
    raw = cmds.pointPosition(*args, **kwargs)
    return _general.datatypes.Point(raw)
def curve(*args, **kwargs):
    """
    Maya Bug Fix:
      - name parameter only applied to transform. now applies to shape as well
    """
    # curve returns a transform; pull the requested name out first so the
    # rename below covers the shape too.
    requested_name = kwargs.pop('name', kwargs.pop('n', None))
    result = _general.PyNode(cmds.curve(*args, **kwargs))
    if requested_name:
        result.rename(requested_name)
    return result
def surface(*args, **kwargs):
    """
    Maya Bug Fix:
      - name parameter only applied to transform. now applies to shape as well
    """
    # surface returns a shape; rename its parent transform instead.
    requested_name = kwargs.pop('name', kwargs.pop('n', None))
    result = _general.PyNode(cmds.surface(*args, **kwargs))
    if requested_name:
        result.getParent().rename(requested_name)
    return result
# Generate the remaining modeling command wrappers on this module.
_factories.createFunctions(__name__, _general.PyNode)
| {
"content_hash": "df7ca42f1fdadc89425c4a0d4bebb140",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 74,
"avg_line_length": 28.823529411764707,
"alnum_prop": 0.6673469387755102,
"repo_name": "shrtcww/pymel",
"id": "0c4b8781a745968069d862b54a9725600a7dc89e",
"size": "980",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "pymel/core/modeling.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "316"
},
{
"name": "CSS",
"bytes": "16762"
},
{
"name": "Mathematica",
"bytes": "770809"
},
{
"name": "Python",
"bytes": "2663810"
},
{
"name": "Shell",
"bytes": "7033"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import inspect
import linecache
import sys
import textwrap
import tokenize
import warnings
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import py
import six
class Source(object):
    """ an immutable object holding a source code fragment,
        possibly deindenting it.
    """

    # Class-wide counter used to build unique artificial filenames.
    _compilecounter = 0

    def __init__(self, *parts, **kwargs):
        # Normalize every part (Source, string, sequence of lines, or any
        # object inspect can find source for) into one flat list of lines.
        self.lines = lines = []
        de = kwargs.get("deindent", True)
        for part in parts:
            if not part:
                partlines = []
            elif isinstance(part, Source):
                partlines = part.lines
            elif isinstance(part, (tuple, list)):
                partlines = [x.rstrip("\n") for x in part]
            elif isinstance(part, six.string_types):
                partlines = part.split("\n")
            else:
                # Fall back to extracting the source of the object itself.
                partlines = getsource(part, deindent=de).lines
            if de:
                partlines = deindent(partlines)
            lines.extend(partlines)

    def __eq__(self, other):
        # Compare by lines; a plain string is compared against str(self).
        try:
            return self.lines == other.lines
        except AttributeError:
            if isinstance(other, str):
                return str(self) == other
            return False

    # Mutable (lines list) and compared by value, hence unhashable.
    __hash__ = None

    def __getitem__(self, key):
        # An int index returns a single line; a slice returns a new Source.
        if isinstance(key, int):
            return self.lines[key]
        else:
            if key.step not in (None, 1):
                raise IndexError("cannot slice a Source with a step")
            newsource = Source()
            newsource.lines = self.lines[key.start : key.stop]
            return newsource

    def __len__(self):
        return len(self.lines)

    def strip(self):
        """ return new source object with trailing
            and leading blank lines removed.
        """
        start, end = 0, len(self)
        while start < end and not self.lines[start].strip():
            start += 1
        while end > start and not self.lines[end - 1].strip():
            end -= 1
        source = Source()
        source.lines[:] = self.lines[start:end]
        return source

    def putaround(self, before="", after="", indent=" " * 4):
        """ return a copy of the source object with
            'before' and 'after' wrapped around it.
        """
        before = Source(before)
        after = Source(after)
        newsource = Source()
        # The wrapped body is indented one level relative to before/after.
        lines = [(indent + line) for line in self.lines]
        newsource.lines = before.lines + lines + after.lines
        return newsource

    def indent(self, indent=" " * 4):
        """ return a copy of the source object with
            all lines indented by the given indent-string.
        """
        newsource = Source()
        newsource.lines = [(indent + line) for line in self.lines]
        return newsource

    def getstatement(self, lineno):
        """ return Source statement which contains the
            given linenumber (counted from 0).
        """
        start, end = self.getstatementrange(lineno)
        return self[start:end]

    def getstatementrange(self, lineno):
        """ return (start, end) tuple which spans the minimal
            statement region which containing the given lineno.
        """
        if not (0 <= lineno < len(self)):
            raise IndexError("lineno out of range")
        ast, start, end = getstatementrange_ast(lineno, self)
        return start, end

    def deindent(self):
        """return a new source object deindented."""
        newsource = Source()
        newsource.lines[:] = deindent(self.lines)
        return newsource

    def isparseable(self, deindent=True):
        """ return True if source is parseable, heuristically
            deindenting it by default.
        """
        from parser import suite as syntax_checker

        if deindent:
            source = str(self.deindent())
        else:
            source = str(self)
        try:
            # compile(source+'\n', "x", "exec")
            syntax_checker(source + "\n")
        except KeyboardInterrupt:
            raise
        except Exception:
            return False
        else:
            return True

    def __str__(self):
        return "\n".join(self.lines)

    def compile(
        self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None
    ):
        """ return compiled code object. if filename is None
            invent an artificial filename which displays
            the source/line position of the caller frame.
        """
        if not filename or py.path.local(filename).check(file=0):
            if _genframe is None:
                _genframe = sys._getframe(1)  # the caller
            fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
            base = "<%d-codegen " % self._compilecounter
            # Bump the class-wide counter so generated filenames stay unique.
            self.__class__._compilecounter += 1
            if not filename:
                filename = base + "%s:%d>" % (fn, lineno)
            else:
                filename = base + "%r %s:%d>" % (filename, fn, lineno)
        source = "\n".join(self.lines) + "\n"
        try:
            co = compile(source, filename, mode, flag)
        except SyntaxError:
            ex = sys.exc_info()[1]
            # re-represent syntax errors from parsing python strings
            msglines = self.lines[: ex.lineno]
            if ex.offset:
                msglines.append(" " * ex.offset + "^")
            msglines.append("(code was compiled probably from here: %s)" % filename)
            newex = SyntaxError("\n".join(msglines))
            newex.offset = ex.offset
            newex.lineno = ex.lineno
            newex.text = ex.text
            raise newex
        else:
            if flag & _AST_FLAG:
                return co
            # Register the generated source with linecache so later
            # tracebacks can display it.
            lines = [(x + "\n") for x in self.lines]
            linecache.cache[filename] = (1, None, lines, filename)
            return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0):
    """ compile the given source to a raw code object,
        and maintain an internal cache which allows later
        retrieval of the source code for the code object
        and any recursively created code objects.
    """
    if isinstance(source, ast.AST):
        # XXX should Source support having AST?
        return compile(source, filename, mode, flags, dont_inherit)
    caller_frame = sys._getframe(1)
    return Source(source).compile(filename, mode, flags,
                                  _genframe=caller_frame)
def getfslineno(obj):
    """ Return source location (path, lineno) for the given object.
    If the source cannot be determined return ("", -1)
    """
    from .code import Code

    try:
        code = Code(obj)
    except TypeError:
        # Not something Code understands: fall back to inspect.
        try:
            fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
        except TypeError:
            return "", -1

        fspath = fn and py.path.local(fn) or None
        lineno = -1
        if fspath:
            try:
                _, lineno = findsource(obj)
            except IOError:
                pass
    else:
        fspath = code.path
        lineno = code.firstlineno
    assert isinstance(lineno, int)
    return fspath, lineno
#
# helper functions
#
def findsource(obj):
    """Locate *obj*'s source and return a (Source, lineno) pair.

    On any failure (builtins, objects without files, ...) return (None, -1).
    """
    try:
        raw_lines, lineno = inspect.findsource(obj)
    except Exception:
        return None, -1
    src = Source()
    src.lines = [raw.rstrip() for raw in raw_lines]
    return src, lineno
def getsource(obj, **kwargs):
    """Return a Source object for the raw code object behind *obj*."""
    from .code import getrawcode

    obj = getrawcode(obj)
    try:
        text = inspect.getsource(obj)
    except IndentationError:
        # Seen on buggy interpreter versions; substitute a placeholder.
        text = '"Buggy python version consider upgrading, cannot get source"'
    assert isinstance(text, str)
    return Source(text, **kwargs)
def deindent(lines):
    """Strip the common leading whitespace from *lines* (a list of strings)."""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).splitlines()
def get_statement_startend2(lineno, node):
    """Return the (start, end) 0-based line span of the statement at *lineno*.

    ``end`` is ``None`` when the statement is the last one known to *node*.
    """
    import ast

    # Collect the starting line (0-based) of every statement and except
    # handler in the tree; AST line numbers are 1-based.
    values = []
    for sub in ast.walk(node):
        if isinstance(sub, (ast.stmt, ast.ExceptHandler)):
            values.append(sub.lineno - 1)
            for attr in ("finalbody", "orelse"):
                body = getattr(sub, attr, None)
                if body:
                    # Treat the finally/orelse part as its own statement.
                    values.append(body[0].lineno - 1 - 1)
    values.sort()
    insert_index = bisect_right(values, lineno)
    start = values[insert_index - 1]
    end = values[insert_index] if insert_index < len(values) else None
    return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
    """Return (astnode, start, end) for the statement containing *lineno*.

    *lineno*, *start* and *end* are 0-based indexes into ``source.lines``;
    *end* is exclusive.  The AST is parsed from *source* unless the caller
    supplies one.
    NOTE(review): the ``assertion`` parameter is accepted but unused in
    this body -- confirm whether callers still rely on it.
    """
    if astnode is None:
        content = str(source)
        # See #4260:
        # don't produce duplicate warnings when compiling source to find ast
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            astnode = compile(content, "source", "exec", _AST_FLAG)
    start, end = get_statement_startend2(lineno, astnode)
    # we need to correct the end:
    # - ast-parsing strips comments
    # - there might be empty lines
    # - we might have lesser indented code blocks at the end
    if end is None:
        end = len(source.lines)
    if end > start + 1:
        # make sure we don't span differently indented code blocks
        # by using the BlockFinder helper used which inspect.getsource() uses itself
        block_finder = inspect.BlockFinder()
        # if we start with an indented line, put blockfinder to "started" mode
        block_finder.started = source.lines[start][0].isspace()
        it = ((x + "\n") for x in source.lines[start:end])
        try:
            for tok in tokenize.generate_tokens(lambda: next(it)):
                block_finder.tokeneater(*tok)
        except (inspect.EndOfBlock, IndentationError):
            # BlockFinder signals here that the first logical block ended.
            end = block_finder.last + start
        except Exception:
            pass
        # the end might still point to a comment or empty line, correct it
        while end:
            line = source.lines[end - 1].lstrip()
            if line.startswith("#") or not line:
                end -= 1
            else:
                break
    return astnode, start, end
| {
"content_hash": "219800a2a2b5ec32f064f62150433bd1",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 84,
"avg_line_length": 31.865030674846626,
"alnum_prop": 0.5751829033500192,
"repo_name": "hackebrot/pytest",
"id": "887f323f96992f457c64ef160b8e00d8ad7b052d",
"size": "10388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/_pytest/_code/source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "1815786"
}
],
"symlink_target": ""
} |
from sitemap import NewsCategoriesSitemap, NewsSitemap
| {
"content_hash": "1889ddcfaf225f192296be585e8f76a8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 54,
"avg_line_length": 55,
"alnum_prop": 0.8909090909090909,
"repo_name": "aldryn/aldryn-news",
"id": "2980b7b3a5fb5c4188bad89080f6f6520f577fd5",
"size": "79",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aldryn_news/sitemaps/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gettext Catalog",
"bytes": "14192"
},
{
"name": "HTML",
"bytes": "4680"
},
{
"name": "Python",
"bytes": "175914"
}
],
"symlink_target": ""
} |
import unittest
from django.test import TestCase
from .test_backends import BackendTests
class TestDBBackend(BackendTests, TestCase):
    """Run the shared search-backend test suite against the database backend.

    The database backend is the lowest common denominator, so every feature
    it lacks is marked as an expected failure below.
    """

    backend_path = 'wagtail.search.backends.db'

    # -- Autocomplete is not supported --------------------------------
    @unittest.expectedFailure
    def test_autocomplete(self):
        super().test_autocomplete()

    @unittest.expectedFailure
    def test_autocomplete_not_affected_by_stemming(self):
        super().test_autocomplete_not_affected_by_stemming()

    @unittest.expectedFailure
    def test_autocomplete_uses_autocompletefield(self):
        super().test_autocomplete_uses_autocompletefield()

    @unittest.expectedFailure
    def test_autocomplete_with_fields_arg(self):
        super().test_autocomplete_with_fields_arg()

    # -- Ranking / scoring is not supported ---------------------------
    @unittest.expectedFailure
    def test_ranking(self):
        super().test_ranking()

    @unittest.expectedFailure
    def test_annotate_score(self):
        super().test_annotate_score()

    @unittest.expectedFailure
    def test_annotate_score_with_slice(self):
        super().test_annotate_score_with_slice()

    @unittest.expectedFailure
    def test_search_boosting_on_related_fields(self):
        super().test_search_boosting_on_related_fields()

    # -- Searching specific / related / callable fields is unsupported -
    @unittest.expectedFailure
    def test_search_child_class_field_from_parent(self):
        super().test_search_child_class_field_from_parent()

    @unittest.expectedFailure
    def test_search_on_related_fields(self):
        super().test_search_on_related_fields()

    @unittest.expectedFailure
    def test_search_callable_field(self):
        super().test_search_callable_field()

    # -- Matching always uses `icontains`, so partial terms match ------
    @unittest.expectedFailure
    def test_incomplete_term(self):
        super().test_incomplete_term()

    @unittest.expectedFailure
    def test_incomplete_plain_text(self):
        super().test_incomplete_plain_text()

    # -- The Boost() query class is not supported ----------------------
    @unittest.expectedFailure
    def test_boost(self):
        super().test_boost()
| {
"content_hash": "92cd501732ab331274c456538a488add",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 70,
"avg_line_length": 31.39240506329114,
"alnum_prop": 0.707258064516129,
"repo_name": "takeflight/wagtail",
"id": "f03c6fb06185e5ad9ae2ad33cde5fa94c0b916b8",
"size": "2480",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "wagtail/search/tests/test_db_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "181889"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "367981"
},
{
"name": "JavaScript",
"bytes": "255453"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "3459754"
},
{
"name": "Shell",
"bytes": "7868"
}
],
"symlink_target": ""
} |
import json
import datetime
import discord
from jshbot import utilities, configurations, plugins, logger
from jshbot.exceptions import ConfiguredBotException
from jshbot.commands import Command, Response
__version__ = '0.1.0'
# Shared exception factory for this plugin's user-facing errors.
CBException = ConfiguredBotException('Emoji updater')
# NOTE(review): presumably read by the plugin loader to enable access to
# the configurations module -- confirm against the jshbot framework.
uses_configuration = True
@plugins.command_spawner
def get_commands(bot):
    """Register the single hidden, elevated ``ude`` command."""
    command = Command('ude', elevated_level=3, hidden=True)
    return [command]
async def get_response(bot, context):
    """Rebuild the League emoji lookup table and publish it as a JSON file."""
    if 'discrank.py' not in bot.plugins:
        raise CBException("Discrank plugin not detected.")
    discrank_plugin = bot.plugins['discrank.py']
    champions = discrank_plugin.CHAMPIONS
    spells = discrank_plugin.SPELLS

    # Gather every emoji from each configured guild.
    all_emojis = []
    for guild_id in configurations.get(bot, __name__, 'guilds'):
        all_emojis.extend(bot.get_guild(guild_id).emojis)

    # Emoji are classified by their name prefix into champions, spells,
    # and red/blue ("bdt") markers; anything else is an error.
    lookup = {
        'champions': {'id': {}, 'name': {}},
        'spells': {'id': {}, 'name': {}},
        'bdt': {'blue': {}, 'red': {}}
    }
    for emoji in all_emojis:
        if emoji.name.startswith('Champion'):
            short_name = emoji.name.split('_')[1].lower()
            if short_name not in champions:
                raise CBException("Champion {} not found.".format(short_name))
            numeric_id = champions[short_name]['id']
            lookup['champions']['id'][str(numeric_id)] = str(emoji)
            lookup['champions']['name'][short_name] = str(emoji)
        elif emoji.name.startswith('Spell'):
            short_name = emoji.name.split('_')[1].lower()
            if short_name not in spells:
                raise CBException("Spell {} not found.".format(short_name))
            numeric_id = spells[short_name]['id']
            lookup['spells']['id'][str(numeric_id)] = str(emoji)
            lookup['spells']['name'][short_name] = str(emoji)
        elif emoji.name.startswith(('Red', 'Blue')):
            color, name = emoji.name.split('_')
            lookup['bdt'][color.lower()][name.lower()] = str(emoji)
        else:
            raise CBException("Invalid emoji detected: {}".format(emoji.name))

    # Upload the table to Discord and point the pinned update message at it.
    final_json = json.dumps(lookup, sort_keys=True, indent=4)
    json_file = utilities.get_text_as_file(final_json)
    file_url = await utilities.upload_to_discord(
        bot, json_file, filename='lol_emojis.json', close=True)
    embed = discord.Embed(
        description='[Click here to download]({})'.format(file_url),
        colour=discord.Colour(0x4CAF50),
        timestamp=datetime.datetime.utcnow())
    embed.set_footer(text="Updated")
    try:
        update_channel = bot.get_channel(
            configurations.get(bot, __name__, 'update_channel'))
        message_id = configurations.get(bot, __name__, 'update_message')
        update_message = await update_channel.fetch_message(message_id)
        await update_message.edit(content='', embed=embed)
    except Exception as e:
        raise CBException("Failed to edit the update message.", e=e)
    return Response(content="Updated!")
| {
"content_hash": "1f9369130514664f23d2dca492f88ccb",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 93,
"avg_line_length": 37.92307692307692,
"alnum_prop": 0.6240703177822853,
"repo_name": "jkchen2/JshBot-plugins",
"id": "cfa8360d5ca70ca7b16df5c10c21d3a511a6d3be",
"size": "2958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ude/ude.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "463597"
}
],
"symlink_target": ""
} |
"""Support for Fjäråskupan fans."""
from __future__ import annotations
from typing import Any
from fjaraskupan import (
COMMAND_AFTERCOOKINGTIMERAUTO,
COMMAND_AFTERCOOKINGTIMERMANUAL,
COMMAND_AFTERCOOKINGTIMEROFF,
COMMAND_STOP_FAN,
State,
)
from homeassistant.components.fan import FanEntity, FanEntityFeature
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from . import Coordinator, async_setup_entry_platform
# The eight discrete device speeds, in order; mapped to/from a percentage
# with ordered_list_item_to_percentage / percentage_to_ordered_list_item.
ORDERED_NAMED_FAN_SPEEDS = ["1", "2", "3", "4", "5", "6", "7", "8"]
# Preset mode identifiers exposed to Home Assistant.
PRESET_MODE_NORMAL = "normal"
PRESET_MODE_AFTER_COOKING_MANUAL = "after_cooking_manual"
PRESET_MODE_AFTER_COOKING_AUTO = "after_cooking_auto"
PRESET_MODES = [
    PRESET_MODE_NORMAL,
    PRESET_MODE_AFTER_COOKING_AUTO,
    PRESET_MODE_AFTER_COOKING_MANUAL,
]
# Device command sent when switching into each preset mode.
PRESET_TO_COMMAND = {
    PRESET_MODE_AFTER_COOKING_MANUAL: COMMAND_AFTERCOOKINGTIMERMANUAL,
    PRESET_MODE_AFTER_COOKING_AUTO: COMMAND_AFTERCOOKINGTIMERAUTO,
    PRESET_MODE_NORMAL: COMMAND_AFTERCOOKINGTIMEROFF,
}
class UnsupportedPreset(HomeAssistantError):
    """Raised when a requested fan preset mode is not supported."""
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up fan entities dynamically through discovery."""

    def _build_entities(coordinator: Coordinator) -> list[Fan]:
        # One fan entity per discovered device coordinator.
        return [Fan(coordinator, coordinator.device_info)]

    async_setup_entry_platform(
        hass, config_entry, async_add_entities, _build_entities
    )
class Fan(CoordinatorEntity[Coordinator], FanEntity):
    """Fan entity backed by a Fjäråskupan coordinator.

    Speed is tracked locally as a percentage (0 == off); preset mode is
    derived from the device's after-cooking state on each update.
    """
    _attr_supported_features = FanEntityFeature.SET_SPEED | FanEntityFeature.PRESET_MODE
    _attr_has_entity_name = True
    def __init__(
        self,
        coordinator: Coordinator,
        device_info: DeviceInfo,
    ) -> None:
        """Init fan entity."""
        super().__init__(coordinator)
        # Percentage applied by async_turn_on() when the caller gives none.
        self._default_on_speed = 25
        self._attr_unique_id = coordinator.device.address
        self._attr_device_info = device_info
        self._percentage = 0
        self._preset_mode = PRESET_MODE_NORMAL
        # Seed local state from whatever data the coordinator already holds.
        self._update_from_device_data(coordinator.data)
    async def async_set_percentage(self, percentage: int) -> None:
        """Set speed."""
        # Proactively update percentage to manage successive increases
        self._percentage = percentage
        async with self.coordinator.async_connect_and_update() as device:
            if percentage == 0:
                # 0% maps to stopping the fan entirely.
                await device.send_command(COMMAND_STOP_FAN)
            else:
                new_speed = percentage_to_ordered_list_item(
                    ORDERED_NAMED_FAN_SPEEDS, percentage
                )
                await device.send_fan_speed(int(new_speed))
    async def async_turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Turn on the fan."""
        # Fall back to the current preset and the default speed when the
        # caller does not specify them.
        if preset_mode is None:
            preset_mode = self._preset_mode
        if percentage is None:
            percentage = self._default_on_speed
        new_speed = percentage_to_ordered_list_item(
            ORDERED_NAMED_FAN_SPEEDS, percentage
        )
        async with self.coordinator.async_connect_and_update() as device:
            # Switch preset first (if it changed), then apply the speed.
            if preset_mode != self._preset_mode:
                if command := PRESET_TO_COMMAND.get(preset_mode):
                    await device.send_command(command)
                else:
                    raise UnsupportedPreset(f"The preset {preset_mode} is unsupported")
            if preset_mode == PRESET_MODE_NORMAL:
                await device.send_fan_speed(int(new_speed))
            elif preset_mode == PRESET_MODE_AFTER_COOKING_MANUAL:
                await device.send_after_cooking(int(new_speed))
            elif preset_mode == PRESET_MODE_AFTER_COOKING_AUTO:
                # Speed 0 in auto mode -- presumably lets the device pick
                # the speed itself; confirm against the fjaraskupan protocol.
                await device.send_after_cooking(0)
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        if command := PRESET_TO_COMMAND.get(preset_mode):
            async with self.coordinator.async_connect_and_update() as device:
                await device.send_command(command)
        else:
            raise UnsupportedPreset(f"The preset {preset_mode} is unsupported")
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the entity off."""
        async with self.coordinator.async_connect_and_update() as device:
            await device.send_command(COMMAND_STOP_FAN)
    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return len(ORDERED_NAMED_FAN_SPEEDS)
    @property
    def percentage(self) -> int | None:
        """Return the current speed."""
        return self._percentage
    @property
    def is_on(self) -> bool:
        """Return true if fan is on."""
        return self._percentage != 0
    @property
    def preset_mode(self) -> str | None:
        """Return the current preset mode."""
        return self._preset_mode
    @property
    def preset_modes(self) -> list[str] | None:
        """Return a list of available preset modes."""
        return PRESET_MODES
    def _update_from_device_data(self, data: State | None) -> None:
        """Handle data update."""
        if not data:
            # No device data yet: report the fan as off.
            self._percentage = 0
            return
        if data.fan_speed:
            self._percentage = ordered_list_item_to_percentage(
                ORDERED_NAMED_FAN_SPEEDS, str(data.fan_speed)
            )
        else:
            self._percentage = 0
        if data.after_cooking_on:
            # A non-zero after-cooking speed means the user chose it manually.
            if data.after_cooking_fan_speed:
                self._preset_mode = PRESET_MODE_AFTER_COOKING_MANUAL
            else:
                self._preset_mode = PRESET_MODE_AFTER_COOKING_AUTO
        else:
            self._preset_mode = PRESET_MODE_NORMAL
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle data update."""
        self._update_from_device_data(self.coordinator.data)
        self.async_write_ha_state()
| {
"content_hash": "8dab7ae51953b03834c02673173c8689",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 88,
"avg_line_length": 33.40721649484536,
"alnum_prop": 0.6350871779046443,
"repo_name": "mezz64/home-assistant",
"id": "c856a94fa074b2649dfa4444c32925268cbe4c7f",
"size": "6483",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/fjaraskupan/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import shap
import mlflow
from mlflow.tracking import MlflowClient
from mlflow.artifacts import download_artifacts
# prepare training data
X, y = load_iris(return_X_y=True, as_frame=True)
# train a model
model = RandomForestClassifier()
model.fit(X, y)
# log an explanation (SHAP values) for the model's class probabilities
with mlflow.start_run() as run:
    mlflow.shap.log_explanation(model.predict_proba, X)
# list artifacts logged under the explanation path
client = MlflowClient()
artifact_path = "model_explanations_shap"
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, artifact_path)]
print("# artifacts:")
print(artifacts)
# load back the logged explanation
dst_path = download_artifacts(run_id=run.info.run_id, artifact_path=artifact_path)
base_values = np.load(os.path.join(dst_path, "base_values.npy"))
shap_values = np.load(os.path.join(dst_path, "shap_values.npy"))
# show a force plot for the first class of the first sample
# (shap_values indexed as [class, sample, feature] -- presumably; confirm)
shap.force_plot(base_values[0], shap_values[0, 0, :], X.iloc[0, :], matplotlib=True)
| {
"content_hash": "5f54e3d196c1616fcf892af2f8a2ddcf",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 84,
"avg_line_length": 27.657894736842106,
"alnum_prop": 0.7554709800190295,
"repo_name": "mlflow/mlflow",
"id": "15934cf675f2f2c6b0187d4a5069a5054539410c",
"size": "1051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/shap/multiclass_classification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
} |
"""
Applies tonka-specific mods to CyPhyML_org.xme
..\..\externals\common-scripts\gmepy.exe xme2mga CyPhyML_org.xme
.\tonka_mods.py
..\..\externals\common-scripts\gmepy.exe mga2xme CyPhyML_org.mga
start CyPhyML_org.xme
refresh in CyPhyML.xme, etc
"""
import win32com.client
# Maps GME object paths (optionally suffixed with "|kind=...") to attribute
# overrides; string values are applied via SetStrAttrByNameDisp, booleans
# via SetBoolAttrByNameDisp (see the loop below).
mods = {
    "/@1_Component/@1x_Component/@Resource":
    { "Icon": "resource.png" },
    "/@1_Component/@4x_Port/@Connector|kind=Model":
    { "Decorator": "Mga.CPMDecorator",
      "GeneralPreferences": "fillColor = 0xdddddd\nportLabelLength=0\ntreeIcon=connector_port.png\nexpandedTreeIcon=connector_port.png",
      "Icon": "connector.png",
      "IsTypeInfoShown": False,
      "PortIcon": "connector_port.png" },
    "/@1_Component/@4x_Port/@ModelicaConnector|kind=Model":
    { "Decorator": "Mga.CPMDecorator",
      "GeneralPreferences": "fillColor = 0xdddddd\nportLabelLength=0",
      "Icon": "modelica_connector.png",
      "PortIcon": "modelica_connector_port.png",
      "IsTypeInfoShown": False },
    "/@3_Domains/@3_SolidModeling/@CADModel":
    { "Decorator": "Mga.CPMDecorator",
      "GeneralPreferences": "fillColor = 0xdddddd\nhelp=$META_DOCROOT$34d163d3-f7d6-4178-bcae-6c469f52be14.html\nportLabelLength=0",
      "Icon": "cad_model.png" },
    "/@2_ComponentBehaviorModels/@BehaviorModels/@ModelicaModel":
    { "Decorator": "Mga.CPMDecorator",
      },
    "/@1_Component/@3_Properties_Parameters/@IsProminent":
    { "BooleanDefault": False
      }
    }
# NOTE: this script is Python 2 (print statements, iteritems, basestring)
# and drives GME through its COM automation interface (pywin32).
project = win32com.client.DispatchEx("Mga.MgaProject")
project.Open("MGA=" + "CyPhyML_org.mga")
# All attribute edits happen inside a single MGA transaction.
project.BeginTransactionInNewTerr()
for kind, attrs in mods.iteritems():
    model = project.RootFolder.GetObjectByPathDisp(kind)
    print model.AbsPath + " " + kind
    for attrname, attrvalue in attrs.iteritems():
        #print model.Meta.Name
        #print [a.Name for a in model.Meta.DefinedAttributes]
        # Strings and booleans use different COM setter methods.
        if isinstance(attrvalue, basestring):
            model.SetStrAttrByNameDisp(attrname, attrvalue)
            print "  " + attrname + "=" + attrvalue
        else:
            print "  " + attrname
            model.SetBoolAttrByNameDisp(attrname, attrvalue)
project.CommitTransaction()
# Save in place (True keeps the same file).
project.Save("", True)
| {
"content_hash": "3b212fe86a5b3488a260a9f406de9868",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 133,
"avg_line_length": 37.44827586206897,
"alnum_prop": 0.679097605893186,
"repo_name": "pombredanne/metamorphosys-desktop",
"id": "26caf5cc21e7f26106a5ce7716baf79dcc37e674",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metamorphosys/META/meta/CyPhyML/tonka_mods.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10683"
},
{
"name": "Assembly",
"bytes": "117345"
},
{
"name": "Awk",
"bytes": "3591"
},
{
"name": "Batchfile",
"bytes": "228118"
},
{
"name": "BitBake",
"bytes": "4526"
},
{
"name": "C",
"bytes": "3613212"
},
{
"name": "C#",
"bytes": "11617773"
},
{
"name": "C++",
"bytes": "51448188"
},
{
"name": "CMake",
"bytes": "3055"
},
{
"name": "CSS",
"bytes": "109563"
},
{
"name": "Clojure",
"bytes": "37831"
},
{
"name": "Eagle",
"bytes": "3782687"
},
{
"name": "Emacs Lisp",
"bytes": "8514"
},
{
"name": "GAP",
"bytes": "49124"
},
{
"name": "Groff",
"bytes": "2178"
},
{
"name": "Groovy",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "4025250"
},
{
"name": "Inno Setup",
"bytes": "35715"
},
{
"name": "Java",
"bytes": "489537"
},
{
"name": "JavaScript",
"bytes": "167454"
},
{
"name": "Lua",
"bytes": "1660"
},
{
"name": "Makefile",
"bytes": "97209"
},
{
"name": "Mathematica",
"bytes": "26"
},
{
"name": "Matlab",
"bytes": "80874"
},
{
"name": "Max",
"bytes": "78198"
},
{
"name": "Modelica",
"bytes": "44541139"
},
{
"name": "Objective-C",
"bytes": "34004"
},
{
"name": "Perl",
"bytes": "19285"
},
{
"name": "PostScript",
"bytes": "400254"
},
{
"name": "PowerShell",
"bytes": "19749"
},
{
"name": "Processing",
"bytes": "1477"
},
{
"name": "Prolog",
"bytes": "3121"
},
{
"name": "Protocol Buffer",
"bytes": "58995"
},
{
"name": "Python",
"bytes": "5517835"
},
{
"name": "Ruby",
"bytes": "4483"
},
{
"name": "Shell",
"bytes": "956773"
},
{
"name": "Smarty",
"bytes": "37892"
},
{
"name": "TeX",
"bytes": "4183594"
},
{
"name": "Visual Basic",
"bytes": "22546"
},
{
"name": "XSLT",
"bytes": "332312"
}
],
"symlink_target": ""
} |
from keras.datasets import cifar10
from matplotlib import pyplot
from scipy.misc import toimage
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
# Theano-style channels-first ordering: samples are (3, 32, 32), matching
# the input_shape given to the first Conv2D layer below.
K.set_image_dim_ordering('th')
# Fix the RNG seed for reproducibility.
seed = 7
numpy.random.seed(seed)
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Scale pixel values from 0-255 into [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0
# One-hot encode the class labels.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Small CNN: two 3x3 conv layers with dropout, max-pooling, one dense
# hidden layer; weight norms capped at 3 via maxnorm constraints.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 32, 32), padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Compile model: SGD with momentum and a linear learning-rate decay.
epochs = 25
lrate = 0.01
decay = lrate/epochs
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=32)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
# Show the first nine training images in a 3x3 grid.
for i in range(0, 9):
    pyplot.subplot(330 + 1 + i)
    pyplot.imshow(toimage(X_train[i]))
pyplot.show()
"content_hash": "6f87d626ca70e3b7f7accd2cd82886d0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 119,
"avg_line_length": 32.68421052631579,
"alnum_prop": 0.7407407407407407,
"repo_name": "RichardLeeK/MachineLearning",
"id": "b537f12cafdc6f23a5a909bf48a7888031ab7704",
"size": "1863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MachineLearning/CBFV/ml/cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175155"
}
],
"symlink_target": ""
} |
import TrelloWrapper as wrapper
import unittest
class PrimesTestCase(unittest.TestCase):
    """Tests for TrelloWrapper.

    NOTE(review): the class name says "primes" but every test exercises
    the Levenshtein distance helper -- presumably a copy/paste leftover.
    """

    # --- Levenshtein distance ---------------------------------------

    def test_0_levenshtein_distance(self):
        """Is 0 successfully determined in the levenshtein distance?"""
        # Identical strings are zero edits apart.
        self.assertEqual(
            wrapper.levenshtein_distance("unito", "unito"), 0,
            "Unito and Unito should return 0")

    def test_2_levenshtein_distance(self):
        """Is 2 successfully determined in the levenshtein distance?"""
        # Delete the leading F, append an N.
        self.assertEqual(
            wrapper.levenshtein_distance("flaw", "lawn"), 2,
            "flaw and lawn should return 2")

    def test_3_levenshtein_distance(self):
        """Is 3 successfully determined in the levenshtein distance?"""
        self.assertEqual(
            wrapper.levenshtein_distance("kitten", "sitting"), 3,
            "Kitten and Sitting should return 3")
if __name__ == '__main__':
unittest.main() | {
"content_hash": "6dbe13f1ad5e777ba47b9555f43e3a8c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 35.16129032258065,
"alnum_prop": 0.6660550458715596,
"repo_name": "Mymoza/trello-similar-labels",
"id": "1834c118f51a719ab798dc9e554cf14efec3cecf",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/Test_TrelloWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5140"
}
],
"symlink_target": ""
} |
"""
DNSSEC Trust Anchor Tool (dnssec_ta_tool.py)
This tool will extract DNSSEC Trust Anchors from a Trust Anchor XML file
formatted as described in RFC 7958. Validation of the detached signature
over the Trust Anchor XML file is NOT performed by this tool.
"""
import sys
import time
import argparse
import base64
import iso8601
import xmltodict
import dns.dnssec
import dns.name
import dns.rdata
import dns.rdataclass
import dns.rdataset
import dns.resolver
import dns.rrset
# Trust-anchor XML file used when --anchors is not given on the command line.
DEFAULT_ANCHORS = 'root-anchors.xml'
def get_trust_anchors_as_ds(zone, digests, verbose):
    """Return the currently valid trust anchors as a DS RRset.

    KeyDigest entries outside their validFrom/validUntil window are skipped
    (with a warning when *verbose* is set).
    """
    now = time.time()
    active_rdatas = []
    for keydigest in digests:
        keydigest_id = keydigest['@id']
        keytag = keydigest['KeyTag']
        valid_from = keydigest.get('@validFrom')
        if valid_from is not None and now < iso8601.parse_date(valid_from).timestamp():
            if verbose:
                emit_warning('TA {} ({}) not yet valid'.format(keytag, keydigest_id))
            continue
        valid_until = keydigest.get('@validUntil')
        if valid_until is not None and now > iso8601.parse_date(valid_until).timestamp():
            if verbose:
                emit_warning('TA {} ({}) expired'.format(keytag, keydigest_id))
            continue
        if verbose:
            emit_info('TA {} ({}) valid'.format(keytag, keydigest_id))
        active_rdatas.append(ds_rdata_from_keydigest(keydigest))
    return dns.rrset.from_rdata_list(dns.name.from_text(zone), 0, active_rdatas)
def ds_rdata_from_keydigest(keydigest):
    """Build DS rdata from a parsed KeyDigest XML element."""
    fields = (keydigest['KeyTag'], keydigest['Algorithm'],
              keydigest['DigestType'], keydigest['Digest'])
    rdata_text = '{} {} {} {}'.format(*fields)
    return dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS, rdata_text)
def ds_digest_type_as_text(digest_type):
    """Return the mnemonic for a DS digest type number, or None if unknown."""
    if digest_type == 1:
        return 'SHA1'
    if digest_type == 2:
        return 'SHA256'
    return None
def dnskey_from_ds_rrset(ds_rrset, verbose):
    """Match current DNSKEY RRset with DS RRset.

    Queries the zone's live DNSKEY RRset over DNS and returns the subset of
    keys whose recomputed DS matches one of the trust-anchor DS records.
    """
    zone = ds_rrset.name
    dnskey_rrset = dns.rrset.RRset(name=zone,
                                   rdclass=dns.rdataclass.IN,
                                   rdtype=dns.rdatatype.DNSKEY)
    # Network lookup via the system resolver.
    answer = dns.resolver.query(zone, 'DNSKEY')
    for answer_rr in answer.rrset:
        if answer_rr.rdtype != dns.dnskey if False else dns.rdatatype.DNSKEY:
            continue
        if not answer_rr.flags & 0x0001:
            # Skip keys without the low flags bit (the SEP bit per RFC 4034);
            # only SEP/KSK-style keys can match a trust-anchor DS.
            continue
        dnskey_rdata = answer_rr
        for ds_rdata in ds_rrset:
            # Recompute a DS from the DNSKEY with the same digest type and
            # keep the key when it equals the trust-anchor DS.
            ds_algo = ds_digest_type_as_text(ds_rdata.digest_type)
            dnskey_as_ds = dns.dnssec.make_ds(name=zone,
                                              key=dnskey_rdata,
                                              algorithm=ds_algo)
            if dnskey_as_ds == ds_rdata:
                if verbose:
                    emit_info('DNSKEY {} found'.format(ds_rdata.key_tag))
                dnskey_rrset.add(dnskey_rdata)
            else:
                if verbose:
                    emit_warning('DNSKEY {} not found'.format(ds_rdata.key_tag))
    return dnskey_rrset
def bind_format_key(format_str, dnskey_rrset):
    """Print one *format_str* line per DNSKEY in *dnskey_rrset*.

    The format receives: owner name, flags, protocol, algorithm, and the
    base64-encoded key material.
    """
    owner = dnskey_rrset.name
    for rr in dnskey_rrset:
        key_b64 = base64.b64encode(rr.key).decode()
        print(format_str.format(owner, rr.flags, rr.protocol, rr.algorithm, key_b64))
def bind_trusted_keys(dnskey_rrset):
    """Print *dnskey_rrset* wrapped in a BIND ``trusted-keys`` block."""
    print('trusted-keys {')
    bind_format_key(' "{}" {} {} {} "{}";', dnskey_rrset)
    print('};')
def bind_managed_keys(dnskey_rrset):
    """Print *dnskey_rrset* wrapped in a BIND ``managed-keys`` block."""
    print('managed-keys {')
    bind_format_key(' "{}" initial-key {} {} {} "{}";', dnskey_rrset)
    print('};')
def emit_warning(message):
    """Write a WARNING-prefixed line to stderr."""
    text = 'WARNING: {}'.format(message)
    print(text, file=sys.stderr)
def emit_info(message):
    """Write a NOTICE-prefixed line to stderr."""
    text = 'NOTICE: {}'.format(message)
    print(text, file=sys.stderr)
def print_ds_rrset_without_ttl(ds_rrset):
    """Print DS RRset without TTL"""
    # NOTE(review): RFC 4034 presents the DS digest field as hexadecimal,
    # but this prints it base64-encoded -- tools expecting standard DS
    # presentation format will not parse this output.  Confirm whether
    # that is intentional before changing it.
    for ds_rr in ds_rrset:
        print('{} DS {} {} {} {}'.format(ds_rrset.name,
                                         ds_rr.key_tag,
                                         ds_rr.algorithm,
                                         ds_rr.digest_type,
                                         base64.b64encode(ds_rr.digest).decode()))
def print_dnskey_rrset_without_ttl(dnskey_rrset):
    """Print each DNSKEY record, omitting the TTL field."""
    owner = dnskey_rrset.name
    for rr in dnskey_rrset:
        key_b64 = base64.b64encode(rr.key).decode()
        print('{} DNSKEY {} {} {} {}'.format(
            owner, rr.flags, rr.protocol, rr.algorithm, key_b64))
def main():
    """Parse arguments, read the trust-anchor XML file, and print the
    currently valid anchors in the requested output format."""
    parser = argparse.ArgumentParser(description='DNSSEC Trust Anchor Tool')
    formats = ['ds', 'dnskey', 'bind-trusted', 'bind-managed']
    parser.add_argument("--verbose",
                        dest='verbose',
                        action='store_true',
                        help='verbose output')
    parser.add_argument("--anchors",
                        dest='anchors',
                        metavar='filename',
                        default=DEFAULT_ANCHORS,
                        help='trust anchor file (root-anchors.xml)')
    parser.add_argument("--format",
                        dest='format',
                        metavar='format',
                        default='ds',
                        choices=formats,
                        help='output format ({})'.format('|'.join(formats)))
    parser.add_argument("--output",
                        dest='output',
                        metavar='filename',
                        help='output file (stdout)')
    args = parser.parse_args()
    with open(args.anchors, 'rt') as anchors_fd:
        doc = xmltodict.parse(anchors_fd.read())
    zone = doc['TrustAnchor']['Zone']
    digests = doc['TrustAnchor']['KeyDigest']
    # xmltodict yields a bare dict (not a list) when the XML contains a
    # single KeyDigest element; normalize before processing.
    if not isinstance(digests, list):
        digests = [digests]
    ds_rrset = get_trust_anchors_as_ds(zone, digests, verbose=args.verbose)
    dnskey_rrset = None
    if args.format != 'ds':
        # Every non-DS format needs the matching live DNSKEYs.
        dnskey_rrset = dnskey_from_ds_rrset(ds_rrset, verbose=args.verbose)
    output_fd = open(args.output, 'wt') if args.output else None
    old_stdout = sys.stdout
    try:
        # BUGFIX: previously stdout was not restored (and the output file
        # not closed) if one of the printing functions raised.
        if output_fd is not None:
            sys.stdout = output_fd
        if args.format == 'ds':
            print_ds_rrset_without_ttl(ds_rrset)
        elif args.format == 'dnskey':
            print_dnskey_rrset_without_ttl(dnskey_rrset)
        elif args.format == 'bind-trusted':
            bind_trusted_keys(dnskey_rrset)
        elif args.format == 'bind-managed':
            bind_managed_keys(dnskey_rrset)
        else:
            # Unreachable in practice: argparse 'choices' rejects other values.
            raise Exception('Invalid output format')
    finally:
        sys.stdout = old_stdout
        if output_fd is not None:
            output_fd.close()
if __name__ == "__main__":
main()
| {
"content_hash": "6f651a904b728dd593a1d5b8d11ca7b4",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 89,
"avg_line_length": 33.47787610619469,
"alnum_prop": 0.5459952418715305,
"repo_name": "kirei/dnssec-ta-tools",
"id": "f61deeb2b4c042705004e5a849ebf5aefd81add4",
"size": "8926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnssec_ta_tool/dnssec_ta_tool.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "3136"
},
{
"name": "Python",
"bytes": "39973"
},
{
"name": "Shell",
"bytes": "2171"
}
],
"symlink_target": ""
} |
import cPickle
import logging
import numpy as np
import sys
import xgboost as xgb
from operator import itemgetter
logging.basicConfig(level=logging.INFO,
format="%(asctime)s [%(levelname)s]: %(message)s")
model_file = 'gbvc_model.pkl'
def train():
logging.info('loading training data')
trainf = open('feat_train/trnvld_feature.tsv')
data_train = np.array([l.rstrip().split('\t') for l in trainf.readlines()],
dtype='float32')
trainf.close()
data_train_dmat = xgb.DMatrix(data_train[:, 1:], data_train[:, 0])
del data_train
validf = open('feat_test/test_feature.tsv') # use test as valid for temp
data_valid = np.array([l.rstrip().split('\t') for l in validf.readlines()],
dtype='float32')
data_valid_dmat = xgb.DMatrix(data_valid[:, 1:], data_valid[:, 0])
del data_valid
logging.info('start training')
bst_params = {
'nthread': 4,
'silent': 1,
'eta': 0.2,
'eval_metric': ['auc', 'error'],
'max_depth': 5,
'subsample': 0.9,
'colsample_bytree': 0.9,
'objective': 'binary:logistic',
'lambda': 1.0
}
train_params = {
'params': bst_params,
'dtrain': data_train_dmat,
'num_boost_round': 1000, # max round
'evals': [(data_train_dmat, 'train'), (data_valid_dmat, 'valid_0')],
'maximize': False,
'early_stopping_rounds': 100,
'verbose_eval': True
}
mdl_bst = xgb.train(**train_params)
logging.info('Saving model')
# not use save_model mothod because it cannot dump best_iteration etc.
cPickle.dump(mdl_bst, open(model_file, 'wb'))
feat_imp = mdl_bst.get_score(importance_type='gain').items()
print sorted(feat_imp, key=itemgetter(1), reverse=True)[0:20]
def test():
testf = open('feat_test/test_feature.tsv')
data_test = np.array([l.rstrip().split('\t') for l in testf.readlines()],
dtype='float32')
x_test = data_test[:, 1:]
y_test = data_test[:, 0]
# init gbt
mdl_bst = cPickle.load(open(model_file, 'rb'))
mdl_bst.set_param('nthread', 8)
mdl_bst.set_param('eval_metric', 'auc')
mdl_bst.set_param('eval_metric', 'error') # add new metric
test_res = mdl_bst.eval_set([(xgb.DMatrix(x_test, y_test), 'test_0')])
print test_res
if __name__ == '__main__':
    # Train the model, then immediately evaluate the freshly pickled model.
    train()
    test()
| {
"content_hash": "a2507c80482a0c77571d61c2d77b1f7b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.5773322422258592,
"repo_name": "kn45/ClickBaits",
"id": "ecaac769a918594f10e6d77a08a586ce1f6b5774",
"size": "2470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_gbvc/4_Train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10934"
},
{
"name": "Shell",
"bytes": "2731"
}
],
"symlink_target": ""
} |
from go.conversation.view_definition import ConversationViewDefinitionBase
class ConversationViewDefinition(ConversationViewDefinitionBase):
    """Conversation view definition that relies entirely on the base
    class's default behaviour; nothing is customised here."""
    pass
| {
"content_hash": "18b928d9eb2e08ce608a4b7013d465a0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 74,
"avg_line_length": 30.4,
"alnum_prop": 0.875,
"repo_name": "praekelt/vumi-go",
"id": "c2a19fdd4225e2d3f003cbce4bdc6186624a6ece",
"size": "152",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "go/apps/opt_out/view_definition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "154585"
},
{
"name": "HTML",
"bytes": "158025"
},
{
"name": "JavaScript",
"bytes": "446112"
},
{
"name": "Python",
"bytes": "2738963"
},
{
"name": "Shell",
"bytes": "6799"
}
],
"symlink_target": ""
} |
import unittest
class FakeTest(unittest.TestCase):
    """Placeholder case so the test runner always finds at least one test."""

    def test_true(self):
        # Trivially-true assertion; exists only so the suite has a passing test.
        self.assertTrue(True)
| {
"content_hash": "dd59130a03adb7f3a54c242a611fdbf2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 34,
"avg_line_length": 15.571428571428571,
"alnum_prop": 0.6972477064220184,
"repo_name": "citrix-openstack-build/neutron-lbaas",
"id": "1ddb6e95555a92908d02df1bde9396124078c603",
"size": "728",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron_lbaas/tests/unit/test_true.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716662"
}
],
"symlink_target": ""
} |
import os
import setuptools
# third party libraries
pass
# first party libraries
pass
project_name = 'tiny_id'
author = 'Brian J Petersen'
author_email = None
def load_file(fname, default=None):
    """Return the text contents of *fname*, or *default* if it cannot be read.

    Used to pull README/HISTORY/VERSION/etc. into setup() without failing
    when one of the files is absent.
    """
    try:
        with open(fname, 'r') as f:
            d = f.read()
    except (IOError, OSError):
        # Only swallow file-access errors; the old bare `except:` hid
        # every exception type, including programming errors.
        d = default
    return d
readme = load_file('README.md', '')
history = load_file('HISTORY.md', '')
version = load_file('VERSION', None)
license = load_file('LICENSE', None)  # NOTE: shadows the builtin; kept as a setup() kwarg
roadmap = load_file('TODO.md', '')

# Fix: the original asserts compared against the ValueError *class*
# ("x is not ValueError"), which is always true and therefore never fired.
# The intent is clearly to require these fields to be filled in.
assert project_name is not None, 'Please name your project.'
assert author is not None, 'Please define the author\'s name.'

# Only ship the VERSION file with the package when it exists.
if version is None:
    package_data = {}
else:
    package_data = {project_name: ['../VERSION', ]}

setuptools.setup(
    name = project_name,
    version = version,
    description = readme,
    long_description = readme + '\n\n' + history + '\n\n' + roadmap,
    license = license,
    author = author,
    author_email = author_email,
    packages = setuptools.find_packages(),
    package_data = package_data,
)
| {
"content_hash": "06eaa9019a04695411ac766217baa6f0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 23.886363636363637,
"alnum_prop": 0.6470028544243578,
"repo_name": "brianjpetersen/tiny_id",
"id": "6258c2e77a9a787bbd83de624a1b20f3316aece2",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3921"
}
],
"symlink_target": ""
} |
"""
This module implements a clustering approach for OCLC numbers based on
HathiTrust data and data acquired via the OCLC Translation Table.
This version is a re-write to try and speedup the algorithm.
v.2 - removing 'rights' queries, farm out cluster_oclc and cluster_htitem_jn
tables to core memory.
v.3 - re-implementation of the fundamental aggregation loop
"""
import sys, re
import phdb_utils
import MySQLdb
import datetime
VERBOSE = 0
NOW = datetime.datetime(2013, 4, 2)
def get_password_from_file(fn):
    """Return the first line of *fn* with trailing whitespace stripped.

    Used to read the database password from a protected file.
    """
    # 'with' closes the handle; the old code used the py2-only file()
    # constructor and leaked it.
    with open(fn) as infile:
        return infile.readline().rstrip()
def get_connection():
    """Open and return a MySQLdb connection to the ht_repository database.

    The password is read from a protected file in AFS.  Exits the whole
    process if the connection cannot be established.
    """
    # open DB connection
    pw = get_password_from_file('/afs/umich.edu/user/p/u/pulintz/etc/.pw/ht_repository')
    try:
        conn = MySQLdb.connect (host = "mysql-htdev",
                                port=3306,
                                user = "ht_repository",
                                passwd = pw,
                                db = "ht_repository")
    except MySQLdb.Error, e:
        print "Couldn't get connection."
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit()
    return conn
def run_list_query(cursor, query):
    """ Generic query runner, appropriate for queries that return
    simple lists.

    Returns the first column of every result row as a list (empty when the
    query matched nothing); exits the process on a MySQL error.
    """
    if VERBOSE:
        print query
    try:
        cursor.execute(query)
    except MySQLdb.Error, e:
        print "[run_list_query] Error %d: %s" % (e.args[0], e.args[1])
        print "Exiting..."
        sys.exit(1)
    items = []
    if (cursor.rowcount > 0):
        while(1):
            row = cursor.fetchone()
            if row == None:
                break
            items.append(row[0])  # keep only the first column of each row
    return items
def run_single_query(cursor, query):
    """ Generic query runner, for a single result row.

    Returns the first column of the first result row; exits the process
    on a MySQL error (or if the query returns no row at all, since the
    None row would raise inside the try block).
    """
    if VERBOSE:
        print query
    try:
        cursor.execute(query)
        row = cursor.fetchone()
        result = row[0]
    except MySQLdb.Error, e:
        print "[run_single_query] Error %d: %s" % (e.args[0], e.args[1])
        print "Exiting..."
        sys.exit()
    return result
def create_cluster(ccursor, ocns, vol_id):
    """ Creates a new cluster and populates tables appropriately.

    Inserts a holdings_cluster row keyed on the first OCN, registers every
    OCN in the in-memory Cluster_oclc_d / Oclc_cluster_d maps, links the
    volume in holdings_cluster_htitem_jn and returns the new cluster id.
    Exits the process if an OCN already belongs to another cluster
    (presumably callers have aggregated/merged first -- see cluster_main).
    """
    # insert into cluster, get id
    ocn0 = ocns[0]
    query2 = "INSERT INTO holdings_cluster (cost_rights_id, osici, last_mod) VALUES (0, '%s', '%s')" % (ocn0, NOW)
    try:
        if VERBOSE:
            print query2
        ccursor.execute(query2)
        pkid = int(ccursor.lastrowid)  # auto-increment id of the new cluster
        #if ((pkid%1000) == 0):
        #    print "Creating cluster ID %i" % pkid
        #cconn.commit()
    except MySQLdb.Error, e:
        print "[create_cluster] Error %d: %s" % (e.args[0], e.args[1])
        print "Exiting..."
        sys.exit()
    # insert OCNs into cluster_oclc tables
    for nocn in ocns:
        oc = int(nocn)
        if (pkid in Cluster_oclc_d):
            #print "Adding %i to Cluster_oclc_d[%i]" % (oc, pkid)
            Cluster_oclc_d[pkid].add(oc)
            #print "Cluster_oclc_d = %s" % Cluster_oclc_d
        else:
            #print "Adding %i to Cluster_oclc_d[%i]" % (oc, pkid)
            Cluster_oclc_d[pkid] = set([oc])
            #print "Cluster_oclc_d = %s" % Cluster_oclc_d
        if (Oclc_cluster_d.has_key(oc)):
            # Invariant violation: this OCN is already in another cluster.
            #print "Adding %i to Oclc_cluster_d[%i]" % (pkid, oc)
            #print Oclc_cluster_d
            #print "Len: %i" % len(Oclc_cluster_d[oc])
            print "2 cluster oclc: vid=%s, oc=%i, pkid=%i, cid=%s" % (vol_id, oc, pkid, Oclc_cluster_d[oc])
            sys.exit()
        else:
            Oclc_cluster_d[oc] = pkid
            #print "Adding %i to Oclc_cluster_d[%i]" % (pkid, oc)
            #print "Oclc_cluster_d = %s" % Oclc_cluster_d
    # insert volume_id into cluster_htitem_jn
    query4 = """ INSERT INTO holdings_cluster_htitem_jn (cluster_id, volume_id)
                 VALUES (%s, '%s') """ % (pkid, vol_id)
    try:
        if VERBOSE:
            print query4
        ccursor.execute(query4)
    except MySQLdb.Error, e:
        print "[create_cluster] Error %d: %s" % (e.args[0], e.args[1])
        print "Exiting..."
        sys.exit()
    #cconn.commit()
    #cconn.close()
    return pkid
def merge_clusters(cid1, cid2):
    """ Merges clusters together. Uses c1's id, adds
    c2 OCNs and volume_ids to c1, resolves rights, deletes c2 entries from
    tables.

    Works on both the database (holdings_cluster_htitem_jn /
    holdings_cluster) and the in-memory maps (Cluster_oclc_d /
    Oclc_cluster_d).  Opens its own connection.
    """
    #if VERBOSE:
    print "Merging '%i'->'%i'" % (cid2, cid1)
    lconn = get_connection()
    lcursor = lconn.cursor()
    # get volume_ids
    queryc1a = "SELECT volume_id FROM holdings_cluster_htitem_jn WHERE cluster_id = %s" % cid1
    queryc2a = "SELECT volume_id FROM holdings_cluster_htitem_jn WHERE cluster_id = %s" % cid2
    c1vids = run_list_query(lcursor, queryc1a)
    c2vids = run_list_query(lcursor, queryc2a)
    # insert c2 vol_ids into c1 (skipping volumes c1 already has)
    for vid in c2vids:
        if not (vid in c1vids):
            mcquery2 = """ INSERT INTO holdings_cluster_htitem_jn (cluster_id, volume_id)
                           VALUES (%s, '%s') """ % (cid1, vid)
            try:
                if VERBOSE:
                    print mcquery2
                lcursor.execute(mcquery2)
                lconn.commit()
            except MySQLdb.Error, e:
                print "[merge_clusters 1] Error %d: %s" % (e.args[0], e.args[1])
                print "Exiting..."
                sys.exit(1)
    # insert c2 OCNs into c1 (in-memory maps only)
    c2ocns = Cluster_oclc_d[cid2]
    for ocn in c2ocns:
        Cluster_oclc_d[cid1].add(ocn)
        Oclc_cluster_d[ocn] = cid1
    # delete c2
    del Cluster_oclc_d[cid2]
    mcquery5a = "DELETE FROM holdings_cluster_htitem_jn WHERE cluster_id = %s" % cid2
    mcquery5c = "DELETE FROM holdings_cluster WHERE cluster_id = %s" % cid2
    try:
        lcursor.execute(mcquery5a)
        lcursor.execute(mcquery5c)
        lconn.commit()
    except MySQLdb.Error, e:
        print "[merge_clusters 3] Error %d: %s" % (e.args[0], e.args[1])
        print "Exiting..."
        sys.exit(1)
    lconn.commit()
    lconn.close()
def cluster_main():
    """ main routine to create PHDB clusters.

    Takes no arguments; opens its own connection.  For every HathiTrust
    volume: fetch its OCLC numbers, attach the volume to the lowest
    pre-existing cluster any of those OCNs belongs to (merging the rest
    into it), or create a fresh cluster when none match.  Dumps the
    in-memory Cluster_oclc_d map to a flatfile every 100k volumes and at
    the end.
    """
    conn = get_connection()
    cursor = conn.cursor()
    ### outer loop over all volume_ids ###
    print "Grabbing volume_ids..."
    query1 = "SELECT DISTINCT(volume_id) FROM holdings_htitem"
    all_vids = run_list_query(cursor, query1)
    print "%i ids received..." % len(all_vids)
    # remove previously run vids
    #vol_ids = remove_vids_from_list(all_vids)
    viter = 0
    for vid in all_vids:
        viter += 1
        if (len(vid)<3):
            # Too short to be a real volume_id; skip.
            print "skipping: '%s'" % vid
            continue
        # skip already-run volume_ids.
        #if vid in Volid_cluster_d:
        #    continue
        ## get the OCNs for each volume_id ##
        query3 = "SELECT oclc FROM holdings_htitem_oclc WHERE volume_id = '%s'" % vid
        ocns = run_list_query(cursor, query3)
        # skip htitems with no oclc number
        if (len(ocns) == 0):
            continue
        # are any OCNs already participating in other clusters? #
        pclusters = set([])
        for ocn in ocns:
            if (Oclc_cluster_d.has_key(ocn)):
                cid = Oclc_cluster_d[ocn]
                pclusters.add(cid)
        # if yes, aggregate
        if (len(pclusters)>0):
            # add current volume_id to lowest matching cluster number
            cids = list(pclusters)
            cids.sort()
            lcid = cids.pop(0)  # root cluster = lowest id
            query4 = """ INSERT INTO holdings_cluster_htitem_jn (cluster_id, volume_id)
                         VALUES (%s, '%s') """ % (lcid, vid)
            try:
                if VERBOSE:
                    print query4
                cursor.execute(query4)
            except MySQLdb.Error, e:
                print "[create_cluster] Error %d: %s" % (e.args[0], e.args[1])
                print "Exiting..."
                sys.exit()
            # add all OCNs to lowest matching cluster number
            for ocn in ocns:
                Oclc_cluster_d[ocn] = lcid
                Cluster_oclc_d[lcid].add(ocn)
            # merge remaining clusters into root cluster
            while (len(cids)>0):
                cid = int(cids.pop())
                # merge the cid with lcid
                merge_clusters(lcid, cid)
        else:
            # make new cluster
            #create_cluster(ocns, vid)
            create_cluster(cursor, ocns, vid)
        conn.commit()
        # export data struct every 100k
        if ((viter % 100000)==0):
            dump_data_structure(Cluster_oclc_d, "/afs/umich.edu/user/p/u/pulintz/Code/PHDB/cluster_oclc_d.out.tsv")
    conn.commit()
    conn.close()
    print "dumping final data structure"
    dump_data_structure(Cluster_oclc_d, "/afs/umich.edu/user/p/u/pulintz/Code/PHDB/cluster_oclc_d.finalout.tsv")
def remove_vids_from_list(vids):
    """Remove volume_ids already present in Volid_cluster_d from *vids*,
    in place, and return the (mutated) list."""
    print("removing previously run volume_ids from run.")
    # Iterate over a snapshot: the old code removed items from the list it
    # was iterating over, which silently skips the element following each
    # removal, leaving some already-run volume_ids in the list.
    for vid in list(vids):
        if (vid in Volid_cluster_d):
            vids.remove(vid)
    return vids
def dump_table(table, outfn):
""" dumps a flatfile of the specified query. Useful to get around the file permissions
problems for the DB servers using SELECT INTO OUTFILE. """
conn = get_connection()
cursor = conn.cursor()
outfile = file(outfn, "w")
query = "select * from %s" % table
try:
cursor.execute(query)
except MySQLdb.Error, e:
print "[run_list_query] Error %d: %s" % (e.args[0], e.args[1])
print "Exiting..."
sys.exit(1)
if (cursor.rowcount > 0):
while(1):
row = cursor.fetchone()
if row == None:
break
outline = "%s,%s,%s\n" % (row[0], row[1], row[2])
#print outline
outfile.write(outline)
conn.close()
def dump_data_structure(dstruct, outfn):
    """ Exports one of the table data structures to a flatfile. Structs are
    hashes of lists (sets).

    One "key<TAB>value" line per contained value.
    """
    # 'with' replaces the leaked py2-only file() handle and guarantees the
    # buffer is flushed; items() behaves like the old iteritems() here.
    with open(outfn, 'w') as outfile:
        for k, v in dstruct.items():
            for val in v:
                outline = "%s\t%s\n" % (k, val)
                outfile.write(outline)
def load_cluster_htitems_flatfile(filen):
    """ loads data from a flatfile into data structures.

    Expects "cluster_id,volume_id,..." CSV rows and populates the module
    maps Volid_cluster_d (volume_id -> [cluster_id]) and Cluster_volid_d
    (cluster_id -> [volume_id]).
    """
    # 'with' replaces the leaked py2-only file() handle.
    with open(filen) as dfile:
        for row in dfile:
            bits = row.split(',')
            clust_id = int(bits[0])
            vol_id = bits[1].strip()
            # NOTE(review): a repeated vol_id resets its list here, keeping
            # only the last cluster -- presumably volume_ids are unique in
            # the dump; confirm before relying on this.
            Volid_cluster_d[vol_id] = []
            Volid_cluster_d[vol_id].append(clust_id)
            if (clust_id in Cluster_volid_d):
                Cluster_volid_d[clust_id].append(vol_id)
            else:
                Cluster_volid_d[clust_id] = []
                Cluster_volid_d[clust_id].append(vol_id)
if __name__ == '__main__':
    #from sys import argv
    ## one-off: dumping the htitem_oclc table
    #dump_table("cluster", "/htapps/pulintz.babel/phdb/cluster.csv.20110602")
    #sys.exit()
    ### Main Clustering Routine ###
    ## load data structures ##
    # Module-level lookup tables shared by the clustering routines:
    #   Volid_cluster_d : volume_id -> [cluster_id]
    #   Cluster_volid_d : cluster_id -> [volume_id]
    #   Cluster_oclc_d  : cluster_id -> set(ocn)
    #   Oclc_cluster_d  : ocn -> cluster_id
    Volid_cluster_d = {}
    Cluster_volid_d = {}
    Cluster_oclc_d = {}
    Oclc_cluster_d = {}
    # read cluster_htitem_jn from flatfile (for those already generated).
    #clhtfn = "/htapps/pulintz.babel/phdb/cluster_htitem_jn.csv"
    #load_cluster_htitems_flatfile(clhtfn)
    #print "done making data structures.\n"
    ## end load data structures ##
    # remember to set NOW variable at beginning of file
    cluster_main()
    print "done."
| {
"content_hash": "8341d03e33decbfcfcaa1c194c20df73",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 115,
"avg_line_length": 32.87818696883853,
"alnum_prop": 0.5496295019817335,
"repo_name": "mlibrary/phdb",
"id": "955b2d7ca65081383cc7ae9296f0d8e74f350cd4",
"size": "11628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/cluster_oclc3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30831"
},
{
"name": "Ruby",
"bytes": "247275"
}
],
"symlink_target": ""
} |
class Invertible(object):
    r"""
    Mix-in for invertible transforms. Provides an interface for taking the
    `pseudo` or true inverse of a transform.

    Has to be implemented in conjunction with :map:`Transform`.
    """
    @property
    def has_true_inverse(self):
        r"""
        ``True`` if the pseudoinverse is an exact inverse.

        :type: `bool`
        """
        # Abstract: concrete transforms must override.
        raise NotImplementedError()

    def pseudoinverse(self):
        r"""
        The pseudoinverse of the transform - that is, the transform that
        results from swapping `source` and `target`, or more formally, negating
        the transforms parameters. If the transform has a true inverse this
        is returned instead.

        :type: ``type(self)``
        """
        # Abstract: concrete transforms must override.
        raise NotImplementedError()
class VInvertible(Invertible):
    r"""
    Mix-in for :map:`Vectorizable` :map:`Invertible` :map:`Transform` s.

    Prefer this mix-in over :map:`Invertible` if the :map:`Transform` in
    question is :map:`Vectorizable` as this adds :meth:`from_vector` variants
    to the :map:`Invertible` interface. These can be tuned for performance,
    and are, for instance, needed by some of the machinery of fit.
    """
    def pseudoinverse_vector(self, vector):
        r"""
        The vectorized pseudoinverse of a provided vector instance.

        Equivalent to rebuilding the transform from ``vector``, taking its
        pseudoinverse, and flattening the result back out::

            self.from_vector(vector).pseudoinverse().as_vector()

        Subclasses may override this when the intermediate object creation
        can be avoided for speed.

        Parameters
        ----------
        vector : ``(n_parameters,)`` `ndarray`
            A vectorized version of ``self``

        Returns
        -------
        pseudoinverse_vector : ``(n_parameters,)`` `ndarray`
            The pseudoinverse of the vector provided
        """
        rebuilt = self.from_vector(vector)
        inverse = rebuilt.pseudoinverse()
        return inverse.as_vector()
| {
"content_hash": "d5cad216a28c4c3c6d8c0849b05e4d35",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 32.94827586206897,
"alnum_prop": 0.6232339089481946,
"repo_name": "grigorisg9gr/menpo",
"id": "1e51d368ff947e478dff28f5b39d3ff6e7851eb2",
"size": "1913",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "menpo/transform/base/invertible.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "63587"
},
{
"name": "C++",
"bytes": "47235"
},
{
"name": "Makefile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "1642052"
}
],
"symlink_target": ""
} |
import logging
import time
from .logger import setup_logger, setup_boto_logger
from .dest import S3Dest
class Base:
    """Template for an ETL job: pull objects from ``self.source``,
    transform each one, and write the result to a destination (S3 by
    default).  Subclasses supply ``source`` and ``transform``."""

    def __init__(self, **kwargs):
        self.args = kwargs
        setup_boto_logger()
        self.logger = setup_logger(__name__, logging.INFO)
        # A subclass may already have provided its own destination.
        if not hasattr(self, 'dest'):
            self.dest = S3Dest()

    def run(self, dry=False):
        """Process every source object, collecting per-file timings.

        Raises ``RuntimeError`` when the source yields nothing.  When
        *dry* is true the transformed data is not written out.
        """
        found_any = False
        timings = []
        for obj, data in self.source.get(dest=self.dest, **self.args):
            found_any = True
            self.logger.info("Start transform: %s" % obj.key)
            began = time.time()
            result = self.transform(data)
            self.logger.info("Finish transform: %s" % obj.key)
            timings.append({
                'time': time.time() - began,
                'row': len(data),
            })
            if not dry:
                self.dest.put(result, obj)
        if not found_any:
            raise RuntimeError(
                "source does not exist: `%s`" % self.source.prefix
            )
        self.show_stats(timings)

    def show_stats(self, stats):
        """Log totals plus average transform time per file and per row."""
        if not stats:
            return
        file_count = len(stats)
        self.logger.info('')
        self.logger.info('%d rows processed (%d files)' % (
            sum(stat['row'] for stat in stats),
            file_count
        ))
        self.logger.info('transform / file: %.3fs' % (
            sum(stat['time'] for stat in stats) / file_count
        ))
        self.logger.info('transform / row: %.3fs' % (
            sum(stat['time'] / stat['row'] for stat in stats) / file_count
        ))
| {
"content_hash": "91dd9cccc8a32b3fa941cf5f7338ff63",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 74,
"avg_line_length": 26.639344262295083,
"alnum_prop": 0.5132307692307693,
"repo_name": "uiureo/demae",
"id": "5a0343ba2dbb17f787d065f8028804136897e5f6",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demae/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1734"
},
{
"name": "Python",
"bytes": "17745"
}
],
"symlink_target": ""
} |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this blob except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import uuid
import random
import io
import os
import time
from azure.storage.blob import (
ContentSettings,
SequenceNumberAction,
)
class PageBlobSamples():
    def __init__(self, account):
        """Store the storage *account* the samples will run against."""
        self.account = account
    def run_all_samples(self):
        """Create the page-blob service and execute every sample in turn."""
        self.service = self.account.create_page_blob_service()

        self.delete_blob()
        self.blob_metadata()
        self.blob_properties()
        self.blob_exists()
        self.copy_blob()
        self.snapshot_blob()
        self.lease_blob()

        self.create_blob()
        self.page_operations()
        self.resize_blob()
        self.set_sequence_number()

        self.blob_with_bytes()
        self.blob_with_stream()
        self.blob_with_path()
def _get_resource_reference(self, prefix):
return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
    def _get_blob_reference(self, prefix='blob'):
        """Return a unique blob name beginning with *prefix*."""
        return self._get_resource_reference(prefix)
    def _create_blob(self, container_name, prefix='blob'):
        """Create an empty 512-byte page blob in *container_name*; return its name."""
        blob_name = self._get_resource_reference(prefix)
        self.service.create_blob(container_name, blob_name, 512)
        return blob_name
    def _create_container(self, prefix='container'):
        """Create a uniquely named container; return its name."""
        container_name = self._get_resource_reference(prefix)
        self.service.create_container(container_name)
        return container_name
def _get_random_bytes(self, size):
rand = random.Random()
result = bytearray(size)
for i in range(size):
result[i] = rand.randint(0, 255)
return bytes(result)
    def delete_blob(self):
        """Sample: delete a page blob."""
        container_name = self._create_container()
        blob_name = self._create_blob(container_name)

        # Basic
        self.service.delete_blob(container_name, blob_name)

        self.service.delete_container(container_name)
    def blob_metadata(self):
        """Sample: set, get, replace and clear blob metadata."""
        container_name = self._create_container()
        blob_name = self._create_blob(container_name)
        metadata = {'val1': 'foo', 'val2': 'blah'}

        # Basic
        self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
        metadata = self.service.get_blob_metadata(container_name, blob_name)  # metadata={'val1': 'foo', 'val2': 'blah'}

        # Replaces values, does not merge
        metadata = {'new': 'val'}
        self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
        metadata = self.service.get_blob_metadata(container_name, blob_name)  # metadata={'new': 'val'}

        # Capital letters
        metadata = {'NEW': 'VAL'}
        self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
        metadata = self.service.get_blob_metadata(container_name, blob_name)  # metadata={'new': 'VAL'}

        # Clearing
        self.service.set_blob_metadata(container_name, blob_name)
        metadata = self.service.get_blob_metadata(container_name, blob_name)  # metadata={}

        self.service.delete_container(container_name)
    def blob_properties(self):
        """Sample: set/get content settings and read blob properties."""
        container_name = self._create_container()
        blob_name = self._get_blob_reference()
        metadata = {'val1': 'foo', 'val2': 'blah'}
        self.service.create_blob(container_name, blob_name, 512, metadata=metadata)

        settings = ContentSettings(content_type='html', content_language='fr')

        # Basic
        self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
        blob = self.service.get_blob_properties(container_name, blob_name)
        content_language = blob.properties.content_settings.content_language  # fr
        content_type = blob.properties.content_settings.content_type  # html
        content_length = blob.properties.content_length  # 512

        # Metadata
        # Can't set metadata, but get will return metadata already on the blob
        blob = self.service.get_blob_properties(container_name, blob_name)
        metadata = blob.metadata  # metadata={'val1': 'foo', 'val2': 'blah'}

        # Replaces values, does not merge
        settings = ContentSettings(content_encoding='utf-8')
        self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
        blob = self.service.get_blob_properties(container_name, blob_name)
        content_encoding = blob.properties.content_settings.content_encoding  # utf-8
        content_language = blob.properties.content_settings.content_language  # None

        self.service.delete_container(container_name)
    def blob_exists(self):
        """Sample: check blob existence before and after creation."""
        container_name = self._create_container()
        blob_name = self._get_blob_reference()

        # Basic
        exists = self.service.exists(container_name, blob_name)  # False
        self.service.create_blob(container_name, blob_name, 512)
        exists = self.service.exists(container_name, blob_name)  # True

        self.service.delete_container(container_name)
def copy_blob(self):
container_name = self._create_container()
source_blob_name = self._create_blob(container_name)
# Basic
# Copy the blob from the directory to the root of the container
source = self.service.make_blob_url(container_name, source_blob_name)
copy = self.service.copy_blob(container_name, 'blob1copy', source)
# Poll for copy completion
while copy.status != 'success':
count = count + 1
if count > 5:
print('Timed out waiting for async copy to complete.')
time.sleep(30)
copy = self.service.get_blob_properties(container_name, 'blob1copy').properties.copy
# With SAS from a remote account to local blob
# Commented out as remote container, directory, blob, and sas would need to be created
'''
source_blob_url = self.service.make_blob_url(
remote_container_name,
remote_blob_name,
sas_token=remote_sas_token,
)
copy = self.service.copy_blob(destination_containername,
destination_blob_name,
source_blob_url)
'''
# Abort copy
# Commented out as this involves timing the abort to be sent while the copy is still running
# Abort copy is useful to do along with polling
# self.service.abort_copy_blob(container_name, blob_name, copy.id)
self.service.delete_container(container_name)
    def snapshot_blob(self):
        """Sample: snapshot a blob, with and without explicit metadata."""
        container_name = self._create_container()
        base_blob_name = self._create_blob(container_name)

        # Basic
        snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name)
        snapshot_id = snapshot_blob.snapshot

        # Set Metadata (otherwise metadata will be copied from base blob)
        metadata = {'val1': 'foo', 'val2': 'blah'}
        snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name, metadata=metadata)
        snapshot_id = snapshot_blob.snapshot

        self.service.delete_container(container_name)
    def lease_blob(self):
        """Sample: acquire, renew, change, release and break blob leases."""
        container_name = self._create_container()
        blob_name1 = self._create_blob(container_name)
        blob_name2 = self._create_blob(container_name)
        blob_name3 = self._create_blob(container_name)

        # Acquire
        # Defaults to infinite lease
        infinite_lease_id = self.service.acquire_blob_lease(container_name, blob_name1)

        # Acquire
        # Set lease time, may be between 15 and 60 seconds
        fixed_lease_id = self.service.acquire_blob_lease(container_name, blob_name2, lease_duration=30)

        # Acquire
        # Proposed lease id
        proposed_lease_id_1 = '55e97f64-73e8-4390-838d-d9e84a374321'
        modified_lease_id = self.service.acquire_blob_lease(container_name,
                                                            blob_name3,
                                                            proposed_lease_id=proposed_lease_id_1,
                                                            lease_duration=30)
        modified_lease_id  # equal to proposed_lease_id_1

        # Renew
        # Resets the 30 second lease timer
        # Note that the lease may be renewed even if it has expired as long as
        # the container has not been leased again since the expiration of that lease
        self.service.renew_blob_lease(container_name, blob_name3, proposed_lease_id_1)

        # Change
        # Change the lease ID of an active lease.
        proposed_lease_id_2 = '55e97f64-73e8-4390-838d-d9e84a374322'
        self.service.change_blob_lease(container_name, blob_name3, modified_lease_id,
                                       proposed_lease_id=proposed_lease_id_2)

        # Release
        # Releasing the lease allows another client to immediately acquire the
        # lease for the container as soon as the release is complete.
        self.service.release_blob_lease(container_name, blob_name3, proposed_lease_id_2)

        # Break
        # A matching lease ID is not required.
        # By default, a fixed-duration lease breaks after the remaining lease period
        # elapses, and an infinite lease breaks immediately.
        infinite_lease_break_time = self.service.break_blob_lease(container_name, blob_name1)
        infinite_lease_break_time  # 0

        # Break
        # By default this would leave whatever time remained of the 30 second
        # lease period, but a break period can be provided to indicate when the
        # break should take affect
        lease_break_time = self.service.break_blob_lease(container_name, blob_name2, lease_break_period=10)
        lease_break_time  # 10

        self.service.delete_container(container_name)
    def blob_with_bytes(self):
        """Sample: upload/download byte data — basic, ranged, with content
        settings/metadata, with progress callbacks, and in parallel."""
        container_name = self._create_container()

        # Basic
        data = self._get_random_bytes(1024)
        blob_name = self._get_blob_reference()
        self.service.create_blob_from_bytes(container_name, blob_name, data)
        blob = self.service.get_blob_to_bytes(container_name, blob_name)
        content = blob.content  # data

        # Download range
        blob = self.service.get_blob_to_bytes(container_name, blob_name,
                                              start_range=3, end_range=10)
        content = blob.content  # data from 3-10

        # Upload from index in byte array
        blob_name = self._get_blob_reference()
        self.service.create_blob_from_bytes(container_name, blob_name, data, index=512)

        # Content settings, metadata
        settings = ContentSettings(content_type='html', content_language='fr')
        metadata={'val1': 'foo', 'val2': 'blah'}
        blob_name = self._get_blob_reference()
        self.service.create_blob_from_bytes(container_name, blob_name, data,
                                            content_settings=settings,
                                            metadata=metadata)
        blob = self.service.get_blob_to_bytes(container_name, blob_name)
        metadata = blob.metadata  # metadata={'val1': 'foo', 'val2': 'blah'}
        content_language = blob.properties.content_settings.content_language  # fr
        content_type = blob.properties.content_settings.content_type  # html

        # Progress
        # Use slightly larger data so the chunking is more visible
        data = self._get_random_bytes(8 * 1024 *1024)

        def upload_callback(current, total):
            print('({}, {})'.format(current, total))

        def download_callback(current, total):
            print('({}, {}) '.format(current, total))

        blob_name = self._get_blob_reference()
        print('upload: ')
        self.service.create_blob_from_bytes(container_name, blob_name, data,
                                            progress_callback=upload_callback)

        print('download: ')
        blob = self.service.get_blob_to_bytes(container_name, blob_name,
                                              progress_callback=download_callback)

        # Parallelism
        blob_name = self._get_blob_reference()
        self.service.create_blob_from_bytes(container_name, blob_name, data,
                                            max_connections=2)
        blob = self.service.get_blob_to_bytes(container_name, blob_name,
                                              max_connections=2)

        self.service.delete_container(container_name)
    def blob_with_stream(self):
        """Sample: upload/download blob data via file-like streams."""
        container_name = self._create_container()

        # Basic
        input_stream = io.BytesIO(self._get_random_bytes(512))
        output_stream = io.BytesIO()
        blob_name = self._get_blob_reference()
        self.service.create_blob_from_stream(container_name, blob_name,
                                             input_stream, 512)
        blob = self.service.get_blob_to_stream(container_name, blob_name,
                                               output_stream)
        content_length = blob.properties.content_length

        # Download range
        # Content settings, metadata
        # Progress
        # Parallelism
        # See blob_with_bytes for these examples. The code will be very similar.

        self.service.delete_container(container_name)
    def blob_with_path(self):
        """Sample: upload/download blob data via local file paths."""
        container_name = self._create_container()
        INPUT_FILE_PATH = 'blob_input.temp.dat'
        OUTPUT_FILE_PATH = 'blob_output.temp.dat'

        data = self._get_random_bytes(4 * 1024)
        with open(INPUT_FILE_PATH, 'wb') as stream:
            stream.write(data)

        # Basic
        blob_name = self._get_blob_reference()
        self.service.create_blob_from_path(container_name, blob_name, INPUT_FILE_PATH)
        blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH)
        content_length = blob.properties.content_length

        # Open mode
        # Append to the blob instead of starting from the beginning
        blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH, open_mode='ab')
        content_length = blob.properties.content_length  # will be the same, but local blob length will be longer

        # Download range
        # Content settings, metadata
        # Progress
        # Parallelism
        # See blob_with_bytes for these examples. The code will be very similar.

        self.service.delete_container(container_name)
        # Best-effort cleanup of the temp files; failures deliberately ignored.
        if os.path.isfile(INPUT_FILE_PATH):
            try:
                os.remove(INPUT_FILE_PATH)
            except:
                pass
        if os.path.isfile(OUTPUT_FILE_PATH):
            try:
                os.remove(OUTPUT_FILE_PATH)
            except:
                pass
    def create_blob(self):
        """Sample: create empty page blobs — basic, with content settings,
        and with metadata."""
        container_name = self._create_container()

        # Basic
        # Create a blob with no data
        blob_name1 = self._get_blob_reference()
        self.service.create_blob(container_name, blob_name1, 512)

        # Properties
        settings = ContentSettings(content_type='html', content_language='fr')
        blob_name2 = self._get_blob_reference()
        self.service.create_blob(container_name, blob_name2, 512, content_settings=settings)

        # Metadata
        metadata = {'val1': 'foo', 'val2': 'blah'}
        blob_name2 = self._get_blob_reference()
        self.service.create_blob(container_name, blob_name2, 512, metadata=metadata)

        self.service.delete_container(container_name)
def resize_blob(self):
    """Demonstrate resizing a page blob from 512 to 1024 bytes."""
    container_name = self._create_container()
    blob_name = self._get_blob_reference()

    # Basic: create small, then grow.
    self.service.create_blob(container_name, blob_name, 512)
    self.service.resize_blob(container_name, blob_name, 1024)

    blob = self.service.get_blob_properties(container_name, blob_name)
    length = blob.properties.content_length  # 1024

    self.service.delete_container(container_name)
def page_operations(self):
    # Demonstrates updating, listing, and clearing page ranges of a page blob.
    container_name = self._create_container()
    blob_name = self._get_blob_reference()
    self.service.create_blob(container_name, blob_name, 2048)

    # Update the blob between offsets 512 and 1535 (inclusive: 1024 bytes,
    # matching the 16 * 64 = 1024 bytes of data below).
    data = b'abcdefghijklmnop' * 64
    self.service.update_page(container_name, blob_name, data, 512, 1535)

    # List pages
    print('list pages: ')
    pages = self.service.get_page_ranges(container_name, blob_name)
    for page in pages:
        print('({}, {}) '.format(page.start, page.end))  # (512, 1535)

    # Clear part of that page
    self.service.clear_page(container_name, blob_name, 1024, 1535)

    self.service.delete_container(container_name)
def set_sequence_number(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Create with a page number (default sets to 0)
self.service.create_blob(container_name, blob_name, 2048, sequence_number=1)
# Increment
properties = self.service.set_sequence_number(container_name, blob_name,
sequence_number_action=SequenceNumberAction.Increment)
sequence_number = properties.sequence_number # 2
# Update
properties = self.service.set_sequence_number(container_name, blob_name,
sequence_number_action=SequenceNumberAction.Update,
sequence_number=5)
sequence_number = properties.sequence_number # 5
# Max
# Takes the larger of the two sequence numbers
properties = self.service.set_sequence_number(container_name, blob_name,
sequence_number_action=SequenceNumberAction.Max,
sequence_number=3)
sequence_number = properties.sequence_number # 5 | {
"content_hash": "050d40ff22359a8c2a1e9eb70d65df70",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 119,
"avg_line_length": 41.25442477876106,
"alnum_prop": 0.6127527216174183,
"repo_name": "jehine-MSFT/azure-storage-python",
"id": "f501999ffa668a1e9ca6bb3a63b61632e40c1271",
"size": "18649",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/blob/page_blob_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "739574"
}
],
"symlink_target": ""
} |
import odf.opendocument
from odf.table import Table, TableRow, TableCell
from odf.text import P
# http://stackoverflow.com/a/4544699/1846474
# http://stackoverflow.com/a/4544699/1846474
class GrowingList(list):
    """A list that transparently grows (padding with None) when an item is
    assigned past its current end."""

    def __setitem__(self, index, value):
        shortfall = index + 1 - len(self)
        if shortfall > 0:
            # Pad with None up to (and including) the target index.
            self.extend([None] * shortfall)
        list.__setitem__(self, index, value)
class ODSReader:
    """Loads an OpenDocument spreadsheet via odfpy and exposes each sheet as
    a list of rows, each row a GrowingList of cell text values."""

    # loads the file
    def __init__(self, file, clonespannedcolumns=None):
        # When not None, a cell spanning several columns is cloned into each
        # spanned column instead of occupying a single slot.
        self.clonespannedcolumns = clonespannedcolumns
        self.doc = odf.opendocument.load(file)
        self.SHEETS = {}
        for sheet in self.doc.spreadsheet.getElementsByType(Table):
            self.readSheet(sheet)

    # reads a sheet in the sheet dictionary, storing each sheet as an
    # array (rows) of arrays (columns)
    def readSheet(self, sheet):
        name = sheet.getAttribute("name")
        rows = sheet.getElementsByType(TableRow)
        arrRows = []

        # for each row
        for row in rows:
            row_comment = ""
            arrCells = GrowingList()
            cells = row.getElementsByType(TableCell)

            # for each cell
            count = 0
            for cell in cells:
                # repeated value? (ODS compresses runs of identical cells)
                repeat = cell.getAttribute("numbercolumnsrepeated")
                if(not repeat):
                    repeat = 1
                    spanned = int(cell.getAttribute('numbercolumnsspanned') or 0)
                    # clone spanned cells
                    if self.clonespannedcolumns is not None and spanned > 1:
                        repeat = spanned

                ps = cell.getElementsByType(P)
                textContent = ""

                # for each text/text:span node; nodeType 1 is an element,
                # nodeType 3 is a text node.
                for p in ps:
                    for n in p.childNodes:
                        if (n.nodeType == 1 and
                                ((n.tagName == "text:span") or (n.tagName == "text:a"))):
                            for c in n.childNodes:
                                if (c.nodeType == 3):
                                    textContent = u'{}{}'.format(textContent, c.data)
                        if (n.nodeType == 3):
                            textContent = u'{}{}'.format(textContent, n.data)

                if(textContent):
                    if(textContent[0] != "#"):  # ignore comments cells
                        for rr in range(int(repeat)):  # repeated?
                            arrCells[count] = textContent
                            count += 1
                    else:
                        row_comment = row_comment + textContent + " "
                else:
                    # Empty cell: advance the column index by the repeat count.
                    for rr in range(int(repeat)):
                        count += 1

            # if row contained something
            if(len(arrCells)):
                arrRows.append(arrCells)
            # else:
            #     print ("Empty or commented row (", row_comment, ")")

        self.SHEETS[name] = arrRows

    # returns a sheet as an array (rows) of arrays (columns)
    def getSheet(self, name):
        return self.SHEETS[name]
def keyval_sheet_to_dict(sheet, sheetname, funcs=None):
    '''For a sheet with rows of 1 key and 1 value, return a dictionary.

    sheet is an ODSReader().
    sheetname is the worksheet name.
    funcs is an optional sequence of one or two callables: with two, the
    first is applied to keys and the second to values; with one, it is
    applied to both.
    Example: keyval_sheet_to_dict(sheet, sheetname, [str, int])
    '''
    rows = sheet.getSheet(sheetname)
    out = {}
    if not funcs:
        for row in rows:
            out[row[0]] = row[1]
    else:
        # Resolve the key/value transforms once instead of re-testing
        # len(funcs) on every row (loop-invariant hoisting).
        key_func = funcs[0]
        val_func = funcs[0] if len(funcs) == 1 else funcs[1]
        for row in rows:
            out[key_func(row[0])] = val_func(row[1])
    return out
def convert_dict_vals_to_objs_in_dict_of_dicts(dictin, objclass, depth=1):
    '''Convert, in place, a dict of dicts into a dict of objects, at any
    nesting depth.  depth is typically the number of key columns used to
    build the nested dict; usually only called from within this module.'''
    assert depth >= 1, 'Depth must be 1 or higher.'
    if depth > 1:
        # Recurse one level down for every sub-dict.
        for subdict in dictin.values():
            convert_dict_vals_to_objs_in_dict_of_dicts(subdict, objclass, depth - 1)
    else:
        # Innermost level: wrap each value in objclass.
        for key in dictin:
            dictin[key] = objclass(dictin[key])
def dict_sheet_to_dict_of_objs(sheet, sheetname, objclass, keys=None, funcs=None, nones='fill'):
    '''Create a dict of objects for a particular sheet in an ODSReader() object.
    sheet is an ODSReader().
    sheetname is the worksheet name.
    keys are the columns whose values key the new dict of objs.
    objclass is called as __init__(**kwargs) with kwargs populated from each row.
    funcs are functions applied to the data as it becomes entries in the dict.
    nones describes empty-field handling: 'fill' fills with None, 'trim' removes,
    'string' fills with 'None'.'''
    nested = dict_sheet_to_dict_of_dicts(sheet, sheetname, keys, funcs, nones)
    convert_dict_vals_to_objs_in_dict_of_dicts(nested, objclass, len(keys))
    return nested
def interpret_none(key, interpreted_dict, nones='fill'):
    '''Enter a value into interpreted_dict[key] based on nones:
    'fill' enters None, 'string' enters 'None', 'trim' enters nothing.
    Any other value raises AssertionError.'''
    fillers = {'fill': None, 'string': 'None'}
    if nones in fillers:
        interpreted_dict[key] = fillers[nones]
    else:
        assert nones == 'trim', f'Unknown interpretation of None: {nones}'
def row_to_dict(key_row, row, funcs=None, nones='fill'):
    '''Convert one spreadsheet row (a list) to a dict keyed by key_row.
    Each element is passed through the matching entry of funcs (defaulting
    to str past the end of funcs); missing or None cells are handled
    according to the nones policy (see interpret_none).'''
    out = {}
    for i, key in enumerate(key_row):
        # Treat short rows and explicit Nones the same way.
        cell = row[i] if i < len(row) else None
        if cell is None:
            interpret_none(key, out, nones)
        elif funcs is not None and i < len(funcs):
            out[key] = funcs[i](cell)
        else:
            # Beyond the range of funcs (or no funcs at all): default to str.
            out[key] = str(cell)
    return out
def rows_to_list_of_dicts(sheet, funcs=None, nones='fill'):
    '''Convert a spreadsheet (list of rows) to a list of dicts, applying
    funcs to the elements.  The first row holds the labels and is untouched.
    Elements beyond the range of funcs are handled as strings.  nones is one
    of "fill" (with None), "trim" (exclude from the dict), "string" ("None").'''
    header = sheet[0]
    return [row_to_dict(header, row, funcs, nones=nones) for row in sheet[1:]]
def dict_to_dict_of_dicts(dictin, keys):
    '''Given keys, create a nested dictionary (any depth) whose levels are
    keyed by the values dictin holds for those keys, outermost key first.

    Because a single dict is being wrapped, nothing can be overwritten, so
    the layers are built from the innermost out.
    '''
    assert keys != [], 'keys can not be empty list.'
    # Innermost layer: the last key's value maps to the dict itself.
    out = {dictin[keys[-1]]: dictin}
    # Wrap one additional layer per remaining key, outermost last.
    # (The original guarded this loop with `if len(keys) > 0`, which is
    # always true after the assert above — the check was dead code.)
    for k in reversed(keys[:-1]):
        out = {dictin[k]: out}
    return out
def add_dict_to_dict_of_dicts(dictin, keys, out):
    '''Add a dict to a dict of dicts (in place), creating intermediate
    levels as needed.  keys name the columns of dictin whose values form
    the nesting path; the final level maps dictin[keys[-1]] -> dictin.'''
    assert keys, 'Need populated list.'
    if len(keys) >= 2:
        # Descend one level, creating the intermediate dict if missing.
        # (setdefault collapses the original's duplicated if/else branches.)
        out = out.setdefault(dictin[keys[0]], {})
        add_dict_to_dict_of_dicts(dictin, keys[1:], out)
    else:
        out[dictin[keys[0]]] = dictin
def list_of_dicts_to_dict_of_dicts(dicts, keys):
    '''Convert a list of dicts into a nested dict of dicts (any depth),
    keyed by the values each dict holds for keys.'''
    result = {}
    for entry in dicts:
        add_dict_to_dict_of_dicts(entry, keys, result)
    return result
def dict_sheet_to_dict_of_dicts(sheet, sheetname, keys, funcs=None, nones='fill'):
    '''Create a dict of dicts (a mini-database) for a particular sheet in an
    ODSReader() object.
    sheet is an ODSReader().
    sheetname is the worksheet name.
    keys are the columns whose values key the new dict of dicts.
    funcs are functions applied to the data as it becomes entries in the dict.
    nones describes empty-field handling: 'fill' fills with None, 'trim' removes,
    'string' fills with 'None'.'''
    raw_rows = sheet.getSheet(sheetname)
    dict_rows = rows_to_list_of_dicts(raw_rows, funcs, nones)
    return list_of_dicts_to_dict_of_dicts(dict_rows, keys)
def dict_sheet_to_list_of_dicts(sheet, sheetname, keys, funcs=None, nones='fill'):
    '''Creates a list of dicts for a particular sheet in an ODSReader() object.
    sheet is an ODSReader().
    sheetname is the worksheet name.
    keys is unused here; it is accepted only for signature parity with
    dict_sheet_to_dict_of_dicts (the original docstring was copy-pasted
    from that function).
    funcs are functions that should be applied to the data as it becomes entries in the dict.
    nones describes how to handle empty fields. 'fill' fills with None, 'trim' removes, 'string' fills with 'None'.'''
    out = sheet.getSheet(sheetname)
    out = rows_to_list_of_dicts(out, funcs, nones)
    return out
| {
"content_hash": "7378cac9922e1ef3517ae28c84e5b55a",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 133,
"avg_line_length": 41.44017094017094,
"alnum_prop": 0.5868825409920594,
"repo_name": "marcoconti83/read-ods-with-odfpy",
"id": "a779ed40ed8ed23d1df1ba9c43ec53c24cebe4f5",
"size": "10301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ODSReader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10301"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.views.generic import RedirectView
from django.views.decorators.cache import never_cache
from honeypot.decorators import check_honeypot
from sked.views import (SessionList, SessionDetail, CreateSession,
UpdateSession, SingleDayView, CurrentTimeslotView, LazyEventRedirectView)
# URL routes for the sked app.  Event-scoped pages are keyed by event_slug;
# views that show editable schedule data are wrapped in never_cache so edits
# appear immediately, and the public session form is honeypot-protected.
# NOTE(review): patterns() with a prefix string is a pre-Django-1.10 API —
# presumably this project pins an older Django; confirm before upgrading.
urlpatterns = patterns(
    'sked.views',
    # Bare /new/ and / redirect to the corresponding page of the "lazy"
    # (implicitly selected) event.
    url(r'^new/$', LazyEventRedirectView.as_view(viewname='sked:new_session')),
    url(r'^$', LazyEventRedirectView.as_view(viewname='sked:session_list')),
    url(r'^(?P<event_slug>[\w-]+)/$', never_cache(SessionList.as_view()), name="session_list"),
    url(r'^(?P<event_slug>[\w-]+)/wall/$', never_cache(SingleDayView.as_view(template_name="sked/wall.html")), name="wall"),
    url(r'^(?P<event_slug>[\w-]+)/print/$', never_cache(SingleDayView.as_view(template_name="sked/analog_wall.html")), name="analog_wall"),
    url(r'^(?P<event_slug>[\w-]+)/print/timeslot/$', never_cache(CurrentTimeslotView.as_view(template_name="sked/analog_wall.html")), name="analog_wall_for_timeslot"),
    url(r'^(?P<event_slug>[\w-]+)/tv/$', never_cache(CurrentTimeslotView.as_view(template_name="sked/tv.html")), name="tv"),
    # check_honeypot rejects bot submissions of the public creation form.
    url(r'^(?P<event_slug>[\w-]+)/new/$', never_cache(check_honeypot(CreateSession.as_view())), name="new_session"),
    url(r'^(?P<event_slug>[\w-]+)/(?P<slug>[\w-]+)/edit/$', never_cache(UpdateSession.as_view()), name="edit_session"),
    url(r'^(?P<event_slug>[\w-]+)/(?P<slug>[\w-]+)/preview/$', SessionDetail.as_view(preview=True), name="session_preview"),
    url(r'^(?P<event_slug>[\w-]+)/(?P<slug>[\w-]+)/$', SessionDetail.as_view(), name="session_detail"),
)
| {
"content_hash": "93a454297ef017bd7453d8dd537ae5d9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 167,
"avg_line_length": 76.68181818181819,
"alnum_prop": 0.6716064018968583,
"repo_name": "sunlightlabs/tcamp",
"id": "018693715f4003ea0ddd22208134a6f1936f6fed",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcamp/sked/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "191488"
},
{
"name": "HTML",
"bytes": "832187"
},
{
"name": "JavaScript",
"bytes": "86789"
},
{
"name": "Python",
"bytes": "703083"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
import a10_neutron_lbaas.a10_context as a10_context
from a10_neutron_lbaas.vthunder import keystone as keystone_helpers
class A10Context(a10_context.A10Context):
    # v2 alias of the shared A10Context; no v2-specific behavior needed.
    pass
class A10WriteContext(a10_context.A10WriteContext):
    # v2 alias of the shared A10WriteContext; no v2-specific behavior needed.
    pass
class A10WriteStatusContext(a10_context.A10WriteContext):
    """Write context that reports completion status of the managed block
    back to the openstack manager on exit."""

    def __exit__(self, exc_type, exc_value, traceback):
        # Report success or failure on the LBaaS object depending on whether
        # the managed block raised, then run the base-class exit handling.
        if exc_type is None:
            self.handler.openstack_manager.successful_completion(
                self.openstack_context,
                self.openstack_lbaas_obj)
        else:
            self.handler.openstack_manager.failed_completion(
                self.openstack_context,
                self.openstack_lbaas_obj)
        super(A10WriteStatusContext, self).__exit__(exc_type, exc_value,
                                                    traceback)
class A10DeleteContext(a10_context.A10DeleteContextBase):
    """Delete context: reports successful deletion on clean exit and can
    count the load balancers remaining in the partition."""

    def __exit__(self, exc_type, exc_value, traceback):
        # Only report completion when the managed block did not raise.
        if exc_type is None:
            self.handler.openstack_manager.successful_completion(
                self.openstack_context,
                self.openstack_lbaas_obj,
                delete=True)
        super(A10DeleteContext, self).__exit__(exc_type, exc_value, traceback)

    def remaining_root_objects(self):
        # Count of load balancers still associated with this partition.
        ctx = self.openstack_context
        if self.partition_key == self.tenant_id:
            return self.handler.neutron.loadbalancer_total(ctx, self.partition_key)
        else:
            # Partition key is a parent project: gather the child project ids
            # via keystone and count their load balancers.
            keystone_context = keystone_helpers.KeystoneFromContext(self.a10_driver.config,
                                                                    self.openstack_context)
            projects = keystone_context.client.projects.list()
            idlist = [x.id for x in projects if x.parent_id == self.partition_key]
            return self.handler.neutron.loadbalancer_parent(ctx, idlist)
| {
"content_hash": "4996794e7f7205a94590ad17ccd1d3b8",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 91,
"avg_line_length": 37.1,
"alnum_prop": 0.6199460916442049,
"repo_name": "a10networks/a10-neutron-lbaas",
"id": "580e47f262b5c33daf987ac233f3029ae0d9582c",
"size": "2485",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10_neutron_lbaas/v2/v2_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1083"
},
{
"name": "Python",
"bytes": "543754"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
} |
"""Kuler Sky lights integration."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from .const import DATA_ADDRESSES, DATA_DISCOVERY_SUBSCRIPTION, DOMAIN
PLATFORMS = [Platform.LIGHT]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Kuler Sky from a config entry."""
    # setdefault replaces the original membership-check-then-insert pairs:
    # same behavior, idempotent across multiple config entries.
    domain_data = hass.data.setdefault(DOMAIN, {})
    domain_data.setdefault(DATA_ADDRESSES, set())

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Stop discovery if a subscription was registered.
    if unsub := hass.data[DOMAIN].pop(DATA_DISCOVERY_SUBSCRIPTION, None):
        unsub()

    hass.data.pop(DOMAIN, None)

    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
| {
"content_hash": "accc971cd45edebe64c173838c0a96d0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 83,
"avg_line_length": 31.515151515151516,
"alnum_prop": 0.7278846153846154,
"repo_name": "home-assistant/home-assistant",
"id": "39c0d0a5b8454eecb46c286e5665c7ea7a4001cf",
"size": "1040",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/kulersky/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
import contextlib, os.path, re, tempfile
from ..linkers import StaticLinker
from .. import coredata
from .. import mlog
from .. import mesonlib
from ..mesonlib import EnvironmentException, MesonException, version_compare, Popen_safe
"""This file contains the data files of all compilers Meson knows
about. To support a new compiler, add its information below.
Also add corresponding autodetection code in environment.py."""
# File suffixes recognised as headers, object files, and libraries.
header_suffixes = ('h', 'hh', 'hpp', 'hxx', 'H', 'ipp', 'moc', 'vapi', 'di')
obj_suffixes = ('o', 'obj', 'res')
lib_suffixes = ('a', 'lib', 'dll', 'dylib', 'so')
# Mapping of language to suffixes of files that should always be in that language
# This means we can't include .h headers here since they could be C, C++, ObjC, etc.
lang_suffixes = {
    'c': ('c',),
    'cpp': ('cpp', 'cc', 'cxx', 'c++', 'hh', 'hpp', 'ipp', 'hxx'),
    # f90, f95, f03, f08 are for free-form fortran ('f90' recommended)
    # f, for, ftn, fpp are for fixed-form fortran ('f' or 'for' recommended)
    'fortran': ('f90', 'f95', 'f03', 'f08', 'f', 'for', 'ftn', 'fpp'),
    'd': ('d', 'di'),
    'objc': ('m',),
    'objcpp': ('mm',),
    'rust': ('rs',),
    'vala': ('vala', 'vapi', 'gs'),
    'cs': ('cs',),
    'swift': ('swift',),
    'java': ('java',),
}
cpp_suffixes = lang_suffixes['cpp'] + ('h',)
c_suffixes = lang_suffixes['c'] + ('h',)
# List of languages that can be linked with C code directly by the linker
# used in build.py:process_compilers() and build.py:get_dynamic_linker()
clike_langs = ('objcpp', 'objc', 'd', 'cpp', 'c', 'fortran',)
clike_suffixes = ()
for _l in clike_langs:
    clike_suffixes += lang_suffixes[_l]
# Headers, LLVM IR, and assembly can also be fed to the C-like compilers.
clike_suffixes += ('h', 'll', 's')
# All these are only for C-like languages; see `clike_langs` above.
def sort_clike(lang):
    '''
    Sort key ordering languages according to reversed(compilers.clike_langs),
    with unknown languages pushed to the end.  The purpose is to prefer C
    over C++ for files that both can compile (assembly, C, etc.); the same
    applies to ObjC vs ObjC++.
    '''
    return -clike_langs.index(lang) if lang in clike_langs else 1
def is_header(fname):
    # Source objects carry their path in .fname.
    if hasattr(fname, 'fname'):
        fname = fname.fname
    return fname.rsplit('.', 1)[-1] in header_suffixes
def is_source(fname):
    # Source objects carry their path in .fname.
    if hasattr(fname, 'fname'):
        fname = fname.fname
    return fname.rsplit('.', 1)[-1].lower() in clike_suffixes
def is_assembly(fname):
    # Source objects carry their path in .fname.
    if hasattr(fname, 'fname'):
        fname = fname.fname
    suffix = fname.rsplit('.', 1)[-1]
    return suffix.lower() == 's'
def is_llvm_ir(fname):
    # Source objects carry their path in .fname.  Note: case-sensitive,
    # unlike is_assembly.
    if hasattr(fname, 'fname'):
        fname = fname.fname
    return fname.rsplit('.', 1)[-1] == 'll'
def is_object(fname):
    # Source objects carry their path in .fname.
    if hasattr(fname, 'fname'):
        fname = fname.fname
    return fname.rsplit('.', 1)[-1] in obj_suffixes
def is_library(fname):
    # Source objects carry their path in .fname.
    if hasattr(fname, 'fname'):
        fname = fname.fname
    return fname.rsplit('.', 1)[-1] in lib_suffixes
# Per-buildtype compiler flags, keyed by Meson buildtype name, for each
# compiler/language family.
gnulike_buildtype_args = {'plain': [],
                          # -O0 is passed for improved debugging information with gcc
                          # See https://github.com/mesonbuild/meson/pull/509
                          'debug': ['-O0', '-g'],
                          'debugoptimized': ['-O2', '-g'],
                          'release': ['-O3'],
                          'minsize': ['-Os', '-g']}

msvc_buildtype_args = {'plain': [],
                       'debug': ["/MDd", "/ZI", "/Ob0", "/Od", "/RTC1"],
                       'debugoptimized': ["/MD", "/Zi", "/O2", "/Ob1"],
                       'release': ["/MD", "/O2", "/Ob2"],
                       'minsize': ["/MD", "/Zi", "/Os", "/Ob1"],
                       }

# Per-buildtype linker flags.
apple_buildtype_linker_args = {'plain': [],
                               'debug': [],
                               'debugoptimized': [],
                               'release': [],
                               'minsize': [],
                               }

gnulike_buildtype_linker_args = {'plain': [],
                                 'debug': [],
                                 'debugoptimized': [],
                                 'release': ['-Wl,-O1'],
                                 'minsize': [],
                                 }

msvc_buildtype_linker_args = {'plain': [],
                              'debug': [],
                              'debugoptimized': [],
                              'release': [],
                              'minsize': ['/INCREMENTAL:NO'],
                              }

java_buildtype_args = {'plain': [],
                       'debug': ['-g'],
                       'debugoptimized': ['-g'],
                       'release': [],
                       'minsize': [],
                       }

rust_buildtype_args = {'plain': [],
                       'debug': ['-C', 'debuginfo=2'],
                       'debugoptimized': ['-C', 'debuginfo=2', '-C', 'opt-level=2'],
                       'release': ['-C', 'opt-level=3'],
                       'minsize': [],  # In a future release: ['-C', 'opt-level=s'],
                       }

# D has three compilers (gdc, ldc, dmd), each with its own flag spelling.
d_gdc_buildtype_args = {'plain': [],
                        'debug': ['-g', '-O0'],
                        'debugoptimized': ['-g', '-O'],
                        'release': ['-O3', '-frelease'],
                        'minsize': [],
                        }

d_ldc_buildtype_args = {'plain': [],
                        'debug': ['-g', '-O0'],
                        'debugoptimized': ['-g', '-O'],
                        'release': ['-O3', '-release'],
                        'minsize': [],
                        }

d_dmd_buildtype_args = {'plain': [],
                        'debug': ['-g'],
                        'debugoptimized': ['-g', '-O'],
                        'release': ['-O', '-release'],
                        'minsize': [],
                        }

mono_buildtype_args = {'plain': [],
                       'debug': ['-debug'],
                       'debugoptimized': ['-debug', '-optimize+'],
                       'release': ['-optimize+'],
                       'minsize': [],
                       }

swift_buildtype_args = {'plain': [],
                        'debug': ['-g'],
                        'debugoptimized': ['-g', '-O'],
                        'release': ['-O'],
                        'minsize': [],
                        }

# Default Windows system libraries, in GCC and MSVC spellings.
gnu_winlibs = ['-lkernel32', '-luser32', '-lgdi32', '-lwinspool', '-lshell32',
               '-lole32', '-loleaut32', '-luuid', '-lcomdlg32', '-ladvapi32']

msvc_winlibs = ['kernel32.lib', 'user32.lib', 'gdi32.lib',
                'winspool.lib', 'shell32.lib', 'ole32.lib', 'oleaut32.lib',
                'uuid.lib', 'comdlg32.lib', 'advapi32.lib']

# Colored-diagnostics flags per b_colorout value.
gnu_color_args = {'auto': ['-fdiagnostics-color=auto'],
                  'always': ['-fdiagnostics-color=always'],
                  'never': ['-fdiagnostics-color=never'],
                  }

clang_color_args = {'auto': ['-Xclang', '-fcolor-diagnostics'],
                    'always': ['-Xclang', '-fcolor-diagnostics'],
                    'never': ['-Xclang', '-fno-color-diagnostics'],
                    }

# The b_* "base options" every compiler may support; consumed by
# get_base_compile_args()/get_base_link_args() below.
base_options = {'b_pch': coredata.UserBooleanOption('b_pch', 'Use precompiled headers', True),
                'b_lto': coredata.UserBooleanOption('b_lto', 'Use link time optimization', False),
                'b_sanitize': coredata.UserComboOption('b_sanitize',
                                                       'Code sanitizer to use',
                                                       ['none', 'address', 'thread', 'undefined', 'memory'],
                                                       'none'),
                'b_lundef': coredata.UserBooleanOption('b_lundef', 'Use -Wl,--no-undefined when linking', True),
                'b_asneeded': coredata.UserBooleanOption('b_asneeded', 'Use -Wl,--as-needed when linking', True),
                'b_pgo': coredata.UserComboOption('b_pgo', 'Use profile guide optimization',
                                                  ['off', 'generate', 'use'],
                                                  'off'),
                'b_coverage': coredata.UserBooleanOption('b_coverage',
                                                         'Enable coverage tracking.',
                                                         False),
                'b_colorout': coredata.UserComboOption('b_colorout', 'Use colored output',
                                                       ['auto', 'always', 'never'],
                                                       'always'),
                'b_ndebug': coredata.UserBooleanOption('b_ndebug',
                                                       'Disable asserts',
                                                       False),
                'b_staticpic': coredata.UserBooleanOption('b_staticpic',
                                                          'Build static libraries as position independent',
                                                          True),
                }
def sanitizer_compile_args(value):
    """Compile flags enabling the given sanitizer; 'none' yields no flags."""
    if value == 'none':
        return []
    # ASan backtraces need the frame pointer to be useful.
    extra = ['-fno-omit-frame-pointer'] if value == 'address' else []
    return ['-fsanitize=' + value] + extra
def sanitizer_link_args(value):
    """Link flags enabling the given sanitizer; 'none' yields no flags."""
    return [] if value == 'none' else ['-fsanitize=' + value]
def get_base_compile_args(options, compiler):
args = []
# FIXME, gcc/clang specific.
try:
if options['b_lto'].value:
args.append('-flto')
except KeyError:
pass
try:
args += compiler.get_colorout_args(options['b_colorout'].value)
except KeyError:
pass
try:
args += sanitizer_compile_args(options['b_sanitize'].value)
except KeyError:
pass
try:
pgo_val = options['b_pgo'].value
if pgo_val == 'generate':
args.append('-fprofile-generate')
elif pgo_val == 'use':
args.append('-fprofile-use')
except KeyError:
pass
try:
if options['b_coverage'].value:
args += compiler.get_coverage_args()
except KeyError:
pass
try:
if options['b_ndebug'].value:
args += ['-DNDEBUG']
except KeyError:
pass
return args
def get_base_link_args(options, linker, is_shared_module):
args = []
# FIXME, gcc/clang specific.
try:
if options['b_lto'].value:
args.append('-flto')
except KeyError:
pass
try:
args += sanitizer_link_args(options['b_sanitize'].value)
except KeyError:
pass
try:
pgo_val = options['b_pgo'].value
if pgo_val == 'generate':
args.append('-fprofile-generate')
elif pgo_val == 'use':
args.append('-fprofile-use')
except KeyError:
pass
try:
if not is_shared_module and 'b_lundef' in linker.base_options and options['b_lundef'].value:
args.append('-Wl,--no-undefined')
except KeyError:
pass
try:
if 'b_asneeded' in linker.base_options and options['b_asneeded'].value:
args.append('-Wl,--as-needed')
except KeyError:
pass
try:
if options['b_coverage'].value:
args += linker.get_coverage_link_args()
except KeyError:
pass
return args
class CrossNoRunException(MesonException):
    # NOTE(review): raised when compiled test code cannot be executed —
    # presumably because we are cross-compiling without an exe wrapper;
    # confirm against the run() implementations.
    pass
class RunResult:
    """Outcome of compiling (and possibly running) a test snippet.

    `compiled` records whether the snippet built; the remaining fields
    default to sentinel values for the case where it never ran.
    """

    def __init__(self, compiled, returncode=999, stdout='UNDEFINED', stderr='UNDEFINED'):
        self.compiled, self.returncode = compiled, returncode
        self.stdout, self.stderr = stdout, stderr
class CompilerArgs(list):
    '''
    Class derived from list() that manages a list of compiler arguments. Should
    be used while constructing compiler arguments from various sources. Can be
    operated with ordinary lists, so this does not need to be used everywhere.

    All arguments must be inserted and stored in GCC-style (-lfoo, -Idir, etc)
    and can converted to the native type of each compiler by using the
    .to_native() method to which you must pass an instance of the compiler or
    the compiler class.

    New arguments added to this class (either with .append(), .extend(), or +=)
    are added in a way that ensures that they override previous arguments.
    For example:

    >>> a = ['-Lfoo', '-lbar']
    >>> a += ['-Lpho', '-lbaz']
    >>> print(a)
    ['-Lpho', '-Lfoo', '-lbar', '-lbaz']

    Arguments will also be de-duped if they can be de-duped safely.

    Note that because of all this, this class is not commutative and does not
    preserve the order of arguments if it is safe to not. For example:
    >>> ['-Ifoo', '-Ibar'] + ['-Ifez', '-Ibaz', '-Werror']
    ['-Ifez', '-Ibaz', '-Ifoo', '-Ibar', '-Werror']
    >>> ['-Ifez', '-Ibaz', '-Werror'] + ['-Ifoo', '-Ibar']
    ['-Ifoo', '-Ibar', '-Ifez', '-Ibaz', '-Werror']
    '''
    # NOTE: currently this class is only for C-like compilers, but it can be
    # extended to other languages easily. Just move the following to the
    # compiler class and initialize when self.compiler is set.

    # Arg prefixes that override by prepending instead of appending
    prepend_prefixes = ('-I', '-L')
    # Arg prefixes and args that must be de-duped by returning 2
    dedup2_prefixes = ('-I', '-L', '-D', '-U')
    dedup2_suffixes = ()
    dedup2_args = ()
    # Arg prefixes and args that must be de-duped by returning 1
    dedup1_prefixes = ('-l',)
    dedup1_suffixes = ('.lib', '.dll', '.so', '.dylib', '.a')
    # Match a .so of the form path/to/libfoo.so.0.1.0
    # Only UNIX shared libraries require this. Others have a fixed extension.
    dedup1_regex = re.compile(r'([\/\\]|\A)lib.*\.so(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?$')
    dedup1_args = ('-c', '-S', '-E', '-pipe', '-pthread')
    # The Compiler (or StaticLinker) instance these args belong to; set by
    # _check_args() during construction.
    compiler = None

    def _check_args(self, args):
        # Validate the constructor arguments: at most a Compiler/StaticLinker
        # plus an optional initial list, accepted in either order.  Returns
        # the initial list contents (possibly empty).
        cargs = []
        if len(args) > 2:
            raise TypeError("CompilerArgs() only accepts at most 2 arguments: "
                            "The compiler, and optionally an initial list")
        elif not args:
            return cargs
        elif len(args) == 1:
            if isinstance(args[0], (Compiler, StaticLinker)):
                self.compiler = args[0]
            else:
                raise TypeError("you must pass a Compiler instance as one of "
                                "the arguments")
        elif len(args) == 2:
            if isinstance(args[0], (Compiler, StaticLinker)):
                self.compiler = args[0]
                cargs = args[1]
            elif isinstance(args[1], (Compiler, StaticLinker)):
                cargs = args[0]
                self.compiler = args[1]
            else:
                raise TypeError("you must pass a Compiler instance as one of "
                                "the two arguments")
        else:
            raise AssertionError('Not reached')
        return cargs

    def __init__(self, *args):
        super().__init__(self._check_args(args))

    @classmethod
    def _can_dedup(cls, arg):
        '''
        Returns whether the argument can be safely de-duped. This is dependent
        on three things:

        a) Whether an argument can be 'overriden' by a later argument. For
           example, -DFOO defines FOO and -UFOO undefines FOO. In this case, we
           can safely remove the previous occurance and add a new one. The same
           is true for include paths and library paths with -I and -L. For
           these we return `2`. See `dedup2_prefixes` and `dedup2_args`.
        b) Arguments that once specified cannot be undone, such as `-c` or
           `-pipe`. New instances of these can be completely skipped. For these
           we return `1`. See `dedup1_prefixes` and `dedup1_args`.
        c) Whether it matters where or how many times on the command-line
           a particular argument is present. This can matter for symbol
           resolution in static or shared libraries, so we cannot de-dup or
           reorder them. For these we return `0`. This is the default.

        In addition to these, we handle library arguments specially.
        With GNU ld, we surround library arguments with -Wl,--start/end-group
        to recursively search for symbols in the libraries. This is not needed
        with other linkers.
        '''
        # A standalone argument must never be deduplicated because it is
        # defined by what comes _after_ it. Thus dedupping this:
        # -D FOO -D BAR
        # would yield either
        # -D FOO BAR
        # or
        # FOO -D BAR
        # both of which are invalid.
        if arg in cls.dedup2_prefixes:
            return 0
        if arg in cls.dedup2_args or \
           arg.startswith(cls.dedup2_prefixes) or \
           arg.endswith(cls.dedup2_suffixes):
            return 2
        if arg in cls.dedup1_args or \
           arg.startswith(cls.dedup1_prefixes) or \
           arg.endswith(cls.dedup1_suffixes) or \
           re.search(cls.dedup1_regex, arg):
            return 1
        return 0

    @classmethod
    def _should_prepend(cls, arg):
        # True for search-path args (-I/-L): later additions must take
        # precedence, so they are prepended rather than appended.
        if arg.startswith(cls.prepend_prefixes):
            return True
        return False

    def to_native(self):
        # Check if we need to add --start/end-group for circular dependencies
        # between static libraries.
        if get_compiler_uses_gnuld(self.compiler):
            group_started = False
            for each in self:
                if not each.startswith('-l') and not each.endswith('.a'):
                    continue
                i = self.index(each)
                if not group_started:
                    # First occurance of a library
                    self.insert(i, '-Wl,--start-group')
                    group_started = True
            # Last occurance of a library
            if group_started:
                self.insert(i + 1, '-Wl,--end-group')
        return self.compiler.unix_args_to_native(self)

    def append_direct(self, arg):
        '''
        Append the specified argument without any reordering or de-dup
        '''
        super().append(arg)

    def extend_direct(self, iterable):
        '''
        Extend using the elements in the specified iterable without any
        reordering or de-dup
        '''
        super().extend(iterable)

    def __add__(self, args):
        new = CompilerArgs(self, self.compiler)
        new += args
        return new

    def __iadd__(self, args):
        '''
        Add two CompilerArgs while taking into account overriding of arguments
        and while preserving the order of arguments as much as possible
        '''
        pre = []
        post = []
        if not isinstance(args, list):
            raise TypeError('can only concatenate list (not "{}") to list'.format(args))
        for arg in args:
            # If the argument can be de-duped, do it either by removing the
            # previous occurance of it and adding a new one, or not adding the
            # new occurance.
            dedup = self._can_dedup(arg)
            if dedup == 1:
                # Argument already exists and adding a new instance is useless
                if arg in self or arg in pre or arg in post:
                    continue
            if dedup == 2:
                # Remove all previous occurances of the arg and add it anew
                if arg in self:
                    self.remove(arg)
                if arg in pre:
                    pre.remove(arg)
                if arg in post:
                    post.remove(arg)
            if self._should_prepend(arg):
                pre.append(arg)
            else:
                post.append(arg)
        # Insert at the beginning
        self[:0] = pre
        # Append to the end
        super().__iadd__(post)
        return self

    def __radd__(self, args):
        new = CompilerArgs(args, self.compiler)
        new += self
        return new

    def __mul__(self, args):
        raise TypeError("can't multiply compiler arguments")

    def __imul__(self, args):
        raise TypeError("can't multiply compiler arguments")

    def __rmul__(self, args):
        raise TypeError("can't multiply compiler arguments")

    def append(self, arg):
        # Route through __iadd__ so dedup/reorder rules apply.
        self.__iadd__([arg])

    def extend(self, args):
        # Route through __iadd__ so dedup/reorder rules apply.
        self.__iadd__(args)
class Compiler:
    """Language-agnostic base class for all compilers.

    Stores the compiler executable and version and provides default
    implementations (empty lists, or EnvironmentException for unsupported
    checks) that language-specific subclasses override.
    """
    def __init__(self, exelist, version):
        # Accept a single executable path or a full argv-style list.
        if isinstance(exelist, str):
            self.exelist = [exelist]
        elif isinstance(exelist, list):
            self.exelist = exelist
        else:
            raise TypeError('Unknown argument to Compiler')
        # In case it's been overriden by a child class already
        # NOTE(review): self.language is expected to be set by the subclass
        # before this runs — confirm.
        if not hasattr(self, 'file_suffixes'):
            self.file_suffixes = lang_suffixes[self.language]
        if not hasattr(self, 'can_compile_suffixes'):
            self.can_compile_suffixes = set(self.file_suffixes)
        self.default_suffix = self.file_suffixes[0]
        self.version = version
        self.base_options = []
    def __repr__(self):
        repr_str = "<{0}: v{1} `{2}`>"
        return repr_str.format(self.__class__.__name__, self.version,
                               ' '.join(self.exelist))
    def can_compile(self, src):
        """Return True if *src* (path or File-like with .fname) has a
        suffix this compiler accepts."""
        if hasattr(src, 'fname'):
            src = src.fname
        suffix = os.path.splitext(src)[1].lower()
        if suffix and suffix[1:] in self.can_compile_suffixes:
            return True
        return False
    def get_id(self):
        return self.id
    def get_language(self):
        return self.language
    def get_display_language(self):
        # Human-readable language name used in error messages.
        return self.language.capitalize()
    def get_default_suffix(self):
        return self.default_suffix
    def get_exelist(self):
        # Return a copy so callers cannot mutate our argv list.
        return self.exelist[:]
    def get_builtin_define(self, *args, **kwargs):
        raise EnvironmentException('%s does not support get_builtin_define.' % self.id)
    def has_builtin_define(self, *args, **kwargs):
        raise EnvironmentException('%s does not support has_builtin_define.' % self.id)
    def get_always_args(self):
        # Args passed on every compile; none by default.
        return []
    def get_linker_always_args(self):
        return []
    def gen_import_library_args(self, implibname):
        """
        Used only on Windows for libraries that need an import library.
        This currently means C, C++, Fortran.
        """
        return []
    def get_options(self):
        return {} # build afresh every time
    def get_option_compile_args(self, options):
        return []
    def get_option_link_args(self, options):
        return []
    # The checks below are unsupported by default; subclasses that can
    # actually perform them override these methods.
    def has_header(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header checks.' % self.get_display_language())
    def has_header_symbol(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header symbol checks.' % self.get_display_language())
    def compiles(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support compile checks.' % self.get_display_language())
    def links(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support link checks.' % self.get_display_language())
    def run(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support run checks.' % self.get_display_language())
    def sizeof(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support sizeof checks.' % self.get_display_language())
    def alignment(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support alignment checks.' % self.get_display_language())
    def has_function(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support function checks.' % self.get_display_language())
    @classmethod
    def unix_args_to_native(cls, args):
        "Always returns a copy that can be independently mutated"
        return args[:]
    def find_library(self, *args, **kwargs):
        raise EnvironmentException('Language {} does not support library finding.'.format(self.get_display_language()))
    def get_library_dirs(self):
        return []
    def has_argument(self, arg, env):
        # Single-argument check delegates to the multi-argument variant.
        return self.has_multi_arguments([arg], env)
    def has_multi_arguments(self, args, env):
        raise EnvironmentException(
            'Language {} does not support has_multi_arguments.'.format(
                self.get_display_language()))
    def get_cross_extra_flags(self, environment, link):
        """Collect per-language '<lang>_args' (and, when *link* is true,
        '<lang>_link_args') from the cross file's [properties] section."""
        extra_flags = []
        if self.is_cross and environment:
            if 'properties' in environment.cross_info.config:
                props = environment.cross_info.config['properties']
                lang_args_key = self.language + '_args'
                extra_flags += props.get(lang_args_key, [])
                lang_link_args_key = self.language + '_link_args'
                if link:
                    extra_flags += props.get(lang_link_args_key, [])
        return extra_flags
    def _get_compile_output(self, dirname, mode):
        """Pick the output file path for a scratch compilation, or None
        when the mode sends output to stdout."""
        # In pre-processor mode, the output is sent to stdout and discarded
        if mode == 'preprocess':
            return None
        # Extension only matters if running results; '.exe' is
        # guaranteed to be executable on every platform.
        if mode == 'link':
            suffix = 'exe'
        else:
            suffix = 'obj'
        return os.path.join(dirname, 'output.' + suffix)
    @contextlib.contextmanager
    def compile(self, code, extra_args=None, mode='link'):
        """Compile *code* (a str or mesonlib.File) in a temporary directory
        and yield the finished process, annotated with stdo/stde/
        input_name/output_name attributes."""
        if extra_args is None:
            extra_args = []
        try:
            with tempfile.TemporaryDirectory() as tmpdirname:
                if isinstance(code, str):
                    srcname = os.path.join(tmpdirname,
                                           'testfile.' + self.default_suffix)
                    with open(srcname, 'w') as ofile:
                        ofile.write(code)
                elif isinstance(code, mesonlib.File):
                    srcname = code.fname
                output = self._get_compile_output(tmpdirname, mode)
                # Construct the compiler command-line
                commands = CompilerArgs(self)
                commands.append(srcname)
                commands += extra_args
                commands += self.get_always_args()
                if mode == 'compile':
                    commands += self.get_compile_only_args()
                # Preprocess mode outputs to stdout, so no output args
                if mode == 'preprocess':
                    commands += self.get_preprocess_only_args()
                else:
                    commands += self.get_output_args(output)
                # Generate full command-line with the exelist
                commands = self.get_exelist() + commands.to_native()
                mlog.debug('Running compile:')
                mlog.debug('Working directory: ', tmpdirname)
                mlog.debug('Command line: ', ' '.join(commands), '\n')
                mlog.debug('Code:\n', code)
                # p is bound first, then its stdo/stde attributes are set
                # from the remaining tuple elements.
                p, p.stdo, p.stde = Popen_safe(commands, cwd=tmpdirname)
                mlog.debug('Compiler stdout:\n', p.stdo)
                mlog.debug('Compiler stderr:\n', p.stde)
                p.input_name = srcname
                p.output_name = output
                yield p
        except (PermissionError, OSError):
            # On Windows antivirus programs and the like hold on to files so
            # they can't be deleted. There's not much to do in this case. Also,
            # catch OSError because the directory is then no longer empty.
            pass
    def get_colorout_args(self, colortype):
        # Colored-diagnostics flags; none by default.
        return []
    # Some compilers (msvc) write debug info to a separate file.
    # These args specify where it should be written.
    def get_compile_debugfile_args(self, rel_obj, **kwargs):
        return []
    def get_link_debugfile_args(self, rel_obj):
        return []
    def get_std_shared_lib_link_args(self):
        return []
    def get_std_shared_module_link_args(self):
        # By default, shared modules link like shared libraries.
        return self.get_std_shared_lib_link_args()
    def get_link_whole_for(self, args):
        # Linking nothing "whole" is trivially supported.
        if isinstance(args, list) and not args:
            return []
        raise EnvironmentException('Language %s does not support linking whole archives.' % self.get_display_language())
    def build_unix_rpath_args(self, build_dir, from_dir, rpath_paths, install_rpath):
        """Build -Wl,-rpath (and, on GNU ld, -rpath-link) arguments using
        $ORIGIN-relative paths."""
        if not rpath_paths and not install_rpath:
            return []
        # The rpaths we write must be relative, because otherwise
        # they have different length depending on the build
        # directory. This breaks reproducible builds.
        rel_rpaths = []
        for p in rpath_paths:
            if p == from_dir:
                relative = '' # relpath errors out in this case
            else:
                relative = os.path.relpath(p, from_dir)
            rel_rpaths.append(relative)
        paths = ':'.join([os.path.join('$ORIGIN', p) for p in rel_rpaths])
        # Pad with 'X' up to install_rpath's length — presumably so the
        # install-time rpath can be patched in place without growing the
        # string; confirm against the install step.
        if len(paths) < len(install_rpath):
            padding = 'X' * (len(install_rpath) - len(paths))
            if not paths:
                paths = padding
            else:
                paths = paths + ':' + padding
        args = ['-Wl,-rpath,' + paths]
        if get_compiler_is_linuxlike(self):
            # Rpaths to use while linking must be absolute. These are not
            # written to the binary. Needed only with GNU ld:
            # https://sourceware.org/bugzilla/show_bug.cgi?id=16936
            # Not needed on Windows or other platforms that don't use RPATH
            # https://github.com/mesonbuild/meson/issues/1897
            lpaths = ':'.join([os.path.join(build_dir, p) for p in rpath_paths])
            args += ['-Wl,-rpath-link,' + lpaths]
        return args
# Enum-style integers describing the target-platform flavour of each
# compiler family; used below to choose soname/linker argument styles.
GCC_STANDARD = 0
GCC_OSX = 1
GCC_MINGW = 2
GCC_CYGWIN = 3
CLANG_STANDARD = 0
CLANG_OSX = 1
CLANG_WIN = 2
# Possibly clang-cl?
ICC_STANDARD = 0
ICC_OSX = 1
ICC_WIN = 2
def get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module):
    """Return the linker args that embed a shared library's name
    (-soname on ELF-style targets, -install_name on OS X) for the given
    GCC platform flavour."""
    sostr = '' if soversion is None else '.' + soversion
    if gcc_type in (GCC_STANDARD, GCC_MINGW, GCC_CYGWIN):
        # Might not be correct for mingw but seems to work.
        return ['-Wl,-soname,%s%s.%s%s' % (prefix, shlib_name, suffix, sostr)]
    if gcc_type == GCC_OSX:
        # Shared modules are dlopen()ed and get no install_name.
        if is_shared_module:
            return []
        return ['-install_name', os.path.join(path, 'lib' + shlib_name + '.dylib')]
    raise RuntimeError('Not implemented yet.')
def get_compiler_is_linuxlike(compiler):
    """True when the compiler's family type marks a 'standard' (ELF,
    linux-like) target platform."""
    type_checks = (('gcc_type', GCC_STANDARD),
                   ('clang_type', CLANG_STANDARD),
                   ('icc_type', ICC_STANDARD))
    return any(getattr(compiler, attr, None) == standard
               for attr, standard in type_checks)
def get_compiler_uses_gnuld(c):
    """Best-effort guess of whether the compiler drives the GNU linker."""
    # FIXME: Perhaps we should detect the linker in the environment?
    # FIXME: Assumes that *BSD use GNU ld, but they might start using lld soon
    type_checks = (('gcc_type', (GCC_STANDARD, GCC_MINGW, GCC_CYGWIN)),
                   ('clang_type', (CLANG_STANDARD, CLANG_WIN)),
                   ('icc_type', (ICC_STANDARD, ICC_WIN)))
    return any(getattr(c, attr, None) in accepted
               for attr, accepted in type_checks)
def get_largefile_args(compiler):
    '''
    Enable transparent large-file-support for 32-bit UNIX systems
    '''
    if not get_compiler_is_linuxlike(compiler):
        # We don't enable -D_LARGEFILE64_SOURCE since that enables
        # transitionary features and must be enabled by programs that use
        # those features explicitly.
        return []
    # Enable large-file support unconditionally on all platforms other
    # than macOS and Windows. macOS is now 64-bit-only so it doesn't
    # need anything special, and Windows doesn't have automatic LFS.
    # You must use the 64-bit counterparts explicitly.
    # glibc, musl, and uclibc, and all BSD libcs support this. On Android,
    # support for transparent LFS is available depending on the version of
    # Bionic: https://github.com/android/platform_bionic#32-bit-abi-bugs
    # https://code.google.com/p/android/issues/detail?id=64613
    #
    # If this breaks your code, fix it! It's been 20+ years!
    return ['-D_FILE_OFFSET_BITS=64']
class GnuCompiler:
    # Functionality that is common to all GNU family compilers.
    # NOTE(review): this is a mixin — it relies on attributes/methods
    # (version, can_compile_suffixes, get_warn_args, ...) provided by a
    # sibling Compiler subclass in the MRO; confirm usage.
    def __init__(self, gcc_type, defines):
        self.id = 'gcc'
        self.gcc_type = gcc_type        # one of the GCC_* platform constants
        self.defines = defines or {}    # builtin preprocessor defines
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_colorout', 'b_ndebug', 'b_staticpic']
        if self.gcc_type != GCC_OSX:
            self.base_options.append('b_lundef')
        self.base_options.append('b_asneeded')
        # All GCC backends can do assembly
        self.can_compile_suffixes.add('s')
    def get_colorout_args(self, colortype):
        # Colored diagnostics flags exist only since GCC 4.9.
        if mesonlib.version_compare(self.version, '>=4.9.0'):
            return gnu_color_args[colortype][:]
        return []
    def get_warn_args(self, level):
        args = super().get_warn_args(level)
        if mesonlib.version_compare(self.version, '<4.8.0') and '-Wpedantic' in args:
            # -Wpedantic was added in 4.8.0
            # https://gcc.gnu.org/gcc-4.8/changes.html
            args[args.index('-Wpedantic')] = '-pedantic'
        return args
    def has_builtin_define(self, define):
        return define in self.defines
    def get_builtin_define(self, define):
        # Implicitly returns None for unknown defines.
        if define in self.defines:
            return self.defines[define]
    def get_pic_args(self):
        if self.gcc_type in (GCC_CYGWIN, GCC_MINGW, GCC_OSX):
            return [] # On Window and OS X, pic is always on.
        return ['-fPIC']
    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype):
        if self.gcc_type == GCC_OSX:
            return apple_buildtype_linker_args[buildtype]
        return gnulike_buildtype_linker_args[buildtype]
    def get_pch_suffix(self):
        # GCC precompiled headers use the .gch extension.
        return 'gch'
    def split_shlib_to_parts(self, fname):
        return os.path.split(fname)[0], fname
    def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):
        return get_gcc_soname_args(self.gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)
    def get_std_shared_lib_link_args(self):
        # NOTE(review): '-bundle' builds an OS X loadable module, not a
        # dylib — confirm this is intended for shared *libraries*.
        if self.gcc_type == GCC_OSX:
            return ['-bundle']
        return ['-shared']
    def get_link_whole_for(self, args):
        return ['-Wl,--whole-archive'] + args + ['-Wl,--no-whole-archive']
    def gen_vs_module_defs_args(self, defsfile):
        if not isinstance(defsfile, str):
            raise RuntimeError('Module definitions file should be str')
        # On Windows targets, .def files may be specified on the linker command
        # line like an object file.
        if self.gcc_type in (GCC_CYGWIN, GCC_MINGW):
            return [defsfile]
        # For other targets, discard the .def file.
        return []
    def get_gui_app_args(self):
        # -mwindows suppresses the console window on Windows targets.
        if self.gcc_type in (GCC_CYGWIN, GCC_MINGW):
            return ['-mwindows']
        return []
class ClangCompiler:
    # Functionality common to all Clang-family compilers; mixin combined
    # with a language-specific Compiler subclass (provides version,
    # can_compile_suffixes, has_multi_arguments, has_function, ...).
    def __init__(self, clang_type):
        self.id = 'clang'
        self.clang_type = clang_type    # one of the CLANG_* platform constants
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_ndebug', 'b_staticpic', 'b_colorout']
        if self.clang_type != CLANG_OSX:
            self.base_options.append('b_lundef')
        self.base_options.append('b_asneeded')
        # All Clang backends can do assembly and LLVM IR
        self.can_compile_suffixes.update(['ll', 's'])
    def get_pic_args(self):
        if self.clang_type in (CLANG_WIN, CLANG_OSX):
            return [] # On Window and OS X, pic is always on.
        return ['-fPIC']
    def get_colorout_args(self, colortype):
        return clang_color_args[colortype][:]
    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype):
        if self.clang_type == CLANG_OSX:
            return apple_buildtype_linker_args[buildtype]
        return gnulike_buildtype_linker_args[buildtype]
    def get_pch_suffix(self):
        return 'pch'
    def get_pch_use_args(self, pch_dir, header):
        # Workaround for Clang bug http://llvm.org/bugs/show_bug.cgi?id=15136
        # This flag is internal to Clang (or at least not documented on the man page)
        # so it might change semantics at any time.
        return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]
    def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):
        # Map the clang platform type to the matching gcc type and reuse
        # the shared soname-args helper.
        if self.clang_type == CLANG_STANDARD:
            gcc_type = GCC_STANDARD
        elif self.clang_type == CLANG_OSX:
            gcc_type = GCC_OSX
        elif self.clang_type == CLANG_WIN:
            gcc_type = GCC_MINGW
        else:
            raise MesonException('Unreachable code when converting clang type to gcc type.')
        return get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)
    def has_multi_arguments(self, args, env):
        # Force unknown -W flags to be errors so the probe is meaningful.
        return super().has_multi_arguments(
            ['-Werror=unknown-warning-option'] + args,
            env)
    def has_function(self, funcname, prefix, env, extra_args=None, dependencies=None):
        if extra_args is None:
            extra_args = []
        # Starting with XCode 8, we need to pass this to force linker
        # visibility to obey OS X and iOS minimum version targets with
        # -mmacosx-version-min, -miphoneos-version-min, etc.
        # https://github.com/Homebrew/homebrew-core/issues/3727
        # NOTE(review): this appends to a caller-supplied extra_args list
        # in place — confirm callers don't reuse that list.
        if self.clang_type == CLANG_OSX and version_compare(self.version, '>=8.0'):
            extra_args.append('-Wl,-no_weak_imports')
        return super().has_function(funcname, prefix, env, extra_args, dependencies)
    def get_std_shared_module_link_args(self):
        if self.clang_type == CLANG_OSX:
            return ['-bundle', '-Wl,-undefined,dynamic_lookup']
        return ['-shared']
    def get_link_whole_for(self, args):
        # OS X ld has no --whole-archive; -force_load takes one archive each.
        if self.clang_type == CLANG_OSX:
            result = []
            for a in args:
                result += ['-Wl,-force_load', a]
            return result
        return ['-Wl,--whole-archive'] + args + ['-Wl,--no-whole-archive']
# Tested on linux for ICC 14.0.3, 15.0.6, 16.0.4, 17.0.1
class IntelCompiler:
    # Functionality common to Intel (ICC) compilers; mixin combined with a
    # language-specific Compiler subclass.
    def __init__(self, icc_type):
        self.id = 'intel'
        self.icc_type = icc_type    # one of the ICC_* platform constants
        # Language header name used by the PCH flags; subclasses override.
        self.lang_header = 'none'
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_colorout', 'b_ndebug', 'b_staticpic', 'b_lundef', 'b_asneeded']
        # Assembly
        self.can_compile_suffixes.add('s')
    def get_pic_args(self):
        return ['-fPIC']
    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype):
        return gnulike_buildtype_linker_args[buildtype]
    def get_pch_suffix(self):
        return 'pchi'
    def get_pch_use_args(self, pch_dir, header):
        # Switch language to the PCH header type around the -include, then
        # back to 'none' for the actual sources.
        return ['-pch', '-pch_dir', os.path.join(pch_dir), '-x',
                self.lang_header, '-include', header, '-x', 'none']
    def get_pch_name(self, header_name):
        return os.path.split(header_name)[-1] + '.' + self.get_pch_suffix()
    def split_shlib_to_parts(self, fname):
        return os.path.split(fname)[0], fname
    def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):
        # Map the icc platform type to the matching gcc type and reuse the
        # shared soname-args helper.
        if self.icc_type == ICC_STANDARD:
            gcc_type = GCC_STANDARD
        elif self.icc_type == ICC_OSX:
            gcc_type = GCC_OSX
        elif self.icc_type == ICC_WIN:
            gcc_type = GCC_MINGW
        else:
            raise MesonException('Unreachable code when converting icc type to gcc type.')
        return get_gcc_soname_args(gcc_type, prefix, shlib_name, suffix, path, soversion, is_shared_module)
    def get_std_shared_lib_link_args(self):
        # FIXME: Don't know how icc works on OSX
        # if self.icc_type == ICC_OSX:
        #     return ['-bundle']
        return ['-shared']
| {
"content_hash": "bfe0284d8491f6204dd4bdcb7bce305c",
"timestamp": "",
"source": "github",
"line_count": 1048,
"max_line_length": 120,
"avg_line_length": 38.82538167938932,
"alnum_prop": 0.5525572021922387,
"repo_name": "aaronp24/meson",
"id": "579988f06f01f09a6473d5652028933821a91d1f",
"size": "41282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/compilers/compilers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2394"
},
{
"name": "Batchfile",
"bytes": "795"
},
{
"name": "C",
"bytes": "91520"
},
{
"name": "C#",
"bytes": "631"
},
{
"name": "C++",
"bytes": "17456"
},
{
"name": "CMake",
"bytes": "1670"
},
{
"name": "D",
"bytes": "2026"
},
{
"name": "Emacs Lisp",
"bytes": "1226"
},
{
"name": "Fortran",
"bytes": "1946"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "994"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "110"
},
{
"name": "Meson",
"bytes": "194442"
},
{
"name": "Objective-C",
"bytes": "699"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "Protocol Buffer",
"bytes": "92"
},
{
"name": "Python",
"bytes": "1254683"
},
{
"name": "Roff",
"bytes": "232"
},
{
"name": "Rust",
"bytes": "618"
},
{
"name": "Shell",
"bytes": "1787"
},
{
"name": "Swift",
"bytes": "972"
},
{
"name": "Vala",
"bytes": "6405"
},
{
"name": "Vim script",
"bytes": "9434"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2010-2011 Tyler Kennedy <tk@tkte.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Public API of the classfile package.
__all__ = ["classfile", "constants"]
from .classfile import ClassFile, ClassError
from .constants import ConstantType
| {
"content_hash": "b4d1168675b41af0af1cb84520313f1f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 48.08,
"alnum_prop": 0.7936772046589018,
"repo_name": "deathcap/Burger",
"id": "a178d19591f9d4798e8558df6288ca3136b2a066",
"size": "1247",
"binary": false,
"copies": "2",
"ref": "refs/heads/mc18",
"path": "solum/classfile/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156819"
}
],
"symlink_target": ""
} |
# Five Second Stadium (Python 2): a reaction game — press the button to
# start a stopwatch, press again trying to stop at exactly 5.00 seconds.
import stopwatch
import RPi.GPIO as GPIO
# BCM-numbered GPIO pin the push button is wired to.
pin = 25
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.IN)
print "\n***************************************"
print "*** Welcome to Five Second Stadium! ***"
print "***************************************\n"
print "Press the BUTTON to start and press again to stop after 5 sec!"
print "Press ctrl + c to quit"
print "--------------------------------------------------------------\n"
while True:
    print "Start?"
    # Block until the button pulls the pin low.
    GPIO.wait_for_edge(pin, GPIO.FALLING)
    t = stopwatch.Timer()
    print "***Started!***\n\n"
    print "Stop?"
    GPIO.wait_for_edge(pin, GPIO.FALLING)
    t.stop()
    rounded = "%.2f" % t.elapsed
    print rounded, " seconds!"
    # Win only when the two-decimal reading is exactly 5.00.
    if float(rounded) == 5:
        print "*** Congratulations Champion! ***"
    else:
        print "So Close!"
    print "Would you like to play again?"
GPIO.wait_for_edge(pin, GPIO.FALLING) | {
"content_hash": "03513887d2c08757076acb92b72eb882",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 27.363636363636363,
"alnum_prop": 0.521594684385382,
"repo_name": "nodu/RPiWorkshop",
"id": "ab2dcac4b923e1a50f93513275894f52263d93f7",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "fss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77910"
},
{
"name": "JavaScript",
"bytes": "138509"
},
{
"name": "Python",
"bytes": "903"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.forms import ModelForm,Textarea
from django import forms
from django.contrib.auth.forms import UserCreationForm
import string
from play.models import *
import constants
import datetime
class IdeaForm(ModelForm):
    """Form for an Idea record; widgets narrow the numeric inputs."""
    title=forms.CharField()
    description=forms.CharField(widget = forms.Textarea(attrs={}))
    points=forms.DecimalField(widget = forms.TextInput(attrs={ 'size':'4'}))
    experience=forms.DecimalField(widget = forms.TextInput(attrs={ 'size':'4'}))
    class Meta:
        model=Idea
        fields = ('title','description','author','points', 'experience')
class CommentForm(ModelForm):
    """Form for a Comment record, rendered as a small one-row textarea."""
    comment=forms.CharField(widget = forms.Textarea)
    class Meta:
        model=Comment
        fields = ('comment',)
    def __init__(self, *args, **kwargs):
        super(CommentForm, self).__init__(*args, **kwargs) # Call to ModelForm constructor
        # Shrink the textarea to a single short row.
        self.fields['comment'].widget.attrs['cols'] = 15
        self.fields['comment'].widget.attrs['rows'] = 1
class CommentFeedForm(ModelForm):
    """Form for a CommentFeed record, rendered as a small one-row textarea."""
    comment=forms.CharField(widget = forms.Textarea)
    class Meta:
        model=CommentFeed
        fields = ('comment', )
    def __init__(self, *args, **kwargs):
        super(CommentFeedForm, self).__init__(*args, **kwargs) # Call to ModelForm constructor
        # Shrink the textarea to a single short row.
        self.fields['comment'].widget.attrs['cols'] = 15
        self.fields['comment'].widget.attrs['rows'] = 1
class EditUserForm(ModelForm):
    """Form exposing only the User's username for editing."""
    class Meta:
        model=User
        fields = ('username',)
class EditPicForm(ModelForm):
    """Form exposing a Player's picture-related fields for editing."""
    class Meta:
        model=Player
        fields = ('picture_url', 'facebook_pic')
class SignUpForm(UserCreationForm):
    """ Require email address when a user signs up """
    email = forms.EmailField(label='Email address', max_length=75)
    def __init__(self, *args, **kwargs):
        super(SignUpForm, self).__init__(*args, **kwargs)
        # remove username
        self.fields.pop('username')
    class Meta:
        model = User
        fields = ('email', 'first_name', 'last_name')
        exclude=['username']
    def clean_email(self):
        """Reject email addresses that already belong to an existing user."""
        email = self.cleaned_data["email"]
        try:
            user = User.objects.get(email=email)
            # A match means the address is taken; the ValidationError
            # propagates (only DoesNotExist is caught below).
            raise forms.ValidationError("This email address already exists. Did you forget your password?")
        except User.DoesNotExist:
            return email
    def save(self, commit=True):
        """Create the user, with the email address doubling as username."""
        # NOTE(review): super(UserCreationForm, self) deliberately skips
        # UserCreationForm.save and calls ModelForm.save — confirm intended.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        user.email = self.cleaned_data["email"]
        user.username=user.email
        user.first_name=self.cleaned_data['first_name']
        user.last_name=self.cleaned_data['last_name']
        user.is_active = True # change to false if using email activation
        if commit:
            user.save()
return user | {
"content_hash": "d089230bc7c398f651f56f3522176534",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 107,
"avg_line_length": 33.66279069767442,
"alnum_prop": 0.6404145077720207,
"repo_name": "fraferra/PlayCity",
"id": "117619347b024d35bb1afc91c0e37f53a9a9fe5c",
"size": "2895",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "play/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "720208"
},
{
"name": "JavaScript",
"bytes": "2228440"
},
{
"name": "Python",
"bytes": "355729"
},
{
"name": "Shell",
"bytes": "169"
}
],
"symlink_target": ""
} |
# Export a trained model's fitted label encoders into an HDF5 file:
# one 'columns' dataset plus, under an 'encoders' group, a dataset of
# classes per encoded column (Python 2 script).
from model import load_encoders,save_object
from model_export import Exporter
from db_operations import DbOperator
import cPickle as pickle
import argparse
import h5py
import numpy as np
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog='Encoder Exporter')
    parser.add_argument('-t','--table',required=True)
    parser.add_argument('-n','--model_name',required=True)
    args = parser.parse_args()
    ####################################DATA SOURCE################################
    # table = 'data_little_enc'
    var_args = vars(args)
    table = var_args['table']
    save_dir = var_args['model_name']
    address = 'postgresql://script@localhost:5432/ccfd'
    db_ops = DbOperator(address=address)
    col_names = list(db_ops.get_columns(table))
    encoders = load_encoders()
    # encoder_mapper = {}
    # encoder_mapper['columns'] = col_names
    # for c in encoders.keys():
    #     encoder_mapper[c] = encoders[c]._classes
    path = Exporter.export_dist+'/'+save_dir+'/'+'encoders.h5'
    # save_object(path)
    # "x" mode: fail if the file already exists.
    f = h5py.File(path, "x")
    # NOTE(review): assumes 'index' is always in the column list — confirm.
    col_names.remove('index') #!!!!!!
    skip_list = ['frd_ind']
    for x in skip_list:
        col_names.remove(x)
    # NOTE(review): this np.array result is immediately overwritten below.
    arr = np.array(col_names)
    arr = np.char.array(col_names)
    dset_name = 'columns'
    col_names_ds = Exporter.create_ds(f,dset_name,arr,dtype='S50')
    group_name = 'encoders'
    group = Exporter.create_group(f,group_name)
    for dset_name in encoders.keys():
        if dset_name in skip_list:
            continue
        arr = encoders[dset_name].classes_
        # Replace None entries so they can be stored as fixed-width strings.
        arr = map(lambda x: x if x!=None else 'N/A',arr)
        arr = np.asarray(arr)
        print arr
        Exporter.create_ds(f,dset_name,arr,group=group,dtype='S50')
f.close() | {
"content_hash": "fbf5984a2e5879ccf109363ebe6219be",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 83,
"avg_line_length": 34.549019607843135,
"alnum_prop": 0.604994324631101,
"repo_name": "bottydim/detect-credit-card-fraud",
"id": "d4ec3c2db7cc7eb971e5a91001ac035157ee8b1b",
"size": "1762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccfd_dnn/encoder_export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1164"
},
{
"name": "HTML",
"bytes": "1310723"
},
{
"name": "JavaScript",
"bytes": "55422"
},
{
"name": "Jupyter Notebook",
"bytes": "21790133"
},
{
"name": "Python",
"bytes": "231081"
}
],
"symlink_target": ""
} |
# Create a named subdirectory under a base path given on the command line,
# then echo the created path between #@@# markers (Python 2 script;
# presumably parsed by a caller — confirm).
from sys import argv
import string
import random
import os
# read in command line args
params = list(argv)
# Create dir name
directory = os.path.join(params[1],params[2])
# Create directory
os.makedirs(directory)
print "#@@# " + directory + " #@@#"
| {
"content_hash": "05e97189f67ba8411c5e53b606fbda99",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 45,
"avg_line_length": 16.866666666666667,
"alnum_prop": 0.7035573122529645,
"repo_name": "gkiar/MR-devtools",
"id": "c21d9c1b63ea7f60bd3e01daab61d037ebb8b78b",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/mkDir.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "2000"
},
{
"name": "Python",
"bytes": "26641"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe, os
from frappe.utils import touch_file
def make_boilerplate(dest, app_name):
    """Interactively scaffold a new Frappe app skeleton under *dest*.

    Prompts for app metadata on stdin, creates the package folder tree,
    then renders MANIFEST.in, setup.py, hooks.py, desktop.py, etc. from
    the module-level templates (Python 2: uses raw_input/print).
    """
    if not os.path.exists(dest):
        print "Destination directory does not exist"
        return
    hooks = frappe._dict()
    hooks.app_name = app_name
    app_title = hooks.app_name.replace("_", " ").title()
    # Prompt for each value; "Some Key (default ...)" maps to dict key
    # "some_key". Empty answers repeat the prompt unless a default exists.
    for key in ("App Title (defaut: {0})".format(app_title),
            "App Description", "App Publisher", "App Email",
            "App Icon (default 'octicon octicon-file-directory')",
            "App Color (default 'grey')",
            "App License (default 'MIT')"):
        hook_key = key.split(" (")[0].lower().replace(" ", "_")
        hook_val = None
        while not hook_val:
            hook_val = raw_input(key + ": ")
            if hook_key=="app_name" and hook_val.lower().replace(" ", "_") != hook_val:
                print "App Name must be all lowercase and without spaces"
                hook_val = ""
            elif not hook_val:
                # Fall back to the documented default, if any.
                defaults = {
                    "app_title": app_title,
                    "app_icon": "octicon octicon-file-directory",
                    "app_color": "grey",
                    "app_license": "MIT"
                }
                if hook_key in defaults:
                    hook_val = defaults[hook_key]
        hooks[hook_key] = hook_val
    # Create the package directory tree.
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)),
        with_init=True)
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates"), with_init=True)
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "www"))
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
        "pages"), with_init=True)
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
        "generators"), with_init=True)
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
        "includes"))
    frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "config"), with_init=True)
    touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "__init__.py"))
    # Render the boilerplate files from the module-level templates.
    with open(os.path.join(dest, hooks.app_name, "MANIFEST.in"), "w") as f:
        f.write(manifest_template.format(**hooks))
    with open(os.path.join(dest, hooks.app_name, ".gitignore"), "w") as f:
        f.write(gitignore_template)
    with open(os.path.join(dest, hooks.app_name, "setup.py"), "w") as f:
        f.write(setup_template.format(**hooks))
    with open(os.path.join(dest, hooks.app_name, "requirements.txt"), "w") as f:
        f.write("frappe")
    with open(os.path.join(dest, hooks.app_name, "README.md"), "w") as f:
        f.write("## {0}\n\n{1}\n\n#### License\n\n{2}".format(hooks.app_title,
            hooks.app_description, hooks.app_license))
    with open(os.path.join(dest, hooks.app_name, "license.txt"), "w") as f:
        f.write("License: " + hooks.app_license)
    with open(os.path.join(dest, hooks.app_name, hooks.app_name, "modules.txt"), "w") as f:
        f.write(hooks.app_title)
    with open(os.path.join(dest, hooks.app_name, hooks.app_name, "hooks.py"), "w") as f:
        f.write(hooks_template.format(**hooks))
    touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "patches.txt"))
    with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "desktop.py"), "w") as f:
        f.write(desktop_template.format(**hooks))
# MANIFEST.in template: package-data inclusion rules for the generated app.
manifest_template = """include MANIFEST.in
include requirements.txt
include *.json
include *.md
include *.py
include *.txt
recursive-include {app_name} *.css
recursive-include {app_name} *.csv
recursive-include {app_name} *.html
recursive-include {app_name} *.ico
recursive-include {app_name} *.js
recursive-include {app_name} *.json
recursive-include {app_name} *.md
recursive-include {app_name} *.png
recursive-include {app_name} *.py
recursive-include {app_name} *.svg
recursive-include {app_name} *.txt
recursive-exclude {app_name} *.pyc"""
hooks_template = """app_name = "{app_name}"
app_title = "{app_title}"
app_publisher = "{app_publisher}"
app_description = "{app_description}"
app_icon = "{app_icon}"
app_color = "{app_color}"
app_email = "{app_email}"
app_version = "0.0.1"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/{app_name}/css/{app_name}.css"
# app_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js, css files in header of web template
# web_include_css = "/assets/{app_name}/css/{app_name}.css"
# web_include_js = "/assets/{app_name}/js/{app_name}.js"
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {{
# "Role": "home_page"
# }}
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "{app_name}.install.before_install"
# after_install = "{app_name}.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "{app_name}.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {{
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }}
#
# has_permission = {{
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {{
# "*": {{
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }}
# }}
# Scheduled Tasks
# ---------------
# scheduler_events = {{
# "all": [
# "{app_name}.tasks.all"
# ],
# "daily": [
# "{app_name}.tasks.daily"
# ],
# "hourly": [
# "{app_name}.tasks.hourly"
# ],
# "weekly": [
# "{app_name}.tasks.weekly"
# ]
# "monthly": [
# "{app_name}.tasks.monthly"
# ]
# }}
# Testing
# -------
# before_tests = "{app_name}.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {{
# "frappe.desk.doctype.event.event.get_events": "{app_name}.event.get_events"
# }}
"""
desktop_template = """from frappe import _
def get_data():
return {{
"{app_title}": {{
"color": "{app_color}",
"icon": "{app_icon}",
"type": "module",
"label": _("{app_title}")
}}
}}
"""
setup_template = """from setuptools import setup, find_packages
import os
version = '0.0.1'
setup(
name='{app_name}',
version=version,
description='{app_description}',
author='{app_publisher}',
author_email='{app_email}',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=("frappe",),
)
"""
gitignore_template = """.DS_Store
*.pyc
*.egg-info
*.swp
tags"""
| {
"content_hash": "5de1fca6344e138247822f53f12f9675",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 104,
"avg_line_length": 26.910931174089068,
"alnum_prop": 0.6443508349631413,
"repo_name": "gangadharkadam/vervefrappe",
"id": "419b3f0552d821b37b77d19869cd1f9622faa48e",
"size": "6748",
"binary": false,
"copies": "4",
"ref": "refs/heads/v5.0",
"path": "frappe/utils/boilerplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "192986"
},
{
"name": "HTML",
"bytes": "125447"
},
{
"name": "JavaScript",
"bytes": "1568218"
},
{
"name": "Python",
"bytes": "1059147"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
# Sphinx documentation build configuration for django-lbutils.
# Read by sphinx-build at build time; commented-out settings document the
# defaults and are kept for reference.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-lbutils'
copyright = u'2016, vicalloy'
author = u'vicalloy'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-lbutilsdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'django-lbutils.tex', u'django-lbutils Documentation',
     u'vicalloy', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'django-lbutils', u'django-lbutils Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'django-lbutils', u'django-lbutils Documentation',
     author, 'django-lbutils', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "036fd03c70fbad214f73724a149c28ae",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 80,
"avg_line_length": 32.70260223048327,
"alnum_prop": 0.7054677731044674,
"repo_name": "vicalloy/lbutils",
"id": "bac96d1116403b0ea479a2b23f16e3fc3db40f56",
"size": "9224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2228"
},
{
"name": "Makefile",
"bytes": "349"
},
{
"name": "Python",
"bytes": "40112"
}
],
"symlink_target": ""
} |
# AMIGA demo (Python 2): read the parallel port by opening the PAR: volume
# as a plain file and print each byte's decimal value.
version = '$VER: ParOpen.py_V1.00.00_(C)15-01-2006_B.Walker_G0LCU.'

# Set up a basic screen, NOTE:- ~print '\f'~ is used as the CLS command.
print '\f'
print ' ',version
print
print ' Parallel Port access on the AMIGA using PAR: as a VOLUME.'
print
print ' Press Ctrl-C to stop.'
print
print ' The decimal value at the parallel port is:- 0 .'

# This is the start of the continuous loop to grab the data sitting on the
# parallel port. It does about 2 samples per second and there IS a flaw here.
# It is NOT a bug however...
def main():
    # Poll PAR: forever, re-opening the volume for every single-byte read;
    # stops when all eight data lines read high (0xFF) or on Ctrl-C.
    while 1:
        # -----------------------------------------------------------
        # Set a pointer to the PAR: device and OPEN it up.
        pointer = open('PAR:', 'rb', 1)
        # Once set, grab my byte and ~store~ it.
        # NOTE(review): read(1) could return '' at end-of-stream, which would
        # make ord() below raise -- presumably PAR: blocks until a byte is
        # available; confirm on real hardware.
        mybyte = str(pointer.read(1))
        # As soon as my byte is grabbed CLOSE down PAR:.
        pointer.close()
        # ===========================================================
        # Over print the printed line AND convert mybyte to a decimal value.
        print '\v',' The decimal value at the parallel port is:-',ord(mybyte),'. '
        # Ctrl-C is used for stopping the program, or set all DATA lines to 1.
        # -----------------------------------------------------------
        if mybyte == chr(255): break
        # -----------------------------------------------------------

main()
# End of DEMO.
# Enjoy finding simple solutions to often very difficult problems... ;o)
| {
"content_hash": "b7c353f63d6cf6a05eddc80823ceb4e4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 93,
"avg_line_length": 39.567567567567565,
"alnum_prop": 0.5307377049180327,
"repo_name": "ActiveState/code",
"id": "74fba8e23e578adf8679d6a07902ff7215e10ef4",
"size": "2954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577750_AMIGAHeads_Only_PAR_VOLUME_READ_Mode/recipe-577750.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import os.path
from unittest.case import expectedFailure
from commoncode.testcase import FileBasedTesting
from cluecode_assert_utils import check_detection
class TestHolders(FileBasedTesting):
    """
    Data-driven tests for copyright holder detection: each test resolves a
    fixture under ``data/holders`` and asserts the exact sequence of holder
    names that detection must report for it.
    """
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def _check_holders(self, test_path, expected):
        # Shared driver for every test: resolve the fixture file and compare
        # the detected holders against the expected list (order matters).
        test_file = self.get_test_loc(test_path)
        check_detection(expected, test_file, what='holders')

    def test_holder_acme_c(self):
        self._check_holders('holders/holder_acme_c-c.c', [
            u'ACME, Inc.',
        ])

    def test_holder_addr_c(self):
        self._check_holders('holders/holder_addr_c-addr_c.c', [
            u'Cornell University.',
            u'Jon Doe.',
        ])

    def test_holder_atheros_py(self):
        self._check_holders('holders/holder_atheros_py-py.py', [
            u'Atheros Communications, Inc.',
            u'Atheros Communications, Inc.',
            u'Intel Corporation.',
        ])

    def test_holder_audio_c(self):
        self._check_holders('holders/holder_audio_c-c.c', [
            u'AudioCodes, DSP Group',
            u'France Telecom, Universite de Sherbrooke.',
            u'France Telecom',
            u'Universite de Sherbrooke.',
        ])

    def test_holder_basic(self):
        # The fixture repeats the same notice fourteen times.
        self._check_holders(
            'holders/holder_basic-copy_c.c',
            ['Markus Franz Xaver Johannes Oberhumer'] * 14)

    def test_holder_complex(self):
        self._check_holders('holders/holder_complex-strtol_c.c', [
            'Regents of the University of California.',
            'University of California.',
        ])

    def test_holder_extjs_c(self):
        self._check_holders('holders/holder_extjs_c-c.c', [
            u'Ext JS, LLC.',
        ])

    def test_holder_hans_jurgen_html(self):
        self._check_holders('holders/holder_hans_jurgen_html-9_html.html', [
            u'Hans-Jurgen Koch.',
        ])

    def test_holder_hostpad(self):
        self._check_holders('holders/holder_hostpad-hostapd_cli_c.c', [
            'Jouni Malinen',
            'Jouni Malinen',
        ])

    def test_holder_ibm_c(self):
        self._check_holders('holders/holder_ibm_c-ibm_c.c', [
            u'ibm technologies',
            u'IBM Corporation',
            u'Ibm Corp.',
            u'ibm.com',
            u'IBM technology',
            u'IBM company',
        ])

    def test_holder_ifrename(self):
        self._check_holders('holders/holder_ifrename-ifrename_c.c', [
            'Jean Tourrilhes',
        ])

    def test_holder_in_c(self):
        self._check_holders(
            'holders/holder_in_c-c.c',
            ['Markus Franz Xaver Johannes Oberhumer'] * 10)

    def test_holder_in_copyright(self):
        self._check_holders(
            'holders/holder_in_copyright-COPYRIGHT_madwifi.madwifi', [
                'Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
            ])

    def test_holder_in_h(self):
        self._check_holders('holders/holder_in_h-ah_h.h', [
            'Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
        ])

    def test_holder_in_license(self):
        self._check_holders('holders/holder_in_license-COPYING_gpl.gpl', [
            'Free Software Foundation, Inc.',
            'Free Software Foundation',
        ])

    def test_holder_in_readme(self):
        self._check_holders('holders/holder_in_readme-README', [
            'Jouni Malinen',
        ])

    def test_holder_in_text_(self):
        self._check_holders(
            'holders/holder_in_text_.txt',
            ['Markus Franz Xaver Johannes Oberhumer'] * 10)

    def test_holder_in_uuencode_binary(self):
        self._check_holders(
            'holders/holder_in_uuencode_binary-mips_be_elf_hal_o_uu.uu', [
                'Sam Leffler, Errno Consulting, Atheros Communications, Inc.',
            ])

    def test_holder_javascript(self):
        self._check_holders('holders/holder_javascript-utilities_js.js', [
            'Yahoo! Inc.',
            'Robert Penner',
        ])

    def test_holder_javascript_large(self):
        # Current (imperfect) behavior: 'a.commit()' is wrongly reported as
        # a holder; the corrected expectation is tracked in the
        # expectedFailure test below.
        self._check_holders('holders/holder_javascript_large-ext_all_js.js', [
            u'Ext JS, LLC',
            u'a.commit()'
        ])

    @expectedFailure
    def test_holder_javascript_large_correct(self):
        self._check_holders('holders/holder_javascript_large-ext_all_js.js', [
            'Ext JS, LLC',
        ])

    def test_holder_mergesort_java(self):
        self._check_holders(
            'holders/holder_mergesort_java-MergeSort_java.java', [
                u'Sun Microsystems, Inc.',
            ])

    @expectedFailure
    def test_holder_multiline(self):
        self._check_holders('holders/holder_multiline-Historical.txt', [
            'GEORGE J. CARRETTE, CONCORD, MASSACHUSETTS.',
        ])

    def test_holder_nokia_cpp(self):
        self._check_holders('holders/holder_nokia_cpp-cpp.cpp', [
            u'Nokia Mobile Phones.',
        ])

    def test_holder_sample_java(self):
        self._check_holders('holders/holder_sample_java-java.java', [
            u'Sample ABC Inc.',
        ])

    def test_holder_simple(self):
        self._check_holders('holders/holder_simple-copy_c.c', [
            'Markus Franz Xaver Johannes Oberhumer',
        ])

    def test_holder_snmptrapd_c(self):
        self._check_holders('holders/holder_snmptrapd_c-snmptrapd_c.c', [
            u'Carnegie Mellon University',
        ])

    def test_holder_somefile_cpp(self):
        self._check_holders('holders/holder_somefile_cpp-somefile_cpp.cpp', [
            u'Private Company',
            u'(PC) Property of Private Company',
            u'(PC) Property',
            u'Private Company',
            u'Private Company'
        ])

    def test_holder_stacktrace_cpp(self):
        self._check_holders(
            'holders/holder_stacktrace_cpp-stacktrace_cpp.cpp', [
                u'Rickard E. Faith',
            ])

    def test_holder_super_c(self):
        self._check_holders('holders/holder_super_c-c.c', [
            u'Super Technologies Corporation',
            u'Cedar Rapids, Iowa',
            u'Benjamin Herrenschmuidt',
            u'IBM Corp.',
        ])

    def test_holder_treetablemodeladapter_java(self):
        self._check_holders(
            'holders/holder_treetablemodeladapter_java-TreeTableModelAdapter_java.java', [
                u'Sun Microsystems, Inc.',
            ])

    def test_holder_tunnel_h(self):
        self._check_holders('holders/holder_tunnel_h-tunnel_h.h', [
            u'Frank Strauss',
        ])

    def test_holder_var_route_c(self):
        self._check_holders('holders/holder_var_route_c-var_route_c.c', [
            u'Carnegie Mellon University',
            u'TGV, Incorporated',
            u'Erik Schoenfelder',
            u'Simon Leinen'
        ])

    def test_holder_xcon_sh(self):
        self._check_holders('holders/holder_xcon_sh-9_sh.sh', [
            u'X Consortium',
        ])

    def test_holder_young_c(self):
        self._check_holders('holders/holder_young_c-c.c', [
            u'Eric Young',
        ])
| {
"content_hash": "850ffdaa3c8c261b5ee67f8d84ce048b",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 114,
"avg_line_length": 37.03492063492064,
"alnum_prop": 0.5961769243956797,
"repo_name": "retrography/scancode-toolkit",
"id": "ce704ae283be33b2e7071554c57d034975112853",
"size": "13024",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/cluecode/test_holders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "1251"
},
{
"name": "AppleScript",
"bytes": "168"
},
{
"name": "Assembly",
"bytes": "78231"
},
{
"name": "Awk",
"bytes": "248"
},
{
"name": "Batchfile",
"bytes": "4403"
},
{
"name": "C",
"bytes": "2089020"
},
{
"name": "C#",
"bytes": "5901"
},
{
"name": "C++",
"bytes": "539671"
},
{
"name": "CMake",
"bytes": "142"
},
{
"name": "CSS",
"bytes": "171"
},
{
"name": "GAP",
"bytes": "579"
},
{
"name": "Groff",
"bytes": "209319"
},
{
"name": "HTML",
"bytes": "2796995"
},
{
"name": "Inno Setup",
"bytes": "235"
},
{
"name": "Java",
"bytes": "150670"
},
{
"name": "JavaScript",
"bytes": "3847"
},
{
"name": "Makefile",
"bytes": "8284"
},
{
"name": "Matlab",
"bytes": "148"
},
{
"name": "Objective-C",
"bytes": "25247"
},
{
"name": "Objective-C++",
"bytes": "950"
},
{
"name": "PHP",
"bytes": "7482"
},
{
"name": "Pascal",
"bytes": "3417"
},
{
"name": "Perl",
"bytes": "16719"
},
{
"name": "PostScript",
"bytes": "562"
},
{
"name": "Protocol Buffer",
"bytes": "374"
},
{
"name": "Python",
"bytes": "2642140"
},
{
"name": "Scala",
"bytes": "4500"
},
{
"name": "Shell",
"bytes": "1600051"
},
{
"name": "Smalltalk",
"bytes": "603"
},
{
"name": "TeX",
"bytes": "3126"
},
{
"name": "VimL",
"bytes": "1129"
},
{
"name": "Visual Basic",
"bytes": "23"
},
{
"name": "XSLT",
"bytes": "474"
},
{
"name": "Yacc",
"bytes": "1497"
}
],
"symlink_target": ""
} |
from base import * # noqa
from log_util import init_logger # noqa
| {
"content_hash": "2a8434f181bfaed6947e458f050cc273",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 34,
"alnum_prop": 0.7205882352941176,
"repo_name": "fabrickit/fabkit",
"id": "615875a0902c1492fe05ae183daa702916df2962",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/fabkit/log/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4979"
},
{
"name": "CoffeeScript",
"bytes": "65442"
},
{
"name": "HTML",
"bytes": "40630"
},
{
"name": "JavaScript",
"bytes": "2315"
},
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "256382"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
} |
import math
from BaseInteractor import _BaseInteractor
class RadiusInteractor(_BaseInteractor):
    """
    Interactor that draws a radial line segment between two arc markers on a
    2D plot and lets the user drag it to change the angle (theta) of the
    annular-sector selection.
    """
    def __init__(self, base, axes, color='black', zorder=5, arc1=None,
                 arc2=None, theta=math.pi / 8):
        """
        :param base: owning plot panel (passed through to _BaseInteractor)
        :param axes: matplotlib axes to draw on
        :param color: line color
        :param zorder: kept for interface compatibility (not used here)
        :param arc1: marker whose get_radius() gives one end of the line
        :param arc2: marker whose get_radius() gives the other end
        :param theta: initial line angle in radians
        """
        _BaseInteractor.__init__(self, base, axes, color=color)
        self.markers = []
        self.axes = axes
        self.r1 = arc1.get_radius()
        self.r2 = arc2.get_radius()
        self.theta = theta
        self.save_theta = theta  # snapshot restored on Esc
        self.move_stop = False
        self.theta_left = None
        self.theta_right = None
        self.arc1 = arc1
        self.arc2 = arc2
        # End points of the radial segment at the current angle.
        x1 = self.r1 * math.cos(self.theta)
        y1 = self.r1 * math.sin(self.theta)
        x2 = self.r2 * math.cos(self.theta)
        y2 = self.r2 * math.sin(self.theta)
        self.line = self.axes.plot([x1, x2], [y1, y2],
                                   linestyle='-', marker='',
                                   color=self.color,
                                   visible=True)[0]
        self.phi = theta
        self.npts = 20
        self.has_move = False
        self.connect_markers([self.line])
        self.update()

    def set_layer(self, n):
        """
        Record the layer number for this interactor and redraw.
        """
        self.layernum = n
        self.update()

    def clear(self):
        """
        Disconnect the markers and remove the line from the axes.
        """
        self.clear_markers()
        try:
            self.line.remove()
        except Exception:
            # Old versions of matplotlib lack Line2D.remove(); fall back to
            # deleting lines from the axes directly. Narrowed from a bare
            # ``except:`` so Ctrl-C / SystemExit are not swallowed.
            for item in range(len(self.axes.lines)):
                del self.axes.lines[0]

    def get_angle(self):
        """
        Return the current line angle (radians).
        """
        return self.theta

    def update(self, r1=None, r2=None, theta=None):
        """
        Recompute the line end points and redraw; any argument left as None
        keeps its current value.
        """
        if r1 is not None:
            self.r1 = r1
        if r2 is not None:
            self.r2 = r2
        if theta is not None:
            self.theta = theta
        x1 = self.r1 * math.cos(self.theta)
        y1 = self.r1 * math.sin(self.theta)
        x2 = self.r2 * math.cos(self.theta)
        y2 = self.r2 * math.sin(self.theta)
        self.line.set(xdata=[x1, x2], ydata=[y1, y2])

    def save(self, ev):
        """
        Remember the current angle (from the event position) so that it can
        be restored on Esc, and freeze the axes during the drag.
        """
        self.save_theta = math.atan2(ev.y, ev.x)
        self.base.freeze_axes()

    def moveend(self, ev):
        """
        End of a drag: clear the moving flag and notify the owner.
        """
        self.has_move = False
        self.base.moveend(ev)

    def restore(self, ev):
        """
        Restore the angle saved in save(). NOTE(review): this only resets
        the attribute; the caller is expected to trigger the redraw.
        """
        self.theta = self.save_theta

    def move(self, x, y, ev):
        """
        Process a move to a new position: set theta from the cursor position
        and ask the top-level plotter to refresh.
        """
        self.theta = math.atan2(y, x)
        self.has_move = True
        self.base.base.update()

    def set_cursor(self, r_min, r_max, theta):
        """
        Programmatically set the radii and angle, then redraw.
        """
        self.theta = theta
        self.r1 = r_min
        self.r2 = r_max
        self.update()

    def get_params(self):
        """
        Return the current state as a dict with keys 'radius1', 'radius2'
        and 'theta'.
        """
        params = {}
        params["radius1"] = self.r1
        params["radius2"] = self.r2
        params["theta"] = self.theta
        return params

    def set_params(self, params):
        """
        Apply a state dict produced by get_params().
        """
        x1 = params["radius1"]
        x2 = params["radius2"]
        theta = params["theta"]
        self.set_cursor(x1, x2, theta)
| {
"content_hash": "9893233d3b5dc8dc86d7d8a79c0e73aa",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 77,
"avg_line_length": 27.0078125,
"alnum_prop": 0.48973098061903386,
"repo_name": "lewisodriscoll/sasview",
"id": "c72d1914810066d07843c83e3062b495bbccd366",
"size": "3457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sas/sasgui/guiframe/local_perspectives/plotting/Edge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AutoIt",
"bytes": "5122"
},
{
"name": "Batchfile",
"bytes": "9544"
},
{
"name": "C",
"bytes": "79248"
},
{
"name": "C++",
"bytes": "228413"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "Makefile",
"bytes": "28052"
},
{
"name": "Python",
"bytes": "3696992"
},
{
"name": "Shell",
"bytes": "12936"
}
],
"symlink_target": ""
} |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys, string
import unique
import copy
def filepath(filename):
    """Return the resolved full path for *filename* via the unique module."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """Return the directory listing of *sub_dir* via the unique module."""
    return unique.read_directory(sub_dir)
def identifyPutativeSpliceEvents(exon_db,constituitive_probeset_db,array_id_db,agglomerate_inclusion_probesets,onlyAnalyzeJunctions):
    """Identify putative exon inclusion/exclusion (splice) events from probeset
    exon annotations.

    Builds per-gene pairwise exon/junction comparisons, classifies them via
    independently_rank_analyze_junction_sets(), and - when
    agglomerate_inclusion_probesets == 'yes' - merges all inclusion probesets
    that share one exclusion junction into a single '|'-joined pseudo-probeset.

    Returns: alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db
    """
    exon_dbase = {}; probeset_comparison_db = {}; x = 0; y = 0
    ### Grab all probesets where we can identify a potential exon inclusion/exclusion event
    if len(array_id_db) == 0: array_id_db = exon_db ### Used when exporting all comparitive junction data
    for probeset in array_id_db:
        if probeset in exon_db:
            affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
            exons = exon_db[probeset].ExonID() #get rid of last pipe
            if probeset not in constituitive_probeset_db:
                #thus, there is a 'gene' probeset for that gene, but we don't want to look at the gene probesets
                if '|' not in exons: #get rid of any block exons or ambiguities)
                    try: x += 1; probeset_comparison_db[affygene].append(exons)
                    except KeyError: x += 1; probeset_comparison_db[affygene] = [exons]
                    exon_dbase[affygene,exons] = probeset
    print "Number of putative probeset comparisons:",x
    probe_level_db = {}
    # Pair every junction with every other junction/exon of the same gene.
    # Keys are always (gene, junction-pair); values are junctions or single exons.
    for affygene in probeset_comparison_db:
        for exon_probeset1 in probeset_comparison_db[affygene]:
            for exon_probeset2 in probeset_comparison_db[affygene]:
                if exon_probeset1 != exon_probeset2:
                    if '-' in exon_probeset1: #get both pair-wise possibilities with this, to grab junctions
                        e1a,e1b = string.split(exon_probeset1,'-')
                        e1 = e1a,e1b
                        try:
                            e2a,e2b = string.split(exon_probeset2,'-')
                            e2 = e2a,e2b
                        except ValueError: e2 = exon_probeset2
                        try: probe_level_db[affygene,e1].append(e2)
                        except KeyError: probe_level_db[affygene,e1] = [e2]
                    else: ### Required when exon_probeset1 is a single exon rather than a junction
                        if '-' in exon_probeset2:
                            e2a,e2b = string.split(exon_probeset2,'-')
                            e2 = e2a,e2b
                            e1 = exon_probeset1
                            try: probe_level_db[affygene,e2].append(e1)
                            except KeyError: probe_level_db[affygene,e2] = [e1]
    #print "Looking for exon events defined by probeset exon associations"
    alt_junction_db,critical_exon_db = independently_rank_analyze_junction_sets(probe_level_db,onlyAnalyzeJunctions)
    #print "Associations Built\n"
    ### Rearange alt_junction_db and agglomerate data for inclusion probesets
    exon_inclusion_db={}; exon_inclusion_event_db={}; alt_junction_db_collapsed={}
    if agglomerate_inclusion_probesets == 'yes':
        for affygene in alt_junction_db:
            alt_junction_db[affygene].sort() ### Should be no need to sort later if we do this
            for event in alt_junction_db[affygene]:
                ### event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
                event1 = event[0][0]; exon_set1 = event[0][1]; exon_set2 = event[1][1]
                probeset1 = exon_dbase[affygene,exon_set1]; probeset2 = exon_dbase[affygene,exon_set2]
                # Group inclusion probesets (probeset1) by their exclusion probeset (probeset2)
                if event1 == 'ei':
                    ###First generate the original fold values for export summary, then the adjusted
                    try: exon_inclusion_db[probeset2].append(probeset1)
                    except KeyError: exon_inclusion_db[probeset2] = [probeset1]
                    try: exon_inclusion_event_db[(affygene, probeset2, event[1])].append(event)
                    except KeyError: exon_inclusion_event_db[(affygene, probeset2, event[1])] = [event]
                else: ### Store all the missing mutual exclusive splicing events
                    try: alt_junction_db_collapsed[affygene].append(event)
                    except KeyError: alt_junction_db_collapsed[affygene] = [event]
        ###Create a new alt_junction_db with merged inclusion events
        for key in exon_inclusion_event_db:
            affygene = key[0]; excl_probeset=key[1]; excl_event = key[2]
            ###Collect critical exon information from each inclusion exon-set to agglomerate and delete old entries
            new_critical_exon_list=[]; incl_exon_sets=[]
            for event in exon_inclusion_event_db[key]:
                incl_exon_set = event[0][1]; incl_exon_sets.append(incl_exon_set) ### Don't sort since this will throw off probeset relationships: incl_exon_sets.sort()
                if len(exon_inclusion_event_db[key])>1: ###If the original list of events > 1
                    critical_exon_list = critical_exon_db[affygene,tuple(event)][1]
                    for exon in critical_exon_list: new_critical_exon_list.append(exon)
                    #del critical_exon_db[affygene,tuple(event)]
            new_critical_exon_list = unique.unique(new_critical_exon_list); new_critical_exon_list.sort()
            new_critical_exon_list = [1,new_critical_exon_list]
            incl_exon_sets_str = string.join(incl_exon_sets,'|') ### New inclusion exon group
            event = [('ei',incl_exon_sets_str),excl_event] ### Store new inclusion exon group
            try: alt_junction_db_collapsed[affygene].append(event)
            except KeyError: alt_junction_db_collapsed[affygene] = [event]
            ###Replace exon_dbase entries with new combined probeset IDs
            incl_probesets = exon_inclusion_db[excl_probeset]
            incl_probesets_str = string.join(incl_probesets,'|')
            if len(incl_exon_sets)>1: ###Often there will be only a single inclusion probeset
                """for exons in incl_exon_sets:
                    key = affygene,exons
                    try: del exon_dbase[key] ###delete individual inclusion exons and replace with a single inclusion agglomerate
                    except KeyError: continue ###Can occur more than once, if an exon participates in more than one splicing event
                """
                exon_dbase[affygene,incl_exon_sets_str] = incl_probesets_str
                critical_exon_db[affygene,tuple(event)] = new_critical_exon_list
                ###Create a new probeset entry in exon_db for the agglomerated probesets
                new_block_exon_ids=[] #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure
                for probeset in incl_probesets:
                    edat = exon_db[probeset]; ensembl = edat.ExternalGeneID(); block_exon_ids = edat.SecondaryExonID(); block_structure = edat.GeneStructure()
                    new_block_exon_ids.append(block_exon_ids)
                new_block_exon_ids = string.join(new_block_exon_ids,'')
                # NOTE(review): edat1 aliases (not copies) the first probeset's data
                # object, so these setters also mutate exon_db[incl_probesets[0]] - confirm intended.
                edat = exon_db[incl_probesets[0]]; edat1 = edat; edat1.setDisplayExonID(incl_exon_sets_str) #; edat1.setExonID(edat.ExonID()) ### Use the first inclusion probeset instance for storing all instance data
                edat1.setSecondaryExonID(new_block_exon_ids); edat1.setProbeset(incl_probesets[0])
                exon_db[incl_probesets_str] = edat1
        print "Length of original splice event database:",len(alt_junction_db)
        print "Length of agglomerated splice event database:",len(alt_junction_db_collapsed)
        alt_junction_db = alt_junction_db_collapsed ### Replace with agglomerated database
        ### End Rearangement
    return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def independently_rank_analyze_junction_sets(probe_level_db,onlyAnalyzeJunctions):
    """Classify each junction-vs-junction (or junction-vs-single-exon) pair per
    gene as exon-inclusion ('ei'), exon-exclusion ('ex') or mutually exclusive
    ('mx'), recording the critical exons that distinguish the pair.

    probe_level_db: {(affygene, (exon5', exon3')): [junction-or-exon, ...]}
    Returns (alt_junction_db, critical_exon_db); the leading 1/2 in each
    critical_exon_db value flags ei/ex (1) vs. mx (2) events.
    """
    ### The below code is used to identify sets of junctions and junction and exon sets anti-correlated with each other
    ### independently storing the critical exons involved
    #probe_level_db[affygene,exons1].append(exons2)
    x = 0
    critical_exon_db = {}
    alt_junction_db = {}
    probe_level_db = eliminate_redundant_dict_values(probe_level_db)
    for key in probe_level_db:
        critical_exon_list = []
        affygene = key[0]
        exon_pair1 = key[1]
        e1a = int(exon_pair1[0][1:])  # numeric part of the exon id, e.g. 'E3' -> 3
        e1b = int(exon_pair1[1][1:])
        for exon_pair2 in probe_level_db[key]: #exon_pair2 could be a single exon
            s = 0 #moved this down!!!!!!!!!
            if exon_pair2[0] == 'E': # thus, exon_pair2 is actually a single exon
                e2 = int(exon_pair2[1:])
                s=1
            else:
                e2a = int(exon_pair2[0][1:])
                e2b = int(exon_pair2[1][1:])
            if s==0:
                e1_pair = e1a,e1b
                e2_pair = e2a,e2b
            if s==1: # thus, exon_pair2 is actually a single exon
                e1_pair = e1a,e1b
                if e1a < e2 and e1b > e2 and onlyAnalyzeJunctions == 'no': # e.g. E3-E5 vs. E4
                    e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2x = 'ei','E'+str(e2)
                    critical_exons = [e1,e2x]
                    critical_exons.sort(); critical_exon_list.append(critical_exons)
                    critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2)]] ###The 1 indicates that the exon can be called up or down, since it is an ei or ex event vs. mx
                    ### Note: everything except for the last one should have two instances added to the database
            elif (e1b == e2b and e1a > e2a): # e.g. E2-E3 vs. E1-E3
                e1 = 'ei','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ex','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exons.sort();critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e1a)]]
                #print affygene, exon_pair1,e1a,e1b,'----',exon_pair2,e2a,e2b
            elif (e1b == e2b and e1a < e2a): # e.g. E1-E3 vs. E2-E3
                e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ei','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exons.sort();critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2a)]]
            elif (e1a == e2a and e1b < e2b): # e.g. E2-E3 vs. E2-E4
                e1 = 'ei','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ex','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exons.sort();critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e1b)]]
            elif (e1a == e2a and e1b > e2b): # e.g. E2-E4 vs. E2-E3
                e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ei','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exons.sort();critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2b)]]
            elif (e1a < e2a and e1b > e2a) and (e1a < e2b and e1b > e2b): # e.g. E2-E6 vs. E3-E5
                e1 = 'ex','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ei','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exons.sort();critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e2a),'E'+str(e2b)]]
            # NOTE(review): with the usual ordering e1a<e1b and e2a<e2b, the branch
            # below can never be True (it requires e1b < e2a < e1a) - the symmetric
            # case is already caught by the previous branch when the pair is visited
            # in the opposite orientation; confirm whether this is dead code.
            elif (e1a > e2a and e1b < e2a) and (e1a > e2b and e1b < e2b): # e.g. E3-E5 vs. E2-E6
                e1 = 'ei','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'ex','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exons.sort();critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [1,['E'+str(e1a),'E'+str(e1b)]]
            elif (e1a < e2a and e1b > e2a): # e.g. E2-E6 vs. E3-E8
                e1 = 'mx','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'mx','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [2,['E'+str(e1b),'E'+str(e2a)]]
            elif (e1a < e2b and e1b > e2b): # e.g. E2-E6 vs. E1-E3
                e1 = 'mx','E'+str(e1a)+'-'+'E'+str(e1b); e2 = 'mx','E'+str(e2a)+'-'+'E'+str(e2b)
                critical_exons = [e1,e2]
                critical_exon_list.append(critical_exons)
                critical_exon_db[affygene,tuple(critical_exons)] = [2,['E'+str(e1a),'E'+str(e2b)]]
        if len(critical_exon_list)>0:
            for entry in critical_exon_list:
                try:
                    alt_junction_db[affygene].append(entry)
                except KeyError:
                    alt_junction_db[affygene] = [entry]
    alt_junction_db = eliminate_redundant_dict_values(alt_junction_db)
    return alt_junction_db, critical_exon_db
def exportJunctionComparisons(alt_junction_db,critical_exon_db,exon_dbase):
    """Write a tab-delimited file of competitive junction comparisons:
    gene, the two competing probesets, and the '|'-joined critical exons."""
    # NOTE(review): the backslashes make this path Windows-specific and '\A'
    # etc. rely on Python leaving unknown escapes intact - confirm whether
    # filepath() normalizes separators, otherwise prefer forward slashes.
    competitive_junction_export = 'AltDatabase\Mm\AltMouse\AltMouse_junction-comparisons.txt'
    fn=filepath(competitive_junction_export)
    data = open(fn,'w')
    title = ['Affygene','probeset1','probeset2','critical-exons']; title = string.join(title,'\t')+'\n'; data.write(title)
    for affygene in alt_junction_db:
        alt_junction_db[affygene].sort() ### Should be no need to sort later if we do this
        for event in alt_junction_db[affygene]:
            ### event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
            exon_set1 = event[0][1]; exon_set2 = event[1][1]
            probeset1 = exon_dbase[affygene,exon_set1]; probeset2 = exon_dbase[affygene,exon_set2]
            critical_exon_list = critical_exon_db[affygene,tuple(event)];critical_exon_list = critical_exon_list[1]
            critical_exon_list = string.join(critical_exon_list,'|')
            export_data = string.join([affygene]+[probeset1,probeset2,critical_exon_list],'\t')+'\n'
            data.write(export_data)
    data.close()
def annotate_splice_event(exons1,exons2,block_structure):
    """Label the splice event implied by comparing two exon sets (e.g.
    'E9-E12' vs. 'E11-E15') against a gene's block structure string such as
    1(E13|E12)-2(E11)-3(E10)-4(E9|E8)-5(E7|E6|E5)-6(E4|E3|E2|E1).

    Returns one of: "retained_intron", "alt5'"/"alt3'", cassette-exon
    combinations, or "unclear"; labels are strand-corrected before return.
    """
    #1(E13|E12)-2(E11)-3(E10)-4(E9|E8)-5(E7|E6|E5)-6(E4|E3|E2|E1)
    splice_event = ''; evidence = 'clear'
    ### Fix: the replaced string was previously discarded (no-op statement).
    ### exon_membership() accepts both ')' and '|' as delimiters, so assigning
    ### the result matches the comment below without changing membership results.
    block_structure = string.replace(block_structure,')','|')
    block_list = string.split(block_structure,'-')
    #[1(E13|E12|,2(E11|,3(E10|,4(E9|E8|,5(E7|E6|E5|,6(E4|E3|E2|E1|]
    ###Perform a membership query; a single exon (no '-') is duplicated into both ends
    try: exon1a,exon1b = string.split(exons1,'-') ###***
    except ValueError: exon1a = exons1; exon1b = exons1; evidence = 'unclear'
    try: exon2a,exon2b = string.split(exons2,'-')
    except ValueError: exon2a = exons2; exon2b = exons2; evidence = 'unclear'
    a = '';b = '';block_a='';block_b=''
    if exon1a == exon2a: a = 'same'
    if exon1b == exon2b: b = 'same'
    ### Map each exon to its block index (-1 when absent -> retained intron)
    ex1a_m = exon_membership(exon1a,block_list);ex2a_m = exon_membership(exon2a,block_list)
    ex1b_m = exon_membership(exon1b,block_list);ex2b_m = exon_membership(exon2b,block_list)
    #print ex1a_m, ex2a_m,ex1b_m,ex2b_m;dog
    if ex1a_m == ex2a_m: block_a = 'same'
    if ex1b_m == ex2b_m: block_b = 'same'
    ### Correct for strand differences
    strand = "+"
    if ex1a_m > ex1b_m: #strand therefore is negative
        strand = "-"
    if (abs(ex1a_m - ex2a_m) == 1) or (abs(ex1b_m - ex2b_m) == 1): alternative_exons = 'one'
    else: alternative_exons = 'multiple'
    if (ex1a_m == -1) or (ex2a_m == -1) or (ex1b_m == -1) or (ex2b_m == -1): splice_event = "retained_intron"
    elif block_a == 'same' and b == 'same': splice_event = "alt5'"
    elif block_b == 'same' and a == 'same': splice_event = "alt3'"
    elif (block_a == 'same' and block_b != 'same'):
        if a == 'same':
            if alternative_exons == 'one': splice_event = "cassette-exon"
            else: splice_event = "cassette-exons"
        else:
            if alternative_exons == 'one': splice_event = "alt5'-cassette-exon"
            else: splice_event = "alt5'-cassette-exons"
    elif (block_b == 'same' and block_a != 'same'):
        if b == 'same':
            if alternative_exons == 'one': splice_event = "cassette-exon"
            else: splice_event = "cassette-exons"
        else:
            if alternative_exons == 'one': splice_event = "cassette-exon-alt3'"
            else: splice_event = "cassette-exons-alt3'"
    else:
        if alternative_exons == 'one': splice_event = "alt5'-cassette-exon-alt3'"
        else: splice_event = "alt5'-cassette-exons-alt3'"
    if evidence == 'unclear':
        ###If the first probeset is a junction and the second is an exon, are junction exons 2 blocks way
        if (abs(ex1a_m - ex2a_m) == 1) and (abs(ex1b_m - ex2b_m) == 1): splice_event = "cassette-exon"
        elif (block_a == 'same' and block_b != 'same'):
            if alternative_exons == 'one': splice_event = "alt5'"
            else: splice_event = "alt5'-cassette-exons"
        elif (block_a != 'same' and block_b == 'same'):
            if alternative_exons == 'one': splice_event = "alt3'"
            else: splice_event = "cassette-exons-alt3'"
        else: splice_event = "unclear"
    ### Swap 5'/3' labels on the negative strand
    if strand == "-":
        if splice_event == "alt5'": splice_event = "alt3'"
        elif splice_event == "alt3'": splice_event = "alt5'"
        elif splice_event == "alt5'-cassette-exon": splice_event = "cassette-exon-alt3'"
        elif splice_event == "alt5'-cassette-exons": splice_event = "cassette-exons-alt3'"
        elif splice_event == "cassette-exons-alt3'": splice_event = "alt5'-cassette-exons"
        elif splice_event == "cassette-exon-alt3'": splice_event = "alt5'-cassette-exon"
    #print splice_event
    return splice_event
def exon_membership(exon,block_structure):
    """Return the index of the last block in *block_structure* (a list of
    block strings) that contains *exon* delimited by '|' or ')', else -1."""
    membership_index = -1
    pipe_delimited = exon + '|'
    paren_delimited = exon + ')'
    for block_index, exon_block in enumerate(block_structure):
        if pipe_delimited in exon_block or paren_delimited in exon_block:
            membership_index = block_index
    return membership_index
def eliminate_redundant_dict_values(database):
    """Return a copy of *database* with each value list de-duplicated (via
    unique.unique) and sorted. The input dict is not modified."""
    db1={}
    for key in database:
        # Renamed from 'list' to avoid shadowing the builtin
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
    return db1
if __name__ == '__main__':
    ### Ad hoc smoke test: annotate a sample junction pair against a block structure
    exons1 = 'E9-E12'
    exons2 = 'E11-E15'
    block_structure = '1(E1)-2(E2)-3(E3|E4)-4(E5)-5(E6)-6(E7|E8|E9|E10|E11)-7(E12)-8(E13|E14)-9(E15)-10(E16|E17)-11(E18)-12(E19|E20)-13(E21|E22)-14(E23|E24)'
    a = annotate_splice_event(exons1,exons2,block_structure)
    print a
    #alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db = identifyPutativeSpliceEvents(exon_db,constituitive_probeset_db,agglomerate_inclusion_probesets)
"content_hash": "c3a81c2bdae346cbc9fb8dff5b45bd69",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 217,
"avg_line_length": 62.104615384615386,
"alnum_prop": 0.5814011097899326,
"repo_name": "kdaily/altanalyze",
"id": "af27eb1cb7d51cdda9e8ff9b110c9f6e2550b8eb",
"size": "20334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExonAnnotate_module.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "32"
},
{
"name": "Python",
"bytes": "7659917"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from copy import copy
from itertools import chain
import logging
import os
import re
import sys
import shlex
from tempfile import NamedTemporaryFile
from django.utils.encoding import smart_text
try:
from urllib.request import pathname2url
from urllib.parse import urljoin
except ImportError: # Python2
from urllib import pathname2url
from urlparse import urljoin
import django
from django.conf import settings
from django.template.context import Context, RequestContext
from django.utils import six
from .subprocess import check_output
logger = logging.getLogger(__name__)
# wkhtmltopdf flags that take no value on the command line; _options_to_args()
# emits these bare (and only when their option value is truthy).
NO_ARGUMENT_OPTIONS = ['--collate', '--no-collate', '-H', '--extended-help', '-g',
                       '--grayscale', '-h', '--help', '--htmldoc', '--license', '-l',
                       '--lowquality', '--manpage', '--no-pdf-compression', '-q',
                       '--quiet', '--read-args-from-stdin', '--readme',
                       '--use-xserver', '-V', '--version', '--dump-default-toc-xsl',
                       '--outline', '--no-outline', '--background', '--no-background',
                       '--custom-header-propagation', '--no-custom-header-propagation',
                       '--debug-javascript', '--no-debug-javascript', '--default-header',
                       '--disable-external-links', '--enable-external-links',
                       '--disable-forms', '--enable-forms', '--images', '--no-images',
                       '--disable-internal-links', '--enable-internal-links', '-n',
                       '--disable-javascript', '--enable-javascript', '--keep-relative-links',
                       '--load-error-handling', '--load-media-error-handling',
                       '--disable-local-file-access', '--enable-local-file-access',
                       '--exclude-from-outline', '--include-in-outline', '--disable-plugins',
                       '--enable-plugins', '--print-media-type', '--no-print-media-type',
                       '--resolve-relative-links', '--disable-smart-shrinking',
                       '--enable-smart-shrinking', '--stop-slow-scripts',
                       '--no-stop-slow-scripts', '--disable-toc-back-links',
                       '--enable-toc-back-links', '--footer-line', '--no-footer-line',
                       '--header-line', '--no-header-line', '--disable-dotted-lines',
                       '--disable-toc-links', '--verbose']
def _options_to_args(**options):
    """
    Convert keyword ``options`` into a wkhtmltopdf command-line argument list.

    * ``None`` values are skipped entirely.
    * Flags listed in NO_ARGUMENT_OPTIONS emit only the flag name, and are
      dropped when their value is False.
    * All other options emit the flag followed by its stringified value.
    """
    args = []
    for option_name in sorted(options):
        option_value = options[option_name]
        prefix = '--' if len(option_name) > 1 else '-'
        flag = (prefix + option_name).replace('_', '-')
        is_bare_flag = flag in NO_ARGUMENT_OPTIONS
        if option_value is None or (is_bare_flag and option_value is False):
            continue
        args.append(flag)
        if not is_bare_flag:
            args.append(six.text_type(option_value))
    return args
def wkhtmltopdf(pages, output=None, **kwargs):
    """
    Converts html to PDF using http://wkhtmltopdf.org/.

    pages: List of file paths or URLs of the html to be converted.
    output: Optional output file path. If None, the output is returned.
    **kwargs: Passed to wkhtmltopdf via _options_to_args() (See
              https://github.com/antialize/wkhtmltopdf/blob/master/README_WKHTMLTOPDF
              for acceptable args.)

    Kwargs is passed through as arguments. e.g.:
        {'footer_html': 'http://example.com/foot.html'}
    becomes
        '--footer-html http://example.com/foot.html'

    Where there is no value passed, use True. e.g.:
        {'disable_javascript': True}
    becomes:
        '--disable-javascript'

    To disable a default option, use None. e.g:
        {'quiet': None'}
    becomes:
        ''

    example usage:
        wkhtmltopdf(pages=['/tmp/example.html'],
                    dpi=300,
                    orientation='Landscape',
                    disable_javascript=True)
    """
    if isinstance(pages, six.string_types):
        # Support a single page.
        pages = [pages]
    if output is None:
        # Standard output.
        output = '-'
    has_cover = kwargs.pop('has_cover', False)
    # Default options: settings.WKHTMLTOPDF_CMD_OPTIONS overrides {'quiet': True};
    # copy() so per-call kwargs never mutate the settings dict.
    options = getattr(settings, 'WKHTMLTOPDF_CMD_OPTIONS', None)
    if options is None:
        options = {'quiet': True}
    else:
        options = copy(options)
    options.update(kwargs)
    page_list = list(pages)
    # handle Table of Contents: inserted after the cover page when present
    use_toc = options.pop('toc', False)
    toc_xsl = options.pop('toc_xsl', '')
    if use_toc:
        insert_at = 1 if has_cover else 0
        if toc_xsl:
            page_list.insert(insert_at, 'toc --xsl-style-sheet %s' % toc_xsl)
        else:
            page_list.insert(insert_at, 'toc')
    # Force --encoding utf8 unless the user has explicitly overridden this.
    options.setdefault('encoding', 'utf8')
    env = getattr(settings, 'WKHTMLTOPDF_ENV', None)
    if env is not None:
        env = dict(os.environ, **env)
    cmd = 'WKHTMLTOPDF_CMD'
    cmd = getattr(settings, cmd, os.environ.get(cmd, 'wkhtmltopdf'))
    ck_args = list(chain(shlex.split(cmd),
                         _options_to_args(**options),
                         page_list,
                         [output]))
    # NOTE(review): shell=True with a space-joined command string means
    # unquoted/untrusted option values or page paths are interpreted by the
    # shell - ensure callers never pass user-controlled strings here.
    ck_kwargs = {
        'env': env,
        'shell': True
    }
    # Handling of fileno() attr. based on https://github.com/GrahamDumpleton/mod_wsgi/issues/85
    try:
        i = sys.stderr.fileno()
        ck_kwargs['stderr'] = sys.stderr
    except (AttributeError, IOError):
        # can't call fileno() on mod_wsgi stderr object
        pass
    ck_args = ' '.join(ck_args)
    return check_output(ck_args, **ck_kwargs)
def convert_to_pdf(filename, header_filename=None, footer_filename=None, cmd_options=None, cover_filename=None):
    """
    Render *filename* (a path/URL, or a list of them) to PDF via wkhtmltopdf().

    Clobber header_html and footer_html only if filenames are provided - these
    keys may already be present in cmd_options as hardcoded static files.
    cover_filename, when given, is prepended as a cover page.
    """
    # Fix: copy the dict so a caller-supplied cmd_options is never mutated by
    # the keys added below (previously has_cover/header_html/footer_html leaked
    # back into the caller's dict between calls).
    cmd_options = dict(cmd_options) if cmd_options else {}
    if cover_filename:
        pages = [cover_filename, filename]
        cmd_options['has_cover'] = True
    else:
        pages = [filename]
    if header_filename is not None:
        cmd_options['header_html'] = header_filename
    if footer_filename is not None:
        cmd_options['footer_html'] = footer_filename
    return wkhtmltopdf(pages=pages, **cmd_options)
class RenderedFile(object):
    """
    Create a temporary file resource of the rendered template with context.
    The filename will be used for later conversion to PDF.
    """
    temporary_file = None  # NamedTemporaryFile holding the rendered HTML
    filename = ''  # on-disk path of the temporary file

    def __init__(self, template, context, request=None):
        # When WKHTMLTOPDF_DEBUG (default: settings.DEBUG) is set, the temp
        # file is kept on disk (delete=False) for inspection.
        debug = getattr(settings, 'WKHTMLTOPDF_DEBUG', settings.DEBUG)
        self.temporary_file = render_to_temporary_file(
            template=template,
            context=context,
            request=request,
            prefix='wkhtmltopdf', suffix='.html',
            delete=(not debug)
        )
        self.filename = self.temporary_file.name

    def __del__(self):
        # Always close the temporary_file on object destruction.
        if self.temporary_file is not None:
            self.temporary_file.close()
def render_pdf_from_template(input_template, header_template, footer_template, context, request=None, cmd_options=None,
                             cover_template=None):
    # For basic usage. Performs all the actions necessary to create a single
    # page PDF from a single template and context.
    #
    # The RenderedFile objects below must stay referenced in locals until
    # convert_to_pdf() returns: their __del__ closes (and typically deletes)
    # the temporary files wkhtmltopdf reads.
    cmd_options = cmd_options if cmd_options else {}
    header_filename = footer_filename = None
    # Main content.
    input_file = RenderedFile(
        template=input_template,
        context=context,
        request=request
    )
    # Optional. For header template argument.
    if header_template:
        header_file = RenderedFile(
            template=header_template,
            context=context,
            request=request
        )
        header_filename = header_file.filename
    # Optional. For footer template argument.
    if footer_template:
        footer_file = RenderedFile(
            template=footer_template,
            context=context,
            request=request
        )
        footer_filename = footer_file.filename
    cover = None
    if cover_template:
        cover = RenderedFile(
            template=cover_template,
            context=context,
            request=request
        )
    return convert_to_pdf(filename=input_file.filename,
                          header_filename=header_filename,
                          footer_filename=footer_filename,
                          cmd_options=cmd_options,
                          cover_filename=cover.filename if cover else None)
def content_disposition_filename(filename):
    """
    Sanitize a file name to be used in the Content-Disposition HTTP header.

    Even if the standard is quite permissive in terms of characters, there are
    a lot of edge cases that are not supported by different browsers.

    See http://greenbytes.de/tech/tc2231/#attmultinstances for more details.
    """
    for troublesome_char in (';', '"'):
        filename = filename.replace(troublesome_char, '')
    return http_quote(filename)
def http_quote(string):
    """
    Given a unicode string, will do its dandiest to give you back a
    valid ascii charset string you can use in, say, http headers and the
    like.
    """
    if isinstance(string, six.text_type):
        # Prefer transliteration via unidecode when available; otherwise the
        # 'replace' error handler substitutes unencodable characters.
        try:
            import unidecode
        except ImportError:
            pass
        else:
            string = unidecode.unidecode(string)
        string = string.encode('ascii', 'replace')
    # Escape backslashes and embedded quotes, then wrap in double-quotes
    # for ; , and the like.
    escaped = string.replace(b'\\', b'\\\\')
    escaped = escaped.replace(b'"', b'\\"')
    return '"{0!s}"'.format(escaped.decode())
def pathname2fileurl(pathname):
    """Returns a file:// URL for pathname. Handles OS-specific conversions."""
    url_path = pathname2url(pathname)
    return urljoin('file:', url_path)
def make_absolute_paths(content):
    """Convert all MEDIA/STATIC URL references in *content* into file:// URL
    paths in order to correctly get them displayed in PDFs.

    URLs that are empty or already carry a scheme (e.g. http://cdn...) are
    left untouched.
    """
    overrides = [
        {
            'root': settings.MEDIA_ROOT,
            'url': settings.MEDIA_URL,
        },
        {
            'root': settings.STATIC_ROOT,
            'url': settings.STATIC_URL,
        }
    ]
    has_scheme = re.compile(r'^[^:/]+://')
    for x in overrides:
        if not x['url'] or has_scheme.match(x['url']):
            continue
        if not x['root'].endswith('/'):
            x['root'] += '/'
        # Fix: the character class previously read ["|'] which also matched a
        # literal '|'; only quotes delimit HTML attribute values. re.escape()
        # keeps regex metacharacters in the URL from widening the match.
        occur_pattern = '''["']({0}.*?)["']'''
        occurences = re.findall(occur_pattern.format(re.escape(x['url'])), content)
        occurences = list(set(occurences))  # Remove dups
        for occur in occurences:
            content = content.replace(occur,
                                      pathname2fileurl(x['root']) +
                                      occur[len(x['url']):])
    return content
def render_to_temporary_file(template, context, request=None, mode='w+b',
                             bufsize=-1, suffix='.html', prefix='tmp',
                             dir=None, delete=True):
    """Render *template* with *context* to a NamedTemporaryFile (utf-8),
    rewriting MEDIA/STATIC references to file:// URLs, and return the open
    file object. Caller is responsible for closing it."""
    if django.VERSION < (1, 8):
        # If using a version of Django prior to 1.8, ensure ``context`` is an
        # instance of ``Context``
        if not isinstance(context, Context):
            if request:
                context = RequestContext(request, context)
            else:
                context = Context(context)
        content = template.render(context)
    else:
        content = template.render(context, request)
    content = smart_text(content)
    content = make_absolute_paths(content)
    try:
        # Python3 has 'buffering' arg instead of 'bufsize'
        tempfile = NamedTemporaryFile(mode=mode, buffering=bufsize,
                                      suffix=suffix, prefix=prefix,
                                      dir=dir, delete=delete)
    except TypeError:
        tempfile = NamedTemporaryFile(mode=mode, bufsize=bufsize,
                                      suffix=suffix, prefix=prefix,
                                      dir=dir, delete=delete)
    try:
        tempfile.write(content.encode('utf-8'))
        tempfile.flush()
        return tempfile
    except:
        # Clean-up tempfile if an Exception is raised; re-raised immediately,
        # so the broad except does not swallow anything.
        tempfile.close()
        raise
| {
"content_hash": "9cf73af20372f540bd7b416cca0b6658",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 119,
"avg_line_length": 35.81163434903047,
"alnum_prop": 0.5761912128712872,
"repo_name": "tclancy/django-wkhtmltopdf",
"id": "9246b8b5ca6f49b28c60d223961af6308e56f18b",
"size": "12928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wkhtmltopdf/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "398"
},
{
"name": "Makefile",
"bytes": "227"
},
{
"name": "Python",
"bytes": "39374"
}
],
"symlink_target": ""
} |
from .contrib import * # noqa
# GeoDjango/PostGIS database connection; credentials are filled in per
# deployment (empty strings here are placeholders).
DATABASES = {
    'default': {
        # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        # 'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        # Or path to database file if using sqlite3.
        'NAME': '',
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        # Empty for localhost through domain sockets or '127.0.0.1' for
        # localhost through TCP.
        'HOST': '',
        # Set to empty string for default.
        'PORT': '',
    }
}
# Project apps
INSTALLED_APPS += (
    'frontend',
    'realtime'
)
PIPELINE_TEMPLATE_SEPARATOR = '__'
# django-pipeline JS bundles: third-party libraries are grouped into
# *_contrib bundles, project code into *_appjs / per-hazard bundles.
PIPELINE_JS = {
    'contrib': {
        'source_filenames': (
            'js/jquery-1.11.1.min.js',
            'js/bootstrap.js',
        ),
        'output_filename': 'js/contrib.js',
    },
    'appjs': {
        'source_filenames': (
            'js/csrf-ajax.js',
        ),
        'output_filename': 'js/appjs.js'
    },
    'realtime_contrib': {
        'source_filenames': (
            'realtime/js/jquery.dynatable.js',
            'realtime/js/leaflet.markercluster-src.js',
            'realtime/js/locationfilter.js',
            'realtime/js/validator.js',
            'realtime/js/moment.js',
            'realtime/js/moment-timezone-all-years.js',
            'realtime/js/typeahead.jquery.js',
            'realtime/js/sprintf.js',
        ),
        'output_filename': 'js/realtime_contrib.js',
    },
    'realtime_appjs': {
        'source_filenames': (
            'realtime/js/realtime.js',
        ),
        'output_filename': 'js/realtime_appjs.js'
    },
    'realtime_shakejs': {
        'source_filenames': (
            'realtime/js/earthquake/shake.js',
            'realtime/js/templates/earthquake/*.jst'
        ),
        'output_filename': 'js/realtime_shakejs.js'
    },
    'realtime_floodjs': {
        'source_filenames': (
            'realtime/js/flood/flood.js',
            'realtime/js/templates/flood/*.jst'
        ),
        'output_filename': 'js/realtime_floodjs.js'
    },
    'realtime_ashjs': {
        'source_filenames': (
            'realtime/js/ash/ash.js',
            'realtime/js/templates/ash/*.jst'
        ),
        'output_filename': 'js/realtime_ashjs.js'
    },
    'usermap_contrib': {
        'source_filenames': (
            'user_map/js/leaflet.markercluster-src.js',
            'user_map/js/validate.js',
        ),
        'output_filename': 'js/usermap_contrib.js',
    },
    'usermap_appjs': {
        'source_filenames': (
            'user_map/js/user-map.js',
        ),
        'output_filename': 'js/usermap_appjs.js'
    },
}
# django-pipeline CSS bundles, mirroring the JS bundle layout above.
PIPELINE_CSS = {
    'contrib': {
        'source_filenames': (
            'css/bootstrap.min.css',
            'css/inasafe-blog-style.css'
        ),
        'output_filename': 'css/contrib.css',
        'extra_context': {
            'media': 'screen, projection',
        },
    },
    'main': {
        'source_filenames': (
            'css/main.css',
        ),
        'output_filename': 'css/main.css',
        'extra_context': {
            'media': 'screen, projection',
        },
    },
    'realtime_contrib': {
        'source_filenames': (
            'realtime/css/jquery.dynatable.css',
            'realtime/css/locationfilter.css',
            'realtime/css/MarkerCluster.css',
            'realtime/css/MarkerCluster.user-map.css'
        ),
        'output_filename': 'css/realtime_contrib.css',
        'extra_context': {
            'media': 'screen, projection',
        },
    },
    'realtime_appcss': {
        'source_filenames': (
            'realtime/css/realtime.css',
        ),
        'output_filename': 'css/realtime_appcss.css',
        'extra_context': {
            'media': 'screen, projection'
        }
    },
    'realtime_shakecss': {
        'source_filenames': (
            'realtime/css/earthquake/shake.css',
        ),
        'output_filename': 'css/realtime_shakecss.css',
        'extra_context': {
            'media': 'screen, projection'
        }
    },
    'realtime_floodcss': {
        'source_filenames': (
            'realtime/css/flood/flood.css',
        ),
        'output_filename': 'css/realtime_floodcss.css',
        'extra_context': {
            'media': 'screen, projection'
        }
    },
    'realtime_ashcss': {
        'source_filenames': (
            'realtime/css/ash/ash.css',
        ),
        'output_filename': 'css/realtime_ashcss.css',
        'extra_context': {
            'media': 'screen, projection'
        }
    },
    'usermap_contrib': {
        'source_filenames': (
            'user_map/css/MarkerCluster.css',
        ),
        'output_filename': 'css/usermap_contrib.css',
        'extra_context': {
            'media': 'screen, projection'
        }
    },
    'usermap_appcss': {
        'source_filenames': (
            'user_map/css/MarkerCluster.user-map.css',
            'user_map/css/user-map.css',
        ),
        'output_filename': 'css/usermap_appcss.css',
        'extra_context': {
            'media': 'screen, projection'
        }
    },
}
| {
"content_hash": "ab6783bb6494ec2da44eb87c7182ef5e",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 71,
"avg_line_length": 28.037634408602152,
"alnum_prop": 0.502588686481304,
"repo_name": "AIFDR/inasafe-django",
"id": "758d5cc3a618ff2f42400444513949ccb8e44e1b",
"size": "5239",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "django_project/core/settings/project.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "196369"
},
{
"name": "HTML",
"bytes": "93481"
},
{
"name": "JavaScript",
"bytes": "346781"
},
{
"name": "Makefile",
"bytes": "9201"
},
{
"name": "Python",
"bytes": "285851"
},
{
"name": "Shell",
"bytes": "2169"
}
],
"symlink_target": ""
} |
def simple_print(msg):
    """Write *msg* to stdout; thin Py3 shim kept for API symmetry."""
    print(msg)
def compat_input(msg):
    """Prompt with *msg* and return one line read from stdin (Py3 input)."""
    response = input(msg)
    return response
| {
"content_hash": "aa21f7bfcadf129833e990580dc8380f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 22,
"avg_line_length": 14.166666666666666,
"alnum_prop": 0.6588235294117647,
"repo_name": "danifus/pssepath",
"id": "5f9a3eb66918ec61860102bf02ed13ef95277516",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pssepath/compat/_compat3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30849"
}
],
"symlink_target": ""
} |
import numpy as np
class BaseEstimator(object):
    """Base class providing input validation and the fit/predict skeleton
    for estimators. Subclasses implement _predict()."""
    X = None            # stored feature matrix after fit
    y = None            # stored targets after fit (when y_required)
    y_required = True   # unsupervised subclasses set this False
    fit_required = True # set False for estimators usable without fit

    def _setup_input(self, X, y=None):
        """
        Ensures X and y are stored as numpy ndarrays by converting from an
        array-like object if necessary. Enables estimators to define whether
        they require a set of y target values or not with y_required, e.g.
        kmeans clustering requires no target labels and is fit against only X.

        Parameters
        ----------
        X : array-like
            Feature dataset
        y : array-like
            Target values; required by default (supervised learning), may be
            omitted when y_required is False (unsupervised learning).

        Raises
        ------
        ValueError
            If X is empty, or y is missing/empty while y_required is True.
        """
        if not isinstance(X, np.ndarray):
            X = np.array(X)  # np.array() itself raises if conversion is impossible
        if X.size == 0:
            raise ValueError('Number of features must be greater than zero')
        if X.ndim == 1:
            # Fix: feature count is the vector length (was the shape tuple)
            self.n_samples, self.n_features = 1, X.shape[0]
        else:
            # Fix: unpack both dimensions (was 'X.shape[0]', an un-unpackable int)
            self.n_samples, self.n_features = X.shape[0], X.shape[1]
        self.X = X
        if self.y_required:
            if y is None:
                raise ValueError('Missed required argument y')
            if not isinstance(y, np.ndarray):
                y = np.array(y)
            if y.size == 0:  # Fix: was 'y.size = 0' (assignment, a syntax error)
                raise ValueError('Number of targets must be greater than 0')
            self.y = y

    def fit(self, X, y=None):
        """Validate and store the training data."""
        # Fix: was 'sef._set_input(X, y)' - typo'd receiver and method name
        self._setup_input(X, y)

    def predict(self, X=None):
        """Predict targets for X; requires fit() first when fit_required."""
        if not isinstance(X, np.ndarray):
            X = np.array(X)
        if self.X is not None or not self.fit_required:
            return self._predict(X)
        else:
            raise ValueError('You must call fit before predict')

    def _predict(self, X=None):  # Fix: missing ':' on the def line
        """Subclass hook implementing the actual prediction."""
        raise NotImplementedError()
"content_hash": "bff77e75cae430604166a9e633d586aa",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 92,
"avg_line_length": 25.904761904761905,
"alnum_prop": 0.6691176470588235,
"repo_name": "paulmorio/grusData",
"id": "7fd693974be2773a0ead56cc1ed6f694a4068452",
"size": "1632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1883928"
},
{
"name": "Python",
"bytes": "31331"
},
{
"name": "R",
"bytes": "4625"
}
],
"symlink_target": ""
} |
import http.server
import http.client
import json
import socketserver
class OpenFDAClient():
    """Minimal HTTPS client for the openFDA drug adverse-event endpoint."""
    OPENFDA_API_URL = "api.fda.gov"
    OPENFDA_API_EVENT = "/drug/event.json"

    def _request_events(self, query):
        """GET OPENFDA_API_EVENT + *query* and return the utf-8 decoded body.

        Consolidates the request/read/decode sequence that was previously
        copy-pasted across the three public methods; also prints the HTTP
        status/reason, as the originals did.
        """
        conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
        conn.request("GET", self.OPENFDA_API_EVENT + query)
        r1 = conn.getresponse()
        print(r1.status, r1.reason)
        data1 = r1.read()
        return data1.decode("utf8")

    def get_event(self, limit):
        """Return raw JSON text for up to *limit* (string) events."""
        return self._request_events("?limit=" + limit)

    def get_SEARCH_drug(self, drug):
        """Return raw JSON text for 10 events mentioning *drug*."""
        return self._request_events('?search=patient.drug.medicinalproduct=' + drug + '&limit=10')

    def get_SEARCH_company(self, comp):
        """Return raw JSON text for 10 events filed by company number *comp*."""
        return self._request_events('?search=companynumb:' + comp + '&limit=10')
class OpenFDAParser():
    """Extracts fields of interest from openFDA event JSON strings."""

    def _results(self, raw):
        # Decode a raw JSON response and return its "results" array.
        return json.loads(raw)["results"]

    def get_drugs(self, limit):
        """Return the first medicinal product of each of *limit* events."""
        client = OpenFDAClient()
        results = self._results(client.get_event(limit))
        return [event["patient"]["drug"][0]["medicinalproduct"] for event in results]

    def get_COMPANIES(self, drug):
        """Return the company number of each event mentioning *drug*."""
        client = OpenFDAClient()
        results = self._results(client.get_SEARCH_drug(drug))
        return [event['companynumb'] for event in results]

    def get_COMPANIES_list(self, limit):
        """Return the company number of each of the first *limit* events."""
        client = OpenFDAClient()
        results = self._results(client.get_event(limit))
        return [event['companynumb'] for event in results]

    def get_company_search(self, drug):
        """Return the first medicinal product of each event filed under
        company number *drug*."""
        client = OpenFDAClient()
        results = self._results(client.get_SEARCH_company(drug))
        return [event["patient"]["drug"][0]["medicinalproduct"] for event in results]

    def get_genders(self, limit):
        """Return patient sexes ("Male"/"Female"/"Unknown") for *limit* events."""
        client = OpenFDAClient()
        results = self._results(client.get_event(limit))
        labels = {"1": "Male", "2": "Female", "0": "Unknown"}
        codes = [event['patient']['patientsex'] for event in results]
        # Codes other than "0"/"1"/"2" are silently dropped, exactly as the
        # original if/elif chain did.
        return [labels[code] for code in codes if code in labels]
class OpenFDAHTML():
    """Builds the HTML pages served by the OpenFDA demo app."""

    def main_page(self):
        """Return the landing page with one form per supported query.

        BUG FIX: the original emitted a stray ``</form>`` inside ``<head>``
        (malformed HTML); it has been removed.
        """
        html = '''
<html>
<head>
<link rel="shortcut icon" href="https://pbs.twimg.com/profile_images/701113332183371776/57JHEzt7.jpg">
<title>OpenFDA Cool App</title>
</head>
<body bgcolor=#CEE3F6>
<h1>OpenFDA Client</h1>
<form method="get" action="listDrugs">
<input type="submit" value = "Drug list: Send to OpenFDA">
</input>
Limit: <input type="text" name = "limit" size="5">
</input>
</form>
<form method="get" action="listCompanies">
<input type="submit" value= "Companies list: Send to OpenFDA">
</input>
Limit: <input type="text" name = "limit" size="5">
</input>
</form>
<h5>Introduzca el medicamento que desea buscar:</h5>
<form method="get" action = "searchDrug">
<input type="text" name = "drug">
</input>
<input type="submit" value = "Send drug to OpenFDA">
</input>
</form>
<h5>Introduzca el proveedor que desea buscar:</h5>
<form method="get" action="searchCompany">
<input type="text" name= "company">
</input>
<input type="submit" value = "Send company to OpenFDA">
</input>
</form>
<form method="get" action="listGender">
<input type = "submit" value= "Gender List: Send to OpenFDA">
</input>
Limit: <input type= "text" name = "limit" size ="5">
</input>
</form>
</body>
</html>
        '''
        return html

    def get_second_page(self, items):
        """Return a results page listing *items* as an ordered list."""
        html2 = """
        <html>
        <head>
        <link rel="shortcut icon" href="https://pbs.twimg.com/profile_images/701113332183371776/57JHEzt7.jpg">
        <title>OpenFDA Cool App</title>
        </head>
        <body bgcolor=#CEE3F6>
        <ol>
        """
        for drug in items:
            html2 += "<li>" + drug + "</li>"
        html2 += """
        </ol>
        </body>
        </html>
        """
        return html2

    def get_error_page(self):
        """Return the 404 page.

        BUG FIX: the original nested ``<body>`` inside ``<head>`` and never
        closed the document properly; this emits well-formed HTML with the
        same visible text.
        """
        html3 = """
        <html>
        <head>
        </head>
        <body>
        <h1>Error 404</h1>
        Page not found
        </body>
        </html>
        """
        return html3
class testHTTPRequestHandler (http.server.BaseHTTPRequestHandler):
    """Routes GET requests to pages built by OpenFDAHTML/OpenFDAParser.

    Each known path substring renders a page from openFDA data; unknown
    paths yield a 404 page.
    """
    def execute(self,html):
        # Send the headers and page body; the status code must already have
        # been sent via send_response() by the caller.
        self.send_header('Content-type','text/html')
        self.end_headers()
        self.wfile.write(bytes(html, "utf8"))
    def do_GET (self): # self: the incoming client request
        # Routing is by substring match on the path; the query value is
        # taken as everything after the last '=' and forwarded unescaped.
        if self.path == '/':
            self.send_response(200)
            HTML = OpenFDAHTML()
            html = HTML.main_page()
            self.execute(html) # wfile is a file object tied to the client socket
        elif '/listDrugs' in self.path:
            self.send_response(200)
            HTML = OpenFDAHTML()
            parser = OpenFDAParser()
            limit = self.path.split("=")[-1]
            items=parser.get_drugs(limit)
            html = HTML.get_second_page(items)
            self.execute(html)
        elif "searchDrug?drug=" in self.path:
            # Lists the company numbers that filed events for the drug.
            self.send_response(200)
            HTML = OpenFDAHTML()
            parser = OpenFDAParser()
            drug = self.path.split("=")[-1]
            items= parser.get_COMPANIES(drug)
            html = HTML.get_second_page(items)
            self.execute(html)
        elif '/listCompanies' in self.path:
            self.send_response(200)
            HTML = OpenFDAHTML()
            parser = OpenFDAParser()
            limit = self.path.split("=")[-1]
            items = parser.get_COMPANIES_list(limit)
            html = HTML.get_second_page(items)
            self.execute(html)
        elif "searchCompany?company=" in self.path:
            # Lists the medicinal products in events filed by the company.
            self.send_response(200)
            HTML = OpenFDAHTML()
            parser = OpenFDAParser()
            drug = self.path.split("=")[-1]
            items = parser.get_company_search(drug)
            html = HTML.get_second_page(items)
            self.execute(html)
        elif "/listGender" in self.path:
            self.send_response(200)
            HTML = OpenFDAHTML()
            parser = OpenFDAParser()
            limit = self.path.split("=")[-1]
            items = parser.get_genders(limit)
            html = HTML.get_second_page(items)
            self.execute(html)
        elif "/secret" in self.path:
            # Demo endpoint: request HTTP Basic credentials.
            self.send_response(401)
            self.send_header('WWW-Authenticate','Basic realm="User Visible Realm"')
            self.end_headers()
        elif "/redirect" in self.path:
            # Demo endpoint: redirect back to the front page.
            self.send_response(302)
            self.send_header('Location', 'http://localhost:8000/')
            self.end_headers()
        else:
            self.send_response(404)
            HTML = OpenFDAHTML()
            html=HTML.get_error_page()
            self.execute(html)
        return
| {
"content_hash": "5132b7521b18468305333f0c5f8fbd04",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 118,
"avg_line_length": 32.79245283018868,
"alnum_prop": 0.5164556962025316,
"repo_name": "nakagawamariana/OpenFDA",
"id": "c005ac66c79b933fcac0ed628d5f8fc4f1918eb8",
"size": "9419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9894"
}
],
"symlink_target": ""
} |
import itertools
import sys
# Require Python 3.7+ for ordered dictionaries so that the order of the
# generated tests remain the same.
# Usage:
# python3.7 mongos-pin-auto-tests.py > mongos-pin-auto.yml
if sys.version_info[:2] < (3, 7):
    print('ERROR: This script requires Python >= 3.7, not:')
    print(sys.version)
    print('Usage: python3.7 mongos-pin-auto-tests.py > mongos-pin-auto.yml')
    # sys.exit instead of the bare exit() builtin: exit() is injected by the
    # site module and is not guaranteed to exist in every interpreter setup.
    sys.exit(1)
HEADER = '''# Autogenerated tests that transient errors in a transaction unpin the session.
# See mongos-pin-auto-tests.py
runOn:
-
minServerVersion: "4.1.8"
topology: ["sharded"]
# serverless proxy doesn't append error labels to errors in transactions
# caused by failpoints (CLOUDP-88216)
serverless: "forbid"
database_name: &database_name "transaction-tests"
collection_name: &collection_name "test"
data: &data
- {_id: 1}
- {_id: 2}
tests:
- description: remain pinned after non-transient Interrupted error on insertOne
useMultipleMongoses: true
operations:
- &startTransaction
name: startTransaction
object: session0
- &initialCommand
name: insertOne
object: collection
arguments:
session: session0
document: {_id: 3}
result:
insertedId: 3
- name: targetedFailPoint
object: testRunner
arguments:
session: session0
failPoint:
configureFailPoint: failCommand
mode: {times: 1}
data:
failCommands: ["insert"]
errorCode: 11601
- name: insertOne
object: collection
arguments:
session: session0
document:
_id: 4
result:
errorLabelsOmit: ["TransientTransactionError", "UnknownTransactionCommitResult"]
errorCodeName: Interrupted
- &assertSessionPinned
name: assertSessionPinned
object: testRunner
arguments:
session: session0
- &commitTransaction
name: commitTransaction
object: session0
expectations:
- command_started_event:
command:
insert: *collection_name
documents:
- _id: 3
ordered: true
readConcern:
lsid: session0
txnNumber:
$numberLong: "1"
startTransaction: true
autocommit: false
writeConcern:
command_name: insert
database_name: *database_name
- command_started_event:
command:
insert: *collection_name
documents:
- _id: 4
ordered: true
readConcern:
lsid: session0
txnNumber:
$numberLong: "1"
startTransaction:
autocommit: false
writeConcern:
command_name: insert
database_name: *database_name
- command_started_event:
command:
commitTransaction: 1
lsid: session0
txnNumber:
$numberLong: "1"
startTransaction:
autocommit: false
writeConcern:
recoveryToken: 42
command_name: commitTransaction
database_name: admin
outcome: &outcome
collection:
data:
- {_id: 1}
- {_id: 2}
- {_id: 3}
- description: unpin after transient error within a transaction
useMultipleMongoses: true
operations:
- &startTransaction
name: startTransaction
object: session0
- &initialCommand
name: insertOne
object: collection
arguments:
session: session0
document:
_id: 3
result:
insertedId: 3
- name: targetedFailPoint
object: testRunner
arguments:
session: session0
failPoint:
configureFailPoint: failCommand
mode: { times: 1 }
data:
failCommands: ["insert"]
closeConnection: true
- name: insertOne
object: collection
arguments:
session: session0
document:
_id: 4
result:
errorLabelsContain: ["TransientTransactionError"]
errorLabelsOmit: ["UnknownTransactionCommitResult"]
# Session unpins from the first mongos after the insert error and
# abortTransaction succeeds immediately on any mongos.
- &assertSessionUnpinned
name: assertSessionUnpinned
object: testRunner
arguments:
session: session0
- &abortTransaction
name: abortTransaction
object: session0
expectations:
- command_started_event:
command:
insert: *collection_name
documents:
- _id: 3
ordered: true
readConcern:
lsid: session0
txnNumber:
$numberLong: "1"
startTransaction: true
autocommit: false
writeConcern:
command_name: insert
database_name: *database_name
- command_started_event:
command:
insert: *collection_name
documents:
- _id: 4
ordered: true
readConcern:
lsid: session0
txnNumber:
$numberLong: "1"
startTransaction:
autocommit: false
writeConcern:
command_name: insert
database_name: *database_name
- command_started_event:
command:
abortTransaction: 1
lsid: session0
txnNumber:
$numberLong: "1"
startTransaction:
autocommit: false
writeConcern:
recoveryToken: 42
command_name: abortTransaction
database_name: admin
outcome: &outcome
collection:
data: *data
# The rest of the tests in this file test every operation type against
# multiple types of transient errors (connection and error code).'''
TEMPLATE = '''
- description: {test_name} {error_name} error on {op_name} {command_name}
useMultipleMongoses: true
operations:
- *startTransaction
- *initialCommand
- name: targetedFailPoint
object: testRunner
arguments:
session: session0
failPoint:
configureFailPoint: failCommand
mode: {{times: 1}}
data:
failCommands: ["{command_name}"]
{error_data}
- name: {op_name}
object: {object_name}
arguments:
session: session0
{op_args}
result:
{error_labels}: ["TransientTransactionError"]
- *{assertion}
- *abortTransaction
outcome: *outcome
'''
# Maps from op_name to (command_name, object_name, op_args)
OPS = {
# Write ops:
'insertOne': ('insert', 'collection', r'document: {_id: 4}'),
'insertMany': ('insert', 'collection', r'documents: [{_id: 4}, {_id: 5}]'),
'updateOne': ('update', 'collection', r'''filter: {_id: 1}
update: {$inc: {x: 1}}'''),
'replaceOne': ('update', 'collection', r'''filter: {_id: 1}
replacement: {y: 1}'''),
'updateMany': ('update', 'collection', r'''filter: {_id: {$gte: 1}}
update: {$set: {z: 1}}'''),
'deleteOne': ('delete', 'collection', r'filter: {_id: 1}'),
'deleteMany': ('delete', 'collection', r'filter: {_id: {$gte: 1}}'),
'findOneAndDelete': ('findAndModify', 'collection', r'filter: {_id: 1}'),
'findOneAndUpdate': ('findAndModify', 'collection', r'''filter: {_id: 1}
update: {$inc: {x: 1}}
returnDocument: Before'''),
'findOneAndReplace': ('findAndModify', 'collection', r'''filter: {_id: 1}
replacement: {y: 1}
returnDocument: Before'''),
# Bulk write insert/update/delete:
'bulkWrite insert': ('insert', 'collection', r'''requests:
- name: insertOne
arguments:
document: {_id: 1}'''),
'bulkWrite update': ('update', 'collection', r'''requests:
- name: updateOne
arguments:
filter: {_id: 1}
update: {$set: {x: 1}}'''),
'bulkWrite delete': ('delete', 'collection', r'''requests:
- name: deleteOne
arguments:
filter: {_id: 1}'''),
# Read ops:
'find': ('find', 'collection', r'filter: {_id: 1}'),
'countDocuments': ('aggregate', 'collection', r'filter: {}'),
'aggregate': ('aggregate', 'collection', r'pipeline: []'),
'distinct': ('distinct', 'collection', r'fieldName: _id'),
# runCommand:
'runCommand': (
'insert',
r'''database
command_name: insert''', # runCommand requires command_name.
r'''command:
insert: *collection_name
documents:
- _id : 1'''),
}
# Maps from error_name to error_data.
# Errors that must NOT unpin the session (no TransientTransactionError label).
NON_TRANSIENT_ERRORS = {
    'Interrupted': 'errorCode: 11601',
}
# Maps from error_name to error_data.
# Transient errors that must unpin the session from the mongos.
TRANSIENT_ERRORS = {
    'connection': 'closeConnection: true',
    'ShutdownInProgress': 'errorCode: 91',
}
def create_pin_test(op_name, error_name):
    """Render a YAML test asserting the session STAYS pinned after a
    non-transient *error_name* error on *op_name*.

    *op_name* is a key of OPS; *error_name* a key of NON_TRANSIENT_ERRORS.
    """
    command_name, object_name, op_args = OPS[op_name]
    error_data = NON_TRANSIENT_ERRORS[error_name]
    # All bulkWrite variants share the single 'bulkWrite' operation name.
    if op_name.startswith('bulkWrite'):
        op_name = 'bulkWrite'
    # Pass fields explicitly instead of the original
    # ``TEMPLATE.format(**locals())``: relying on local variable names is
    # fragile and breaks silently under renames.
    return TEMPLATE.format(
        test_name='remain pinned after non-transient',
        error_name=error_name,
        op_name=op_name,
        command_name=command_name,
        object_name=object_name,
        op_args=op_args,
        error_data=error_data,
        error_labels='errorLabelsOmit',
        assertion='assertSessionPinned',
    )
def create_unpin_test(op_name, error_name):
    """Render a YAML test asserting the session is UNPINNED after a
    transient *error_name* error on *op_name*.

    *op_name* is a key of OPS; *error_name* a key of TRANSIENT_ERRORS.
    """
    command_name, object_name, op_args = OPS[op_name]
    error_data = TRANSIENT_ERRORS[error_name]
    # All bulkWrite variants share the single 'bulkWrite' operation name.
    if op_name.startswith('bulkWrite'):
        op_name = 'bulkWrite'
    # Explicit fields instead of the fragile ``TEMPLATE.format(**locals())``.
    return TEMPLATE.format(
        test_name='unpin after transient',
        error_name=error_name,
        op_name=op_name,
        command_name=command_name,
        object_name=object_name,
        op_args=op_args,
        error_data=error_data,
        error_labels='errorLabelsContain',
        assertion='assertSessionUnpinned',
    )
# Generate every pin/unpin test case, then emit the static preamble followed
# by the concatenated cases.
pin_cases = [
    create_pin_test(op, err)
    for op, err in itertools.product(OPS, NON_TRANSIENT_ERRORS)
]
unpin_cases = [
    create_unpin_test(op, err)
    for op, err in itertools.product(OPS, TRANSIENT_ERRORS)
]
tests = pin_cases + unpin_cases
print(HEADER)
print(''.join(tests))
| {
"content_hash": "04a80bcc4dcff92d37b9cb43fa4142a1",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 91,
"avg_line_length": 30.50294117647059,
"alnum_prop": 0.5624337093819304,
"repo_name": "mongodb/node-mongodb-native",
"id": "1072ec29073c0357c0c426b105c5fc649c58110d",
"size": "10371",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/spec/transactions/legacy/mongos-pin-auto-tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1702456"
},
{
"name": "Makefile",
"bytes": "1590"
},
{
"name": "Python",
"bytes": "15715"
},
{
"name": "Shell",
"bytes": "34026"
},
{
"name": "TypeScript",
"bytes": "2117734"
}
],
"symlink_target": ""
} |
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.contrib.dynamic_pages.models.test_dynamicpage
'''
from fantastico.tests.base_case import FantasticoUnitTestsCase
from fantastico.contrib.dynamic_pages.models.pages import DynamicPage
class DynamicPageTests(FantasticoUnitTestsCase):
    '''Unit tests covering the DynamicPage model class.'''

    def test_dynamicpage_init_ok_defaults(self):
        '''A DynamicPage built without arguments must expose None for every
        meta attribute and fall back to "en" as its language.'''

        page = DynamicPage()

        for attr_name in ("id", "name", "url", "template", "keywords",
                          "description", "title"):
            self.assertIsNone(getattr(page, attr_name))

        self.assertEqual("en", page.language)

    def test_dynamicpage_init_ok(self):
        '''A DynamicPage built with explicit values must store each of them,
        while the primary key stays unset.'''

        attrs = {"name": "/en/home",
                 "url": "/en/home",
                 "template": "/frontend/views/main.html",
                 "keywords": "keyword 1, ...",
                 "description": "description",
                 "title": "Home page",
                 "language": "en-US"}

        page = DynamicPage(attrs["name"], attrs["url"], attrs["template"],
                           attrs["keywords"], attrs["description"],
                           attrs["title"], attrs["language"])

        self.assertIsNone(page.id)
        for attr_name, expected_value in attrs.items():
            self.assertEqual(expected_value, getattr(page, attr_name))
| {
"content_hash": "2843b067d4abbe4d600eb2c32b430953",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 126,
"avg_line_length": 47.74576271186441,
"alnum_prop": 0.7323393681221158,
"repo_name": "rcosnita/fantastico",
"id": "7c5b1a1fc1b90005af0f19dd8ee87fb063925a29",
"size": "2817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fantastico/contrib/dynamic_pages/models/tests/test_dynamicpage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "2168052"
},
{
"name": "Shell",
"bytes": "13309"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.test import TestCase
from checkouts import util
from checkouts.models import AircraftType
import checkouts.tests.helper as helper
class GetAircraftTypeNamesTests(TestCase):
    """Tests for util.get_aircrafttype_names()."""
    def test_empty(self):
        # No AircraftType rows yet -> empty name list.
        self.assertEqual(len(util.get_aircrafttype_names()), 0)
    def test_single(self):
        name = 'ACTypeName'
        AircraftType.objects.create(name=name)
        self.assertEqual(util.get_aircrafttype_names(), [name,])
    def test_default_sort_without_position(self):
        """Without sorted_position values, defaults to an alpha sort"""
        names = ['Name1', 'Name2', 'Name3']
        for n in names:
            AircraftType.objects.create(name=n)
        self.assertEqual(util.get_aircrafttype_names(), names)
    def test_specific_sort(self):
        """sorted_position drives the default order; an explicit field
        argument (optionally '-'-prefixed for descending) overrides it."""
        names = ['Name2','Name1', 'Name3']
        for i, n in enumerate(names):
            AircraftType.objects.create(name=n, sorted_position=i)
        # Default
        self.assertEqual(util.get_aircrafttype_names(), names)
        # By name, ascending
        names.sort()
        self.assertEqual(util.get_aircrafttype_names("name"), names)
        # By name, descending
        names.reverse()
        self.assertEqual(util.get_aircrafttype_names("-name"), names)
class GetPilotAirstripPairsTests(TestCase):
    """Tests for util.get_pilot_airstrip_pairs() and its filters."""
    def test_empty(self):
        self.assertEqual(util.get_pilot_airstrip_pairs(), [])
    def test_single(self):
        pilot = helper.create_pilot()
        airstrip = helper.create_airstrip()
        expected = [(pilot.username, airstrip.ident)]
        self.assertEqual(util.get_pilot_airstrip_pairs(), expected)
    def test_multiple(self):
        """Unfiltered, the full pilot x airstrip cross product is returned;
        pilot/airstrip/base keyword arguments narrow it."""
        pilot1 = helper.create_pilot('kim', 'Kim', 'Pilot1')
        pilot2 = helper.create_pilot('sam', 'Sam', 'Pilot2')
        airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
        airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
        expected = [
            (pilot1.username, airstrip1.ident),
            (pilot1.username, airstrip2.ident),
            (pilot2.username, airstrip1.ident),
            (pilot2.username, airstrip2.ident),
        ]
        self.assertEqual(util.get_pilot_airstrip_pairs(), expected)
        # Filter on Pilot
        expected = [
            (pilot1.username, airstrip1.ident),
            (pilot1.username, airstrip2.ident),
        ]
        self.assertEqual(util.get_pilot_airstrip_pairs(pilot=pilot1), expected)
        # Filter on Airstrip
        expected = [
            (pilot1.username, airstrip2.ident),
            (pilot2.username, airstrip2.ident),
        ]
        self.assertEqual(util.get_pilot_airstrip_pairs(airstrip=airstrip2), expected)
        # Filter on Base
        base1 = helper.create_airstrip('BASE', 'Base1', is_base=True)
        airstrip1.bases.add(base1)
        expected = [
            (pilot1.username, airstrip1.ident),
            (pilot2.username, airstrip1.ident),
        ]
        self.assertEqual(util.get_pilot_airstrip_pairs(base=base1), expected)
class GetPrecedentedCheckoutsTests(TestCase):
    """Tests for util.get_precedented_checkouts(): a mapping of airstrip
    ident -> {aircraft type name: True} for every recorded checkout."""
    def test_empty(self):
        self.assertEqual(util.get_precedented_checkouts(), {})
    def test_single_precedented(self):
        c = helper.create_checkout()
        expected = {
            c.airstrip.ident: {
                c.aircraft_type.name: True,
            },
        }
        self.assertEqual(util.get_precedented_checkouts(), expected)
    def test_single_unprecedented(self):
        # Objects exist but no Checkout links them, so nothing is
        # precedented; the assignments are for their creation side effects.
        pilot = helper.create_pilot()
        airstrip = helper.create_airstrip()
        aircraft = helper.create_aircrafttype()
        expected = {}
        self.assertEqual(util.get_precedented_checkouts(), expected)
    def test_multiple(self):
        pilot1 = helper.create_pilot('kim', 'Kim', 'Pilot1')
        pilot2 = helper.create_pilot('sam', 'Sam', 'Pilot2')
        actype1 = helper.create_aircrafttype('Name1')
        actype2 = helper.create_aircrafttype('Name2')
        airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
        airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
        airstrip3 = helper.create_airstrip('ID3', 'Airstrip3')
        c1 = helper.create_checkout(pilot=pilot1, airstrip=airstrip1, aircraft_type=actype1)
        c2 = helper.create_checkout(pilot=pilot1, airstrip=airstrip1, aircraft_type=actype2)
        c3 = helper.create_checkout(pilot=pilot2, airstrip=airstrip2, aircraft_type=actype1)
        # airstrip3 has no checkouts, so it must not appear in the result.
        expected = {
            airstrip1.ident: {
                actype1.name: True,
                actype2.name: True,
            },
            airstrip2.ident: {
                actype1.name: True,
            },
        }
        self.assertEqual(util.get_precedented_checkouts(), expected)
class CheckoutFilterTests(TestCase):
    """Tests for util.checkout_filter(): a list of per-pilot/per-airstrip
    rows with an 'actypes' status map, optionally narrowed by pilot,
    airstrip, base, or aircraft_type keyword arguments."""
    def test_empty(self):
        self.assertEqual(util.checkout_filter(), [])
    def test_single_checkout(self):
        c = helper.create_checkout()
        expected = [{
            'pilot_name': c.pilot.full_name,
            'pilot_slug': c.pilot.username,
            'airstrip_ident': c.airstrip.ident,
            'airstrip_name': c.airstrip.name,
            'actypes': {
                c.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.assertEqual(util.checkout_filter(), expected)
    def test_multiple_aircrafttypes(self):
        actype1 = helper.create_aircrafttype('Name1')
        actype2 = helper.create_aircrafttype('Name2')
        c = helper.create_checkout(aircraft_type=actype1)
        expected = [{
            'pilot_name': c.pilot.full_name,
            'pilot_slug': c.pilot.username,
            'airstrip_ident': c.airstrip.ident,
            'airstrip_name': c.airstrip.name,
            'actypes': {
                actype1.name: util.CHECKOUT_SUDAH,
                actype2.name: util.CHECKOUT_BELUM,
            },
        },]
        self.assertEqual(util.checkout_filter(), expected)
        # Testing what happens when limiting to a particular aircraft type
        expected[0]['actypes'].pop(actype2.name)
        self.assertEqual(util.checkout_filter(aircraft_type=actype1), expected)
        helper.create_checkout(aircraft_type=actype2, pilot=c.pilot, airstrip=c.airstrip)
        expected[0]['actypes'][actype2.name] = util.CHECKOUT_SUDAH
        self.assertEqual(util.checkout_filter(), expected)
    def test_multiple_airstrips(self):
        airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
        airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
        c = helper.create_checkout(airstrip=airstrip1)
        expected = [{
            'pilot_name': c.pilot.full_name,
            'pilot_slug': c.pilot.username,
            'airstrip_ident': airstrip1.ident,
            'airstrip_name': airstrip1.name,
            'actypes': {
                c.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.assertEqual(util.checkout_filter(), expected)
        helper.create_checkout(airstrip=airstrip2, pilot=c.pilot, aircraft_type=c.aircraft_type)
        r = {
            'pilot_name': c.pilot.full_name,
            'pilot_slug': c.pilot.username,
            'airstrip_ident': airstrip2.ident,
            'airstrip_name': airstrip2.name,
            'actypes': {
                c.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        }
        expected.append(r)
        self.assertEqual(util.checkout_filter(), expected)
    def test_filtering_by_base(self):
        # Only airstrip1 is attached to base1, so the base filter must hide
        # the airstrip2 checkout.
        base1 = helper.create_airstrip('BASE', 'Base1', is_base=True)
        airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
        airstrip1.bases.add(base1)
        airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
        c = helper.create_checkout(airstrip=airstrip1)
        _ = helper.create_checkout(airstrip=airstrip2, pilot=c.pilot, aircraft_type=c.aircraft_type)
        expected = [{
            'pilot_name': c.pilot.full_name,
            'pilot_slug': c.pilot.username,
            'airstrip_ident': airstrip1.ident,
            'airstrip_name': airstrip1.name,
            'actypes': {
                c.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.assertEqual(util.checkout_filter(base=base1), expected)
    def test_multiple_pilots(self):
        pilot1 = helper.create_pilot('kim', 'Kim', 'Pilot1')
        pilot2 = helper.create_pilot('sam', 'Sam', 'Pilot2')
        c1 = helper.create_checkout(pilot=pilot1)
        expected = [{
            'pilot_name': pilot1.full_name,
            'pilot_slug': c1.pilot.username,
            'airstrip_ident': c1.airstrip.ident,
            'airstrip_name': c1.airstrip.name,
            'actypes': {
                c1.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.assertEqual(util.checkout_filter(), expected)
        c2 = helper.create_checkout(pilot=pilot2, airstrip=c1.airstrip, aircraft_type=c1.aircraft_type)
        r = {
            'pilot_name': pilot2.full_name,
            'pilot_slug': c2.pilot.username,
            'airstrip_ident': c2.airstrip.ident,
            'airstrip_name': c2.airstrip.name,
            'actypes': {
                c2.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        }
        expected.append(r)
        self.assertEqual(util.checkout_filter(), expected)
    def test_multiple_all(self):
        pilot1 = helper.create_pilot('kim', 'Kim', 'Pilot1')
        pilot2 = helper.create_pilot('sam', 'Sam', 'Pilot2')
        actype1 = helper.create_aircrafttype('Name1')
        actype2 = helper.create_aircrafttype('Name2')
        airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
        airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
        c1 = helper.create_checkout(pilot=pilot1, airstrip=airstrip1, aircraft_type=actype1)
        c2 = helper.create_checkout(pilot=pilot1, airstrip=airstrip1, aircraft_type=actype2)
        # NOTE(review): c3 is assigned twice below; the first checkout object
        # is discarded (only the database row matters to these assertions).
        c3 = helper.create_checkout(pilot=pilot1, airstrip=airstrip2, aircraft_type=actype1)
        c3 = helper.create_checkout(pilot=pilot1, airstrip=airstrip2, aircraft_type=actype2)
        c4 = helper.create_checkout(pilot=pilot2, airstrip=airstrip1, aircraft_type=actype1)
        c5 = helper.create_checkout(pilot=pilot2, airstrip=airstrip1, aircraft_type=actype2)
        c6 = helper.create_checkout(pilot=pilot2, airstrip=airstrip2, aircraft_type=actype1)
        c7 = helper.create_checkout(pilot=pilot2, airstrip=airstrip2, aircraft_type=actype2)
        expected = [{
            'pilot_name': pilot1.full_name,
            'pilot_slug': pilot1.username,
            'airstrip_ident': airstrip1.ident,
            'airstrip_name': airstrip1.name,
            'actypes': {
                actype1.name: util.CHECKOUT_SUDAH,
                actype2.name: util.CHECKOUT_SUDAH,
            },
        }, {
            'pilot_name': pilot1.full_name,
            'pilot_slug': pilot1.username,
            'airstrip_ident': airstrip2.ident,
            'airstrip_name': airstrip2.name,
            'actypes': {
                actype1.name: util.CHECKOUT_SUDAH,
                actype2.name: util.CHECKOUT_SUDAH,
            },
        }, {
            'pilot_name': pilot2.full_name,
            'pilot_slug': pilot2.username,
            'airstrip_ident': airstrip1.ident,
            'airstrip_name': airstrip1.name,
            'actypes': {
                actype1.name: util.CHECKOUT_SUDAH,
                actype2.name: util.CHECKOUT_SUDAH,
            },
        }, {
            'pilot_name': pilot2.full_name,
            'pilot_slug': pilot2.username,
            'airstrip_ident': airstrip2.ident,
            'airstrip_name': airstrip2.name,
            'actypes': {
                actype1.name: util.CHECKOUT_SUDAH,
                actype2.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.assertEqual(util.checkout_filter(), expected)
        # Since we already have all these objects created, let's test the
        # filtering feature!
        self.assertEqual(util.checkout_filter(pilot=pilot1), expected[:2])
        self.assertEqual(util.checkout_filter(pilot=pilot2), expected[2:])
        t = [r for i, r in enumerate(expected) if i in (0,2)]
        self.assertEqual(util.checkout_filter(airstrip=airstrip1), t)
        t = [r for i, r in enumerate(expected) if i in (1,3)]
        self.assertEqual(util.checkout_filter(airstrip=airstrip2), t)
        c1.delete()
        expected = expected[1:]
        for e in expected:
            e['actypes'].pop(actype2.name)
        self.assertEqual(util.checkout_filter(aircraft_type=actype1), expected)
class PilotCheckoutsGroupedByAirstripTests(TestCase):
    """Tests for util.pilot_checkouts_grouped_by_airstrip(): results for a
    single pilot, one row per airstrip."""
    def setUp(self):
        # Template for expected results
        self.expected = {
            'populate': {
                'pilot': False,
                'airstrip': True,
            },
            'aircraft_types': [],
            'results': [],
        }
    def test_empty(self):
        pilot = helper.create_pilot()
        self.assertEqual(util.pilot_checkouts_grouped_by_airstrip(pilot), self.expected)
    def test_multiple_pilots(self):
        # Only pilot1's checkout may appear in pilot1's grouping.
        pilot1 = helper.create_pilot('kim', 'Kim', 'Pilot1')
        pilot2 = helper.create_pilot('sam', 'Sam', 'Pilot2')
        c1 = helper.create_checkout(pilot=pilot1)
        c2 = helper.create_checkout(pilot=pilot2, airstrip=c1.airstrip, aircraft_type=c1.aircraft_type)
        results = [{
            'pilot_name': pilot1.full_name,
            'pilot_slug': pilot1.username,
            'airstrip_ident': c1.airstrip.ident,
            'airstrip_name': c1.airstrip.name,
            'actypes': {
                c1.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.expected['aircraft_types'] = [c1.aircraft_type.name,]
        self.expected['results'] = results
        self.assertEqual(util.pilot_checkouts_grouped_by_airstrip(pilot1), self.expected)
class AirstripCheckoutsGroupedByPilotTests(TestCase):
    """Tests for util.airstrip_checkouts_grouped_by_pilot(): results for a
    single airstrip, one row per pilot."""
    def setUp(self):
        # Template for expected results
        self.expected = {
            'populate': {
                'pilot': True,
                'airstrip': False,
            },
            'aircraft_types': [],
            'results': [],
        }
    def test_empty(self):
        airstrip = helper.create_airstrip()
        self.assertEqual(util.airstrip_checkouts_grouped_by_pilot(airstrip), self.expected)
    def test_multiple_pilots(self):
        # Only airstrip1's checkout may appear in airstrip1's grouping.
        airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
        airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
        c1 = helper.create_checkout(airstrip=airstrip1)
        c2 = helper.create_checkout(airstrip=airstrip2, pilot=c1.pilot, aircraft_type=c1.aircraft_type)
        results = [{
            'pilot_name': c1.pilot.full_name,
            'pilot_slug': c1.pilot.username,
            'airstrip_ident': airstrip1.ident,
            'airstrip_name': airstrip1.name,
            'actypes': {
                c1.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.expected['aircraft_types'] = [c1.aircraft_type.name,]
        self.expected['results'] = results
        self.assertEqual(util.airstrip_checkouts_grouped_by_pilot(airstrip1), self.expected)
class CheckoutsSelesaiTests(TestCase):
    """Tests for util.sudah_selesai() — completed checkouts."""
    def setUp(self):
        # Template for expected results
        self.expected = {
            'populate': {
                'pilot': True,
                'airstrip': True,
            },
            'aircraft_types': [],
            'results': [],
        }
    def test_empty(self):
        self.assertEqual(util.sudah_selesai(), self.expected)
    def test_with_checkouts(self):
        c = helper.create_checkout()
        results = [{
            'pilot_name': c.pilot.full_name,
            'pilot_slug': c.pilot.username,
            'airstrip_ident': c.airstrip.ident,
            'airstrip_name': c.airstrip.name,
            'actypes': {
                c.aircraft_type.name: util.CHECKOUT_SUDAH,
            },
        },]
        self.expected['aircraft_types'] = [c.aircraft_type.name,]
        self.expected['results'] = results
        self.assertEqual(util.sudah_selesai(), self.expected)
class CheckoutsBelumSelesaiTests(TestCase):
    """Tests for util.belum_selesai() — checkouts not yet completed."""
    def setUp(self):
        # Template for expected results
        self.expected = {
            'populate': {
                'pilot': True,
                'airstrip': True,
            },
            'aircraft_types': [],
            'results': [],
        }
    def test_empty(self):
        self.assertEqual(util.belum_selesai(), self.expected)
    def test_exclude_fully_selesai(self):
        """If all AircraftTypes for a Pilot/Airstrip pair have a Sudah Selesai
        status, that Pilot/Airstrip pair should be removed from the results."""
        # The checkout exists only for its database side effect; the pair it
        # fully completes must not show up in the 'results' list.
        c = helper.create_checkout()
        self.expected['aircraft_types'] = util.get_aircrafttype_names()
        self.assertEqual(util.belum_selesai(), self.expected)
def test_with_data(self):
pilot1 = helper.create_pilot('kim', 'Kim', 'Pilot1')
pilot2 = helper.create_pilot('sam', 'Sam', 'Pilot2')
actype1 = helper.create_aircrafttype('Name1')
actype2 = helper.create_aircrafttype('Name2')
airstrip1 = helper.create_airstrip('ID1', 'Airstrip1')
airstrip2 = helper.create_airstrip('ID2', 'Airstrip2')
airstrip3 = helper.create_airstrip('ID3', 'Airstrip3')
c1 = helper.create_checkout(pilot=pilot1, airstrip=airstrip1, aircraft_type=actype1)
c2 = helper.create_checkout(pilot=pilot1, airstrip=airstrip1, aircraft_type=actype2)
c3 = helper.create_checkout(pilot=pilot2, airstrip=airstrip2, aircraft_type=actype1)
results = [{
'pilot_name': pilot1.full_name,
'pilot_slug': pilot1.username,
'airstrip_ident': airstrip2.ident,
'airstrip_name': airstrip2.name,
'actypes': {
actype1.name: util.CHECKOUT_BELUM,
actype2.name: util.CHECKOUT_UNPRECEDENTED,
},
}, {
'pilot_name': pilot2.full_name,
'pilot_slug': pilot2.username,
'airstrip_ident': airstrip1.ident,
'airstrip_name': airstrip1.name,
'actypes': {
actype1.name: util.CHECKOUT_BELUM,
actype2.name: util.CHECKOUT_BELUM,
},
}, {
'pilot_name': pilot2.full_name,
'pilot_slug': pilot2.username,
'airstrip_ident': airstrip2.ident,
'airstrip_name': airstrip2.name,
'actypes': {
actype1.name: util.CHECKOUT_SUDAH,
actype2.name: util.CHECKOUT_UNPRECEDENTED,
},
},]
self.expected['aircraft_types'] = util.get_aircrafttype_names()
self.expected['results'] = results
self.assertEqual(util.belum_selesai(), self.expected)
# Since we already have all these objects created, let's test the
# 'belum selesai' filtering feature!
self.expected['results'] = results[:1]
self.assertEqual(util.belum_selesai(pilot=pilot1), self.expected)
self.expected['results'] = results[1:]
self.assertEqual(util.belum_selesai(pilot=pilot2), self.expected)
self.expected['results'] = [results[1],]
self.assertEqual(util.belum_selesai(airstrip=airstrip1), self.expected)
self.expected['results'] = [r for i, r in enumerate(results) if i in (0,2)]
self.assertEqual(util.belum_selesai(airstrip=airstrip2), self.expected)
self.expected['results'] = results[:2]
for r in self.expected['results']:
r['actypes'].pop(actype2.name)
self.expected['aircraft_types'] = [actype1.name,]
self.assertEqual(util.belum_selesai(aircraft_type=actype1), self.expected)
class ChoicesTests(TestCase):
    """Tests for the checkout-status choices helper."""

    def test_choices_checkout_status(self):
        self.assertEqual(
            util.choices_checkout_status(),
            [
                (util.CHECKOUT_SUDAH, util.CHECKOUT_SUDAH_LABEL),
                (util.CHECKOUT_BELUM, util.CHECKOUT_BELUM_LABEL),
            ],
        )
class QueryTests(TestCase):
    """Tests for the pilot and base query helpers."""

    def test_get_pilots(self):
        self.assertEqual(len(util.get_pilots()), 0)
        pilot_kim = helper.create_pilot('kim', 'Kim', 'Pilot1')
        pilot_sam = helper.create_pilot('sam', 'Sam', 'Pilot2')
        pilot_ada = helper.create_pilot('ada', 'Ada', 'Pilot0')
        # Expected ordering observed from the helper's behavior.
        expected = [pilot_ada, pilot_kim, pilot_sam]
        self.assertEqual(list(util.get_pilots()), expected)
        # A non-pilot user must not appear in the results.
        User.objects.create_user('user', 'User', 'Non-Pilot')
        self.assertEqual(list(util.get_pilots()), expected)

    def test_get_bases(self):
        self.assertEqual(len(util.get_bases()), 0)
        base_second = helper.create_airstrip('SCND', 'Second', is_base=True)
        base_first = helper.create_airstrip('FRST', 'First', is_base=True)
        base_third = helper.create_airstrip('THRD', 'Third', is_base=True)
        # Expected ordering observed from the helper's behavior.
        expected = [base_first, base_second, base_third]
        self.assertEqual(list(util.get_bases()), expected)
        # A non-base airstrip must not appear in the results.
        helper.create_airstrip('FRTH', 'Fourth', is_base=False)
        self.assertEqual(list(util.get_bases()), expected)
| {
"content_hash": "836ff30ef0b7291df0df0dffea6c5f33",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 119,
"avg_line_length": 37.02512562814071,
"alnum_prop": 0.572566051393413,
"repo_name": "eallrich/checkniner",
"id": "8f1f5fd2fd887158885c82439c8fcb0abea6c84a",
"size": "22104",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cotracker/checkouts/tests/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3804"
},
{
"name": "HTML",
"bytes": "18395"
},
{
"name": "Procfile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "134565"
},
{
"name": "Shell",
"bytes": "18789"
}
],
"symlink_target": ""
} |
from pyglet.gl import *
from plot_rotation import get_spherical_rotatation
from util import get_model_matrix, get_projection_matrix
from util import get_view_direction_vectors, get_basis_vectors
from util import screen_to_model, model_to_screen
from util import vec_subs, get_direction_vectors
class PlotCamera(object):
    """Tracks view state (rotation matrix, zoom distance, pan offset) for a
    plot window and applies it to the GL modelview/projection matrices."""

    # Zoom-distance limits for the perspective camera.
    min_dist = 0.05
    max_dist = 500.0

    # Zoom-distance limits for the pseudo-orthographic camera.
    min_ortho_dist = 100.0
    max_ortho_dist = 10000.0

    _default_dist = 6.0
    _default_ortho_dist = 600.0

    # Euler-angle presets in degrees, applied about x, y, z in that order.
    rot_presets = {
        'xy': (0, 0, 0),
        'xz': (-90, 0, 0),
        'yz': (0, 90, 0),
        'perspective': (-45, 0, -45)
    }

    def __init__(self, window, ortho=False):
        """window: the plot window this camera renders into.
        ortho: when True, use the (pseudo) orthographic projection."""
        self.window = window
        self.axes = self.window.plot.axes
        self.ortho = ortho
        self.reset()

    def init_rot_matrix(self):
        """Reset the stored rotation to the identity matrix."""
        glPushMatrix()
        glLoadIdentity()
        self._rot = get_model_matrix()
        glPopMatrix()

    def set_rot_preset(self, preset_name):
        """Set the rotation to one of the named presets in rot_presets.

        Raises ValueError for an unknown preset name.
        """
        self.init_rot_matrix()
        try:
            r = self.rot_presets[preset_name]
        except KeyError:
            # Narrowed from a bare `except:` -- only a missing key means
            # an invalid preset name.
            raise ValueError("%s is not a valid rotation preset." % preset_name)
        try:
            self.euler_rotate(r[0], 1, 0, 0)
            self.euler_rotate(r[1], 0, 1, 0)
            self.euler_rotate(r[2], 0, 0, 1)
        except Exception:
            # Best effort (narrowed from a bare `except:`): a failure while
            # applying the preset leaves the rotation partially applied
            # rather than crashing the UI.
            pass

    def reset(self):
        """Restore the default distance, pan offset and identity rotation."""
        self._dist = 0.0
        self._x, self._y = 0.0, 0.0
        self._rot = None
        if self.ortho:
            self._dist = self._default_ortho_dist
        else:
            self._dist = self._default_dist
        self.init_rot_matrix()

    def mult_rot_matrix(self, rot):
        """Compose *rot* with the stored rotation (rot applied after)."""
        glPushMatrix()
        glLoadMatrixf(rot)
        glMultMatrixf(self._rot)
        self._rot = get_model_matrix()
        glPopMatrix()

    def setup_projection(self):
        """Load a perspective projection matching the window's aspect ratio."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        if self.ortho:
            # yep, this is pseudo ortho (don't tell anyone)
            gluPerspective(0.3, float(self.window.width) / float(self.window.height),
                           self.min_ortho_dist - 0.01, self.max_ortho_dist + 0.01)
        else:
            gluPerspective(30.0, float(self.window.width) / float(self.window.height),
                           self.min_dist - 0.01, self.max_dist + 0.01)
        glMatrixMode(GL_MODELVIEW)

    def _get_scale(self):
        return 1.0, 1.0, 1.0

    def apply_transformation(self):
        """Apply pan, zoom, rotation and scale to the modelview matrix."""
        glLoadIdentity()
        glTranslatef(self._x, self._y, -self._dist)
        if self._rot is not None:
            glMultMatrixf(self._rot)
        glScalef(*self._get_scale())

    def spherical_rotate(self, p1, p2, sensitivity=1.0):
        """Rotate via a virtual-trackball drag from screen point p1 to p2."""
        mat = get_spherical_rotatation(p1, p2, self.window.width,
                                       self.window.height, sensitivity)
        if mat is not None:
            self.mult_rot_matrix(mat)

    def euler_rotate(self, angle, x, y, z):
        """Rotate by *angle* degrees about the axis (x, y, z)."""
        glPushMatrix()
        glLoadMatrixf(self._rot)
        glRotatef(angle, x, y, z)
        self._rot = get_model_matrix()
        glPopMatrix()

    def zoom_relative(self, clicks, sensitivity):
        """Zoom by *clicks* scroll steps, clamped to the distance limits."""
        if self.ortho:
            # Ortho distances are much larger, so scale the step up.
            dist_d = clicks * sensitivity * 50.0
            min_dist = self.min_ortho_dist
            max_dist = self.max_ortho_dist
        else:
            dist_d = clicks * sensitivity
            min_dist = self.min_dist
            max_dist = self.max_dist

        new_dist = (self._dist - dist_d)
        if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist:
            self._dist = new_dist

    def mouse_translate(self, x, y, dx, dy):
        """Pan the view so the model point under the cursor follows the drag."""
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(0, 0, -self._dist)
        z = model_to_screen(0, 0, 0)[2]
        d = vec_subs(screen_to_model(x, y, z), screen_to_model(x - dx, y - dy, z))
        glPopMatrix()
        self._x += d[0]
        self._y += d[1]
| {
"content_hash": "31b67a73c3a88a5a928ca60329bbd879",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 98,
"avg_line_length": 31.008196721311474,
"alnum_prop": 0.5651599259846682,
"repo_name": "jbaayen/sympy",
"id": "14ff564bf4fbfbe92f770c96051311f0d363aea9",
"size": "3783",
"binary": false,
"copies": "6",
"ref": "refs/heads/i1667",
"path": "sympy/plotting/plot_camera.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6833033"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
} |
from oslo_utils import uuidutils
from solum.api.handlers import handler
from solum import objects
class SensorHandler(handler.Handler):
    """Fulfills a request on the sensor resource."""

    def get(self, id):
        """Return a sensor."""
        return objects.registry.Sensor.get_by_uuid(self.context, id)

    def update(self, id, data):
        """Modify the sensor."""
        return objects.registry.Sensor.update_and_save(self.context, id, data)

    def delete(self, id):
        """Delete the sensor."""
        sensor = objects.registry.Sensor.get_by_uuid(self.context, id)
        sensor.destroy(self.context)

    def create(self, data):
        """Create a new sensor."""
        sensor = objects.registry.Sensor()
        sensor.update(data)
        # Assign identity and ownership before persisting.
        sensor.uuid = uuidutils.generate_uuid()
        sensor.user_id = self.context.user
        sensor.project_id = self.context.project_id
        sensor.create(self.context)
        return sensor

    def get_all(self):
        """Return all sensors."""
        return objects.registry.SensorList.get_all(self.context)
| {
"content_hash": "0235a4aa957e55e0e35af15e12c33ad4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 31.675675675675677,
"alnum_prop": 0.6006825938566553,
"repo_name": "stackforge/solum",
"id": "e574767df62771d207c800f5e513ffeee337e21b",
"size": "1718",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "solum/api/handlers/sensor_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1243294"
},
{
"name": "Shell",
"bytes": "80784"
}
],
"symlink_target": ""
} |
import unittest
import imath
import IECore
import IECoreScene
import IECoreGL
IECoreGL.init( False )
class InstancingTest(unittest.TestCase):
    """Checks the renderer's automatic instancing of identical primitives."""

    class RandomMeshProcedural(IECoreScene.Renderer.Procedural):
        """Recursively emits one of two meshes, chosen by the leaf path name."""

        def __init__(self, meshes, name="/1", depth=0, maxDepth=8):
            IECoreScene.Renderer.Procedural.__init__(self)
            self.__meshes = meshes
            self.__depth = depth
            self.__maxDepth = maxDepth
            self.__name = name

        def bound(self):
            b = imath.Box3f()
            for m in self.__meshes:
                b.extendBy(m.bound())
            return b

        def render(self, renderer):
            with IECoreScene.AttributeBlock(renderer):
                renderer.setAttribute("name", IECore.StringData(self.__name))
                if self.__depth < self.__maxDepth:
                    # Recurse, producing a binary tree of procedurals.
                    for n in ("1", "2"):
                        renderer.procedural(
                            InstancingTest.RandomMeshProcedural(
                                self.__meshes,
                                self.__name + "/" + n,
                                self.__depth + 1,
                                self.__maxDepth,
                            )
                        )
                else:
                    # Leaf: render the mesh selected by the last path token.
                    mesh = self.__meshes[int(self.__name.split("/")[-1]) - 1]
                    mesh.render(renderer)

        def hash(self):
            h = IECore.MurmurHash()
            return h

    def __collectMeshes(self, group, result):
        # Walk the scene graph, bucketing leaf primitives by the final
        # component of their "name" attribute.
        name = group.getState().get(IECoreGL.NameStateComponent.staticTypeId())
        for c in group.children():
            if isinstance(c, IECoreGL.Group):
                self.__collectMeshes(c, result)
            else:
                d = result.setdefault(name.name().split("/")[-1], [])
                d.append(c)

    def testAutomaticInstancingOn(self):
        m1 = IECoreScene.MeshPrimitive.createPlane(imath.Box2f(imath.V2f(0), imath.V2f(1)))
        m2 = IECoreScene.MeshPrimitive.createPlane(imath.Box2f(imath.V2f(-1), imath.V2f(1)))

        r = IECoreGL.Renderer()
        r.setOption("gl:mode", IECore.StringData("deferred"))
        with IECoreScene.WorldBlock(r):
            r.procedural(self.RandomMeshProcedural([m1, m2]))

        meshes = {}
        self.__collectMeshes(r.scene().root(), meshes)

        # With instancing on, every mesh in a bucket is the same object.
        # assertTrue replaces failUnless, which was deprecated and removed
        # in Python 3.12.
        for meshList in meshes.values():
            for i in range(0, len(meshList)):
                self.assertTrue(meshList[i].isSame(meshList[0]))

    def testAutomaticInstancingOff(self):
        m1 = IECoreScene.MeshPrimitive.createPlane(imath.Box2f(imath.V2f(0), imath.V2f(1)))
        m2 = IECoreScene.MeshPrimitive.createPlane(imath.Box2f(imath.V2f(-1), imath.V2f(1)))

        r = IECoreGL.Renderer()
        r.setOption("gl:mode", IECore.StringData("deferred"))
        with IECoreScene.WorldBlock(r):
            r.setAttribute("automaticInstancing", IECore.BoolData(False))
            r.procedural(self.RandomMeshProcedural([m1, m2]))

        meshes = {}
        self.__collectMeshes(r.scene().root(), meshes)

        # With instancing off, each mesh is a distinct object.
        # assertFalse replaces failIf, which was deprecated and removed
        # in Python 3.12.
        for meshList in meshes.values():
            for i in range(1, len(meshList)):
                self.assertFalse(meshList[i].isSame(meshList[0]))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "c6b1878d2a33fda131b5e8fa4e1d0e1d",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 94,
"avg_line_length": 25.6,
"alnum_prop": 0.6317471590909091,
"repo_name": "appleseedhq/cortex",
"id": "5f8f41ac1bb686096060e046e9c7b465c5fc5c37",
"size": "4600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/IECoreGL/InstancingTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1374"
},
{
"name": "C",
"bytes": "66503"
},
{
"name": "C++",
"bytes": "9536541"
},
{
"name": "CMake",
"bytes": "95418"
},
{
"name": "GLSL",
"bytes": "24422"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "2360"
},
{
"name": "Python",
"bytes": "4651272"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
"""A setup module for the GAPIC Monitoring library.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
import sys
# Runtime dependencies with compatible-version ranges.
install_requires = [
    'googleapis-common-protos>=1.3.5, <2.0.0dev',
    'google-gax>=0.14.1, <0.15.0dev',
    'grpc-google-monitoring-v3>=0.11.1, <0.12.0dev',
    'oauth2client>=2.0.0, <4.0.0dev',
]

setup(
    name='gapic-google-monitoring-v3',
    version='0.11.1',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # Fixed: 'Intended Audience :: Developers' was previously listed twice.
        'Intended Audience :: Developers',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Stackdriver Monitoring API',
    include_package_data=True,
    long_description=open('README.rst').read(),
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.monitoring',
    ],
    url='https://github.com/googleapis/googleapis',
)
| {
"content_hash": "0b6e4f5afca639f64390d5f2b7c79728",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 107,
"avg_line_length": 34.86363636363637,
"alnum_prop": 0.6499348109517601,
"repo_name": "ethanbao/api-client-staging-1",
"id": "1f5bf5082651522bf75cbbf185a57c8921af4fc9",
"size": "1534",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "generated/python/gapic-google-monitoring-v3/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "793619"
},
{
"name": "JavaScript",
"bytes": "280535"
},
{
"name": "PHP",
"bytes": "141330"
},
{
"name": "Python",
"bytes": "370870"
},
{
"name": "Ruby",
"bytes": "409679"
}
],
"symlink_target": ""
} |
"""
Stub functions that are used by the Amazon CloudFront unit tests.
When tests are run against an actual AWS account, the stubber class does not
set up stubs and passes all calls through to the Boto3 client.
"""
import datetime
from test_tools.example_stubber import ExampleStubber
class CloudFrontStubber(ExampleStubber):
    """
    A class that implements stub functions used by CloudFront unit tests.

    The stubbed functions expect certain parameters to be passed to them as
    part of the tests, and raise errors if the parameters are not as expected.
    """

    def __init__(self, client, use_stubs=True):
        """
        Initializes the object with a specific client and configures it for
        stubbing or AWS passthrough.

        :param client: A Boto3 CloudFront client.
        :param use_stubs: When True, use stubs to intercept requests. Otherwise,
                          pass requests through to AWS.
        """
        super().__init__(client, use_stubs)

    def stub_list_distributions(self, distribs, error_code=None):
        # Build one distribution summary per requested entry.
        items = [{
            'ARN': f'arn:aws:cloudfront::123456789012:distribution/{index}',
            'Status': 'Deployed',
            'LastModifiedTime': datetime.datetime.now(),
            'Aliases': {'Quantity': 0},
            'Origins': {'Quantity': 0, 'Items': [{'Id': 'test-id', 'DomainName': 'test'}]},
            'DefaultCacheBehavior': {'TargetOriginId': '', 'ViewerProtocolPolicy': ''},
            'CacheBehaviors': {'Quantity': 0},
            'CustomErrorResponses': {'Quantity': 0},
            'Comment': 'Testing!',
            'PriceClass': 'PriceClass_All',
            'Enabled': True,
            'Restrictions': {'GeoRestriction': {'Quantity': 0, 'RestrictionType': ''}},
            'WebACLId': '',
            'HttpVersion': 'http2',
            'IsIPV6Enabled': True,
            'DomainName': distrib['name'],
            'Id': distrib['id'],
            'ViewerCertificate': {
                'CertificateSource': distrib['cert_source'],
                'Certificate': distrib['cert'],
            },
        } for index, distrib in enumerate(distribs)]
        response = {
            'DistributionList': {
                'Marker': 'marker',
                'MaxItems': 100,
                'IsTruncated': False,
                'Quantity': len(distribs),
                'Items': items,
            }
        }
        self._stub_bifurcator(
            'list_distributions', {}, response, error_code=error_code)

    def stub_get_distribution_config(self, distrib_id, comment, etag, error_code=None):
        expected_params = {'Id': distrib_id}
        response = {
            'DistributionConfig': {
                'CallerReference': 'test',
                'Origins': {'Quantity': 0, 'Items': [{'Id': 'test-id', 'DomainName': 'test'}]},
                'DefaultCacheBehavior': {'TargetOriginId': '', 'ViewerProtocolPolicy': ''},
                'Enabled': True,
                'Comment': comment,
            },
            'ETag': etag,
        }
        self._stub_bifurcator(
            'get_distribution_config', expected_params, response, error_code=error_code)

    def stub_update_distribution(self, distrib_id, comment, etag, error_code=None):
        # The update call echoes the config back with the new comment.
        expected_params = {
            'Id': distrib_id,
            'DistributionConfig': {
                'CallerReference': 'test',
                'Origins': {'Quantity': 0, 'Items': [{'Id': 'test-id', 'DomainName': 'test'}]},
                'DefaultCacheBehavior': {'TargetOriginId': '', 'ViewerProtocolPolicy': ''},
                'Enabled': True,
                'Comment': comment,
            },
            'IfMatch': etag,
        }
        self._stub_bifurcator(
            'update_distribution', expected_params, {}, error_code=error_code)
| {
"content_hash": "f264cf6c082099e7829f910a9623646c",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 99,
"avg_line_length": 43.18279569892473,
"alnum_prop": 0.5323705179282868,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "db54a5c53cde3013be94c5e9b5bc6ad0432b1280",
"size": "4124",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/test_tools/cloudfront_stubber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
import os
import sys
from subprocess import Popen, PIPE
import re
def main():
if len(sys.argv) != 1:
print "wrong number of argments"
else:
executeAll()
def executeAll():
(stdout, stderr) = Popen(["time","./zip/zip30/zip","-r","/tmp/archive.zip","./zip/testes/"],stderr=PIPE,stdout=PIPE).communicate()
#print stdout
objMatch = re.match(r'(.+)user',stderr)
print objMatch.groups()[0]
if __name__ == '__main__':
    # Run the benchmark when invoked as a script.
    main()
| {
"content_hash": "fda7cd4e5f1093d9d337f10f1e37b4cd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 134,
"avg_line_length": 22.238095238095237,
"alnum_prop": 0.6081370449678801,
"repo_name": "lucashmorais/x-Bench",
"id": "3a4a0df456b113a07ac6d5758794fa128f842467",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "execute_tests_zip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "989"
},
{
"name": "C",
"bytes": "561526"
},
{
"name": "C++",
"bytes": "27037"
},
{
"name": "CMake",
"bytes": "20902"
},
{
"name": "CSS",
"bytes": "115363"
},
{
"name": "HTML",
"bytes": "101202396"
},
{
"name": "Java",
"bytes": "9402"
},
{
"name": "JavaScript",
"bytes": "2082146"
},
{
"name": "Makefile",
"bytes": "13295"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Python",
"bytes": "18613167"
},
{
"name": "Shell",
"bytes": "55623"
},
{
"name": "TeX",
"bytes": "8"
},
{
"name": "XSLT",
"bytes": "43523"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from pathlib import Path
from demisto_sdk.commands.content_graph.interface.neo4j.neo4j_graph import Neo4jContentGraphInterface
from demisto_sdk.commands.common.constants import MarketplaceVersions
from demisto_sdk.commands.content_graph.objects.repository import ContentDTO
from Tests.scripts.utils.log_util import install_logging
import logging as logger
from demisto_sdk.commands.common.logger import logging_setup
from demisto_sdk.commands.common.tools import get_content_path
import json
logging_setup(3)
install_logging("create_artifacts.log", logger=logger)
def create_zips(content_dto: ContentDTO, output: Path, marketplace: str, zip: bool):
    """Dump every pack in *content_dto* to *output* for *marketplace*,
    zipping the artifacts when *zip* is True."""
    content_dto.dump(output, marketplace, zip)
def create_dependencies(content_dto: ContentDTO, output: Path):
    """Write a JSON mapping of pack id -> dependency metadata to *output*."""
    mapping = {}
    for pack in content_dto.packs:
        direct = {}
        all_levels = []
        for dep in pack.depends_on:
            all_levels.append(dep.content_item.object_id)
            if dep.is_direct:
                direct[dep.content_item.object_id] = {
                    "display_name": dep.content_item.name,
                    "mandatory": dep.mandatorily,
                }
        mapping[pack.object_id] = {
            "path": str(pack.path.relative_to(get_content_path())),
            "fullPath": str(pack.path),
            "dependencies": direct,
            "displayedImages": list(direct.keys()),
            "allLevelDependencies": all_levels,
        }
    with open(output, "w") as fp:
        json.dump(mapping, fp, indent=4)
def main():
    """Parse CLI options, marshal the content graph, then emit artifact
    zips and the pack-dependency mapping."""
    parser = ArgumentParser()
    parser.add_argument("-mp", "--marketplace", type=MarketplaceVersions, help="marketplace version", default="xsoar")
    parser.add_argument("-ao", "--artifacts-output", help="Artifacts output directory", required=True)
    parser.add_argument("-do", "--dependencies-output", help="Dependencies output directory", required=True)
    parser.add_argument("--zip", default=True, action="store_true")
    parser.add_argument("--no-zip", dest="zip", action="store_false")
    args = parser.parse_args()

    with Neo4jContentGraphInterface() as interface:
        content_dto: ContentDTO = interface.marshal_graph(args.marketplace, all_level_dependencies=True)

    logger.info("Creating content artifacts zips")
    create_zips(content_dto, Path(args.artifacts_output), args.marketplace, args.zip)

    logger.info("Creating pack dependencies mapping")
    create_dependencies(content_dto, Path(args.dependencies_output))
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"content_hash": "c76920414e710f7bddc1249f604e7078",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 118,
"avg_line_length": 43.78125,
"alnum_prop": 0.6973590292648109,
"repo_name": "demisto/content",
"id": "17f24ca08e58703026bb46fdc4276830ef072e3a",
"size": "2802",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tests/scripts/create_artifacts_graph/create_artifacts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from .BaseDestination import BaseDestination
from ..mixins import APIMixin
class APIDestination(APIMixin, BaseDestination):
def backup(self, *args, **kwargs):
print "Hello! This is %s's backup method" % self.__class__.__name__
#raise NotImplementedError
def restore(self, filename):
#print "Hello! This is %s's restore method" % self.__class__.__name__
raise NotImplementedError
class Meta:
verbose_name = u'destino API'
verbose_name_plural = u'destinos API'
app_label = 'server' | {
"content_hash": "4d04f755e054412086df4ba779636180",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 35.125,
"alnum_prop": 0.6405693950177936,
"repo_name": "gustavomazevedo/tbackup-server",
"id": "a39bffbdd9291654ac025662fdce1a028d215777",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/models/destination/APIDestination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "275"
},
{
"name": "Python",
"bytes": "99284"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import functools
import json
import os
from poetry.core.spdx.license import License
def license_by_id(identifier: str) -> License:
    """Look up a license by its (case-insensitive) identifier.

    Unknown identifiers yield a non-standard License built from the
    identifier itself. Raises ValueError for an empty identifier.
    """
    if not identifier:
        raise ValueError("A license identifier is required")

    fallback = License(identifier, identifier, False, False)
    return _load_licenses().get(identifier.lower(), fallback)
@functools.lru_cache()
def _load_licenses() -> dict[str, License]:
    """Parse data/licenses.json into a mapping of lowercased identifier
    (and lowercased full name) to License.

    A deprecated license never shadows a non-deprecated one that shares
    the same full name, and a catch-all "proprietary" entry is always
    present for non-standard licenses.
    """
    licenses: dict[str, License] = {}
    licenses_file = os.path.join(os.path.dirname(__file__), "data", "licenses.json")

    with open(licenses_file, encoding="utf-8") as f:
        data = json.loads(f.read())

    for name, license_info in data.items():
        # Named `lic` (was `license`) to avoid shadowing the builtin.
        lic = License(name, license_info[0], license_info[1], license_info[2])
        licenses[name.lower()] = lic

        full_name = license_info[0].lower()
        if full_name in licenses:
            existing_license = licenses[full_name]
            if not existing_license.is_deprecated:
                continue
        licenses[full_name] = lic

    # Add a Proprietary license for non-standard licenses
    licenses["proprietary"] = License("Proprietary", "Proprietary", False, False)

    return licenses
if __name__ == "__main__":
    # Regenerate the bundled license data when run directly.
    from poetry.core.spdx.updater import Updater

    updater = Updater()
    updater.dump()
| {
"content_hash": "4c4401859f81f916d26cacfb6a8604ea",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 84,
"avg_line_length": 27.04,
"alnum_prop": 0.6479289940828402,
"repo_name": "python-poetry/poetry-core",
"id": "00d4bc6ea0710da11637187c5274da9a9a2c4474",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/poetry/core/spdx/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2664"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2084191"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.