from collections import defaultdict
from functools import wraps
import importlib.util
import inspect
import os
import re
import sys
from types import MethodType
class IndexingError(Exception):
"""
Raised for indexing errors
"""
pass
class MonitoringError(Exception):
"""
Raised for monitoring errors
"""
pass
class FunctionCallMonitor:
def __init__(self):
self._modules = defaultdict(lambda: {})
self._target_modules = []
def register_function(self, f, parent_class=None):
"""
Register function for call tracking. Wraps functions without changing
signatures. Classmethods are unwrapped and the returned function is
wrapped in @classmethod again as it is returned to preserve
functionality.
Args:
f (FunctionType): Function to track
parent_class (Type): Parent class of the function if part of a
class; defaults to None
Raises:
MonitoringError: if f seems to be a classmethod but it does not have
an __func__ attribute holding the wrapped method.
"""
# If function is part of a class and it is bound to it
is_classmethod = isinstance(f, MethodType)
# Unwrap @classmethod
if is_classmethod:
try:
f = f.__func__
except AttributeError as e:
raise MonitoringError(
f"Function {get_full_function_name(f)} not a classmethod"
)
self._modules[f.__module__][f] = []
@wraps(f)
def _(*args, **kwargs):
# Check the filename of the previous stack frame - that is where
# the function call originates from
source_frame = inspect.stack()[1]
source_file = source_frame.filename
source_function = source_frame.function
self.record_call(f, source_file, source_function)
return f(*args, **kwargs)
_.__signature__ = inspect.signature(f)
# Re-wrap @classmethod
if is_classmethod:
_ = classmethod(_)
return _
@property
def registered_functions(self):
"""
Returns:
Tuple[Tuple[str, Tuple[FunctionType, ...]], ...]: all registered
functions, grouped by module
"""
return tuple(
(module_name, tuple(functions.keys()))
for module_name, functions in self._modules.items()
)
@property
def called_functions(self):
"""
Returns:
Tuple[Tuple[str, Tuple[FunctionType, ...]], ...]: all called registered
functions, grouped by module
"""
return tuple(
(
module_name,
tuple(f for f in functions if len(self._modules[module_name][f]) > 0),
)
for module_name, functions in self.registered_functions
)
@property
def missed_functions(self):
"""
Returns:
Tuple[Tuple[str, Tuple[FunctionType, ...]], ...]: all missed registered
functions, grouped by module
"""
return tuple(
(
module_name,
tuple(f for f in functions if len(self._modules[module_name][f]) == 0),
)
for module_name, functions in self.registered_functions
)
def register_target_module(self, m):
"""
Registers a module from which an eligible function call may originate.
Args:
m (str): Absolute file path to the module
"""
self._target_modules.append(m)
def record_call(self, f, source_file, source_function):
"""
Records a function call if the originating module is being tracked.
Args:
f (Callable): Invoked function
source_file (str): Absolute file path to the module from where the call
originates
source_function (str): Name of the function from where the call
originates
Returns:
bool: True if the call was recorded, False otherwise.
"""
if source_file in self._target_modules:
try:
self._modules[f.__module__][f].append((source_file, source_function))
except KeyError:
raise MonitoringError(
f"Function {get_full_function_name(f)} not monitored."
)
return True
return False
class ModuleLoader:
def __init__(self):
self._modules = {}
def load_from_package(self, path):
"""
Recursively load all modules in a package specified in path.
Args:
path (str): Path to the package folder
"""
for module_path, module_name in find_modules(path):
module = import_module_from_file(module_name, module_path)
self._modules[module_name] = module
def __iter__(self):
return iter(self._modules.items())
class FunctionIndexer:
def __init__(self, ignore_func_names=None):
"""
Args:
ignore_func_names (List[str]): Function name patterns to
ignore. Defaults to None
"""
self._ignore_func_names = ignore_func_names or []
# Compile regular expressions
self._func_names_rgx = [
re.compile(pattern) for pattern in self._ignore_func_names
]
# Initialise indexer and monitor
self._loader = ModuleLoader()
self._monitor = FunctionCallMonitor()
def index_package(self, package_path):
"""
Args:
package_path (str): Path to package
"""
self._loader.load_from_package(package_path)
for module_name, module in self._loader:
functions = get_functions_defined_in_module(module)
classes = get_classes_defined_in_module(module)
for f_name, f in functions:
if not self.matches_filters(f_name):
setattr(module, f_name, self._monitor.register_function(f))
for cls_name, cls in classes:
for f_name, f in get_methods_defined_in_class(cls):
if not self.matches_filters(f_name):
setattr(cls, f_name, self._monitor.register_function(f, cls))
def matches_filters(self, f_name):
"""
Checks if the given function matches any of the filters.
Args:
f_name (str): Name of the function
Returns:
bool
"""
return any(rgx.search(f_name) is not None for rgx in self._func_names_rgx)
def register_source_module(self, module_name):
"""
Registers a module by name from which function calls are considered
eligible for tracking.
Args:
module_name (str):
"""
self._monitor.register_target_module(module_name)
@property
def monitor(self):
"""
Returns:
FunctionCallMonitor
"""
return self._monitor
def import_module_from_file(module_name, file_path):
"""
Imports module from a given file path under a given module name. If the module
exists the function returns the module object from sys.modules.
Args:
module_name (str): Full qualified name of the module.
Example: mypackage.mymodule
file_path (str): Path to module, assumed to be a ".py" file
Returns:
ModuleType
"""
if module_name in sys.modules:
module = sys.modules[module_name]
else:
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
def is_defined_in_module(o, module):
"""
Checks if an object is defined in a given module and not imported.
Args:
o (Type): Object to check
module (ModuleType): Module
Returns:
bool
"""
return o.__module__ == module.__name__
def get_functions_defined_in_module(module):
"""
Get all the functions defined in a given module.
Args:
module (ModuleType): Module for lookup
Returns:
List[Tuple[str, FunctionType]]
"""
all_functions = inspect.getmembers(module, inspect.isfunction)
functions_defined_in_module = [
f for f in all_functions if is_defined_in_module(f[1], module)
]
return functions_defined_in_module
def get_classes_defined_in_module(module):
"""
Get all the classes defined in a given module.
Args:
module (ModuleType): Module for lookup
Returns:
List[Tuple[str, Type]]
"""
all_classes = inspect.getmembers(module, inspect.isclass)
classes_defined_in_module = [
f for f in all_classes if is_defined_in_module(f[1], module)
]
return classes_defined_in_module
def get_methods_defined_in_class(cls):
"""
Get all functions defined in a given class. This includes all
non-inherited methods, static methods and class methods.
Args:
cls (Type): Class for lookup
Returns:
List[Tuple[str, Union[FunctionType, MethodType]]]
"""
methods = inspect.getmembers(cls, inspect.isfunction)
class_methods = inspect.getmembers(cls, inspect.ismethod)
functions = methods + class_methods
# Only keep non-inherited functions
cls_symbols = cls.__dict__
functions = [f for f in functions if f[0] in cls_symbols]
return functions
def find_modules(path):
"""
Discover all Python module files in a path. Uses os.walk to recursively traverse
all the nested directories but does not follow symlinks. Returns a generator
of 2-tuples, (absolute_file_path, absolute_module_name).
Args:
path (str):
Returns:
Generator[Tuple[str, str], None, None]
"""
root_path = os.path.dirname(path)
for dir_path, _, file_names in os.walk(path):
package_name = dir_path[len(root_path) + 1 :].replace(os.path.sep, ".")
for file_name in sorted(file_names):
# We are only interested in .py files
if not file_name.endswith(".py"):
continue
# Get the absolute path of the file
absolute_path = os.path.join(dir_path, file_name)
module_name = file_name[:-3]
# If the module name is __init__, then it should match the package_name
if module_name == "__init__":
absolute_module_name = package_name
else:
absolute_module_name = f"{package_name}.{module_name}"
yield (absolute_path, absolute_module_name)
def get_full_function_name(f):
"""
Constructs full module path for a given function.
Args:
f (function): Function to construct the path for
Returns:
str
"""
return f"{f.__module__}.{f.__qualname__}"
|
from exception.error_code import ErrorCode
class BaseException(Exception):
"""
カスタム例外の基底クラス
"""
__status_code: int
"""
ステータスコード
"""
__error_code: ErrorCode
"""
エラーコード
"""
def __init__(self, status_code: int, error_code: ErrorCode):
self.__status_code = status_code
self.__error_code = error_code
def get_status_code(self) -> int:
"""
ステータスコードのGetter
@return ステータスコード
"""
return self.__status_code
def get_error_code(self) -> ErrorCode:
"""
エラーコードのGetter
@return エラーコード
"""
return self.__error_code.value
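# Illustrative sketch (not part of the module): a concrete exception built on this
# base class; the ErrorCode member used here is hypothetical.
#
#   class NotFoundException(BaseException):
#       def __init__(self):
#           super().__init__(404, ErrorCode.NOT_FOUND)
#
#   try:
#       raise NotFoundException()
#   except NotFoundException as e:
#       print(e.get_status_code(), e.get_error_code())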
|
import grpc
import copy
from scannerpy.common import *
from scannerpy.op import OpColumn, collect_per_stream_args, check_modules
from scannerpy.protobufs import python_to_proto, protobufs, analyze_proto
class Sink:
def __init__(self, sc, name, inputs, job_args, sink_args={}):
self._sc = sc
self._name = name
self._args = sink_args
self._job_args = job_args
sink_info = self._sc._get_sink_info(self._name)
cols = sink_info.input_columns
variadic_inputs = sink_info.variadic_inputs
# TODO: Verify columns are the correct type here
if name == 'FrameColumn' or name == 'Column':
if 'columns' not in sink_args:
raise ScannerException(
'Columns must be specified for Column Sink. For example, '
'sc.sinks.Column(columns={\'column_name\': col}).')
columns = sink_args['columns']
self._output_names = [n for n, _ in columns.items()]
self._inputs = [c for _, c in columns.items()]
del sink_args['columns']
else:
self._output_names = ['']
self._inputs = inputs
if name == 'FrameColumn' or name == 'Column':
            # We insert the storage config to allow the ColumnSink
            # to read from the database
sc = self._sc.config.config['storage']
def check_and_add(key):
if key in sc:
self._args[key] = sc[key]
self._args['storage_type'] = sc['type']
check_and_add('bucket')
check_and_add('region')
check_and_add('endpoint')
def inputs(self):
return self._inputs
def to_proto(self, indices):
e = protobufs.Op()
e.name = self._name
e.is_sink = True
for i in self._inputs:
inp = e.inputs.add()
idx = indices[i._op] if i._op is not None else -1
inp.op_index = idx
inp.column = i._col
if isinstance(self._args, dict):
# To convert an arguments dict, we search for a protobuf with the
# name {Name}SourceArgs (e.g. ColumnSourceArgs) and the name
# {Name}EnumeratorArgs (e.g. ColumnEnumeratorArgs) in the
# args.proto module, and fill that in with keys from the args dict.
if len(self._args) > 0:
sink_info = self._sc._get_sink_info(self._name)
if len(sink_info.protobuf_name) > 0:
proto_name = sink_info.protobuf_name
e.kernel_args = python_to_proto(proto_name, self._args)
else:
e.kernel_args = self._args
else:
# If arguments are a protobuf object, serialize it directly
e.kernel_args = self._args.SerializeToString()
return e
class SinkGenerator:
"""
Creates Sink instances to define a computation.
When a particular Sink is requested from the generator, e.g.
`sc.sink.Column`, the generator does a dynamic lookup for the
Sink in the servers registry.
"""
def __init__(self, sc):
self._sc = sc
def __getattr__(self, name):
check_modules(self._sc)
# Use Sequence as alias of Column
if name == 'Sequence' or name == 'FrameSequence':
name = name.replace('Sequence', 'Column')
def make_sink(*args, **kwargs):
column_name = 'frame' if 'Frame' in name else 'column'
return Sink(self._sc, name, [], dict(columns={column_name: args[0]}))
return make_sink
else:
            # This will raise an exception if the sink does not exist.
sink_info = self._sc._get_sink_info(name)
def make_sink(*args, **kwargs):
inputs = []
if sink_info.variadic_inputs:
inputs.extend(args)
else:
for c in sink_info.input_columns:
val = kwargs.pop(c.name, None)
if val is None:
raise ScannerException(
'sink {} required column {} as input'
.format(name, c.name))
inputs.append(val)
if name == 'Column' or name == 'FrameColumn':
job_args = [s.encode('utf-8') for s in kwargs.pop('table_name', None)]
kwargs.pop('column_name', None)
else:
assert sink_info.stream_protobuf_name != ''
job_args = collect_per_stream_args(name, sink_info.stream_protobuf_name, kwargs)
sink_args = kwargs.pop('args', kwargs)
sink = Sink(self._sc, name,
inputs,
job_args,
kwargs if args is None else sink_args)
return sink
return make_sink
|
"""
Defining Class of custom environment for V-Rep
@author: hussein
"""
# import vrep_env
# from vrep_env import vrep
import sys
import os
sys.path.append(os.path.abspath("/home/hussein/Desktop/Multi-agent-path-planning/Reinforcement Learning"))
sys.path.append(os.path.abspath("/home/hussein/Desktop/Multi-agent-path-planning/Reinforcement Learning/vrep_env"))
import os
# vrep_scenes_path = os.environ['/home/hussein/Desktop/Multi-agent-path-planning/Reinforcement Learning/examples/scenes']
import torch
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
from vrep_env import sim
from data_collection_v3 import MinimalPublisher
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
L = 1 # Parameter of robot
d = 0.5 # Parameter of robot
A = np.ones(6) - np.identity(6) # Adjacency matrix, fully connected case, 6x6
ux = np.zeros((6,1)) # 6x1
uy = np.zeros((6,1)) # 6x1
" Connecting to V-Rep "
sim.simxFinish(-1) # just in case, close all opened connections
clientID=sim.simxStart('127.0.0.1',19997,True,True,-500000,5) # Connect to CoppeliaSim
N_SCENES = 80
scenes = np.hstack(( np.random.uniform(-2,2,size=(N_SCENES,2)), np.random.uniform(0,np.pi,size=(N_SCENES,1)), np.random.uniform(-2,2,(N_SCENES,2)), np.random.uniform(0,np.pi,size=(N_SCENES,1)) ))
def euler_from_quaternion(x, y, z, w):
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return yaw_z # in radians
"""
Description:
Consensus environment with 6 robots, where in each episode they converge towards each other. DQN is applied to robot 1 and the rest are controlled with the consensus algorithm.
Source:
This environment corresponds to V-Rep simulator, integrated with ROS to publish actions & subscribe to observations.
Observation:
Type: Box(4)
Num Observation Min Max
0 Mx -4.8 4.8
1 My -4.8 4.8
2 Phix -4.8 4.8
3 Phiy -4.8 4.8
Actions:
Type: Discrete(4)
Num Action
0 Move the robot upwards
1 Move the robot downwards
2 Move the robot to the left
3 Move the robot to the right
"""
class MinimalPublisherGym(MinimalPublisher):
def __init__(self):
#vrep_env.VrepEnv.__init__(self, server_addr, server_port, scene_path)
super().__init__()
self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32
self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1',0) #Change according to topic in child script,String to Float32
self.publisher_l2 = self.create_publisher(Float32, '/leftMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32
self.publisher_r2 = self.create_publisher(Float32, '/rightMotorSpeedrobot2',0) #Change according to topic in child script,String to Float32
self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32
self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3',0) #Change according to topic in child script,String to Float32
self.publisher_l4 = self.create_publisher(Float32, '/leftMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32
self.publisher_r4 = self.create_publisher(Float32, '/rightMotorSpeedrobot4',0) #Change according to topic in child script,String to Float32
self.publisher_l5 = self.create_publisher(Float32, '/leftMotorSpeedrobot5', 0) #Change according to topic in child script,String to Float32
self.publisher_r5 = self.create_publisher(Float32, '/rightMotorSpeedrobot5',0) #Change according to topic in child script,String to Float32
self.publisher_l6 = self.create_publisher(Float32, '/leftMotorSpeedrobot6', 0) #Change according to topic in child script,String to Float32
self.publisher_r6 = self.create_publisher(Float32, '/rightMotorSpeedrobot6',0) #Change according to topic in child script,String to Float32
self.subscription = self.create_subscription(
TFMessage,
'/tf',
self.listener_callback,
0)
" Timer Callback "
timer_period = 0.03 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
" Parameters "
        self.t = 0 # Used only to initialize the Phix and Phiy values
" Initialize Phi's "
self.Phix1 = 0 # 1x1
self.Phiy1 = 0 # 1x1
self.Phix2 = 0 # 1x1
self.Phiy2 = 0 # 1x1
self.Phix3 = 0 # 1x1
self.Phiy3 = 0 # 1x1
self.Phix4 = 0 # 1x1
self.Phiy4 = 0 # 1x1
self.Phix5 = 0 # 1x1
self.Phiy5 = 0 # 1x1
self.Phix6 = 0 # 1x1
self.Phiy6 = 0 # 1x1
" Mobile Robot 1 Parameters "
self.x1 = 0
self.y1 = 0
self.Theta1 = 0
self.v1 = 0
self.w1 = 0
self.vL1 = 0
self.vR1 = 0
" Mobile Robot 2 Parameters "
self.x2 = 0
self.y2 = 0
self.Theta2 = 0
self.v2 = 0
self.w2 = 0
self.vL2 = 0
self.vR2 = 0
" Mobile Robot 3 Parameters "
self.x3 = 0
self.y3 = 0
self.Theta3 = 0
self.v3 = 0
self.w3 = 0
self.vL3 = 0
self.vR3 = 0
" Mobile Robot 4 Parameters "
self.x4 = 0
self.y4 = 0
self.Theta4 = 0
self.v4 = 0
self.w4 = 0
self.vL4 = 0
self.vR4 = 0
" Mobile Robot 5 Parameters "
self.x5 = 0
self.y5 = 0
self.Theta5 = 0
self.v5 = 0
self.w5 = 0
self.vL5 = 0
self.vR5 = 0
" Mobile Robot 6 Parameters "
self.x6 = 0
self.y6 = 0
self.Theta6 = 0
self.v6 = 0
self.w6 = 0
self.vL6 = 0
self.vR6 = 0
def listener_callback(self, msg):
if msg.transforms[0].child_frame_id == 'robot1' :
self.x1 = msg.transforms[0].transform.translation.x
self.y1 = msg.transforms[0].transform.translation.y
self.xr1 = msg.transforms[0].transform.rotation.x
self.yr1 = msg.transforms[0].transform.rotation.y
self.zr1 = msg.transforms[0].transform.rotation.z
self.wr1 = msg.transforms[0].transform.rotation.w
self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)
if msg.transforms[0].child_frame_id == 'robot2' :
self.x2 = msg.transforms[0].transform.translation.x
self.y2 = msg.transforms[0].transform.translation.y
self.xr2 = msg.transforms[0].transform.rotation.x
self.yr2 = msg.transforms[0].transform.rotation.y
self.zr2 = msg.transforms[0].transform.rotation.z
self.wr2 = msg.transforms[0].transform.rotation.w
self.Theta2 = euler_from_quaternion(self.xr2,self.yr2,self.zr2,self.wr2)
if msg.transforms[0].child_frame_id == 'robot3' :
self.x3 = msg.transforms[0].transform.translation.x
self.y3 = msg.transforms[0].transform.translation.y
self.xr3 = msg.transforms[0].transform.rotation.x
self.yr3 = msg.transforms[0].transform.rotation.y
self.zr3 = msg.transforms[0].transform.rotation.z
self.wr3 = msg.transforms[0].transform.rotation.w
self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3)
if msg.transforms[0].child_frame_id == 'robot4' :
self.x4 = msg.transforms[0].transform.translation.x
self.y4 = msg.transforms[0].transform.translation.y
self.xr4 = msg.transforms[0].transform.rotation.x
self.yr4 = msg.transforms[0].transform.rotation.y
self.zr4 = msg.transforms[0].transform.rotation.z
self.wr4 = msg.transforms[0].transform.rotation.w
self.Theta4 = euler_from_quaternion(self.xr4,self.yr4,self.zr4,self.wr4)
if msg.transforms[0].child_frame_id == 'robot5' :
self.x5 = msg.transforms[0].transform.translation.x
self.y5 = msg.transforms[0].transform.translation.y
self.xr5 = msg.transforms[0].transform.rotation.x
self.yr5 = msg.transforms[0].transform.rotation.y
self.zr5 = msg.transforms[0].transform.rotation.z
self.wr5 = msg.transforms[0].transform.rotation.w
self.Theta5 = euler_from_quaternion(self.xr5,self.yr5,self.zr5,self.wr5)
if msg.transforms[0].child_frame_id == 'robot6' :
self.x6 = msg.transforms[0].transform.translation.x
self.y6 = msg.transforms[0].transform.translation.y
self.xr6 = msg.transforms[0].transform.rotation.x
self.yr6 = msg.transforms[0].transform.rotation.y
self.zr6 = msg.transforms[0].transform.rotation.z
self.wr6 = msg.transforms[0].transform.rotation.w
self.Theta6 = euler_from_quaternion(self.xr6,self.yr6,self.zr6,self.wr6)
def timer_callback(self):
" Publish Speed Commands to Robot 1 "
msgl1 = Float32()
msgr1 = Float32()
msgl1.data = self.VL1
msgr1.data = self.VR1
self.publisher_l1.publish(msgl1)
self.publisher_r1.publish(msgr1)
" Publish Speed Commands to Robot 2 "
msgl2 = Float32()
msgr2 = Float32()
msgl2.data = self.VL2
msgr2.data = self.VR2
self.publisher_l2.publish(msgl2)
self.publisher_r2.publish(msgr2)
" Publish Speed Commands to Robot 3 "
msgl3 = Float32()
msgr3 = Float32()
msgl3.data = self.VL3
msgr3.data = self.VR3
self.publisher_l3.publish(msgl3)
self.publisher_r3.publish(msgr3)
" Publish Speed Commands to Robot 4 "
msgl4 = Float32()
msgr4 = Float32()
msgl4.data = self.VL4
msgr4.data = self.VR4
self.publisher_l4.publish(msgl4)
self.publisher_r4.publish(msgr4)
" Publish Speed Commands to Robot 5 "
msgl5 = Float32()
msgr5 = Float32()
msgl5.data = self.VL5
msgr5.data = self.VR5
self.publisher_l5.publish(msgl5)
self.publisher_r5.publish(msgr5)
" Publish Speed Commands to Robot 6 "
msgl6 = Float32()
msgr6 = Float32()
msgl6.data = self.VL6
msgr6.data = self.VR6
self.publisher_l6.publish(msgl6)
self.publisher_r6.publish(msgr6)
def spin_once_gym(self):
rclpy.spin_once(self)
class MobileRobotVrepEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self):
self.mpg = MinimalPublisherGym()
self.scene = 0 # Nb of scene iteration
" Distance at which to fail the episode "
self.distance_threshold = 2.2
" Observation & Action Space "
# Define Action Space
self.action_space = spaces.Discrete(4)
# Define Observation Space
high_observation = np.array([4.8,
4.8,
4.8,
4.8],
dtype=np.float32)
        self.observation_space = spaces.Box(-high_observation, high_observation, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
" Distance Threshold "
self.distance = abs(self.mpg.x1 - self.mpg.x2) + abs(self.mpg.y1 - self.mpg.y2) + abs(self.mpg.x1 - self.mpg.x3) + abs(self.mpg.y1 - self.mpg.y3) + abs(self.mpg.x1 - self.mpg.x4) + abs(self.mpg.y1 - self.mpg.y4) + abs(self.mpg.x1 - self.mpg.x5) + abs(self.mpg.y1 - self.mpg.y5) + abs(self.mpg.x1 - self.mpg.x6) + abs(self.mpg.y1 - self.mpg.y6)
" Use Adjacency Matrix to find Mxy and Phi's "
        A = np.ones(6) - np.identity(6) # Adjacency matrix
self.X = np.array([ [self.mpg.x1], [self.mpg.x2], [self.mpg.x3], [self.mpg.x4], [self.mpg.x5], [self.mpg.x6] ]) #6x1
self.Y = np.array([ [self.mpg.y1], [self.mpg.y2], [self.mpg.y3], [self.mpg.y4], [self.mpg.y5], [self.mpg.y6] ]) #6x1
ux = np.zeros((6,1)) # 6x1
uy = np.zeros((6,1)) # 6x1
for i in range(1,7):
for j in range(1,7):
ux[i-1] += -(A[i-1][j-1])*(self.X[i-1]-self.X[j-1]) # 1x1 each
uy[i-1] += -(A[i-1][j-1])*(self.Y[i-1]-self.Y[j-1]) # 1x1 each
# Manage 4 directions (Up/Down/Left/Right)
if action==0:
self.mpg.v1 = -1.0
elif action==1:
self.mpg.v1 = +1.0
elif action==2:
self.mpg.w1 = -1.0
elif action==3:
self.mpg.w1 = +1.0
u2 = np.array([ [float(ux[1])], [float(uy[1])] ]) # 2x1
u3 = np.array([ [float(ux[2])], [float(uy[2])] ]) # 2x1
u4 = np.array([ [float(ux[3])], [float(uy[3])] ]) # 2x1
u5 = np.array([ [float(ux[4])], [float(uy[4])] ]) # 2x1
u6 = np.array([ [float(ux[5])], [float(uy[5])] ]) # 2x1
" Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 "
S1 = np.array([[self.mpg.v1], [self.mpg.w1]]) #2x1
# G1 = np.array([[1,0], [0,1/L]]) #2x2
# R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]]) #2x2
# S1 = np.dot(np.dot(G1, R1), u1) #2x1
S2 = np.array([[self.mpg.v2], [self.mpg.w2]]) #2x1
G2 = np.array([[1,0], [0,1/L]]) #2x2
R2 = np.array([[math.cos(self.mpg.Theta2),math.sin(self.mpg.Theta2)],[-math.sin(self.mpg.Theta2),math.cos(self.mpg.Theta2)]]) #2x2
S2 = np.dot(np.dot(G2, R2), u2) # 2x1
S3 = np.array([[self.mpg.v3], [self.mpg.w3]]) #2x1
G3 = np.array([[1,0], [0,1/L]]) #2x2
R3 = np.array([[math.cos(self.mpg.Theta3),math.sin(self.mpg.Theta3)],[-math.sin(self.mpg.Theta3),math.cos(self.mpg.Theta3)]]) #2x2
S3 = np.dot(np.dot(G3, R3), u3) #2x1
S4 = np.array([[self.mpg.v4], [self.mpg.w4]]) #2x1
G4 = np.array([[1,0], [0,1/L]]) #2x2
R4 = np.array([[math.cos(self.mpg.Theta4),math.sin(self.mpg.Theta4)],[-math.sin(self.mpg.Theta4),math.cos(self.mpg.Theta4)]]) #2x2
S4 = np.dot(np.dot(G4, R4), u4) #2x1
S5 = np.array([[self.mpg.v5], [self.mpg.w5]]) #2x1
G5 = np.array([[1,0], [0,1/L]]) #2x2
R5 = np.array([[math.cos(self.mpg.Theta5),math.sin(self.mpg.Theta5)],[-math.sin(self.mpg.Theta5),math.cos(self.mpg.Theta5)]]) #2x2
S5 = np.dot(np.dot(G5, R5), u5) #2x1
S6 = np.array([[self.mpg.v6], [self.mpg.w6]]) #2x1
G6 = np.array([[1,0], [0,1/L]]) #2x2
R6 = np.array([[math.cos(self.mpg.Theta6),math.sin(self.mpg.Theta6)],[-math.sin(self.mpg.Theta6),math.cos(self.mpg.Theta6)]]) #2x2
S6 = np.dot(np.dot(G6, R6), u6) #2x1
" Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 "
D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2
Di = np.linalg.inv(D) #2x2
Speed_L1 = np.array([[self.mpg.vL1], [self.mpg.vR1]]) # Vector 2x1 for Speed of Robot 1
Speed_L2 = np.array([[self.mpg.vL2], [self.mpg.vR2]]) # Vector 2x1 for Speed of Robot 2
Speed_L3 = np.array([[self.mpg.vL3], [self.mpg.vR3]]) # Vector 2x1 for Speed of Robot 3
Speed_L4 = np.array([[self.mpg.vL4], [self.mpg.vR4]]) # Vector 2x1 for Speed of Robot 4
Speed_L5 = np.array([[self.mpg.vL5], [self.mpg.vR5]]) # Vector 2x1 for Speed of Robot 5
Speed_L6 = np.array([[self.mpg.vL6], [self.mpg.vR6]]) # Vector 2x1 for Speed of Robot 6
M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1
M2 = np.array([[S2[0]],[S2[1]]]).reshape(2,1) #2x1
M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1
M4 = np.array([[S4[0]],[S4[1]]]).reshape(2,1) #2x1
M5 = np.array([[S5[0]],[S5[1]]]).reshape(2,1) #2x1
M6 = np.array([[S6[0]],[S6[1]]]).reshape(2,1) #2x1
Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)
Speed_L2 = np.dot(Di, M2) # 2x1 (VL2, VR2)
Speed_L3 = np.dot(Di, M3) # 2x1 (VL3, VR3)
Speed_L4 = np.dot(Di, M4) # 2x1 (VL4, VR4)
Speed_L5 = np.dot(Di, M5) # 2x1 (VL5, VR5)
Speed_L6 = np.dot(Di, M6) # 2x1 (VL6, VR6)
self.mpg.VL1 = float(Speed_L1[0])
self.mpg.VR1 = float(Speed_L1[1])
self.mpg.VL2 = float(Speed_L2[0])
self.mpg.VR2 = float(Speed_L2[1])
self.mpg.VL3 = float(Speed_L3[0])
self.mpg.VR3 = float(Speed_L3[1])
self.mpg.VL4 = float(Speed_L4[0])
self.mpg.VR4 = float(Speed_L4[1])
self.mpg.VL5 = float(Speed_L5[0])
self.mpg.VR5 = float(Speed_L5[1])
self.mpg.VL6 = float(Speed_L6[0])
self.mpg.VR6 = float(Speed_L6[1])
Mx = np.zeros((6,1)) # 6x1
My = np.zeros((6,1)) # 6x1
for i in range(1,7):
for j in range(1,7):
Mx[i-1] += (A[i-1][j-1])*(self.X[j-1] - self.X[i-1]) # 1x1 each
My[i-1] += (A[i-1][j-1])*(self.Y[j-1] - self.Y[i-1]) # 1x1 each
Mx1 = float(Mx[0]) / 5 # 1x1
My1 = float(My[0]) / 5 # 1x1
Mx2 = float(Mx[1]) / 5 # 1x1
My2 = float(My[1]) / 5 # 1x1
Mx3 = float(Mx[2]) / 5 # 1x1
My3 = float(My[2]) / 5 # 1x1
Mx4 = float(Mx[3]) / 5 # 1x1
My4 = float(My[3]) / 5 # 1x1
Mx5 = float(Mx[4]) / 5 # 1x1
My5 = float(My[4]) / 5 # 1x1
Mx6 = float(Mx[5]) / 5 # 1x1
My6 = float(My[5]) / 5 # 1x1
self.mpg.Phix1 = ( Mx2 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
self.mpg.Phiy1 = ( My2 + My3 + My4 + My5 + My6 ) / 5 # 1x1
# self.Phix2 = ( Mx1 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
# self.Phiy2 = ( My1 + My3 + My4 + My5 + My6 ) / 5 # 1x1
# self.Phix3 = ( Mx1 + Mx2 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
# self.Phiy3 = ( My1 + My2 + My4 + My5 + My6 ) / 5 # 1x1
# self.Phix4 = ( Mx1 + Mx2 + Mx3 + Mx5 + Mx6 ) / 5 # 1x1
# self.Phiy4 = ( My1 + My2 + My3 + My5 + My6 ) / 5 # 1x1
# self.Phix5 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx6 ) / 5 # 1x1
# self.Phiy5 = ( My1 + My2 + My3 + My4 + My6 ) / 5 # 1x1
# self.Phix6 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx5 ) / 5 # 1x1
# self.Phiy6 = ( My1 + My2 + My3 + My4 + My5 ) / 5 # 1x1
observation_DQN = torch.tensor(np.array([Mx1, My1, self.mpg.Phix1, self.mpg.Phiy1], dtype=np.double))
done = self.distance < self.distance_threshold
done = bool(done)
reward = -self.distance
self.mpg.spin_once_gym()
return observation_DQN, reward, done, {}
def reset(self):
observation_DQN = np.array([0, 0, 0, 0])
# Stop Simulation
sim.simxStopSimulation(clientID, sim.simx_opmode_oneshot_wait)
# Retrieve some handles:
ErrLocM1,LocM1 =sim.simxGetObjectHandle(clientID, 'robot1', sim.simx_opmode_oneshot_wait)
if (not ErrLocM1==sim.simx_return_ok):
pass
ErrLocM2,LocM2 =sim.simxGetObjectHandle(clientID, 'robot2#0', sim.simx_opmode_oneshot_wait)
if (not ErrLocM2==sim.simx_return_ok):
pass
ErrLoc1,Loc1 =sim.simxGetObjectPosition(clientID, LocM1, -1, sim.simx_opmode_oneshot_wait)
if (not ErrLoc1==sim.simx_return_ok):
pass
ErrLoc2,Loc2 =sim.simxGetObjectPosition(clientID, LocM2, -1, sim.simx_opmode_oneshot_wait)
if (not ErrLoc2==sim.simx_return_ok):
pass
ErrLocO1,OriRobo1 =sim.simxGetObjectOrientation(clientID,LocM1, -1, sim.simx_opmode_oneshot_wait)
if (not ErrLocO1==sim.simx_return_ok):
pass
ErrLocO2,OriRobo2 =sim.simxGetObjectOrientation(clientID,LocM2, -1, sim.simx_opmode_oneshot_wait)
if (not ErrLocO2==sim.simx_return_ok):
pass
OriRobo1[2] = scenes[self.scene][2]
OriRobo2[2] = scenes[self.scene][5]
# Set Robot Orientation
sim.simxSetObjectOrientation(clientID, LocM1, -1, OriRobo1, sim.simx_opmode_oneshot_wait)
sim.simxSetObjectOrientation(clientID, LocM2, -1, OriRobo2, sim.simx_opmode_oneshot_wait)
Loc1[0] = scenes[self.scene][0]
Loc2[0] = scenes[self.scene][3]
Loc1[1] = scenes[self.scene][1]
Loc2[1] = scenes[self.scene][4]
# Set Robot Position
sim.simxSetObjectPosition(clientID, LocM1, -1, Loc1, sim.simx_opmode_oneshot)
sim.simxSetObjectPosition(clientID, LocM2, -1, Loc2, sim.simx_opmode_oneshot)
# Nb of Scene Counter
self.scene += 1
# Start Simulation
sim.simxStartSimulation(clientID, sim.simx_opmode_oneshot_wait)
" Use Adjacency Matrix to find Mxy and Phi's "
        A = np.ones(6) - np.identity(6) # Adjacency matrix
self.X = np.array([ [self.mpg.x1], [self.mpg.x2], [self.mpg.x3], [self.mpg.x4], [self.mpg.x5], [self.mpg.x6] ]) #6x1
self.Y = np.array([ [self.mpg.y1], [self.mpg.y2], [self.mpg.y3], [self.mpg.y4], [self.mpg.y5], [self.mpg.y6] ]) #6x1
Mx = np.zeros((6,1)) # 6x1
My = np.zeros((6,1)) # 6x1
for i in range(1,7):
for j in range(1,7):
Mx[i-1] += (A[i-1][j-1])*(self.X[j-1] - self.X[i-1]) # 1x1 each
My[i-1] += (A[i-1][j-1])*(self.Y[j-1] - self.Y[i-1]) # 1x1 each
Mx1 = float(Mx[0]) / 5 # 1x1
My1 = float(My[0]) / 5 # 1x1
Mx2 = float(Mx[1]) / 5 # 1x1
My2 = float(My[1]) / 5 # 1x1
Mx3 = float(Mx[2]) / 5 # 1x1
My3 = float(My[2]) / 5 # 1x1
Mx4 = float(Mx[3]) / 5 # 1x1
My4 = float(My[3]) / 5 # 1x1
Mx5 = float(Mx[4]) / 5 # 1x1
My5 = float(My[4]) / 5 # 1x1
Mx6 = float(Mx[5]) / 5 # 1x1
My6 = float(My[5]) / 5 # 1x1
self.mpg.Phix1 = ( Mx2 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
self.mpg.Phiy1 = ( My2 + My3 + My4 + My5 + My6 ) / 5 # 1x1
# self.Phix2 = ( Mx1 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
# self.Phiy2 = ( My1 + My3 + My4 + My5 + My6 ) / 5 # 1x1
# self.Phix3 = ( Mx1 + Mx2 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
# self.Phiy3 = ( My1 + My2 + My4 + My5 + My6 ) / 5 # 1x1
# self.Phix4 = ( Mx1 + Mx2 + Mx3 + Mx5 + Mx6 ) / 5 # 1x1
# self.Phiy4 = ( My1 + My2 + My3 + My5 + My6 ) / 5 # 1x1
# self.Phix5 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx6 ) / 5 # 1x1
# self.Phiy5 = ( My1 + My2 + My3 + My4 + My6 ) / 5 # 1x1
# self.Phix6 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx5 ) / 5 # 1x1
# self.Phiy6 = ( My1 + My2 + My3 + My4 + My5 ) / 5 # 1x1
observation_DQN = torch.tensor(np.array([Mx1, My1, self.mpg.Phix1, self.mpg.Phiy1], dtype=np.double))
return observation_DQN
def render(self):
pass
# def main(args=None):
# rclpy.init(args=args)
# minimal_publisher = MinimalPublisherGym()
# time.sleep(5)
# rclpy.spin(minimal_publisher)
# minimal_publisher.destroy_node()
# rclpy.shutdown()
# if __name__ == '__main__':
# main()
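# Illustrative usage sketch (assumes CoppeliaSim is running with the remote API
# server on 127.0.0.1:19997 and the ROS 2 graph used by data_collection_v3 is up;
# not part of the original file):
#
#   rclpy.init()
#   env = MobileRobotVrepEnv()
#   obs = env.reset()
#   for _ in range(100):
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done:
#           obs = env.reset()
#   rclpy.shutdown()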
|
from preprocess import preprocesses
input_datadir = './train_img'
output_datadir = './pre_img'
obj = preprocesses(input_datadir, output_datadir)
num_images_total, num_successfully_aligned = obj.collect_data()
print('Total number of images: %d' % num_images_total)
print('Number of successfully aligned images: %d' % num_successfully_aligned)
|
# coding=utf-8
from __future__ import absolute_import
from config import config
from .helpers import connect_mongodb
from .migrations import *
def migration(cfg_type='default'):
print '-----------------'
print 'Migration: {}'.format(cfg_type)
print '-----------------'
cfg = config.get(cfg_type)
if not cfg:
return None
mongodb_conn, mongodb = connect_mongodb(cfg)
# users
UserMigration(mongodb.User).\
migrate_all(collection=mongodb.User.collection)
# book
BookMigration(mongodb.Book).\
migrate_all(collection=mongodb.Book.collection)
BookVolumeMigration(mongodb.BookVolume).\
migrate_all(collection=mongodb.BookVolume.collection)
# configuration
ConfigMigration(mongodb.Configuration).\
migrate_all(collection=mongodb.Configuration.collection)
return True
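# Example (illustrative): run all migrations against the configuration named
# 'default', assuming config.get('default') yields a valid MongoDB configuration:
#
#   migration('default')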
|
def test_init(initial_setup):
from netnir import __version__
assert isinstance(__version__, str)
|
import cv2
from HW1helpers import centroid_histogram,plot_colors
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from scipy.spatial import distance as dist
from scipy import special
import glob
##### Question 2: Compute Homography on sample images //DONE
# Read source image.
im_src = cv2.imread('p1.jpg')
# Four corners of the book in source image
pts_src = np.array([[923, 903], [397, 293], [863, 0],[1466, 431]])
# Read destination image.
im_dst = cv2.imread('p2.jpg')
# Four corners of the book in destination image.
pts_dst = np.array([[231, 319],[1024, 0],[1470, 405],[709, 910]])
# Calculate Homography
h, status = cv2.findHomography(pts_src, pts_dst)
# Warp source image to destination based on homography
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1],im_dst.shape[0]))
# Display images
cv2.imshow("Source Image", im_src)
cv2.imshow("Destination Image", im_dst)
cv2.imshow("Warped Source Image", im_out)
cv2.waitKey(0)
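# Illustrative follow-up (not in the original): the same homography can also map
# individual points, e.g. the first book corner of the source image:
#   p = np.array([[[923.0, 903.0]]])        # shape (1, 1, 2), float
#   p_dst = cv2.perspectiveTransform(p, h)  # its location in the destination image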
|
from django.dispatch import receiver
from django.urls import resolve, reverse
from django.utils.translation import gettext_lazy as _
from django.template.loader import get_template
from pretix.base.middleware import _parse_csp, _merge_csp, _render_csp
from pretix.presale.signals import (
html_head,
process_response,
)
from pretix.base.signals import (
logentry_display,
register_payment_providers,
register_data_exporters,
)
from pretix.control.signals import (
event_dashboard_widgets,
nav_event_settings,
)
from .exporter import EthereumOrdersExporter
from . import models
NUM_WIDGET = '<div class="numwidget"><span class="num">{num}</span><span class="text">{text}</span></div>' # noqa: E501
@receiver(process_response, dispatch_uid="payment_eth_add_question_type_csp")
def signal_process_response(sender, request, response, **kwargs):
# TODO: enable js only when question is asked
# url = resolve(request.path_info)
h = {}
if 'Content-Security-Policy' in response:
h = _parse_csp(response['Content-Security-Policy'])
_merge_csp(h, {
'style-src': [
"'sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='",
"'sha256-O+AX3tWIOimhuzg+lrMfltcdtWo7Mp2Y9qJUkE6ysWE='",
],
# Chrome correctly errors out without this CSP
'connect-src': [
"wss://bridge.walletconnect.org/",
],
'manifest-src': ["'self'"],
})
response['Content-Security-Policy'] = _render_csp(h)
return response
@receiver(html_head, dispatch_uid="payment_eth_add_question_type_javascript")
def add_question_type_javascript(sender, request, **kwargs):
# TODO: enable js only when question is asked
# url = resolve(request.path_info)
template = get_template('pretix_eth/question_type_javascript.html')
context = {
'event': sender,
}
return template.render(context)
@receiver(event_dashboard_widgets)
def address_count_widget(sender, lazy=False, **kwargs):
total_address = len(models.WalletAddress.objects.all().for_event(sender))
unused_addresses = len(
models.WalletAddress.objects.get_queryset().unused().for_event(sender)
)
used_addresses = total_address - unused_addresses
return [
{
"content": None
if lazy
else NUM_WIDGET.format(
num="{}/{}".format(used_addresses, total_address),
text=_("Used/Total Addresses"),
),
# value for lazy must be a fixed string.
# str(lazy) or any if-else statement won't work.
"lazy": "lazy",
"display_size": "small",
"priority": 100,
}
]
@receiver(register_payment_providers, dispatch_uid="payment_eth")
def register_payment_provider(sender, **kwargs):
from .payment import Ethereum
return Ethereum
@receiver(nav_event_settings, dispatch_uid='pretix_eth_nav_wallet_address_upload')
def navbar_wallet_address_upload(sender, request, **kwargs):
url = resolve(request.path_info)
return [{
'label': _('Wallet address upload'),
'url': reverse('plugins:pretix_eth:wallet_address_upload', kwargs={
'event': request.event.slug,
'organizer': request.organizer.slug,
}),
'active': (
url.namespace == 'plugins:pretix_eth'
and (
url.url_name == 'wallet_address_upload'
or url.url_name == 'wallet_address_upload_confirm'
)
),
}]
@receiver(signal=logentry_display)
def wallet_address_upload_logentry_display(sender, logentry, **kwargs):
if logentry.action_type == 'pretix_eth.wallet_address_upload':
data = logentry.parsed_data
return _(
'Uploaded {file_address_count} addresses '
'with {new_address_count} new addresses '
'and {existing_address_count} existing addresses.'
).format(
file_address_count=data['file_address_count'],
new_address_count=data['new_address_count'],
existing_address_count=data['existing_address_count'],
)
@receiver(register_data_exporters, dispatch_uid='single_event_eth_orders')
def register_data_exporter(sender, **kwargs):
return EthereumOrdersExporter
|
# Copyright 2021 portfolio-robustfpm-framework Authors
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
import gc
import numpy as np
from scipy.spatial import ConvexHull
from sklearn.utils import check_random_state
from ..finance import IOption
from .set_handler import ISetHandler, SetHandler  # SetHandler is referenced below; assumed to live alongside ISetHandler
from .lattice import Lattice
from ..util import coalesce, ProfilerData, Timer, PTimer, minksum_points, isin_points
from robustfpm.cxhull import get_max_coordinates
__all__ = ['OptionPricer']
class OptionPricer:
def __init__(self, lattice, N, option, x0, price_support, constraint_set=None,
debug_mode=False, ignore_warnings=False, enable_timer=False, profiler_data=None,
pricer_options={}):
self.lattice = lattice if isinstance(lattice, Lattice) else Lattice(lattice)
        self.n = self.lattice.delta.size
self.N = N
assert isinstance(option, IOption), 'option must implement the IOption interface'
self.option = option
self.x0 = x0
assert isinstance(price_support, ISetHandler), 'price_support must implement the ISetHandler interface'
assert price_support.iscompact(), 'compactness of the price support is required'
self.price_support = price_support
assert isinstance(constraint_set, ISetHandler), 'constraint_set must implement the ISetHandler interface'
self.constraint_set = coalesce(constraint_set, SetHandler('unbounded', verify_compactness=False))
self.debug_mode = debug_mode
self.ignore_warnings = ignore_warnings
self.enable_timer = enable_timer
self.profiler_data = coalesce(profiler_data, ProfilerData())
if not isinstance(pricer_options, dict):
pricer_options = {}
self.pricer_options = {
'convex_hull_filter': pricer_options.get('convex_hull_filter', None),
'convex_hull_prune_fail_count': pricer_options.get('convex_hull_prune_fail_count', 20),
'convex_hull_prune_success_count': pricer_options.get('convex_hull_prune_success_count', 20),
'convex_hull_prune_corner_n': pricer_options.get('convex_hull_prune_corner_n', 3),
'convex_hull_prune_seed': pricer_options.get('convex_hull_prune_seed', None)
}
def __precalc(self):
self.p0_ = self.lattice.get_projection(self.x0) # map x0
self.dK_ = self.lattice.get_projection(self.price_support) # map support neighbourhood
self.silent_timer_ = not self.enable_timer
self.pruning_random_state_ = check_random_state(self.pricer_options['convex_hull_prune_seed'])
def generate_evaluation_point_lists(self, p0, dK, N, profiler_data=None):
with PTimer(header='V = [], V.append([p0])', silent=True, profiler_data=profiler_data):
Vp = []
Vp.append(p0.reshape(1,-1))
Vf = [np.empty(Vp[-1].shape[0], dtype=Vp[-1].dtype)]
for i in range(N):
with PTimer(header='Vp.append(minksum_points(Vp[-1], dK, recur_max_level=None))', silent=True,
profiler_data=profiler_data):
Vp.append(minksum_points(Vp[-1], dK, recur_max_level=None))
Vf.append(np.empty(Vp[-1].shape[0], dtype=Vp[-1].dtype))
return (Vp, Vf)
def __chull_prune_points(self, xv):
fail_cnt = 0
success_cnt = 0
eps = 1e-8
n = xv.shape[1]
res_ind = np.arange(xv.shape[0])
it = 0
it_max = self.pricer_options['convex_hull_prune_fail_count'] * self.pricer_options['convex_hull_prune_success_count']
while (xv.shape[0] > n) and (fail_cnt < self.pricer_options['convex_hull_prune_fail_count']) and (success_cnt < self.pricer_options['convex_hull_prune_success_count']):
xv_size = xv.shape[0]
            ind_tf = np.ndarray((res_ind.shape[0], 2*n), dtype=bool)
for i in range(n):
ind_tf[:,2*i] = (xv[:,i] == np.amax(xv[:,i]))
ind_tf[:,2*i+1] = (xv[:,i] == np.amin(xv[:,i]))
ind = np.arange(xv.shape[0])[np.sum(ind_tf, axis=1) >= self.pricer_options['convex_hull_prune_corner_n']]
if ind.shape[0] < n:
print('few corner points')
break
            ind_c = self.pruning_random_state_.choice(ind, size=n, replace=False)
xc = np.vstack((np.ones(n, dtype=xv.dtype),
xv[ind_c,:-1].T))
vc = xv[ind_c,-1]
if np.linalg.matrix_rank(xc) != xc.shape[0]:
fail_cnt += 1
# print('fail, rank')
# print('xc = ', xc)
# print('xv[ind] = ', xv[ind])
continue
ind_rest = np.arange(xv.shape[0])
ind_rest = ind_rest[np.in1d(ind_rest, ind_c, assume_unique=True, invert=True)]
x_rest = xv[ind_rest,:-1]
v_rest = xv[ind_rest,-1]
E = np.hstack((np.zeros((x_rest.shape[0],1)), x_rest))
A = xc - E[...,np.newaxis]
if n == 3:
d12 = A[:,1,1] * A[:,2,2] - A[:,1,2] * A[:,2,1]
d02 = A[:,2,0] * A[:,1,2] - A[:,1,0] * A[:,2,2]
d01 = A[:,1,0] * A[:,2,1] - A[:,2,0] * A[:,1,1]
detA = d12 + d02 + d01
lmb = np.vstack( (d12, d02, d01) ).T / detA.reshape(-1,1)
else:
                raise ValueError('n != 3 is not supported')
ind_remove = ind_rest[np.bitwise_and(np.all(lmb >= 0, axis=1), v_rest <= lmb @ vc + eps)]
if ind_remove.shape[0] == 0:
# print('fail, not found')
# print('xv[ind_c] = ', xv[ind_c])
fail_cnt += 1
else:
# print('success')
success_cnt += 1
fail_cnt = 0
# if (ind_remove.shape[0] > 0) and np.any(np.max(np.abs(xv[ind_remove] - np.array([[0.5, 0.9, 0.0]], dtype=xv.dtype)), axis=1) <= 0.001):
# print('x_rest, lmb, v, v_thresh')
# tmp = lmb @ vc
# for i in range(x_rest.shape[0]):
# print(x_rest[i,:], lmb[i,:], v_rest[i], tmp[i])
# print('xc = ', xc)
# print('vc = ', vc)
# print('xv[ind_remove] = ', xv[ind_remove])
tf = np.in1d(np.arange(xv.shape[0]), ind_remove, assume_unique=True, invert=True)
xv = xv[tf]
res_ind = res_ind[tf]
# print('xv_size = ', xv_size)
# print('xv.shape[0] = ', xv.shape[0])
it+=1
if it > it_max:
                print('unexpected infinite loop in convex hull pruning')
break
return res_ind
def __get_cvhull_indices(self, x, v):
if self.pricer_options['convex_hull_filter'] is None:
return np.arange(x.shape[0])
if len(x.shape) > 1:
v = v.reshape(-1,1)
points = np.hstack((x, v))
try:
pruned_ind = self.__chull_prune_points(points)
points = points[pruned_ind]
except:
pass
points_zero = points[points[:,-1] > 0]
# try:
# if (self.pricer_options['convex_hull_filter'] == 'qhull') and (points_zero.shape[0] > x.shape[1]):
# points_zero = points_zero[ConvexHull(points_zero[:,:-1]).vertices]
# except:
# pass
points_zero[:,-1] = 0.0
points = np.vstack((points,points_zero))
if self.pricer_options['convex_hull_filter'] == 'qhull':
# with Timer('Convex hull', flush=True):
cv_point_indices = ConvexHull(points).vertices
# print('result = {0}/{1}'.format(cv_point_indices[cv_point_indices < x.shape[0]].shape[0], points.shape[0]))
# raise Exception('stopped')
# print('x.shape[0] = ', x.shape[0])
# print('cv_point_indices = ', cv_point_indices)
# print('pruned_ind', pruned_ind)
return pruned_ind[cv_point_indices[cv_point_indices < pruned_ind.shape[0]]]
raise ValueError('unknown convex_hull_filter value \'{0}\''.format(self.pricer_options['convex_hull_filter']))
def find_u(self, x, v, z):
# flat surface
if np.max(v)-np.min(v) < 1e-15:
ind = np.argmin(np.max(np.abs(x-z), axis=1))
Qopt = 0*v
Qopt[ind] = 1
# return (np.mean(v), Qopt)
return (np.mean(v), np.nan)
try:
ind = self.__get_cvhull_indices(x, v)
# print(np.hstack((x[ind], v[ind].reshape(-1,1))))
# print('z = ', z)
except Exception as ex:
print('x = ', x)
print('v = ', v)
raise ex
try:
Qopt = get_max_coordinates(x[ind], v[ind], z, debug_mode=self.debug_mode, ignore_warnings=self.ignore_warnings)
# print('Qopt = ', Qopt)
except Exception as ex:
# print('[(n, x, v)] = ')
# for c in zip(range(x.shape[0]),x,v): print(c)
print('convex hull [(n, x, v)] = ')
for c in zip(ind, x[ind],v[ind]): print(c)
print('z = ', z)
raise ex
# print('Qopt = ', Qopt)
# print('v[ind] = ', v[ind])
Vopt = Qopt @ v[ind]
return (Vopt, Qopt)
def find_rho(self, x, v, K_x, convdK_x):
convdK_x = np.atleast_2d(convdK_x)
K_x = np.atleast_2d(K_x)
supp_func = self.constraint_set.support_function(convdK_x - (1 if self.lattice.logscale else 0))
tf = supp_func < np.Inf
if np.sum(tf) == 0:
print('support function is +Inf')
return (-np.Inf, np.nan)
K_x = K_x[tf]
convdK_x = convdK_x[tf]
supp_func = supp_func[tf]
n = x.shape[1]
res_u = np.ndarray(K_x.shape[0], dtype=v.dtype)
for i in range(K_x.shape[0]):
Vopt, _ = self.find_u(x, v, K_x[i])
res_u[i] = Vopt
maxind = np.argmax(res_u - supp_func)
return (res_u[maxind] - supp_func[maxind], convdK_x[maxind])
def evaluate(self):
with PTimer(header='Init stage of evaluate()', silent=True, profiler_data=self.profiler_data) as tm:
self.__precalc()
with Timer('Main stage of evaluate()', flush=False, silent=self.silent_timer_) as tm_total:
pdata = self.profiler_data.data[tm_total.header]
with Timer('Precalculation of the evaluation points for the value function', silent=self.silent_timer_) as tm:
Vp, Vf = self.generate_evaluation_point_lists(self.p0_, self.dK_, self.N, profiler_data=pdata.data[tm.header])
with PTimer('Evaluation of the value function at the terminal moment', silent=self.silent_timer_,
profiler_data=pdata) as tm:
x = self.lattice.map2x(Vp[-1])
Vf[-1] = self.option.payoff(x)
with Timer('Evaluation of the value function at the intermediate moments', silent=self.silent_timer_) as tm:
pdata2 = pdata.data[tm.header]
for t in reversed(range(self.N)):
if not self.silent_timer_: print('t = {0}'.format(t))
res = np.empty(Vp[t].shape[0], dtype=Vf[t+1].dtype)
for i, vp in enumerate(Vp[t]):
if not self.silent_timer_:
if (np.random.uniform()<0.001): print('iter = {0}/{1}'.format(i, len(Vp[t])))
with PTimer(header='K = vp + self.dK_', silent=True, profiler_data=pdata2) as tm2:
K = vp + self.dK_
with PTimer(header='tf = isin_points(Vp[t+1], K)', silent=True, profiler_data=pdata2) as tm2:
tf = isin_points(Vp[t+1], K)
with PTimer(header='find_rho', silent=True, profiler_data=pdata2) as tm2:
res_v, _ = self.find_rho(self.lattice.map2x(Vp[t+1][tf]), Vf[t+1][tf], self.lattice.map2x(K), self.lattice.map2x(self.dK_))
res[i] = res_v
# print('vp = ', self.lattice.map2x(vp))
# print('res_v = ', res_v)
# print('Vp[t+1], Vf[t+1] = ')
# for c1, c2 in zip(self.lattice.map2x(Vp[t+1][tf]), Vf[t+1][tf]):
# print(c1, c2)
Vf[t] = res
gc.collect()
return Vf[0][0]
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import core.cli_utils
class CliUtilsTest(unittest.TestCase):
def testMergeIndexRanges(self):
ranges = [(0, 6), (7, 11), (4, 8), (8, 9)]
merged = core.cli_utils.MergeIndexRanges(ranges)
self.assertEqual([(0, 11)], merged)
def testMergeIndexRangesEmpty(self):
ranges = []
merged = core.cli_utils.MergeIndexRanges(ranges)
self.assertEqual([], merged)
def testMergeIndexRangesNoMerge(self):
ranges = [(7, 11), (0, 6)]
merged = core.cli_utils.MergeIndexRanges(ranges)
self.assertEqual([(0, 6), (7, 11)], merged)
def testMergeIndexRangesEdgeCase(self):
ranges = [(0, 8), (8, 11), (11, 12)]
merged = core.cli_utils.MergeIndexRanges(ranges)
self.assertEqual([(0, 12)], merged)
def testMergeIndexRangesInvalidRange(self):
with self.assertRaises(ValueError):
ranges = [(0, 8), (8, 5)]
core.cli_utils.MergeIndexRanges(ranges)
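# A minimal reference sketch of the behaviour these tests imply for
# core.cli_utils.MergeIndexRanges (an assumption, not the actual implementation):
# sort the ranges, merge any range whose start does not exceed the current end,
# and reject ranges whose start is not strictly below their end.
#
#   def merge_index_ranges(ranges):
#       if any(start >= end for start, end in ranges):
#           raise ValueError('invalid range')
#       merged = []
#       for start, end in sorted(ranges):
#           if merged and start <= merged[-1][1]:
#               merged[-1] = (merged[-1][0], max(merged[-1][1], end))
#           else:
#               merged.append((start, end))
#       return merged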
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Tumblr photo downloader """
import sys, os, errno
import urllib2
import threading
import Queue
import time
import signal
import argparse
from xml.dom import minidom
__version__ = 0.1
URL_FORMAT = 'http://%s.tumblr.com/api/read?type=photo&num=%d&start=%d'
def imagesdownloader(urls):
""" Thread function used to download an image """
while 1:
try:
url, localfile = urls.get(True, 2)
try:
ufd = urllib2.urlopen(url)
# Open our local file for writing
with open(localfile, "wb") as lfd:
lfd.write(ufd.read())
except: #handle errors
print "\nError getting file,", sys.exc_info()[1]
urls.task_done()
except:
break # No more jobs
def _getnewimageslist(account, directory):
""" Search for new images to download """
print "Searching for new images from account %s to download ..." % (account,)
num = 50
start = 0
newimages = []
while True:
images_counter = []
feed = urllib2.urlopen(URL_FORMAT % (account, num, start)).read()
dom = minidom.parseString(feed)
photoslist = dom.getElementsByTagName('photo-url')
for photo in photoslist:
if int(photo.attributes['max-width'].value) == 1280:
url = photo.firstChild.nodeValue
localfile = os.path.join(directory, url.split('/')[-1].split('#')[0].split('?')[0])
if not os.path.exists(localfile):
newimages.append((url, localfile))
sys.stdout.write('\r%d new images ...' % (len(newimages),))
sys.stdout.flush()
images_counter.append(url)
if len(newimages) == 0:
print "Nothing new to download. Done."
break
if len(images_counter) < num:
break
else:
start += num
return newimages
def updateprogress(qsize, nbimages):
""" Download progress bar """
progress = 100 - int((100.0 * qsize) / nbimages)
dsp = '\r%03d%% [%-50s] (%d, %d)' % (progress, '#'*(progress/2), nbimages - qsize, nbimages)
sys.stdout.write(dsp)
sys.stdout.flush()
def main():
""" Main function """
parser = argparse.ArgumentParser(description="Download all the photos of a Tumblr blog")
parser.add_argument("account", help="account to download")
parser.add_argument("directory", help="output directory", nargs='?')
parser.add_argument("-c", "--concurrency", type=int, default=8, help="set the number of download threads (default: 8)")
args = parser.parse_args()
account = args.account
directory = args.directory if args.directory is not None else account
try:
os.makedirs(directory)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(directory):
pass
else: raise
jobs = Queue.LifoQueue()
newimages = _getnewimageslist(account, directory)
nbimages = len(newimages)
if nbimages == 0:
return
for image in newimages:
jobs.put(image)
print "\nStarting download threads"
# Start workers
for windex in xrange(args.concurrency):
worker = threading.Thread(target=imagesdownloader, args=(jobs,))
worker.setDaemon(True)
worker.start()
# Display progress bar
while not jobs.empty():
qsize = jobs.qsize()
updateprogress(qsize, nbimages)
time.sleep(0.3)
updateprogress(0, nbimages)
print
for thread in threading.enumerate():
if thread is not threading.currentThread():
thread.join()
print "Done."
def _sigint_handler(sig, frame):
""" Manage SIGINT signal """
print "\nStopped by user"
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, _sigint_handler)
main()
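# Example invocation (illustrative; the script name is a placeholder):
#   python tumblr_photo_downloader.py someblog ./photos -c 16
# downloads every 1280px-wide photo from http://someblog.tumblr.com into ./photos
# using 16 download threads.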
|
# coding:utf-8
# Import pandas and alias it as pd.
import pandas as pd
def get_titantic():
titanic = pd.read_csv('../Datasets/titanic.txt')
    # Separate the data features from the prediction target.
y = titanic['survived']
X = titanic.drop(['row.names', 'name', 'survived'], axis=1)
    # Fill in the missing data.
X['age'].fillna(X['age'].mean(), inplace=True)
X.fillna('UNKNOWN', inplace=True)
return X, y
# Split the data, again sampling 25% for testing.
from sklearn.cross_validation import train_test_split
def get_train_tezt_X_Y(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)
return X_train, X_test, y_train, y_test
# Vectorize the categorical features.
from sklearn.feature_extraction import DictVectorizer
def vectorize(X_train, X_test):
vec = DictVectorizer()
X_train = vec.fit_transform(X_train.to_dict(orient='record'))
X_test = vec.transform(X_test.to_dict(orient='record'))
    # Print the dimensionality of the processed feature vectors.
print len(vec.feature_names_)
return X_train, X_test
# Import the feature selector from sklearn.
from sklearn import feature_selection
def selection_percentile(X_train, X_test, y_train, percentile):
    # Select the top features by percentile (e.g. 20%), predict with an identically configured decision tree, and evaluate performance.
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile)
X_train_fs = fs.fit_transform(X_train, y_train)
X_test_fs = fs.transform(X_test)
return X_train_fs, X_test_fs
# Use a decision tree model with all features for prediction and evaluate its performance.
from sklearn.tree import DecisionTreeClassifier
def get_DT():
dt = DecisionTreeClassifier(criterion='entropy')
return dt
def classifiaction_DT(X_train, X_test, y_train, y_test):
dt = get_DT()
dt.fit(X_train, y_train)
dt.score(X_test, y_test)
def classifiaction_DT_percent(X_train, X_test, y_train, y_test, percentile):
dt = get_DT()
X_train_fs, X_test_fs = selection_percentile(X_train, X_test, y_train, percentile)
dt.fit(X_train_fs, y_train)
dt.score(X_test_fs, y_test)
import pylab as pl
def pl_plot(percentiles, results):
pl.plot(percentiles, results)
pl.xlabel('percentiles of features')
pl.ylabel('accuracy')
pl.show()
# Use cross-validation (covered in detail in the next section) to select features at fixed percentile intervals, and plot how performance varies with the proportion of features selected.
from sklearn.cross_validation import cross_val_score
import numpy as np
def get_percentiles_results(X_train, y_train):
percentiles = range(1, 100, 2)
results = []
dt = get_DT()
for i in percentiles:
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)
X_train_fs = fs.fit_transform(X_train, y_train)
scores = cross_val_score(dt, X_train_fs, y_train, cv=5)
results = np.append(results, scores.mean())
print results
    opt = np.where(results == results.max())[0][0]  # This line differs from the source code; per the docs, np.where returns an ndarray or a tuple of ndarrays
print 'Optimal number of features %d' % percentiles[opt]
return percentiles, results
def main():
print("===========start===========")
X, y = get_titantic()
X_train, X_test, y_train, y_test = get_train_tezt_X_Y(X, y)
X_train, X_test = vectorize(X_train, X_test)
dt = get_DT()
classifiaction_DT(X_train, X_test, y_train, y_test)
print("===========running===========")
classifiaction_DT_percent(X_train, X_test, y_train, y_test, 20)
print("===========running===========")
classifiaction_DT_percent(X_train, X_test, y_train, y_test, 7)
print("===========running===========")
percentiles, results = get_percentiles_results(X_train, y_train)
pl_plot(percentiles, results)
print("===========end===========")
if __name__ == '__main__':
main()
|
#!/usr/bin/python
'''
creates bundle graph from filtered multigraph
'''
### imports ###
import sys
import os
import logging
import networkx as nx
import numpy as np
import scipy.stats as stats
import cPickle
import helpers.io as io
import helpers.misc as misc
### definitions ###
### functions ###
def compress_edges(MG, p, q):
''' compresses the edges '''
# check for types.
bcnts = [0, 0, 0, 0]
for z in MG[p][q]:
bcnts[MG[p][q][z]['state']] += 1
# build numpy arrays for each distance type.
bdists = list()
for i in range(4):
bdists.append(np.zeros(bcnts[i], dtype=np.float))
# populate array with distances.
bidxs = [0, 0, 0, 0]
for z in MG[p][q]:
state = MG[p][q][z]['state']
dist = MG[p][q][z]['dist']
bdists[state][bidxs[state]] = dist
bidxs[state] += 1
# compute bundle info.
devs = list()
means = list()
mins = list()
maxs = list()
for i in range(4):
if bdists[i].shape[0] <= 0:
devs.append(-1)
means.append(-1)
mins.append(-1)
maxs.append(-1)
else:
devs.append(np.std(bdists[i]))
means.append(np.mean(bdists[i]))
mins.append(bdists[i].min())
maxs.append(bdists[i].max())
# return summaries.
return bcnts, bdists, devs, means, mins, maxs
def _load_reps(file_path):
''' loads repeat info from cpickle'''
# no weights.
    if file_path is None:
        return dict()
    # try directory method.
    if os.path.isdir(file_path):
reps = dict()
for f in os.listdir(file_path):
n = f.replace(".npy","")
try:
reps[n] = np.load("%s/%s" % (file_path, f))
except:
continue
return reps
# get weights.
try:
with open(file_path) as fin:
return cPickle.load(fin)
except:
logging.warning("unable to load repeat pickle, ignoring weights")
return dict()
def create_bundles(paths, args):
""" creates bundles
Parameters
----------
paths.edge_file : string
args.bundle_size : int
args.pthresh : int
args.bup : int
"""
# load repeat annotations.
repcnts = _load_reps(args.rep_file)
# load the multi graph.
MG = nx.read_gpickle(paths.edge_file)
# create bundle graph.
BG = nx.Graph()
# add nodes.
for n in MG.nodes():
BG.add_node(n, MG.node[n])
# build set of adjacencies.
adjset = set()
for p, nbrs in MG.adjacency_iter():
for q in nbrs:
adjset.add(tuple(sorted([p,q])))
# compute bundles from adjacencies.
zerod = 0
zcnt = 0
ztot = len(adjset)
for p, q in adjset:
#logging.info("progress: %d of %d" % (zcnt, ztot))
zcnt += 1
# sanity check.
if MG.node[p]['cov'] == 0.0 or MG.node[q]['cov'] == 0.0:
logging.error("how can this happen?")
sys.exit()
# bundle size check.
bsize = len(MG[p][q])
if bsize < args.bundle_size:
continue
# group by insert size.
groups = dict()
std_devs = dict()
for z in MG[p][q]:
ins_size = MG[p][q][z]['ins_size']
if ins_size not in groups:
groups[ins_size] = list()
std_devs[ins_size] = MG[p][q][z]['std_dev']
groups[ins_size].append(z)
# loop over groups.
for ins_size in groups:
# compress info.
bcnts, bdists, devs, means, mins, maxs = compress_edges(MG, p, q)
# compute weights.
cov = 1 - abs(MG.node[p]['cov'] - MG.node[q]['cov']) / (MG.node[p]['cov'] + MG.node[q]['cov'])
# swap bdists for python lists.
for i in range(len(bdists)):
bdists[i] = list(bdists[i])
# add start stop info.
poses1 = list()
poses2 = list()
for z in MG[p][q]:
tmp = MG[p][q][z]
poses1.append((tmp['left1'], tmp['right1']))
poses2.append((tmp['left2'], tmp['right2']))
# create bundle.
if BG.has_edge(p, q):
logging.error("can't have multiple insert sizes between same node")
sys.exit(1)
# zero out negative distances.
avgs = [np.average(bdists[i]) for i in range(4)]
for i in range(4):
                if np.isnan(avgs[i]):
bcnts[i] = 0.0
if avgs[i] < -2 * args.bundle_size:
bcnts[i] = 0.0
zerod += 1
# don't add it if no support.
if np.sum(bcnts) == 0:
continue
#BG.add_edge(p, q, bcnts=bcnts, bdists=bdists, devs=devs, means=means, mins=mins, maxs=maxs, ins_size=ins_size, std_dev=std_devs[ins_size], poses1=poses1, poses2=poses2)
BG.add_edge(p, q, bcnts=bcnts, bdists=bdists, ins_size=ins_size, std_dev=std_devs[ins_size], cov=cov)
# start the slimming.
logging.info("starting repeat based slimming")
# do repeat mods.
track_upped = 0
track_remed = 0
track_ogedg = len(BG.edges())
idxs = np.zeros(1)
if repcnts != dict():
# create repeat distrib.
repavgs = np.zeros(len(repcnts), dtype=np.dtype([('name','S256'),('avg',np.float)]))
i = 0
for name in repcnts:
# save the name.
repavgs[i]['name'] = name
# skip no repeat info.
if name not in repcnts or repcnts[name] == None:
repavgs[i]['avg'] = 0
i += 1
continue
# take the average over ins_size + 6 (std_dev)
d = args.ins_size + (6 * args.std_dev)
if repcnts[name].shape[0] < d:
repavgs[i]['avg'] = np.average(repcnts[name])
else:
r = range(0,d)+range(len(repcnts[name])-d,len(repcnts[name]))
repavgs[i]['avg'] = np.average(repcnts[name][r])
i += 1
# compute the cutoff threshold.
score = stats.scoreatpercentile(repavgs[:]['avg'], args.pthresh)
idxs = repavgs[:]['avg'] > score
# look at each bundle and see if the repeats necessitates attention.
for p, q in BG.edges():
# get index of pairs.
idp = np.where(repavgs[:]['name'] == p)[0]
idq = np.where(repavgs[:]['name'] == q)[0]
# skip if both not high.
if idxs[idp] == False and idxs[idq] == False:
continue
# get score.
scp = repavgs[idp]['avg']
scq = repavgs[idq]['avg']
# check if this bundle needs attention.
if max(scp, scq) > score:
track_upped += 1
                # its minimum bundle size gets upped.
for i in range(len(BG[p][q]['bcnts'])):
# clear if it doesn't meet criteria.
if BG[p][q]['bcnts'][i] < args.bundle_size + args.bup:
BG[p][q]['bcnts'][i] = 0
# remove bundle if no support.
if np.sum(BG[p][q]['bcnts']) == 0:
track_remed += 1
BG.remove_edge(p,q)
else:
logging.info('no repeat information supplied')
# add repeat weights.
for p, q in BG.edges():
# create weight.
BG[p][q]['u'] = [0.0] * 4
# sum weights.
for z in MG[p][q]:
left1 = MG[p][q][z]['left1']
left2 = MG[p][q][z]['left2']
right1 = MG[p][q][z]['right1']
right2 = MG[p][q][z]['right2']
            # guard against missing repeat tracks for this contig as well as zero-length spans.
            try:
                cntl = np.sum(repcnts[p][left1:left2])
                cntr = np.sum(repcnts[p][right1:right2])
                propl = 1.0 - (float(cntl) / float(left2-left1))
                propr = 1.0 - (float(cntr) / float(right2-right1))
            except:
                continue
# add average.
p_k = (propl + propr) / 2.0
# add it.
BG[p][q]['u'][MG[p][q][z]['state']] += p_k
# note the modifications due to filtering.
logging.info("contigs with repeat regions in %.2f threshold: %i of %i" % (args.pthresh, np.sum(idxs), len(idxs)))
logging.info("bundles effected by repeats: %i of %i" % (track_upped, track_ogedg))
logging.info("bundles removed by repeats: %i of %i" % (track_remed, track_ogedg))
logging.info("bundles removed by neg dist: %i" % (zerod))
logging.info("total bundles: %i" % (len(BG.edges())))
# write to disk.
nx.write_gpickle(BG, paths.bundle_file)
|
#!/usr/bin/env python3.5
# coding=utf-8
# Licensed Materials - Property of IBM®
# Copyright IBM® Corp. 2015,2017
"""Submit for submission of SPL applications.
The main function is submitSplApp to submit an SPL Application
to a Streaming Analytics service or IBM® Streams instance for execution.
usage: submitSPL.py [-h] --main_composite MAIN_COMPOSITE --project_dir
PROJECT_DIR [--job_name JOB_NAME] [--job_group JOB_GROUP]
[--data_directory DATA_DIRECTORY] --service_name
SERVICE_NAME --credentials_file CREDENTIALS_FILE
[--param_file PARAM_FILE]
[--toolkits_list_file TOOLKITS_LIST_FILE]
Submit SPL Application to IBM® Cloud Streaming Service
optional arguments:
-h, --help show this help message and exit
--main_composite MAIN_COMPOSITE SPL Main composite with namespace i.e. com.ibm.streams::MainApp
--project_dir PROJECT_DIR SPL application project directory
--job_name JOB_NAME Job name to appear in Streams console
--job_group JOB_GROUP Job group, this must exist in the Streams instance to successfully submit
--data_directory DATA_DIRECTORY SPL application data directory
--service_name SERVICE_NAME Name of the IBM® Cloud Streaming service
--credentials_file CREDENTIALS_FILE File containing the JSON of IBM® Cloud Streaming service credentials
--param_file PARAM_FILE SPL parameters file with json array i.e.
[ { "name": "param1", "type": "rstring", "value": "paramValue1"}, ...]
--toolkits_list_file TOOLKITS_LIST_FILE List of toolkits of dependencies
"""
import sys
import argparse
import streamsx.topology.topology
import streamsx.spl.op
import streamsx.topology.context
import streamsx.spl.toolkit
import streamsx.spl.types
import json
import os
'''
'''
def processParamFile(spl_main_param_file):
splParams = {}
try:
jsonParams = json.load(open(spl_main_param_file))
except Exception as err:
print('ERROR : While processing spl_main_param_file : ', spl_main_param_file)
print('Run-time error ', err)
sys.exit(1)
p = ''
try:
for p in jsonParams:
if 'type' in p:
                # look up the SPL type helper by name instead of building a string for exec()
                splParams[p['name']] = getattr(streamsx.spl.types, p['type'])(p['value'])
else:
splParams[p['name']] = p['value']
except Exception as err:
print('ERROR : While processing spl_main_param_file : ', spl_main_param_file)
print('Error in the entry ->', p)
print('Run-time error ', err)
sys.exit(2)
return splParams
def processToolkits(dep_toolkits):
tkList =[]
for tk in open(dep_toolkits):
tk = tk.split('#')[0]
tk = tk.replace(' ', '')
tk = tk.rstrip()
tk = tk.strip(' ')
if len(tk) > 0 :
if isToolkitDir(tk) :
tkList.append(tk)
else :
print('ERROR : Could not find the toolkit directory : '+tk)
print('it is specified in toolkit list file : ', dep_toolkits)
sys.exit(4)
return tkList
def isToolkitDir(tk):
return (os.path.exists(tk) and os.path.isdir(tk))
def checkMainProjDir(dir):
if not isToolkitDir(dir):
print('ERROR : Could not find the main SPL project directory : ' + dir)
sys.exit(4)
def submitSplApp(spl_main_composite,
spl_main_project_dir,
streaming_service_name,
service_credentials_filename,
spl_params=None,
dep_toolkits_list=None,
job_name=None,
job_group=None,
data_directory=None):
'''
:param spl_main_composite: Must contain the namespace and main composite name i.e. com.ibm.streams::MainApp
:param spl_main_project_dir: The Streams application project directory
:param streaming_service_name: Name of the IBM® Cloud Streaming service
:param service_credentials_filename: File containing the JSON of IBM® Cloud Streaming service credentials
:param spl_params: SPL parameters dictionary loaded from file with json array as following
[ { "name": "param1", "type": "rstring", "value": "paramValue1"},...]
:param dep_toolkits_list: List of toolkits of dependencies
:param job_name: Job name to appear in Streams console
:param job_group: Job group, this must exist in the Streams instance to successfully submit
:param data_directory: Application data directory
:return: SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`
constant passed as `ctxtype`.
'''
# Topology object
topo = streamsx.topology.topology.Topology(spl_main_composite.split('::')[-1])
# ==================================================
# IBM® Cloud Streaming Service Context Configuration
# ==================================================
try :
credentials = json.load(open(service_credentials_filename))
except Exception as err:
print('ERROR : While processing service_credentials_filename : ', service_credentials_filename)
print('Run-time error ', err)
sys.exit(3)
vs = {'streaming-analytics': [{'name': streaming_service_name, 'credentials': credentials}]}
cfg = {}
cfg[streamsx.topology.context.ConfigParams.VCAP_SERVICES] = vs
cfg[streamsx.topology.context.ConfigParams.SERVICE_NAME] = streaming_service_name
# job_name=None, job_group=None, preload=False, data_directory=None
job_config = streamsx.topology.context.JobConfig(job_name=job_name, job_group=job_group, data_directory=data_directory)
job_config.add(cfg)
# ========================
# Toolkit Dependencies
# ========================
streamsx.spl.toolkit.add_toolkit(topo, spl_main_project_dir)
if dep_toolkits_list is not None:
for toolkit in dep_toolkits_list:
streamsx.spl.toolkit.add_toolkit(topo, toolkit)
# ===============
# Invoke SPL
# ===============
splMain = streamsx.spl.op.Invoke(topo, spl_main_composite, params=spl_params)
# ===============
# Submit
# ===============
# submit(ctxtype, graph, config=None, username=None, password=None)
    # Submit the topology to be executed - STANDALONE, DISTRIBUTED, BLUEMIX
# streamsx.topology.context.submit("DISTRIBUTED", topo)
ctx = streamsx.topology.context.submit('STREAMING_ANALYTICS_SERVICE', topo, config=cfg)
print('Submitted job to service:', streaming_service_name)
return ctx
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Submit SPL Application to IBM® Cloud Streaming Service')
parser.add_argument('--main_composite', help="SPL Main composite with namespace i.e. com.ibm.streams::MainApp", required=True )
parser.add_argument('--project_dir', help="SPL application project directory", required=True )
parser.add_argument('--job_name', help="Job name to appear in Streams console", default=None, required=False )
parser.add_argument('--job_group', help="Job group, this must exist in the Streams instance to successfully submit", default=None, required=False )
parser.add_argument('--data_directory', help="SPL application data directory", default=None, required=False)
parser.add_argument('--service_name', help="Name of the IBM® Cloud Streaming service", required=True)
parser.add_argument('--credentials_file', help="File containing the JSON of IBM® Cloud Streaming service credentials", required=True)
parser.add_argument('--param_file', help='''SPL parameters file with json array i.e.
[ { "name": "param1", "type": "rstring", "value": "paramValue1"}, ...]''', default=None, required=False)
parser.add_argument('--toolkits_list_file', help="List of toolkits of dependencies", default=None, required=False)
args = parser.parse_args()
spl_params = None
dep_toolkits_list = None
print("Submitting the application "+ args.main_composite +" with the following parameters:")
print(" - main_composite : " + args.main_composite)
print(" - project_dir : " + args.project_dir)
if args.job_name is not None: print(" - job_name : " + args.job_name)
if args.job_group is not None: print(" - job_group : " + args.job_group)
if args.data_directory is not None: print(" - data_directory : " + args.data_directory)
print(" - service_name : " + args.service_name)
print(" - credentials_file : " + args.credentials_file)
if args.param_file is not None:
print(" - param_file : " + args.param_file)
spl_params = processParamFile(args.param_file)
for k,v in spl_params.items():
print(' -> '+k + ' = '+str(v))
if args.toolkits_list_file is not None:
print(" - toolkits_list_file : " + args.toolkits_list_file)
dep_toolkits_list = processToolkits(args.toolkits_list_file)
for tk in dep_toolkits_list:
print(' -> '+tk)
# Assert the project directory exists
assert isToolkitDir(args.project_dir), 'ERROR : Could not find the main SPL project directory : %s'%args.project_dir
spl_main_project_dir = args.project_dir
submitSplApp(args.main_composite, spl_main_project_dir, args.service_name, args.credentials_file,
spl_params, dep_toolkits_list, args.job_name, args.job_group, args.data_directory)
|
"""Defines the logic for handling requests to the `/recs` route"""
from dependencies.spotify import Client, SpotifyClient
from fastapi import APIRouter, Depends
from models.collection import Collection
from models.rec import Rec, RecQuery
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor( # type: ignore
BatchSpanProcessor(ConsoleSpanExporter())
)
tracer = trace.get_tracer(__name__)
router = APIRouter(prefix="/recs", tags=["recommendations", "recs"])
def get_recs(client: SpotifyClient) -> Collection[Rec]:
"""
Parses recommendations into a list of `Rec`s
Params
------
    client: SpotifyClient
        the Spotify client used to fetch recommendation objects
Returns
-------
recs: Collection[Rec]
a collection of `Rec` objects
"""
items = [Rec.from_dict(item) for item in client.get_recommendations_from_spotify()]
return Collection.from_list(items)
@router.get("", response_model=Collection[Rec])
async def get_recommendations(query: RecQuery = Depends()) -> Collection[Rec]:
"""
Retrieves recommendations for the user based on their input parameters
Params
------
query: RecQuery
the query object with seed data from the request url
Returns
-------
recs: Collection[Rec]
a collection of `Rec` objects
"""
with tracer.start_as_current_span(
name="Retrieving recommendations",
attributes={
"limit": str(query.limit),
"seed_artists": str(query.seed_artists),
"seed_genres": str(query.seed_genres),
"seed_tracks": str(query.seed_tracks),
},
):
return get_recs(Client(query))
|
def solution(num):
cnt = 0
while num != 1 :
if cnt > 500 :
cnt = -1
break
        # if the number is even,
if num % 2 == 0 :
num = num // 2
        # if the number is odd,
else :
num = (num * 3) + 1
cnt += 1
return cnt
if __name__ == '__main__':
num = 16
    print(solution(num))
|
# Copyright 2020 QuantInsti Quantitative Learnings Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import json
import awscli
import logging
from piggin.common.utils import read_tags
class AwsEC2(object):
def __init__(self, access_key=None, secret_key=None,
profile_name=None, region=None):
session = boto3.Session(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
profile_name=profile_name)
if session.region_name is None:
self._default_region = region
else:
self._default_region = session.region_name
self._ec2r = session.resource('ec2', region_name=self._default_region)
self._ec2c = session.client('ec2', region_name=self._default_region)
self._logger = logging.getLogger('ec2')
def create_ec2(self, image_id=None, ninstance=1, key_name=None,
ebs_size=None, instance_type=None, tags = None,
ebs_type=None, config=None):
        data = {}
        if config:
            with open(config) as fp:
                data = json.load(fp)
        # required options (explicit arguments take precedence over the config file)
        image_id = image_id or data.get('image_id', None)
        key_name = key_name or data.get('key_name', None)
        ebs_size = ebs_size or data.get('ebs_size', None)
if not all([image_id, key_name, ebs_size]):
msg = 'image_id, key_name and ebs_size must be specified.'
raise ValueError(msg)
tags = read_tags(tags)
if not tags:
msg = 'no tags specified.'
raise ValueError(msg)
        # options with defaults
        ninstance = ninstance or data.get('ninstance', 1)
        instance_type = instance_type or data.get('instance_type', 't2.micro')
        ebs_type = ebs_type or data.get('ebs_type', 'standard')
try:
ins = self._ec2r.create_instances(
ImageId=image_id,
MinCount=1,
MaxCount=ninstance,
InstanceType=instance_type,
KeyName=key_name,
NetworkInterfaces=[
{'SubnetId': data['subnet'],
'DeviceIndex': 0,
'AssociatePublicIpAddress': True,
'Groups': data['group']}
],
BlockDeviceMappings=[
{'DeviceName': '/dev/sda1',
'Ebs': {'VolumeSize': ebs_size,
'VolumeType': ebs_type}}
]
)
except Exception as e:
raise e
try:
ins[0].wait_until_running()
except Exception as e:
raise e
try:
tagname=ins[0].id
self._ec2r.create_tags(Resources=[tagname], Tags=[tags])
except Exception as e:
raise e
def ls(self):
instances = self._ec2r.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
return instances
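# A minimal usage sketch (hypothetical, not part of the original module): the profile
# name and region below are placeholders, and only the read-only ls() call is exercised.
if __name__ == '__main__':
    ec2 = AwsEC2(profile_name='default', region='us-east-1')
    for instance in ec2.ls():
        # each item is a boto3 ec2.Instance resource
        print(instance.id, instance.instance_type, instance.state['Name'])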
|
import os
import numpy as np
import torch
from torch.utils import data
from NER_src.Config import max_seq_length
class InputExample(object):
"""A single training/test example for NER."""
def __init__(self, guid, words, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example(a sentence or a pair of sentences).
words: list of words of sentence
labels_a/labels_b: (Optional) string. The label seqence of the text_a/text_b. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
class InputFeatures(object):
"""A single set of features of data.
result of convert_examples_to_features(InputExample)
"""
def __init__(self, input_ids, input_mask, segment_ids, predict_mask, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.predict_mask = predict_mask
self.label_ids = label_ids
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_data(cls, input_file):
"""
Reads a BIO data.
"""
with open(input_file) as f:
# out_lines = []
out_lists = []
entries = f.read().strip().split("\n\n")
for entry in entries:
words = []
ner_labels = []
pos_tags = []
bio_pos_tags = []
for line in entry.splitlines():
pieces = line.strip().split()
if len(pieces) < 1:
continue
word = pieces[0]
words.append(word)
pos_tags.append(pieces[1])
bio_pos_tags.append(pieces[2])
ner_labels.append(pieces[-1])
out_lists.append([words, pos_tags, bio_pos_tags, ner_labels])
return out_lists
class CoNLLDataProcessor(DataProcessor):
def __init__(self):
self._label_types = ['X', '[CLS]', '[SEP]', 'O', 'I-LOC', 'B-PER', 'I-PER', 'I-ORG', 'I-MISC', 'B-MISC',
'B-LOC', 'B-ORG']
self._num_labels = len(self._label_types)
self._label_map = {label: i for i, label in enumerate(self._label_types)}
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_data(os.path.join(data_dir, "train.txt")))
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_data(os.path.join(data_dir, "valid.txt")))
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_data(os.path.join(data_dir, "test.txt")))
def get_labels(self):
return self._label_types
def get_num_labels(self):
        return self._num_labels
def get_label_map(self):
return self._label_map
def get_start_label_id(self):
return self._label_map['[CLS]']
def get_stop_label_id(self):
return self._label_map['[SEP]']
@staticmethod
def _create_examples(all_lists):
examples = []
for (i, one_lists) in enumerate(all_lists):
guid = i
words = one_lists[0]
labels = one_lists[-1]
examples.append(InputExample(
guid=guid, words=words, labels=labels))
return examples
@staticmethod
def _create_examples2(lines):
examples = []
for (i, line) in enumerate(lines):
guid = i
text = line[0]
ner_label = line[-1]
examples.append(InputExample(
                guid=guid, words=text, labels=ner_label))
return examples
def example2feature(example, tokenizer, label_map, max_seq_length):
add_label = 'X'
tokens = ['[CLS]']
predict_mask = [0]
label_ids = [label_map['[CLS]']]
for i, w in enumerate(example.words):
sub_words = tokenizer.tokenize(w)
if not sub_words:
sub_words = ['[UNK]']
tokens.extend(sub_words)
for j in range(len(sub_words)):
if j == 0:
predict_mask.append(1)
label_ids.append(label_map[example.labels[i]])
else:
predict_mask.append(0)
label_ids.append(label_map[add_label])
if len(tokens) > max_seq_length - 1:
print('Example No.{} is too long, length is {}, truncated to {}!'.format(example.guid, len(tokens),
max_seq_length))
tokens = tokens[0:(max_seq_length - 1)]
predict_mask = predict_mask[0:(max_seq_length - 1)]
label_ids = label_ids[0:(max_seq_length - 1)]
tokens.append('[SEP]')
predict_mask.append(0)
label_ids.append(label_map['[SEP]'])
input_ids = tokenizer.convert_tokens_to_ids(tokens)
segment_ids = [0] * len(input_ids)
input_mask = [1] * len(input_ids)
feat = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
predict_mask=predict_mask,
label_ids=label_ids)
return feat
class NerDataset(data.Dataset):
def __init__(self, examples, tokenizer, label_map, max_seq_length):
self.examples = examples
self.tokenizer = tokenizer
self.label_map = label_map
self.max_seq_length = max_seq_length
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
        feat = example2feature(self.examples[idx], self.tokenizer, self.label_map, self.max_seq_length)
return feat.input_ids, feat.input_mask, feat.segment_ids, feat.predict_mask, feat.label_ids
@classmethod
def pad(cls, batch):
seqlen_list = [len(sample[0]) for sample in batch]
maxlen = np.array(seqlen_list).max()
f = lambda x, seqlen: [sample[x] + [0] * (seqlen - len(sample[x])) for sample in batch] # 0: X for padding
input_ids_list = torch.LongTensor(f(0, maxlen))
input_mask_list = torch.LongTensor(f(1, maxlen))
segment_ids_list = torch.LongTensor(f(2, maxlen))
predict_mask_list = torch.ByteTensor(f(3, maxlen))
label_ids_list = torch.LongTensor(f(4, maxlen))
return input_ids_list, input_mask_list, segment_ids_list, predict_mask_list, label_ids_list
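# A minimal usage sketch (hypothetical; assumes a BERT-style tokenizer from the
# `transformers` package and a CoNLL-formatted data directory at a placeholder path):
if __name__ == '__main__':
    from transformers import BertTokenizer

    processor = CoNLLDataProcessor()
    label_map = processor.get_label_map()
    tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
    train_examples = processor.get_train_examples('data/CoNLL2003')  # placeholder path
    train_dataset = NerDataset(train_examples, tokenizer, label_map, max_seq_length)
    # NerDataset.pad right-pads every field in the batch to the longest sequence.
    train_loader = data.DataLoader(train_dataset, batch_size=32, shuffle=True,
                                   collate_fn=NerDataset.pad)
    input_ids, input_mask, segment_ids, predict_mask, label_ids = next(iter(train_loader))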
|
from pytest import mark
from leetcode.remove_element import Solution
from . import read_csv
@mark.parametrize(
'nums, val, expect, new_nums',
read_csv(__file__, parser=eval),
)
def test_remove_element(nums, val, expect, new_nums):
assert expect == Solution().removeElement(nums, val)
assert nums == new_nums
|
# coding=utf-8
import os
import unittest
from nose.tools import *
import pandas as pd
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.matcher.matcherutils as mu
from py_entitymatching.debugmatcher.debug_gui_decisiontree_matcher import _vis_debug_dt, \
vis_tuple_debug_dt_matcher
from py_entitymatching.debugmatcher.debug_decisiontree_matcher import visualize_tree, \
debug_decisiontree_matcher
from py_entitymatching.feature.autofeaturegen import get_features_for_matching
from py_entitymatching.feature.extractfeatures import extract_feature_vecs
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.matcher.dtmatcher import DTMatcher
from py_entitymatching.utils.generic_helper import get_install_path
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class VisDTDebugMatcherTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_vis_debug_matcher_dt_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
train_test = mu.split_train_test(feature_vectors)
train = train_test['train']
test = train_test['test']
_vis_debug_dt(dt, train, test,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels', show_window=False)
def test_vis_tuple_debug_dt_matcher_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
s = pd.DataFrame(feature_vectors.loc[0])
s1 = s.T
vis_tuple_debug_dt_matcher(dt, s1,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'])
def test_vis_tuple_debug_dt_matcher_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
s = pd.DataFrame(feature_vectors.loc[0])
s1 = s.T
vis_tuple_debug_dt_matcher(dt.clf, s1,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'])
def test_vis_tuple_debug_dt_matcher_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
feature_vectors.drop(['_id', 'ltable_ID', 'rtable_ID', 'labels'], axis=1, inplace=True)
s = pd.DataFrame(feature_vectors.loc[0])
s1 = s.T
vis_tuple_debug_dt_matcher(dt.clf, s1, exclude_attrs=None)
@raises(AssertionError)
def test_vis_debug_matcher_dt_invalid_df(self):
_vis_debug_dt(None, pd.DataFrame(), pd.DataFrame(),
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels', show_window=False)
@raises(AssertionError)
def test_vis_debug_matcher_dt_invalid_tar_attr(self):
_vis_debug_dt(DTMatcher(), pd.DataFrame(), pd.DataFrame(),
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr=None, show_window=False)
@raises(AssertionError)
def test_vis_debug_matcher_dt_ex_attrs_notin_train(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
train_test = mu.split_train_test(feature_vectors)
train = train_test['train']
test = train_test['test']
_vis_debug_dt(dt, train, test,
exclude_attrs=['_id', 'ltable_ID1', 'rtable_ID', 'labels'],
target_attr='labels', show_window=False)
@raises(AssertionError)
def test_vis_debug_matcher_dt_tar_attr_notin_train(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
train_test = mu.split_train_test(feature_vectors)
train = train_test['train']
test = train_test['test']
_vis_debug_dt(dt, train, test,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels1', show_window=False)
@raises(AssertionError)
def test_vis_debug_matcher_dt_ex_attrs_notin_test(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
train_test = mu.split_train_test(feature_vectors)
train = train_test['train']
test = train_test['test']
test.drop('_id', inplace=True, axis=1)
_vis_debug_dt(dt, train, test,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels', show_window=False)
# def test_vis_debug_matcher_dt_tar_attrs_notin_exattrs(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
#
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
# attrs_after='labels')
#
# dt = DTMatcher()
# train_test = mu.split_train_test(feature_vectors)
#
# train = train_test['train']
# test = train_test['test']
# _vis_debug_dt(dt, train, test,
# exclude_attrs=['_id', 'ltable_ID', 'rtable_ID'],
# target_attr='labels', show_window=False)
# def test_vis_debug_matcher_dt_label_col_wi_sp_name(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# labels = [0] * 7
# labels.extend([1] * 8)
# C['_predicted'] = labels
#
# feature_table = get_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
# attrs_after='_predicted')
#
# dt = DTMatcher()
# train_test = mu.split_train_test(feature_vectors)
#
# train = train_test['train']
# test = train_test['test']
# _vis_debug_dt(dt, train, test,
# exclude_attrs=['_id', 'ltable_ID', 'rtable_ID'],
# target_attr='_predicted', show_window=False)
class DTDebugMatcherTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_visualize_tree_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
visualize_tree(dt, feature_vectors.columns, exclude_attrs=['_id', 'ltable_ID',
'rtable_ID', 'labels'])
# @raises(AssertionError)
def test_visualize_tree_invalid_df(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
visualize_tree(dt.clf, feature_vectors.columns, exclude_attrs=['_id', 'ltable_ID',
'rtable_ID', 'labels'])
def test_debug_dt_matcher_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
labels = [0] * 7
labels.extend([1] * 8)
C['labels'] = labels
feature_table = get_features_for_matching(A, B)
feature_vectors = extract_feature_vecs(C, feature_table=feature_table,
attrs_after='labels')
dt = DTMatcher()
dt.fit(table=feature_vectors, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'labels'],
target_attr='labels')
debug_decisiontree_matcher(dt, A.loc[1], B.loc[2], feature_table=feature_table,
table_columns=feature_vectors.columns,
exclude_attrs=['ltable_ID', 'rtable_ID', '_id', 'labels'])
|
def fact(x):
if x == 0:
return 1
return x * fact(x - 1)
x = int(input())
print(fact(x))
|
import json
import os
import random
import xml.dom.minidom as xmldom
import re
import sys
args = sys.argv
if (len(args) < 2):
sys.exit(1)
path = args[1]
if(path[-1:] == "/"):
path = path[:-1]
filename = path + "/command/3/stdout.txt"
#filename = "D:/SVN/Ansible02/pargen-IIS/roles/stdout.txt"
result = {}
applicationPools_list = []
website_list = []
website_list_result = []
def getTagNodes(elementObj, tagName):
nodeList = elementObj.getElementsByTagName(tagName)
return nodeList
def getChildNodes(node):
nodeList = node.childNodes
return nodeList
def getNodeLocalName(node):
return node.localName
def hasChildNodes(node):
return node.hasChildNodes()
def getPoolNodeAttri(node, nodeMap, nodename):
nodeArrributes = node.attributes
i = 0
if nodeArrributes is not None and nodeArrributes.length != 0:
while nodeArrributes.length != 0 and i < nodeArrributes.length:
nodeAttr = nodeArrributes.item(i)
if nodename is not None:
attriName = nodename + '.' + nodeAttr.name
#attriName = getNodeLocalName(node.parentNode) + '.' + getNodeLocalName(node) + '.' + nodeAttr.name
nodeMap[attriName] = nodeAttr.value
i = i + 1
if hasChildNodes(node):
childNodeList = getChildNodes(node)
if childNodeList is not None and childNodeList.length > 0:
for childNode in childNodeList:
if getNodeLocalName(childNode) is not None:
nodename = nodename + '.' + getNodeLocalName(childNode) + '.'
nodeMap = getPoolNodeAttri(childNode, nodeMap, nodename)
if nodeArrributes is not None and nodeArrributes.length == 0:
if hasChildNodes(node):
childNodeList = getChildNodes(node)
if childNodeList is not None and childNodeList.length > 0:
for childNode in childNodeList:
if getNodeLocalName(childNode) is not None:
nodename = nodename + '.' + getNodeLocalName(childNode) + '.'
nodeMap = getPoolNodeAttri(childNode, nodeMap, nodename)
return nodeMap
def getWebsiteNodeAttri(parentNode, index, node, nodeMap):
nodeName = None
if parentNode is not None and getNodeLocalName(parentNode) == 'application':
parentAttributes = parentNode.attributes
if parentAttributes is not None and parentAttributes.length > 0:
j = 0
while j < parentAttributes.length:
if parentAttributes.item(j).name == 'path':
nodeName = parentAttributes.item(j).value.split('/')[1]
break
j = j + 1
if node is not None and getNodeLocalName(node) == 'application':
appliAttributes = node.attributes
if appliAttributes is not None and appliAttributes.length > 0:
j = 0
while j < appliAttributes.length:
if appliAttributes.item(j).name == 'path':
nodeName = appliAttributes.item(j).value.split('/')[1]
break
j = j + 1
nodeArrributes = node.attributes
if nodeArrributes is not None and nodeArrributes.length != 0:
i = 0
while nodeArrributes.length != 0 and i < nodeArrributes.length:
nodeAttr = nodeArrributes.item(i)
if nodeName is not None:
attriName = getNodeLocalName(node.parentNode) + '.' + nodeName + '.' + getNodeLocalName(node) + str(
index) + '.' + nodeAttr.name
else:
attriName = getNodeLocalName(node.parentNode) + '.' + getNodeLocalName(node) + str(
index) + '.' + nodeAttr.name
nodeMap[attriName] = nodeAttr.value
i = i + 1
if hasChildNodes(node):
childNodeList = getChildNodes(node)
if childNodeList is not None and childNodeList.length > 0:
ii = 0
while ii < len(childNodeList):
nodeMap = getWebsiteNodeAttri(node, ii, childNodeList[ii], nodeMap)
ii = ii + 1
if nodeArrributes is not None and nodeArrributes.length == 0:
if hasChildNodes(node):
childNodeList = getChildNodes(node)
if childNodeList is not None and childNodeList.length > 0:
i = 0
while i < len(childNodeList):
nodeMap = getWebsiteNodeAttri(node, i, childNodeList[i], nodeMap)
i = i + 1
return nodeMap
# main process
if os.path.isfile(filename):
doc = xmldom.parse(filename)
applicationHost = getTagNodes(doc, "system.applicationHost")
if applicationHost is not None and applicationHost.length > 0:
# get applicationPools
applicationPools = getTagNodes(applicationHost[0], "applicationPools")
if applicationPools is not None and applicationPools.length > 0:
pool_nodeList = getTagNodes(applicationPools[0], "add")
if pool_nodeList is not None and pool_nodeList.length > 0:
for pool in pool_nodeList:
poolMap = {}
poolMap = getPoolNodeAttri(pool, poolMap, 'add')
applicationPools_list.append(poolMap)
pool_nodeList = getTagNodes(applicationPools[0], "applicationPoolDefaults")
if pool_nodeList is not None and pool_nodeList.length > 0:
for pool in pool_nodeList:
poolMap = {}
poolMap = getPoolNodeAttri(pool, poolMap, 'applicationPoolDefaults')
applicationPools_list.append(poolMap)
# print 'applicationPools_list is:', applicationPools_list
# get sites
sites = getTagNodes(applicationHost[0], "sites")
if sites is not None and sites.length > 0:
site_nodeList = getTagNodes(sites[0], "site")
if site_nodeList is not None and site_nodeList.length > 0:
for site in site_nodeList:
siteMap = {}
siteMap = getWebsiteNodeAttri(None, 0, site, siteMap)
website_list.append(siteMap)
site_nodeList = getTagNodes(sites[0], "siteDefaults")
if site_nodeList is not None and site_nodeList.length > 0:
for site in site_nodeList:
siteMap = {}
siteMap = getWebsiteNodeAttri(None, 0, site, siteMap)
website_list.append(siteMap)
site_nodeList = getTagNodes(sites[0], "applicationDefaults")
if site_nodeList is not None and site_nodeList.length > 0:
for site in site_nodeList:
siteMap = {}
siteMap = getWebsiteNodeAttri(None, 0, site, siteMap)
website_list.append(siteMap)
site_nodeList = getTagNodes(sites[0], "virtualDirectoryDefaults")
if site_nodeList is not None and site_nodeList.length > 0:
for site in site_nodeList:
siteMap = {}
siteMap = getWebsiteNodeAttri(None, 0, site, siteMap)
website_list.append(siteMap)
#print('website_list is:', website_list)
# get all website information:application, virtual, binding
defaultPool_name = None
if len(website_list) > 0:
for website_info in website_list:
websiteMap = {}
parameter_map = {}
applicationId_list = []
bindingId_list = []
dict_keys = website_info.keys()
            # get application and binding for each website, and virtual for application
# 1. Get the ID that distinguishes each application, virtual, binding
for key in dict_keys:
key_match = re.match('sites.site\d*.name', key)
if key_match is not None:
key = key_match.group().strip()
websiteMap['VAR_WEBSITE_NAME'] = website_info.get(key)
key_match = re.match('site.(.*)\d*.application\d*.path', key)
if key_match is not None:
applicationId_list.append(key_match.group(1).strip())
key_match = re.match('bindings.binding(\d*).protocol', key)
if key_match is not None:
bindingId_list.append(key_match.group(1).strip())
# 2. Use id to assign application to the corresponding website group.
# Use id to assign virtual to the corresponding application group.
for key in dict_keys:
isParam = False
key_match = re.match('sites.site' + '\d*..*', key)
if key_match is not None:
isParam = True
continue
key_match = re.match('bindings.binding' + '\d*..*', key)
if key_match is not None:
isParam = True
continue
key_match = re.match('sites.virtualDirectoryDefaults' + '\d*..*', key)
if key_match is not None:
isParam = True
continue
key_match = re.match('sites.applicationDefaults' + '\d*..*', key)
if key_match is not None:
isParam = True
continue
for applicationId in applicationId_list:
key_match = re.match('site.' + applicationId + '\d*.application\d*..*', key)
if key_match is not None:
isParam = True
break
key_match = re.match('application.' + applicationId + '\d*.virtualDirectory(\d*)..*', key)
if key_match is not None:
isParam = True
break
if isParam == False:
parameter_map[key] = website_info.get(key)
websiteMap['parameters'] = parameter_map
siteApplication_list = []
for applicationId in applicationId_list:
applicationMap = {}
applicationVirtual_list = []
virtualDirId_list = []
for key in dict_keys:
key_match = re.match('site.' + applicationId + '\d*.application\d*.applicationPool', key)
if key_match is not None:
applicationMap['applicationPool'] = website_info.get(key)
continue
key_match = re.match('site.' + applicationId + '\d*.application\d*.path', key)
if key_match is not None:
applicationMap['applicationPath'] = website_info.get(key)
continue
key_match = re.match('application.' + applicationId + '\d*.virtualDirectory(\d*).physicalPath', key)
if key_match is not None:
virtualDirId_list.append(key_match.group(1).strip())
continue
applicationMap['applicationName'] = applicationId
for virtualDirId in virtualDirId_list:
applicationVirtualMap = {}
for key in dict_keys:
key_match = re.match(
'application.' + applicationId + '\d*.virtualDirectory' + virtualDirId + '.path', key)
if key_match is not None:
applicationVirtualMap['path'] = website_info.get(key)
key_match = re.match(
'application.' + applicationId + '\d*.virtualDirectory' + virtualDirId + '.physicalPath',
key)
if key_match is not None:
applicationVirtualMap['physicalPath'] = website_info.get(key)
if len(applicationVirtualMap) > 0:
applicationVirtual_list.append(applicationVirtualMap)
if len(applicationVirtual_list) > 0:
applicationMap['vituralDirList'] = applicationVirtual_list
if len(applicationMap) > 0:
siteApplication_list.append(applicationMap)
websiteMap['application'] = siteApplication_list
# 3. Use id to assign binding to the corresponding website group.
siteBinding_list = []
for bindId in bindingId_list:
bindingMap = {}
for key in dict_keys:
key_match = re.match('bindings.binding' + bindId + '.protocol', key)
if key_match is not None:
bindingMap['protocol'] = website_info.get(key)
key_match = re.match('bindings.binding' + bindId + '.bindingInformation', key)
if key_match is not None:
bindingMap['bindingInformation'] = website_info.get(key)
key_match = re.match('bindings.binding' + bindId + '.sslFlags', key)
if key_match is not None:
bindingMap['sslFlags'] = website_info.get(key)
if len(bindingMap) > 0:
siteBinding_list.append(bindingMap)
if len(siteBinding_list) > 0:
websiteMap['binding'] = siteBinding_list
website_list_result.append(websiteMap)
for key in dict_keys:
defaultPool_match = re.match('sites\d*.applicationDefaults\d*.applicationPool', key)
if defaultPool_match is not None:
defaultPool_name = website_info.get(key)
break
# print('website_list_result is:',website_list_result)
# Finalize the parameters in applicationPools_list_tmp to VAR_WEBAPPPOOL_NAME and VAR_WEBAPPPOOL_ATTRI
# get all applicationPools information
applicationPools_list_tmp = []
defaultPoolMap = {}
if len(applicationPools_list) > 0:
for applicationPool in applicationPools_list:
webapppool_map = {}
pool_keys = applicationPool.keys()
for pool_key in pool_keys:
attri_match = re.match('add..*(recycling.*)', pool_key)
if attri_match is not None:
attriName = attri_match.group(1).strip().replace('..', '.')
if (re.match('.*(processModel).*', attriName) is None) and (re.match('.*(failure).*', attriName) is None) and (re.match('.*(cpu).*', attriName) is None):
webapppool_map[attriName] = applicationPool.get(pool_key)
continue
secondAttri_match = re.match('add..*(failure.*)', pool_key)
if secondAttri_match is not None:
attriName = secondAttri_match.group(1).strip().replace('..', '.')
if (re.match('.*(processModel).*', attriName) is None) and (re.match('.*(recycling).*', attriName) is None) and (re.match('.*(cpu).*', attriName) is None):
webapppool_map[attriName] = applicationPool.get(pool_key)
continue
threeAttri_match = re.match('add..*(cpu.*)', pool_key)
if threeAttri_match is not None:
attriName = threeAttri_match.group(1).strip().replace('..', '.')
if (re.match('.*(processModel).*', attriName) is None) and (re.match('.*(recycling).*', attriName) is None) and (re.match('.*(failure).*', attriName) is None):
webapppool_map[attriName] = applicationPool.get(pool_key)
continue
threeAttri_match = re.match('add..*(processModel.*)', pool_key)
if threeAttri_match is not None:
attriName = threeAttri_match.group(1).strip().replace('..', '.')
if (re.match('.*(cpu).*', attriName) is None) and (re.match('.*(recycling).*', attriName) is None) and (re.match('.*(failure).*', attriName) is None):
webapppool_map[attriName] = applicationPool.get(pool_key)
continue
Attri_match = re.match('add.(.*)', pool_key)
if Attri_match is not None:
attriName = Attri_match.group(1).strip().replace('..', '.')
webapppool_map[attriName] = applicationPool.get(pool_key)
continue
poolDefault_match = re.match('applicationPoolDefaults.(.*)', pool_key)
if poolDefault_match is not None:
attriName = poolDefault_match.group(1).strip().replace('..', '.')
defaultPoolMap[attriName] = applicationPool.get(pool_key)
if defaultPool_name is not None:
defaultPoolMap['name'] = defaultPool_name
continue
'''
poolDefault_match = re.match('applicationPools.applicationPoolDefaults.(.*)', pool_key)
if poolDefault_match is not None:
defaultPoolMap[poolDefault_match.group(1).strip()] = applicationPool.get(pool_key)
continue
if re.match('applicationPool(.*)', pool_key) is not None:
continue
threeAttri_match = re.match('^(.*)', pool_key)
if threeAttri_match is not None:
webapppool_map[threeAttri_match.group(1).strip()] = applicationPool.get(pool_key)
'''
if len(webapppool_map) > 0:
applicationPools_list_tmp.append(webapppool_map)
#print 'applicationPools_list_tmp is:', applicationPools_list_tmp
for poolmap in applicationPools_list_tmp:
if 'name' in poolmap and poolmap['name'] == defaultPoolMap['name']:
keys = poolmap.keys()
if keys is not None and len(keys) > 0:
for key in keys:
defaultPoolMap[key] = poolmap.get(key)
applicationPools_list_tmp.remove(poolmap)
applicationPools_list_tmp.append(defaultPoolMap)
break
# 'applicationPools_list_tmp is:', applicationPools_list_tmp
pools_list = []
for pool in applicationPools_list_tmp:
pool_map = {}
poolAttri_map = {}
keys = pool.keys()
for key in keys:
if 'name' == key:
pool_map['VAR_WEBAPPPOOL_NAME'] = pool.get(key)
else:
poolAttri_map[key] = pool.get(key)
if len(poolAttri_map) > 0:
pool_map['VAR_WEBAPPPOOL_ATTRI'] = poolAttri_map
if len(pool_map) > 0:
pools_list.append(pool_map)
# ('website_list_result is:', website_list_result)
# Finalize the parameters in website_list_result to VAR_WEBSITE_NAME and VAR_WEBBINDING_INFO and VAR_WEBAPP_INFO and VAR_VIRTUALDIR_INFO
websiteInfo_list = []
for website in website_list_result:
websiteMap = {}
websiteInfo = {}
keys = website.keys()
for key in keys:
if 'VAR_WEBSITE_NAME' == key:
websiteMap['VAR_WEBSITE_NAME'] = website.get(key)
if 'binding' == key:
bindingList = []
bindings = website.get(key)
for binding in bindings:
bindingMap = {}
if 'sslFlags' in binding:
bindingMap['sslFlags'] = int(binding.get('sslFlags'))
if binding.get('protocol') == 'http' or binding.get('protocol') == 'https':
bindinginforList = binding.get('bindingInformation').split(':')
if len(bindinginforList) == 2:
if bindinginforList[0] != '*':
bindingMap['ip'] = bindinginforList[0]
bindingMap['port'] = bindinginforList[1]
if len(bindinginforList) == 3:
if bindinginforList[0] != '*':
bindingMap['ip'] = bindinginforList[0]
bindingMap['port'] = bindinginforList[1]
if bindinginforList[2] != '':
bindingMap['host_header'] = bindinginforList[2]
bindingMap['protocol'] = binding.get('protocol')
else:
bindingMap['protocol'] = binding.get('protocol')
bindingMap['host_header'] = binding.get('bindingInformation')
bindingMap['protocol'] = binding.get('protocol')
if len(bindingMap) > 0:
bindingList.append(bindingMap)
if len(bindingList) > 0:
websiteMap['VAR_WEBBINDING_INFO'] = bindingList
if 'application' == key:
applicationList = website.get(key)
application_list = []
websitePhysicalPath = False
virtual_list = []
for application in applicationList:
applicationName = ''
applicationMap = {}
if '/' != application.get('applicationPath'):
applicationName = application.get('applicationPath').split('/')[1]
applicationMap['name'] = applicationName
if 'applicationPool' in application and application.get('applicationPool') is not None:
applicationMap['poolname'] = application.get('applicationPool')
if '/' == application.get('applicationPath'):
websiteMap['poolname'] = application.get('applicationPool')
websitePhysicalPath = True
                    virtualList = application.get('vituralDirList') or []
for virtual in virtualList:
virtualMap = {}
if '/' == virtual.get('path'):
if websitePhysicalPath == False:
applicationMap['physical_path'] = virtual.get('physicalPath')
if websitePhysicalPath == True:
websiteInfo['physical_path'] = virtual.get('physicalPath')
websitePhysicalPath = False
elif '/' != virtual.get('path'):
virtualMap['name'] = virtual.get('path').split('/')[1]
virtualMap['physical_path'] = virtual.get('physicalPath')
if applicationName != '':
virtualMap['application'] = applicationName
if len(virtualMap) > 0:
virtual_list.append(virtualMap)
if len(applicationMap) > 0:
application_list.append(applicationMap)
if len(virtual_list) > 0:
websiteMap['VAR_VIRTUALDIR_INFO'] = virtual_list
# print ("virtual_list is:", websiteMap['VAR_VIRTUALDIR_INFO'])
if len(application_list) > 0:
websiteMap['VAR_WEBAPP_INFO'] = application_list
if 'parameters' == key:
parameterStr = None
paramMap = website.get(key)
parametersList_tmp = []
paramKeys = list(paramMap.keys())
if len(paramKeys) > 0:
i = 0
while i < len(paramKeys):
paramName_list = paramKeys[i].split('.', 2)
if paramName_list is not None and len(paramName_list) == 3:
paramName = re.sub('\d', '', paramName_list[1].strip()) + '.' + paramName_list[2].strip()
parameterStr = '"' + paramName + '":"' + paramMap.get(paramKeys[i]) + '"'
parametersList_tmp.append(parameterStr)
i = i + 1
parameterStr = '|'.join(parametersList_tmp)
if parameterStr is not None:
websiteInfo['parameters'] = parameterStr
websiteMap['VAR_WEBSITE_INFO'] = websiteInfo
if len(websiteMap) > 0:
websiteInfo_list.append(websiteMap)
#print 'websiteInfo_list is:', websiteInfo_list
#print 'pools_list is:', pools_list
result_list = []
website_pool_list = []
for pool in pools_list:
result_map = {}
#print "pool['VAR_WEBAPPPOOL_NAME'] is:", pool['VAR_WEBAPPPOOL_NAME']
for website in websiteInfo_list:
if 'VAR_WEBAPPPOOL_NAME' in pool and 'poolname' in website:
#print 'poolname is:', website['poolname']
if pool['VAR_WEBAPPPOOL_NAME'] == website['poolname']:
#pools_list.remove(pool)
website_pool_list.append(pool)
result_map['VAR_IIS_OS_Version'] = 'Windows Server 2016'
result_map['VAR_WEBAPPPOOL_NAME'] = pool['VAR_WEBAPPPOOL_NAME']
if 'VAR_WEBAPPPOOL_ATTRI' in pool:
result_map['VAR_WEBAPPPOOL_ATTRI'] = pool['VAR_WEBAPPPOOL_ATTRI']
result_map['VAR_WEBSITE_NAME'] = website['VAR_WEBSITE_NAME']
if 'VAR_WEBSITE_INFO' in website:
result_map['VAR_WEBSITE_INFO'] = website['VAR_WEBSITE_INFO']
if 'VAR_WEBBINDING_INFO' in website:
result_map['VAR_WEBBINDING_INFO'] = website['VAR_WEBBINDING_INFO']
if 'VAR_WEBAPP_INFO' in website:
result_map['VAR_WEBAPP_INFO'] = website['VAR_WEBAPP_INFO']
if 'VAR_VIRTUALDIR_INFO' in website:
result_map['VAR_VIRTUALDIR_INFO'] = website['VAR_VIRTUALDIR_INFO']
if website['poolname'] is None and pool['VAR_WEBAPPPOOL_NAME'] == defaultPool_name:
#print 'defaultPool_name is:', defaultPool_name
pools_list.remove(pool)
result_map['VAR_IIS_OS_Version'] = 'Windows Server 2016'
result_map['VAR_WEBAPPPOOL_NAME'] = defaultPool_name
if pool['VAR_WEBAPPPOOL_NAME'] == defaultPool_name and 'VAR_WEBAPPPOOL_ATTRI' in pool:
result_map['VAR_WEBAPPPOOL_ATTRI'] = pool['VAR_WEBAPPPOOL_ATTRI']
result_map['VAR_WEBSITE_NAME'] = website['VAR_WEBSITE_NAME']
if 'VAR_WEBSITE_INFO' in website:
result_map['VAR_WEBSITE_INFO'] = website['VAR_WEBSITE_INFO']
if 'VAR_WEBBINDING_INFO' in website:
result_map['VAR_WEBBINDING_INFO'] = website['VAR_WEBBINDING_INFO']
if 'VAR_WEBAPP_INFO' in website:
result_map['VAR_WEBAPP_INFO'] = website['VAR_WEBAPP_INFO']
if 'VAR_VIRTUALDIR_INFO' in website:
result_map['VAR_VIRTUALDIR_INFO'] = website['VAR_VIRTUALDIR_INFO']
# print('result_map is:', result_map)
if len(result_map) > 0:
result_list.append(result_map)
#print 'pools_list is:', pools_list
#print 'website_pool_list is:', website_pool_list
singPoolList = []
for pool in pools_list:
isSingle = True
for website_pool in website_pool_list:
if 'VAR_WEBAPPPOOL_NAME' in website_pool and 'VAR_WEBAPPPOOL_NAME' in pool:
if website_pool['VAR_WEBAPPPOOL_NAME'] == pool['VAR_WEBAPPPOOL_NAME']:
isSingle = False
break
if isSingle == True:
singPoolList.append(pool)
#print 'singPoolList is:', singPoolList
for pool in singPoolList:
result_map = {}
if 'VAR_WEBAPPPOOL_NAME' in pool:
result_map['VAR_IIS_OS_Version'] = 'Windows Server 2016'
result_map['VAR_WEBAPPPOOL_NAME'] = pool['VAR_WEBAPPPOOL_NAME']
if 'VAR_WEBAPPPOOL_ATTRI' in pool:
result_map['VAR_WEBAPPPOOL_ATTRI'] = pool['VAR_WEBAPPPOOL_ATTRI']
if len(result_map) > 0:
result_list.append(result_map)
#result['websitelist'] = result_list
#print('result list is:', result_list)
result_count = {}
result_count['count'] = len(result_list)
print(json.dumps(result_count))
|
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import traceback
from xcube.core.normalize import DatasetIsNotACubeError
from xcube.core.normalize import decode_cube
from xcube.core.store import DATASET_TYPE
from xcube.core.store import DataStoreError
from xcube.core.store import DataStorePool
from xcube.core.store import get_data_store_instance
from xcube.core.store import new_data_opener
from xcube.util.assertions import assert_instance
from xcube.util.progress import observe_progress
from .transformer import TransformedCube
from ..config import CubeConfig
from ..config import InputConfig
from ..error import CubeGeneratorError
# Names of cube configuration parameters that
# are not shared with open parameters.
_STEADY_CUBE_CONFIG_NAMES = {
'chunks',
'tile_size'
}
class CubeOpener:
def __init__(self,
cube_config: CubeConfig,
store_pool: DataStorePool = None):
assert_instance(cube_config, CubeConfig, 'cube_config')
if store_pool is not None:
assert_instance(store_pool, DataStorePool, 'store_pool')
self._cube_config = cube_config
self._store_pool = store_pool
def open_cube(self, input_config: InputConfig) -> TransformedCube:
cube_config = self._cube_config
cube_params = cube_config.to_dict()
opener_id = input_config.opener_id
store_params = input_config.store_params or {}
open_params = input_config.open_params or {}
with observe_progress('reading cube', 3) as observer:
try:
if input_config.store_id:
store_instance = get_data_store_instance(
input_config.store_id,
store_params=store_params,
store_pool=self._store_pool
)
store = store_instance.store
if opener_id is None:
opener_id = self._get_opener_id(
input_config, store
)
opener = store
open_params = dict(open_params)
open_params['opener_id'] = opener_id
else:
opener = new_data_opener(opener_id)
open_params = dict(open_params)
open_params.update(store_params)
open_params_schema = opener.get_open_data_params_schema(
input_config.data_id
)
dataset_open_params = {
k: v for k, v in cube_params.items()
if k in open_params_schema.properties
}
observer.worked(1)
dataset = opener.open_data(input_config.data_id,
**open_params,
**dataset_open_params)
observer.worked(1)
except DataStoreError as dse:
raise CubeGeneratorError(f'{dse}', status_code=400) from dse
# Turn dataset into cube and grid_mapping
try:
cube, gm, _ = decode_cube(dataset,
normalize=True)
except DatasetIsNotACubeError as e:
raise CubeGeneratorError(f'{e}') from e
observer.worked(1)
if dataset_open_params:
drop_names = [k for k in dataset_open_params.keys()
if k not in _STEADY_CUBE_CONFIG_NAMES]
cube_config = cube_config.drop_props(drop_names)
return cube, gm, cube_config
@classmethod
def _get_opener_id(cls, input_config, store) -> str:
opener_ids = None
data_type_names = store.get_data_types_for_data(
input_config.data_id
)
for data_type_name in data_type_names:
if DATASET_TYPE.is_super_type_of(data_type_name):
opener_ids = \
store.get_data_opener_ids(
data_id=input_config.data_id,
data_type=data_type_name
)
break
if not opener_ids:
raise CubeGeneratorError(f'Data store {input_config.store_id!r}'
f' does not support datasets',
status_code=400)
opener_id = opener_ids[0]
return opener_id
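# Hedged usage sketch (assumptions: `cube_config` is a CubeConfig and
# `input_config` an InputConfig, both normally built from a cube generator
# request; their construction is not shown here, and a DataStorePool may be
# passed as the optional second argument):
#
#   opener = CubeOpener(cube_config)
#   cube, grid_mapping, effective_cube_config = opener.open_cube(input_config)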
|
import yaml
class Config(object):
with open('./config/config.yaml', encoding='utf-8') as f:
__config = yaml.safe_load(f)
@staticmethod
def getInstance():
return Config.__config
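# Minimal usage sketch: the YAML file is parsed once at import time, so every
# caller shares the same mapping object.
#
#   settings = Config.getInstance()
#   # settings is the dict produced by yaml.safe_load on ./config/config.yaml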
|
import gym
import numpy as np
from typing import Dict, Tuple
class NoopResetEnv(gym.Wrapper):
    def __init__(self,
                 env: gym.Env,
                 no_op_max=30,
                 ):
"""
Samples initial states by performing a random number of no operations on reset.
Slightly modified from OpenAI baselines AtariWrappers. As detailed in Mnih et al. (2015) -- aka Nature paper.
:param env: the inner environment
:param no_op_max: maximum number of no operations
"""
super().__init__(env)
self.no_op_max = no_op_max
self.no_op_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self,
**kwargs,
) -> np.ndarray:
"""
Resets the environment
:param kwargs: keyword arguments of the OpenAI core
:return: state
"""
self.env.reset(**kwargs)
no_ops = np.random.randint(1, self.no_op_max + 1)
state = None
for _ in range(no_ops):
state, _, done, _ = self.env.step(self.no_op_action)
if done:
state = self.env.reset(**kwargs)
return state
def step(self,
action: int,
) -> Tuple[np.ndarray, float, bool, Dict]:
"""
Performs the provided action
:param action: the action taken
:return: state, reward, done, information dictionary
"""
return self.env.step(action)
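# Hedged usage sketch (the environment id is an assumption; any Atari env whose
# first action meaning is 'NOOP' works):
#
#   env = NoopResetEnv(gym.make("PongNoFrameskip-v4"), no_op_max=30)
#   state = env.reset()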
|
from params import *
from K2_K2_64x44 import K2_K2_transpose_64x44
p = print
def karatsuba_eval(dst, dst_off, coeff, src, t0, t1):
""" t1 can overlap with any source register, but not t0 """
p("vmovdqa %ymm{}, {}({})".format(src[0], (dst_off+3*0+coeff)*32, dst)) # a[0:]
p("vmovdqa %ymm{}, {}({})".format(src[1], (dst_off+3*1+coeff)*32, dst)) # a[44:]
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[0], src[1], t0))
p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+3*2+coeff)*32, dst)) # s1[0:]
p("vmovdqa %ymm{}, {}({})".format(src[2], (dst_off+3*3+coeff)*32, dst)) # a[88:]
p("vmovdqa %ymm{}, {}({})".format(src[3], (dst_off+3*4+coeff)*32, dst)) # a[132:]
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[2], src[3], t0))
p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+3*5+coeff)*32, dst)) # s2[0:]
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[0], src[2], t0))
p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+3*6+coeff)*32, dst)) # s0[0:]
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[1], src[3], t1))
p("vmovdqa %ymm{}, {}({})".format(t1, (dst_off+3*7+coeff)*32, dst)) # s0[44:]
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(t0, t1, t0))
p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+3*8+coeff)*32, dst)) # s3[0:]
def karatsuba_interpolate(dst, dst_off, src, src_off, coeff):
""" Destroys all ymm regs and does not leave useful values.
In practice we're doing 7 of these sequentially, so there is no reasonable
way to save any high-coefficients results. """
def addr(i, off):
return '{}({})'.format((src_off+3*(2*i+off//44)+coeff)*32, src)
r0_44 = 0
p("vmovdqa {}, %ymm{}".format(addr(0, 44), r0_44))
out0_44 = r0_44
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(1, 0), r0_44, out0_44))
r2_44 = 1
p("vmovdqa {}, %ymm{}".format(addr(2, 44), r2_44))
out1_0 = r2_44
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out0_44, r2_44, out1_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(1, 44), out1_0, out1_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(0, 0), out0_44, out0_44))
p("vpaddw {}, %ymm{}, %ymm{}".format(addr(2, 0), out0_44, out0_44))
r3_44 = 2
p("vmovdqa {}, %ymm{}".format(addr(3, 44), r3_44))
out2_44 = r3_44
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(4, 0), r3_44, out2_44))
r5_44 = 3
p("vmovdqa {}, %ymm{}".format(addr(5, 44), r5_44))
out3_0 = r5_44
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out2_44, r5_44, out3_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(4, 44), out3_0, out3_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(3, 0), out2_44, out2_44))
p("vpaddw {}, %ymm{}, %ymm{}".format(addr(5, 0), out2_44, out2_44))
r6_44 = 4
p("vmovdqa {}, %ymm{}".format(addr(6, 44), r6_44))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(7, 0), r6_44, r6_44))
r8_44 = 5
p("vmovdqa {}, %ymm{}".format(addr(8, 44), r8_44))
r7_0 = r8_44
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r6_44, r8_44, r7_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(7, 44), r7_0, r7_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(6, 0), r6_44, r6_44))
p("vpaddw {}, %ymm{}, %ymm{}".format(addr(8, 0), r6_44, r6_44))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(3, 0), out1_0, out1_0))
out2_0 = r7_0
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out1_0, r7_0, out2_0))
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out3_0, out2_0, out2_0))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(0, 0), out1_0, out1_0))
p("vpaddw {}, %ymm{}, %ymm{}".format(addr(6, 0), out1_0, out1_0))
r1_44 = 6
p("vmovdqa {}, %ymm{}".format(addr(1, 44), r1_44))
out1_44 = 7
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out2_44, r1_44, out1_44))
r7_44 = out2_44
p("vmovdqa {}, %ymm{}".format(addr(7, 44), r7_44))
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out1_44, r7_44, out2_44))
p("vpsubw {}, %ymm{}, %ymm{}".format(addr(4, 44), out2_44, out2_44))
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out0_44, out1_44, out1_44))
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(r6_44, out1_44, out1_44))
# TODO can get rid of these by fetching them from the right place during Toom4 eval
out0_0 = 8
out3_44 = 9
p("vmovdqa {}, %ymm{}".format(addr(0, 0), out0_0))
p("vmovdqa {}, %ymm{}".format(addr(4, 44), out3_44))
# TODO should move these up in between computations for better pipelining?
p("vmovdqa %ymm{}, {}({})".format(out0_0, (dst_off+2*0+0)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out0_44, (dst_off+2*0+1)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out1_0, (dst_off+2*1+0)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out1_44, (dst_off+2*1+1)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out2_0, (dst_off+2*2+0)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out2_44, (dst_off+2*2+1)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out3_0, (dst_off+2*3+0)*32, dst))
p("vmovdqa %ymm{}, {}({})".format(out3_44, (dst_off+2*3+1)*32, dst))
def idx2off(i):
""" Produces [0, 32, 64, 88, 120, 152, 176, 208, 240, 264, 296, 328]
These are the byte offsets when dividing into 44-coeff chunks"""
return i * 32 - (8 * (i//3))
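# Module-level sanity checks (they produce no output, so the generated assembly
# is unaffected): the offset table from the docstring above, and the modular
# inverses baked into const3_inv / const5_inv further down.
assert [idx2off(i) for i in range(12)] == \
    [0, 32, 64, 88, 120, 152, 176, 208, 240, 264, 296, 328]
assert (3 * 43691) % 2 ** 16 == 1
assert (5 * 52429) % 2 ** 16 == 1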
if __name__ == '__main__':
p(".data")
p(".p2align 5")
p("mask_low9words:")
for i in [65535]*9 + [0]*7:
p(".word 0x{:x}".format(i))
p("const3:")
for i in range(16):
p(".word 3")
p("const9:")
for i in range(16):
p(".word 9")
p("const0:")
for i in range(16):
p(".word 0")
p("const729:")
for i in range(16):
p(".word 729")
p("const3_inv:") # inverse of 3 mod 2**16
for i in range(16):
p(".word 43691")
p("const5_inv:") # inverse of 3 mod 2**16
for i in range(16):
p(".word 52429")
p("shuf48_16:")
for j in range(2):
for i in range(16):
p(".byte {}".format((i - 6) % 16))
p("shuf48_12:")
for i in range(16):
p(".byte {}".format((i - 6) % 16))
for i in range(8):
p(".byte 255")
for i in range(8):
p(".byte {}".format((i - 6) % 8))
p("shufmin1_mask3:")
for i in range(6):
p(".byte {}".format((i + 2) % 16))
for i in range(26):
p(".byte 255")
p("mask32_to_16:")
for a, b in zip([65535]*8, [0]*8):
p(".word 0x{:x}".format(a))
p(".word 0x{:x}".format(b))
p("mask5_3_5_3:")
for i in range(16):
p(".word {}".format(0 if i % 8 < 3 else 65535))
p("mask3_5_3_5:")
for i in range(16):
p(".word {}".format(65535 if i % 8 < 3 else 0))
p("mask3_5_4_3_1:")
for i in range(8):
p(".word {}".format(65535 if i % 8 < 3 else 0))
for i in range(4):
p(".word 0")
for i in range(3):
p(".word 65535")
p(".word 0")
p("mask_keephigh:")
for i in range(8):
p(".word 0")
for i in range(8):
p(".word 65535")
p(".text")
p(".global {}poly_Rq_mul".format(NAMESPACE))
p(".global _{}poly_Rq_mul".format(NAMESPACE))
p("{}poly_Rq_mul:".format(NAMESPACE))
p("_{}poly_Rq_mul:".format(NAMESPACE))
# assume a and b in rsi and rdx respectively
# assume destination pointer in rdi
r_real = '%rdi'
a_real = '%rsi'
b_real = '%rdx'
# karatsuba layers use registers rcx, r9 and r10
# r8 is used to store the stack pointer
# that leaves rax and r11 for pointers, so we must preserve one more
p("push %r12")
r_out = '%r12'
a_prep = '%rax'
b_prep = '%r11'
p("mov %rsp, %r8") # Use r8 to store the old stack pointer during execution.
p("andq $-32, %rsp") # Align rsp to the next 32-byte value, for vmovdqa.
# allocate destination block for prepared a
p("subq ${}, %rsp".format((64 * 48 // 16) * 32))
p("mov %rsp, {}".format(a_prep))
# allocate destination block for prepared b
p("subq ${}, %rsp".format((64 * 48 // 16) * 32))
p("mov %rsp, {}".format(b_prep))
# allocate destination block for resulting r
p("subq ${}, %rsp".format((64 * 96 // 16) * 32))
p("mov %rsp, {}".format(r_out))
# allocate some space for f0-f3
p("subq ${}, %rsp".format(16 * 32))
###### evaluate Toom4 / K2 / K2
# think of blocks of 44 coefficients, for karatsuba preparation
# we evaluate for first 16 coefficients of each block, then 16, then 12
const_3 = 3
p("vmovdqa const3(%rip), %ymm{}".format(const_3))
for (prep, real) in [(a_prep, a_real), (b_prep, b_real)]:
for coeff in range(3):
f0 = [0, 1, 2, 12] # we already have const_3 in 3 (keeping it saves 5 loads)
# TODO replace vmovdqu with vmovdqa when possible
for i, r in enumerate(f0):
p("vmovdqu {}({}), %ymm{}".format(0*11*32+idx2off(i*3+coeff), real, r))
f3 = [4, 5, 6, 7]
for i, r in enumerate(f3):
p("vmovdqu {}({}), %ymm{}".format(3*11*32+idx2off(i*3+coeff), real, r))
            # there are 701 coefficients, not 704;
            # mask out the final 7 words (3 because only 701 of the padded 704
            # coefficients are real, and 4 because each block holds 44 coefficients, not 48)
if coeff == 2:
p("vpand mask_low9words(%rip), %ymm{}, %ymm{}".format(f3[3], f3[3]))
# retrieve f1 so we can store it in the stack and use for vpadd
f1 = [8, 9, 10, 11]
for i, r in enumerate(f1):
p("vmovdqu {}({}), %ymm{}".format(1*11*32+idx2off(i*3+coeff), real, r))
t0 = 14
t1 = 15
karatsuba_eval(prep, dst_off=0*9*3, src=f0, t0=t0, t1=t1, coeff=coeff)
karatsuba_eval(prep, dst_off=6*9*3, src=f3, t0=t0, t1=t1, coeff=coeff)
# store f0 and f1 so we can use those registers (storing guarantees alignment)
for i, r in enumerate(f0):
p("vmovdqa %ymm{}, {}(%rsp)".format(r, (0*4+i)*32))
for i, r in enumerate(f1):
p("vmovdqa %ymm{}, {}(%rsp)".format(r, (1*4+i)*32))
x1 = [8, 9, 10, 11]
x2 = [12, 13, 14, 15]
for i in range(4):
f2_i = 0
p("vmovdqu {}({}), %ymm{}".format(2*11*32+idx2off(i*3+coeff), real, f2_i))
# TODO: run out of register
f0f2_i = 1
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((0*4+i)*32, f2_i, f0f2_i))
f1f3_i = 2
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((1*4+i)*32, f3[i], f1f3_i))
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(f1f3_i, f0f2_i, x1[i]))
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(f1f3_i, f0f2_i, x2[i]))
# also store the retrieved element of f2 on the stack, makes addition easier later
p("vmovdqa %ymm{}, {}(%rsp)".format(f2_i, (2*4+i)*32))
t0 = 0
t1 = 1
karatsuba_eval(prep, dst_off=1*9*3, src=x1, t0=t0, t1=t1, coeff=coeff)
karatsuba_eval(prep, dst_off=2*9*3, src=x2, t0=t0, t1=t1, coeff=coeff)
x3 = [8, 9, 10, 11]
x4 = [12, 13, 14, 15]
for i in range(4):
f2_i = 0
p("vmovdqa {}(%rsp), %ymm{}".format((2*4+i)*32, f2_i))
f2_4_i = 0
p("vpsllw $2, %ymm{}, %ymm{}".format(f2_i, f2_4_i))
f0f2_4_i = 0
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((0*4+i)*32, f2_4_i, f0f2_4_i))
f3_4_i = 1
p("vpsllw $2, %ymm{}, %ymm{}".format(f3[i], f3_4_i))
f1f3_4_i = 1
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((1*4+i)*32, f3_4_i, f1f3_4_i))
f1_2f3_8_i = 1
p("vpsllw $1, %ymm{}, %ymm{}".format(f1f3_4_i, f1_2f3_8_i))
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(f1_2f3_8_i, f0f2_4_i, x3[i]))
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(f1_2f3_8_i, f0f2_4_i, x4[i]))
t0 = 0
t1 = 1
karatsuba_eval(prep, dst_off=3*9*3, src=x3, t0=t0, t1=t1, coeff=coeff)
karatsuba_eval(prep, dst_off=4*9*3, src=x4, t0=t0, t1=t1, coeff=coeff)
x5 = [12, 13, 14, 15]
for i in range(4):
f3_3_i = 0
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const_3, f3[i], f3_3_i))
f2f3_3_i = 0
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((2*4+i)*32, f3_3_i, f2f3_3_i))
f2_3f3_9_i = 0
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const_3, f2f3_3_i, f2_3f3_9_i))
f1f2_3f3_9_i = 0
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((1*4+i)*32, f2_3f3_9_i, f1f2_3f3_9_i))
f1_3f2_9f3_27_i = 0
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const_3, f1f2_3f3_9_i, f1_3f2_9f3_27_i))
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((0*4+i)*32, f1_3f2_9f3_27_i, x5[i]))
karatsuba_eval(prep, dst_off=5*9*3, src=x5, t0=t0, t1=t1, coeff=coeff)
K2_K2_transpose_64x44(r_out, a_prep, b_prep)
###### interpolate Toom4 / K2 / K2
# we could have probably left something in registers after the transpose
# but that is extremely messy and would've maybe saved ten cycles at most
    # each interpolated result yields 8 chunks of 44 coefficients, and we have 7
    # results (352 / 44 = 8; 3-way sequential over coefficients => 8 registers)
    # 16 register-sized stack slots are already allocated for f0-f3, so allocate 40 more
    # we also allocate 4x8 register-sized slots for the words that drop out of h3,
    # h4, h5 and h6 during composition / reduction, which need to be added back later
    # plus 3 slots for the two words that spill out of h2 into h0, out of h3
    # into h1 and out of h4 into h2 (one slot each)
p("subq ${}, %rsp".format((56 - 16 + 4*8 + 3) * 32))
compose_offset = 56
far_spill_offset = compose_offset + 4*8
    # we zero the space for composition; later we rely on the top half being 0
# by only vmovdqa'ing the xmm part (so we do not need to mask)
p("vpxor %ymm0, %ymm0, %ymm0")
for i in range(4*8):
p("vmovdqa %ymm0, {}(%rsp)".format((compose_offset+i)*32))
registers = list(range(16))
def free(*regs):
for x in regs:
if x in registers:
raise Exception("This register is already freed")
registers.append(x)
def alloc():
return registers.pop()
const729 = alloc()
p("vmovdqa const729(%rip), %ymm{}".format(const729))
const3_inv = alloc()
p("vmovdqa const3_inv(%rip), %ymm{}".format(const3_inv))
const5_inv = alloc()
p("vmovdqa const5_inv(%rip), %ymm{}".format(const5_inv))
const9 = alloc()
p("vmovdqa const9(%rip), %ymm{}".format(const9))
# consider swapping this around for more closely linked memory access
# they're somewhat spread around because of how the transpose worked, but
# staying sane while incrementally writing/testing this is also important
for coeff in range(3):
for i in range(7):
karatsuba_interpolate(dst='%rsp', dst_off=i*4*2, src=r_out, src_off=i*9*6, coeff=coeff)
# after interpolating, we can even go 24-way sequential;
# none of the 44-coefficient chunks interact anymore before reduction
for j in range(8): # for each 16 (or 12) coefficient chunk
def limb(i):
# TODO see above; for case j in {0, 8}, make an exception
return '{}(%rsp)'.format((i*8+j)*32)
h0 = alloc()
p("vmovdqa {}, %ymm{}".format(limb(0), h0))
h0lo = alloc()
h0hi = alloc()
p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(h0, h0lo))
p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(h0, h0hi))
free(h0lo)
h0_2lo = alloc()
p("vpslld $1, %ymm{}, %ymm{}".format(h0lo, h0_2lo))
free(h0hi)
h0_2hi = alloc()
p("vpslld $1, %ymm{}, %ymm{}".format(h0hi, h0_2hi))
t1 = alloc()
p("vmovdqa {}, %ymm{}".format(limb(1), t1))
t1lo = alloc()
p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(t1, t1lo))
free(t1)
t1hi = alloc()
p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(t1, t1hi))
t2 = alloc()
p("vmovdqa {}, %ymm{}".format(limb(2), t2))
t2lo = alloc()
p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(t2, t2lo))
free(t2)
t2hi = alloc()
p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(t2, t2hi))
t11lo = alloc()
p("vpaddd %ymm{}, %ymm{}, %ymm{}".format(t2lo, t1lo, t11lo))
t11hi = alloc()
p("vpaddd %ymm{}, %ymm{}, %ymm{}".format(t2hi, t1hi, t11hi))
free(h0_2lo, t11lo)
t11c1lo = alloc()
p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h0_2lo, t11lo, t11c1lo))
free(h0_2hi, t11hi)
t11c1hi = alloc()
p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h0_2hi, t11hi, t11c1hi))
free(t1lo, t2lo)
t12lo = alloc()
p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(t2lo, t1lo, t12lo))
free(t1hi, t2hi)
t12hi = alloc()
p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(t2hi, t1hi, t12hi))
p("vpsrld $1, %ymm{}, %ymm{}".format(t12lo, t12lo))
p("vpsrld $1, %ymm{}, %ymm{}".format(t12hi, t12hi))
p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t12lo, t12lo))
p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t12hi, t12hi))
free(t12lo, t12hi)
r11s = alloc()
p("vpackusdw %ymm{}, %ymm{}, %ymm{}".format(t12hi, t12lo, r11s))
h6 = alloc()
p("vmovdqa {}, %ymm{}".format(limb(6), h6))
h6lo = alloc()
p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(h6, h6lo))
h6hi = alloc()
p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(h6, h6hi))
free(h6lo)
h6_2lo = alloc()
p("vpslld $1, %ymm{}, %ymm{}".format(h6lo, h6_2lo))
free(h6hi)
h6_2hi = alloc()
p("vpslld $1, %ymm{}, %ymm{}".format(h6hi, h6_2hi))
free(h6_2lo, t11c1lo)
t11c2lo = alloc()
p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h6_2lo, t11c1lo, t11c2lo))
free(h6_2hi, t11c1hi)
t11c2hi = alloc()
p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h6_2hi, t11c1hi, t11c2hi))
p("vpsrld $1, %ymm{}, %ymm{}".format(t11c2lo, t11c2lo))
p("vpsrld $1, %ymm{}, %ymm{}".format(t11c2hi, t11c2hi))
p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t11c2lo, t11c2lo))
p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t11c2hi, t11c2hi))
free(t11c2lo, t11c2hi)
r11 = alloc()
p("vpackusdw %ymm{}, %ymm{}, %ymm{}".format(t11c2hi, t11c2lo, r11))
t3 = alloc()
p("vmovdqa {}, %ymm{}".format(limb(3), t3))
t13 = alloc()
p("vpaddw {}, %ymm{}, %ymm{}".format(limb(4), t3, t13))
free(t3)
t14 = alloc()
p("vpsubw {}, %ymm{}, %ymm{}".format(limb(4), t3, t14))
free(t14)
r12s = alloc()
p("vpsrlw $2, %ymm{}, %ymm{}".format(t14, r12s))
free(r12s)
e12s = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r11s, r12s, e12s))
free(e12s)
r22 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const3_inv, e12s, r22))
h0_2 = alloc()
p("vpsllw $1, %ymm{}, %ymm{}".format(h0, h0_2))
free(t13, h0_2)
t13c1 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h0_2, t13, t13c1))
h6_128 = alloc()
p("vpsllw $7, %ymm{}, %ymm{}".format(h6, h6_128))
free(t13c1, h6_128)
t13c2 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h6_128, t13c1, t13c2))
free(t13c2)
r12 = alloc()
p("vpsrlw $3, %ymm{}, %ymm{}".format(t13c2, r12))
free(r12)
e12 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r11, r12, e12))
# currently alive: h0, r11, e12, r11s, r22, h6
t5 = alloc()
p("vmovdqa {}, %ymm{}".format(limb(5), t5))
free(t5)
t5c1 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h0, t5, t5c1))
h6_729 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const729, h6, h6_729))
free(t5c1, h6_729)
t5c2 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h6_729, t5c1, t5c2))
free(e12)
h4 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const3_inv, e12, h4))
free(r11)
h2 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h4, r11, h2))
h4_9 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const9, h4, h4_9))
# currently alive: h0, h2, h4, h6, h4_9, r22, t5c2, r11s
free(h4_9)
h2h4_9 = alloc()
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(h4_9, h2, h2h4_9))
free(h2h4_9)
h2_9h4_81 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const9, h2h4_9, h2_9h4_81))
free(t5c2, h2_9h4_81)
t16 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h2_9h4_81, t5c2, t16))
free(t16)
r13 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const3_inv, t16, r13))
free(r13)
e13 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r11s, r13, e13))
free(e13)
r23 = alloc()
p("vpsrlw $3, %ymm{}, %ymm{}".format(e13, r23))
free(r23)
e23 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r22, r23, e23))
free(r22)
h3 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(e23, r22, h3))
free(r11s)
im1 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h3, r11s, im1))
free(e23)
h5 = alloc()
p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const5_inv, e23, h5))
free(im1)
h1 = alloc()
p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h5, im1, h1))
# currently alive: h0, h1, h2, h3, h4, h5, h6
h = [h0, h1, h2, h3, h4, h5, h6]
# TODO replace vmovdqu with vmovdqa when possible (calculate alignment?)
def get_limb(limbreg, i, j):
p("vmovdqu {}({}), %ymm{}".format((i*176 + j * 44 + coeff*16) * 2, r_real, limbreg))
def store_limb(limbreg, i, j):
if coeff == 2:
if i == 3 and j >= 4: # this part exceeds 704
return
p("vmovdqu %xmm{}, {}({})".format(limbreg, (i*176 + j * 44 + coeff*16) * 2, r_real))
p("vextracti128 $1, %ymm{}, %xmm{}".format(limbreg, limbreg, limbreg))
p("vmovq %xmm{}, {}({})".format(limbreg, (i*176 + j * 44 + coeff*16 + 8) * 2, r_real))
                    if j == 3:  # these are coefficients 701 to 704, which we must spill onto the stack
p("vpshufb shufmin1_mask3(%rip), %ymm{}, %ymm{}".format(limbreg, limbreg))
# p("vpand mask3_5_4_3_1, %ymm{}, %ymm{}".format(limbreg, limbreg))
p("vmovdqa %xmm{}, {}(%rsp)".format(limbreg, (compose_offset+0*8+j-(3-i))*32))
else:
if i == 3 and j >= 4: # this part exceeds 704
return
p("vmovdqu %ymm{}, {}({})".format(limbreg, (i*176 + j * 44 + coeff*16) * 2, r_real))
            # these exceptional cases have coefficients overflowing two limbs over;
            # 2 coefficients from h2 go into h0 (wrapped around), h3 into h1, h4 into h2
if j == 7 and coeff == 2:
for i in [2, 3, 4]:
tmp = alloc()
p("vextracti128 $1, %ymm{}, %xmm{}".format(h[i], tmp))
p("vpshufb shufmin1_mask3(%rip), %ymm{}, %ymm{}".format(tmp, tmp))
p("vmovdqa %ymm{}, {}(%rsp)".format(tmp, (far_spill_offset+i-2)*32))
free(tmp)
if j >= 4: # if there is something to be added into already
h0_old = alloc()
h1_old = alloc()
h2_old = alloc()
get_limb(h0_old, 0, j)
get_limb(h1_old, 1, j)
get_limb(h2_old, 2, j)
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(h[0], h0_old, h[0]))
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(h[1], h1_old, h[1]))
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(h[2], h2_old, h[2]))
free(h0_old, h1_old, h2_old)
if j < 8:
for i in range(-1, 3):
if j < 4 and i == -1:
# h3 is special; only the high 4 limbs are added to h0.
continue
temp = alloc()
temp2 = alloc()
# rotate by 3 words in each lane
p("vpshufb shuf48_16(%rip), %ymm{}, %ymm{}".format(h[i+4], h[i+4]))
if coeff < 2:
mask = 'mask3_5_3_5(%rip)'
permutation = '11001110'
elif coeff == 2:
mask = 'mask3_5_4_3_1(%rip)'
# now '10' is the zero-quadword and 11 contains the 3 words
permutation = '10001011'
p("vpand {}, %ymm{}, %ymm{}".format(mask, h[i+4], temp))
# clear the 2x 3 words so that they can be added in later
p("vpand mask5_3_5_3(%rip), %ymm{}, %ymm{}".format(h[i+4], h[i+4]))
# grab the 3 words and put into position for adding them in
p("vpermq ${}, %ymm{}, %ymm{}".format(int(permutation, 2), temp, temp))
# add in the 3 low words that stay within this 16-word chunk
p("vpand mask_keephigh(%rip), %ymm{}, %ymm{}".format(temp, temp2))
p("vpor %ymm{}, %ymm{}, %ymm{}".format(temp2, h[i+4], h[i+4]))
free(temp2)
# if it's h3, we cannot add to another high limb
# in this case we fetch h0's low back from memory, and add there.
if i == -1:
dst = alloc()
get_limb(dst, 0, j-4)
else:
dst = h[i]
if coeff > 0: # there is something on the stack from the last one
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((compose_offset+(i+1)*8+j)*32, dst, dst))
p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(h[i+4], dst, dst))
if i == -1:
store_limb(dst, 0, j-4)
free(dst)
# high 3 words should go to stack for the next chunk
p("vmovdqa %xmm{}, {}(%rsp)".format(temp, (compose_offset+(i+1)*8+j)*32))
free(temp)
for i in range(4):
store_limb(h[i], i, j)
free(h0, h1, h2, h3, h4, h5, h6)
# There are still some 3-word spills that we need to add into places
coeff = 0
for j in range(8):
for i in range(3):
htemp = alloc()
get_limb(htemp, i, j)
if not (i == 0 and j == 0): # dealing with this case separately
# grab the 3 words that spilled from the previous j
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((compose_offset+(i+1)*8+((j-1) % 8))*32, htemp, htemp))
# exception case for h3 which wraps around onto h0 (skipped, above)
if i == 0 and 4 <= j+4 < 8:
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((compose_offset+0*8+((j+4-1) % 8))*32, htemp, htemp))
# exception case for two coefficients flowing from h2 into h0, h3 into h1, h4 into h2
if j == 0 and i in [0, 1, 2]:
p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((far_spill_offset+i)*32, htemp, htemp))
p("vmovdqu %ymm{}, {}({})".format(htemp, (i*176 + j * 44 + coeff*16) * 2, r_real))
free(htemp)
p("mov %r8, %rsp")
p("pop %r12") # restore callee-saved r12
p("ret")
|
#!/usr/bin/python
"""test_Read.py to test the Read class.
Requires:
python 2 (https://www.python.org/downloads/)
nose 1.3 (https://nose.readthedocs.org/en/latest/)
Joy-El R.B. Talbot Copyright (c) 2014
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from nose.tools import raises
from Read import Read
from MetageneError import MetageneError
##TODO: test set_sam_tag method
##TODO: test set_chromosome_sizes
cigar_string = {}
bad_cigar_string = {}
bitwise_flag = {}
bad_bitwise_flag = {}
good_input = {}
bad_input = {}
chromosome_conversion = {"1": "chr1", "2": "chr2"}
def setup():
"""Create fixtures"""
# define cigar strings; value: ((args for build_positions), expected_result)
cigar_string['full_match'] = ((1, "10M", "*"), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
cigar_string['insertion'] = ((1, "5M4I5M", "*"), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
cigar_string['deletion'] = ((1, "5M4D5M", "*"), [1, 2, 3, 4, 5, 10, 11, 12, 13, 14])
cigar_string['gapped_match'] = ((1, "5M3N5M", "*"), [1, 2, 3, 4, 5, 9, 10, 11, 12, 13])
cigar_string['softclipped_match'] = ((4, "3S5M", "*"), [4, 5, 6, 7, 8])
cigar_string['hardclipped_match'] = ((4, "3H5M3H", "*"), [4, 5, 6, 7, 8])
cigar_string['padded_match'] = ((1, "3P5M", "*"), [4, 5, 6, 7, 8])
cigar_string['mismatch'] = ((1, "5=1X3=", "*"), [1, 2, 3, 4, 5, 6, 7, 8, 9])
cigar_string['no_cigar_match'] = ((1, "*", "aaaaa"), [1, 2, 3, 4, 5])
bad_cigar_string['unknown_length'] = ((1, "*", "*"), "raise MetageneError")
bad_cigar_string['illegal_cigar'] = ((1, "5M4B", "*"), "raise MetageneError")
bad_cigar_string['misordered_cigar'] = ((1, "M5N4M5", "*"), "raise MetageneError")
# define bitwise flags; value: ((args for parse_sam_bitwise_flag), expected_result(count?, reverse_complemented?))
bitwise_flag['unmapped'] = ((int("0b000000000100", 2),), (False, False))
bitwise_flag['unmapped_withflags'] = ((int("0b100111011101", 2),), (False, True))
bitwise_flag['plus_strand'] = ((int("0b000000000000", 2),), (True, False))
bitwise_flag['minus_strand'] = ((int("0b000000010000", 2),), (True, True))
bitwise_flag['multiple_segments'] = ((int("0b000000000001", 2),), (True, False))
# try various default and user-changed boolean flags
bitwise_flag['count_secondary_alignment'] = ((int("0b000100000000", 2),), (True, False))
bitwise_flag['skip_secondary_alignment'] = (
(int("0b000100000000", 2), False, False, False, True, False, False), (False, False))
bitwise_flag['skip_failed_quality_control'] = ((int("0b001000000000", 2),), (False, False))
bitwise_flag['count_failed_quality_control'] = (
(int("0b001000000000", 2), True, True, False, True, False, False), (True, False))
bitwise_flag['skip_PCR_optical_duplicate'] = ((int("0b010000000000", 2),), (False, False))
bitwise_flag['count_PCR_optical_duplicate'] = (
(int("0b010000000000", 2), True, False, True, True, False, False), (True, False))
bitwise_flag['count_supplementary_alignment'] = ((int("0b100000000000", 2),), (True, False))
bitwise_flag['skip_supplementary_alignment'] = (
(int("0b100000000000", 2), True, False, False, False, False, False), (False, False))
bitwise_flag['count_only_start_success'] = (
(int("0b000001000001", 2), True, False, False, True, True, False), (True, False))
bitwise_flag['count_only_start_fail'] = (
(int("0b000000000001", 2), True, False, False, True, True, False), (False, False))
bitwise_flag['count_only_end_success'] = (
(int("0b000010000001", 2), True, False, False, True, False, True), (True, False))
bitwise_flag['count_only_end_fail'] = (
(int("0b000000000001", 2), True, False, False, True, False, True), (False, False))
bad_bitwise_flag['count_only_both'] = (
(int("0b000011000001", 2), True, False, False, True, True, True), ("Raise MetageneError",))
# define good and bad samline inputs
good_input['no_tags'] = (0, "chr1", 200, "10M", 10, 1, 1, "+")
good_input['plus_strand_match'] = (0, "chr1", 200, "10M", 10, 2, 4, "+")
good_input['minus_strand_match'] = (16, "chr1", 200, "10M", 10, 2, 4, "-")
good_input['no_match'] = (4, "*", 0, "*", 10, 1, 1, ".")
sample = ["NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4",
"NA:i:4\tNH:i:4"]
Read.process_set_sam_tag(sample, count_tag=True, tag_regex='NA:i:(\d+)')
Read.process_set_sam_tag(sample, count_tag=True, tag_regex='NH:i:(\d+)')
def test_build_positions():
for test in cigar_string:
yield (check_build_positions, test, cigar_string[test])
def check_build_positions(test, (values, expected)):
position_array = Read.build_positions(*values)
test_description = "\nTest: \t{}\n".format(test)
test_description += "Expected:\t{}\n".format(expected)
test_description += "Position:\t{}\n".format(position_array)
assert position_array == expected, "{}Error: \tDid not create the expected position array.".format(
test_description)
def test_catch_bad_cigar_input():
for test in bad_cigar_string:
yield (check_catch_bad_cigar_input, test, bad_cigar_string[test])
@raises(MetageneError)
def check_catch_bad_cigar_input(test, (values, expected)):
print Read.build_positions(*values)
def test_parse_sam_bitwise_flag():
for test in bitwise_flag:
yield (check_parse_sam_bitwise_flag, test, bitwise_flag[test])
def check_parse_sam_bitwise_flag(test, (values, expected)):
bitwise_result = Read.parse_sam_bitwise_flag(*values)
test_description = "\nTest: \t{}\n".format(test)
test_description += "Expected:\t{}\n".format(expected)
test_description += "Position:\t{}\n".format(bitwise_result)
assert bitwise_result == expected, "{}Error: \tDid not parse bitwise flag as expected.".format(test_description)
def test_catch_bad_bitwise_input():
for test in bad_bitwise_flag:
yield (check_catch_bad_bitwise_input, test, bad_bitwise_flag[test])
@raises(MetageneError)
def check_catch_bad_bitwise_input(test, (values, expected)):
print Read.parse_sam_bitwise_flag(*values)
def build_samline(bitcode, chromosome, start, cigar, length, abundance, mappings):
"""Return a SAM format line"""
string = "a" * length
return "read\t{}\t{}\t{}\t255\t{}\t*\t0\t0\t{}\t{}\tNH:i:{}\tNA:i:{}".format(
bitcode,
chromosome,
start,
cigar,
string,
string,
mappings,
abundance)
def test_create_read():
for test in good_input:
yield (check_create_read, test, good_input[test])
def check_create_read(test, values):
# create expected result
if int(values[0]) == 4:
expected = "Non-aligning read"
else:
start = int(values[2])
end = int(values[2]) + int(values[4]) - 1
if values[7] == "-":
start = end
end = int(values[2])
expected = "Read at {0}:{1}-{2} on {3} strand; counts for {4:2.3f}:".format(
values[1], # chromosome
start,
end,
values[7], # strand
float(values[5]) / float(values[6])) # abundance / mappings
# build input to test
samline = build_samline(*values[0:-1]) # exclude final value
(created, read) = Read.create_from_sam(samline, chromosome_conversion.values(), count_method='all')
output = str(read).split("\t")[0]
# create description in case test fails
test_description = "\nTest: \t{}\n".format(test)
test_description += "Abundance:\t{}\n".format(Read.has_sam_tag["NA"])
test_description += "Mappings:\t{}\n".format(Read.has_sam_tag["NH"])
test_description += "Sam Line:\t{}\n".format(samline)
test_description += "Expected:\t{}\n".format(expected)
test_description += "Position:\t{}\n".format(output)
assert output == expected, "{}Error: \tDid not create expected read.".format(test_description)
def test_catch_bad_input():
for test in bad_input:
yield (check_catch_bad_input, test, bad_input[test])
@raises(MetageneError)
def check_catch_bad_input(test, samline):
    print Read(samline)
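# Hedged usage note: these generator-style tests are written for nose under
# Python 2; a typical invocation from the package directory is
#   nosetests test_Read.py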
|
from setuptools import setup, find_packages
from pathlib import Path
# see https://packaging.python.org/guides/single-sourcing-package-version/
version_dict = {}
with open(Path(__file__).parents[0] / "boltzmanngen/_version.py") as fp:
exec(fp.read(), version_dict)
version = version_dict["__version__"]
del version_dict
setup(
name="boltzmanngen",
version=version,
description="BoltzmannGen is an open-source code that reimplements the Boltzmann Generators paper by Frank Noe.",
download_url="https://github.com/Daniangio/paper_boltzmann_generators",
author="Frank Noe, Daniele Angioletti",
python_requires=">=3.8",
packages=find_packages(include=["boltzmanngen", "boltzmanngen.*"]),
install_requires=[
"numpy",
"ase",
"tqdm",
"wandb",
"biopandas",
"torch@https://download.pytorch.org/whl/cu113/torch-1.10.2%2Bcu113-cp39-cp39-linux_x86_64.whl",
"torch-runstats>=0.2.0",
"e3nn>=0.3.5,<0.5.0",
"pyyaml",
"contextlib2;python_version<'3.7'", # backport of nullcontext
"typing_extensions;python_version<'3.8'", # backport of Final
"scikit_learn", # for GaussianProcess for per-species statistics
],
zip_safe=True,
)
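# Hedged usage note: a typical editable install from the repository root is
#   pip install -e .
# (the pinned torch wheel above targets CUDA 11.3 / CPython 3.9 on linux x86_64).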
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.output_reporting import OutputSchedules
log = logging.getLogger(__name__)
class TestOutputSchedules(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_outputschedules(self):
pyidf.validation_level = ValidationLevel.error
obj = OutputSchedules()
# alpha
var_key_field = "Hourly"
obj.key_field = var_key_field
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.outputscheduless[0].key_field, var_key_field)
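# Hedged usage note: run with the standard unittest runner, e.g.
#   python -m unittest discover -p "test_outputschedules*.py"
# (the file name pattern is an assumption).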
|
#!/usr/bin/env python3
import sys
import struct
import opcodes
MAGIC_NUMBERS = bytes([int(mv, 0) for mv in ['0xCA', '0xFE', '0xBA', '0xBE']])
JVM_NAMES = {45: 'J2SE 1.1',
46: 'J2SE 1.2',
47: 'J2SE 1.3',
48: 'J2SE 1.4',
49: 'J2SE 5.0',
50: 'J2SE 6.0',
51: 'J2SE 7',
52: 'J2SE 8'}
CP_TYPE_UTF8 = 'CONSTANT_Utf8_info'
CP_TYPE_CLASS = 'CONSTANT_Class'
CP_TYPE_FIELDREF = 'CONSTANT_Fieldref'
CP_TYPE_METHODREF = 'CONSTANT_Methodref'
CP_TYPE_INFMETHREF = 'CONSTANT_InterfaceMethodref'
CP_TYPE_STRING = 'CONSTANT_String'
CP_TYPE_INTEGER = 'CONSTANT_Integer'
CP_TYPE_FLOAT = 'CONSTANT_Float'
CP_TYPE_LONG = 'CONSTANT_Long'
CP_TYPE_DOUBLE = 'CONSTANT_Double'
CP_TYPE_NAMETYPE = 'CONSTANT_NameAndType'
ATTRIBUTE_CONSTANT_VALUE = 'ConstantValue'
ATTRIBUTE_CODE = 'Code'
ATTRIBUTE_STACK_MAP_TABLE = 'StackMapTable'
ATTRIBUTE_EXCEPTIONS = 'Exceptions'
ATTRIBUTE_INNER_CLASSES = 'InnerClasses'
ATTRIBUTE_ENCLOSING_METHOD = 'EnclosingMethod'
ATTRIBUTE_SYNTHETIC = 'Synthetic'
ATTRIBUTE_SIGNATURE = 'Signature'
ATTRIBUTE_SOURCE_FILE = 'SourceFile'
ATTRIBUTE_SOURCE_DEBUG_EXTENSION = 'SourceDebugExtension'
ATTRIBUTE_LINE_NUMBER_TABLE = 'LineNumberTable'
ATTRIBUTE_LOCAL_VARIABLE_TABLE = 'LocalVariableTable'
ATTRIBUTE_LOCAL_VARIABLE_TYPE_TABLE = 'LocalVariableTypeTable'
ATTRIBUTE_DEPRECATED = 'Deprecated'
ATTRIBUTE_RUNTIME_VISIBLE_ANNOTATIONS = 'RuntimeVisibleAnnotations'
ATTRIBUTE_RUNTIME_INVISIBLE_ANNOTATIONS = 'RuntimeInvisibleAnnotations'
ATTRIBUTE_RUNTIME_VISIBLE_PARAMETER_ANNOTATIONS = 'RuntimeVisibleParameterAnnotations'
ATTRIBUTE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATIONS = 'RuntimeInvisibleParameterAnnotations'
ATTRIBUTE_ANNOTATION_DEFAULT = 'AnnotationDefault'
ATTRIBUTE_BOOTSTRAP_METHODS = 'BootstrapMethods'
def decode_jutf8(b):
return b.replace(b'\xC0\x80', b'\x00').decode('utf8')
class CPI:
def __init__(self, jc, tag):
self.jc = jc
self.tag = tag
class CPIUTF8(CPI):
cp_type = CP_TYPE_UTF8
def __init__(self, jclass, tag, length, bts):
self.length = length
self.bytes = bts
self.value = decode_jutf8(bts)
super(CPIUTF8, self).__init__(jclass, tag)
def __str__(self):
return 'CP UTF8:"%s"' % self.value
class CPIInt(CPI):
cp_type = CP_TYPE_INTEGER
def __init__(self, jclass, tag, value):
self.value = value
super(CPIInt, self).__init__(jclass, tag)
def __str__(self):
return 'CP int:%s' % self.value
class CPIFloat(CPI):
cp_type = CP_TYPE_FLOAT
def __init__(self, jclass, tag, value):
self.value = value
super(CPIFloat, self).__init__(jclass, tag)
def __str__(self):
return 'CP float:%s' % self.value
class CPILong(CPI):
cp_type = CP_TYPE_LONG
def __init__(self, jclass, tag, value):
self.value = value
super(CPILong, self).__init__(jclass, tag)
def __str__(self):
return 'CP long:%s' % self.value
class CPIDouble(CPI):
cp_type = CP_TYPE_DOUBLE
def __init__(self, jclass, tag, value):
self.value = value
super(CPIDouble, self).__init__(jclass, tag)
def __str__(self):
return 'CP double:%s' % self.value
class CPIClassReference(CPI):
cp_type = CP_TYPE_CLASS
def __init__(self, jclass, tag, name_index):
self.name_index = name_index
super(CPIClassReference, self).__init__(jclass, tag)
def get_name(self):
return self.jc.get_cpi(self.name_index)
def __str__(self):
return 'CP class reference index:%s name:%s' % (self.name_index, self.get_name().value)
class CPIStringReference(CPI):
cp_type = CP_TYPE_STRING
def __init__(self, jclass, tag, string_index):
self.string_index = string_index
super(CPIStringReference, self).__init__(jclass, tag)
def __str__(self):
return 'CP string reference index: %s' % self.string_index
class CPIFMI(CPI):
def __init__(self, jclass, tag, class_index, name_and_type_index):
self.class_index = class_index
self.name_and_type_index = name_and_type_index
super(CPIFMI, self).__init__(jclass, tag)
class CPIFieldReference(CPIFMI):
cp_type = CP_TYPE_FIELDREF
def __str__(self):
return 'CP field reference: class ref %s name and type descriptor %s' % \
(self.class_index, self.name_and_type_index)
class CPIMethodReference(CPIFMI):
cp_type = CP_TYPE_METHODREF
def __str__(self):
return 'CP method reference: class ref %s name and type descriptor %s' % \
(self.class_index, self.name_and_type_index)
class CPIInterfaceReference(CPIFMI):
cp_type = CP_TYPE_INFMETHREF
def __str__(self):
return 'CP interface reference: class ref %s name and type descriptor %s' % \
(self.class_index, self.name_and_type_index)
class CPINameAndTypeDescriptor(CPI):
cp_type = CP_TYPE_NAMETYPE
def __init__(self, jclass, tag, name_index, descriptor_index):
self.name_index = name_index
self.descriptor_index = descriptor_index
super(CPINameAndTypeDescriptor, self).__init__(jclass, tag)
def __str__(self):
return 'CP name and type descriptor: name index %s type index %s' % (
self.name_index, self.descriptor_index)
class AttributeInfo:
def __init__(self, jclass, attribute_name_index):
self.jc = jclass
self.attribute_name_index = attribute_name_index
self.attribute_length = jclass.read_uint32()
@property
def name(self):
return self.jc.cpi_val(self.attribute_name_index)
def __str__(self):
return 'Attribute: %s' % self.name
def __repr__(self):
return self.__str__()
class AttributeConstantValue(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeConstantValue, self).__init__(jclass, attribute_name_index)
self.constantvalue_index = jclass.read_uint16()
@property
def value(self):
return self.jc.cpi_val(self.constantvalue_index)
def __str__(self):
return "%s %s" % (super(AttributeConstantValue, self), self.value)
class ExceptionItem:
def __init__(self, jclass):
self.start_pc = jclass.read_uint16()
self.end_pc = jclass.read_uint16()
self.handler = jclass.read_uint16()
self.catch = jclass.read_uint16()
class AttributeCode(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeCode, self).__init__(jclass, attribute_name_index)
self.max_stack = jclass.read_uint16()
self.max_locals = jclass.read_uint16()
self.code_length = jclass.read_uint32()
self.code = [jclass.read_uint8() for _ in range(self.code_length)]
self.exception_table_length = jclass.read_uint16()
self.exception_table = [ExceptionItem(jclass) for _ in range(self.exception_table_length)]
self.attributes_count = jclass.read_uint16()
self.attributes = [make_attribute_info(jclass) for _ in range(self.attributes_count)]
@property
def opcodes(self):
return opcodes.decode(self.code)
def __str__(self):
return "%s len: %s, max_stack: %s, max_locals: %s, %s" % (super(AttributeCode, self).__str__(), self.code_length, self.max_stack, self.max_locals, self.opcodes)
class AttributeException(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeException, self).__init__(jclass, attribute_name_index)
self.number_of_exceptions = jclass.read_uint16()
self.exception_index_table = [jclass.read_uint16() for _ in range(self.number_of_exceptions)]
class InnerClass:
def __init__(self, jclass):
self.inner_class_info_index = jclass.read_uint16()
self.outer_class_info_index = jclass.read_uint16()
self.inner_name_index = jclass.read_uint16()
self.inner_class_access_flags = jclass.read_uint16()
class AttributeInnerClasses(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeInnerClasses, self).__init__(jclass, attribute_name_index)
self.number_of_classes = jclass.read_uint16()
self.classes = [InnerClass(jclass) for _ in range(self.number_of_classes)]
class AttributeSynthetic(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeSynthetic, self).__init__(jclass, attribute_name_index)
class AttributeSourceFile(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeSourceFile, self).__init__(jclass, attribute_name_index)
self.sourcefile_index = jclass.read_uint16()
def __str__(self):
return "%s %s" % (super(AttributeSourceFile, self).__str__(), self.sourcefile_index)
class LineNumber:
def __init__(self, jclass):
self.start_pc = jclass.read_uint16()
self.line_number = jclass.read_uint16()
class AttributeLineNumberTable(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeLineNumberTable, self).__init__(jclass, attribute_name_index)
self.line_number_table_length = jclass.read_uint16()
self.line_number_table = [LineNumber(jclass) for _ in range(self.line_number_table_length)]
class LocalVariable:
def __init__(self, jclass):
self.start_pc = jclass.read_uint16()
self.length = jclass.read_uint16()
self.name_index = jclass.read_uint16()
self.descriptor_index = jclass.read_uint16()
self.index = jclass.read_uint16()
class AttributeLocalVariableTable(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeLocalVariableTable, self).__init__(jclass, attribute_name_index)
self.local_variable_table_length = jclass.read_uint16()
self.local_variable_table = [LocalVariable(jclass) for _ in range(self.local_variable_table_length)]
class AttributeDeprecated(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeDeprecated, self).__init__(jclass, attribute_name_index)
class AttributeOther(AttributeInfo):
def __init__(self, jclass, attribute_name_index):
super(AttributeOther, self).__init__(jclass, attribute_name_index)
self.info = [jclass.read_uint8() for _ in range(self.attribute_length)]
class AttributeNotImplementedError:
def __init__(self, *args):
raise NotImplementedError()
attribute_map = {ATTRIBUTE_CONSTANT_VALUE: AttributeConstantValue,
ATTRIBUTE_CODE: AttributeCode,
#ATTRIBUTE_STACK_MAP_TABLE: AttributeNotImplementedError,
ATTRIBUTE_EXCEPTIONS: AttributeException,
ATTRIBUTE_INNER_CLASSES: AttributeInnerClasses,
#ATTRIBUTE_ENCLOSING_METHOD: AttributeNotImplementedError,
ATTRIBUTE_SYNTHETIC: AttributeSynthetic,
#ATTRIBUTE_SIGNATURE: AttributeNotImplementedError,
ATTRIBUTE_SOURCE_FILE: AttributeSourceFile,
#ATTRIBUTE_SOURCE_DEBUG_EXTENSION: AttributeNotImplementedError,
ATTRIBUTE_LINE_NUMBER_TABLE: AttributeLineNumberTable,
ATTRIBUTE_LOCAL_VARIABLE_TABLE: AttributeLocalVariableTable,
#ATTRIBUTE_LOCAL_VARIABLE_TYPE_TABLE: AttributeNotImplementedError,
ATTRIBUTE_DEPRECATED: AttributeDeprecated,
#ATTRIBUTE_RUNTIME_VISIBLE_ANNOTATIONS: AttributeNotImplementedError,
#ATTRIBUTE_RUNTIME_INVISIBLE_ANNOTATIONS: AttributeNotImplementedError,
#ATTRIBUTE_RUNTIME_VISIBLE_PARAMETER_ANNOTATIONS: AttributeNotImplementedError,
#ATTRIBUTE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATIONS: AttributeNotImplementedError,
#ATTRIBUTE_ANNOTATION_DEFAULT: AttributeNotImplementedError,
#ATTRIBUTE_BOOTSTRAP_METHODS: AttributeNotImplementedError}
}
def make_attribute_info(jclass):
attribute_name_index = jclass.read_uint16()
cp_entry = jclass.get_cpi(attribute_name_index)
if type(cp_entry) != CPIUTF8:
raise Exception('attribute at %d is not UTF8' % attribute_name_index)
name = cp_entry.value
cl = attribute_map.get(name, AttributeOther)
return cl(jclass, attribute_name_index)
class FieldInfo:
def __init__(self, jclass):
self.jc = jclass
self.access_flags = jclass.read_uint16()
self.name_index = jclass.read_uint16()
self.descriptor_index = jclass.read_uint16()
self.attributes_count = jclass.read_uint16()
self.attributes = [make_attribute_info(jclass) for _ in range(self.attributes_count)]
def __str__(self):
return 'Field attrs:%s name:%s desc:%s att:%s' % \
(self.attributes_count, self.jc.cpi_val(self.name_index),
self.jc.cpi_val(self.descriptor_index), self.attributes)
class MethodInfo:
def __init__(self, jclass):
self.jc = jclass
self.access_flags = jclass.read_uint16()
self.name_index = jclass.read_uint16()
self.descriptor_index = jclass.read_uint16()
self.attributes_count = jclass.read_uint16()
self.attributes = [make_attribute_info(jclass) for _ in range(self.attributes_count)]
def __str__(self):
return 'Method acc_flags:%s attrs:%s name:%s desc:%s att:%s' % \
(self.access_flags, self.attributes_count, self.jc.cpi_val(self.name_index),
self.jc.cpi_val(self.descriptor_index), self.attributes)
# noinspection PyAttributeOutsideInit
class JavaClass:
def __init__(self, byte_array, validate=False):
self.ba = byte_array
self.to_validate = validate
self.ba_data = iter(self.ba)
self.constant_pool = []
self.minor_version = None
self.major_version = None
self.constant_pool_size = None
def version2string(self):
return JVM_NAMES[self.major_version]
def read_byte(self):
return bytes([next(self.ba_data)])
def read_bytes(self, count):
return bytes([next(self.ba_data) for _ in range(count)])
def unpack(self, fmt, size=1):
if size == 1:
data = self.read_byte()
else:
data = self.read_bytes(size)
return struct.unpack(fmt, data)[0]
def read_int8(self):
return self.unpack('>b')
def read_uint8(self):
return self.unpack('>B')
def read_uint16(self):
return self.unpack('>H', 2)
def read_int16(self):
return self.unpack('>h', 2)
def read_uint32(self):
return self.unpack('>I', 4)
def read_int32(self):
return self.unpack('>i', 4)
    def read_float32(self):
        # class files store floats and doubles big-endian, so force '>' byte order
        return self.unpack('>f', 4)
    def read_double64(self):
        return self.unpack('>d', 8)
def read_int64(self):
return self.unpack('>q', 8)
def get_cpi(self, i):
return self.constant_pool[i - 1]
def cpi_val(self, i):
return self.constant_pool[i - 1].value
def read_constant_pool(self):
i = 1
while i < self.constant_pool_size:
v = self.read_uint8()
if v == 1:
pv = self.read_uint16()
self.constant_pool.append(CPIUTF8(self, v, pv, self.read_bytes(pv)))
elif v == 3:
self.constant_pool.append(CPIInt(self, v, self.read_int32()))
elif v == 4:
self.constant_pool.append(CPIFloat(self, v, self.read_float32()))
elif v == 5:
self.constant_pool.append(CPILong(self, v, self.read_int64()))
i += 1
elif v == 6:
self.constant_pool.append(CPIDouble(self, v, self.read_double64()))
i += 1
elif v == 7:
self.constant_pool.append(CPIClassReference(self, v, self.read_uint16()))
elif v == 8:
self.constant_pool.append(CPIStringReference(self, v, self.read_uint16()))
elif v == 9:
self.constant_pool.append(CPIFieldReference(self, v, self.read_uint16(), self.read_uint16()))
elif v == 10:
self.constant_pool.append(CPIMethodReference(self, v, self.read_uint16(), self.read_uint16()))
elif v == 11:
self.constant_pool.append(
CPIInterfaceReference(
self,
v,
self.read_uint16(),
self.read_uint16()))
elif v == 12:
self.constant_pool.append(
CPINameAndTypeDescriptor(
self,
v,
self.read_uint16(),
self.read_uint16()))
else:
raise Exception('constant pool unknown tag byte %s' % v)
i += 1
def read_interfaces(self):
self.interfaces = [self.read_int16() for _ in range(self.interfaces_count)]
def read_fields(self):
self.fields = [FieldInfo(self) for _ in range(self.fields_count)]
def read_methods(self):
self.methods = [MethodInfo(self) for _ in range(self.methods_count)]
def read_attributes(self):
self.attributes = [make_attribute_info(self) for _ in range(self.attributes_count)]
def decode(self):
mn = self.read_bytes(4)
if mn != MAGIC_NUMBERS:
raise Exception('magic numbers %s do not match %s' % (MAGIC_NUMBERS, mn))
self.minor_version = self.read_uint16()
print('minor version: %s' % self.minor_version)
self.major_version = self.read_uint16()
print('major version: %s' % self.major_version)
print('name version: %s' % self.version2string())
self.constant_pool_size = self.read_uint16()
print('pool size: %s' % self.constant_pool_size)
self.read_constant_pool()
for i, cpi in enumerate(self.constant_pool):
print(i+1, cpi)
self.access_flags = self.read_uint16()
print('access flags: %s' % self.access_flags)
self.this_class = self.read_uint16()
print('this class index: %s' % self.this_class)
self.super_class = self.read_uint16()
print('super class index: %s' % self.super_class)
self.interfaces_count = self.read_uint16()
print('interfaces count: %s' % self.interfaces_count)
self.read_interfaces()
for ifc in self.interfaces:
print(ifc)
self.fields_count = self.read_uint16()
print('fields count: %s' % self.fields_count)
self.read_fields()
for fld in self.fields:
print(fld)
self.methods_count = self.read_uint16()
print('methods count: %s' % self.methods_count)
self.read_methods()
for mtd in self.methods:
print(mtd)
self.attributes_count = self.read_uint16()
print('attributes count: %s' % self.attributes_count)
self.read_attributes()
for att in self.attributes:
print(att)
try:
self.read_byte()
except StopIteration:
pass
def print_out(self):
print('minor version: %s' % self.minor_version)
print('major version: %s %s' % (self.major_version, self.version2string()))
print('pool size: %s' % self.constant_pool_size)
for cpi in self.constant_pool:
print(cpi)
print('access flags: %s' % self.access_flags)
print('this class index: %s' % self.this_class)
print(self.get_cpi(self.this_class))
print('super class index: %s' % self.super_class)
print(self.get_cpi(self.super_class))
print('interfaces count: %s' % self.interfaces_count)
for ifc in self.interfaces:
print(ifc)
print('fields count: %s' % self.fields_count)
for fld in self.fields:
print(fld)
print('methods count: %s' % self.methods_count)
for mtd in self.methods:
print(mtd)
print('attributes count: %s' % self.attributes_count)
for att in self.attributes:
print(att)
if __name__ == '__main__':
with open(sys.argv[1], 'rb') as f:
jc = JavaClass(f.read())
jc.decode()
#jc.print_out()
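# Hedged usage note: pass a compiled class file on the command line, e.g.
#   python3 this_script.py HelloWorld.class
# to print the decoded constant pool, fields, methods and attributes.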
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
import seaborn as sns
import pandas as pd
import os
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
'--env',
default='cheetah'
)
args = parser.parse_args()
#sns.set_style("darkgrid")
mpl.style.use('seaborn')
seeds = list(range(125,129))
#seeds = [125,126,127]
fig, ax = plt.subplots()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(24)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(20)
alg_list = ["sac_aq", "agtr_q", "agtr_aq", "agtr_aq_2Q"]
for alg in alg_list:
rewards = []
for seed in seeds:
file = os.path.join("save", args.env, str(seed), alg, 'eval.log')
print(file)
data = pd.read_json(file, lines=True)
rewards.append(data['episode_reward'].to_numpy())
timesteps = data['step'].to_numpy()
    rw_lists = np.array(rewards)
    mean_list = np.mean(rw_lists, axis=0)
    std_list = np.std(rw_lists, axis=0)
    ax.plot(timesteps, mean_list, label=alg)
    plt.fill_between(timesteps, mean_list + std_list, mean_list - std_list, alpha=0.2)
ax.set_xlabel("number of timesteps")
ax.set_ylabel("rewards")
plt.legend(fontsize=16, loc='center right')
plt.title(args.env, fontsize=24)
plt.savefig("{}.png".format(args.env), bbox_inches='tight')
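# Hedged usage note: expects eval.log files (JSON lines with 'step' and
# 'episode_reward' fields) under save/<env>/<seed>/<alg>/; a typical invocation is
#   python this_script.py --env cheetah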
|
import typing
import unittest
import test_runner
def test_cases(**_kwargs):
import test_cli
import test_load
import test_unzip
cases = list()
cases += [
test_cli.CLITest,
test_load.LoadTest,
test_unzip.UnzipTest,
]
return cases
def test_suite(**kwargs) -> typing.Union[unittest.TestSuite, unittest.TestCase]:
suite = unittest.TestSuite()
suite.addTests(
[
unittest.defaultTestLoader.loadTestsFromTestCase(case)
for case in test_cases(**kwargs)
]
)
return suite
if __name__ == "__main__":
# Run the test suite
result = test_runner.ColoredTestRunner(failfast=True, verbosity=2).run(test_suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
from __future__ import print_function, division, absolute_import
import time
import warnings
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
def main():
time_start = time.time()
test_Keypoint()
test_KeypointsOnImage()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_Keypoint():
eps = 1e-8
# -------------
# x/y/x_int/y_int
# -------------
kp = ia.Keypoint(y=1, x=2)
assert kp.y == 1
assert kp.x == 2
assert kp.y_int == 1
assert kp.x_int == 2
kp = ia.Keypoint(y=1.1, x=2.7)
assert 1.1 - eps < kp.y < 1.1 + eps
assert 2.7 - eps < kp.x < 2.7 + eps
assert kp.y_int == 1
assert kp.x_int == 3
# -------------
# project
# -------------
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.project((10, 10), (10, 10))
assert kp2.y == 1
assert kp2.x == 2
kp2 = kp.project((10, 10), (20, 10))
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.project((10, 10), (10, 20))
assert kp2.y == 1
assert kp2.x == 4
kp2 = kp.project((10, 10), (20, 20))
assert kp2.y == 2
assert kp2.x == 4
# -------------
# shift
# -------------
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.shift(y=1)
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.shift(y=-1)
assert kp2.y == 0
assert kp2.x == 2
kp2 = kp.shift(x=1)
assert kp2.y == 1
assert kp2.x == 3
kp2 = kp.shift(x=-1)
assert kp2.y == 1
assert kp2.x == 1
kp2 = kp.shift(y=1, x=2)
assert kp2.y == 2
assert kp2.x == 4
# -------------
# draw_on_image
# -------------
kp = ia.Keypoint(x=0, y=0)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[0, 0, :] == [0, 255, 0])
assert np.all(image_kp[1:, :, :] == 10)
assert np.all(image_kp[:, 1:, :] == 10)
kp = ia.Keypoint(x=4, y=4)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[4, 4, :] == [0, 255, 0])
assert np.all(image_kp[:4, :, :] == 10)
assert np.all(image_kp[:, :4, :] == 10)
kp = ia.Keypoint(x=4, y=4)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[2:, 2:, :] == [0, 255, 0])
assert np.all(image_kp[:2, :, :] == 10)
assert np.all(image_kp[:, :2, :] == 10)
kp = ia.Keypoint(x=5, y=5)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[3:, 3:, :] == [0, 255, 0])
assert np.all(image_kp[:3, :, :] == 10)
assert np.all(image_kp[:, :3, :] == 10)
kp = ia.Keypoint(x=0, y=0)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[:3, :3, :] == [0, 255, 0])
assert np.all(image_kp[3:, :, :] == 10)
assert np.all(image_kp[:, 3:, :] == 10)
kp = ia.Keypoint(x=-1, y=-1)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[:2, :2, :] == [0, 255, 0])
assert np.all(image_kp[2:, :, :] == 10)
assert np.all(image_kp[:, 2:, :] == 10)
kp = ia.Keypoint(x=0, y=0)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 200, 0), alpha=0.5, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[0, 0, :] == [0 + 5, 100 + 5, 0 + 5])
assert np.all(image_kp[1:, :, :] == 10)
assert np.all(image_kp[:, 1:, :] == 10)
# -------------
# generate_similar_points_manhattan
# -------------
kp = ia.Keypoint(y=4, x=5)
kps_manhatten = kp.generate_similar_points_manhattan(0, 1.0, return_array=False)
assert len(kps_manhatten) == 1
assert kps_manhatten[0].y == 4
assert kps_manhatten[0].x == 5
kps_manhatten = kp.generate_similar_points_manhattan(1, 1.0, return_array=False)
assert len(kps_manhatten) == 5
expected = [(4, 5), (3, 5), (4, 6), (5, 5), (4, 4)]
for y, x in expected:
assert any([np.allclose([y, x], [kp_manhatten.y, kp_manhatten.x]) for kp_manhatten in kps_manhatten])
kps_manhatten = kp.generate_similar_points_manhattan(1, 1.0, return_array=True)
assert kps_manhatten.shape == (5, 2)
expected = [(4, 5), (3, 5), (4, 6), (5, 5), (4, 4)]
for y, x in expected:
assert any([np.allclose([y, x], [kp_manhatten_y, kp_manhatten_x])
for kp_manhatten_x, kp_manhatten_y in kps_manhatten])
# -------------
    # __repr__ / __str__
# -------------
kp = ia.Keypoint(y=1, x=2)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
kp = ia.Keypoint(y=1.2, x=2.7)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
eps = 1e-8
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
# height/width
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
assert kpi.height == 10
assert kpi.width == 20
# image instead of shape
kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
assert kpi.shape == (10, 20, 3)
# -------------
# on()
# -------------
kpi2 = kpi.on((10, 20, 3))
assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
kpi2 = kpi.on((20, 40, 3))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
# -------------
# draw_on_image
# -------------
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], alpha=0.5, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [int(0.5*10+0), int(0.5*10+0.5*255), int(10*0.5+0)])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
kps_mask_size3 = np.copy(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 0, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [255, 255, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image2 = np.copy(image)
image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
assert np.all(image2 == image_kps)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
assert np.all(image2[kps_mask] == [0, 255, 0])
assert np.all(image2[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
# -------------
# shift
# -------------
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.shift(x=0, y=0)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(y=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
kpi2 = kpi.shift(y=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
kpi2 = kpi.shift(x=1, y=2)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
# -------------
# to_xy_array
# -------------
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
observed = kpi.to_xy_array()
expected = np.float32([
[1, 2],
[3, 4]
])
assert np.allclose(observed, expected)
# -------------
# from_xy_array
# -------------
arr = np.float32([
[1, 2],
[3, 4]
])
kpi = ia.KeypointsOnImage.from_xy_array(arr, shape=(5, 5, 3))
assert 1 - eps < kpi.keypoints[0].x < 1 + eps
assert 2 - eps < kpi.keypoints[0].y < 2 + eps
assert 3 - eps < kpi.keypoints[1].x < 3 + eps
assert 4 - eps < kpi.keypoints[1].y < 4 + eps
# -------------
# to_keypoint_image
# -------------
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = kpi.to_keypoint_image(size=1)
image_size3 = kpi.to_keypoint_image(size=3)
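    # size=1 marks only the keypoint pixel; size=3 also fills its 3x3 neighbourhood (centre at 255, neighbours >= 128).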
    kps_mask = np.zeros((5, 5, 2), dtype=bool)
kps_mask[2, 1, 0] = 1
kps_mask[4, 3, 1] = 1
kps_mask_size3 = np.zeros_like(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
assert np.all(image[kps_mask] == 255)
assert np.all(image[~kps_mask] == 0)
assert np.all(image_size3[kps_mask] == 255)
assert np.all(image_size3[kps_mask_size3] >= 128)
assert np.all(image_size3[~kps_mask_size3] == 0)
# -------------
# from_keypoint_image()
# -------------
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 255
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == 4
assert kpi2.keypoints[1].x == 3
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
got_exception = False
try:
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
_ = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20,
nb_channels=3)
except Exception as exc:
assert "Expected if_not_found_coords to be" in str(exc)
got_exception = True
assert got_exception
# -------------
# to_distance_maps()
# -------------
kpi = ia.KeypointsOnImage(keypoints=[ia.Keypoint(x=2, y=3)], shape=(5, 5, 3))
distance_map = kpi.to_distance_maps()
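    # Expected: the Euclidean distance from every pixel (x, y) to the keypoint at (2, 3), one channel per keypoint.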
expected_xx = np.float32([
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]
])
expected_yy = np.float32([
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]
])
expected = np.sqrt((expected_xx - 2)**2 + (expected_yy - 3)**2)
assert distance_map.shape == (5, 5, 1)
assert np.allclose(distance_map, expected[..., np.newaxis])
distance_map_inv = kpi.to_distance_maps(inverted=True)
expected_inv = np.divide(np.ones_like(expected), expected+1)
assert np.allclose(distance_map_inv, expected_inv[..., np.newaxis])
# to_distance_maps() with two keypoints
# positions on (4, 4) map (X=position, 1=KP 1 is closest, 2=KP 2 is closest, B=close to both)
# [1, X, 1, 1]
# [1, 1, 1, B]
# [B, 2, 2, 2]
# [2, 2, X, 2]
    # Note: this test could be improved by checking each keypoint's distance map separately,
    # since the function returns one distance map per keypoint.
kpi = ia.KeypointsOnImage(keypoints=[ia.Keypoint(x=2, y=3), ia.Keypoint(x=1, y=0)], shape=(4, 4, 3))
expected = np.float32([
[(0-1)**2 + (0-0)**2, (1-1)**2 + (0-0)**2, (2-1)**2 + (0-0)**2, (3-1)**2 + (0-0)**2],
[(0-1)**2 + (1-0)**2, (1-1)**2 + (1-0)**2, (2-1)**2 + (1-0)**2, (3-1)**2 + (1-0)**2],
[(0-1)**2 + (2-0)**2, (1-2)**2 + (2-3)**2, (2-2)**2 + (2-3)**2, (3-2)**2 + (2-3)**2],
[(0-2)**2 + (3-3)**2, (1-2)**2 + (3-3)**2, (2-2)**2 + (3-3)**2, (3-2)**2 + (3-3)**2],
])
distance_map = kpi.to_distance_maps()
expected = np.sqrt(expected)
assert np.allclose(np.min(distance_map, axis=2), expected)
distance_map_inv = kpi.to_distance_maps(inverted=True)
expected_inv = np.divide(np.ones_like(expected), expected+1)
assert np.allclose(np.max(distance_map_inv, axis=2), expected_inv)
# -------------
# from_distance_maps()
# -------------
distance_map1 = np.float32([
[2, 2, 2, 2, 2],
[2, 1, 1, 1, 2],
[2, 1, 0, 1, 2],
[2, 1, 1, 1, 2]
])
distance_map2 = np.float32([
[4, 3, 2, 2, 2],
[4, 3, 2, 1, 1],
[4, 3, 2, 1, 0.1],
[4, 3, 2, 1, 1]
])
distance_maps = np.concatenate([distance_map1[..., np.newaxis], distance_map2[..., np.newaxis]], axis=2)
kpi = ia.KeypointsOnImage.from_distance_maps(distance_maps, nb_channels=4)
assert len(kpi.keypoints) == 2
assert kpi.keypoints[0].x == 2
assert kpi.keypoints[0].y == 2
assert kpi.keypoints[1].x == 4
assert kpi.keypoints[1].y == 2
assert kpi.shape == (4, 5, 4)
kpi = ia.KeypointsOnImage.from_distance_maps(np.divide(np.ones_like(distance_maps), distance_maps+1),
inverted=True)
assert len(kpi.keypoints) == 2
assert kpi.keypoints[0].x == 2
assert kpi.keypoints[0].y == 2
assert kpi.keypoints[1].x == 4
assert kpi.keypoints[1].y == 2
assert kpi.shape == (4, 5)
kpi = ia.KeypointsOnImage.from_distance_maps(distance_maps, if_not_found_coords=(1, 1), threshold=0.09)
assert len(kpi.keypoints) == 2
assert kpi.keypoints[0].x == 2
assert kpi.keypoints[0].y == 2
assert kpi.keypoints[1].x == 1
assert kpi.keypoints[1].y == 1
assert kpi.shape == (4, 5)
kpi = ia.KeypointsOnImage.from_distance_maps(distance_maps, if_not_found_coords={"x": 1, "y": 2}, threshold=0.09)
assert len(kpi.keypoints) == 2
assert kpi.keypoints[0].x == 2
assert kpi.keypoints[0].y == 2
assert kpi.keypoints[1].x == 1
assert kpi.keypoints[1].y == 2
assert kpi.shape == (4, 5)
kpi = ia.KeypointsOnImage.from_distance_maps(distance_maps, if_not_found_coords=None, threshold=0.09)
assert len(kpi.keypoints) == 1
assert kpi.keypoints[0].x == 2
assert kpi.keypoints[0].y == 2
assert kpi.shape == (4, 5)
got_exception = False
try:
_ = ia.KeypointsOnImage.from_distance_maps(distance_maps, if_not_found_coords=False, threshold=0.09)
except Exception as exc:
assert "Expected if_not_found_coords to be" in str(exc)
got_exception = True
assert got_exception
# -------------
# copy()
# -------------
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.copy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 100
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# -------------
# deepcopy()
# -------------
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.deepcopy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# -------------
# repr/str
# -------------
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \
+ "shape=(5, 5, 3))"
assert kpi.__repr__() == kpi.__str__() == expected
|
# Deep Neural Network architectures for function approximators
#
# Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
# MLP architecture for the state-action value function of the DQN algorithm
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_nodes=64, fc2_nodes=64):
"""Initialize parameters and define model elements.
Params
======
state_size (int): Dimension of state space
action_size (int): Dimension of action space
seed (int): Random seed
            fc1_nodes (int): Number of nodes in the first hidden layer
            fc2_nodes (int): Number of nodes in the second hidden layer
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
# Linear layer (state_size --> fc1_nodes)
self.fc1 = nn.Linear(state_size, fc1_nodes)
# Linear layer (fc1_nodes --> fc2_nodes)
self.fc2 = nn.Linear(fc1_nodes, fc2_nodes)
# Linear layer (fc2_nodes --> action_size)
self.fc3 = nn.Linear(fc2_nodes, action_size)
def forward(self, state):
"""Assembling model elements for fordward pass definition.
"""
x = F.relu(self.fc1(state)) # linear layer + reLu activation
x = F.relu(self.fc2(x)) # linear layer + reLu activation
x = self.fc3(x) # linear layer
return x # return state-action value
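# Minimal smoke test (illustrative sketch only; the sizes below are arbitrary assumptions,
# not values taken from elsewhere in this project).
if __name__ == "__main__":
    net = QNetwork(state_size=8, action_size=4, seed=0)
    dummy_states = torch.rand(32, 8)  # batch of 32 random states
    q_values = net(dummy_states)      # forward pass through the MLP
    print(q_values.shape)             # expected: torch.Size([32, 4])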
|
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
from datetime import datetime, timezone
from typing import Dict
import pytest
from httpx import AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
from app import crud
from app.core.config import settings
from app.schemas.user import UserCreate, UserUpdate
from app.tests.utils.question import png_content_type
from app.tests.utils.question_order_item import create_random_question_order_item
from app.tests.utils.user import authentication_token_from_email, create_random_user
from app.tests.utils.utils import random_email, random_int, random_lower_string
pytestmark = pytest.mark.asyncio
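# Skip event-gated tests when the current time falls outside the configured event window.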
event_running = pytest.mark.skipif(
(
settings.EVENT_START_TIME is not None
and datetime.now(tz=timezone.utc) < settings.EVENT_START_TIME
)
or (
settings.EVENT_END_TIME is not None
and datetime.now(tz=timezone.utc) > settings.EVENT_END_TIME
),
reason="Event not running",
)
async def test_get_users_superuser_me(
client: AsyncClient, superuser_token_headers: Dict[str, str]
) -> None:
response = await client.get(
f"{settings.API_V1_STR}/users/me", headers=superuser_token_headers
)
current_user = response.json()
assert current_user
assert current_user["is_superuser"]
assert current_user["email"] == settings.FIRST_SUPERUSER
async def test_get_users_normal_user_me(
client: AsyncClient, normal_user_token_headers: Dict[str, str]
) -> None:
response = await client.get(
f"{settings.API_V1_STR}/users/me", headers=normal_user_token_headers
)
current_user = response.json()
assert current_user
assert current_user["is_superuser"] is False
assert current_user["email"] == settings.EMAIL_TEST_USER
async def test_create_user_new_email(
client: AsyncClient,
superuser_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
full_name = random_lower_string()
email = random_email()
username = random_lower_string()
password = random_lower_string()
data = {
"full_name": full_name,
"email": email,
"username": username,
"password": password,
}
response = await client.post(
f"{settings.API_V1_STR}/users/",
headers=superuser_token_headers,
json=data,
)
assert 200 <= response.status_code < 300
created_user = response.json()
user = await crud.user.get_by_email(db_session, email=email)
assert user
assert user.email == created_user["email"]
assert user.username == created_user["username"]
async def test_get_existing_user(
client: AsyncClient,
superuser_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
user = await create_random_user(db_session)
user_id = user.id
response = await client.get(
f"{settings.API_V1_STR}/users/{user_id}",
headers=superuser_token_headers,
)
assert 200 <= response.status_code < 300
api_user = response.json()
assert user.email
existing_user = await crud.user.get_by_email(db_session, email=user.email)
assert existing_user
assert existing_user.email == api_user["email"]
assert existing_user.username == api_user["username"]
async def test_get_not_existing_user(
client: AsyncClient, superuser_token_headers: Dict[str, str]
) -> None:
user_id = -1
response = await client.get(
f"{settings.API_V1_STR}/users/{user_id}",
headers=superuser_token_headers,
)
assert response.status_code == 404
async def test_get_current_user_normal_user(
client: AsyncClient,
normal_user_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
user = await crud.user.get_by_email(db_session, email=settings.EMAIL_TEST_USER)
assert user
user_id = user.id
response = await client.get(
f"{settings.API_V1_STR}/users/{user_id}",
headers=normal_user_token_headers,
)
current_user = response.json()
assert current_user
assert current_user["is_superuser"] is False
assert current_user["email"] == settings.EMAIL_TEST_USER
async def test_get_another_user_normal_user(
client: AsyncClient,
normal_user_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
user = await crud.user.get_by_email(db_session, email=settings.EMAIL_TEST_USER)
assert user and user.id
user_id = user.id - 1 # Any user ID other than current user
response = await client.get(
f"{settings.API_V1_STR}/users/{user_id}",
headers=normal_user_token_headers,
)
assert response.status_code == 400
async def test_create_user_existing_username(
client: AsyncClient,
superuser_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
full_name = random_lower_string()
email = random_email()
username = random_lower_string()
password = random_lower_string()
user_in = UserCreate(
full_name=full_name, email=email, username=username, password=password
)
await crud.user.create(db_session, obj_in=user_in)
data = {
"full_name": full_name,
"email": email,
"username": username,
"password": password,
}
response = await client.post(
f"{settings.API_V1_STR}/users/",
headers=superuser_token_headers,
json=data,
)
created_user = response.json()
assert response.status_code == 400
assert "_id" not in created_user
async def test_create_user_by_normal_user(
client: AsyncClient, normal_user_token_headers: Dict[str, str]
) -> None:
username = random_email()
password = random_lower_string()
full_name = random_lower_string()
data = {"email": username, "password": password, "full_name": full_name}
response = await client.post(
f"{settings.API_V1_STR}/users/",
headers=normal_user_token_headers,
json=data,
)
assert response.status_code == 400
async def test_retrieve_users(
client: AsyncClient,
superuser_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
await create_random_user(db_session)
await create_random_user(db_session)
await create_random_user(db_session)
response = await client.get(
f"{settings.API_V1_STR}/users/", headers=superuser_token_headers
)
all_users = response.json()
assert len(all_users) > 1
for user in all_users:
assert "email" in user
async def test_update_user_normal_user_me(
client: AsyncClient, normal_user_token_headers: Dict[str, str]
) -> None:
data = {
"full_name": random_lower_string(),
"email": random_email(),
"username": random_lower_string(),
"password": random_lower_string(),
}
response = await client.put(
f"{settings.API_V1_STR}/users/me",
headers=normal_user_token_headers,
json=data,
)
current_user = response.json()
assert current_user
assert current_user["is_superuser"] is False
assert current_user["email"] == data["email"]
assert current_user["username"] == data["username"]
assert current_user["full_name"] == data["full_name"]
@pytest.mark.skipif(
not settings.USERS_OPEN_REGISTRATION, reason="Open user registration disabled"
)
async def test_create_user_open(client: AsyncClient) -> None:
data = {
"full_name": random_lower_string(),
"email": random_email(),
"username": random_lower_string(),
"password": random_lower_string(),
}
response = await client.post(
f"{settings.API_V1_STR}/users/open",
json=data,
)
current_user = response.json()
assert current_user
assert current_user["is_superuser"] is False
assert current_user["email"] == data["email"]
assert current_user["username"] == data["username"]
assert current_user["full_name"] == data["full_name"]
@pytest.mark.skipif(
not settings.USERS_OPEN_REGISTRATION, reason="Open user registration disabled"
)
async def test_create_user_open_existing_username(
client: AsyncClient, db_session: AsyncSession
) -> None:
full_name = random_lower_string()
email = random_email()
username = random_lower_string()
password = random_lower_string()
user_in = UserCreate(
full_name=full_name, email=email, username=username, password=password
)
await crud.user.create(db_session, obj_in=user_in)
data = {
"full_name": full_name,
"email": email,
"username": username,
"password": password,
}
response = await client.post(
f"{settings.API_V1_STR}/users/open",
json=data,
)
assert response.status_code == 400
async def test_update_user_existing_user(
client: AsyncClient,
superuser_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
user = await create_random_user(db_session)
data = {
"full_name": random_lower_string(),
"email": random_email(),
"username": random_lower_string(),
"is_superuser": True,
}
response = await client.put(
f"{settings.API_V1_STR}/users/{user.id}",
headers=superuser_token_headers,
json=data,
)
api_user = response.json()
assert api_user
assert api_user["is_superuser"]
assert api_user["full_name"] == data["full_name"]
assert api_user["email"] == data["email"]
assert api_user["username"] == data["username"]
async def test_update_user_not_existing_user(
client: AsyncClient, superuser_token_headers: Dict[str, str]
) -> None:
user_id = -1
data = {
"email": random_email(),
"password": random_lower_string(),
"full_name": random_lower_string(),
"is_superuser": True,
}
response = await client.put(
f"{settings.API_V1_STR}/users/{user_id}",
headers=superuser_token_headers,
json=data,
)
assert response.status_code == 404
async def test_delete_user_existing_user(
client: AsyncClient,
superuser_token_headers: Dict[str, str],
db_session: AsyncSession,
) -> None:
user = await create_random_user(db_session)
user_id = user.id
response = await client.delete(
f"{settings.API_V1_STR}/users/{user_id}",
headers=superuser_token_headers,
)
assert 200 <= response.status_code < 300
async def test_delete_user_not_existing_user(
client: AsyncClient, superuser_token_headers: Dict[str, str]
) -> None:
user_id = -1
response = await client.delete(
f"{settings.API_V1_STR}/users/{user_id}",
headers=superuser_token_headers,
)
assert response.status_code == 404
@event_running
async def test_get_question(client: AsyncClient, db_session: AsyncSession) -> None:
question_order_item = await create_random_question_order_item(db_session)
question_number = question_order_item.question_number
user = await create_random_user(db_session)
user_in_update = UserUpdate(question_number=question_number)
user = await crud.user.update(db_session, db_obj=user, obj_in=user_in_update)
assert user.email # Required for mypy
normal_user_token_headers = await authentication_token_from_email(
client=client, email=user.email, db_session=db_session
)
response = await client.get(
f"{settings.API_V1_STR}/users/question", headers=normal_user_token_headers
)
assert 200 <= response.status_code < 300
question_data = response.json()
assert "content" in question_data
assert "content_type" in question_data
@event_running
async def test_get_question_image(
client: AsyncClient, db_session: AsyncSession
) -> None:
question_order_item = await create_random_question_order_item(db_session)
question_number = question_order_item.question_number
user = await create_random_user(db_session)
user_in_update = UserUpdate(question_number=question_number)
user = await crud.user.update(db_session, db_obj=user, obj_in=user_in_update)
assert user.email # Required for mypy
normal_user_token_headers = await authentication_token_from_email(
client=client, email=user.email, db_session=db_session
)
params = {"image": True}
response = await client.get(
f"{settings.API_V1_STR}/users/question",
headers=normal_user_token_headers,
params=params,
)
assert 200 <= response.status_code < 300
content_type_header = "content-type"
assert content_type_header in response.headers
assert response.headers[content_type_header] == png_content_type()
@event_running
async def test_get_question_redirect_if_none(
client: AsyncClient, db_session: AsyncSession
) -> None:
question_number = random_int()
user = await create_random_user(db_session)
user_in_update = UserUpdate(question_number=question_number)
user = await crud.user.update(db_session, db_obj=user, obj_in=user_in_update)
assert user.email # Required for mypy
normal_user_token_headers = await authentication_token_from_email(
client=client, email=user.email, db_session=db_session
)
response = await client.get(
f"{settings.API_V1_STR}/users/question",
headers=normal_user_token_headers,
follow_redirects=False,
)
assert response.status_code == 307
@event_running
async def test_get_question_redirect_if_none_allow_redirects(
client: AsyncClient, db_session: AsyncSession
) -> None:
question_number = random_int()
user = await create_random_user(db_session)
user_in_update = UserUpdate(question_number=question_number)
user = await crud.user.update(db_session, db_obj=user, obj_in=user_in_update)
assert user.email # Required for mypy
normal_user_token_headers = await authentication_token_from_email(
client=client, email=user.email, db_session=db_session
)
response = await client.get(
f"{settings.API_V1_STR}/users/question",
headers=normal_user_token_headers,
follow_redirects=True,
)
assert 200 <= response.status_code < 300
message_json = response.json()
assert "message" in message_json
@event_running
async def test_verify_answer_correct_answer(
client: AsyncClient, db_session: AsyncSession
) -> None:
question_order_item = await create_random_question_order_item(db_session)
question_number = question_order_item.question_number
user = await create_random_user(db_session)
user_in_update = UserUpdate(question_number=question_number)
user = await crud.user.update(db_session, db_obj=user, obj_in=user_in_update)
assert user.email
normal_user_token_headers = await authentication_token_from_email(
client=client, email=user.email, db_session=db_session
)
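    # Submitting the stored correct answer should advance question_number by one and not lower the rank.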
answer = question_order_item.question.answer
data = {"answer": answer}
response = await client.post(
f"{settings.API_V1_STR}/users/answer",
headers=normal_user_token_headers,
json=data,
)
assert 200 <= response.status_code < 300
assert user.id
assert user.rank
old_rank = user.rank
updated_user = await crud.user.get(db_session, identifier=user.id)
await db_session.refresh(updated_user)
assert updated_user
assert updated_user.question_number
assert updated_user.rank
assert question_number
assert updated_user.question_number == question_number + 1
assert updated_user.rank >= old_rank
@event_running
async def test_verify_answer_incorrect_answer(
client: AsyncClient, db_session: AsyncSession
) -> None:
question_order_item = await create_random_question_order_item(db_session)
question_number = question_order_item.question_number
user = await create_random_user(db_session)
user_in_update = UserUpdate(question_number=question_number)
user = await crud.user.update(db_session, db_obj=user, obj_in=user_in_update)
assert user.email
normal_user_token_headers = await authentication_token_from_email(
client=client, email=user.email, db_session=db_session
)
answer = random_lower_string()
data = {"answer": answer}
response = await client.post(
f"{settings.API_V1_STR}/users/answer",
headers=normal_user_token_headers,
json=data,
)
assert response.status_code == 400
assert user.id
assert user.rank
old_rank = user.rank
unmodified_user = await crud.user.get(db_session, identifier=user.id)
assert unmodified_user
assert unmodified_user.question_number
assert unmodified_user.rank
assert question_number
assert unmodified_user.question_number == question_number
assert unmodified_user.rank == old_rank
async def test_retrieve_leaderboard(
client: AsyncClient, db_session: AsyncSession
) -> None:
await create_random_user(db_session)
await create_random_user(db_session)
await create_random_user(db_session)
response = await client.get(f"{settings.API_V1_STR}/users/leaderboard")
all_users = response.json()
assert len(all_users) > 1
for user in all_users:
assert "question_number" in user
assert "rank" in user
assert "username" in user
|
from setuptools import setup
import twitch
setup(name='twitch.py',
description='Twitch API for Python',
author='Zakru',
      author_email='sakari.leukkunen@gmail.com',
url='https://github.com/Zakru/twitch.py',
version=twitch.__version__,
packages=['twitch'],
license='MIT')
|
class Rect:
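    """Simple rectangle value object with position (x, y) and size (width, height)."""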
def __init__(self):
self.x = 0
self.y = 0
self.width = 0
self.height = 0
|
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The Adobe DNG SDK, an API for reading and writing DNG files.
{
'variables': {
'other_cflags': [
'-DqDNGBigEndian=0',
'-DqDNGReportErrors=0',
'-DqDNGThreadSafe=1',
'-DqDNGUseLibJPEG=1',
'-DqDNGUseXMP=0',
'-DqDNGValidate=0',
'-DqDNGValidateTarget=1',
'-fexceptions',
'-frtti',
'-w',
'-Wframe-larger-than=20000',
'-DUNIX_ENV=1',
],
'headers': [
'../third_party/externals/dng_sdk/source/RawEnvironment.h',
'../third_party/externals/dng_sdk/source/dng_1d_function.h',
'../third_party/externals/dng_sdk/source/dng_1d_table.h',
'../third_party/externals/dng_sdk/source/dng_abort_sniffer.h',
'../third_party/externals/dng_sdk/source/dng_area_task.h',
'../third_party/externals/dng_sdk/source/dng_assertions.h',
'../third_party/externals/dng_sdk/source/dng_auto_ptr.h',
'../third_party/externals/dng_sdk/source/dng_bad_pixels.h',
'../third_party/externals/dng_sdk/source/dng_bottlenecks.h',
'../third_party/externals/dng_sdk/source/dng_camera_profile.h',
'../third_party/externals/dng_sdk/source/dng_classes.h',
'../third_party/externals/dng_sdk/source/dng_color_space.h',
'../third_party/externals/dng_sdk/source/dng_color_spec.h',
'../third_party/externals/dng_sdk/source/dng_date_time.h',
'../third_party/externals/dng_sdk/source/dng_errors.h',
'../third_party/externals/dng_sdk/source/dng_exceptions.h',
'../third_party/externals/dng_sdk/source/dng_exif.h',
'../third_party/externals/dng_sdk/source/dng_fast_module.h',
'../third_party/externals/dng_sdk/source/dng_file_stream.h',
'../third_party/externals/dng_sdk/source/dng_filter_task.h',
'../third_party/externals/dng_sdk/source/dng_fingerprint.h',
'../third_party/externals/dng_sdk/source/dng_flags.h',
'../third_party/externals/dng_sdk/source/dng_gain_map.h',
'../third_party/externals/dng_sdk/source/dng_globals.h',
'../third_party/externals/dng_sdk/source/dng_host.h',
'../third_party/externals/dng_sdk/source/dng_hue_sat_map.h',
'../third_party/externals/dng_sdk/source/dng_ifd.h',
'../third_party/externals/dng_sdk/source/dng_image.h',
'../third_party/externals/dng_sdk/source/dng_image_writer.h',
'../third_party/externals/dng_sdk/source/dng_info.h',
'../third_party/externals/dng_sdk/source/dng_iptc.h',
'../third_party/externals/dng_sdk/source/dng_jpeg_image.h',
'../third_party/externals/dng_sdk/source/dng_lens_correction.h',
'../third_party/externals/dng_sdk/source/dng_linearization_info.h',
'../third_party/externals/dng_sdk/source/dng_lossless_jpeg.h',
'../third_party/externals/dng_sdk/source/dng_matrix.h',
'../third_party/externals/dng_sdk/source/dng_memory.h',
'../third_party/externals/dng_sdk/source/dng_memory_stream.h',
'../third_party/externals/dng_sdk/source/dng_misc_opcodes.h',
'../third_party/externals/dng_sdk/source/dng_mosaic_info.h',
'../third_party/externals/dng_sdk/source/dng_mutex.h',
'../third_party/externals/dng_sdk/source/dng_negative.h',
'../third_party/externals/dng_sdk/source/dng_opcode_list.h',
'../third_party/externals/dng_sdk/source/dng_opcodes.h',
'../third_party/externals/dng_sdk/source/dng_orientation.h',
'../third_party/externals/dng_sdk/source/dng_parse_utils.h',
'../third_party/externals/dng_sdk/source/dng_pixel_buffer.h',
'../third_party/externals/dng_sdk/source/dng_point.h',
'../third_party/externals/dng_sdk/source/dng_preview.h',
'../third_party/externals/dng_sdk/source/dng_pthread.h',
'../third_party/externals/dng_sdk/source/dng_rational.h',
'../third_party/externals/dng_sdk/source/dng_read_image.h',
'../third_party/externals/dng_sdk/source/dng_rect.h',
'../third_party/externals/dng_sdk/source/dng_ref_counted_block.h',
'../third_party/externals/dng_sdk/source/dng_reference.h',
'../third_party/externals/dng_sdk/source/dng_render.h',
'../third_party/externals/dng_sdk/source/dng_resample.h',
'../third_party/externals/dng_sdk/source/dng_sdk_limits.h',
'../third_party/externals/dng_sdk/source/dng_shared.h',
'../third_party/externals/dng_sdk/source/dng_simple_image.h',
'../third_party/externals/dng_sdk/source/dng_spline.h',
'../third_party/externals/dng_sdk/source/dng_stream.h',
'../third_party/externals/dng_sdk/source/dng_string.h',
'../third_party/externals/dng_sdk/source/dng_string_list.h',
'../third_party/externals/dng_sdk/source/dng_tag_codes.h',
'../third_party/externals/dng_sdk/source/dng_tag_types.h',
'../third_party/externals/dng_sdk/source/dng_tag_values.h',
'../third_party/externals/dng_sdk/source/dng_temperature.h',
'../third_party/externals/dng_sdk/source/dng_tile_iterator.h',
'../third_party/externals/dng_sdk/source/dng_tone_curve.h',
'../third_party/externals/dng_sdk/source/dng_types.h',
'../third_party/externals/dng_sdk/source/dng_uncopyable.h',
'../third_party/externals/dng_sdk/source/dng_utils.h',
'../third_party/externals/dng_sdk/source/dng_xy_coord.h',
'../third_party/externals/dng_sdk/source/dng_jpeg_memory_source.h',
'../third_party/externals/dng_sdk/source/dng_jpeglib.h',
'../third_party/externals/dng_sdk/source/dng_safe_arithmetic.h',
],
},
'targets': [{
'target_name': 'dng_sdk-selector',
'type': 'none',
'conditions': [
[ 'skia_android_framework', {
'dependencies': [ 'android_deps.gyp:libdng_sdk' ],
'export_dependent_settings': [ 'android_deps.gyp:libdng_sdk' ],
}, {
'dependencies': [ 'dng_sdk.gyp:dng_sdk' ],
'export_dependent_settings': [ 'dng_sdk.gyp:dng_sdk' ],
}]
]
},{
'target_name': 'dng_sdk',
'type': 'static_library',
'cflags_cc!': [ '-fno-rtti' ],
'cflags': [ '<@(other_cflags)' ],
'conditions': [
['skia_os == "ios" or skia_os == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [ '<@(other_cflags)' ],
'OTHER_CPLUSPLUSFLAGS': [ '<@(other_cflags)' ],
},
}],
['skia_os == "win"', {
'msvs_settings': {
'VCCLCompilerTool': {
'WarningLevel': '0',
'AdditionalOptions': [
'/wd4189',
'/DqDNGBigEndian#0',
'/DqDNGReportErrors#0',
'/DqDNGThreadSafe#1',
'/DqDNGUseLibJPEG#1',
'/DqDNGUseXMP#0',
'/DqDNGValidate#0',
'/DqDNGValidateTarget#1',
],
},
},
}],
['skia_os != "linux"', {
'sources': ['<@(headers)'],
}],
['skia_arch_type == "arm" and skia_clang_build', {
# DNG SDK uses __builtin_smulll_overflow() to detect 64x64 bit multiply overflow.
# On ARMv7, Clang implements this with __mulodi4() in libclang_rt.
# I can't quite figure out how to link that here, so instead here's a shim for
# __builtin_smulll_overflow() that multiplies normally assuming no overflow.
# Tracked in b/29412086.
'defines': [ '__builtin_smulll_overflow(x,y,p)=(*(p)=(x)*(y), false)' ],
}],
],
'dependencies': [
'libjpeg-turbo-selector.gyp:libjpeg-turbo-selector',
'zlib.gyp:zlib',
],
'include_dirs': [
'../third_party/externals/dng_sdk/source',
'../third_party/externals/libjpeg-turbo',
],
'direct_dependent_settings': {
'include_dirs': [
'../third_party/externals/dng_sdk/source',
],
},
'sources': [
'../third_party/externals/dng_sdk/source/dng_1d_function.cpp',
'../third_party/externals/dng_sdk/source/dng_1d_table.cpp',
'../third_party/externals/dng_sdk/source/dng_abort_sniffer.cpp',
'../third_party/externals/dng_sdk/source/dng_area_task.cpp',
'../third_party/externals/dng_sdk/source/dng_bad_pixels.cpp',
'../third_party/externals/dng_sdk/source/dng_bottlenecks.cpp',
'../third_party/externals/dng_sdk/source/dng_camera_profile.cpp',
'../third_party/externals/dng_sdk/source/dng_color_space.cpp',
'../third_party/externals/dng_sdk/source/dng_color_spec.cpp',
'../third_party/externals/dng_sdk/source/dng_date_time.cpp',
'../third_party/externals/dng_sdk/source/dng_exceptions.cpp',
'../third_party/externals/dng_sdk/source/dng_exif.cpp',
'../third_party/externals/dng_sdk/source/dng_file_stream.cpp',
'../third_party/externals/dng_sdk/source/dng_filter_task.cpp',
'../third_party/externals/dng_sdk/source/dng_fingerprint.cpp',
'../third_party/externals/dng_sdk/source/dng_gain_map.cpp',
'../third_party/externals/dng_sdk/source/dng_globals.cpp',
'../third_party/externals/dng_sdk/source/dng_host.cpp',
'../third_party/externals/dng_sdk/source/dng_hue_sat_map.cpp',
'../third_party/externals/dng_sdk/source/dng_ifd.cpp',
'../third_party/externals/dng_sdk/source/dng_image.cpp',
'../third_party/externals/dng_sdk/source/dng_image_writer.cpp',
'../third_party/externals/dng_sdk/source/dng_info.cpp',
'../third_party/externals/dng_sdk/source/dng_iptc.cpp',
'../third_party/externals/dng_sdk/source/dng_jpeg_image.cpp',
'../third_party/externals/dng_sdk/source/dng_lens_correction.cpp',
'../third_party/externals/dng_sdk/source/dng_linearization_info.cpp',
'../third_party/externals/dng_sdk/source/dng_lossless_jpeg.cpp',
'../third_party/externals/dng_sdk/source/dng_matrix.cpp',
'../third_party/externals/dng_sdk/source/dng_memory.cpp',
'../third_party/externals/dng_sdk/source/dng_memory_stream.cpp',
'../third_party/externals/dng_sdk/source/dng_misc_opcodes.cpp',
'../third_party/externals/dng_sdk/source/dng_mosaic_info.cpp',
'../third_party/externals/dng_sdk/source/dng_mutex.cpp',
'../third_party/externals/dng_sdk/source/dng_negative.cpp',
'../third_party/externals/dng_sdk/source/dng_opcode_list.cpp',
'../third_party/externals/dng_sdk/source/dng_opcodes.cpp',
'../third_party/externals/dng_sdk/source/dng_orientation.cpp',
'../third_party/externals/dng_sdk/source/dng_parse_utils.cpp',
'../third_party/externals/dng_sdk/source/dng_pixel_buffer.cpp',
'../third_party/externals/dng_sdk/source/dng_point.cpp',
'../third_party/externals/dng_sdk/source/dng_preview.cpp',
'../third_party/externals/dng_sdk/source/dng_pthread.cpp',
'../third_party/externals/dng_sdk/source/dng_rational.cpp',
'../third_party/externals/dng_sdk/source/dng_read_image.cpp',
'../third_party/externals/dng_sdk/source/dng_rect.cpp',
'../third_party/externals/dng_sdk/source/dng_ref_counted_block.cpp',
'../third_party/externals/dng_sdk/source/dng_reference.cpp',
'../third_party/externals/dng_sdk/source/dng_render.cpp',
'../third_party/externals/dng_sdk/source/dng_resample.cpp',
'../third_party/externals/dng_sdk/source/dng_shared.cpp',
'../third_party/externals/dng_sdk/source/dng_simple_image.cpp',
'../third_party/externals/dng_sdk/source/dng_spline.cpp',
'../third_party/externals/dng_sdk/source/dng_stream.cpp',
'../third_party/externals/dng_sdk/source/dng_string.cpp',
'../third_party/externals/dng_sdk/source/dng_string_list.cpp',
'../third_party/externals/dng_sdk/source/dng_tag_types.cpp',
'../third_party/externals/dng_sdk/source/dng_temperature.cpp',
'../third_party/externals/dng_sdk/source/dng_tile_iterator.cpp',
'../third_party/externals/dng_sdk/source/dng_tone_curve.cpp',
'../third_party/externals/dng_sdk/source/dng_utils.cpp',
'../third_party/externals/dng_sdk/source/dng_xy_coord.cpp',
'../third_party/externals/dng_sdk/source/dng_jpeg_memory_source.cpp',
'../third_party/externals/dng_sdk/source/dng_safe_arithmetic.cpp',
],
}],
}
|
from setuptools import setup, find_packages
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='abstract',
version='2021.11.4',
license='MIT',
author='Idin',
author_email='py@idin.ca',
url='https://github.com/idin/abstract',
keywords='graph',
description='Python library for creating and drawing graphs and taking advantage of graph properties',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=find_packages(exclude=("jupyter_tests", ".idea", ".git")),
install_requires=['graphviz', 'base32hex', 'colouration'],
python_requires='~=3.6',
zip_safe=True
)
|
from picamera import PiCamera, Color
import time
import RPi.GPIO as GPIO
from datetime import datetime
from datetime import timedelta
segCode = [0x3f,0x06,0x5b,0x4f,0x66,0x6d,0x7d,0x07,0x7f,0x6f,0x77,0x7c,0x39,0x5e,0x79,0x71,0x80]
camera = PiCamera()
BeepPin = 15
BtnPin = 16 # physical pin 16 --- button
SDI = 11
RCLK = 12
SRCLK = 13
def setup():
GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location
    GPIO.setup(BtnPin, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set BtnPin's mode to input, pulled up to high level (3.3V)
    GPIO.setup(BeepPin, GPIO.OUT) # Set BeepPin's mode to output
    GPIO.output(BeepPin, GPIO.HIGH) # Set BeepPin high (+3.3V) to turn the beeper off
GPIO.setup(SDI, GPIO.OUT)
GPIO.setup(RCLK, GPIO.OUT)
GPIO.setup(SRCLK, GPIO.OUT)
GPIO.output(SDI, GPIO.LOW)
GPIO.output(RCLK, GPIO.LOW)
GPIO.output(SRCLK, GPIO.LOW)
def swPhoto(ev=None):
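    # Button-press callback: ignore presses within 3 s of the previous shoot, then run the countdown and capture numPics photos.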
global pressTime
global timePicOne
global numPics
global betweenPics
global namePhotoshoot
global personNum
if datetime.now() > ( pressTime + timedelta(seconds=3) ):
#camera.start_preview()
personNum = personNum + 1
camera.annotate_text = "Look above for the\ncountdown to each picture"
camera.annotate_text_size = 160
#camera.annotate_foreground = Color('red')
time.sleep(timePicOne)
camera.annotate_text = ""
for i in range(numPics):
for p in range (betweenPics):
pic = betweenPics - p
#camera.annotate_text = "%s" % pic
hc595_shift(segCode[pic])
time.sleep(1)
hc595_shift(segCode[0])
time.sleep(.5)
GPIO.output(BeepPin, GPIO.LOW)
time.sleep(0.5)
GPIO.output(BeepPin, GPIO.HIGH) # beep off
time.sleep(0.1)
camera.capture('/home/pi/Desktop/Projects/PhotoBooth/Images/%s-%s-%s.jpg' % (namePhotoshoot,personNum,i))
#camera.stop_preview()
#name = input('please enter your name: ')
#print('hello, ', name)
camera.annotate_text = 'Push button to begin\ntaking your photos.'
pressTime = datetime.now()
def hc595_shift(dat):
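    # Shift one byte into the 74HC595 MSB-first via SDI/SRCLK, then pulse RCLK to latch it onto the display outputs.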
for bit in range(0, 8):
GPIO.output(SDI, 0x80 & (dat << bit))
GPIO.output(SRCLK, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(SRCLK, GPIO.LOW)
GPIO.output(RCLK, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(RCLK, GPIO.LOW)
def loop():
GPIO.add_event_detect(BtnPin, GPIO.FALLING, callback=swPhoto) # wait for falling
camera.start_preview()
camera.rotation = 90
    camera.resolution = (1944, 2592)
while True:
pass # Don't do anything
def destroy():
camera.stop_preview()
GPIO.cleanup() # Release resource
if __name__ == '__main__': # Program start from here
namePhotoshoot = input('please enter the name of the photoshoot: ')
numPics = int(input('how many pictures do you want: '))
timePicOne = int(input('how many seconds do you want to get ready for the first picture: '))
if timePicOne > 9:
timePicOne = 9
    betweenPics = int(input('how many seconds do you want in between all the other pictures: '))
    if betweenPics > 9:
        betweenPics = 9  # the single 7-segment digit can only count down from 9
#number = int(numPics)
personNum = 0
#timeOne = int(timePicOne)
#timeRest = int(betweenPics)
    print('the name of your photoshoot is', namePhotoshoot + '.',
          'you want', numPics, 'pictures taken.',
          'you want', timePicOne, 'seconds to prepare for the first picture.',
          'you want', betweenPics, 'seconds between every other picture.')
pressTime = datetime.now()
setup()
try:
loop()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
|
from django.db import models
from django_extensions.db.models import TimeStampedModel
from author.decorators import with_author
from expressmanage.utils import normalize_string
@with_author
class Customer(TimeStampedModel):
name = models.CharField(max_length=255)
firm = models.CharField(max_length=80)
address = models.CharField(max_length=255, blank=True, null=True)
city = models.CharField(max_length=50, blank=True, null=True)
mobile_number = models.IntegerField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
        self.name = normalize_string(self.name)
        self.firm = normalize_string(self.firm)
        # address and city are nullable; only normalize them when a value is present
        if self.address:
            self.address = normalize_string(self.address)
        if self.city:
            self.city = normalize_string(self.city)
        super(Customer, self).save(*args, **kwargs)
    # self._state.adding is True while the instance is being created,
    # and False while an existing instance is being updated.
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import os
import sys
import logging
import eventlet
# import json
import ujson
import yurl
currentDir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath('%s/..' % currentDir))
eventlet.monkey_patch()
import utils.cloud_utils as cloud_utils
import entity_utils
import entity_manager
import entity_constants
from utils.underscore import _
import cfd_keystone.cfd_keystone
import utils.cache_utils as cache_utils
import entity_file
import slice.slice_ops as slice_ops
import organization.organization_ops
import utils.publish_utils
LOG = logging.getLogger()
slice_objects = []
'''
Organization is a virtual entity. It is defined once in the database by the system admin.
Data structure format (informal sketch):
slice_objects:
    [
        {
            'slice': slice_object,
            'name': slice name,
            'dbid': id,
            'organizations': [
                {
                    'name': name,
                    'uri': organization uri,
                    'rest': organization rest response,
                    'dbid': id,
                    'departments': {dbid: department_object, ...},
                },
            ],
        },
    ]
'''
organization_objects = []
vdcs_dict = {}
storage_types = ["gold", "silver", "platinum", "bronze", "cloud"]
class SystemFunctions(object):
def __init__(self, db):
self.parent_row = None
self.dbid = 0
self.parent_row = db.get_row_dict("tblEntities", {"entitytype": "system"}, order="ORDER BY id LIMIT 1")
if not self.parent_row:
return
self.dbid = self.parent_row["id"]
initialize_resource_records(db, self.dbid, "system", 0)
def do(self, db, function, options=None, **kwargs):
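        # Dispatch to the matching handler in functionMap (unknown names are no-ops), bounded by a 20-minute eventlet timeout.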
timeout = eventlet.Timeout(1200)
status = None
LOG.info(_("SystemFunctions: Starting function %s with options %s" % (function, options)))
try:
status = self.functionMap.get(function.lower(), lambda *args, **kwargs: None)(self, db, options=options)
except eventlet.Timeout:
cloud_utils.log_exception(sys.exc_info())
except:
cloud_utils.log_exception(sys.exc_info())
finally:
timeout.cancel()
LOG.info(_("Ending function %s" % function))
return status
def _initialize(self, db, options=None, **kwargs):
try:
self._update_slices(db)
# self._update_organizations(db)
except:
cloud_utils.log_exception(sys.exc_info())
def _update_slices(self, db, options=None, **kwargs):
try:
pool = eventlet.GreenPool()
slices = db.get_multiple_row("tblEntities",
"deleted=0 AND EntityType='slice' AND EntityStatus != 'Duplicate' ")
if slices:
for item in slices:
count = db.get_rowcount("tblEntities",
"name='%s' AND deleted=0 AND EntityType='slice'" % item['Name'])
if count == 1:
eve = SliceFunctions(db, item['id'])
pool.spawn_n(eve.do, db, "initialize")
else:
# db.delete_row_id("tblEntities", item['id'])
LOG.critical(_("Skipping duplicate slices %s with id %s" % (item['Name'], item['id'])))
pool.waitall()
return "Slices initialized"
except:
cloud_utils.log_exception(sys.exc_info())
return "Error in initializing slices"
functionMap = {
"initialize": _initialize,
"slices": _update_slices,
}
class SliceFunctions(object):
def __init__(self, db, dbid, LOG=LOG):
self.dbid = dbid
self.LOG = LOG
self.timeout = None
self.parent_row = cloud_utils.lower_key(db.get_row_dict("tblEntities",
{"entitytype": "system"}, order="ORDER BY id LIMIT 1"))
def do(self, db, function, options=None, **kwargs):
# if options is None or "name" not in options.keys():
# return json.dumps({"result_code": -1, "result_message": "invalid parameters", "dbid": 0})
self.timeout = eventlet.Timeout(600)
status = None
LOG.info(
_("SliceFunctions: Starting function %s with dbid %s with options %s" % (function, self.dbid, options)))
try:
status = self.functionMap.get(function.lower(), lambda *args, **kwargs: None)(self, db, options=options)
except eventlet.Timeout:
cloud_utils.log_exception(sys.exc_info())
except:
cloud_utils.log_exception(sys.exc_info())
finally:
if self.timeout:
self.timeout.cancel()
LOG.info(_("SliceFunctions: Ending function %s with dbid %s" % (function, self.dbid)))
return status
def _create(self, db, options=None, **kwargs):
# Create
if options is None or "name" not in options.keys() or "url" not in options.keys():
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters", "dbid": 0})
if options["url"].endswith("/"):
options["url"] = options["url"][:-1]
options["entitytype"] = "slice"
options["parententityid"] = self.parent_row["id"]
dup_slice = db.get_row_dict("tblEntities", {
"name": options["name"],
"entitytype": options["entitytype"],
"deleted": 0,
"parententityid": options["parententityid"]
}, order="ORDER BY id LIMIT 1")
if dup_slice:
return ujson.dumps({"result_code": -1, "result_message": "duplicate slice", "dbid": dup_slice["id"]})
self.dbid = cloud_utils.update_or_insert(db, "tblEntities",
options,
{
"name": options["name"],
"entitytype": options["entitytype"],
"deleted": 0,
"parententityid": options["parententityid"]
},
child_table="tblSlices")
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database create error", "dbid": 0})
initialize_resource_records(db, self.dbid, "slice", options["parententityid"])
slice_obj = slice_ops.Slice(db, self.dbid, user_info=options)
self._add_slice(slice_obj)
# result = slice_obj.update(db, user_info=options)
result = slice_obj.get_slice(db)
if result:
eventlet.spawn_n(self._get_status_update_resources, slice_obj, options)
# else:
# update_entity_resource_records(db, self.parent_row["id"], "total", "slice", "total")
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _get_status_update_resources(self, slice_obj, options):
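        # Runs in a background green thread: opens its own DB handle, refreshes the slice, then updates aggregate resource records and clones slice images.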
try:
db = cloud_utils.CloudGlobalBase(log=None)
result = slice_obj.update(db, user_info=options)
if result:
update_entity_resource_records(db, self.parent_row["id"], "total", "slice", "total")
entity_utils.clone_slice_images(db, self.dbid)
db.close(log=None)
except:
cloud_utils.log_exception(sys.exc_info())
def _initialize(self, db, **kwargs):
if self.timeout:
self.timeout.cancel()
self.timeout = None
slice = self._find_slice()
if not slice:
sc = slice_ops.Slice(db, self.dbid, LOG=self.LOG)
slice = self._add_slice(sc)
slice["slice"].update(db)
update_entity_resource_records(db, self.parent_row["id"], "total", "slice", "total")
def _delete(self, db, options=None, **kwargs):
current_slice = self._find_slice()
if current_slice is None:
db.delete_rows_dict("tblUris", {"tblslices": self.dbid})
entity_utils.delete_entity_recursively(db, self.dbid)
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - db row id", "dbid": 0})
eventlet.spawn_n(self._delete_bg, current_slice, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _delete_bg(self, current_slice, options):
try:
db = cloud_utils.CloudGlobalBase(log=None)
current_slice["slice"].delete(db)
slice_objects.remove(current_slice)
LOG.info(_("SliceObjects Count of %s after deletion: %s" % (len(slice_objects), str(slice_objects))))
update_entity_resource_records(db, self.parent_row["id"], "total", "slice", "total")
db.close()
except:
cloud_utils.log_exception(sys.exc_info())
def _update(self, db, options=None, **kwargs):
current_slice = self._find_slice()
if current_slice is None:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - db row id", "dbid": 0})
options.pop("name", None)
options.pop("url", None)
cloud_utils.update_or_insert(db, "tblEntities", options, {"id": self.dbid}, child_table="tblSlices")
# current_slice["slice"].update(db, user_info=options)
# update_entity_resource_records(db, self.parent_row["id"], "total", "slice", "total")
eventlet.spawn_n(self._get_status_update_resources, current_slice["slice"], options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _status(self, db, options=None, **kwargs):
current_slice = self._find_slice()
if current_slice is None:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - db row id", "dbid": 0})
eventlet.spawn_n(self._get_status_update_resources, current_slice["slice"], options)
# current_slice["slice"].status(db)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _find_slice(self):
for index, value in enumerate(slice_objects):
if value["dbid"] == self.dbid:
return value
return None
def _add_slice(self, slice_obj):
# delete any old slice records
slice_objects[:] = [slice for slice in slice_objects if slice["name"] != slice_obj.get_name()]
slice_objects.append({"name": slice_obj.get_name(), "dbid": self.dbid, "slice": slice_obj, "organizations": []})
LOG.info(_("SliceObjects Count of %s after addition: %s" % (len(slice_objects), str(slice_objects))))
return slice_objects[-1]
functionMap = {
"initialize": _initialize,
"create": _create,
"delete": _delete,
"update": _update,
"status": _status
}
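# OrganizationFunctions follows the same dispatcher pattern as SliceFunctions above:
# do() looks the requested operation up in functionMap, guards it with a 600-second
# eventlet.Timeout and returns a ujson-encoded result dict. Long-running work (REST
# calls, resource-record rollups) is pushed to green threads via eventlet.spawn_n.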
class OrganizationFunctions(object):
def __init__(self, db, dbid):
self.row = None
if dbid:
self.dbid = dbid
self.row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": self.dbid},
order="ORDER BY id LIMIT 1"))
self.parent_row = cloud_utils.lower_key(db.get_row_dict("tblEntities",
{"entitytype": "system"},
order="ORDER BY id LIMIT 1"))
else:
self.parent_row = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"entitytype": "system"}, order="ORDER BY id LIMIT 1"))
self.dbid = self.parent_row["id"]
def do(self, db, function, options=None, **kwargs):
timeout = eventlet.Timeout(600)
status = None
LOG.info(_("OrganizationFunctions: Starting function %s with dbid %s with options %s" % (function,
self.dbid, options)))
try:
status = self.functionMap.get(function.lower(), lambda *args, **kwargs: None)(self, db, options=options)
except eventlet.Timeout:
cloud_utils.log_exception(sys.exc_info())
except:
cloud_utils.log_exception(sys.exc_info())
finally:
timeout.cancel()
LOG.info(_("OrganizationFunctions: Ending function %s with dbid %s " % (function, self.dbid)))
return status
def _create(self, db, options=None, **kwargs):
# Create
if options is None:
options = {}
if "name" not in options:
options["name"] = entity_utils.create_entity_name(db, "organization")
if options is None or "name" not in options.keys():
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters", "dbid": 0})
options["entitytype"] = "organization"
options["parententityid"] = self.parent_row["id"]
self.dbid = cloud_utils.update_or_insert(db, "tblEntities", options, {
"name": options["name"], "entitytype": options["entitytype"],
"deleted": 0, "parententityid": options["parententityid"]}, child_table="tblOrganizations")
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database create error", "dbid": 0})
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"], "Organization %s created by user %s" % (
options["name"], options["user_row"]["name"]))
# entity_utils.clone_from_slices(db, self.dbid, "imagelibrary")
eventlet.spawn_n(self._post_organization, options)
eventlet.spawn_n(update_resource_records_bg, self.dbid, options["parententityid"], options["entitytype"],
options, create_flag=True)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _post_organization(self, options):
db = cloud_utils.CloudGlobalBase(log=None)
try:
for slice in cloud_utils.get_entity(db, "slice", child_table=entity_manager.entities["slice"].child_table):
if "virtual_infrastructure_url" not in slice or slice["entitystatus"].lower() != "active":
continue
create_entity(self.dbid, options, slice_row=slice)
except:
cloud_utils.log_exception(sys.exc_info())
finally:
db.close()
def _get_status_update_resources(self, organization_obj, slice_objects, options):
try:
db = cloud_utils.CloudGlobalBase(log=None)
organization_obj.update(db, slice_objects, user_info=options)
db.close(log=None)
except:
cloud_utils.log_exception(sys.exc_info())
def _update(self, db, options=None, **kwargs):
if not options:
options = {}
options["entitytype"] = "organization"
options["parententityid"] = self.parent_row["id"]
if "name" in options.keys() and options["name"] != self.row["name"]:
row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"parententityid": options["parententityid"],
"entitytype": options["entitytype"],
"name": options['name']},
order="ORDER BY id LIMIT 1"))
if row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - duplicate name",
"dbid": self.dbid})
options["newname"] = options["name"]
self.row["name"] = options["name"]
options["name"] = self.row["name"]
cloud_utils.update_or_insert(db, "tblEntities", options, {"id": self.dbid}, child_table="tblOrganizations")
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"], "Organization %s is updated by user %s" % (
options["name"], options["user_row"]["name"]))
eventlet.spawn_n(update_entity_bg, self.dbid, options)
eventlet.spawn_n(update_resource_records_bg, self.dbid, options["parententityid"], options["entitytype"],
options, create_flag=False)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _delete(self, db, options=None, **kwargs):
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"], "Organization %s deleted by user %s" % (
self.row["name"], options["user_row"]["name"]))
eventlet.spawn_n(delete_entity_bg, self.dbid, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
functionMap = {
"create": _create,
"delete": _delete,
"update": _update,
"status": _update
}
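# Small helper used by the *Functions classes to validate ids passed in options.
# Accepts Python 2 int/long values or digit-only strings, e.g. verify_integer("42")
# is True while verify_integer("42a") or verify_integer(None) is False.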
def verify_integer(number):
    if isinstance(number, (int, long)):
        return True
    if isinstance(number, basestring) and number.isdigit():
        return True
    return False
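# Department-level CRUD. The parent organization is resolved either from a numeric
# "parententityid" or from "parententityname"; rows live in tblEntities with a
# tblDepartments child table, and heavy work is deferred to the *_bg helpers below.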
class DepartmentFunctions(object):
def __init__(self, db, dbid):
self.parent_row = None
self.row = None
self.dbid = dbid
if self.dbid != 0:
self.row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": self.dbid},
order="ORDER BY id LIMIT 1"))
if self.row:
self.parent_row = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": self.row["parententityid"]},
order="ORDER BY id LIMIT 1"))
def do(self, db, function, options=None, **kwargs):
timeout = eventlet.Timeout(600)
status = None
LOG.info(_("DepartmentFunctions: Starting function %s with dbid %s with options %s" %
(function, self.dbid, options)))
try:
status = self.functionMap.get(function.lower(), lambda *args, **kwargs: None)(self, db, options=options)
except eventlet.Timeout:
cloud_utils.log_exception(sys.exc_info())
except:
cloud_utils.log_exception(sys.exc_info())
finally:
timeout.cancel()
LOG.info(_("DepartmentFunctions: Ending function %s with dbid %s" % (function, self.dbid)))
return status
def _create(self, db, options=None, **kwargs):
try:
if options is None:
options = {}
if "name" not in options or not options["name"]:
options["name"] = entity_utils.create_entity_name(db, "department")
if options is None or "name" not in options.keys():
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - no name", "dbid": 0})
if "parententityid" in options.keys() and verify_integer(options["parententityid"]):
self.parent_row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": options["parententityid"],
"EntityType": "organization"},
order="ORDER BY id LIMIT 1"))
elif "parententityname" in options.keys():
self.parent_row = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"Name": options["parententityname"],
"EntityType": "organization"},
order="ORDER BY id LIMIT 1"))
if not self.parent_row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters- no parent name or id",
"dbid": 0})
options["parententityid"] = self.parent_row["id"]
options["entitytype"] = "department"
self.dbid = cloud_utils.update_or_insert(db, "tblEntities",
options,
{
"name": options["name"],
"entitytype": options["entitytype"],
"deleted": 0,
"parententityid": options["parententityid"]
},
child_table="tblDepartments")
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database create error", "dbid": 0})
if "ssh_keys" in options:
entity_manager.save_ssh_keys(db, self.dbid, options)
if "attached_entities" in options:
entity_manager.remove_and_add_attached_entities(db, self.dbid, options, mode="update")
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"], "Department %s created by user %s" % (
options["name"], options["user_row"]["name"]))
eventlet.spawn_n(create_entity, self.dbid, options)
eventlet.spawn_n(copy_profiles, self.dbid, self.parent_row["id"])
eventlet.spawn_n(update_resource_records_bg, self.dbid, options["parententityid"], options["entitytype"],
options, create_flag=True)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
def _update(self, db, options=None, **kwargs):
try:
if not options:
options = {}
options["entitytype"] = "department"
options["parententityid"] = self.row["parententityid"]
if "name" in options.keys() and options["name"] != self.row["name"]:
row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"parententityid": options["parententityid"],
"entitytype": options["entitytype"],
"name": options['name']},
order="ORDER BY id LIMIT 1"))
if row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - duplicate name",
"dbid": self.dbid})
options["newname"] = options["name"]
self.row["name"] = options["name"]
options["name"] = self.row["name"]
cloud_utils.update_or_insert(db, "tblEntities", options, {"id": self.dbid}, child_table="tblDepartments")
if "ssh_keys" in options:
entity_manager.save_ssh_keys(db, self.dbid, options)
if "attached_entities" in options:
entity_manager.remove_and_add_attached_entities(db, self.dbid, options, mode="update")
if "user_row" in options:
cloud_utils.log_message(db, options["parententityid"], "Department %s updated by user %s" %
(options["name"], options["user_row"]["name"]))
if "newname" in options:
cloud_utils.update_or_insert(db, "tblEntities", {"name": options["newname"]},
{"id": self.dbid}, child_table="tblDepartments")
eventlet.spawn_n(update_entity_bg, self.dbid, options)
eventlet.spawn_n(update_resource_records_bg, self.dbid, options["parententityid"], options["entitytype"],
options, create_flag=False)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
def _delete(self, db, options=None, **kwargs):
try:
if self.row:
if not options:
options = {}
options["entitytype"] = "department"
options["name"] = self.row["name"]
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"], "Department %s deleted by user %s" % (
options["name"], options["user_row"]["name"]))
eventlet.spawn_n(delete_entity_bg, self.dbid, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
functionMap = {
"create": _create,
"delete": _delete,
"update": _update
}
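# Background helper (run via eventlet.spawn_n) that provisions a freshly created
# entity by driving EntityFunctions with the "provision" command on its own db handle.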
def create_entity(dbid, options, slice_row=None):
try:
db = cloud_utils.CloudGlobalBase(log=None)
eve = EntityFunctions(db, dbid, slice_row=slice_row, quick_provision=True)
response = eve.do(db, "provision", options=options)
LOG.info(_("Entity created with response %s" % response))
print response
db.close(log=None)
except:
cloud_utils.log_exception(sys.exc_info())
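# Background helper that (when create_flag is set) seeds the resource tables for a new
# entity, applies any user-assigned limits and rolls allocated totals up to the parent.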
def update_resource_records_bg(dbid, parent_dbid, entitytype, options, create_flag=False):
try:
db = cloud_utils.CloudGlobalBase(log=False)
if create_flag:
initialize_resource_records(db, dbid, entitytype, parent_dbid)
update_user_assigned_resource_records(db, dbid, options)
update_entity_resource_records(db, parent_dbid, "allocated",
entity_constants.resource_parent_entitytype[entitytype], "total")
db.close()
except:
cloud_utils.log_exception(sys.exc_info())
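# Clones the profile groups listed in entity_constants.profile_group_clone, and their
# attachments, from the parent entity (from_dbid) onto a newly created child (to_dbid).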
def copy_profiles(to_dbid, from_dbid):
try:
db = cloud_utils.CloudGlobalBase(log=None)
for group in entity_constants.profile_group_clone:
entity_utils.clone_entity(db, to_dbid, from_dbid, group, update_clonedfrom=False)
for group in entity_constants.profile_group_clone:
entity_utils.clone_entity_attachments(db, to_dbid, from_dbid, group, to_dbid)
db.close(log=None)
except:
cloud_utils.log_exception(sys.exc_info())
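# Background helper that re-reads the full entity row, JSON-encodes it and PUTs it to
# the entity's stored URI; it bails out with a critical log if any step fails.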
def update_entity_bg(dbid, options=None, slice_row=None):
db = cloud_utils.CloudGlobalBase(log=None)
try:
row, error = entity_utils.read_full_entity_status_tuple(db, dbid)
if not row:
LOG.critical(_("No url: unable to update id:%s error:%s " % (dbid, error)))
return
element, error = entity_manager.get_entity_json(db, dbid, row)
if not element:
LOG.critical(
_("No json encoding: unable to update VDC:%s id:%s with error:%s " % (row["name"], dbid, error)))
return
url, error = entity_utils.get_entity_uri(db, dbid, row)
if not url:
LOG.critical(_("No url: unable to update VDC:%s id:%s error:%s " % (row["name"], dbid, error)))
return
rest_me = entity_utils.put_entity(element, row["entitytype"], url)
except:
cloud_utils.log_exception(sys.exc_info())
finally:
db.close()
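# Background helper that issues the remote DELETE (when a URI is known) and then
# removes the entity's URI rows and the entity subtree from the local database.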
def delete_entity_bg(dbid, options):
try:
db = cloud_utils.CloudGlobalBase(log=None)
row, error = entity_utils.read_full_entity_status_tuple(db, dbid)
if not row:
LOG.critical(_("No url: unable to update id:%s error:%s " % (dbid, error)))
return
url, error = entity_utils.get_entity_uri(db, dbid, row)
        if not url:
            LOG.critical(_("No url: unable to delete entity:%s id:%s error:%s " % (row["name"], dbid, error)))
        else:
            eventlet.spawn_n(entity_utils.delete_entity, url)
db.delete_rows_dict("tblUris", {"tblentities": dbid})
entity_utils.delete_entity_recursively(db, dbid)
db.close()
except:
cloud_utils.log_exception(sys.exc_info())
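# VDC-level CRUD plus a small "command" verb (paste/template). On create, a slice is
# picked by allocate_slice() (currently just the first registered slice) and recorded
# in the VDC row as selectedsliceentityid.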
class VDCFunctions(object):
def __init__(self, db, dbid):
self.parent_row = None
self.dbid = dbid
self.vdc_object = None
self.dept_object = None
self.selected_slice = None
self.grandparent = None
if self.dbid == 0:
return
self.row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": self.dbid},
order="ORDER BY id LIMIT 1"))
if not self.row:
return
self.row.update(cloud_utils.lower_key(db.get_row_dict("tblVdcs", {"tblEntities": self.dbid},
order="ORDER BY id LIMIT 1")))
self.parent_row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": self.row["parententityid"]},
order="ORDER BY id LIMIT 1"))
self.grandparent = cloud_utils.lower_key(db.get_row_dict("tblEntities",
{"id": self.parent_row["parententityid"]},
order="ORDER BY id LIMIT 1"))
try:
for slice in slice_objects:
if slice["dbid"] == self.row["selectedsliceentityid"]:
for org in slice["organizations"]:
if org["dbid"] == self.grandparent["id"]:
self.dept_object = org["departments"].get(self.parent_row["id"], None)
if self.dept_object is None:
return
self.vdc_object = self.dept_object.get_vdc_object(self.dbid)
return
except:
cloud_utils.log_exception(sys.exc_info())
def do(self, db, function, options=None, **kwargs):
timeout = eventlet.Timeout(600)
status = None
LOG.info(_("VDCFunctions: Starting function %s with %s with options %s" % (function, self.dbid, options)))
try:
status = self.functionMap.get(function.lower(), lambda *args, **kwargs: None)(self, db, options=options)
except eventlet.Timeout:
cloud_utils.log_exception(sys.exc_info())
except:
cloud_utils.log_exception(sys.exc_info())
finally:
timeout.cancel()
LOG.info(_("VDCFunctions: Ending function %s with dbdid %s" % (function, self.dbid)))
return status
def _create(self, db, options=None, **kwargs):
try:
if options is None:
options = {}
if "name" not in options or not options["name"]:
options["name"] = entity_utils.create_entity_name(db, "vdc")
if options is None or "name" not in options.keys():
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - no name", "dbid": 0})
if "parententityid" in options.keys() and verify_integer(options["parententityid"]):
self.parent_row = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": options["parententityid"]},
order="ORDER BY id LIMIT 1"))
elif "parententityname".lower() in options.keys():
self.parent_row = cloud_utils.lower_key(db.get_row_dict("tblEntities",
{"Name": options["parententityname"],
"EntityType": "department"},
order="ORDER BY id LIMIT 1"))
if not self.parent_row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters- no parent name or id",
"dbid": 0})
self.grandparent = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": self.parent_row["parententityid"]},
order="ORDER BY id LIMIT 1"))
options["entitytype"] = "vdc"
options["parententityid"] = self.parent_row["id"]
# options["entitysubtype"] = "network_service"
self.selected_slice = self.allocate_slice()
if self.selected_slice:
options["selectedsliceentityid"] = self.selected_slice["dbid"]
self.dbid = cloud_utils.update_or_insert(db, "tblEntities",
options,
{
"name": options["name"],
"entitytype": options["entitytype"],
"deleted": 0,
"parententityid": options["parententityid"]
},
child_table="tblVdcs")
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database create error", "dbid": 0})
if "ssh_keys" in options:
entity_manager.save_ssh_keys(db, self.dbid, options)
if "metadata" in options and options["metadata"]:
entity_manager.update_db_metadata_keyvalue(db, self.dbid, options)
if "user_data" in options:
entity_manager.save_user_data(db, self.dbid, options)
if "attached_entities" in options:
entity_manager.remove_and_add_attached_entities(db, self.dbid, options, mode="create")
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"],
"VDC %s created by user %s" % (options["name"], options["user_row"]["name"]))
eventlet.spawn_n(create_entity, self.dbid, options)
eventlet.spawn_n(copy_profiles, self.dbid, self.parent_row["id"])
eventlet.spawn_n(update_resource_records_bg, self.dbid, options["parententityid"], options["entitytype"],
options, create_flag=True)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
def _get_status_update_resources(self, organization_obj, options):
try:
eventlet.sleep(0.0001)
db = cloud_utils.CloudGlobalBase(log=None)
self.vdc_object = vdcs_dict[self.dbid] = organization_obj.create_grandchild(db, self.dbid,
self.parent_row["id"],
self.selected_slice, options)
db.close(log=None)
except:
cloud_utils.log_exception(sys.exc_info())
def allocate_slice(self):
if slice_objects:
return slice_objects[0]
def _update(self, db, options=None, **kwargs):
try:
if not self.row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - entity not found",
"dbid": self.dbid})
if not options:
options = {}
options["entitytype"] = "vdc"
options["parententityid"] = self.parent_row["id"]
if "templateid" in options:
return self.vdc_template(db, options)
if "name" in options.keys() and options["name"] != self.row["name"]:
row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"parententityid": options["parententityid"],
"entitytype": options["entitytype"],
"name": options['name']},
order="ORDER BY id LIMIT 1"))
if row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - duplicate name",
"dbid": self.dbid})
# options["name"] = self.row["name"]
if "ssh_keys" in options:
entity_manager.save_ssh_keys(db, self.dbid, options)
if "metadata" in options and options["metadata"]:
entity_manager.update_db_metadata_keyvalue(db, self.dbid, options)
if "attached_entities" in options:
entity_manager.remove_and_add_attached_entities(db, self.dbid, options, mode="update")
if "user_data" in options:
entity_manager.save_user_data(db, self.dbid, options)
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"],
"VDC %s updated by user %s" % (options["name"], options["user_row"]["name"]))
cloud_utils.update_only(db, "tblEntities", options, {"id": self.dbid}, child_table="tblVdcs")
eventlet.spawn_n(update_resource_records_bg, self.dbid, options["parententityid"], options["entitytype"],
options, create_flag=False)
eventlet.spawn_n(update_entity_bg, self.dbid, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
def _delete(self, db, options=None, **kwargs):
if not self.row:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - entity not found",
"dbid": self.dbid})
if not options:
options = {}
options["entitytype"] = "vdc"
options["name"] = self.row["name"]
if "user_row" in options:
cloud_utils.log_message(db, self.parent_row["id"],
"VDC %s deleted by user %s" % (options["name"], options["user_row"]["name"]))
eventlet.spawn_n(delete_entity_bg, self.dbid, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
def _command(self, db, options=None, **kwargs):
if "command" not in options.keys():
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - command missing",
"dbid": self.dbid})
if options["command"] == "paste":
return self.vdc_paste(db, options)
if options["command"] == "template":
return self.vdc_template(db, options)
LOG.critical(_("Unknown vdc command: unable to process VDC:%s id:%s " % (self.row["name"], self.dbid)))
return ujson.dumps({"result_code": -1, "result_message": "internal error - invalid command",
"dbid": self.dbid})
def vdc_template(self, db, options):
if "templateid" in options and "positionx" in options and "positiony" in options:
return entity_file.add_template(db, self.dbid, options["templateid"], options["positionx"],
options["positiony"])
else:
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters", "dbid": self.dbid})
def vdc_paste(self, db, options):
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
functionMap = {
"create": _create,
"delete": _delete,
"update": _update,
"command": _command,
}
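# Generic entity CRUD used for everything below the organization/department/VDC level
# (volumes, servers, network services, users, classes, ...). Database writes go through
# cloud_utils.update_or_insert with the child table registered in entity_manager.entities;
# REST provisioning and status checks go through the URIs kept in tblUris.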
class EntityFunctions(object):
def __init__(self, db, dbid, slice_row=None, callback=None, return_object=None, row=None, quick_provision=False):
self.parent_row = None
self.slice_row = slice_row
self.dbid = dbid
self.vdc_object = None
self.dept_object = None
self.selected_slice = None
self.error = None
self.row = row
self.uri_row = None
self.parent_uri_row = None
self.callback = callback
self.return_object = return_object
# self.jobid = jobid
self.quick_provision = quick_provision
def do(self, db, function, options=None, do_get=False, **kwargs):
timeout = eventlet.Timeout(600)
status = None
LOG.info(_("Starting function %s with %s with options %s" % (function, self.dbid, options)))
try:
status = self.functionMap.get(function.lower(), lambda *args, **kwargs: None)(self, db, options=options,
do_get=do_get)
except eventlet.Timeout:
cloud_utils.log_exception(sys.exc_info())
except:
cloud_utils.log_exception(sys.exc_info())
finally:
timeout.cancel()
LOG.info(_("Ending function %s with dbdid %s" % (function, self.dbid)))
return status
def _create(self, db, options=None, **kwargs):
try:
# Create
self.error = entity_utils.confirm_options_keys(options, ["entitytype", "parententityid"])
if self.error or not verify_integer(options["parententityid"]):
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
if options["parententityid"] == 0:
if options["entitytype"] == "user_group" or \
options["entitytype"] == "storage_class" or \
options["entitytype"] == "compute_class" or \
options["entitytype"] == "network_class":
system = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"entitytype": "system", "deleted": 0},
order="ORDER BY id LIMIT 1"))
options["parententityid"] = system["id"]
else:
return ujson.dumps({"result_code": -1, "result_message": "invalid parent entity id", "dbid": 0})
if options["entitytype"] == "user":
dup_user = db.execute_db("SELECT tblEntities.* FROM tblEntities JOIN tblUsers "
" WHERE ( tblUsers.tblEntities = tblEntities.id AND "
" tblUsers.loginid = '%s' AND tblEntities.deleted=0 ) ORDER BY id DESC LIMIT 1" %
options["loginid"])
if dup_user:
return ujson.dumps({"result_code": -1, "result_message": "user id already in user", "dbid": 0})
if options["entitytype"] not in entity_manager.entities.keys():
return ujson.dumps(
{"result_code": -1, "result_message": "invalid entity type %s" % options["entitytype"],
"dbid": 0})
if "name" not in options or not options["name"]:
options["name"] = entity_utils.create_entity_name(db, options["entitytype"])
if options["entitytype"] in entity_constants.topology_network_services:
options["entitysubtype"] = "network_service"
options["Throughputs"] = entity_utils.get_throughputs(db, options)
self.parent_row, status = entity_utils.read_full_entity_status_tuple(db, options["parententityid"])
result = entity_manager.entities[options["entitytype"]].pre_db_create_function(db, options,
mode="create",
parent_row=self.parent_row)
if result:
return ujson.dumps({"result_code": -1, "result_message": "%s" % result, "dbid": 0})
self.dbid = cloud_utils.update_or_insert(db, "tblEntities", options,
{
"name": options["name"],
"entitytype": options["entitytype"],
"deleted": 0,
"parententityid": options["parententityid"]
},
child_table=entity_manager.entities[
options["entitytype"]].child_table)
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database create error", "dbid": 0})
if "ssh_keys" in options:
entity_manager.save_ssh_keys(db, self.dbid, options)
if "user_data" in options:
entity_manager.save_user_data(db, self.dbid, options)
if "attached_entities" in options:
entity_manager.remove_and_add_attached_entities(db, self.dbid, options, mode="create")
if "attach_to" in options:
entity_manager.remove_and_add_attach_to_entities(db, self.dbid, options, mode="update")
entity_manager.entities[options["entitytype"]].post_db_create_function(db, self.dbid, options,
mode="create",
parent_row=self.parent_row)
if not options or "usertype" not in options or options["usertype"] != "developer":
eventlet.spawn_n(self._post, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
def _post(self, options=None, **kwargs):
db = cloud_utils.CloudGlobalBase(log=False)
try:
if not entity_manager.entity_rest_api_enabled(db, self.dbid, options):
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
if options["entitytype"] not in entity_constants.vdc_provision_only_entitytypes:
self._provision(db, options=options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
finally:
db.close()
return ujson.dumps({"result_code": -1, "result_message": "exception", "dbid": self.dbid})
def _update(self, db, options=None, **kwargs):
try:
if self.dbid == 0:
if options and "networkservicename" in options and "parententityid" in options:
trow = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"parententityid": options["parententityid"],
"name": options["networkservicename"],
"entitysubtype": "network_service"
}, order="ORDER BY id LIMIT 1"))
if not trow:
return ujson.dumps({"result_code": -1, "result_message": "database id not provided", "dbid": 0})
self.dbid = trow["id"]
if options and "log_state" in options:
db.execute_db("UPDATE tblLogs SET field='Error' WHERE id='%s' AND field = 'Alert' " % self.dbid)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
self.row = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": self.dbid}, order="ORDER BY id LIMIT 1"))
if not self.row:
return ujson.dumps({"result_code": -1, "result_message": "invalid table id", "dbid": self.dbid})
if options:
if "name" in options.keys() and options["name"] != self.row["name"]:
self.error = self.duplicate_name_check(db, options)
if self.error:
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
if self.row["entitysubtype"] == "network_service":
entity_utils.update_destination_port_name(db, self.dbid, options["name"])
options["entitytype"] = self.row["entitytype"]
options["parententityid"] = self.row["parententityid"]
# if "persistencetimeout" in options:
# options["persistencetimeout"] = int(options["persistencetimeout"] )
cloud_utils.update_or_insert(db, "tblEntities", options, {"id": self.dbid},
child_table=entity_manager.entities[options["entitytype"]].child_table)
if "ssh_keys" in options:
entity_manager.save_ssh_keys(db, self.dbid, options)
if "user_data" in options:
entity_manager.save_user_data(db, self.dbid, options)
if "attached_entities" in options:
entity_manager.remove_and_add_attached_entities(db, self.dbid, options, mode="update",
entity_row=self.row)
if "attach_to" in options:
entity_manager.remove_and_add_attach_to_entities(db, self.dbid, options, mode="update")
if "policy" in options:
entity_manager.save_entity_policy(db, self.dbid, options)
if "flavors" in options:
entity_manager.save_entity_flavors(db, self.dbid, options)
if "classes" in options:
entity_manager.save_entity_classes(db, self.dbid, self.row, options)
entity_manager.entities[options["entitytype"]].post_db_create_function(db, self.dbid, options,
mode="update")
if options and "usertype" in options and options["usertype"] == "developer":
eventlet.spawn_n(self._developer, options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
if not entity_manager.entity_rest_api_enabled(db, self.dbid, self.row):
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
if self.row["entitytype"] in entity_constants.vdc_no_update_entitytypes:
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
self.row, self.error = entity_utils.read_full_entity_status_tuple(db, self.dbid)
if self.row:
element, self.error = entity_manager.get_entity_json(db, self.dbid, self.row)
if element:
url, self.error = self.get_entity_uri(db, self.dbid, self.row)
if url:
rest_me = entity_utils.put_entity(element, self.row["entitytype"], url)
if self.check_status(db, rest_me, url):
return ujson.dumps({"result_code": rest_me.get("http_status_code", 500),
"result_message": "http rest error", "dbid": self.dbid})
entity_manager.entities[self.row["entitytype"]].post_rest_get_function(db, self.dbid, rest_me,
rest='put')
rest_me.pop("name", None)
cloud_utils.update_or_insert(db, "tblEntities", rest_me, {"id": self.dbid},
child_table=entity_manager.entities[
self.row["entitytype"]].child_table)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
except:
cloud_utils.log_exception(sys.exc_info())
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
def _status(self, db, options=None, do_get=False, **kwargs):
try:
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database id not provided", "dbid": 0})
self.row, self.error = entity_utils.read_full_entity_status_tuple(db, self.dbid)
if self.row:
if self.row["entitytype"] == "slice":
eve = SliceFunctions(db, self.dbid)
return eve.do(db, "status", options=options)
if self.row["entitytype"] == "organization":
eve = OrganizationFunctions(db, self.dbid)
return eve.do(db, "status", options=options)
# if self.row["entitytype"] == "department":
# eve = DepartmentFunctions(db, self.dbid)
# return eve.do(db, "status", options=options)
# if self.row["entitytype"] == "vdc":
# eve = VDCFunctions(db, self.dbid)
# return eve.do(db, "status", options=options)
if not entity_manager.entity_rest_api_enabled(db, self.dbid, self.row):
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
url, self.error = self.get_entity_uri(db, self.dbid, self.row)
if url:
if do_get:
rest_me = entity_utils.get_entity(url)
ignore_pending = True
else:
rest_me = entity_utils.put_entity({"command": "status"}, self.row["entitytype"], url)
ignore_pending = False
if self.check_status(db, rest_me, url, ignore_pending=ignore_pending):
return ujson.dumps(
{"result_code": rest_me.get("http_status_code", 500), "result_message": "http rest error",
"dbid": self.dbid})
entity_manager.entities[self.row["entitytype"]].post_rest_get_function(db, self.dbid, rest_me,
rest='get')
rest_me.pop("name", None)
cloud_utils.update_or_insert(db, "tblEntities", rest_me, {"id": self.dbid},
child_table=entity_manager.entities[
self.row["entitytype"]].child_table)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
except:
cloud_utils.log_exception(sys.exc_info())
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
def _delete(self, db, options=None, **kwargs):
try:
if self.dbid == 0:
return ujson.dumps({"result_code": -1, "result_message": "database id not provided", "dbid": 0})
self.row, self.error = entity_utils.read_full_entity_status_tuple(db, self.dbid)
if self.row:
if self.row["entitytype"] == "slice":
eve = SliceFunctions(db, self.dbid)
return eve.do(db, "delete", options=options)
if self.row["entitytype"] == "organization":
eve = OrganizationFunctions(db, self.dbid)
return eve.do(db, "delete", options=options)
if self.row["entitytype"] == "department":
eve = DepartmentFunctions(db, self.dbid)
return eve.do(db, "delete", options=options)
if self.row["entitytype"] == "vdc":
eve = VDCFunctions(db, self.dbid)
return eve.do(db, "delete", options=options)
if entity_manager.entity_rest_api_enabled(db, self.dbid, self.row):
# delete from CFD
url, self.error = self.get_entity_uri(db, self.dbid, self.row)
if url:
# rest_me = entity_utils.delete_entity(url)
eventlet.spawn_n(entity_utils.delete_entity, url)
id = entity_manager.entities[self.row["entitytype"]].pre_db_delete_function(db, self.dbid, self.row)
                # we will skip the next few steps in case we are deleting an interface, but instead we have deleted the tap service.
if self.row["entitytype"] != "network_interface" or not id or id == 0:
entity_utils.delete_entity_recursively(db, self.dbid)
db.execute_db("DELETE FROM tblAttachedEntities WHERE attachedentityid='%s'" % self.dbid)
entity_manager.entities[self.row["entitytype"]].post_db_delete_function(db, self.dbid, self.row)
if id and self.row["entitytype"] == "tap_network_service":
self.dbid = id
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
except:
cloud_utils.log_exception(sys.exc_info())
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
def _command(self, db, options=None, **kwargs):
try:
if self.dbid == 0:
self.call_callback(db, "failed", 500)
return ujson.dumps({"result_code": -1, "result_message": "database id not provided", "dbid": 0})
if "command" not in options.keys():
self.call_callback(db, "failed", 500)
return ujson.dumps({"result_code": -1, "result_message": "invalid parameters - command missing",
"dbid": self.dbid})
self.row, self.error = entity_utils.read_full_entity_status_tuple(db, self.dbid)
if self.row:
if (options["command"] == "provision" or options["command"] == "deprovision") and \
(self.row["entitytype"] == "volume" or self.row["entitytype"] == "server"):
status = entity_utils.update_resources(db, self.row, options["command"])
if status != "success":
if options["command"] != "deprovision":
LOG.critical(_("%s: Command %s failed - update user status status" % (
self.row["name"], options["command"])))
utils.publish_utils.publish(self.row["id"], {
"update_status": {"dbid": self.row["id"], "status": self.row["entitystatus"]}})
if options["command"] == "provision":
entity_utils.log_entity_message(db, self.dbid,
"Provision failed due to insufficient resoucres",
entity=self.row, type='Warn')
return ujson.dumps(
{"result_code": -1, "result_message": "Insufficient resources to provision",
"dbid": self.dbid})
else:
return ujson.dumps({"result_code": -1, "result_message": "Unable to deprovision",
"dbid": self.dbid})
elif options["command"] == "backup" or options["command"] == "archive":
if "entity_container" not in options:
self.call_callback(db, "failed", 500)
return ujson.dumps(
{"result_code": -1, "result_message": "invalid parameters - container id missing",
"dbid": self.dbid})
if "entity_name" in options:
options["name"] = options["entity_name"]
else:
options["name"] = self.row["name"] + "-" + cloud_utils.generate_uuid()
if "entity_description" in options:
options["description"] = options["entity_description"]
options.pop("entity_description", None)
con_dbid = options["entity_container"]
options["entity_name"], options[
"entity_container"], familytree = entity_manager.get_child_parent_name(db, self.dbid)
self.dbid = 0
options["parententityid"] = con_dbid
options["entitytype"] = "volume"
options["capacity"] = self.row["capacity"]
options["capacity"] = self.row["capacity"]
options["volumeclass"] = self.row["volumeclass"]
options["voltype"] = self.row["voltype"]
options["permissions"] = self.row["permissions"]
self.row = None
return self._create(db, options=options)
url, self.error = self.get_entity_uri(db, self.dbid, self.row)
if not url:
LOG.critical(_("%s: Command failed - Unable to find uri for self" % self.row["name"]))
else:
force_periodic = False
if options["command"] != "cancel":
# and self.jobid == 0:
if self.return_object:
pass
# and isinstance(self.return_object, list) and "jobid" in \
# self.return_object[-1]:
# self.jobid = self.return_object[-1]["jobid"]
else:
# add = {"entitytype": "job_queue", "parententityid": self.dbid, "deleted": 0,
# "command": ujson.dumps(options), "status": "Started"}
# self.jobid = cloud_utils.update_or_insert(db, "tblEntities", add, None,
# child_table="tblJobsQueue")
self.callback = entity_command_completed
if not self.return_object:
self.return_object = []
self.return_object.append({"entitytype": self.row["entitytype"],
"options": options, "dbid": self.dbid,
# "jobid": self.jobid,
"caller": "entity.command"})
saved_options = {}
if self.row["entitytype"] == "volume":
if options["command"] == "snapshot":
if "entity_name" not in options:
options["entity_name"] = self.row["name"] + "-" + cloud_utils.generate_uuid()
options.pop("name", None)
options.pop("description", None)
force_periodic = True
# new_dbid = cloud_utils.update_or_insert(db, "tblEntities", item_rest, {"parententityid": dbid,
# "entitytype":options["command"], "name":volume_name},
# child_table=entity_manager.entities[volume_type].child_table)
options.pop("user_row", None)
rest_me = entity_utils.put_entity(options, self.row["entitytype"], url)
if options["command"] != "cancel":
if self.check_status(db, rest_me, url, force_periodic=force_periodic, ignore_pending=self.quick_provision):
# if an error is detected
self._local_command(db, options)
return ujson.dumps({"result_code": rest_me.get("http_status_code", 500),
"result_message": "http rest error", "dbid": self.dbid})
rest_me.pop("name", None)
cloud_utils.update_or_insert(db, "tblEntities", rest_me, {"id": self.dbid},
child_table=entity_manager.entities[
self.row["entitytype"]].child_table)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
self._local_command(db, options)
self.call_callback(db, "failed", 500)
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
def _local_command(self, db, options=None, **kwargs):
try:
if options["command"] == "provision":
entity_utils.log_entity_message(db, self.dbid, "Provision failed due to a commuication error",
entity=self.row, type='Warn')
cloud_utils.update_only(db, "tblEntities", {"entitystatus": "Aborted"}, {"id": self.dbid})
elif options["command"] == "deprovision":
entity_utils.log_entity_message(db, self.dbid, "Deprovision deferred due to a commuication error",
entity=self.row, type='Warn')
cloud_utils.update_only(db, "tblEntities", {"entitystatus": "Ready"}, {"id": self.dbid})
elif options["command"] == "clear":
entity_utils.log_entity_message(db, self.dbid, "Clear state deferred due to a commuication error",
entity=self.row, type='Warn')
cloud_utils.update_only(db, "tblEntities", {"entitystatus": "Ready"}, {"id": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
def _developer(self, options, **kwargs):
db = cloud_utils.CloudGlobalBase(log=False)
try:
self.row = entity_utils.read_remaining_entity(db, self.dbid, self.row)
self._provision(db, options=options)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
finally:
db.close()
return ujson.dumps({"result_code": -1, "result_message": "exception", "dbid": self.dbid})
def _provision(self, db, options=None, **kwargs):
try:
if not self.row:
self.row, self.error = entity_utils.read_full_entity_status_tuple(db, self.dbid)
if not self.row or self.error:
LOG.critical(_("%s: Provision failed - Unable to locate row in dataase" % str(options)))
self.call_callback(db, "failed", 500)
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
element, self.error = entity_manager.get_entity_json(db, self.dbid, self.row, options=options,
quick_provision=self.quick_provision)
if not element:
LOG.critical(_("%s: Provision failed - Unable to json encode the entity" % self.row["name"]))
self.call_callback(db, "failed", 500)
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
url, self.error = self.get_entity_parent_uri(db, self.dbid, self.row)
if not url:
LOG.critical(_("%s: Provision failed - Unable to find parent's uri" % self.row["name"]))
self.call_callback(db, "failed", 500)
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": 0})
rest_me = entity_utils.post_entity(element, self.row["entitytype"], url)
y = yurl.URL(url)
slice_url = str(yurl.URL(scheme=y.scheme, host=y.host, port=y.port))
if self.check_status(db, rest_me, slice_url + rest_me.get("uri", ""), slice_url=slice_url):
# if self.check_status(db, rest_me,
# self.slice_row["virtual_infrastructure_url"] + rest_me.get("uri", "")):
return ujson.dumps({"result_code": rest_me.get("http_status_code", 500),
"result_message": "http rest error", "dbid": self.dbid})
entity_manager.entities[self.row["entitytype"]].post_rest_get_function(db, self.dbid, rest_me,
rest='post')
if rest_me and "http_status_code" in rest_me.keys() and \
rest_me["http_status_code"] == 200 and "uri" in rest_me:
self.update_all_service_uris(db, rest_me, options=options, slice_url=slice_url)
rest_me.pop("name", None)
cloud_utils.update_or_insert(db, "tblEntities", rest_me, {"id": self.dbid},
child_table=entity_manager.entities[self.row["entitytype"]].child_table)
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": self.dbid})
except:
cloud_utils.log_exception(sys.exc_info())
return ujson.dumps({"result_code": -1, "result_message": "%s" % self.error, "dbid": self.dbid})
def update_all_service_uris(self, db, rest_me, options=None, slice_url=None):
try:
if not rest_me:
return
slice_url = slice_url or self.slice_row["virtual_infrastructure_url"]
entity_utils.create_or_update_uri(db, self.row, self.dbid, slice_url,
rest_me,
uri_type="home", slice_dbid=self.slice_row["tblentities"])
uris = {}
if "compute_uri" in rest_me:
uris["uri"] = rest_me.get("compute_uri", "")
entity_utils.create_or_update_uri(db, None, self.dbid, slice_url,
uris, slice_dbid=self.slice_row["tblentities"], uri_type="compute")
if "network_uri" in rest_me:
uris["uri"] = rest_me.get("network_uri", "")
entity_utils.create_or_update_uri(db, None, self.dbid, slice_url,
uris, slice_dbid=self.slice_row["tblentities"], uri_type="network")
if "compute_uri" in rest_me:
uris["uri"] = rest_me.get("storage_uri", "")
entity_utils.create_or_update_uri(db, None, self.dbid, slice_url,
uris, slice_dbid=self.slice_row["tblentities"], uri_type="storage")
if options and "usertype" in options and options["usertype"] == "developer":
if "server_boot" in rest_me and "volume_id" in options:
cloud_utils.update_or_insert(db, "tblUris",
{"uri": slice_url + rest_me["server_boot"]["boot_volume"]["uri"],
"type": "home",
"tblEntities": options["volume_id"],
"tblSlices": self.slice_row["tblentities"]},
{"tblEntities": options["volume_id"]})
if "interfaces" in rest_me:
for j in cloud_utils.network_service_ports(db, self.dbid):
drow = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": j["destinationserviceentityid"]},
order="ORDER BY id LIMIT 1"))
found = False
for i in rest_me["interfaces"]:
if drow and "name" in i and "uri" in i and i["name"] == drow["name"]:
port = entity_utils.get_entity(slice_url + i["uri"])
# if "traffic_stats" in port:
# stats = self.slice_row["virtual_infrastructure_url"] + port ["traffic_stats"]
# else:
# stats = ""
# cloud_utils.update_only(db, "tblEntities",
# {"uri": self.slice_row["virtual_infrastructure_url"] +i["uri"],
# "statistics": stats},
# {"id": j["id"]}, child_table="tblServicePorts")
entity_utils.create_or_update_uri(db, self.row, j["id"],
slice_url,
port, uri_type="home",
slice_dbid=self.slice_row["tblentities"])
update_port = entity_manager.provision_network_service_ports(db, j["id"])
if update_port:
n = entity_utils.put_entity(update_port, "network_interface",
slice_url + i["uri"])
entity_utils.create_or_update_uri(db, self.row, j["id"],
slice_url,
n, uri_type="home",
slice_dbid=self.slice_row["tblentities"])
found = True
break
if not found:
cloud_utils.log_message(db, self.dbid, "%s: Unable to find uri for interface %s " %
(self.row["name"], drow["name"]))
LOG.critical(_("%s: Unable to find uri for interface %s" % (self.row["name"], drow["name"])))
except:
print sys.exc_info()
cloud_utils.log_exception(sys.exc_info())
def duplicate_name_check(self, db, options):
check_row = db.execute_db("SELECT * FROM tblEntities WHERE (Name = '%s' AND EntityType = '%s' AND deleted = 0 AND \
ParentEntityId = '%s' AND id != '%s') ORDER By id LIMIT 1" %
(options["name"], self.row["entitytype"], self.row["parententityid"], self.dbid))
if check_row:
LOG.critical(_("Update entity with duplicate name declined entity %s current name %s requested name %s" %
(self.dbid, self.row["name"], options["name"])))
return "Update declined - Duplicate name error"
return None
def check_status(self, db, rest_me, url, ignore_pending=False, force_periodic=False, slice_url=None):
if rest_me and "EntityStatus" in rest_me:
if rest_me["EntityStatus"].lower() in entity_utils.http_error_states:
self.call_callback(db, "failed", rest_me.get("http_status_code", 500), rest_me=rest_me)
return rest_me.get("http_status_code", 500)
slice_url = slice_url or self.slice_row["virtual_infrastructure_url"]
if "uri" in rest_me:
entity_utils.create_or_update_uri(db, self.row, self.dbid, slice_url,
rest_me,
uri_type="home", slice_dbid=self.slice_row["tblentities"])
LOG.info(_("Updated - URI for tblEntities id %s" % (self.dbid)))
if force_periodic or not rest_me or "EntityStatus" not in rest_me or rest_me["EntityStatus"].lower() in \
entity_manager.entities[self.row["entitytype"]].entity_pending_states:
if not ignore_pending:
entity_utils.add_periodic_check(db, {"dbid": self.dbid, "url": url, "callback": self.callback,
"entity": self.row,
"return_object": self.return_object,
"slice_dbid": self.slice_row["tblentities"],
"slice_uri": slice_url})
return None
else:
if entity_manager.entities[self.row["entitytype"]].post_entity_final_status_function:
entity_manager.entities[self.row["entitytype"]].post_entity_final_status_function(db, self.dbid
)
if rest_me and "EntityStatus" in rest_me and rest_me["EntityStatus"].lower() in entity_manager.entities[
self.row["entitytype"]].entity_failed_states:
self.call_callback(db, "failed", rest_me.get("http_status_code", 500), rest_me=rest_me)
else:
self.call_callback(db, "success", rest_me.get("http_status_code", 200), rest_me=rest_me)
return None
        LOG.critical(_("Unable to locate Entity status or URI for tblEntities id %s in %s" % (self.dbid, rest_me)))
        self.call_callback(db, "failed", rest_me.get("http_status_code", 500) if rest_me else 500, rest_me=rest_me)
        return rest_me.get("http_status_code", 500) if rest_me else 500
def call_callback(self, db, return_status, http_status_code, rest_me=None):
try:
# if self.jobid:
# cloud_utils.update_only(db, "tblEntities", {"progress": 100, "status": return_status,
# "response": ujson.dumps(rest_me)},
# {"id": self.jobid}, child_table="tblJobsQueue")
if self.callback:
if self.return_object and isinstance(self.return_object, list):
self.return_object[-1]["http_status_code"] = http_status_code
self.return_object[-1]["response"] = rest_me
eventlet.spawn_n(self.callback, self.dbid, return_status=return_status,
return_object=self.return_object)
except:
cloud_utils.log_exception(sys.exc_info())
def get_entity_uri(self, db, dbid, entity):
uritype = "home"
self.uri_row = cloud_utils.lower_key(db.get_row_dict("tblUris", {"tblEntities": dbid,
"type": uritype,
"deleted": 0},
order="ORDER BY id LIMIT 1"))
if not self.uri_row:
LOG.critical(_("Unable to locate URI for tblEntities id %s" % dbid))
return None, "Unable to locate entity URI in tblUris database"
self.slice_row = cloud_utils.lower_key(db.get_row_dict("tblSlices", {"tblEntities": self.uri_row["tblslices"]},
order="ORDER BY id LIMIT 1"))
if not self.slice_row:
LOG.critical(
_("Unable to locate slice for tblEntities uriid %s entity id %s" % (self.uri_row["id"], dbid)))
return None, "Unable to locate entry URI in tblslices in database"
return self.uri_row["uri"], None
def get_entity_parent_uri(self, db, dbid, entity):
uritype = "home"
if entity["entitytype"] in entity_manager.entities.keys():
uritype = entity_manager.entities[entity["entitytype"]].parent_uri_type
self.parent_uri_row = cloud_utils.lower_key(db.get_row_dict("tblUris", {"tblEntities": entity["parententityid"],
"type": uritype, "deleted": 0},
order="ORDER BY id LIMIT 1"))
if not self.parent_uri_row:
if entity["entitytype"] == "storage_class" or \
entity["entitytype"] == "compute_class" or \
entity["entitytype"] == "network_class":
if self.slice_row:
return self.slice_row["physical_infrastructure_url"], None
if entity["entitytype"] == "organization" and self.slice_row:
return self.slice_row["virtual_infrastructure_url"], None
LOG.critical(_("Unable to locate URI for tblEntities id %s" % entity["parententityid"]))
return None, "Unable to locate parent entity in database"
if not self.slice_row:
self.slice_row = cloud_utils.lower_key(db.get_row_dict("tblSlices",
{"tblEntities": self.parent_uri_row["tblslices"]},
order="ORDER BY id LIMIT 1"))
if not self.slice_row:
LOG.critical(
_("Unable to locate slice for tblEntities uriid %s entity id %s " % (self.parent_uri_row["id"],
entity["parententityid"])))
return None, "Unable to locate entry in tblslices in database"
if not self.slice_row["virtual_infrastructure_url"] or not self.parent_uri_row["uri"]:
return None, "Unable to get slice uri %s or parent uri %s" % (self.slice_row["virtual_infrastructure_url"],
self.parent_uri_row["uri"])
return self.parent_uri_row["uri"], None
functionMap = {
"create": _create,
"delete": _delete,
"update": _update,
"status": _status,
"command": _command,
"provision": _provision,
"post": _post
}
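# Illustrative sketch of how these dispatchers are typically driven (the db handle and
# the option values below are examples only, not fixed names):
#
#     db = cloud_utils.CloudGlobalBase(log=None)
#     eve = EntityFunctions(db, 0)
#     response = eve.do(db, "create", options={"entitytype": "volume",
#                                              "parententityid": 42, "name": "vol-1"})
#     # response is a ujson-encoded dict such as
#     # {"result_code": 0, "result_message": "success", "dbid": 123}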
def entity_command_completed(dbid, return_status=None, return_object=None):
try:
if return_object and isinstance(return_object, list):
LOG.info(_("job completed for dbid: %s" % dbid))
else:
LOG.critical(_("Unable to locate the eventfunction for dbid: %s" % dbid))
except:
cloud_utils.log_exception(sys.exc_info())
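# Login entry point. Requires "loginid" and "password" in options (an optional
# "ipaddress" is only used in the audit log message); authentication itself is
# delegated to cfd_keystone.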
def user_login(db, dbid, function, options=None):
error = entity_utils.confirm_options_keys(options, ["loginid", "password"])
if error:
return ujson.dumps({"result_code": -1, "result_message": "%s" % error, "dbid": 0})
system_row = cache_utils.get_cache("db|tblEntities|EntityType|System", None, db_in=db)
dbid = cfd_keystone.cfd_keystone.login(db, options["loginid"], options["password"])
if dbid == 0:
cloud_utils.log_message(db, system_row["id"],
"User id %s login attempt rejected from IP address: %s" %
(options["loginid"], options.get("ipaddress", "0.0.0.0")))
return ujson.dumps({"result_code": -1, "result_message": "Login rejected", "dbid": 0})
user = cache_utils.get_cache("db|tblEntities|id|%s" % dbid, None, db_in=db)
cloud_utils.log_message(db, system_row["id"],
"User %s with login id %s login successful from IP address: %s" %
(user["name"], options["loginid"], options.get("ipaddress", "0.0.0.0")))
db.execute_db("UPDATE tblUsers SET LastActivityDate =now() WHERE tblEntities = '%s' " % user["id"])
entity_utils.update_developer_resources(db, user["id"])
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": dbid})
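# Currently only handles logout: clears the cached user row and the stored token for
# the user referenced by options["user_row"].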
def user_functions(db, dbid, function, options=None):
if not options or "status" not in options or options["status"] != "logout" or "user_row" not in options:
return ujson.dumps({"result_code": -1, "result_message": "Invalid user functiona call", "dbid": dbid})
system_row = cache_utils.get_cache("db|tblEntities|EntityType|System", None, db_in=db)
user = cloud_utils.lower_key(db.get_row_dict("tblUsers",
{"tblEntities": options["user_row"]["id"]},
order="ORDER BY id LIMIT 1"))
if user:
cloud_utils.log_message(db, system_row["id"], "User %s login id %s logged out" %
(options["user_row"]["name"], user["loginid"]))
cache_utils.remove_cache("db-tblEntities-id-%s" % options["user_row"]["id"])
db.execute_db("UPDATE tblUsers SET Token =NULL WHERE tblEntities = '%s' " % user["id"])
return ujson.dumps({"result_code": 0, "result_message": "success", "dbid": dbid})
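# Linear lookup of an organization entry in the organization_objects list by dbid.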
def find_organization(dbid):
for index, value in enumerate(organization_objects):
if value["dbid"] == dbid:
return value
return None
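# Applies user-supplied resource limits to an entity's "total" rows. options["resources"]
# is expected to be a list of dicts, for example (illustrative values only):
#   [{"catagory": "compute", "cpu": 8, "ram": 16, "network": 1000},
#    {"catagory": "storage", "type": "ultra", "capacity": 100, "iops": 500},
#    {"catagory": "network", "type": "lb", "throughput": 100}]
# Missing compute/storage fields fall back to the values currently stored in the
# corresponding resource table.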
def update_user_assigned_resource_records(db, dbid, options):
if not options:
return
if not "resources" in options:
return
resources = options["resources"]
print resources
for i in resources:
if not "catagory" in i:
continue
if not isinstance(i, dict):
continue
if i["catagory"] == "compute":
current = db.get_row("tblResourcesCompute", "Catagory='total' AND tblEntities='%s'" % dbid)
if not "cpu" in i:
i["cpu"] = current["CPU"]
if not "ram" in i:
i["ram"] = current["RAM"]
if not "network" in i:
i["network"] = current["Network"]
db.update_db("UPDATE tblResourcesCompute SET CPU='%s', RAM='%s', Network='%s' "
" WHERE Catagory='total' AND tblEntities='%s'" %
(i.get("cpu", 0), i.get('ram', 0), i.get("network", 0), dbid))
continue
if i["catagory"] == "storage" and "type" in i:
current = db.get_row("tblResourcesStorage",
"Catagory='total' AND tblEntities='%s'AND type='%s'" % (dbid, i["type"]))
if not "capacity" in i:
i["capacity"] = current["Capacity"]
if not "iops" in i:
i["iops"] = current["IOPS"]
if not "network" in i:
i["network"] = current["Network"]
db.update_db("UPDATE tblResourcesStorage SET capacity='%s', iops='%s', Network='%s'"
" WHERE Catagory='total' AND tblEntities='%s'AND type='%s' " %
(i.get("capacity", 0), i.get("iops", 0), i.get("network", 0), dbid, i["type"]))
continue
if i["catagory"] == "network" and "type" in i:
db.update_db("UPDATE tblResourcesNetwork SET throughput='%s' "
"WHERE Catagory = 'total' AND tblEntities='%s'AND type='%s' " %
(i.get("throughput", 0), dbid, i["type"]))
def initialize_resource_records(db, dbid, entittype, parententityid):
row = db.get_row_dict("tblResourcesCompute", {"tblEntities": dbid, "type": "default"},
order="ORDER BY id LIMIT 1")
if not row:
db.update_db(
"INSERT INTO tblResourcesCompute (tblEntities, Catagory, TypeTitle, Type,CPU, RAM, Network, Entitytype, ParentEntityId) "
"VALUES ('%s','total','Compute','Default','0','0','0', '%s','%s')" % (dbid, entittype, parententityid))
db.update_db(
"INSERT INTO tblResourcesCompute (tblEntities, Catagory, TypeTitle, Type,CPU, RAM, Network, Entitytype, ParentEntityId) "
"VALUES ('%s','allocated','Compute','Default','0','0','0', '%s','%s')" % (dbid, entittype, parententityid))
db.update_db(
"INSERT INTO tblResourcesCompute (tblEntities, Catagory, TypeTitle, Type,CPU, RAM, Network, Entitytype, ParentEntityId) "
"VALUES ('%s','deployed','Compute','Default','0','0','0', '%s','%s')" % (dbid, entittype, parententityid))
for item in storage_types:
row = db.get_row_dict("tblResourcesStorage", {"tblEntities": dbid, "type": item},
order="ORDER BY id LIMIT 1")
if not row:
db.update_db("INSERT INTO tblResourcesStorage "
"(tblEntities, Catagory, TypeTitle, Type, capacity, iops, Network, Entitytype, ParentEntityId) "
"VALUES ('%s','total', 'Latency','%s','0','0','0', '%s','%s') " % (
dbid, item, entittype, parententityid))
db.update_db("INSERT INTO tblResourcesStorage "
"(tblEntities, Catagory, TypeTitle, Type, capacity, iops, Network, Entitytype, ParentEntityId) "
"VALUES ('%s','allocated', 'Latency','%s','0','0','0', '%s','%s') " % (
dbid, item, entittype, parententityid))
db.update_db("INSERT INTO tblResourcesStorage "
"(tblEntities, Catagory, TypeTitle, Type, capacity, iops, Network, Entitytype, ParentEntityId) "
"VALUES ('%s','deployed', 'Latency','%s','0','0','0', '%s','%s') " % (
dbid, item, entittype, parententityid))
for item in entity_constants.network_services:
row = db.get_row_dict("tblResourcesNetwork", {"tblEntities": dbid, "type": item},
order="ORDER BY id LIMIT 1")
if not row:
db.update_db(
"INSERT INTO tblResourcesNetwork (tblEntities, Catagory, TypeTitle, Type, throughput, Entitytype, ParentEntityId) "
"VALUES ('%s','total', 'Network Service','%s','0', '%s','%s') " % (
dbid, item, entittype, parententityid))
db.update_db(
"INSERT INTO tblResourcesNetwork (tblEntities, Catagory, TypeTitle, Type, throughput, Entitytype, ParentEntityId) "
"VALUES ('%s','allocated', 'Network Service','%s','0', '%s','%s') " % (
dbid, item, entittype, parententityid))
db.update_db(
"INSERT INTO tblResourcesNetwork (tblEntities, Catagory, TypeTitle, Type, throughput, Entitytype, ParentEntityId) "
"VALUES ('%s','deployed', 'Network Service','%s','0', '%s','%s') " % (
dbid, item, entittype, parententityid))
def update_entity_resource_records(db, dbid, to_catagory, from_entity_type, from_catagory):
try:
cores = 0
mhz = 0
ram = 0
cnetwork = 0
capacity = {}
iops = {}
snetwork = {}
for i in storage_types:
capacity[i] = 0
iops[i] = 0
snetwork[i] = 0
thru = {}
for i in entity_constants.network_services:
thru[i] = 0
for item in cloud_utils.entity_children(db, dbid, entitytype=from_entity_type):
# update compute resources
row = cloud_utils.lower_key(db.get_row_dict("tblResourcesCompute", {'tblEntities': item['id'],
'Catagory': from_catagory},
order="ORDER BY id LIMIT 1"))
if row:
cores += row['cpu']
ram += row['ram']
cnetwork += row['network']
for i in storage_types:
row = cloud_utils.lower_key(db.get_row_dict("tblResourcesStorage", {'tblEntities': item['id'],
'type': i,
'Catagory': from_catagory},
order="ORDER BY id LIMIT 1"))
if row:
capacity[i] += row['capacity']
iops[i] += row['iops']
snetwork[i] += row['network']
for i in entity_constants.network_services:
row = cloud_utils.lower_key(db.get_row_dict("tblResourcesNetwork", {'tblEntities': item['id'],
'type': i,
'Catagory': from_catagory},
order="ORDER BY id LIMIT 1"))
if row:
thru[i] += row['throughput']
db.update_db("UPDATE tblResourcesCompute SET CPU='%s', RAM='%s', Network='%s' "
"WHERE Catagory = '%s' AND tblEntities='%s'" %
(cores, ram, cnetwork, to_catagory, dbid))
for i in storage_types:
db.update_db("UPDATE tblResourcesStorage SET Type='%s', capacity='%s', iops='%s', Network='%s'"
" WHERE Catagory ='%s' AND tblEntities='%s'AND type='%s' " %
(i, capacity[i], iops[i], snetwork[i], to_catagory, dbid, i))
for i in entity_constants.network_services:
db.update_db("UPDATE tblResourcesNetwork SET Type='%s', throughput='%s' "
" WHERE Catagory ='%s' AND tblEntities='%s'AND type='%s' " %
(i, thru[i], to_catagory, dbid, i))
    except Exception:
cloud_utils.log_exception(sys.exc_info())
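# Illustrative sketch (not part of the original module): rolling the 'allocated'
# figures of an entity's children up into its own 'total' records. The entity
# type name "department" is an assumption for illustration only.
def _example_rollup_totals(db, org_dbid):
    update_entity_resource_records(db, org_dbid, "total", "department", "allocated")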
|
import sys
import logging
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as NP
import pandas as PD
from obspy.core.stream import Stream
from obspy.core.utcdatetime import UTCDateTime
from obspy.core.trace import Trace
from fgm2iaga import parse
from iaga2hdf import get_dec_tenths_arcminute, write_hdf
logger = logging.getLogger('pyrsss.mat.fgm2hdf')
def build_header(data_list,
keys=[('geodetic_latitude', 'lat'),
('geodetic_longitude', 'lon'),
('station', 'siteid')],
elevation=None,
baseline_declination=None):
"""
Build and return meta information mapping based on information in
*data_list* with *keys*, *elevation*, and *baseline_declination*
(the baseline declination, determined from IGRF if not specified).
"""
header = {}
for key, search_key in keys:
values = set([getattr(x, search_key) for x in data_list])
if len(values) > 1:
raise ValueError('multiple values for {} encountered'.format(search_key))
if len(values) == 0:
logger.warning('{} not found'.format(key))
continue
value = values.pop()
header[key] = value
if 'station' in header:
header['station'] = header['station'][:3]
d1 = PD.to_datetime(data_list[0].index.values[0]).to_pydatetime()
d2 = PD.to_datetime(data_list[-1].index.values[-1]).to_pydatetime()
    d1_obj = UTCDateTime('{:%Y-%m-%d %H:%M:%S}'.format(d1))
    d2_obj = UTCDateTime('{:%Y-%m-%d %H:%M:%S}'.format(d2))
header['starttime'] = d1_obj
header['endtime'] = d2_obj
if elevation is None:
logger.warning('no elevation found --- using default of 0')
header['elevation'] = 0
else:
header['elevation'] = elevation
delta = NP.diff(data_list[0].index.values[:2])[0] / NP.timedelta64(1, 's')
fs = 1 / delta
header['sampling_rate'] = fs
if baseline_declination is None:
d = {'starttime': header['starttime'],
'Geodetic Latitude': header['geodetic_latitude'],
'Geodetic Longitude': header['geodetic_longitude'],
'Elevation': header['elevation']}
baseline_declination = get_dec_tenths_arcminute(d, d1)
header['declination_base'] = baseline_declination
header['npts'] = sum(map(len, data_list))
return header
def fgm2hdf(hdf_fname,
fgm_fnames,
he=False,
elevation=0,
key='B_raw'):
"""
Convert data found in FGM files *fgm_fnames* to an HDF record at
*hdf_fname*. Write to the HDF record associated with *key*. If
*he*, store the h (mag north) and e (mag east) components. Use
*elevation* in specifying the measurement location. Return the
tuple containing *hdf_fname* and *key*.
"""
data_list = []
for fgm_fname in fgm_fnames:
logger.info('reading {}'.format(fgm_fname))
data_list.append(parse(fgm_fname))
header = build_header(data_list,
elevation=elevation)
df = PD.concat(data_list)
df.loc[df.flag, ['x', 'y', 'z', 'f']] = NP.nan
df.drop(columns='flag', inplace=True)
df.rename(columns={'x': 'B_X',
'y': 'B_Y',
'z': 'B_Z',
'f': 'B_F'},
inplace=True)
write_hdf(hdf_fname, df, key, header)
return hdf_fname, key
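# Illustrative sketch (not part of the original module): calling fgm2hdf()
# directly instead of via main(). The file names below are assumptions.
def _example_convert(fgm_paths=('site_day1.fgm', 'site_day2.fgm')):
    # Writes the concatenated, flag-masked records to the default 'B_raw' key.
    return fgm2hdf('site.hdf', list(fgm_paths), elevation=0)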
def main(argv=None):
if argv is None:
argv = sys.argv
    parser = ArgumentParser(description='Convert FGM format data to HDF.',
                            formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('hdf_fname',
type=str,
help='output HDF file name')
parser.add_argument('fgm_fnames',
type=str,
metavar='fgm_fname',
nargs='*',
help='input FGM file (in time order)')
args = parser.parse_args(argv[1:])
fgm2hdf(args.hdf_fname,
args.fgm_fnames)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
|
from .fhirbase import fhirbase
class ValueSet(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
resourceType: This is a ValueSet resource
url: An absolute URI that is used to identify this value set when it
is referenced in a specification, model, design or an instance. This
SHALL be a URL, SHOULD be globally unique, and SHOULD be an address at
which this value set is (or will be) published. The URL SHOULD include
the major version of the value set. For more information see
[Technical and Business Versions](resource.html#versions).
identifier: A formal identifier that is used to identify this value
set when it is represented in other formats, or referenced in a
specification, model, design or an instance.
version: The identifier that is used to identify this version of the
value set when it is referenced in a specification, model, design or
instance. This is an arbitrary value managed by the value set author
and is not expected to be globally unique. For example, it might be a
timestamp (e.g. yyyymmdd) if a managed version is not available. There
is also no expectation that versions can be placed in a
lexicographical sequence.
name: A natural language name identifying the value set. This name
should be usable as an identifier for the module by machine processing
applications such as code generation.
title: A short, descriptive, user-friendly title for the value set.
status: The status of this value set. Enables tracking the life-cycle
of the content.
experimental: A boolean value to indicate that this value set is
authored for testing purposes (or education/evaluation/marketing), and
is not intended to be used for genuine usage.
date: The date (and optionally time) when the value set was
published. The date must change if and when the business version
changes and it must change if the status code changes. In addition, it
should change when the substantive content of the value set changes.
(e.g. the 'content logical definition').
publisher: The name of the individual or organization that published
the value set.
contact: Contact details to assist a user in finding and communicating
with the publisher.
description: A free text natural language description of the value set
from a consumer's perspective.
useContext: The content was developed with a focus and intent of
supporting the contexts that are listed. These terms may be used to
assist with indexing and searching for appropriate value set
instances.
jurisdiction: A legal or geographic region in which the value set is
intended to be used.
immutable: If this is set to 'true', then no new versions of the
content logical definition can be created. Note: Other metadata might
still change.
        purpose: Explanation of why this value set is needed and why it has
been designed as it has.
copyright: A copyright statement relating to the value set and/or its
contents. Copyright statements are generally legal restrictions on the
use and publishing of the value set.
extensible: Whether this is intended to be used with an extensible
binding or not.
compose: A set of criteria that define the content logical definition
of the value set by including or excluding codes from outside this
        value set. This is also known as the "Content Logical Definition"
(CLD).
expansion: A value set can also be "expanded", where the value set is
turned into a simple collection of enumerated codes. This element
holds the expansion, if it has been performed.
"""
__name__ = 'ValueSet'
def __init__(self, dict_values=None):
self.resourceType = 'ValueSet'
# type: str
# possible values: ValueSet
self.url = None
# type: str
self.version = None
# type: str
self.name = None
# type: str
self.title = None
# type: str
self.status = None
# type: str
# possible values: draft, active, retired, unknown
self.experimental = None
# type: bool
self.date = None
# type: str
self.publisher = None
# type: str
self.contact = None
# type: list
# reference to ContactDetail
self.description = None
# type: str
self.useContext = None
# type: list
# reference to UsageContext
self.jurisdiction = None
# type: list
# reference to CodeableConcept
self.immutable = None
# type: bool
self.purpose = None
# type: str
self.copyright = None
# type: str
self.extensible = None
# type: bool
self.compose = None
# reference to ValueSet_Compose
self.expansion = None
# reference to ValueSet_Expansion: identifier
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'active', 'retired', 'unknown']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, active, retired, unknown'))
def get_relationships(self):
return [
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'ValueSet',
'child_variable': 'useContext'},
{'parent_entity': 'ValueSet_Compose',
'parent_variable': 'object_id',
'child_entity': 'ValueSet',
'child_variable': 'compose'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ValueSet',
'child_variable': 'identifier'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'ValueSet',
'child_variable': 'contact'},
{'parent_entity': 'ValueSet_Expansion',
'parent_variable': 'identifier',
'child_entity': 'ValueSet',
'child_variable': 'expansion'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ValueSet',
'child_variable': 'jurisdiction'},
]
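# Illustrative sketch (not part of the original module): building a ValueSet from
# a plain dict. This assumes fhirbase.set_attributes() copies matching keys onto
# the instance, as the constructor above suggests.
def _example_value_set():
    vs = ValueSet({
        'url': 'http://example.org/fhir/ValueSet/example',
        'name': 'ExampleValueSet',
        'title': 'Example value set',
    })
    return vs.get_relationships()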
class ValueSet_Compose(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
lockedDate: If a locked date is defined, then the Content Logical
Definition must be evaluated using the current version as of the
locked date for referenced code system(s) and value set instances
where ValueSet.compose.include.version is not defined.
inactive: Whether inactive codes - codes that are not approved for
current use - are in the value set. If inactive = true, inactive codes
are to be included in the expansion, if inactive = false, the inactive
codes will not be included in the expansion. If absent, the behavior
is determined by the implementation, or by the applicable
ExpansionProfile (but generally, inactive codes would be expected to
be included).
include: Include one or more codes from a code system or other value
set(s).
exclude: Exclude one or more codes from the value set based on code
system filters and/or other value sets.
"""
__name__ = 'ValueSet_Compose'
def __init__(self, dict_values=None):
self.lockedDate = None
# type: str
self.inactive = None
# type: bool
self.include = None
# type: list
# reference to ValueSet_Include
self.exclude = None
# type: list
# reference to ValueSet_Include
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ValueSet_Include',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Compose',
'child_variable': 'include'},
{'parent_entity': 'ValueSet_Include',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Compose',
'child_variable': 'exclude'},
]
class ValueSet_Include(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
system: An absolute URI which is the code system from which the
selected codes come from.
version: The version of the code system that the codes are selected
from.
concept: Specifies a concept to be included or excluded.
filter: Select concepts by specify a matching criteria based on the
properties (including relationships) defined by the system. If
multiple filters are specified, they SHALL all be true.
valueSet: Selects concepts found in this value set. This is an
absolute URI that is a reference to ValueSet.url.
"""
__name__ = 'ValueSet_Include'
def __init__(self, dict_values=None):
self.system = None
# type: str
self.version = None
# type: str
self.concept = None
# type: list
# reference to ValueSet_Concept
self.filter = None
# type: list
# reference to ValueSet_Filter
self.valueSet = None
# type: list
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ValueSet_Concept',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Include',
'child_variable': 'concept'},
{'parent_entity': 'ValueSet_Filter',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Include',
'child_variable': 'filter'},
]
class ValueSet_Concept(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
code: Specifies a code for the concept to be included or excluded.
display: The text to display to the user for this concept in the
context of this valueset. If no display is provided, then applications
using the value set use the display specified for the code by the
system.
designation: Additional representations for this concept when used in
this value set - other languages, aliases, specialized purposes, used
for particular purposes, etc.
"""
__name__ = 'ValueSet_Concept'
def __init__(self, dict_values=None):
self.code = None
# type: str
self.display = None
# type: str
self.designation = None
# type: list
# reference to ValueSet_Designation
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ValueSet_Designation',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Concept',
'child_variable': 'designation'},
]
class ValueSet_Designation(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
language: The language this designation is defined for.
use: A code that details how this designation would be used.
value: The text value for this designation.
"""
__name__ = 'ValueSet_Designation'
def __init__(self, dict_values=None):
self.language = None
# type: str
self.use = None
# reference to Coding
self.value = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Designation',
'child_variable': 'use'},
]
class ValueSet_Filter(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
property: A code that identifies a property defined in the code
system.
op: The kind of operation to perform as a part of the filter criteria.
value: The match value may be either a code defined by the system, or
a string value, which is a regex match on the literal string of the
property value when the operation is 'regex', or one of the values
(true and false), when the operation is 'exists'.
"""
__name__ = 'ValueSet_Filter'
def __init__(self, dict_values=None):
self.property = None
# type: str
self.op = None
# type: str
# possible values: =, is-a, descendent-of, is-not-a, regex,
# in, not-in, generalizes, exists
self.value = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.op is not None:
for value in self.op:
if value is not None and value.lower() not in [
'=', 'is-a', 'descendent-of', 'is-not-a', 'regex', 'in', 'not-in',
'generalizes', 'exists']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, '=, is-a, descendent-of, is-not-a, regex, in, not-in, generalizes,'
'exists'))
class ValueSet_Expansion(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
identifier: An identifier that uniquely identifies this expansion of
the valueset. Systems may re-use the same identifier as long as the
expansion and the definition remain the same, but are not required to
do so.
timestamp: The time at which the expansion was produced by the
expanding system.
total: The total number of concepts in the expansion. If the number of
concept nodes in this resource is less than the stated number, then
the server can return more using the offset parameter.
offset: If paging is being used, the offset at which this resource
starts. I.e. this resource is a partial view into the expansion. If
paging is not being used, this element SHALL not be present.
parameter: A parameter that controlled the expansion process. These
parameters may be used by users of expanded value sets to check
whether the expansion is suitable for a particular purpose, or to pick
the correct expansion.
contains: The codes that are contained in the value set expansion.
"""
__name__ = 'ValueSet_Expansion'
def __init__(self, dict_values=None):
self.timestamp = None
# type: str
self.total = None
# type: int
self.offset = None
# type: int
self.parameter = None
# type: list
# reference to ValueSet_Parameter
self.contains = None
# type: list
# reference to ValueSet_Contains
self.identifier = None
# type: str
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ValueSet_Parameter',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Expansion',
'child_variable': 'parameter'},
{'parent_entity': 'ValueSet_Contains',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Expansion',
'child_variable': 'contains'},
]
class ValueSet_Parameter(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
name: The name of the parameter.
valueString: The value of the parameter.
valueBoolean: The value of the parameter.
valueInteger: The value of the parameter.
valueDecimal: The value of the parameter.
valueUri: The value of the parameter.
valueCode: The value of the parameter.
"""
__name__ = 'ValueSet_Parameter'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.valueString = None
# type: str
self.valueBoolean = None
# type: bool
self.valueInteger = None
# type: int
self.valueDecimal = None
        # type: float
self.valueUri = None
# type: str
self.valueCode = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class ValueSet_Contains(fhirbase):
"""
A value set specifies a set of codes drawn from one or more code
systems.
Attributes:
system: An absolute URI which is the code system in which the code for
this item in the expansion is defined.
abstract: If true, this entry is included in the expansion for
navigational purposes, and the user cannot select the code directly as
a proper value.
inactive: If the concept is inactive in the code system that defines
it. Inactive codes are those that are no longer to be used, but are
maintained by the code system for understanding legacy data.
version: The version of this code system that defined this code and/or
display. This should only be used with code systems that do not
enforce concept permanence.
code: The code for this item in the expansion hierarchy. If this code
is missing the entry in the hierarchy is a place holder (abstract) and
does not represent a valid code in the value set.
display: The recommended display for this item in the expansion.
designation: Additional representations for this item - other
languages, aliases, specialized purposes, used for particular
purposes, etc. These are relevant when the conditions of the expansion
do not fix to a single correct representation.
contains: Other codes and entries contained under this entry in the
hierarchy.
"""
__name__ = 'ValueSet_Contains'
def __init__(self, dict_values=None):
self.system = None
# type: str
self.abstract = None
# type: bool
self.inactive = None
# type: bool
self.version = None
# type: str
self.code = None
# type: str
self.display = None
# type: str
self.designation = None
# type: list
# reference to ValueSet_Designation
self.contains = None
# type: list
# reference to ValueSet_Contains
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ValueSet_Contains',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Contains',
'child_variable': 'contains'},
{'parent_entity': 'ValueSet_Designation',
'parent_variable': 'object_id',
'child_entity': 'ValueSet_Contains',
'child_variable': 'designation'},
]
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 21:56:44 2020
@author: sgnodde
"""
import datetime as dt
class Clock:
    def __init__(self, start_date_time=dt.datetime(2020, 1, 1)):
self.date_time = start_date_time
    def add_time(self, time_increment=dt.timedelta(days=1)):
        """Adds time to the clock.
        Parameters
        ----------
        time_increment : datetime.timedelta, optional
            Time added to the clock. The default is dt.timedelta(days=1).
Returns
-------
datetime.datetime
Time right now.
"""
self.date_time = self.date_time + time_increment
return self.date_time
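# Illustrative sketch (not part of the original module): advancing the clock by
# one hour. The start date below is an arbitrary assumption.
if __name__ == '__main__':
    clock = Clock(dt.datetime(2020, 1, 1))
    print(clock.add_time(dt.timedelta(hours=1)))  # 2020-01-01 01:00:00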
|
# inspired by https://nbviewer.jupyter.org/gist/kyamagu/6cff70840c10ca374e069a3a7eb00cb4/dogs-vs-cats.ipynb
from caffe2.python import caffe2_pb2
import numpy as np
import lmdb, json, skimage
from os.path import join
from DataHelpers import FetchRowCount, PreprocessImage
from DataTransforms import CropCenter# CalculateAugmentRatio
class LmdbDatasetWrapper(object):
def __init__(self, lmdbPath, batchSize, imageSize, newImageSize=None):
self.lmdbPath = lmdbPath
self.Enviroment = lmdb.open(lmdbPath, readonly=True, create=False)
self.Transaction = None
self.Cursor = None
self.BatchSize = batchSize
self.ImageSize = imageSize
self.NewImageSize = newImageSize
def __len__(self):
return FetchRowCount(self.lmdbPath)
def __getitem__(self, index):
if not self.Cursor.set_range(str(index)):
raise IndexError('invalid index')
_, imgTensorStr = self.Cursor.item()
return self._prepareImage(imgTensorStr)
def Open(self):
        if self.Transaction is None:
self.Transaction = self.Enviroment.begin(write=False)
self._createCursor()
def Close(self):
self.Cursor = None
self.Transaction = None
def _parseStringProto(self, proto):
rowProto = caffe2_pb2.TensorProtos()
rowProto.ParseFromString(proto)
imageArr = np.reshape(rowProto.protos[0].float_data, [3, self.ImageSize, self.ImageSize])
label = int(rowProto.protos[1].int32_data[0])
return imageArr, label
def _createCursor(self):
        if self.Cursor is not None:
return
self.Cursor = self.Transaction.cursor()
def __del__(self):
        if self.Transaction is not None:
self.Transaction = None
self.Enviroment.close()
def _prepareImage(self, imageProtoStr):
image, label = self._parseStringProto(imageProtoStr)
if self.NewImageSize not in [None, self.ImageSize]:
            if self.NewImageSize > self.ImageSize:
                # pad the smaller image into a larger, zero-filled canvas (centred)
                nImage = np.zeros((3, self.NewImageSize, self.NewImageSize)).astype(np.float32)
                startPos = (self.NewImageSize - self.ImageSize) // 2
nImage[:, startPos:self.ImageSize + startPos, startPos:self.ImageSize + startPos] = image
image = nImage
else:
image = CropCenter(image, self.NewImageSize)
image = image - 0.5
return image, label
def GetBatch(self, batchIndex):
data, labels = [], []
self.Cursor.set_range(str(batchIndex * self.BatchSize))
i = 0
for item in self.Cursor:
if i >= self.BatchSize:
break
image, label = self._prepareImage(item[1])
data.append(image)
labels.append(label)
i += 1
# for i in range(self.BatchSize):
# result.append(self[(batchIndex * self.BatchSize) + i])
return np.stack(data, axis=0).astype(np.float32), np.stack(labels, axis=0).astype(np.int32)
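# Illustrative sketch (not part of the original module): the usual
# Open -> GetBatch -> Close lifecycle of LmdbDatasetWrapper. The LMDB path,
# batch size and image size are assumptions for illustration only.
def _example_iterate_lmdb(lmdb_path='train_lmdb', batch_size=32, image_size=224):
    dataset = LmdbDatasetWrapper(lmdb_path, batch_size, image_size)
    dataset.Open()
    try:
        images, labels = dataset.GetBatch(0)  # NCHW float32 batch and int32 labels
        return images.shape, labels.shape
    finally:
        dataset.Close()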
class DatasetOnTheFly(object):
"""docstring for DatasetOnTheFly"""
def __init__(self, imageMapPath, batchSize, imageSize):#, maxDeviation=0, deviationStep=8):
with open(imageMapPath, 'r') as f:
self.imageMap = json.load(f)
self.baseDir = '/'.join(imageMapPath.split('/')[:-1])
self.batchSize = batchSize
self.imageSize = imageSize
# self.augmentAmplifier = CalculateAugmentRatio(maxDeviation, deviationStep)
def GetFirst(self):
# print self.imageMap[0][0]
img = skimage.io.imread(join(self.baseDir, self.imageMap[0][0]))
return PreprocessImage(img, self.imageSize, True, True), self.imageMap[0][1]
def GetBatch(self, batchIndex):
# enable repeatable iterating over images
        if batchIndex + 1 > len(self.imageMap) // self.batchSize:
            batchIndex = batchIndex % (len(self.imageMap) // self.batchSize)
data = []
label = np.empty((self.batchSize,), dtype=np.int32)
for i, imagePair in enumerate(self.imageMap[(self.batchSize*batchIndex):(self.batchSize*(batchIndex + 1))]):
img = skimage.io.imread(join(self.baseDir, imagePair[0]))
data.append(PreprocessImage(img, self.imageSize, True, True))
label[i] = int(imagePair[1])
return np.stack(data).astype(np.float32), np.stack(label).astype(np.int32)
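# Illustrative sketch (not part of the original module): streaming batches
# straight from an image-map JSON file. The map path is an assumption.
def _example_on_the_fly(image_map_path='dataset/train_map.json'):
    dataset = DatasetOnTheFly(image_map_path, batchSize=32, imageSize=224)
    return dataset.GetBatch(0)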
|
class MouseService:
"""A mouse service inteface."""
def get_coordinates(self):
"""Gets the current mouse coordinates as a Point.
Returns:
Point: An instance of the quest.casting.Point class.
"""
raise NotImplementedError("not implemented in base class")
def has_mouse_moved(self):
"""Whether or not the mouse has moved since the last frame.
Returns:
True if the mouse moved; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_down(self, button):
"""Detects if the given button is pressed.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button is pressed; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_pressed(self, button):
"""Detects if the given button was pressed once.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button was pressed once; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_released(self, button):
"""Detects if the given button was released once.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button was released once; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
def is_button_up(self, button):
"""Detects if the given button is released.
Args:
button: A string containing the button value, e.g. 'left', 'right' or 'middle'.
Returns:
True if the button is released; false if otherwise.
"""
raise NotImplementedError("not implemented in base class")
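# Illustrative sketch (not part of the original module): the smallest possible
# concrete MouseService. A real implementation would delegate to an input
# library and return quest.casting.Point instances from get_coordinates().
class _StubMouseService(MouseService):
    def get_coordinates(self):
        return (0, 0)  # placeholder; a real service returns a Point
    def has_mouse_moved(self):
        return False
    def is_button_down(self, button):
        return False
    def is_button_pressed(self, button):
        return False
    def is_button_released(self, button):
        return False
    def is_button_up(self, button):
        return True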
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
The hadut module provides access to some functionalities available
via the Hadoop shell.
"""
import logging
import os
import shlex
import subprocess
import pydoop
import pydoop.utils.misc as utils
import pydoop.hadoop_utils as hu
import pydoop.hdfs as hdfs
from .utils.py3compat import basestring
GLOB_CHARS = frozenset('*,?[]{}')
# --- FIXME: perhaps we need a more sophisticated tool for setting args ---
GENERIC_ARGS = frozenset([
"-conf", "-D", "-fs", "-jt", "-files", "-libjars", "-archives"
])
CSV_ARGS = frozenset([
"-files", "-libjars", "-archives"
])
# generic args must go before command-specific args
def _pop_generic_args(args):
generic_args = []
i = len(args) - 1
while i >= 0:
if args[i] in GENERIC_ARGS:
try:
args[i + 1]
except IndexError:
raise ValueError("option %s has no value" % args[i])
generic_args.extend(args[i: i + 2])
del args[i: i + 2]
i -= 1
return generic_args
# -files f1 -files f2 --> -files f1,f2
def _merge_csv_args(args):
merge_map = {}
i = len(args) - 1
while i >= 0:
if args[i] in CSV_ARGS:
try:
args[i + 1]
except IndexError:
raise ValueError("option %s has no value" % args[i])
k, v = args[i: i + 2]
merge_map.setdefault(k, []).append(v.strip())
del args[i: i + 2]
i -= 1
for k, vlist in merge_map.items():
args.extend([k, ",".join(vlist)])
# FIXME: the above functions share a lot of code
# -------------------------------------------------------------------------
def _construct_property_args(prop_dict):
return sum((['-D', '%s=%s' % p] for p in prop_dict.items()), [])
# 'a:b:c' OR ['a', 'b', 'c'] OR ['a:b', 'c'] --> {'a', 'b', 'c'}
def _to_set(classpath):
if isinstance(classpath, basestring):
classpath = [classpath]
return set(_.strip() for s in classpath for _ in s.split(":"))
# inherits from RuntimeError for backwards compatibility
class RunCmdError(RuntimeError):
"""
This exception is raised by run_cmd and all functions that make
use of it to indicate that the call failed (returned non-zero).
"""
def __init__(self, returncode, cmd, output=None):
RuntimeError.__init__(self, output)
self.returncode = returncode
self.cmd = cmd
def __str__(self):
m = RuntimeError.__str__(self)
if m:
return m # mimic old run_cmd behaviour
else:
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode
)
# keep_streams must default to True for backwards compatibility
def run_tool_cmd(tool, cmd, args=None, properties=None, hadoop_conf_dir=None,
logger=None, keep_streams=True):
"""
Run a Hadoop command.
If ``keep_streams`` is set to :obj:`True` (the default), the
stdout and stderr of the command will be buffered in memory. If
the command succeeds, the former will be returned; if it fails, a
``RunCmdError`` will be raised with the latter as the message.
This mode is appropriate for short-running commands whose "result"
is represented by their standard output (e.g., ``"dfsadmin",
["-safemode", "get"]``).
If ``keep_streams`` is set to :obj:`False`, the command will write
directly to the stdout and stderr of the calling process, and the
return value will be empty. This mode is appropriate for long
running commands that do not write their "real" output to stdout
(such as pipes).
.. code-block:: python
>>> hadoop_classpath = run_cmd('classpath')
"""
if logger is None:
logger = utils.NullLogger()
_args = [tool]
if hadoop_conf_dir:
_args.extend(["--config", hadoop_conf_dir])
_args.append(cmd)
if properties:
_args.extend(_construct_property_args(properties))
if args:
if isinstance(args, basestring):
args = shlex.split(args)
_merge_csv_args(args)
gargs = _pop_generic_args(args)
for seq in gargs, args:
_args.extend(map(str, seq))
logger.debug('final args: %r', (_args,))
if keep_streams:
p = subprocess.Popen(
_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
error = ""
stderr_iterator = iter(p.stderr.readline, b"")
for line in stderr_iterator:
error += line
logger.info("cmd stderr line: %s", line.strip())
output, _ = p.communicate()
else:
p = subprocess.Popen(_args, stdout=None, stderr=None, bufsize=1)
ret = p.wait()
error = 'command exited with %d status' % ret if ret else ''
output = ''
if p.returncode:
raise RunCmdError(p.returncode, ' '.join(_args), error)
return output
def run_cmd(cmd, args=None, properties=None, hadoop_home=None,
hadoop_conf_dir=None, logger=None, keep_streams=True):
tool = pydoop.hadoop_exec(hadoop_home=hadoop_home)
    return run_tool_cmd(tool, cmd, args=args, properties=properties,
                        hadoop_conf_dir=hadoop_conf_dir, logger=logger,
                        keep_streams=keep_streams)
def run_mapred_cmd(cmd, args=None, properties=None, hadoop_home=None,
hadoop_conf_dir=None, logger=None, keep_streams=True):
tool = pydoop.mapred_exec(hadoop_home=hadoop_home)
    return run_tool_cmd(tool, cmd, args=args, properties=properties,
                        hadoop_conf_dir=hadoop_conf_dir, logger=logger,
                        keep_streams=keep_streams)
def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the list of task trackers in the Hadoop cluster.
Each element in the returned list is in the ``(host, port)`` format.
All arguments are passed to :func:`run_class`.
If ``offline`` is :obj:`True`, try getting the list of task trackers from
the ``slaves`` file in Hadoop's configuration directory (no attempt is
made to contact the Hadoop daemons). In this case, ports are set to 0.
"""
if offline:
if not hadoop_conf_dir:
hadoop_conf_dir = pydoop.hadoop_conf()
slaves = os.path.join(hadoop_conf_dir, "slaves")
try:
with open(slaves) as f:
task_trackers = [(l.strip(), 0) for l in f]
except IOError:
task_trackers = []
else:
# run JobClient directly (avoids "hadoop job" deprecation)
stdout = run_class(
"org.apache.hadoop.mapred.JobClient", ["-list-active-trackers"],
properties=properties, hadoop_conf_dir=hadoop_conf_dir,
keep_streams=True
)
task_trackers = []
for line in stdout.splitlines():
if not line:
continue
line = line.split(":")
task_trackers.append((line[0].split("_")[1], int(line[-1])))
return task_trackers
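# Illustrative sketch (not part of the original module): reading the task
# tracker list from the local ``slaves`` file without contacting the daemons.
def _example_list_trackers():
    for host, port in get_task_trackers(offline=True):
        print("%s:%d" % (host, port))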
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
"""
return len(get_task_trackers(properties, hadoop_conf_dir, offline))
def dfs(args=None, properties=None, hadoop_conf_dir=None):
"""
Run the Hadoop file system shell.
All arguments are passed to :func:`run_class`.
"""
# run FsShell directly (avoids "hadoop dfs" deprecation)
return run_class(
"org.apache.hadoop.fs.FsShell", args, properties,
hadoop_conf_dir=hadoop_conf_dir, keep_streams=True
)
def path_exists(path, properties=None, hadoop_conf_dir=None):
"""
Return :obj:`True` if ``path`` exists in the default HDFS.
Keyword arguments are passed to :func:`dfs`.
This function does the same thing as :func:`hdfs.path.exists
<pydoop.hdfs.path.exists>`, but it uses a wrapper for the Hadoop
shell rather than the hdfs extension.
"""
try:
dfs(["-stat", path], properties, hadoop_conf_dir=hadoop_conf_dir)
except RuntimeError:
return False
return True
def run_jar(jar_name, more_args=None, properties=None, hadoop_conf_dir=None,
keep_streams=True):
"""
Run a jar on Hadoop (``hadoop jar`` command).
All arguments are passed to :func:`run_cmd` (``args = [jar_name] +
more_args``) .
"""
if hu.is_readable(jar_name):
args = [jar_name]
if more_args is not None:
args.extend(more_args)
return run_cmd(
"jar", args, properties, hadoop_conf_dir=hadoop_conf_dir,
keep_streams=keep_streams
)
else:
raise ValueError("Can't read jar file %s" % jar_name)
def run_class(class_name, args=None, properties=None, classpath=None,
hadoop_conf_dir=None, logger=None, keep_streams=True):
"""
Run a Java class with Hadoop (equivalent of running ``hadoop
<class_name>`` from the command line).
Additional ``HADOOP_CLASSPATH`` elements can be provided via
``classpath`` (either as a non-string sequence where each element
is a classpath element or as a ``':'``-separated string). Other
arguments are passed to :func:`run_cmd`.
.. code-block:: python
>>> cls = 'org.apache.hadoop.fs.FsShell'
>>> try: out = run_class(cls, args=['-test', '-e', 'file:/tmp'])
... except RunCmdError: tmp_exists = False
... else: tmp_exists = True
.. note::
``HADOOP_CLASSPATH`` makes dependencies available **only on the
client side**. If you are running a MapReduce application, use
``args=['-libjars', 'jar1,jar2,...']`` to make them available to
the server side as well.
"""
if logger is None:
logger = utils.NullLogger()
old_classpath = None
if classpath:
old_classpath = os.getenv('HADOOP_CLASSPATH', '')
if isinstance(classpath, basestring):
classpath = [classpath]
# Prepend the classpaths provided by the user to the existing
# HADOOP_CLASSPATH value. Order matters. We could work a little
# harder to avoid duplicates, but it's not essential
os.environ['HADOOP_CLASSPATH'] = ":".join(
classpath + old_classpath.split(':', 1)
)
logger.debug('HADOOP_CLASSPATH: %r', os.getenv('HADOOP_CLASSPATH'))
try:
res = run_cmd(class_name, args, properties,
hadoop_conf_dir=hadoop_conf_dir, logger=logger,
keep_streams=keep_streams)
finally:
if old_classpath is not None:
os.environ['HADOOP_CLASSPATH'] = old_classpath
return res
def run_pipes(executable, input_path, output_path, more_args=None,
properties=None, force_pydoop_submitter=False,
hadoop_conf_dir=None, logger=None, keep_streams=False):
"""
Run a pipes command.
``more_args`` (after setting input/output path) and ``properties``
are passed to :func:`run_cmd`.
If not specified otherwise, this function sets the properties
``mapreduce.pipes.isjavarecordreader`` and
``mapreduce.pipes.isjavarecordwriter`` to ``"true"``.
This function works around a bug in Hadoop pipes that affects
versions of Hadoop with security when the local file system is
used as the default FS (no HDFS); see
https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those
set-ups, the function uses Pydoop's own pipes submitter
application. You can force the use of Pydoop's submitter by
passing the argument force_pydoop_submitter=True.
"""
if logger is None:
logger = utils.NullLogger()
if not hdfs.path.exists(executable):
raise IOError("executable %s not found" % executable)
if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS):
raise IOError("input path %s not found" % input_path)
if properties is None:
properties = {}
properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true')
properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true')
if force_pydoop_submitter:
use_pydoop_submit = True
else:
use_pydoop_submit = False
ver = pydoop.hadoop_version_info()
if ver.has_security():
if ver.is_cdh_mrv2() and hdfs.default_is_local():
raise RuntimeError("mrv2 on local fs not supported yet")
use_pydoop_submit = hdfs.default_is_local()
args = [
"-program", executable,
"-input", input_path,
"-output", output_path,
]
if more_args is not None:
args.extend(more_args)
if use_pydoop_submit:
submitter = "it.crs4.pydoop.pipes.Submitter"
pydoop_jar = pydoop.jar_path()
args.extend(("-libjars", pydoop_jar))
return run_class(submitter, args, properties,
classpath=pydoop_jar, logger=logger,
keep_streams=keep_streams)
else:
return run_mapred_cmd("pipes", args=args, properties=properties,
hadoop_conf_dir=hadoop_conf_dir, logger=logger,
keep_streams=keep_streams)
def find_jar(jar_name, root_path=None):
"""
Look for the named jar in:
#. ``root_path``, if specified
#. working directory -- ``PWD``
#. ``${PWD}/build``
#. ``/usr/share/java``
Return the full path of the jar if found; else return :obj:`None`.
"""
jar_name = os.path.basename(jar_name)
root = root_path or os.getcwd()
paths = (root, os.path.join(root, "build"), "/usr/share/java")
for p in paths:
p = os.path.join(p, jar_name)
if os.path.exists(p):
return p
return None
def iter_mr_out_files(mr_out_dir):
for fn in hdfs.ls(mr_out_dir):
if hdfs.path.basename(fn).startswith("part"):
yield fn
def collect_output(mr_out_dir, out_file=None):
"""
Return all mapreduce output in ``mr_out_dir``.
Append the output to ``out_file`` if provided. Otherwise, return
the result as a single string (it is the caller's responsibility to
ensure that the amount of data retrieved fits into memory).
"""
if out_file is None:
output = []
for fn in iter_mr_out_files(mr_out_dir):
with hdfs.open(fn, "rt") as f:
output.append(f.read())
return "".join(output)
else:
block_size = 16777216
with open(out_file, 'a') as o:
for fn in iter_mr_out_files(mr_out_dir):
with hdfs.open(fn) as f:
data = f.read(block_size)
while len(data) > 0:
o.write(data)
data = f.read(block_size)
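# Illustrative sketch (not part of the original module): gathering the output of
# a finished job into memory. The output directory path is an assumption.
def _example_collect(mr_out_dir="wordcount_out"):
    # With out_file=None the part files are concatenated and returned as a string.
    return collect_output(mr_out_dir)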
class PipesRunner(object):
"""
    Allows one to set up and run pipes jobs, optionally automating a few
common tasks.
:type prefix: str
:param prefix: if specified, it must be a writable directory path
that all nodes can see (the latter could be an issue if the local
file system is used rather than HDFS)
:type logger: :class:`logging.Logger`
:param logger: optional logger
If ``prefix`` is set, the runner object will create a working
directory with that prefix and use it to store the job's input and
output --- the intended use is for quick application testing. If it
is not set, you **must** call :meth:`set_output` with an hdfs path
as its argument, and ``put`` will be ignored in your call to
:meth:`set_input`. In any event, the launcher script will be placed
in the output directory's parent (this has to be writable for the
job to succeed).
"""
def __init__(self, prefix=None, logger=None):
hadoop_version_info = pydoop.hadoop_version_info()
if hadoop_version_info.is_local():
raise pydoop.LocalModeNotSupported()
self.wd = self.exe = self.input = self.output = None
self.logger = logger or utils.NullLogger()
if prefix:
self.wd = utils.make_random_str(prefix=prefix)
hdfs.mkdir(self.wd)
for n in "input", "output":
setattr(self, n, hdfs.path.join(self.wd, n))
def clean(self):
"""
Remove the working directory, if any.
"""
if self.wd:
hdfs.rmr(self.wd)
def set_input(self, input_, put=False):
"""
Set the input path for the job. If ``put`` is :obj:`True`, copy
(local) ``input_`` to the working directory.
"""
if put and self.wd:
self.logger.info("copying input data to HDFS")
hdfs.put(input_, self.input)
else:
self.input = input_
self.logger.info("assigning input to %s", self.input)
def set_output(self, output):
"""
Set the output path for the job. Optional if the runner has been
instantiated with a prefix.
"""
self.output = output
self.logger.info("assigning output to %s", self.output)
def set_exe(self, pipes_code):
"""
Dump launcher code to the distributed file system.
"""
if not self.output:
raise RuntimeError("no output directory, can't create launcher")
parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/")))
self.exe = hdfs.path.join(parent, utils.make_random_str())
hdfs.dump(pipes_code, self.exe)
def run(self, **kwargs):
"""
Run the pipes job. Keyword arguments are passed to :func:`run_pipes`.
"""
if not (self.input and self.output and self.exe):
raise RuntimeError("setup incomplete, can't run")
self.logger.info("running MapReduce application")
run_pipes(self.exe, self.input, self.output, **kwargs)
def collect_output(self, out_file=None):
"""
Run :func:`collect_output` on the job's output directory.
"""
if self.logger.isEnabledFor(logging.INFO):
self.logger.info(
"collecting output %s", " to %s" % out_file if out_file else ''
)
self.logger.info("self.output %s", self.output)
return collect_output(self.output, out_file)
def __str__(self):
res = [self.__class__.__name__]
for n in "exe", "input", "output":
res.append(" %s: %s" % (n, getattr(self, n)))
return os.linesep.join(res) + os.linesep
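# Illustrative sketch (not part of the original module): the workflow described
# in the PipesRunner docstring. The working-directory prefix, local input path
# and pipes launcher code are assumptions for illustration only.
def _example_pipes_run(pipes_code, local_input="input.txt"):
    runner = PipesRunner(prefix="pydoop_test_")
    try:
        runner.set_input(local_input, put=True)
        runner.set_exe(pipes_code)
        runner.run()
        return runner.collect_output()
    finally:
        runner.clean()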
class PydoopScriptRunner(PipesRunner):
"""
Specialization of :class:`PipesRunner` to support the set up and running of
pydoop script jobs.
"""
PYDOOP_EXE = None
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.expanduser(os.path.join(path, 'pydoop'))
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
PYDOOP_EXE = exe_file
break
def run(self, script, more_args=None, pydoop_exe=PYDOOP_EXE):
args = [pydoop_exe, "script", script, self.input, self.output]
self.logger.info("running pydoop script")
retcode = subprocess.call(args + (more_args or []))
if retcode:
raise RuntimeError("Error running pydoop_script")
if __name__ == "__main__":
import doctest
FLAGS = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
doctest.testmod(optionflags=FLAGS)
|
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from enum import Enum, IntEnum
from concurrent.futures import ThreadPoolExecutor
import functools
import traceback
import mako.template
from util import (
warning,
existing_dir,
not_none,
info,
merge_dicts,
FluentIterable
)
from mailutil import _send_mail
from github.util import (
github_cfg_for_hostname,
GitHubRepositoryHelper,
GitHubRepoBranch,
_create_github_api_object,
)
from github.codeowners import CodeownersEnumerator, CodeOwnerEntryResolver
from concourse.factory import DefinitionFactory, RawPipelineDefinitionDescriptor
from concourse.enumerator import (
DefinitionDescriptorPreprocessor,
TemplateRetriever,
GithubOrganisationDefinitionEnumerator,
)
from concourse import client
import concourse.client.model
def replicate_pipelines(
cfg_set,
concourse_cfg,
job_mapping,
template_path,
template_include_dir,
unpause_pipelines: bool=True,
expose_pipelines: bool=True,
):
definition_enumerators = [
GithubOrganisationDefinitionEnumerator(
job_mapping=job_mapping,
cfg_set=cfg_set,
),
]
preprocessor = DefinitionDescriptorPreprocessor()
template_retriever = TemplateRetriever(template_path=template_path)
renderer = Renderer(
template_retriever=template_retriever,
template_include_dir=template_include_dir,
cfg_set=cfg_set,
)
deployer = ConcourseDeployer(
unpause_pipelines=unpause_pipelines,
expose_pipelines=expose_pipelines,
)
result_processor = ReplicationResultProcessor(
cfg_set=cfg_set,
)
replicator = PipelineReplicator(
definition_enumerators=definition_enumerators,
descriptor_preprocessor=preprocessor,
definition_renderer=renderer,
definition_deployer=deployer,
result_processor=result_processor,
)
return replicator.replicate()
class Renderer(object):
def __init__(self, template_retriever, template_include_dir, cfg_set):
self.template_retriever = template_retriever
if template_include_dir:
template_include_dir = os.path.abspath(template_include_dir)
            self.template_include_dir = template_include_dir
from mako.lookup import TemplateLookup
self.lookup = TemplateLookup([template_include_dir])
self.cfg_set = cfg_set
def render(self, definition_descriptor):
try:
definition_descriptor = self._render(definition_descriptor)
info('rendered pipeline {pn}'.format(pn=definition_descriptor.pipeline_name))
return RenderResult(
definition_descriptor,
render_status=RenderStatus.SUCCEEDED,
)
except Exception:
warning(
f"erroneous pipeline definition '{definition_descriptor.pipeline_name}' "
f"in repository '{definition_descriptor.main_repo.get('path')}'"
)
traceback.print_exc()
return RenderResult(
definition_descriptor,
render_status=RenderStatus.FAILED,
error_details=traceback.format_exc(),
)
def _render(self, definition_descriptor):
effective_definition = definition_descriptor.pipeline_definition
# handle inheritance
for override in definition_descriptor.override_definitions:
effective_definition = merge_dicts(effective_definition, override)
template_name = definition_descriptor.template_name()
template_contents = self.template_retriever.template_contents(template_name)
pipeline_name = definition_descriptor.pipeline_name
# support declaring jobs by either 'jobs' or 'variants'
# TODO: Add deprecation message for old 'variants' syntax.
jobs = effective_definition.get('jobs', {})
variants = effective_definition.get('variants', {})
if jobs and variants:
raise RuntimeError(
f"Both 'jobs' and 'variants' are defined in pipeline '{pipeline_name}'"
)
pipeline_definition = RawPipelineDefinitionDescriptor(
name=pipeline_name,
base_definition=effective_definition.get('base_definition', {}),
variants=jobs or variants,
template=template_name,
)
factory = DefinitionFactory(raw_definition_descriptor=pipeline_definition)
pipeline_metadata = dict()
pipeline_metadata['definition'] = factory.create_pipeline_definition()
pipeline_metadata['name'] = pipeline_definition.name
pipeline_metadata['target_team'] = definition_descriptor.concourse_target_team
generated_model = pipeline_metadata.get('definition')
# determine pipeline name (if there is main-repo, append the configured branch name)
for variant in pipeline_metadata.get('definition').variants():
# hack: take the first "main_repository" we find
if not variant.has_main_repository():
continue
main_repo = variant.main_repository()
pipeline_metadata['pipeline_name'] = '-'.join(
[pipeline_definition.name, main_repo.branch()]
)
break
else:
# fallback in case no main_repository was found
pipeline_metadata['pipeline_name'] = pipeline_definition.name
main_repo = None
t = mako.template.Template(template_contents, lookup=self.lookup)
definition_descriptor.pipeline = t.render(
instance_args=generated_model,
config_set=self.cfg_set,
pipeline=pipeline_metadata,
)
return definition_descriptor
class RenderStatus(Enum):
SUCCEEDED = 0
FAILED = 1
class RenderResult(object):
def __init__(
self,
definition_descriptor,
render_status,
error_details=None,
):
self.definition_descriptor = not_none(definition_descriptor)
self.render_status = not_none(render_status)
self.error_details = error_details
class DeployStatus(IntEnum):
SUCCEEDED = 1
FAILED = 2
SKIPPED = 4
CREATED = 8
class DeployResult(object):
def __init__(
self,
definition_descriptor,
deploy_status,
error_details=None,
):
self.definition_descriptor = not_none(definition_descriptor)
self.deploy_status = not_none(deploy_status)
self.error_details = error_details
class DefinitionDeployer(object):
def deploy(self, definition_descriptor, pipeline):
raise NotImplementedError('subclasses must overwrite')
class FilesystemDeployer(DefinitionDeployer):
def __init__(self, base_dir):
self.base_dir = existing_dir(base_dir)
def deploy(self, definition_descriptor):
try:
with open(os.path.join(self.base_dir, definition_descriptor.pipeline_name), 'w') as f:
f.write(definition_descriptor.pipeline)
return DeployResult(
definition_descriptor=definition_descriptor,
deploy_status=DeployStatus.SUCCEEDED,
)
except Exception as e:
warning(e)
return DeployResult(
definition_descriptor=definition_descriptor,
deploy_status=DeployStatus.FAILED,
)
class ConcourseDeployer(DefinitionDeployer):
def __init__(
self,
unpause_pipelines: bool,
expose_pipelines: bool=True
):
self.unpause_pipelines = unpause_pipelines
self.expose_pipelines = expose_pipelines
def deploy(self, definition_descriptor):
pipeline_definition = definition_descriptor.pipeline
pipeline_name = definition_descriptor.pipeline_name
try:
api = client.from_cfg(
concourse_cfg=definition_descriptor.concourse_target_cfg,
team_name=definition_descriptor.concourse_target_team,
)
response = api.set_pipeline(
name=pipeline_name,
pipeline_definition=pipeline_definition
)
info(
'Deployed pipeline: ' + pipeline_name +
' to team: ' + definition_descriptor.concourse_target_team
)
if self.unpause_pipelines:
api.unpause_pipeline(pipeline_name=pipeline_name)
if self.expose_pipelines:
api.expose_pipeline(pipeline_name=pipeline_name)
deploy_status = DeployStatus.SUCCEEDED
if response is concourse.client.model.SetPipelineResult.CREATED:
deploy_status |= DeployStatus.CREATED
elif response is concourse.client.model.SetPipelineResult.UPDATED:
pass
else:
raise NotImplementedError
return DeployResult(
definition_descriptor=definition_descriptor,
deploy_status=deploy_status,
)
except Exception as e:
import traceback
traceback.print_exc()
warning(e)
return DeployResult(
definition_descriptor=definition_descriptor,
deploy_status=DeployStatus.FAILED,
error_details=traceback.format_exc(),
)
class ReplicationResultProcessor(object):
def __init__(self, cfg_set):
self._cfg_set = cfg_set
def process_results(self, results):
# collect pipelines by concourse target (concourse_cfg, team_name) as key
concourse_target_results = {}
for result in results:
definition_descriptor = result.definition_descriptor
concourse_target_key = definition_descriptor.concourse_target_key()
if concourse_target_key not in concourse_target_results:
concourse_target_results[concourse_target_key] = set()
concourse_target_results[concourse_target_key].add(result)
for concourse_target_key, concourse_results in concourse_target_results.items():
# TODO: implement eq for concourse_cfg
concourse_cfg, concourse_team = next(iter(
concourse_results)).definition_descriptor.concourse_target()
concourse_results = concourse_target_results[concourse_target_key]
concourse_api = client.from_cfg(
concourse_cfg=concourse_cfg,
team_name=concourse_team,
)
# find pipelines to remove
deployed_pipeline_names = set(map(
lambda r: r.definition_descriptor.pipeline_name, concourse_results
))
pipelines_to_remove = set(concourse_api.pipelines()) - deployed_pipeline_names
for pipeline_name in pipelines_to_remove:
info('removing pipeline: {p}'.format(p=pipeline_name))
concourse_api.delete_pipeline(pipeline_name)
# trigger resource checks in new pipelines
self._initialise_new_pipeline_resources(concourse_api, concourse_results)
# order pipelines alphabetically
pipeline_names = list(concourse_api.pipelines())
pipeline_names.sort()
concourse_api.order_pipelines(pipeline_names)
# evaluate results
failed_descriptors = [
d for d in results
if not d.deploy_status & DeployStatus.SUCCEEDED
]
failed_count = len(failed_descriptors)
info('Successfully replicated {d} pipeline(s)'.format(d=len(results) - failed_count))
if failed_count == 0:
return True
warning('Errors occurred whilst replicating {d} pipeline(s):'.format(
d=failed_count,
)
)
all_notifications_succeeded = True
for failed_descriptor in failed_descriptors:
warning(failed_descriptor.definition_descriptor.pipeline_name)
try:
self._notify_broken_definition_owners(failed_descriptor)
except Exception:
warning('an error occurred whilst trying to send error notifications')
traceback.print_exc()
all_notifications_succeeded = False
        # signal error only if error notifications failed
return all_notifications_succeeded
def _notify_broken_definition_owners(self, failed_descriptor):
definition_descriptor = failed_descriptor.definition_descriptor
main_repo = definition_descriptor.main_repo
github_cfg = github_cfg_for_hostname(self._cfg_set, main_repo['hostname'])
github_api = _create_github_api_object(github_cfg)
repo_owner, repo_name = main_repo['path'].split('/')
githubrepobranch = GitHubRepoBranch(
github_config=github_cfg,
repo_owner=repo_owner,
repo_name=repo_name,
branch=main_repo['branch'],
)
repo_helper = GitHubRepositoryHelper.from_githubrepobranch(
githubrepobranch=githubrepobranch,
)
codeowners_enumerator = CodeownersEnumerator()
codeowners_resolver = CodeOwnerEntryResolver(github_api=github_api)
recipients = set(codeowners_resolver.resolve_email_addresses(
codeowners_enumerator.enumerate_remote_repo(github_repo_helper=repo_helper)
))
# in case no codeowners are available, resort to using the committer
if not recipients:
head_commit = repo_helper.repository.commit(main_repo['branch'])
user_ids = {
user_info.get('login')
for user_info
in (head_commit.committer, head_commit.author)
if user_info.get('login')
}
for user_id in user_ids:
user = github_api.user(user_id)
if user.email:
recipients.add(user.email)
info(f'Sending notification e-mail to {recipients} ({main_repo["path"]})')
email_cfg = self._cfg_set.email()
_send_mail(
email_cfg=email_cfg,
recipients=recipients,
subject='Your pipeline definition in {repo} is erroneous'.format(
repo=main_repo['path'],
),
mail_template='Error details:\n' + str(failed_descriptor.error_details),
)
def _initialise_new_pipeline_resources(self, concourse_api, results):
newly_deployed_pipeline_names = map(
lambda result: result.definition_descriptor.pipeline_name,
filter(
lambda result: result.deploy_status & DeployStatus.CREATED,
results,
)
)
for pipeline_name in newly_deployed_pipeline_names:
info('unpausing new pipeline {p}'.format(p=pipeline_name))
concourse_api.unpause_pipeline(pipeline_name)
info('triggering initial resource check for pipeline {p}'.format(p=pipeline_name))
trigger_pipeline_resource_check = functools.partial(
concourse_api.trigger_resource_check,
pipeline_name=pipeline_name,
)
FluentIterable(concourse_api.pipeline_resources(pipeline_name)) \
.filter(lambda resource: resource.has_webhook_token()) \
.map(lambda resource: trigger_pipeline_resource_check(resource_name=resource.name)) \
.as_list()
class PipelineReplicator(object):
def __init__(
self,
definition_enumerators,
descriptor_preprocessor,
definition_renderer,
definition_deployer,
result_processor=None,
):
self.definition_enumerators = definition_enumerators
self.descriptor_preprocessor = descriptor_preprocessor
self.definition_renderer = definition_renderer
self.definition_deployer = definition_deployer
self.result_processor = result_processor
def _enumerate_definitions(self):
for enumerator in self.definition_enumerators:
yield from enumerator.enumerate_definition_descriptors()
def _process_definition_descriptor(self, definition_descriptor):
if definition_descriptor.exception:
return DeployResult(
definition_descriptor=definition_descriptor,
deploy_status=DeployStatus.SKIPPED,
error_details=definition_descriptor.exception,
)
preprocessed = self.descriptor_preprocessor.process_definition_descriptor(
definition_descriptor
)
result = self.definition_renderer.render(preprocessed)
if result.render_status == RenderStatus.SUCCEEDED:
deploy_result = self.definition_deployer.deploy(result.definition_descriptor)
else:
deploy_result = DeployResult(
definition_descriptor=definition_descriptor,
deploy_status=DeployStatus.SKIPPED,
error_details=result.error_details,
)
return deploy_result
def _replicate(self):
executor = ThreadPoolExecutor(max_workers=8)
yield from executor.map(
self._process_definition_descriptor,
self._enumerate_definitions(),
)
def replicate(self):
results = []
for result in self._replicate():
results.append(result)
if self.result_processor:
return self.result_processor.process_results(results)
|
"""
Converter um DataFrame para CSV
"""
import pandas as pd
dataset = pd.DataFrame({'Frutas': ["Abacaxi", "Mamão"],
"Nomes": ["Éverton", "Márcia"]},
index=["Linha 1", "Linha 2"])
dataset.to_csv("dataset.csv")
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.testlib.testcase import BaseTestCase
from cfnlint import conditions
class TestEquals(BaseTestCase):
""" Test Equals Logic """
def test_equal_value_string(self):
""" Test equals setup """
template = 'us-east-1'
result = conditions.EqualsValue(template)
self.assertTrue(result == 'us-east-1')
self.assertFalse(result == 'us-west-2')
|
"""account excpetion"""
from .duplicate import DuplicateUser
from .id import UnknownID
from .not_found import UserNotFound
__all__ = ["UnknownID", "DuplicateUser", "UserNotFound"]
|
class Shape:
def __init__(self, pos, angle):
self.pos = pos
self.angle = angle
self.offset = [pos[0] * 2, pos[1] * 2, pos[2] * 2]
self.points = []
self.edges = []
def add_pos(self, new_pos=(0, 0, 0)):
for point in self.points:
point[0][0] += new_pos[0]
point[1][0] += new_pos[1]
point[2][0] += new_pos[2]
class Cube(Shape):
def __init__(self, pos=[0, 0, 0], angle=[0, 0, 0]):
super().__init__(pos, angle)
self.points.append([[-1 + self.offset[0]], [-1 + self.offset[1]], [1 + self.offset[2]]])
self.points.append([[1 + self.offset[0]], [-1 + self.offset[1]], [1 + self.offset[2]]])
self.points.append([[1 + self.offset[0]], [1 + self.offset[1]], [1 + self.offset[2]]])
self.points.append([[-1 + self.offset[0]], [1 + self.offset[1]], [1 + self.offset[2]]])
self.points.append([[-1 + self.offset[0]], [-1 + self.offset[1]], [-1 + self.offset[2]]])
self.points.append([[1 + self.offset[0]], [-1 + self.offset[1]], [-1 + self.offset[2]]])
self.points.append([[1 + self.offset[0]], [1 + self.offset[1]], [-1 + self.offset[2]]])
self.points.append([[-1 + self.offset[0]], [1 + self.offset[1]], [-1 + self.offset[2]]])
self.edges = [(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4), (0, 4), (1, 5), (2, 6), (3, 7)]
class Plane(Shape):
def __init__(self, pos=[0, 0, 0], angle=[0, 0, 0]):
super().__init__(pos, angle)
self.points.append([[-1 + self.offset[0]], [-1 + self.offset[1]], [0 + self.offset[2]]])
self.points.append([[1 + self.offset[0]], [-1 + self.offset[1]], [0 + self.offset[2]]])
self.points.append([[1 + self.offset[0]], [1 + self.offset[1]], [0 + self.offset[2]]])
self.points.append([[-1 + self.offset[0]], [1 + self.offset[1]], [0 + self.offset[2]]])
self.edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-27 12:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Wettbewerbe', '0013_auto_20170827_1228'),
]
operations = [
migrations.AlterField(
model_name='teilnahme',
name='person',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Wettbewerbe.Person'),
),
migrations.AlterField(
model_name='teilnahme',
name='veranstaltung',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Wettbewerbe.Veranstaltung'),
),
]
|
# Lead-allocation exercise (cleaned up from incomplete pseudocode).
def allocate_lead(leads, config):
    """Allocate leads to users according to config (placeholder)."""
    leads_list = [
        {"user_id": 1, "size": 500, "HQ": "US"},
        {"user_id": 2, "size": 700, "HQ": "US"},
        {"user_id": 3, "size": 50, "HQ": "CANADA"},
    ]
    return leads_list

# Capacity notes from the original draft:
#   User_A: [0, 500]
#   User_B: [0, X]
#   User_C: [500, X]
#   User_D: (unspecified)

class Graph:
    def __init__(self, user, category, hq):
        self.USER = user
        self.CATEGORY = category
        self.HQ = hq

    def allocate_lead(self, leads, config):
        # The original draft returned a grid-bounds check referencing undefined
        # attributes (ROW, COL, visited, graph); left unimplemented here.
        raise NotImplementedError
|
"""add columns to metrics_enrollment_status_cache table
Revision ID: 9c957ce496bf
Revises: 484c5d15ac06
Create Date: 2019-05-31 11:14:30.217021
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "9c957ce496bf"
down_revision = "484c5d15ac06"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"ALTER TABLE metrics_enrollment_status_cache ADD COLUMN participant_count int(11) NOT NULL AFTER `registered_count`;"
)
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("metrics_enrollment_status_cache", "participant_count")
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
class LogisticRegressor:
def __init__(self, d, n, lr=0.001):
        # Placeholders for input-output training data
self.X = tf.placeholder(tf.float32, \
shape=[None, d], name='input')
self.Y = tf.placeholder(tf.float32, \
name='output')
# Variables for weight and bias
self.b = tf.Variable(tf.zeros(n), dtype=tf.float32)
self.W = tf.Variable(tf.random_normal([d, n]), \
dtype=tf.float32)
        # The logistic regression model
h = tf.matmul(self.X, self.W) + self.b
self.Ypred = tf.nn.sigmoid(h)
        # Loss function: binary cross-entropy on the sigmoid outputs
        self.loss = tf.reduce_mean(
            -tf.reduce_sum(self.Y * tf.log(self.Ypred + 1e-10)
                           + (1 - self.Y) * tf.log(1 - self.Ypred + 1e-10),
                           reduction_indices=1),
            name='cross-entropy-loss')
        # Gradient descent with the configured learning rate to minimize the loss
optimizer = tf.train.GradientDescentOptimizer(lr)
self.optimize = optimizer.minimize(self.loss)
# Initializing Variables
init_op = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init_op)
def fit(self, X, Y, epochs=500):
total = []
for i in range(epochs):
_, l = self.sess.run([self.optimize, self.loss], \
feed_dict={self.X: X, self.Y: Y})
total.append(l)
if i % 1000 == 0:
print('Epoch {0}/{1}: Loss {2}'.format(i, epochs, l))
return total
def predict(self, X):
return self.sess.run(self.Ypred, feed_dict={self.X: X})
def get_weights(self):
return self.sess.run([self.W, self.b])
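# A minimal usage sketch (hedged: this class uses the TF1 graph API, so it needs
# a TF1 runtime or tf.compat.v1 with eager execution disabled; the data helper
# below is sklearn's make_classification and the shapes are illustrative):
#
#     from sklearn.datasets import make_classification
#     X, Y = make_classification(n_samples=200, n_features=4)
#     Y = Y.reshape(-1, 1).astype('float32')
#     model = LogisticRegressor(d=4, n=1, lr=0.01)
#     losses = model.fit(X.astype('float32'), Y, epochs=500)
#     predictions = model.predict(X.astype('float32'))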
|
# Exercise: Using your first API
# API documentation: http://bechdeltest.com/api/v1/doc
# Goal 1:
# Ask the user for the movie title they want to check
# Display all of the details about the movie returned by the API
#
# Things to keep in mind:
# How will your program behave when multiple movies are returned?
# How will your program behave when no movies are returned?
# How will your program behave with words like "the" in the title?
import requests
response = requests.get('http://bechdeltest.com/api/v1/getMovieByTitle?title=matrix').json()
print(response)
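# A minimal sketch for Goal 1 (hedged: it assumes the title search endpoint is
# "getMoviesByTitle" with a "title" query parameter returning a JSON list, per
# the API documentation linked above; adjust the endpoint name if it differs).
def describe_matches(title):
    url = 'http://bechdeltest.com/api/v1/getMoviesByTitle'
    movies = requests.get(url, params={'title': title}).json()
    if not movies:
        print('No movies found for "{}".'.format(title))
        return
    print('{} movie(s) found:'.format(len(movies)))
    for movie in movies:
        for key, value in movie.items():
            print('  {}: {}'.format(key, value))
        print('  ---')

# describe_matches(input('Movie title: '))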
# Goal 2:
# Check to see if the user input is a movie title or an ImdbID and use the proper endpoint
# Goal 3:
# Integrate this with the Open Movie Database API: http://www.omdbapi.com/
# Display all of the details from both APIs when searching for a movie.
# Note that you may need to prefix your ImdbIDs with 'tt' to get the search to work.
# Copy these URLs into your browser!
# To visualize as a CSV, copy the JSON into http://konklone.io/json
# Sample Bechdel test API returns: http://bechdeltest.com/api/v1/getMovieByImdbId?imdbid=0367631
# JSON:
# {
# "visible": "1",
# "date": "2009-12-05 05:13:37",
# "submitterid": "270",
# "rating": "3",
# "dubious": "0",
# "imdbid": "0367631",
# "id": "551",
# "title": "D.E.B.S.",
# "year": "2004"
# }
# JSON to CSV link: http://konklone.io/json/?id=11488879
# Sample Open Movie Database API returns: http://www.omdbapi.com/?i=tt0367631&t=
# JSON:
# {
# "Title": "D.E.B.S.",
# "Year": "2004",
# "Rated": "PG-13",
# "Released": "25 Mar 2005",
# "Runtime": "91 min",
# "Genre": "Action, Comedy, Romance",
# "Director": "Angela Robinson",
# "Writer": "Angela Robinson",
# "Actors": "Sara Foster, Jordana Brewster, Meagan Good, Devon Aoki",
# "Plot": "Plaid-skirted schoolgirls are groomed by a secret government agency to become the newest members of the elite national-defense group, D.E.B.S.",
# "Language": "English",
# "Country": "USA",
# "Awards": "1 win & 2 nominations.",
# "Poster": "http://ia.media-imdb.com/images/M/MV5BMjA0OTU5ODgyOF5BMl5BanBnXkFtZTcwODczNDgyMQ@@._V1_SX300.jpg",
# "Metascore": "42",
# "imdbRating": "5.2",
# "imdbVotes": "10,563",
# "imdbID": "tt0367631",
# "Type": "movie",
# "Response": "True"
# }
# JSON to CSV link: http://konklone.io/json/?id=11488839
|
import pytest
from app.fastapi.database import SessionLocal
from app.fastapi.models import Nutrition
@pytest.fixture
def db_session():
session = SessionLocal()
yield session
session.rollback()
session.close()
@pytest.fixture
def db_get(db_session):
def get(name):
db_session.flush()
return db_session.query(Nutrition).filter(Nutrition.name == name).first()
return get
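# A minimal usage sketch (hedged: assumes the Nutrition model accepts a "name"
# keyword argument and that SessionLocal points at a test database):
#
#     def test_nutrition_lookup(db_session, db_get):
#         db_session.add(Nutrition(name="apple"))
#         assert db_get("apple") is not None
#         assert db_get("does-not-exist") is None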
|
from capybara.helpers import desc, toregex
from capybara.queries.base_query import BaseQuery
class StyleQuery(BaseQuery):
"""
Queries for computed style values of a node.
Args:
expected_styles (Dict[str, str]): The expected style names and values.
wait (bool | int | float, optional): Whether and how long to wait for synchronization.
Defaults to :data:`capybara.default_max_wait_time`.
"""
def __init__(self, expected_styles, wait=None):
self.expected_styles = expected_styles
self.actual_styles = {}
self.options = {
"wait": wait}
self.node = None
@property
def wait(self):
""" int | float: How long to wait for synchronization. """
return self.normalize_wait(self.options["wait"])
def resolves_for(self, node):
"""
Resolves this query relative to the given node.
Args:
node (node.Base): The node to be evaluated.
Returns:
            bool: Whether the node's computed styles match the expected styles.
"""
self.node = node
self.actual_styles = node.style(*self.expected_styles.keys())
return all(
toregex(value).search(self.actual_styles[style])
for style, value in iter(self.expected_styles.items()))
@property
def failure_message(self):
""" str: A message describing the query failure. """
return (
"Expected node to have styles {expected}. "
"Actual styles were {actual}").format(
expected=desc(self.expected_styles),
actual=desc(self.actual_styles))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: utils.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
from contextlib import contextmanager
import operator
import tensorflow as tf
__all__ = ['LeastLoadedDeviceSetter',
'OverrideCachingDevice',
'override_to_local_variable',
'allreduce_grads',
'average_grads']
"""
Some utilities for building the graph.
"""
def _replace_global_by_local(kwargs):
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = set([tf.GraphKeys.GLOBAL_VARIABLES])
else:
collections = set(collections.copy())
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.add(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
@contextmanager
def override_to_local_variable(enable=True):
if enable:
def custom_getter(getter, name, *args, **kwargs):
_replace_global_by_local(kwargs)
return getter(name, *args, **kwargs)
with tf.variable_scope(
tf.get_variable_scope(),
custom_getter=custom_getter):
yield
else:
yield
# https://github.com/tensorflow/benchmarks/blob/48cbef14a592e02a14beee8e9aef3ad22cadaed1/scripts/tf_cnn_benchmarks/variable_mgr_util.py#L192-L218
class LeastLoadedDeviceSetter(object):
""" Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""
Args:
worker_device: the device to use for compute ops.
ps_devices: a list of device to use for Variable ops.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
def sanitize_name(name): # tensorflow/tensorflow#11484
return tf.DeviceSpec.from_string(name).to_string()
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return sanitize_name(self.worker_device)
device_index, _ = min(enumerate(
self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return sanitize_name(device_name)
def __str__(self):
return "LeastLoadedDeviceSetter-{}".format(self.worker_device)
def allreduce_grads(all_grads):
"""
All-reduce average the gradients among devices. Results are broadcasted to all devices.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
Returns:
(K x N x 2): same as input, but each grad is replaced by the average over K lists.
"""
from tensorflow.contrib import nccl
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads
new_all_grads = [] # NVar * NGPU * 2
with tf.name_scope('AvgGrad'):
for grad_and_vars in zip(*all_grads):
v = grad_and_vars[0][1]
grads = [g for g, _ in grad_and_vars]
summed = nccl.all_sum(grads)
grads_for_a_var = []
for (_, v), g in zip(grad_and_vars, summed):
with tf.device(g.device):
g = tf.multiply(g, 1.0 / nr_tower)
grads_for_a_var.append((g, v))
new_all_grads.append(grads_for_a_var)
# transpose
ret = [k for k in zip(*new_all_grads)]
return ret
def average_grads(all_grads, colocation=True):
"""
Average the gradients, on the device of each variable.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
colocation (bool): colocate gradient averaging with the variable
Returns:
(N x 2): A list of N (grad, var) tuples, where grad is averaged over K.
"""
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads[0]
ret = []
with tf.name_scope('AvgGrad'):
for grad_and_vars in zip(*all_grads):
# Ngpu * 2
v = grad_and_vars[0][1]
grads = [g for (g, _) in grad_and_vars]
if colocation:
with tf.device(v.device): # colocate summed grad with var
grad = tf.multiply(
tf.add_n(grads), 1.0 / nr_tower)
else:
grad = tf.multiply(
tf.add_n(grads), 1.0 / nr_tower)
ret.append((grad, v))
return ret
# https://github.com/tensorflow/benchmarks/blob/48cbef14a592e02a14beee8e9aef3ad22cadaed1/scripts/tf_cnn_benchmarks/variable_mgr_util.py#L140-L166
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size is None or not kwargs.get('trainable', True):
# TODO a lot of vars won't be saved then
_replace_global_by_local(kwargs)
return getter(*args, **kwargs)
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
|
def require_non_None(obj, msg=None, msg_supplier=None):
    """Ensures that obj is not None, raising TypeError otherwise.

    The message is either msg or provided by msg_supplier."""
    if obj is None:
        raise TypeError(msg or (msg_supplier() if msg_supplier else None))
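# A minimal usage sketch (names are illustrative):
#
#     config = load_config()
#     require_non_None(config, msg="configuration could not be loaded")
#
#     # or with a lazily built message:
#     require_non_None(user, msg_supplier=lambda: "no user with id " + str(user_id))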
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-25 20:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productdb', '0011_userprofile_regex_search'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='regex_search',
field=models.BooleanField(default=False, help_text='Use regular expression in any search field (fallback to simple search if no valid regular expression is used)', verbose_name='use regex search'),
),
]
|
# https://stackoverflow.com/questions/48065360/interpolate-polynomial-over-a-finite-field/48067397#48067397
import itertools
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import (gf_irreducible_p, gf_add, \
gf_sub, gf_mul, gf_rem, gf_gcdex)
from sympy.ntheory.primetest import isprime
class GF():
def __init__(self, p, n=1):
p, n = int(p), int(n)
if not isprime(p):
raise ValueError("p must be a prime number, not %s" % p)
if n <= 0:
raise ValueError("n must be a positive integer, not %s" % n)
self.p = p
self.n = n
if n == 1:
self.reducing = [1, 0]
else:
for c in itertools.product(range(p), repeat=n):
poly = (1, *c)
if gf_irreducible_p(poly, p, ZZ):
self.reducing = poly
break
def __eq__(self, other):
return self.p == other.p and self.n == other.n and self.reducing == other.reducing
def add(self, x, y):
return gf_add(x, y, self.p, ZZ)
def sub(self, x, y):
return gf_sub(x, y, self.p, ZZ)
def mul(self, x, y):
return gf_rem(gf_mul(x, y, self.p, ZZ), self.reducing, self.p, ZZ)
def minus(self, x):
return gf_sub([0], x, self.p, ZZ)
def reduce(self, x):
return gf_rem(gf_mul(x, [1], self.p, ZZ), self.reducing, self.p, ZZ)
# def div(self, x, y):
# return gf_div
def inv(self, x):
s, t, h = gf_gcdex(x, self.reducing, self.p, ZZ)
return s
def eval_poly(self, poly, point):
val = []
for c in poly:
val = self.mul(val, point)
val = self.add(val, c)
return val
# Class PolyRing, polynomials over a field
class PolyRing():
def __init__(self, field):
self.K = field
def add(self, p, q):
s = [self.K.add(x, y) for x, y in \
itertools.zip_longest(p[::-1], q[::-1], fillvalue=[])]
return s[::-1]
def sub(self, p, q):
s = [self.K.sub(x, y) for x, y in \
itertools.zip_longest(p[::-1], q[::-1], fillvalue=[])]
return s[::-1]
def mul(self, p, q):
if len(p) < len(q):
p, q = q, p
s = [[]]
for j, c in enumerate(q):
s = self.add(s, [self.K.mul(b, c) for b in p] + \
[[]] * (len(q) - j - 1))
return s
def poly_degree(poly):
return len(poly) - 1
# Calculate the multiplicative inverse of x modulo a prime p (Fermat's little theorem).
def mod_inv(x, p):
    return pow(x, p - 2, p)
# Evaluate a polynomial (given as a list of coefficients) at x.
# The coefficients must be in ascending order (``x**0`` to ``x**n``).
def poly_calc(x, coeffs):
    if not coeffs:
        return 0 * x
n = len(coeffs)
y = 0
for i in range(0, n):
y += coeffs[i]*x**i
return y
# Polynomial derivative.
# The coefficients must be in ascending order (``x**0`` to ``x**n``).
def deriv_poly(poly):
return [poly[i] * i for i in range(1, len(poly))]
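# A minimal usage sketch (hedged: requires sympy; polynomial operands for GF are
# lists of coefficients in descending order, matching the gf_* conventions above):
if __name__ == "__main__":
    F8 = GF(2, 3)                      # the field GF(2**3)
    a = [1, 0, 1]                      # x**2 + 1
    b = [1, 1]                         # x + 1
    print(F8.mul(a, b))                # [1, 0, 0], i.e. x**2 after reduction
    print(F8.mul(a, F8.inv(a)))        # [1], multiplying by the inverse
    # Integer helpers over the prime field GF(p):
    print(mod_inv(3, 7))               # 5, since 3 * 5 == 1 (mod 7)
    print(poly_calc(2, [1, 0, 3]))     # 13 == 1 + 0*2 + 3*2**2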
|
# -*- coding: utf-8 -*-
"""
A peak element is an element that is greater than its neighbors.
Given an input array where num[i] ≠ num[i+1], find a peak element and return
its index.
The array may contain multiple peaks, in that case return the index to any one
of the peaks is fine.
You may imagine that num[-1] = num[n] = -∞.
For example, in array [1, 2, 3, 1], 3 is a peak element and your function
should return the index number 2.
Note:
Your solution should be in logarithmic complexity.
"""
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
left = 0
right = n - 1
if n == 1:
return 0
while left <= right:
            mid = left + (right - left) // 2
if mid == 0 and nums[mid] > nums[mid + 1]:
return mid
elif mid == n - 1 and nums[mid] > nums[mid - 1]:
return mid
elif nums[mid - 1] < nums[mid] > nums[mid + 1]:
return mid
elif mid > 0 and nums[mid - 1] > nums[mid]:
right = mid - 1
else:
left = mid + 1
return mid
a1 = [1, 2]
a2 = [1, 2, 1]
s = Solution()
print(s.findPeakElement(a1))
print(s.findPeakElement(a2))
|
# Copyright (c) 2009-2015 testtools developers. See LICENSE for details.
__all__ = [
'ContainsAll',
'MatchesListwise',
'MatchesSetwise',
'MatchesStructure',
]
"""Matchers that operate with knowledge of Python data structures."""
from ..helpers import map_values
from ._higherorder import (
Annotate,
MatchesAll,
MismatchesAll,
)
from ._impl import Mismatch
def ContainsAll(items):
"""Make a matcher that checks whether a list of things is contained
in another thing.
The matcher effectively checks that the provided sequence is a subset of
the matchee.
"""
from ._basic import Contains
return MatchesAll(*map(Contains, items), first_only=False)
class MatchesListwise:
"""Matches if each matcher matches the corresponding value.
More easily explained by example than in words:
>>> from ._basic import Equals
>>> MatchesListwise([Equals(1)]).match([1])
>>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
>>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
Differences: [
2 != 1
1 != 2
]
>>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True)
>>> print (matcher.match([3, 4]).describe())
3 != 1
"""
def __init__(self, matchers, first_only=False):
"""Construct a MatchesListwise matcher.
:param matchers: A list of matcher that the matched values must match.
:param first_only: If True, then only report the first mismatch,
otherwise report all of them. Defaults to False.
"""
self.matchers = matchers
self.first_only = first_only
def match(self, values):
from ._basic import HasLength
mismatches = []
length_mismatch = Annotate(
"Length mismatch", HasLength(len(self.matchers))).match(values)
if length_mismatch:
mismatches.append(length_mismatch)
for matcher, value in zip(self.matchers, values):
mismatch = matcher.match(value)
if mismatch:
if self.first_only:
return mismatch
mismatches.append(mismatch)
if mismatches:
return MismatchesAll(mismatches)
class MatchesStructure:
"""Matcher that matches an object structurally.
'Structurally' here means that attributes of the object being matched are
compared against given matchers.
`fromExample` allows the creation of a matcher from a prototype object and
then modified versions can be created with `update`.
`byEquality` creates a matcher in much the same way as the constructor,
except that the matcher for each of the attributes is assumed to be
`Equals`.
`byMatcher` creates a similar matcher to `byEquality`, but you get to pick
the matcher, rather than just using `Equals`.
"""
def __init__(self, **kwargs):
"""Construct a `MatchesStructure`.
:param kwargs: A mapping of attributes to matchers.
"""
self.kws = kwargs
@classmethod
def byEquality(cls, **kwargs):
"""Matches an object where the attributes equal the keyword values.
Similar to the constructor, except that the matcher is assumed to be
Equals.
"""
from ._basic import Equals
return cls.byMatcher(Equals, **kwargs)
@classmethod
def byMatcher(cls, matcher, **kwargs):
"""Matches an object where the attributes match the keyword values.
Similar to the constructor, except that the provided matcher is used
to match all of the values.
"""
return cls(**map_values(matcher, kwargs))
@classmethod
def fromExample(cls, example, *attributes):
from ._basic import Equals
kwargs = {}
for attr in attributes:
kwargs[attr] = Equals(getattr(example, attr))
return cls(**kwargs)
def update(self, **kws):
new_kws = self.kws.copy()
for attr, matcher in kws.items():
if matcher is None:
new_kws.pop(attr, None)
else:
new_kws[attr] = matcher
return type(self)(**new_kws)
def __str__(self):
kws = []
for attr, matcher in sorted(self.kws.items()):
kws.append("{}={}".format(attr, matcher))
return "{}({})".format(self.__class__.__name__, ', '.join(kws))
def match(self, value):
matchers = []
values = []
for attr, matcher in sorted(self.kws.items()):
matchers.append(Annotate(attr, matcher))
values.append(getattr(value, attr))
return MatchesListwise(matchers).match(values)
class MatchesSetwise:
"""Matches if all the matchers match elements of the value being matched.
That is, each element in the 'observed' set must match exactly one matcher
from the set of matchers, with no matchers left over.
The difference compared to `MatchesListwise` is that the order of the
matchings does not matter.
"""
def __init__(self, *matchers):
self.matchers = matchers
def match(self, observed):
remaining_matchers = set(self.matchers)
not_matched = []
for value in observed:
for matcher in remaining_matchers:
if matcher.match(value) is None:
remaining_matchers.remove(matcher)
break
else:
not_matched.append(value)
if not_matched or remaining_matchers:
remaining_matchers = list(remaining_matchers)
# There are various cases that all should be reported somewhat
# differently.
# There are two trivial cases:
# 1) There are just some matchers left over.
# 2) There are just some values left over.
# Then there are three more interesting cases:
# 3) There are the same number of matchers and values left over.
# 4) There are more matchers left over than values.
# 5) There are more values left over than matchers.
if len(not_matched) == 0:
if len(remaining_matchers) > 1:
msg = "There were {} matchers left over: ".format(
len(remaining_matchers))
else:
msg = "There was 1 matcher left over: "
msg += ', '.join(map(str, remaining_matchers))
return Mismatch(msg)
elif len(remaining_matchers) == 0:
if len(not_matched) > 1:
return Mismatch(
"There were {} values left over: {}".format(
len(not_matched), not_matched))
else:
return Mismatch(
"There was 1 value left over: {}".format(
not_matched))
else:
common_length = min(len(remaining_matchers), len(not_matched))
if common_length == 0:
raise AssertionError("common_length can't be 0 here")
if common_length > 1:
msg = "There were {} mismatches".format(common_length)
else:
msg = "There was 1 mismatch"
if len(remaining_matchers) > len(not_matched):
extra_matchers = remaining_matchers[common_length:]
msg += " and {} extra matcher".format(len(extra_matchers))
if len(extra_matchers) > 1:
msg += "s"
msg += ': ' + ', '.join(map(str, extra_matchers))
elif len(not_matched) > len(remaining_matchers):
extra_values = not_matched[common_length:]
msg += " and {} extra value".format(len(extra_values))
if len(extra_values) > 1:
msg += "s"
msg += ': ' + str(extra_values)
return Annotate(
msg, MatchesListwise(remaining_matchers[:common_length])
).match(not_matched[:common_length])
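# A minimal usage sketch (hedged: assumes these matchers are re-exported from
# testtools.matchers, as in released testtools versions):
#
#     from testtools.matchers import Equals, MatchesSetwise, MatchesStructure
#
#     assert MatchesSetwise(Equals(1), Equals(2)).match([2, 1]) is None
#     assert MatchesSetwise(Equals(1)).match([1, 2]) is not None  # extra value 2
#
#     class Point:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     assert MatchesStructure.byEquality(x=1, y=2).match(Point(1, 2)) is None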
|
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
# from IPython.display import display
currentDirectory = os.getcwd()
final_train = pd.read_csv(currentDirectory+"/final_train.csv")
train = pd.read_csv('./unbalanced/train.csv')
test = pd.read_csv('./unbalanced/test.csv')
cols_to_drop = ['void()', 'subject']
train = train.drop(cols_to_drop, axis = 1)
test = test.drop(cols_to_drop, axis = 1)
target = ['Activity']
train_x, train_y = train.drop(target, axis = 1), train[target]
test_x, test_y = test.drop(target, axis = 1), test[target]
######################
#Making of forest
######################
number_trees = [10,20,40,80,100,150,200,500]
names_attributes = ['LAYING', 'SITTING', 'STANDING', 'WALKING', 'WALKING_DOWNSTAIRS', 'WALKING_UPSTAIRS']
result = pd.DataFrame(None, index = names_attributes)
def make_forest(number_trees, test_x, train_x, train_y, *args):
for trees in number_trees:
if args:
clf_basic_gini = RandomForestClassifier(trees, class_weight = args[0])
else:
clf_basic_gini = RandomForestClassifier(trees)
clf_basic_gini.fit(train_x, train_y.values.flatten())
pred_y_basic_gini= clf_basic_gini.predict(test_x)
        temp = list()
        prec = precision_score(test_y, pred_y_basic_gini, average=None)
        rec = recall_score(test_y, pred_y_basic_gini, average=None)
        f1 = f1_score(test_y, pred_y_basic_gini, average=None)
        for i in range(6):
            temp.append("{:.3f}/{:.3f}/{:.3f}".format(prec[i], rec[i], f1[i]))
if args:
result[("random forest with " + str(trees) + "trees with " + args[0] + " weight")] = temp
else:
result[("random forest with " + str(trees))+ "trees "] = temp
make_forest(number_trees, test_x, train_x, train_y, "balanced")
make_forest(number_trees, test_x, train_x, train_y)
print(result)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import tempfile
import tensorflow as tf
from bayes_opt import BayesianOptimization
from bayes_opt.event import Events
from bayes_opt.observer import JSONLogger
from bayes_opt.util import load_logs
import modeling
import tokenization
from run_wikijoin import (WikiJoinProcessor, model_fn_builder, file_based_convert_examples_to_features,
file_based_input_fn_builder, PaddingInputExample)
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dirs_path", None,
"调参时保存每次调参输出模型的路径。")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
## Tuning parameters
flags.DEFINE_string("tuning_metric", "eval_accuracy", "调参时优化的指标。")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
processor = WikiJoinProcessor()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
train_examples = processor.get_train_examples(FLAGS.data_dir)
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
tuning_metric = FLAGS.tuning_metric
output_dirs_path = FLAGS.output_dirs_path
tf.gfile.MakeDirs(output_dirs_path)
pbounds = {"learning_rate": (1e-5, 6e-5), "num_train_epochs": (1, 5)}
partial_function = functools.partial(tuned_function, bert_config=bert_config, label_list=label_list,
output_dirs_path=output_dirs_path, tokenizer=tokenizer,
train_examples=train_examples,
eval_examples=eval_examples, tuning_metric=tuning_metric)
optimizer = BayesianOptimization(
f=partial_function,
pbounds=pbounds,
random_state=0,
)
logger = JSONLogger(path=os.path.join(output_dirs_path, "logs_2.json"))
optimizer.subscribe(Events.OPTMIZATION_STEP, logger)
load_logs(optimizer, logs=[os.path.join(output_dirs_path, "logs.json")])
optimizer.maximize(init_points=0, n_iter=20)
with open(os.path.join(output_dirs_path, "best_2.json"), "w", encoding="utf-8") as fo_best:
json.dump(optimizer.max, fo_best, ensure_ascii=False)
def tuned_function(bert_config, label_list, output_dirs_path, tokenizer, train_examples, eval_examples, tuning_metric,
learning_rate, num_train_epochs):
output_dir = tempfile.mkdtemp(prefix=f"lr{learning_rate:.6f}_ep{num_train_epochs:.6f}_", dir=output_dirs_path)
tf.gfile.MakeDirs(output_dir)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
train_file = os.path.join(output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
return result[tuning_metric]
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dirs_path")
tf.app.run()
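# A hedged example invocation of this tuning script (the file name and paths are
# illustrative placeholders; the required flags match mark_flag_as_required above,
# and load_logs() expects an existing logs.json under --output_dirs_path):
#
#   python run_wikijoin_tuning.py \
#     --data_dir=./data/wikijoin \
#     --vocab_file=./uncased_L-12_H-768_A-12/vocab.txt \
#     --bert_config_file=./uncased_L-12_H-768_A-12/bert_config.json \
#     --init_checkpoint=./uncased_L-12_H-768_A-12/bert_model.ckpt \
#     --output_dirs_path=./tuning_runs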
|
import sys
sys.path.append("../../")
from drlgeb.ac.model import ActorCriticModel
from drlgeb.common import make_game, Agent
from collections import deque
import queue
import threading
import gym
from multiprocessing import Process, Pipe
import multiprocessing as mp
import tensorflow as tf
import numpy as np
from drlgeb.common.logging_util import default_logger as logging
import time
class RateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, init_rate, l: list):
super(RateSchedule, self).__init__()
self.init_rate = init_rate
self.l = l
def __call__(self, step):
for i in range(len(self.l)):
if step < self.l[i][0]:
if i == 0:
return self.init_rate
return self.l[i - 1][1]
return self.l[-1][1]
class TransitionExperience(object):
def __init__(self, state, action, reward, **kwargs):
self.state = state
self.action = action
self.reward = reward
for k, v in kwargs.items():
setattr(self, k, v)
class Master(Agent):
class WorkerState(object):
def __init__(self):
self.memory = []
self.score = 0
def __init__(self, env_id="SpaceInvaders-v0", **configs):
self.env_id = env_id
self.env = make_game(env_id=env_id)
self.state_shape = self.env.observation_space.shape
self.action_size = self.env.action_space.n
self.model = ActorCriticModel(self.state_shape, self.action_size)
self.nenvs = configs.get('nenvs', mp.cpu_count() * 2)
self.lr = RateSchedule(configs.get('lr', 0.01), [(120000, 0.0003), (720000, 0.0001)])
self.opt = tf.keras.optimizers.Adam(self.lr, epsilon=1e-3)
self.local_time_max = configs.get('local_time_max', 5)
self.gamma = configs.get('discount_gamma', 0.99)
self.batch_size = configs.get('batch_size', 128)
self.step_max = configs.get('step_max', 1e9)
self.no_graphics = configs.get('no_graphics', False)
self.scores = deque(maxlen=100)
super().__init__(name=env_id, **configs)
def get_action(self, state):
state = np.array([state], dtype=np.float32)
logits, _ = self.model(state)
policy = tf.nn.softmax(logits).numpy()[0]
action = np.random.choice(self.action_size, p=policy)
return action
def test_env(self, vis=False):
state = self.env.reset()
done = False
score = 0
while not done:
next_state, reward, done, _ = self.env.step(self.get_action(state))
state = next_state
if vis:
self.env.render()
score += reward
return score
def update(self):
step = 0
while step < self.step_max:
states = []
actions = []
discount_returns = []
action_probs = []
while True:
state, action, R, action_prob = self.queue.get()
states.append(state)
actions.append(action)
discount_returns.append(R)
action_probs.append(action_prob)
if len(states) == self.batch_size:
with tf.GradientTape() as tape:
states = np.array(states, dtype=np.float32)
actions = np.array(actions, dtype=np.int32)
discount_returns = np.array(discount_returns, dtype=np.float32)
action_probs = np.array(action_probs, dtype=np.float32)
logits, values = self.model(states)
values = tf.squeeze(values, [1])
policy = tf.nn.softmax(logits)
log_probs = tf.math.log(policy + 1e-6)
log_pi_a_given_s = tf.reduce_sum(log_probs * tf.one_hot(actions, self.action_size), 1)
advantage = tf.subtract(tf.stop_gradient(values), discount_returns)
pi_a_given_s = tf.reduce_sum(policy * tf.one_hot(actions, self.action_size), 1)
importance = tf.stop_gradient(tf.clip_by_value(pi_a_given_s / (action_probs + 1e-8), 0, 10))
policy_loss = tf.reduce_sum(log_pi_a_given_s * advantage * importance)
entropy_loss = tf.reduce_sum(policy * log_probs)
value_loss = tf.nn.l2_loss(values - discount_returns)
pred_reward = tf.reduce_mean(values)
loss = tf.add_n([policy_loss, entropy_loss * (0.01 if step < 480000 else 0.005),
value_loss * 0.5]) / self.batch_size
grads = tape.gradient(loss, self.model.trainable_variables)
# grads = [(tf.clip_by_norm(grad, 0.1 * tf.cast(tf.size(grad), tf.float32))) for grad in grads]
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
step += 1
self.record(step=step, pred_reward=pred_reward, loss=loss, policy_loss=policy_loss,
entropy_loss=entropy_loss, value_loss=value_loss, importance=tf.reduce_mean(importance))
break
def learn(self):
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.nenvs)])
self.work_states = [self.WorkerState() for _ in range(self.nenvs)]
self.ps = [Worker(i, remote, work_remote, self.env_id, self.no_graphics) for i, (remote, work_remote) in
enumerate(zip(self.remotes, self.work_remotes))]
self.queue = queue.Queue(maxsize=self.batch_size * 2 * 8)
for worker in self.ps:
worker.start()
print(f"{worker.name} Start!")
t = threading.Thread(target=self.recv_send)
t.start()
update = threading.Thread(target=self.update)
update.start()
for worker in self.ps:
worker.join()
t.join()
update.join()
def recv_send(self):
candidate = list(range(self.nenvs))
while True:
idxs = np.random.choice(candidate, 32)
for idx in idxs:
work_idx, state, reward, done = self.remotes[idx].recv()
self.work_states[idx].score += reward
if done:
self.scores.append(self.work_states[idx].score)
self.work_states[idx].score = 0
if len(self.work_states[idx].memory) > 0:
self.work_states[idx].memory[-1].reward = reward
if done or len(self.work_states[idx].memory) == self.local_time_max + 1:
self.collect_experience(idx, done)
action, value, action_prob = self.predict(state)
self.work_states[idx].memory.append(
TransitionExperience(state, action, reward=None, value=value, prob=action_prob))
self.remotes[idx].send(action)
def predict(self, state):
# print(np.array(state).shape)
# state = np.array(state)
# print(np.array(state).shape)
logit, value = self.model(np.array(state, dtype=np.float32)[None, :])
policy = tf.nn.softmax(logit).numpy()[0]
action = np.random.choice(self.action_size, p=policy)
return action, value.numpy()[0], policy[action]
def collect_experience(self, idx, done):
mem = self.work_states[idx].memory
if not done:
R = mem[-1].value[0]
last = mem[-1]
mem = mem[:-1]
else:
R = 0
mem.reverse()
for k in mem:
R = np.clip(k.reward, -1, 1) + self.gamma * R
# R = k.reward + self.gamma * R
self.queue.put([k.state, k.action, R, k.prob])
if not done:
self.work_states[idx].memory = [last]
else:
self.work_states[idx].memory = []
def record(self, step, **kwargs):
if step % 100 == 0:
train_mean_score = np.mean(self.scores) if len(self.scores) > 1 else 0.0
kwargs["train_mean_score"] = train_mean_score
log_txt = f"Step:{step}, " + ','.join([f" {k}:{v}" for k, v in kwargs.items()])
print(log_txt + "," + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
self.train_summary(step=step, **kwargs)
if step % 18000 == 0:
scores = [self.test_env() for _ in range(50)]
mean_score, max_score = np.mean(scores), np.max(scores)
logging.info("Mean Score: {}, Max Score: {}".format(np.mean(scores), np.max(scores)))
self.train_summary(step=step, mean_score=mean_score, max_score=max_score)
if step % 6000 == 0:
self.checkpoint_save(step // 6000 % 5)
class Worker(Process):
def __init__(self, idx: int, master_conn, worker_conn, env_id, no_graphics):
super().__init__()
self.idx = idx
self.name = 'worker-{}'.format(self.idx)
self.master_conn = master_conn
self.worker_conn = worker_conn
self.env_id = env_id
self.no_graphics = no_graphics
def run(self):
env = self.get_env()
state = env.reset()
reward, done = 0, False
while True:
self.worker_conn.send((self.idx, state, reward, done))
action = self.worker_conn.recv()
state, reward, done, _ = env.step(action)
if done:
state = env.reset()
def get_env(self):
return make_game(env_id=self.env_id, max_episode_steps=60000, worker_id=self.idx, no_graphics=self.no_graphics)
if __name__ == '__main__':
configs = {
'nenvs': 4,
'lr': 0.001,
'discount_gamma': 0.99,
'batch_size': 128,
'local_time_max': 5,
'step_max': 1e9,
'eval_episodes': 50,
'no_graphics': False
}
env_id = "/home/miku/PythonObjects/unity-exercise/envs/Basic/Basic"
agent = Master(env_id=env_id, **configs)
agent.learn()
# agent.play(5, model_path="/home/geb/PycharmProjects/drlgeb/drlgeb/ac/train_logs/train-SpaceInvaders-v0-20210120-214933")
|
from litcoin.secp256k1 import POINT_AT_INFINITY, SECP256K1_GENERATOR, SECP256K1_ORDER, \
secp256k1_random_scalar, secp256k1_add, secp256k1_multiply
import unittest
class TestSecp256k1(unittest.TestCase):
def test_secp256k1_random_scalar(self):
scalar_1: int = secp256k1_random_scalar()
scalar_2: int = secp256k1_random_scalar()
assert scalar_1 != scalar_2
assert 1 <= scalar_1 <= SECP256K1_ORDER - 1
assert 1 <= scalar_2 <= SECP256K1_ORDER - 1
def test_secp256k1_add(self):
self.assertEqual(POINT_AT_INFINITY, secp256k1_add(POINT_AT_INFINITY, POINT_AT_INFINITY))
self.assertEqual(SECP256K1_GENERATOR, secp256k1_add(SECP256K1_GENERATOR, POINT_AT_INFINITY))
self.assertEqual(SECP256K1_GENERATOR, secp256k1_add(POINT_AT_INFINITY, SECP256K1_GENERATOR))
# TODO: more tests
def test_secp256k1_multiply(self):
self.assertEqual(POINT_AT_INFINITY, secp256k1_multiply(0))
self.assertEqual(SECP256K1_GENERATOR, secp256k1_multiply(1))
self.assertEqual(SECP256K1_GENERATOR, secp256k1_multiply(1, SECP256K1_GENERATOR))
self.assertEqual(POINT_AT_INFINITY, secp256k1_multiply(SECP256K1_ORDER, SECP256K1_GENERATOR))
self.assertEqual(POINT_AT_INFINITY, secp256k1_multiply(0, POINT_AT_INFINITY))
self.assertEqual(POINT_AT_INFINITY, secp256k1_multiply(1, POINT_AT_INFINITY))
self.assertEqual(POINT_AT_INFINITY, secp256k1_multiply(2, POINT_AT_INFINITY))
# TODO: more tests
|
import geopandas as gpd
import numpy as np
import pytest
from scipy.ndimage.morphology import binary_erosion
from shapely.geometry.linestring import LineString
from shapely.geometry.multilinestring import MultiLineString
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.polygon import Polygon
import geoutils as gu
GLACIER_OUTLINES_URL = "http://public.data.npolar.no/cryoclim/CryoClim_GAO_SJ_1990.zip"
class TestVector:
glacier_outlines = gu.Vector(GLACIER_OUTLINES_URL)
def test_init(self) -> None:
vector = gu.Vector(GLACIER_OUTLINES_URL)
assert isinstance(vector, gu.Vector)
def test_copy(self) -> None:
vector2 = self.glacier_outlines.copy()
assert vector2 is not self.glacier_outlines
vector2.ds = vector2.ds.query("NAME == 'Ayerbreen'")
assert vector2.ds.shape[0] < self.glacier_outlines.ds.shape[0]
def test_query(self) -> None:
vector2 = self.glacier_outlines.query("NAME == 'Ayerbreen'")
assert vector2 is not self.glacier_outlines
assert vector2.ds.shape[0] < self.glacier_outlines.ds.shape[0]
def test_bounds(self) -> None:
bounds = self.glacier_outlines.bounds
assert bounds.left < bounds.right
assert bounds.bottom < bounds.top
assert bounds.left == self.glacier_outlines.ds.total_bounds[0]
assert bounds.bottom == self.glacier_outlines.ds.total_bounds[1]
assert bounds.right == self.glacier_outlines.ds.total_bounds[2]
assert bounds.top == self.glacier_outlines.ds.total_bounds[3]
class TestSynthetic:
# Create a synthetic vector file with a square of size 1, started at position (10, 10)
poly1 = Polygon([(10, 10), (11, 10), (11, 11), (10, 11)])
gdf = gpd.GeoDataFrame({"geometry": [poly1]}, crs="EPSG:4326")
vector = gu.Vector(gdf)
# Same with a square started at position (5, 5)
poly2 = Polygon([(5, 5), (6, 5), (6, 6), (5, 6)])
gdf = gpd.GeoDataFrame({"geometry": [poly2]}, crs="EPSG:4326")
vector2 = gu.Vector(gdf)
# Create a multipolygon with both
multipoly = MultiPolygon([poly1, poly2])
gdf = gpd.GeoDataFrame({"geometry": [multipoly]}, crs="EPSG:4326")
vector_multipoly = gu.Vector(gdf)
# Create a synthetic vector file with a square of size 5, started at position (8, 8)
poly3 = Polygon([(8, 8), (13, 8), (13, 13), (8, 13)])
gdf = gpd.GeoDataFrame({"geometry": [poly3]}, crs="EPSG:4326")
vector_5 = gu.Vector(gdf)
# Create a synthetic LineString geometry
lines = LineString([(10, 10), (11, 10), (11, 11)])
gdf = gpd.GeoDataFrame({"geometry": [lines]}, crs="EPSG:4326")
vector_lines = gu.Vector(gdf)
# Create a synthetic MultiLineString geometry
multilines = MultiLineString([[(10, 10), (11, 10), (11, 11)], [(5, 5), (6, 5), (6, 6)]])
gdf = gpd.GeoDataFrame({"geometry": [multilines]}, crs="EPSG:4326")
vector_multilines = gu.Vector(gdf)
def test_create_mask(self) -> None:
"""
Test Vector.create_mask.
"""
# First with given res and bounds -> Should be a 21 x 21 array with 0 everywhere except center pixel
vector = self.vector.copy()
out_mask = vector.create_mask(xres=1, bounds=(0, 0, 21, 21))
ref_mask = np.zeros((21, 21), dtype="bool")
ref_mask[10, 10] = True
assert out_mask.shape == (21, 21)
assert np.all(ref_mask == out_mask)
# Check that vector has not been modified by accident
assert vector.bounds == self.vector.bounds
assert len(vector.ds) == len(self.vector.ds)
assert vector.crs == self.vector.crs
# Then with a gu.Raster as reference, single band
rst = gu.Raster.from_array(np.zeros((21, 21)), transform=(1.0, 0.0, 0.0, 0.0, -1.0, 21.0), crs="EPSG:4326")
out_mask = vector.create_mask(rst)
assert out_mask.shape == (1, 21, 21)
# With gu.Raster, 2 bands -> fails...
# rst = gu.Raster.from_array(np.zeros((2, 21, 21)), transform=(1., 0., 0., 0., -1., 21.), crs='EPSG:4326')
# out_mask = vector.create_mask(rst)
# Test that buffer = 0 works
out_mask_buff = vector.create_mask(rst, buffer=0)
assert np.all(ref_mask == out_mask_buff)
# Test that buffer > 0 works
rst = gu.Raster.from_array(np.zeros((21, 21)), transform=(1.0, 0.0, 0.0, 0.0, -1.0, 21.0), crs="EPSG:4326")
out_mask = vector.create_mask(rst)
for buffer in np.arange(1, 8):
out_mask_buff = vector.create_mask(rst, buffer=buffer)
diff = out_mask_buff & ~out_mask
assert np.count_nonzero(diff) > 0
# Difference between masks should always be thinner than buffer + 1
eroded_diff = binary_erosion(diff.squeeze(), np.ones((buffer + 1, buffer + 1)))
assert np.count_nonzero(eroded_diff) == 0
# Test that buffer < 0 works
vector_5 = self.vector_5
out_mask = vector_5.create_mask(rst)
for buffer in np.arange(-1, -3, -1):
out_mask_buff = vector_5.create_mask(rst, buffer=buffer)
diff = ~out_mask_buff & out_mask
assert np.count_nonzero(diff) > 0
# Difference between masks should always be thinner than buffer + 1
eroded_diff = binary_erosion(diff.squeeze(), np.ones((abs(buffer) + 1, abs(buffer) + 1)))
assert np.count_nonzero(eroded_diff) == 0
def test_extract_vertices(self) -> None:
"""
Test that extract_vertices works with simple geometries.
"""
# Polygons
vertices = gu.geovector.extract_vertices(self.vector.ds)
assert len(vertices) == 1
assert vertices == [[(10.0, 10.0), (11.0, 10.0), (11.0, 11.0), (10.0, 11.0), (10.0, 10.0)]]
# MultiPolygons
vertices = gu.geovector.extract_vertices(self.vector_multipoly.ds)
assert len(vertices) == 2
assert vertices[0] == [(10.0, 10.0), (11.0, 10.0), (11.0, 11.0), (10.0, 11.0), (10.0, 10.0)]
assert vertices[1] == [(5.0, 5.0), (6.0, 5.0), (6.0, 6.0), (5.0, 6.0), (5.0, 5.0)]
# LineString
vertices = gu.geovector.extract_vertices(self.vector_lines.ds)
assert len(vertices) == 1
assert vertices == [[(10.0, 10.0), (11.0, 10.0), (11.0, 11.0)]]
# MultiLineString
vertices = gu.geovector.extract_vertices(self.vector_multilines.ds)
assert len(vertices) == 2
assert vertices[0] == [(10.0, 10.0), (11.0, 10.0), (11.0, 11.0)]
assert vertices[1] == [(5.0, 5.0), (6.0, 5.0), (6.0, 6.0)]
def test_generate_voronoi(self) -> None:
"""
Check that geovector.generate_voronoi_polygons works on a simple Polygon.
        It does not work with simple shapes such as squares or triangles, as the diagram is infinite.
For now, test on a set of two squares.
"""
# Check with a multipolygon
voronoi = gu.geovector.generate_voronoi_polygons(self.vector_multipoly.ds)
assert len(voronoi) == 2
vertices = gu.geovector.extract_vertices(voronoi)
assert vertices == [
[(5.5, 10.5), (10.5, 10.5), (10.5, 5.5), (5.5, 10.5)],
[(5.5, 10.5), (10.5, 5.5), (5.5, 5.5), (5.5, 10.5)],
]
# Check that it fails with proper error for too simple geometries
expected_message = "Invalid geometry, cannot generate finite Voronoi polygons"
with pytest.raises(ValueError, match=expected_message):
voronoi = gu.geovector.generate_voronoi_polygons(self.vector.ds)
def test_buffer_without_overlap(self) -> None:
"""
        Check that the non-overlapping buffer feature works. It does not work on simple geometries, so test on a MultiPolygon.
        Note that very simple geometries yield unexpected results, as in the second test case here.
"""
# Case 1, test with two squares, in separate Polygons
two_squares = gu.Vector(gpd.GeoDataFrame(geometry=[self.poly1, self.poly2], crs="EPSG:4326"))
# Check with buffers that should not overlap
# ------------------------------------------
buffer_size = 2
buffer = two_squares.buffer_without_overlap(buffer_size)
# Output should be of same size as input and same geometry type
assert len(buffer.ds) == len(two_squares.ds)
assert np.all(buffer.ds.geometry.geom_type == two_squares.ds.geometry.geom_type)
# Extract individual geometries
polys = []
for geom in buffer.ds.geometry:
if geom.geom_type in ["MultiPolygon"]:
                polys.extend(list(geom.geoms))
else:
polys.append(geom)
# Check they do not overlap
for i in range(len(polys)):
for j in range(i + 1, len(polys)):
assert not polys[i].intersects(polys[j])
# buffer should yield the same result as create_mask with buffer, minus the original mask
mask_nonoverlap = buffer.create_mask(xres=0.1, bounds=(0, 0, 21, 21))
mask_buffer = two_squares.create_mask(xres=0.1, bounds=(0, 0, 21, 21), buffer=buffer_size)
mask_nobuffer = two_squares.create_mask(xres=0.1, bounds=(0, 0, 21, 21))
assert np.all(mask_nobuffer | mask_nonoverlap == mask_buffer)
        # Case 2 - Check with buffers that overlap -> this case does not yield the expected result!
# -------------------------------
buffer_size = 5
buffer = two_squares.buffer_without_overlap(buffer_size)
# Output should be of same size as input and same geometry type
assert len(buffer.ds) == len(two_squares.ds)
assert np.all(buffer.ds.geometry.geom_type == two_squares.ds.geometry.geom_type)
# Extract individual geometries
polys = []
for geom in buffer.ds.geometry:
if geom.geom_type in ["MultiPolygon"]:
                polys.extend(list(geom.geoms))
else:
polys.append(geom)
# Check they do not overlap
for i in range(len(polys)):
for j in range(i + 1, len(polys)):
assert polys[i].intersection(polys[j]).area == 0
# buffer should yield the same result as create_mask with buffer, minus the original mask
mask_nonoverlap = buffer.create_mask(xres=0.1, bounds=(0, 0, 21, 21))
mask_buffer = two_squares.create_mask(xres=0.1, bounds=(0, 0, 21, 21), buffer=buffer_size)
mask_nobuffer = two_squares.create_mask(xres=0.1, bounds=(0, 0, 21, 21))
assert np.all(mask_nobuffer | mask_nonoverlap == mask_buffer)
|
# -*- coding: utf-8 -*-
import click
from .openligadb import OpenLigaDB
from . import helpers
pass_openligadb = click.make_pass_decorator(OpenLigaDB)
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
"""
Bundesliga results and stats for hackers.
bundesliga-cli is a CLI tool that provides access to Bundesliga
results and stats.
Uses openligadb-json-api.heroku.com API which is itself a JSON wrapper
around the OpenligaDB API (http://www.openligadb.de).
"""
ctx.obj = OpenLigaDB()
@cli.command()
@click.option('--matchday', '-d', help='Defines the matchday')
@click.option('--league', '-l', help='Defines the league (e.g. bl1, bl2, bl3)')
@click.option('--season', '-s', help='Defines the season (e.g. 2014, 2013)')
@pass_openligadb
def matchday(openligadb, season, matchday, league):
"""
Match results for the given matchday.
Get all available league shortcuts with 'buli leagues'.
Season format: e.g. 2014 or 2011
"""
if not league:
league = openligadb.ERSTE_LIGA
if not matchday:
matchday = openligadb.getNextMatchday(league)
if not season:
season = openligadb.getCurrentSeason(league) # ['name']
matches = openligadb.getMatchdayResults(matchday, season, league)
matches = helpers.process_matches(matches)
table = helpers.create_results_table()
for match in matches:
table.add_row(match)
print(table)
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@pass_openligadb
def next(openligadb, league):
"""
Shows the match results for the next/current matchday.
Get all available league shortcuts with 'buli leagues'.
"""
if not league:
league = openligadb.ERSTE_LIGA
matchday = openligadb.getNextMatchday(league)
season = openligadb.getCurrentSeason(league)
matches = openligadb.getMatchdayResults(matchday=matchday, season=season,
league=league)
matches = helpers.process_matches(matches)
table = helpers.create_results_table()
for match in matches:
table.add_row(match)
print(table)
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@pass_openligadb
def last(openligadb, league):
"""
Shows the match results for the last matchday.
Get all available league shortcuts with 'buli leagues'.
"""
matchday = openligadb.getRecentMatchday()
if not league:
league = openligadb.ERSTE_LIGA
season = openligadb.getCurrentSeason(league)
matches = openligadb.getMatchdayResults(matchday=matchday, season=season,
league=league)
matches = helpers.process_matches(matches)
table = helpers.create_results_table()
for match in matches:
table.add_row(match)
print(table)
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@click.option('--season', '-s', help='Defines the season')
@pass_openligadb
def table(openligadb, league, season):
"""
Shows the league table.
    By default the league table for the 1. Bundesliga and the current
season is displayed.
Get all available league shortcuts with 'buli leagues'.
Season format: e.g. 2014 or 2011
"""
if not league:
league = openligadb.ERSTE_LIGA
if not season:
season = openligadb.getCurrentSeason(league) # ['name']
table_stats = openligadb.getTable(season, league)
rows = helpers.process_table_stats(table_stats)
click.echo(helpers.create_table_table(rows))
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@click.option('--season', '-s', help='Defines the season')
@pass_openligadb
def teams(openligadb, league, season):
"""
Shows the teams for a league and season.
If no season is specified, the current season will be used.
If no league is specified, the 1. Fussball Bundesliga will be used.
League format: 'bl1' for 1. Bundesliga, 'bl2' for 2. Bundesliga, etc.
Get all available league shortcuts with 'buli leagues'.
Season format: e.g. 2014 or 2011
"""
if not league:
league = openligadb.ERSTE_LIGA
if not season:
season = openligadb.getCurrentSeason(league)
table = helpers.create_teams_table()
teams = openligadb.getTeams(season, league)
for team in teams:
row = [team['TeamName']]
table.add_row(row)
print(table)
# deprecated
# @pass_openligadb
# def leagues(openligadb):
# """
# Shows all available soccer leagues.
# The 'league shortcut' can be used to specify the league option for
# the other options.
# """
# table = helpers.create_leagues_table()
# leagues = openligadb.getAvailLeagues()
# for l in leagues:
# row = [l['leagueName'], l['leagueSaison'], l['leagueShortcut']]
# table.add_row(row)
# print(table)
if __name__ == '__main__':
cli(obj={})
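# Example invocations (illustrative sketch only; assumes the package exposes a
# 'buli' console script, as the command docstrings above suggest):
#   buli matchday -l bl1 -s 2014 -d 10   # results for matchday 10 of season 2014
#   buli table -l bl1                    # current 1. Bundesliga table
#   buli next                            # results for the next/current matchday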
|
#!/usr/bin/env python
# (c) 2016 DevicePilot Ltd.
# Definitions from https://github.com/OpenMobileAlliance/OMA-LWM2M-DevKit/blob/master/objects/lwm2m-object-definitions.json
objects = {
"0": {
"id": 0,
"name": "LWM2M Security",
"instancetype": "multiple",
"mandatory": True,
"description": "This LWM2M Object provides the keying material of a LWM2M Client appropriate to access a specified LWM2M Server. One Object Instance SHOULD address a LWM2M Bootstrap Server.\n These LWM2M Object Resources MUST only be changed by a LWM2M Bootstrap Server or Bootstrap from Smartcardand MUST NOT be accessible by any other LWM2M Server.",
"resourcedefs": {
"0": {
"id": 0,
"name": "LWM2M Server URI",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": "Uniquely identifies the LWM2M Server or LWM2M Bootstrap Server, and is in the form:\n\"coaps://host:port\", where host is an IP address or FQDN, and port is the UDP port of the Server."
},
"1": {
"id": 1,
"name": "Bootstrap Server",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "boolean",
"range": "",
"units": "",
"description": "Determines if the current instance concerns a LWM2M Bootstrap Server (True) or a standard LWM2M Server (False)"
},
"2": {
"id": 2,
"name": "Security Mode",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-3",
"units": "",
"description": "Determines which UDP payload security mode is used\n0: Pre-Shared Key mode\n1: Raw Public Key mode\n2: Certificate mode\n3: NoSec mode"
},
"3": {
"id": 3,
"name": "Public Key or Identity",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "",
"units": "",
"description": "Stores the LWM2M Client's Certificate (Certificate mode), public key (RPK mode) or PSK Identity (PSK mode). The format is defined in Section E.1.1."
},
"4": {
"id": 4,
"name": "Server Public Key or Identity",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "",
"units": "",
"description": "Stores the LWM2M Server's or LWM2M Bootstrap Server's Certificate (Certificate mode), public key (RPK mode) or PSK Identity (PSK mode). The format is defined in Section E.1.1."
},
"5": {
"id": 5,
"name": "Secret Key",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "",
"units": "",
"description": "Stores the secret key or private key of the security mode. The format of the keying material is defined by the security mode in Section E.1.1. This Resource MUST only be changed by a bootstrap server and MUST NOT be readable by any server."
},
"6": {
"id": 6,
"name": "SMS Security Mode",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-255",
"units": "",
"description": "Determines which SMS payload security mode is used (see section 7.2)\n0: Reserved for future use\n1: Secure Packet Structure mode device terminated\n2: Secure Packet Structure mode smartcard terminated\n3: NoSec mode\n255: Proprietary modes"
},
"7": {
"id": 7,
"name": "SMS Binding Key Parameters",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "6 bytes",
"units": "",
"description": "Stores the KIc, KID, SPI and TAR. The format is defined in Section D.1.2."
},
"8": {
"id": 8,
"name": "SMS Binding Secret Keys",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "32-48 bytes",
"units": "",
"description": "Stores the values of the keys for the SMS binding. \nThis resource MUST only be changed by a bootstrap server and MUST NOT be readable by any server."
},
"9": {
"id": 9,
"name": "LWM2M Server SMS Number",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "",
"units": "",
"description": "MSISDN used by the LWM2M Client to send messages to the LWM2M Server via the SMS binding. \nThe LWM2M Client SHALL silently ignore any SMS not originated from unknown MSISDN"
},
"10": {
"id": 10,
"name": "Short Server ID",
"operations": "-",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "1-65535",
"units": "",
"description": "This identifier uniquely identifies each LWM2M Server configured for the LWM2M Client.\nThis Resource MUST be set when the Bootstrap Server Resource has False value.\nDefault Short Server ID (i.e. 0) MUST NOT be used for identifying the LWM2M Server."
},
"11": {
"id": 11,
"name": "Client Hold Off Time",
"operations": "-",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "",
"units": "s",
"description": "Relevant information for a Bootstrap Server only.\nThe number of seconds to wait before initiating a Client Initiated Bootstrap once the LWM2M Client has determined it should initiate this bootstrap mode"
}
}
},
"1": {
"id": 1,
"name": "LWM2M Server",
"instancetype": "multiple",
"mandatory": True,
"description": "This LWM2M Objects provides the data related to a LWM2M Server. A Bootstrap Server has no such an Object Instance associated to it.",
"resourcedefs": {
"0": {
"id": 0,
"name": "Short Server ID",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "1-65535",
"units": "",
"description": "Used as link to associate server Object Instance."
},
"1": {
"id": 1,
"name": "Lifetime",
"operations": "RW",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "",
"units": "s",
"description": "Specify the lifetime of the registration in seconds."
},
"2": {
"id": 2,
"name": "Default Minimum Period",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "s",
"description": "The default value the LWM2M Client should use for the Minimum Period of an Observation in the absence of this parameter being included in an Observation.\nIf this Resource doesn't exist, the default value is 1."
},
"3": {
"id": 3,
"name": "Default Maximum Period",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "s",
"description": "The default value the LWM2M Client should use for the Maximum Period of an Observation in the absence of this parameter being included in an Observation."
},
"4": {
"id": 4,
"name": "Disable",
"operations": "E",
"instancetype": "single",
"mandatory": False,
"type": "",
"range": "",
"units": "",
"description": "If this Resource is executed, this LWM2M Server Object is disabled for a certain period defined in the Disabled Timeout Resource. After receiving 'Execute' operation, LWM2M Client MUST send response of the operation and perform de-registration process, and underlying network connection between the Client and Server MUST be disconnected to disable the LWM2M Server account.\nAfter the above process, the LWM2M Client MUST NOT send any message to the Server and ignore all the messages from the LWM2M Server for the period."
},
"5": {
"id": 5,
"name": "Disable Timeout",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "s",
"description": "A period to disable the Server. After this period, the LWM2M Client MUST perform registration process to the Server. If this Resource is not set, a default timeout value is 86400 (1 day)."
},
"6": {
"id": 6,
"name": "Notification Storing When Disabled or Offline",
"operations": "RW",
"instancetype": "single",
"mandatory": True,
"type": "boolean",
"range": "",
"units": "",
"description": "If True, the LWM2M Client stores 'Notify' operations to the LWM2M Server while the LWM2M Server account is disabled or the LWM2M Client is offline. After the LWM2M Server account is enabled or the LWM2M Client is online, the LWM2M Client reports the stored 'Notify' operations to the Server.\nIf False, the LWM2M Client discards all the 'Notify' operationsor temporally disables the Observe function while the LWM2M Server is disabled or the LWM2M Client is offline.\nThe default value is True.\nThe maximum number of storing Notification per the Server is up to the implementation."
},
"7": {
"id": 7,
"name": "Binding",
"operations": "RW",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "The possible values of Resource are listed in 5.2.1.1",
"units": "",
"description": "This Resource defines the transport binding configured for the LWM2M Client.\nIf the LWM2M Client supports the binding specified in this Resource, the LWM2M Client MUST use that for Current Binding and Mode."
},
"8": {
"id": 8,
"name": "Registration Update Trigger",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "If this Resource is executed the LWM2M Client MUST perform an 'Update' operation with this LWM2M Server using the Current Transport Binding and Mode."
}
}
},
"2": {
"id": 2,
"name": "LWM2M Access Control",
"instancetype": "multiple",
"mandatory": False,
"description": "Access Control Object is used to check whether the LWM2M Server has access right for performing a operation.",
"resourcedefs": {
"0": {
"id": 0,
"name": "Object ID",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "1-65534",
"units": "",
"description": "The Object ID and The Object Instance ID are applied for."
},
"1": {
"id": 1,
"name": "Object Instance ID",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-65535",
"units": "",
"description": "See Table 14: LWM2M Identifiers."
},
"2": {
"id": 2,
"name": "ACL",
"operations": "RW",
"instancetype": "multiple",
"mandatory": False,
"type": "integer",
"range": "16-bit",
"units": "",
"description": "Resource Instance ID MUST be the Short Server ID of a certain LWM2M Server which has an access right.\nResource Instance ID 0 is for default Short Server ID.\nThe Value of the Resource Instance contains the access rights.\nSetting each bit means the LWM2M Server has the access right for that operation. The bit order is specified as below.\n1st lsb: R(Read, Observe, Discover, Write Attributes)\n2nd lsb: W(Write)\n3rd lsb: E(Execute)\n4th lsb: D(Delete)\n5th lsb: C(Create)\nOther bits are reserved for future use"
},
"3": {
"id": 3,
"name": "Access Control Owner",
"operations": "RW",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-65535",
"units": "",
"description": "Short Server ID of a certain LWM2M Server. Only this LWM2M Server can manage these Resources of the Object Instance.\nValue MAX_ID=65535 is reserved for the Access Control Object Instances created during Bootstrap procedure."
}
}
},
"3": {
"id": 3,
"name": "Device",
"instancetype": "single",
"mandatory": True,
"description": "This LWM2M Object provides a range of device related information which can be queried by the LWM2M Server, and a device reboot and factory reset function.",
"resourcedefs": {
"0": {
"id": 0,
"name": "Manufacturer",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Human readable manufacturer name"
},
"1": {
"id": 1,
"name": "Model Number",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "A model identifier (manufacturer specified string)"
},
"2": {
"id": 2,
"name": "Serial Number",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Serial Number"
},
"3": {
"id": 3,
"name": "Firmware Version",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Current firmware version"
},
"4": {
"id": 4,
"name": "Reboot",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "Reboot the LWM2M Device to restore the Device from unexpected firmware failure."
},
"5": {
"id": 5,
"name": "Factory Reset",
"operations": "E",
"instancetype": "single",
"mandatory": False,
"type": "",
"range": "",
"units": "",
"description": "Perform factory reset of the LWM2M Device to make the LWM2M Device have the same configuration as at the initial deployment.\nWhen this Resource is executed, 'De-register' operation MAY be sent to the LWM2M Server(s) before factory reset of the LWM2M Device."
},
"6": {
"id": 6,
"name": "Available Power Sources",
"operations": "R",
"instancetype": "multiple",
"mandatory": False,
"type": "integer",
"range": "0-7",
"units": "",
"description": "0: DC power\n1: Internal Battery\n2: External Battery\n4: Power over Ethernet\n5: USB\n6: AC (Mains) power\n7: Solar"
},
"7": {
"id": 7,
"name": "Power Source Voltage",
"operations": "R",
"instancetype": "multiple",
"mandatory": False,
"type": "integer",
"range": "",
"units": "mV",
"description": "Present voltage for each Available Power Sources Resource Instance.\nEach Resource Instance ID MUST map to the value of Available Power Sources Resource."
},
"8": {
"id": 8,
"name": "Power Source Current",
"operations": "R",
"instancetype": "multiple",
"mandatory": False,
"type": "integer",
"range": "",
"units": "mA",
"description": "Present current for each Available Power Source"
},
"9": {
"id": 9,
"name": "Battery Level",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-100",
"units": "%",
"description": "Contains the current battery level as a percentage (with a range from 0 to 100). This value is only valid when the value of Available Power Sources Resource is 1."
},
"10": {
"id": 10,
"name": "Memory Free",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "KB",
"description": "Estimated current available amount of storage space which can store data and software in the LWM2M Device (expressed in kilobytes)."
},
"11": {
"id": 11,
"name": "Error Code",
"operations": "R",
"instancetype": "multiple",
"mandatory": True,
"type": "integer",
"range": "",
"units": "",
"description": "0: No error\n1: Low battery power\n2: External power supply off\n3: GPS module failure\n4: Low received signal strength\n5: Out of memory\n6: SMS failure\n7: IP connectivity failure\n8: Peripheral malfunction\n\nWhen the single Device Object Instance is initiated, there is only one error code Resource Instance whose value is equal to 0 that means no error. When the first error happens, the LWM2M Client changes error code Resource Instance to any non-zero value to indicate the error type. When any other error happens, a new error code Resource Instance is created.\nThis error code Resource MAY be observed by the LWM2M Server. How to deal with LWM2M Client's error report depends on the policy of the LWM2M Server."
},
"12": {
"id": 12,
"name": "Reset Error Code",
"operations": "E",
"instancetype": "single",
"mandatory": False,
"type": "",
"range": "",
"units": "",
"description": "Delete all error code Resource Instances and create only one zero-value error code that implies no error."
},
"13": {
"id": 13,
"name": "Current Time",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "time",
"range": "",
"units": "",
"description": "Current UNIX time of the LWM2M Client.\nThe LWM2M Client should be responsible to increase this time value as every second elapses.\nThe LWM2M Server is able to write this Resource to make the LWM2M Client synchronized with the LWM2M Server."
},
"14": {
"id": 14,
"name": "UTC Offset",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Indicates the UTC offset currently in effect for this LWM2M Device. UTC+X [ISO 8601]."
},
"15": {
"id": 15,
"name": "Timezone",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Indicates in which time zone the LWM2M Device is located, in IANA Timezone (TZ) database format."
},
"16": {
"id": 16,
"name": "Supported Binding and Modes",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "",
"units": "",
"description": "Indicates which bindings and modes are supported in the LWM2M Client. The possible values of Resource are combination of \"U\" or \"UQ\" and \"S\" or \"SQ\"."
}
}
},
"4": {
"id": 4,
"name": "Connectivity Monitoring",
"instancetype": "single",
"mandatory": False,
"description": "This LWM2M Object enables monitoring of parameters related to network connectivity.\nIn this general connectivity Object, the Resources are limited to the most general cases common to most network bearers. It is recommended to read the description, which refers to relevant standard development organizations (e.g. 3GPP, IEEE).\nThe goal of the Connectivity Monitoring Object is to carry information reflecting the more up to date values of the current connection for monitoring purposes. Resources such as Link Quality, Radio Signal Strenght, Cell ID are retrieved during connected mode at least for cellular networks.",
"resourcedefs": {
"0": {
"id": 0,
"name": "Network Bearer",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "",
"units": "",
"description": "Indicates the network bearer used for the current LWM2M communication session from the below network bearer list.\n0~20 are Cellular Bearers\n0: GSM cellular network\n1: TD-SCDMA cellular network\n2: WCDMA cellular network\n3: CDMA2000 cellular network\n4: WiMAX cellular network\n5: LTE-TDD cellular network\n6: LTE-FDD cellular network\n7~20: Reserved for other type cellular network\n21~40 are Wireless Bearers\n21: WLAN network\n22: Bluetooth network\n23: IEEE 802.15.4 network\n24~40: Reserved for other type local wireless network\n41~50 are Wireline Bearers\n41: Ethernet\n42: DSL\n43: PLC\n44~50: reserved for others type wireline networks."
},
"1": {
"id": 1,
"name": "Available Network Bearer",
"operations": "R",
"instancetype": "multiple",
"mandatory": True,
"type": "integer",
"range": "",
"units": "",
"description": "Indicates list of current available network bearer. Each Resource Instance has a value from the network bearer list."
},
"2": {
"id": 2,
"name": "Radio Signal Strength",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "",
"units": "dBm",
"description": "This node contains the average value of the received signal strength indication used in the current network bearer in case Network Bearer Resource indicates a Cellular Network (RXLEV range 0..64) 0 is < 110dBm, 64 is > -48 dBm).\nRefer to [3GPP 44.018] for more details on Network Measurement Report encoding and [3GPP 45.008] or for Wireless Networks refer to the appropriate wireless standard."
},
"3": {
"id": 3,
"name": "Link Quality",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "",
"description": "This contains received link quality e.g., LQI for IEEE 802.15.4, (Range (0..255)), RxQual Downlink (for GSM range is 0..7).\nRefer to [3GPP 44.018] for more details on Network Measurement Report encoding."
},
"4": {
"id": 4,
"name": "IP Addresses",
"operations": "R",
"instancetype": "multiple",
"mandatory": True,
"type": "string",
"range": "",
"units": "",
"description": "The IP addresses assigned to the connectivity interface. (e.g. IPv4, IPv6, etc.)"
},
"5": {
"id": 5,
"name": "Router IP Addresse",
"operations": "R",
"instancetype": "multiple",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "The IP address of the next-hop IP router.\nNote: This IP Address doesn't indicate the Server IP address."
},
"6": {
"id": 6,
"name": "Link Utilization",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-100",
"units": "%",
"description": "The average utilization of the link to the next-hop IP router in %."
},
"7": {
"id": 7,
"name": "APN",
"operations": "R",
"instancetype": "multiple",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Access Point Name in case Network Bearer Resource is a Cellular Network."
},
"8": {
"id": 8,
"name": "Cell ID",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "",
"description": "Serving Cell ID in case Network Bearer Resource is a Cellular Network.\nAs specified in TS [3GPP 23.003] and in [3GPP. 24.008]. Range (0..65535) in GSM/EDGE\nUTRAN Cell ID has a length of 28 bits.\nCell Identity in WCDMA/TD-SCDMA. Range: (0..268435455).\nLTE Cell ID has a length of 28 bits.\nParameter definitions in [3GPP 25.331]."
},
"9": {
"id": 9,
"name": "SMNC",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "%",
"description": "Serving Mobile Network Code. In case Network Bearer Resource has 0(cellular network). Range (0..999).\nAs specified in TS [3GPP 23.003]."
},
"10": {
"id": 10,
"name": "SMCC",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "",
"description": "Serving Mobile Country Code. In case Network Bearer Resource has 0 (cellular network). Range (0..999).\nAs specified in TS [3GPP 23.003]."
}
}
},
"5": {
"id": 5,
"name": "Firmware Update",
"instancetype": "single",
"mandatory": False,
"description": "This LWM2M Object enables management of firmware which is to be updated. This Object includes installing firmware package, updating firmware, and performing actions after updating firmware.",
"resourcedefs": {
"0": {
"id": 0,
"name": "Package",
"operations": "W",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "",
"units": "",
"description": "Firmware package"
},
"1": {
"id": 1,
"name": "Package URI",
"operations": "W",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": "URI from where the device can download the firmware package by an alternative mechanism. As soon the device has received the Package URI it performs the download at the next practical opportunity."
},
"2": {
"id": 2,
"name": "Update",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "Updates firmware by using the firmware package stored in Package, or, by using the firmware downloaded from the Package URI.\nThis Resource is only executable when the value of the State Resource is Downloaded."
},
"3": {
"id": 3,
"name": "State",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "1-3",
"units": "",
"description": "Indicates current state with respect to this firmware update. This value is set by the LWM2M Client.\n1: Idle (before downloading or after updating)\n2: Downloading (The data sequence is on the way)\n3: Downloaded\nIf writing the firmware package to Package Resource is done, or, if the device has downloaded the firmware package from the Package URI the state changes to Downloaded.\nIf writing an empty string to Package Resource is done or writing an empty string to Package URI is done, the state changes to Idle.\nIf performing the Update Resource failed, the state remains at Downloaded.\nIf performing the Update Resource was successful, the state changes from Downloaded to Idle."
},
"4": {
"id": 4,
"name": "Update Supported Objects",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "boolean",
"range": "",
"units": "",
"description": "If this value is True, the LWM2M Client MUST inform the registered LWM2M Servers of Objects and Object Instances parameter by sending an Update or Registration message after the firmware update operation at the next practical opportunity if supported Objects in the LWM2M Client have changed, in order for the LWM2M Servers to promptly manage newly installed Objects.\nIf False, Objects and Object Instances parameter MUST be reported at the next periodic Update message.\nThe default value is False."
},
"5": {
"id": 5,
"name": "Update Result",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-6",
"units": "",
"description": "Contains the result of downloading or updating the firmware\n0: Default value. Once the updating process is initiated, this Resource SHOULD be reset to default value.\n1: Firmware updated successfully,\n2: Not enough storage for the new firmware package.\n3. Out of memory during downloading process.\n4: Connection lost during downloading process.\n5: CRC check failure for new downloaded package.\n6: Unsupported package type.\n7: Invalid URI\nThis Resource MAY be reported by sending Observe operation."
}
}
},
"6": {
"id": 6,
"name": "Location",
"instancetype": "single",
"mandatory": False,
"description": "This LWM2M Objects provide a range of device related information which can be queried by the LWM2M Server, and a device reboot and factory reset function.",
"resourcedefs": {
"0": {
"id": 0,
"name": "Latitude",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "",
"units": "Deg",
"description": "The decimal notation of latitude, e.g. -43.5723 [World Geodetic System 1984]."
},
"1": {
"id": 1,
"name": "Longitude",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "",
"units": "Deg",
"description": "The decimal notation of longitude, e.g. 153.21760 [World Geodetic System 1984]."
},
"2": {
"id": 2,
"name": "Altitude",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "m",
"description": "The decimal notation of altitude in meters above sea level."
},
"3": {
"id": 3,
"name": "Uncertainty",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "",
"units": "m",
"description": "The accuracy of the position in meters."
},
"4": {
"id": 4,
"name": "Velocity",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "opaque",
"range": "",
"units": "Refers to 3GPP GAD specs",
"description": "The velocity of the device as defined in 3GPP 23.032 GAD specification. This set of values may not be available if the device is static."
},
"5": {
"id": 5,
"name": "Timestamp",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "time",
"range": "0-6",
"units": "",
"description": "The timestamp of when the location measurement was performed."
}
}
},
"7": {
"id": 7,
"name": "Connectivity Statistics",
"instancetype": "single",
"mandatory": False,
"description": "This LWM2M Objects enables client to collect statistical information and enables the LWM2M Server to retrieve these information, set the collection duration and reset the statistical parameters.",
"resourcedefs": {
"0": {
"id": 0,
"name": "SMS Tx Counter",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "",
"description": "Indicate the total number of SMS successfully transmitted during the collection period."
},
"1": {
"id": 1,
"name": "SMS Rx Counter",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "",
"description": "Indicate the total number of SMS successfully received during the collection period."
},
"2": {
"id": 2,
"name": "Tx Data",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "Kilo-Bytes",
"description": "Indicate the total amount of data transmitted during the collection period."
},
"3": {
"id": 3,
"name": "Rx Data",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "Kilo-Bytes",
"description": "Indicate the total amount of data received during the collection period."
},
"4": {
"id": 4,
"name": "Max Message Size",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "Byte",
"description": "The maximum message size that is used during the collection period."
},
"5": {
"id": 5,
"name": "Average Message Size",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "",
"units": "Byte",
"description": "The average message size that is used during the collection period."
},
"6": {
"id": 6,
"name": "StartOrReset",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "Start to collect information or reset all other Resources to zeros in this Object. For example, the first time this Resource is executed, the client starts to collect information. The second time this Resource is executed, the values of Resource 0~5 are reset to 0."
}
}
},
"8": {
"id": 8,
"name": "Lock and Wipe",
"instancetype": "single",
"mandatory": False,
"description": "This LWM2M objects provides the resources needed to perform the lock and wipe operations.",
"resourcedefs": {
"0": {
"id": 0,
"name": "State",
"operations": "RW",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-2",
"units": "",
"description": "State of the device:\n0: unlocked state\nNormal operation.\n1: partially locked state\nTo render the device inoperable the device has been partially locked. The 'lock target' resource allows specifying the target(s) for this operation.\n2: fully locked state\nTo render the device fully inoperable the device has been fully locked."
},
"1": {
"id": 1,
"name": "Lock target",
"operations": "W",
"instancetype": "multiple",
"mandatory": True,
"type": "string",
"range": "",
"units": "",
"description": "To specify one or several targets for the lock operation. This allows partially locking the device by selecting specific components or interfaces to be locked."
},
"2": {
"id": 2,
"name": "Wipe item",
"operations": "R",
"instancetype": "multiple",
"mandatory": False,
"type": "string",
"range": "",
"units": "",
"description": "Indicates which data can be wiped from the device. This resource could be e.g. representing a directory."
},
"3": {
"id": 3,
"name": "Wipe",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "To permanently erase data from the device."
},
"4": {
"id": 4,
"name": "Wipe target",
"operations": "W",
"instancetype": "multiple",
"mandatory": True,
"type": "string",
"range": "",
"units": "",
"description": "To specify one or several targets for the wipe operation. This allows selecting specific data, or, memory areas for the wipe operation."
},
"5": {
"id": 5,
"name": "Lock or Wipe Operation Result",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-8",
"units": "",
"description": "Contains the result of a lock and wipe operation\n0: Default\n1: Partially Lock operation successful\n2: Fully Lock operation successful\n3: Unlock operation successful\n4: Wipe operation successful\n5: Partially Lock operation failed\n6: Fully Lock operation failed\n7: Unlock operation failed\n8: Wipe operation failed\nThis Resource MAY be reported by sending Observe operation."
}
}
},
"9": {
"id": 9,
"name": "Software Update",
"instancetype": "multiple",
"mandatory": False,
"description": "This LWM2M Objects provides the resources needed to perform software management on the device. Each software component is managed via a dedicated Software Management Object instance.",
"resourcedefs": {
"0": {
"id": 0,
"name": "PkgName",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": "Name of the software package."
},
"1": {
"id": 1,
"name": "PkgVersion",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": "Version of the software package."
},
"2": {
"id": 2,
"name": "Package",
"operations": "W",
"instancetype": "single",
"mandatory": True,
"type": "opaque",
"range": "",
"units": "",
"description": "Software package."
},
"3": {
"id": 3,
"name": "Package URI",
"operations": "W",
"instancetype": "single",
"mandatory": True,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": "URI from where the device can download the software package by an alternative mechanism. As soon as the device has received the Package URI it performs the download at the next practical opportunity."
},
"4": {
"id": 4,
"name": "Install",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "Installs software from the package either stored in Package resource or downloaded from the Package URI. This Resource is only executable when the value of the State Resource is DELIVERED."
},
"5": {
"id": 5,
"name": "Checkpoint",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "objlnk",
"range": "",
"units": "",
"description": "Link to a 'Checkpoint' object which allows to specify conditions/dependencies for a software update. E.g. power connected, sufficient memory, target system."
},
"6": {
"id": 6,
"name": "Uninstall",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "Uninstalls the software package, removes it from the Device if present and set Update State back to INITIAL state."
},
"7": {
"id": 7,
"name": "Update State",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "1-5",
"units": "",
"description": "Indicates current state with respect to this software update. This value is set by the LWM2M Client.\n1: INITIAL Before downloading. (see 5.1.2.1)\n2: DOWNLOAD STARTED The downloading process has started and is on-going. (see 5.1.2.2)\n3: DOWNLOADED The package has been completely downloaded (see 5.1.2.3)\n4: DELIVERED In that state, the package has been correctly downloaded and is ready to be installed. (see 5.1.2.4)\nIf executing the Install Resource failed, the state remains at DELIBERED.\nIf executing the Install Resource was successful, the state changes from DELIVERED to INSTALLED.\nAfter executing the UnInstall Resource, the state changes to INITIAL.\n5: INSTALLED In that state the software is correctly installed and can be activated or deactivated according to the Activation State Machine. (see 5.1.2.5)"
},
"8": {
"id": 8,
"name": "Update Supported Objects",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "boolean",
"range": "",
"units": "",
"description": "If this value is True, the LWM2M Client MUST inform the registered LWM2M Servers of Objects and Object Instances parameter by sending an Update or Registration message after the software update operation at the next practical opportunity if supported Objects in the LWM2M Client have changed, in order for the LWM2M Servers to promptly manage newly installed Objects.\nIf False, Objects and Object Instances parameter MUST be reported at the next periodic Update message.\nThe default value is False."
},
"9": {
"id": 9,
"name": "Update Result",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "integer",
"range": "0-10",
"units": "",
"description": "Contains the result of downloading or installing/uninstalling the software\n0: Initial value. Prior to download any new package in the Device, Update Result MUST be reset to this initial value. One side effect of executing the Uninstall resource is to reset Update Result to this initial value '0'.\n1: Downloading. The package downloading process is on-going.\n2: Software d successfully installed.\n3: Not enough storage for the new software package.\n4: Out of memory during downloading process.\n5: Connection lost during downloading process.\n6: Package integrity check failure.\n7: Unsupported package type.\n8: Invalid URI\n9: Device defined update error\n10: Software installation failure\nThis Resource MAY be reported by sending Observe operation."
},
"10": {
"id": 10,
"name": "Activate",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "This action activates the software previously successfully installed (the Package Installation State Machine is currently in the INSTALLED state)"
},
"11": {
"id": 11,
"name": "Deactivate",
"operations": "E",
"instancetype": "single",
"mandatory": True,
"type": "",
"range": "",
"units": "",
"description": "This action deactivates software if the Package Installation State Machine is currently in the INSTALLED state."
},
"12": {
"id": 12,
"name": "Activation State",
"operations": "R",
"instancetype": "single",
"mandatory": True,
"type": "boolean",
"range": "",
"units": "",
"description": "Indicates the current activation state of this software:\n0: DISABLED Activation State is DISABLED if the Software Activation State Machine is in the INACTIVE state or not alive.\n1: ENABLED Activation State is ENABLED only if the Software Activation State Machine is in the ACTIVE state"
},
"13": {
"id": 13,
"name": "Package Settings",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "objlnk",
"range": "",
"units": "",
"description": "Link to 'Package Settings' object which allows to modify at any time software configuration settings. This is an application specific object.\nNote: OMA might provide a template for a Package Settings object in a future release of this specification."
}
}
},
"32301": { # !!! CONNECT2's proprietary parameter names
"id": 32301,
"name": "Connect2",
"instancetype": "multiple",
"mandatory": False,
"description": "Connect 2 specific parameters",
"resourcedefs": {
"0": {
"id": 0,
"name": "Mote ID",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"1": {
"id": 1,
"name": "Mote Status",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"2": {
"id": 2,
"name": "Network Key",
"operations": "W",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"3": {
"id": 3,
"name": "Firmware Version",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "string",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"4": {
"id": 4,
"name": "Publish Interval",
"operations": "RW",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"5": {
"id": 5,
"name": "Input 1",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"6": {
"id": 5,
"name": "Input 2",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"7": {
"id": 7,
"name": "Input 3",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"8": {
"id": 8,
"name": "Input 4",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"9": {
"id": 9,
"name": "Transmit RSSI",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"10": {
"id": 10,
"name": "Receive RSSI",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
},
"11": {
"id": 11,
"name": "Availability",
"operations": "R",
"instancetype": "single",
"mandatory": False,
"type": "integer",
"range": "0-255 bytes",
"units": "",
"description": ""
}
}
}
}
def doIndex(obj, num):
# Looks up a numerical key in a dict
if not obj:
return None
s = str(num)
if s in obj:
return obj[s]
return None
def lookupName(objectID, resourceID=None):
"""LWM2M ids are triples e.g. 1/0/0, but the middle one is the instance number. So it's objectID/instanceNum/resourceID"""
r1 = doIndex(objects, objectID)
if not r1:
return "UNKNOWN_LWM2M_OBJECT_ID"
s1 = r1["name"]
    if resourceID is None:
return s1.replace(" ","_")
r2 = doIndex(r1["resourcedefs"], resourceID)
if not r2:
s2 = "UNKNOWN_LWM2M_RESOURCE_ID"
else:
s2 = r2["name"]
s = s1 + "_" + s2
s = s.replace(" ","_")
return s
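# Minimal usage sketch for the helpers above (illustrative only). LWM2M URIs are
# triples such as 3/0/0 (object/instance/resource); the instance number is not
# needed for the name lookup.
if __name__ == "__main__":
    assert lookupName(3) == "Device"
    assert lookupName(3, 0) == "Device_Manufacturer"
    assert lookupName(99) == "UNKNOWN_LWM2M_OBJECT_ID"
    assert lookupName(1, 99) == "LWM2M_Server_UNKNOWN_LWM2M_RESOURCE_ID"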
|
import collections.abc
import json
import os
import re
import subprocess
import sys
import time
import pytest
import dcos.util as util
from dcos.util import create_schema
from dcoscli.test.common import (assert_command, assert_lines,
assert_lines_range, exec_command)
from dcoscli.test.marathon import (add_app, app, remove_app,
watch_all_deployments)
from ..fixtures.task import task_fixture
SLEEP_COMPLETED = 'tests/data/marathon/apps/sleep-completed.json'
SLEEP_COMPLETED1 = 'tests/data/marathon/apps/sleep-completed1.json'
SLEEP1 = 'tests/data/marathon/apps/sleep1.json'
SLEEP2 = 'tests/data/marathon/apps/sleep2.json'
FOLLOW = 'tests/data/file/follow.json'
TWO_TASKS = 'tests/data/file/two_tasks.json'
TWO_TASKS_FOLLOW = 'tests/data/file/two_tasks_follow.json'
LS = 'tests/data/tasks/ls-app.json'
HELLO_STDERR = 'tests/data/marathon/apps/hello-stderr.json'
INIT_APPS = ((LS, 'ls-app'),
(SLEEP1, 'test-app1'),
(SLEEP2, 'test-app2'))
NUM_TASKS = len(INIT_APPS)
def setup_module():
# create a completed task
with app(SLEEP_COMPLETED, 'test-app-completed'):
pass
for app_ in INIT_APPS:
add_app(app_[0])
def teardown_module():
for app_ in INIT_APPS:
remove_app(app_[1])
def test_help():
with open('dcoscli/data/help/task.txt') as content:
assert_command(['dcos', 'task', '--help'],
stdout=content.read().encode('utf-8'))
def test_info():
stdout = b"Manage DC/OS tasks\n"
assert_command(['dcos', 'task', '--info'], stdout=stdout)
def test_task():
# test `dcos task` output
returncode, stdout, stderr = exec_command(['dcos', 'task', '--json'])
assert returncode == 0
assert stderr == b''
tasks = json.loads(stdout.decode('utf-8'))
    assert isinstance(tasks, collections.abc.Sequence)
assert len(tasks) == NUM_TASKS
schema = create_schema(task_fixture().dict(), True)
schema['required'].remove('labels')
for task in tasks:
assert not util.validate_json(task, schema)
def test_task_table():
assert_lines(['dcos', 'task'], NUM_TASKS + 1)
def test_task_completed():
assert_lines(
['dcos', 'task', '--completed', '--json', 'test-app-completed*'],
1,
greater_than=True)
def test_task_all():
assert_lines(
['dcos', 'task', '--json', '*-app*'],
NUM_TASKS,
greater_than=True)
def test_task_none():
assert_command(['dcos', 'task', 'bogus', '--json'],
stdout=b'[]\n')
def test_filter():
assert_lines(['dcos', 'task', 'test-app2', '--json'], 1, greater_than=True)
def test_log_no_files():
""" Tail stdout on nonexistant task """
assert_command(['dcos', 'task', 'log', 'bogus'],
returncode=1,
stderr=b'No matching tasks. Exiting.\n')
def test_log_single_file():
""" Tail a single file on a single task """
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', 'test-app1'])
assert returncode == 0
assert stderr == b''
assert len(stdout.decode('utf-8').split('\n')) > 0
def test_log_task():
with app(HELLO_STDERR, 'test-hello-stderr'):
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', 'test-hello-stderr', 'stderr',
'--lines=-1'])
assert returncode == 0
assert not stderr
assert stdout == b'hello\n'
def test_log_missing_file():
""" Tail a single file on a single task """
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', 'test-app2', 'bogus'])
assert returncode == 1
assert stdout == b''
assert stderr == b'No logs found\n'
def test_log_lines_invalid():
""" Test invalid --lines value """
assert_command(['dcos', 'task', 'log', 'test-app1', '--lines=bogus'],
stdout=b'',
stderr=b'Error parsing string as int\n',
returncode=1)
@pytest.mark.skipif(sys.platform == 'win32',
reason="Using Windows unsupported import (fcntl)")
def test_log_follow():
""" Test --follow """
# verify output
with app(FOLLOW, 'follow'):
proc = subprocess.Popen(['dcos', 'task', 'log', 'follow', '--follow'],
stdout=subprocess.PIPE)
# mark stdout as non-blocking, so we can read all available data
# before EOF
_mark_non_blocking(proc.stdout)
time.sleep(10)
assert len(_read_lines(proc.stdout)) >= 1
proc.kill()
def test_log_completed():
""" Test `dcos task log --completed` """
# create a completed task
# ensure that tail lists nothing
# ensure that tail --completed lists a completed task
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', 'test-app-completed'])
assert returncode == 1
assert stdout == b''
assert stderr.startswith(b'No running tasks match ID [test-app-completed]')
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', '--completed', 'test-app-completed', 'stderr'])
assert returncode == 0
assert stderr == b''
assert len(stdout.decode('utf-8').split('\n')) >= 3
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'log', '--all', 'test-app-completed', 'stderr'])
assert returncode == 0
assert stderr == b''
assert len(stdout.decode('utf-8').split('\n')) >= 3
def test_ls_no_params():
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'ls'])
assert returncode == 0
assert stderr == b''
    ls_line = r'\.ssl.*stderr.*stdout.*'
lines = stdout.decode('utf-8').split('\n')
assert len(lines) == 7
assert re.match('===>.*<===', lines[0])
assert re.match(ls_line, lines[1])
assert re.match('===>.*<===', lines[2])
assert re.match(ls_line, lines[3])
assert re.match('===>.*<===', lines[4])
assert re.match(ls_line, lines[5])
def test_ls():
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'ls', 'test-app1'])
assert returncode == 0
assert stderr == b''
    ls_line = r'\.ssl.*stderr.*stdout.*'
lines = stdout.decode('utf-8').split('\n')
assert len(lines) == 2
assert re.match(ls_line, lines[0])
def test_ls_multiple_tasks():
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'ls', 'test-app'])
assert returncode == 0
assert stderr == b''
    ls_line = r'\.ssl.*stderr.*stdout.*'
lines = stdout.decode('utf-8').split('\n')
assert len(lines) == 5
assert re.match('===>.*<===', lines[0])
assert re.match(ls_line, lines[1])
assert re.match('===>.*<===', lines[2])
assert re.match(ls_line, lines[3])
def test_ls_long():
assert_lines_range(['dcos', 'task', 'ls', '--long', 'test-app1'], 5, 7)
def test_ls_path():
assert_command(['dcos', 'task', 'ls', 'ls-app', 'test'],
stdout=b'test1 test2\n')
def test_ls_bad_path():
assert_command(
['dcos', 'task', 'ls', 'test-app1', 'bogus'],
stderr=b'Cannot access [bogus]: No such file or directory\n',
returncode=1)
def test_ls_completed():
with app(SLEEP_COMPLETED1, 'test-app-completed1'):
task_id_completed = _get_task_id('test-app-completed1')
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'ls', task_id_completed])
assert returncode == 1
assert stdout == b''
err = b'Cannot find a task with ID containing "test-app-completed1'
assert stderr.startswith(err)
returncode, stdout, stderr = exec_command(
['dcos', 'task', 'ls', '--completed', task_id_completed])
assert returncode == 0
assert stderr == b''
        ls_line = r'\.ssl.*stderr.*stdout.*'
lines = stdout.decode('utf-8').split('\n')
assert len(lines) == 2
assert re.match(ls_line, lines[0])
def test_exec_non_interactive():
with open('tests/data/tasks/lorem-ipsum.txt') as text:
content = text.read()
task_id = _get_task_id('test-app1')
with open('tests/data/tasks/lorem-ipsum.txt') as text:
assert_command(
['dcos', 'task', 'exec', task_id, 'printf', content],
stdout=bytes(content, 'UTF-8'))
def test_exec_interactive():
with open('tests/data/tasks/lorem-ipsum.txt') as text:
content = bytes(text.read(), 'UTF-8')
task_id = _get_task_id('test-app1')
with open('tests/data/tasks/lorem-ipsum.txt') as text:
assert_command(
['dcos', 'task', 'exec', '--interactive', task_id, 'cat'],
stdout=content, stdin=text)
def test_exec_match_id_pattern():
assert_command(['dcos', 'task', 'exec', 'app1', 'true'])
assert_command(['dcos', 'task', 'exec', 'app2', 'true'])
returncode, _, _ = exec_command(['dcos', 'task', 'exec', 'app', 'true'])
assert returncode != 0
def _mark_non_blocking(file_):
import fcntl
fcntl.fcntl(file_.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
def _install_sleep_task(app_path=SLEEP1, app_name='test-app'):
args = ['dcos', 'marathon', 'app', 'add', app_path]
assert_command(args)
watch_all_deployments()
def _uninstall_helloworld(args=[]):
assert_command(['dcos', 'package', 'uninstall', 'helloworld',
'--yes'] + args)
def _uninstall_sleep(app_id='test-app'):
assert_command(['dcos', 'marathon', 'app', 'remove', app_id])
def _get_task_id(app_id):
returncode, stdout, stderr = exec_command(
['dcos', 'task', '--json', app_id])
assert returncode == 0
tasks = json.loads(stdout.decode('utf-8'))
assert len(tasks) == 1
task_id = tasks[0]['id']
return task_id
def _read_lines(raw_io):
"""Polls calls to `read()` on the given byte stream until some bytes are
returned, or the maximum number of attempts is reached.
:param raw_io: the byte stream to read from
:type raw_io: io.RawIOBase
:returns: the bytes read, decoded as UTF-8 and split into a list of lines
:rtype: [str]
"""
for _ in range(30):
bytes_read = raw_io.read()
if bytes_read is not None:
break
time.sleep(1)
else:
assert False, 'timed out trying to read bytes'
return bytes_read.decode('utf-8').split('\n')
|
from typing import Any, Dict, List
from matplotlib.axes import Axes
from matplotlib.lines import Line2D
def set_legend(axes: Axes, lines: List[Line2D], legend_cfg: Dict[str, Any]):
axes.legend(handles=lines, **legend_cfg)
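# Hedged usage sketch for the helper above (the figure, line label, and
# legend_cfg keys below are illustrative assumptions, not part of this module):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     (line,) = ax.plot([0, 1, 2], [0, 1, 4], label="y = x^2")
#     set_legend(ax, [line], {"loc": "upper left", "frameon": False})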
|
from django.urls import path, include
from api import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'player', views.PlayerViewSet)
# URLs for creating and getting the dataset, and for getting and filtering rows
urlpatterns = [
path('team/', views.get_player),
path('load/', views.load_players),
# path('players?search=<str:search>/', views.find_players),
path('players', views.find_players),
path('', include(router.urls))
]
|
from django.core import checks, exceptions
from django.db.models import BLANK_CHOICE_DASH, CharField
from django.utils import six
from django.utils.functional import curry
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from lazychoices import forms
from .mixins import LazyChoiceModelMixin
class LazyChoiceField(CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('choices', BLANK_CHOICE_DASH)
kwargs.setdefault('max_length', 25)
super(LazyChoiceField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(LazyChoiceField, self).check(**kwargs)
errors.extend(self._check_model())
for subclass in self.model.__subclasses__():
errors.extend(self._check_choices(subclass))
return errors
def _check_choices(self, klass=None):
klass = klass or self.model
if hasattr(klass, self.choices_name):
choices = getattr(klass, self.choices_name)
if (isinstance(choices, six.string_types) or not is_iterable(choices)):
return [
checks.Error(
"'{0}' must be an iterable (e.g., a list or tuple).".format(self.choices_name),
hint=None,
obj=klass,
id='lazychoices.E001',
),
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in choices):
return [
checks.Error(
("'{0}' must be an iterable containing "
'(actual value, human readable name) tuples.').format(self.choices_name),
hint=None,
obj=klass,
id='lazychoices.E002',
),
]
else:
return []
else:
return []
def _check_model(self):
if not issubclass(self.model, LazyChoiceModelMixin):
return [
checks.Error(
"The model must inherit from 'LazyChoiceModelMixin'.",
hint=None,
obj=self.model,
id='lazychoices.E003',
),
]
else:
return []
def validate(self, value, model_instance):
choices = getattr(model_instance, self.choices_name, [])
if choices and value not in self.empty_values:
for option_key, option_value in choices:
if isinstance(option_value, (list, tuple)):
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def set_attributes_from_name(self, name):
super(LazyChoiceField, self).set_attributes_from_name(name)
self.choices_name = '{0}_CHOICES'.format(self.name.upper())
def contribute_to_class(self, cls, name):
super(LazyChoiceField, self).contribute_to_class(cls, name)
if hasattr(cls, '_get_LAZYFIELD_display'):
setattr(cls, 'get_{0}_display'.format(self.name), curry(cls._get_LAZYFIELD_display, field=self))
def formfield(self, form_class=None, **kwargs):
defaults = {
'help_text': self.help_text,
'label': capfirst(self.verbose_name),
'model': self.model,
'required': not self.blank,
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
defaults['choices_name'] = self.choices_name
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ['coerce', 'empty_label', 'empty_value', 'choices_name',
'model', 'required', 'widget', 'label', 'initial',
'help_text', 'error_messages', 'show_hidden_initial']:
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.LazyChoiceField
return form_class(**defaults)
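# Hedged usage sketch, inferred only from the checks above: the concrete model
# inherits LazyChoiceModelMixin, and each subclass provides a
# `<FIELD_NAME>_CHOICES` iterable of (value, label) pairs. All names below are
# illustrative assumptions, not part of this module.
#
#     class Report(LazyChoiceModelMixin, models.Model):
#         state = LazyChoiceField()
#
#     class WeeklyReport(Report):
#         STATE_CHOICES = [('draft', 'Draft'), ('sent', 'Sent')]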
|
from .dbnet import DBNet
from .ocr_mask_rcnn import OCRMaskRCNN
from .panet import PANet
from .psenet import PSENet
from .single_stage_text_detector import SingleStageTextDetector
from .text_detector_mixin import TextDetectorMixin
from .textsnake import TextSnake
__all__ = [
'TextDetectorMixin', 'SingleStageTextDetector', 'OCRMaskRCNN', 'DBNet',
'PANet', 'PSENet', 'TextSnake'
]
|
#!/usr/bin/env python3
"""Scans a directory to determine the licenses of its dependancies."""
import argparse
import datetime
import json
import logging
import os
import pathlib
import sys
from .kss_prereqs_scanner import KSSPrereqsScanner
from .manual_scanner import ManualScanner
from .swift_scanner import SwiftModuleScanner
from . import __version__
def _parse_command_line(args: list):
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='Show debugging information')
parser.add_argument('--version', action='store_true', help='Show version number and then exit')
parser.add_argument('--directory',
default='.',
help='Directory to be scanned (defaults to the current working directory)')
parser.add_argument('--name',
help='Name of module to be scanned (default is the basename of the '
+ 'directory).')
parser.add_argument('--output',
default='Dependencies/prereqs-licenses.json',
help='Output file, relative to the scanned directory. (Default is '
+ '"Dependencies/prereqs-licenses.json")')
parser.add_argument('--manual-licenses',
default='manual-licenses.json',
metavar='FILENAME',
help='File containing manually generated license entries, within '
                             + 'the scanned directory. (Default is "manual-licenses.json".)')
return parser.parse_args(args)
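# For illustration, a hypothetical invocation using the options defined above
# (the module name and paths are assumptions, not part of this package):
#
#     python -m license_scanner --directory ~/projects/MyModule \
#         --name MyModule --output Dependencies/prereqs-licenses.json --verbose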
def _write_licenses(filename: str, licenses: dict, metadata: dict):
outputdir = os.path.dirname(filename)
if outputdir:
pathlib.Path(outputdir).mkdir(parents=True, exist_ok=True)
data = {
'dependencies': sorted(licenses.values(), key=lambda x: x['moduleName']),
'generated': metadata
}
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def _generated_metadata():
args = ""
if len(sys.argv) > 1:
args = " %s" % ' '.join(sys.argv[1:])
metadata = {
'time': datetime.datetime.now().astimezone().isoformat(),
'process': 'license-scanner%s' % args,
'project': os.path.basename(os.getcwd())
}
return metadata
def scan(args: list = None):
"""Main entry point.
Parameters:
args: list of strings specifying the arguments. If None then sys.argv will be used.
"""
options = _parse_command_line(args)
if options.version:
print(__version__)
sys.exit()
logging.getLogger().setLevel(logging.DEBUG if options.verbose else logging.INFO)
if not os.path.isdir(options.directory):
raise FileNotFoundError(options.directory)
cwd = os.getcwd()
directory = options.directory
if directory == ".":
directory = cwd
modulename = options.name or os.path.basename(directory)
outputfile = options.output
manualentries = options.manual_licenses
logging.info("Scanning for licenses in '%s'", directory)
logging.debug(" identifying module as '%s'", modulename)
logging.debug(" will write output to '%s'", outputfile)
logging.debug(" will look for manual entries in '%s'", manualentries)
try:
os.chdir(directory)
licenses = {}
scanners = [ManualScanner(modulename, manualentries),
SwiftModuleScanner(modulename),
KSSPrereqsScanner(modulename)
]
for scanner in scanners:
scanner.add_licenses(licenses)
_write_licenses(outputfile, licenses, _generated_metadata())
finally:
os.chdir(cwd)
if __name__ == '__main__':
scan()
|
"""
The ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module
exports PyTorch models with the following flavors:
PyTorch (native) format
This is the main flavor that can be loaded back into PyTorch.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
from __future__ import absolute_import
import os
import numpy as np
import pandas as pd
import torch
from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
FLAVOR_NAME = "pytorch"
def log_model(pytorch_model, artifact_path, conda_env=None, **kwargs):
"""
Log a PyTorch model as an MLflow artifact for the current run.
:param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as
input and produce a single output tensor.
:param artifact_path: Run-relative artifact path.
:param conda_env: Path to a Conda environment file. If provided, this defines the environment
for the model. At minimum, it should specify python, pytorch, and mlflow with appropriate
versions.
:param kwargs: kwargs to pass to ``torch.save`` method.
"""
Model.log(artifact_path=artifact_path, flavor=mlflow.pytorch,
pytorch_model=pytorch_model, conda_env=conda_env, **kwargs)
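# Minimal usage sketch for log_model (assumes an active MLflow tracking setup
# and a trained torch.nn.Module named `net`; both names are illustrative):
#
#     import mlflow
#     import mlflow.pytorch
#
#     with mlflow.start_run():
#         mlflow.pytorch.log_model(net, artifact_path="model")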
def save_model(pytorch_model, path, conda_env=None, mlflow_model=Model(), **kwargs):
"""
Save a PyTorch model to a path on the local file system.
:param pytorch_model: PyTorch model to be saved. Must accept a single ``torch.FloatTensor`` as
input and produce a single output tensor.
:param path: Local path where the model is to be saved.
    :param conda_env: Path to a Conda environment file. If provided, this describes the environment
this model should be run in. At minimum, it should specify python, pytorch,
and mlflow with appropriate versions.
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param kwargs: kwargs to pass to ``torch.save`` method.
"""
if not isinstance(pytorch_model, torch.nn.Module):
raise TypeError("Argument 'pytorch_model' should be a torch.nn.Module")
path = os.path.abspath(path)
if os.path.exists(path):
raise RuntimeError("Path '{}' already exists".format(path))
os.makedirs(path)
model_path = os.path.join(path, "model.pth")
# Save pytorch model
torch.save(pytorch_model, model_path, **kwargs)
model_file = os.path.basename(model_path)
mlflow_model.add_flavor(FLAVOR_NAME, model_data=model_file, pytorch_version=torch.__version__)
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.pytorch",
data=model_file, env=conda_env)
mlflow_model.save(os.path.join(path, "MLmodel"))
def _load_model(path, **kwargs):
mlflow_model_path = os.path.join(path, "MLmodel")
if not os.path.exists(mlflow_model_path):
raise RuntimeError("MLmodel is not found at '{}'".format(path))
mlflow_model = Model.load(mlflow_model_path)
if FLAVOR_NAME not in mlflow_model.flavors:
raise ValueError("Could not find flavor '{}' amongst available flavors {}, "
"unable to load stored model"
.format(FLAVOR_NAME, list(mlflow_model.flavors.keys())))
    # This may be replaced by a warning and a try/except around torch.load
flavor = mlflow_model.flavors[FLAVOR_NAME]
if torch.__version__ != flavor["pytorch_version"]:
raise ValueError("Stored model version '{}' does not match "
"installed PyTorch version '{}'"
.format(flavor["pytorch_version"], torch.__version__))
path = os.path.abspath(path)
path = os.path.join(path, mlflow_model.flavors[FLAVOR_NAME]['model_data'])
return torch.load(path, **kwargs)
def load_model(path, run_id=None, **kwargs):
"""
Load a PyTorch model from a local file (if ``run_id`` is ``None``) or a run.
:param path: Local filesystem path or run-relative artifact path to the model saved
by :py:func:`mlflow.pytorch.log_model`.
:param run_id: Run ID. If provided, combined with ``path`` to identify the model.
:param kwargs: kwargs to pass to ``torch.load`` method.
"""
if run_id is not None:
path = mlflow.tracking.utils._get_model_log_dir(model_name=path, run_id=run_id)
return _load_model(path, **kwargs)
def load_pyfunc(path, **kwargs):
"""
Load a persisted PyTorch model as a ``python_function`` model.
The loaded PyFunc exposes a ``predict(pd.DataFrame) -> pd.DataFrame``
method that, given an input DataFrame of n rows and k float-valued columns, feeds a
corresponding (n x k) ``torch.FloatTensor`` (or ``torch.cuda.FloatTensor``) as input to the
PyTorch model. ``predict`` returns the model's predictions (output tensor) in a single column
DataFrame.
:param path: Local filesystem path to the model saved by :py:func:`mlflow.pytorch.log_model`.
:param kwargs: kwargs to pass to ``torch.load`` method.
:rtype: Pyfunc format model with function
``model.predict(pandas DataFrame) -> pandas DataFrame``.
"""
return _PyTorchWrapper(_load_model(os.path.dirname(path), **kwargs))
class _PyTorchWrapper(object):
"""
Wrapper class that creates a predict function such that
predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)
"""
def __init__(self, pytorch_model):
self.pytorch_model = pytorch_model
def predict(self, data, device='cpu'):
if not isinstance(data, pd.DataFrame):
raise TypeError("Input data should be pandas.DataFrame")
self.pytorch_model.to(device)
self.pytorch_model.eval()
with torch.no_grad():
input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device)
preds = self.pytorch_model(input_tensor)
if not isinstance(preds, torch.Tensor):
raise TypeError("Expected PyTorch model to output a single output tensor, "
"but got output of type '{}'".format(type(preds)))
            predicted = pd.DataFrame(preds.cpu().numpy())  # .cpu() so this also works when the model ran on CUDA
predicted.index = data.index
return predicted
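# Illustrative round trip through the helpers above (the path and data are
# made-up assumptions):
#
#     save_model(net, "/tmp/my_pytorch_model")             # net: a trained torch.nn.Module
#     model = load_model("/tmp/my_pytorch_model")
#     wrapper = _PyTorchWrapper(model)
#     df = pd.DataFrame(np.random.rand(4, 3).astype(np.float32))
#     preds = wrapper.predict(df)                          # DataFrame of model outputs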
|
import os
import sys
import numpy as np
from numpy.lib import index_tricks
import raisimpy as raisim
import datetime
import time
import yaml
import random
import shutil
import pickle
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from xbox360controller import Xbox360Controller
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/utils")
print(os.path.abspath(os.path.dirname(__file__))) # get current file path
from ParamsCalculate import ControlParamCal
import visualization
import FileSave
# xbox = Xbox360Controller(0, axis_threshold=0.02)
def DataPlot(Data):
BallPosition = Data['BallPos']
BallVelocity = Data['BallVel']
ExternalForce = Data['ExternalForce']
Point1Pos = Data['Point1Pos']
Point1Vel = Data['Point1Vel']
ResForce = Data['ResForce']
T = Data['time']
SumForce = ExternalForce
plt.figure()
plt.title('Ball motion in zx plane', fontsize = 20)
plt.subplot(311)
plt.plot(T, BallPosition[:, 0], label='Ball x-axis Position')
plt.plot(T, BallPosition[:, 1], label='Ball y-axis Position')
# plt.plot(T, BallVelocity[:, 0], label='Ball x-axis Velocity')
plt.plot(T, BallPosition[:, 2], label='Ball z-axis Position')
# plt.plot(T, line2, label='highest Velocity')
plt.axis([0, max(T)*1.05, -max(BallPosition[:, 2])*2, max(BallPosition[:, 2])*2])
plt.xlabel('time (s)')
plt.ylabel('Ball Position (m)', fontsize = 15)
plt.legend(loc='upper right', fontsize = 15)
plt.subplot(312)
plt.plot(T, BallVelocity[:, 0], label='Ball x-axis Velocity')
plt.plot(T, BallVelocity[:, 1], label='Ball y-axis Velocity')
plt.plot(T, BallVelocity[:, 2], label='Ball z-axis Velocity')
plt.axis([0, max(T)*1.05, -max(BallVelocity[:, 2])*2, max(BallVelocity[:, 2])*2])
plt.xlabel('time (s)')
plt.ylabel('Velocity (m/s)', fontsize = 15)
plt.legend(loc='upper right', fontsize = 15)
plt.subplot(313)
plt.plot(T, ExternalForce[:, 0], label='Ball x-axis Force')
plt.plot(T, ExternalForce[:, 1], label='Ball y-axis Force')
plt.plot(T, ExternalForce[:, 2], label='Ball z-axis Force')
plt.axis([0, max(T)*1.05, -max(ExternalForce[:, 0])*2.5, max(ExternalForce[:, 0])*2.5])
plt.xlabel('time (s)', fontsize = 15)
plt.ylabel('Force (N)', fontsize = 15)
plt.legend(loc='upper right', fontsize = 15)
plt.figure()
plt.scatter(BallPosition[:, 0], BallPosition[:, 2], label='X-Z plane Ball motion trajectory')
plt.xlabel('x-axis position (m)', fontsize = 15)
plt.ylabel('z-axis position (m)', fontsize = 15)
# plt.legend(loc='upper right')
plt.title('X-Z plane Ball motion trajectory', fontsize = 20)
# plt.figure()
# plt.plot(BallPosition[:, 0], ExternalForce[:, 0], label='Ball x-axis Pos-Force')
# plt.plot(BallPosition[:, 0], ExternalForce[:, 1], label='Ball y-axis Pos-Force')
# plt.plot(BallPosition[:, 0], ExternalForce[:, 2], label='Ball z-axis Pos-Force')
# plt.xlabel('Position (m)')
# plt.ylabel('Force (N)')
# plt.axis([-0.8, 0.8, -300, 250])
# plt.legend(loc='upper right')
# plt.title('Ball Pos-Force trajectory', fontsize = 20)
plt.figure()
plt.scatter(BallPosition[:, 0], BallPosition[:, 1], label='X-Y plane Ball motion trajectory', cmap='inferno')
plt.xlabel('x-axis position (m)', fontsize = 15)
plt.ylabel('y-axis position (m)', fontsize = 15)
# plt.legend(loc='upper right')
plt.title('X-Y plane Ball motion trajectory', fontsize = 20)
# plt.figure()
# Num = len(Point1Vel)
# index = np.linspace(0, Num, Num)
# plt.subplot(211)
# # Point1Vel[0, 1] = 6
# # plt.scatter(index, Point1Pos[:, 0], label='Point 1 x-axis pos motion trajectory')
# plt.scatter(index, Point1Pos[:, 1], label='Point 1 y-axis pos motion trajectory')
# # plt.scatter(index, Point1Pos[:, 2], label='Point 1 z-axis pos motion trajectory')
# plt.xlabel('Period', fontsize = 15)
# plt.ylabel('axis position (m)', fontsize = 15)
# # plt.axis([-0.5, Num + 0.5, -1.5, 1.5])
# plt.legend(loc='upper right', fontsize = 15)
# plt.subplot(212)
# # plt.scatter(index, Point1Vel[:, 0], label='Point 1 x-axis vel motion trajectory')
# plt.scatter(index, Point1Vel[:, 1], label='Point 1 y-axis vel motion trajectory')
# # plt.scatter(index, Point1Vel[:, 2], label='Point 1 z-axis vel motion trajectory')
# plt.xlabel('Period', fontsize = 15)
# plt.ylabel('axis velocity (m)', fontsize = 15)
# # plt.axis([-0.5, Num + 0.5, -1.5, 1.5])
# plt.legend(loc='upper right', fontsize = 15)
# # plt.title('Point1 motion trajectory', fontsize = 20)
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# ax.plot(BallPosition[:, 0], BallPosition[:, 1], ExternalForce[:, 0], label='x-axis Force')
# ax.plot(BallPosition[:, 0], BallPosition[:, 1], ExternalForce[:, 1], label='y-axis Force')
# ax.plot(BallPosition[:, 0], BallPosition[:, 1], ExternalForce[:, 2], label='z-axis Force')
# ax.plot(BallPosition[:, 0], BallPosition[:, 1], ResForce[:,0], label=' Resultant Force')
# ax.set_xlabel('x-axis position (m)')
# ax.set_ylabel('y-axis position (m)')
# ax.set_zlabel('Force (N)')
# ax.legend(loc='upper right')
# ax.set_title('X-Y plane Force Trajectory', fontsize = 20)
plt.show()
def ForceControl(BallPos, BallVel, flag, x_coef, TraPoint_x, TraPoint_y, z_ref, v_zref, v_xref, K_zd, K_xd, K_zvup, K_zvdown):
if BallPos[2] > z_ref:
if flag == 0:
x_ref = BallPos[0]
x_coef = - BallVel[0] / np.abs(BallVel[0])
flag = 1
if BallVel[2] >= 0:
zx_ratio = np.abs(BallVel[0] / BallVel[2])
# zy_ratio = np.abs(BallVel[1] / BallVel[2])
ZForce = - K_zd * (BallPos[2] - z_ref) - K_zvup * (BallVel[2])
XForce = - x_coef * zx_ratio * ZForce
# print("x pos and vel is ", BallPos[0], BallVel[0])
# print("x_coef is ", x_coef)
# print("up state: ", XForce, ZForce)
elif BallVel[2] <= 0:
# XForce = x_coef * (K_xd * (BallPos[0] - x_ref) + 10 * (BallVel[2] - x_coef * v_xref))
XForce = x_coef * 20 * (v_xref - x_coef * BallVel[0])
ZForce = - K_zd * (BallPos[2] - z_ref) - K_zvdown * (BallVel[2]- v_zref)
# print("x pos and vel is ", BallPos[0], BallVel[0])
# print("down state: ", XForce, ZForce)
elif BallPos[2] <= z_ref:
if flag == 1:
flag = 0
XForce = 0.0
ZForce = 0.0
return XForce, ZForce, flag, x_coef
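# Descriptive note on the force law above (added commentary, not original
# documentation): while the ball is above z_ref and still rising, ZForce is a
# PD-style term that pushes it back toward z_ref while damping the upward
# velocity, and XForce redirects that force along the incoming horizontal
# direction via the |vx/vz| ratio; once the ball starts falling, the controller
# instead tracks the reference velocities v_xref and v_zref. Below z_ref the
# ball is left in free flight and no external force is applied.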
def DriControl(ParamData):
TraPoint_x = np.array([-0.2, -0.4, 0.0])
TraPoint_y = np.array([0.0, 0.4, 0.4])
flag = 0
xref_flag = 0
z_ref = 0.5
x_ref = 0.0
x_coef = 0.0
v_zref = -6
v_xref = 6
index = 0
K_xd = 400
K_zd = 300
K_zvup = 5
K_zvdown = 30
BallPos, BallVel = ball1.getState()
print("init ball pos: ", BallPos)
    # arrays for saving ball, arm end-foot, contact force and joint states
    BallPosition = np.array([[0.0, 0.0, 0.0]])    # position of the ball
    BallVelocity = np.array([[0.0, 0.0, 0.0]])    # velocity of the ball
    ExternalForce = np.array([[0.0, 0.0, 0.0]])   # calculated external force applied between the ball and the foot
SumForce = np.array([[0.0]])
T = np.array([0.0])
Point1Pos = np.array([[0.0, 0.0, 0.0]])
Point1Vel = np.array([[0.0, 0.0, 0.0]])
Point2State = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
Point3State = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
for i in range(20000):
time.sleep(0.0001)
BallPos, BallVel = ball1.getState()
BallPos = BallPos[0:3]
BallVel = BallVel[0:3]
# print(TraPoint_x[index])
# XForce, ZForce, flag, x_coef = ForceControl(BallPos, BallVel, flag, x_coef, TraPoint_x[index], TraPoint_y[index], z_ref, v_zref, v_xref, K_zd, K_xd, K_zvup, K_zvdown)
if BallPos[2] > z_ref:
if flag == 0:
x_ref = BallPos[0]
# x_coef = - BallVel[0] / np.abs(BallVel[0])
if index == 0:
x_coef = -1
y_coef = 0
v_xref = 6
v_yref = 0
# Point1Vel = np.concatenate([Point1Vel, [BallVel]], axis = 0)
elif index == 1:
x_coef = 1
y_coef = 1
v_xref = 3
v_yref = 6
# Point1Vel = np.concatenate([Point1Vel, [BallVel]], axis = 0)
elif index == 2:
x_coef = 1
y_coef = -1
v_xref = 3
v_yref = 6
# Point1Vel = np.concatenate([Point1Vel, [BallVel]], axis = 0)
flag = 1
print("=====================================================================")
print("phase index: ", index)
print("init pos: ", BallPos)
print("init vel: ", BallVel)
# print("x_coef, y_coef: ", x_coef, y_coef)
if BallVel[2] >= 0:
zx_ratio = np.abs(BallVel[0] / BallVel[2])
zy_ratio = np.abs(BallVel[1] / BallVel[2])
ZForce = - K_zd * (BallPos[2] - z_ref) - K_zvup * (BallVel[2])
if index == 0 :
XForce = - x_coef * zx_ratio * ZForce
YForce = - zy_ratio * ZForce
elif index == 1:
XForce = - x_coef * zx_ratio * ZForce
YForce = 0
elif index == 2:
XForce = x_coef * zx_ratio * ZForce
YForce = - y_coef * zy_ratio * ZForce
# print("x pos and vel is ", BallPos[0], BallVel[0])
# print("x_coef is ", x_coef)
# if index == 2:
# print(BallVel)
# print("up state: ", XForce, YForce, ZForce)
elif BallVel[2] <= 0:
if xref_flag == 0:
x_ref = BallPos[0] + x_coef * 0.1
y_ref = BallPos[1] + y_coef * 0.1
T_free = - (z_ref - 0.15) / v_zref
# v_xref = np.abs(x_ref - TraPoint_x[index]) / T_free
# v_yref = np.abs(y_ref - TraPoint_y[index]) / T_free
# i = 0
if index == 1:
Point1Pos = np.concatenate([Point1Pos, [BallPos]], axis = 0)
xref_flag = 1
print("max height pos: ", BallPos)
print("max height vel: ", BallVel)
print("x_ref, y_ref, T_free: ", x_ref, y_ref, T_free)
print("v_xref, v_yref: ", v_xref, v_yref)
# XForce = x_coef * (K_xd * (BallPos[0] - x_ref) + 10 * (BallVel[2] - x_coef * v_xref))
# xtemp = np.abs(BallPos[0] - x_ref)
ytemp = K_xd * np.abs(BallPos[1] - y_ref)
# xvtemp = v_xref - x_coef * BallVel[0]
# XForce = x_coef * (K_xd * (np.abs(BallPos[0] - x_ref)) + 50 * (v_xref - x_coef * BallVel[0]))
# YForce = y_coef * (K_xd * (np.abs(BallPos[1] - y_ref)) + 60 * (v_yref - y_coef * BallVel[1]))
XForce = x_coef * 50 * (v_xref - x_coef * BallVel[0])
YForce = y_coef * 50 * (v_yref - y_coef * BallVel[1])
ZForce = - K_zd * (BallPos[2] - z_ref) - K_zvdown * (BallVel[2]- v_zref)
# print("x pos and vel is ", BallPos[0], BallVel[0])
# if index == 2 and i == 0:
# print(BallPos[1])
# print("down state: ", XForce, YForce, ZForce)
elif BallPos[2] <= z_ref:
if flag == 1:
if index == 1:
Point1Vel = np.concatenate([Point1Vel, [BallVel]], axis = 0)
flag = 0
index = index + 1
xref_flag = 0
if index == 3:
index = 0
print("end pos: ", BallPos)
print("end vel: ", BallVel)
XForce = 0.0
YForce = 0.0
ZForce = 0.0
ball1.setExternalForce(0, [0, 0, 0], [XForce, YForce, ZForce])
t = i * t_step
T = np.concatenate([T, [t]], axis = 0)
BallPosition = np.concatenate([BallPosition, [BallPos]], axis = 0)
BallVelocity = np.concatenate([BallVelocity, [BallVel]], axis = 0)
ExternalForce = np.concatenate([ExternalForce, [[XForce, YForce, ZForce]]], axis = 0)
sumF = np.sqrt(XForce**2 + YForce**2 + ZForce**2)
SumForce = np.concatenate([SumForce, [[sumF]]], axis = 0)
world.integrate()
T = T[1:,]
Point1Pos = Point1Pos[1:,]
Point1Vel = Point1Vel[1:,]
BallPosition = BallPosition[1:,]
BallVelocity = BallVelocity[1:,]
ExternalForce = ExternalForce[1:,]
SumForce = SumForce[1:,]
Data = {'BallPos': BallPosition, 'BallVel': BallVelocity, 'ExternalForce': ExternalForce, 'ResForce':SumForce, 'time': T, \
"Point1Pos":Point1Pos, "Point1Vel":Point1Vel}
# print(0)
# return BallPosition, BallVelocity, ExternalForce, T
return Data
def XboxDriControl(ParamData):
flag = 0
xref_flag = 0
z_ref = 0.5
x_ref = 0.0
x_coef = 0.0
v_zref = -6
v_yref_init = 6
v_xref_init = 6
index = 0
K_xd = 400
K_zd = 300
K_zvup = 5
K_zvdown = 20
button_b = 0
BallPos, BallVel = ball1.getState()
print("init ball pos: ", BallPos)
    # arrays for saving ball, arm end-foot, contact force and joint states
    BallPosition = np.array([[0.0, 0.0, 0.0]])    # position of the ball
    BallVelocity = np.array([[0.0, 0.0, 0.0]])    # velocity of the ball
    ExternalForce = np.array([[0.0, 0.0, 0.0]])   # calculated external force applied between the ball and the foot
SumForce = np.array([[0.0]])
T = np.array([0.0])
for i in range(sim_time):
time.sleep(0.0005)
BallPos, BallVel = ball1.getState()
BallPos = BallPos[0:3]
BallVel = BallVel[0:3]
v_xref = 6
v_yref = 0
if xbox.button_b.is_pressed and button_b == 0:
button_b = 1
# print(xbox.button_b.is_pressed)
elif xbox.button_b.is_pressed and button_b == 1:
button_b = 0
# print(v_xref, v_yref, x_coef_down, y_coef_down)
if BallPos[2] > z_ref:
if flag == 0:
if BallVel[1] == 0:
y_coef_up = 0
else:
y_coef_up = - BallVel[1] / np.abs(BallVel[1])
x_coef_up = - BallVel[0] / np.abs(BallVel[0])
# y_coef_up = - BallVel[1] / np.abs(BallVel[1])
x_coef_down = - BallVel[0] / np.abs(BallVel[0])
y_coef_down = 0
flag = 1
print("=====================================================================")
# print("phase index: ", index)
print("init pos: ", BallPos)
print("init vel: ", BallVel)
# print("x_coef, y_coef: ", x_coef, y_coef)
if BallVel[2] >= 0:
zx_ratio = np.abs(BallVel[0] / BallVel[2])
zy_ratio = np.abs(BallVel[1] / BallVel[2])
ZForce = - K_zd * (BallPos[2] - z_ref) - K_zvup * (BallVel[2])
XForce = - x_coef_up * zx_ratio * ZForce
YForce = - y_coef_up * zy_ratio * ZForce
# print("x pos and vel is ", BallPos[0], BallVel[0])
# print("x_coef is ", x_coef)
# if index == 2:
# print(BallVel)
# print("up state: ", XForce, YForce, ZForce)
elif BallVel[2] <= 0:
if button_b == 1:
v_xratio = xbox.axis_r.x
v_yratio = - xbox.axis_r.y
v_xref = v_xref_init * np.abs(v_xratio)
v_yref = v_yref_init * np.abs(v_yratio)
if v_xref == 0:
x_coef_down = 0
else:
x_coef_down = np.abs(v_xratio) / v_xratio
if v_yref == 0:
y_coef_down = 0
else:
y_coef_down = np.abs(v_yratio) / v_yratio
print("v_xratio, v_yratio:", v_xratio, v_yratio)
print("v_xref, v_yref:", v_xref, v_yref)
print("x_coef_down, y_coef_down", x_coef_down, y_coef_down)
# xref_flag = 1
# XForce = x_coef * (K_xd * (BallPos[0] - x_ref) + 10 * (BallVel[2] - x_coef * v_xref))
# xvtemp = v_xref - x_coef * BallVel[0]
# XForce = x_coef * (K_xd * (np.abs(BallPos[0] - x_ref)) + 30 * (v_xref - x_coef * BallVel[0]))
# YForce = y_coef * (K_xd * (np.abs(BallPos[1] - y_ref)) + 30 * (v_yref - y_coef * BallVel[1]))
XForce = x_coef_down * 20 * (v_xref - x_coef_down * BallVel[0])
YForce = y_coef_down * 20 * (v_yref - y_coef_down * BallVel[1])
ZForce = - K_zd * (BallPos[2] - z_ref) - K_zvdown * (BallVel[2]- v_zref)
# print("x pos and vel is ", BallPos, BallVel)
# if index == 2 and i == 0:
# print(BallPos[1])
# print("down state: ", XForce, YForce, ZForce)
elif BallPos[2] <= z_ref:
if flag == 1:
flag = 0
xref_flag = 0
# print("end pos: ", BallPos)
# print("end vel: ", BallVel)
XForce = 0.0
YForce = 0.0
ZForce = 0.0
ball1.setExternalForce(0, [0, 0, 0], [XForce, YForce, ZForce])
t = i * t_step
T = np.concatenate([T, [t]], axis = 0)
BallPosition = np.concatenate([BallPosition, [BallPos]], axis = 0)
BallVelocity = np.concatenate([BallVelocity, [BallVel]], axis = 0)
ExternalForce = np.concatenate([ExternalForce, [[XForce, YForce, ZForce]]], axis = 0)
sumF = np.sqrt(XForce**2 + YForce**2 + ZForce**2)
SumForce = np.concatenate([SumForce, [[sumF]]], axis = 0)
world.integrate()
T = T[1:,]
BallPosition = BallPosition[1:,]
BallVelocity = BallVelocity[1:,]
ExternalForce = ExternalForce[1:,]
SumForce = SumForce[1:,]
Data = {'BallPos': BallPosition, 'BallVel': BallVelocity, 'ExternalForce': ExternalForce, 'ResForce':SumForce, 'time': T}
# print(0)
# return BallPosition, BallVelocity, ExternalForce, T
return Data
if __name__ == "__main__":
# get params config data
FilePath = os.path.dirname(os.path.abspath(__file__))
ParamFilePath = FilePath + "/config/LISM_test.yaml"
ParamFile = open(ParamFilePath, "r", encoding="utf-8")
ParamData = yaml.load(ParamFile, Loader=yaml.FullLoader)
# load activation file and urdf file
raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/activation.raisim")
# ball1_urdf_file = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/urdf/ball.urdf"
ball1_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/urdf/ball.urdf"
# raisim world config setting
world = raisim.World()
# set simulation step
t_step = ParamData["environment"]["t_step"]
sim_time = ParamData["environment"]["sim_time"]
world.setTimeStep(t_step)
ground = world.addGround(0)
# set material collision property
# world.setMaterialPairProp("rubber", "rub", 1, 0, 0)
world.setMaterialPairProp("rubber", "rub", 1.0, 0.85, 0.0001) # ball rebound model test
world.setMaterialPairProp("default", "rub", 0.8, 1.0, 0.0001)
gravity = world.getGravity()
world.setMaterialPairProp("default", "steel", 0.0, 1.0, 0.001)
ball1 = world.addArticulatedSystem(ball1_urdf_file)
print(ball1.getDOF())
ball1.setName("ball1")
gravity = world.getGravity()
print(gravity)
print(ball1.getGeneralizedCoordinateDim())
jointNominalConfig = np.array([0.0, 0.0, 0.5, 1.0, 0.0, 0.0, 0.0])
jointVelocityTarget = np.array([2.0, 0.0, -5, 0.0, 0.0, 0.0])
ball1.setGeneralizedCoordinate(jointNominalConfig)
# print(ball1.getGeneralizedCoordinateDim())
ball1.setGeneralizedVelocity(jointVelocityTarget)
world.setMaterialPairProp("default", "steel", 0.0, 1.0, 0.001)
world.setMaterialPairProp("default", "rub", 0.0, 1.0, 0.001)
## ======================= single object ====================
# ball1 = world.addSphere(0.12, 0.8, "steel")
# dummy_inertia = np.zeros([3, 3])
# np.fill_diagonal(dummy_inertia, 0.1)
# ball1 = world.addMesh(ball_file, 0.6, dummy_inertia, np.array([0, 0, 1]), 0.001, "rub")
# ball1.setPosition(0, 0.0, 0.5)
# ball1.setVelocity(1.0, 0.0, -5, 0.0, 0, 0)
# BallPos = ball1.getPosition()
# BallVel = ball1.getLinearVelocity()
# raisim world server setting
server = raisim.RaisimServer(world)
server.launchServer(8080)
# # dribbling control
# BallPosition, BallVelocity, ExternalForce, T = DriControl(ParamData)
Data = DriControl(ParamData)
# Data = XboxDriControl(ParamData)
# # file save
# Data = FileSave.DataSave(BallState, EndFootState, ForceState, JointTorque, JointVelSaved, T, ParamData)
    # # data visualization
# Data = {'BallPos': BallPosition, 'BallVel': BallVelocity, 'ExternalForce': ExternalForce, 'time': T}
DataPlot(Data)
# print("force, ", ForceState[0:100, 1])
server.killServer()
|
#!/usr/bin/env python
# ==============================================================================
# MIT License
#
# Copyright 2020 Institute for Automotive Engineering of RWTH Aachen University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import os
import argparse
import yaml
import tqdm
import multiprocessing
import numpy as np
import cv2
import skimage.draw
BLOCKING_LABELS = ["building", "wall", "car", "truck", "bus", "caravan", "trailer", "train"]
TALL_NON_BLOCKING_LABELS = ["vegetation"] # will be visible behind small blocking objects (e.g. cars)
COLORS = {
"occluded" : (150, 150, 150),
"static" : ( 0, 0, 0),
"dynamic" : (111, 74, 0),
"ground" : ( 81, 0, 81),
"road" : (128, 64, 128),
"sidewalk" : (244, 35, 232),
"parking" : (250, 170, 160),
"rail track" : (230, 150, 140),
"building" : ( 70, 70, 70),
"wall" : (102, 102, 156),
"fence" : (190, 153, 153),
"guard rail" : (180, 165, 180),
"bridge" : (150, 100, 100),
"tunnel" : (150, 120, 90),
"pole" : (153, 153, 153),
"polegroup" : (153, 153, 153),
"traffic light": (250, 170, 30),
"traffic sign" : (220, 220, 0),
"vegetation" : (107, 142, 35),
"terrain" : (152, 251, 152),
"sky" : ( 70, 130, 180),
"person" : (255, 0, 0),
"rider" : (220, 20, 60),
"car" : ( 0, 0, 142),
"truck" : ( 0, 0, 70),
"bus" : ( 0, 60, 100),
"caravan" : ( 0, 0, 90),
"trailer" : ( 0, 0, 110),
"train" : ( 0, 80, 100),
"motorcycle" : ( 0, 0, 230),
"bicycle" : (119, 11, 32),
"roadmark" : (255, 255, 255)
}
DUMMY_COLOR = tuple(np.random.randint(0, 256, 3))
while DUMMY_COLOR in COLORS.values():
DUMMY_COLOR = tuple(np.random.randint(0, 256, 3))
class Camera:
def __init__(self, config, frame, pxPerM):
self.origin = (frame[0] + config["XCam"] * pxPerM[0], frame[1] - config["YCam"] * pxPerM[1])
self.yaw = -config["yaw"]
self.fov = 2.0 * np.arctan(config["px"] / config["fx"]) * 180.0 / np.pi
thetaMin = self.yaw - self.fov / 2.0
thetaMax = (self.yaw + self.fov / 2.0)
thetaMin = thetaMin % 180 if thetaMin < -180 else thetaMin
thetaMax = thetaMax % -180 if thetaMax > 180 else thetaMax
self.fovBounds = (thetaMin, thetaMax)
def canSee(self, x, y):
dx, dy = x - self.origin[0], y - self.origin[1]
theta = np.arctan2(dy, dx) * 180.0 / np.pi
if self.fovBounds[0] > self.fovBounds[1]:
return (self.fovBounds[0] <= theta) or (theta <= self.fovBounds[1])
else:
return (self.fovBounds[0] <= theta) and (theta <= self.fovBounds[1])
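    # Note on the bounds check above: fovBounds holds (thetaMin, thetaMax) in
    # degrees. When the field of view straddles the +/-180 deg discontinuity,
    # thetaMin ends up greater than thetaMax, so visibility is the union of two
    # half-ranges instead of the intersection. For example (illustrative
    # numbers), with self.yaw = 175 and a 20 deg FOV the bounds become roughly
    # (165, -175): a point at theta = 170 or theta = -178 is visible, while
    # theta = 0 is not.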
def floodFill(px, color, inputImg, outputImg):
mask = np.zeros((inputImg.shape[0]+2, inputImg.shape[1]+2), np.uint8)
flags = 4 | (255 << 8) | cv2.FLOODFILL_FIXED_RANGE | cv2.FLOODFILL_MASK_ONLY
cv2.floodFill(image=inputImg, mask=mask, seedPoint=(px[0], px[1]), newVal=(255, 255, 255), loDiff=(1,1,1), upDiff=(1,1,1), flags=flags)
outputImg[np.where(mask[1:-1, 1:-1] == 255)] = color
def castRay(fromPoint, toPoint, inputImg, outputImg):
# loop over all pixels along the ray, moving outwards
ray = list(zip(*skimage.draw.line(*(int(fromPoint[0]), int(fromPoint[1])), *(int(toPoint[0]), int(toPoint[1])))))
stopRay = stopTransfer = False
for px in ray:
# out-of-bounds check
if not (0 <= px[0] and px[0] < inputImg.shape[1] and 0 <= px[1] and px[1] < inputImg.shape[0]):
continue
# check if ray hit a blocking object class
for label in BLOCKING_LABELS:
if (inputImg[px[1], px[0], :] == COLORS[label]).all():
# if car, continue ray to look for more blocking objects, else stop ray
if label == "car":
if stopTransfer: # if car behind another car, skip
continue
else: # if first car in line of ray
stopTransfer = True
else:
stopRay = True
# transfer blocking object to output image
if not (outputImg[px[1], px[0], :] == COLORS[label]).all():
floodFill(px, COLORS[label], inputImg, outputImg)
break
if stopRay: # stop ray if blocked
break
if stopTransfer: # if transfer is stopped, still look for tall non-blocking labels to transfer
for label in TALL_NON_BLOCKING_LABELS:
if (inputImg[px[1], px[0], :] == COLORS[label]).all():
outputImg[px[1], px[0], :] = inputImg[px[1], px[0], :]
break
else: # transfer px to output image
outputImg[px[1], px[0], :] = inputImg[px[1], px[0], :]
# ==============================================================================
# parse command line arguments and read image
parser = argparse.ArgumentParser(description="Determines the areas not visible from vehicle cameras and removes them from drone camera footage.")
parser.add_argument("img", help="segmented drone image")
parser.add_argument("drone", help="drone camera config file")
parser.add_argument("cam", nargs="+", help="camera config file")
parser.add_argument("--batch", help="process folders of images instead of single images", action="store_true")
parser.add_argument("--output", help="output directory to write output images to")
args = parser.parse_args()
# load image paths
imagePaths = []
if not args.batch:
imagePaths.append(os.path.abspath(args.img))
else:
path = os.path.abspath(args.img)
imagePaths = [os.path.join(path, f) for f in sorted(os.listdir(path)) if f[0] != "."]
# parse camera configs
with open(os.path.abspath(args.drone)) as stream:
droneConfig = yaml.safe_load(stream)
cameraConfigs = []
for cameraConfig in args.cam:
with open(os.path.abspath(cameraConfig)) as stream:
cameraConfigs.append(yaml.safe_load(stream))
# create output directories
if args.output:
outputDir = os.path.abspath(args.output)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# determine image dimensions in (m)
inputImg = cv2.imread(imagePaths[0])
dxm = inputImg.shape[1] / droneConfig["fx"] * droneConfig["ZCam"]
dym = inputImg.shape[0] / droneConfig["fy"] * droneConfig["ZCam"]
pxPerM = (inputImg.shape[1] / dxm, inputImg.shape[0] / dym)
base_link = (int(inputImg.shape[1] / 2.0 - droneConfig["XCam"] * pxPerM[0]), int(inputImg.shape[0] / 2.0 + droneConfig["YCam"] * pxPerM[1]))  # scale the y-offset with the y-resolution (pxPerM[1])
# create cameras
cameras = []
for cameraConfig in cameraConfigs:
cam = Camera(cameraConfig, base_link, pxPerM)
cameras.append(cam)
# define processing of a single image
def processImage(imagePath):
filename = os.path.basename(imagePath)
# read input image and create blank output image
inputImg = cv2.imread(imagePath)
inputImg = cv2.cvtColor(inputImg, cv2.COLOR_BGR2RGB)
outputImg = np.zeros(inputImg.shape, dtype=np.uint8) + np.array(COLORS["occluded"], dtype=np.uint8)
# temporarily recolor ego vehicle (if in image), s.t. it does not block
if base_link[0] > 0 and base_link[1] > 0:
floodFill(base_link, DUMMY_COLOR, inputImg, inputImg)
# loop over all border pixels to determine if ray is visible
rays = []
for cam in cameras:
for x in range(inputImg.shape[1]):
if cam.canSee(x, 0):
rays.append((cam.origin, (x, 0)))
for x in range(inputImg.shape[1]):
if cam.canSee(x, inputImg.shape[0]):
rays.append((cam.origin, (x, inputImg.shape[0])))
for y in range(inputImg.shape[0]):
if cam.canSee(0, y):
rays.append((cam.origin, (0, y)))
for y in range(inputImg.shape[0]):
if cam.canSee(inputImg.shape[1], y):
rays.append((cam.origin, (inputImg.shape[1], y)))
# cast rays
for ray in rays:
castRay(ray[0], ray[1], inputImg, outputImg)
# recolor ego vehicle as car and transfer to output
if base_link[0] > 0 and base_link[1] > 0:
floodFill(base_link, COLORS["car"], inputImg, outputImg)
floodFill(base_link, COLORS["car"], inputImg, outputImg)
# display or export output image
outputImg = cv2.cvtColor(outputImg, cv2.COLOR_RGB2BGR)
if args.output:
cv2.imwrite(os.path.join(outputDir, filename), outputImg)
else:
cv2.namedWindow(filename, cv2.WINDOW_NORMAL)
cv2.imshow(filename, outputImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
# process images in parallel
if args.batch:
print("Warning: This might take an extremely long time, are you sure you need to (re)generate the occluded labels?")
pool = multiprocessing.Pool(multiprocessing.cpu_count())
for _ in tqdm.tqdm(pool.imap(processImage, imagePaths), desc="Processing images", total=len(imagePaths), smoothing=0):
pass
pool.close()
pool.join()
else:
processImage(imagePaths[0])
|
import os
from mayatools import context
from mayatools import units
from sgfs import SGFS
from sgpublish.check import check_paths
from maya import cmds
steps = []
def _step(func):
steps.append(func)
return func
def _get_reference(namespace):
"""Get the (ref_node, filename) for a reference with the given namespace.
:raises: ``ValueError`` if no reference exists with the given namespace.
"""
namespace = ':' + namespace.strip(':')
for filename in cmds.file(query=True, reference=True) or ():
try:
this_namespace = cmds.referenceQuery(filename, namespace=True)
except RuntimeError as e:
# We need a namespace, so screw it.
continue
if namespace == this_namespace:
node = cmds.referenceQuery(filename, referenceNode=True)
return node, filename
raise ValueError(namespace)
@_step
def disable_hud_expressions():
for node in cmds.ls(type='expression') or ():
expr = cmds.getAttr(node + '.expression') or ''
if expr.strip().startswith('headsUpDisplay'):
print 'Deleting', node
cmds.delete(node)
@_step
def fetch_newest_camera():
"""Update the camera rig to the newest version in Shotgun."""
# Note that we need to do this manually because there are a pile of shots
# that do not have the camera referenced at all.
try:
cam_ref_node, current_cam_path = _get_reference(':cam')
except ValueError:
raise ValueError("No \"cam\" namespace.")
sgfs = SGFS()
path = '/Volumes/CGroot/Projects/MM52x2/assets/utilities/Camera_rig/rig/published/maya_scene/camera_rig/'
found = sgfs.entities_in_directory(path, entity_type='PublishEvent')
if not found:
raise ValueError("No camera publishes?!")
publishes = [e for path, e in found]
sgfs.session.fetch(publishes, ['path', 'created_at'])
publishes.sort(key=lambda e: e['created_at'])
latest_cam_path = publishes[-1].fetch('path')
if current_cam_path == latest_cam_path:
print ' Up to date.'
return
print ' Updating to', latest_cam_path
type_ = 'mayaAscii' if latest_cam_path.endswith('.ma') else 'mayaBinary'
cmds.file(latest_cam_path, loadReference=cam_ref_node, type=type_)
@_step
def update_references():
references = cmds.file(q=True, reference=True)
references = [path for path in references if cmds.referenceQuery(path, isLoaded=True)]
for status in check_paths(references):
print status.path
if status.is_latest:
print ' Up to date.'
continue
new_path = status.latest['sg_path']
ext = os.path.splitext(new_path)[1]
if ext not in ('.ma', '.mb'):
cmds.warning('PublishEvent.sg_path is not a maya file: {}'.format(new_path))
continue
print ' Updating to', new_path
type_ = 'mayaAscii' if ext == '.ma' else 'mayaBinary'
node = cmds.referenceQuery(status.path, referenceNode=True)
cmds.file(new_path, loadReference=node, type=type_)
def _bake(things):
minTime = cmds.playbackOptions(query=True, minTime=True)
maxTime = cmds.playbackOptions(query=True, maxTime=True)
# Hide poly for faster simulation.
# TODO: Query the API to figure out which panel this is for sure.
with context.command(cmds.modelEditor, 'modelPanel4', edit=True, polymeshes=False), context.selection(things, replace=True):
cmds.BakeSimulation(time='%s:%s' % (minTime, maxTime))
# Set the out-tangent; in-tangent cannot be set to "step".
cmds.keyTangent(edit=True, outTangentType="step")
@_step
def bake_dynamic_joints():
# Select all joints (with the suffix "dynbake").
all_joints = cmds.ls('*dynbake*', recursive=True)
if not all_joints:
print("No '*dynbake*' found; no dynamic joints to bake.")
return
_bake(all_joints)
@_step
def bake_controls():
"""Bakes controls (at 12fps) so we can transition to 24fps."""
fps = units.get_fps()
if fps != 12:
raise ValueError('Must bake controls at 12fps; currently %s.' % fps)
    # Select bake objects, i.e. ("*:*" + "*_Ctrl"), and deselect the Flexi ctrls.
controls = cmds.ls('*:*_Ctrl') or ()
controls = [x for x in controls if 'Flexi' not in x]
if not controls:
raise ValueError("There are no '*:*_Ctrl' objects to bake.")
_bake(controls)
@_step
def shift_time():
"""Convert from 12 to 24fps."""
fps = units.get_fps()
if fps == 24:
return
if fps != 12:
cmds.warning('We expect FPS to be 12, but it is currently %s.' % fps)
units.set_fps(24)
@_step
def smooth_geo():
"""Set smooth level in viewport to 1 and render time smoothing to 3 divisions."""
groups = cmds.ls("*:threeD_geo_grp") or ()
if not groups:
cmds.warning("No '*:threeD_geo_grp' groups.")
return
all_polygons = set()
for group in groups:
children = cmds.listRelatives(group, allDescendents=True, fullPath=True) or ()
for child in children:
polygons = cmds.filterExpand(child, selectionMask=12, fullPath=True) # 12 -> polygons (polymesh?)
all_polygons.update(polygons or ())
if not all_polygons:
raise ValueError("No polygons under '*:threeD_geo_grp' groups.")
for poly in all_polygons:
cmds.setAttr(poly + ".useSmoothPreviewForRender", 0)
cmds.setAttr(poly + ".smoothLevel", 0) # Preview.
cmds.setAttr(poly + ".displaySmoothMesh", 1)
cmds.setAttr(poly + ".renderSmoothLevel", 3)
#@_step
def constrain_head():
"""Constrain any in scene character's head to the main camera."""
cmds.warning("You shouldn't need to constrain heads anymore. What are you calling this for?!")
heads = cmds.ls('*:head_Sdk')
if not heads:
cmds.warning("There are no '*:head_Sdk' to constrain.")
return
if not cmds.objExists('cam:MainCAM'):
raise ValueError("No 'cam:MainCAM'.")
camera = 'cam:MainCAM'
for head in heads:
cmds.aimConstraint(camera, head,
offset=[0, 0, 0],
weight=1,
aimVector=[0, 0, 1],
upVector=[0, 1, 0],
worldUpType="object",
worldUpObject="cam:MainCAM_up_loc",
)
@_step
def shadow_light_linker():
"""Light links character shadow lights to the set group."""
lights = cmds.ls("*:shadowLight_light")
light_sets = cmds.ls("*_lightLink*")
if not lights:
cmds.warning("No '*:shadowLight_light' in scene.")
return
if not light_sets:
cmds.warning("No '*_lightLink*' in scene.")
return
for light in lights:
for light_set in light_sets:
cmds.lightlink(light=light, object=light_set)
@_step
def set_shadow_switch():
"""Set god_ctrl switch depending on if the set is interior or not."""
# Find the set.
# TODO: Moar SGFS.
files_referenced = cmds.file(query=True, list=True) or ()
for path in files_referenced:
if '/assets/sets/' not in path:
continue
if '/model/' not in path:
continue
break
else:
raise ValueError("Could not identify the set reference.")
# HACK: ...
is_interior = 'Interior' in path
print 'We have decided that the set is %san interior.' % ('' if is_interior else 'not ')
# Find all the "switch"es on god controls, and turn them on if interior,
# and off otherwise.
god_ctrls = cmds.ls('*:God_Ctrl') or ()
for god_ctrl in god_ctrls:
switch = god_ctrl + '.switch'
if not cmds.objExists(switch):
continue # because not all gods have switches.
cmds.setAttr(switch, 1 if is_interior else 0)
@_step
def manage_colours():
cmds.colorManagementPrefs(e=True, cmEnabled=True)
cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=False)
# These should be overkill together.
cmds.colorManagementPrefs(e=True, outputTransformEnabled=False)
cmds.colorManagementPrefs(e=True, outputTransformName='Raw')
cmds.colorManagementPrefs(e=True, renderingSpaceName='scene-linear Rec 709/sRGB')
@_step
def config_mentalray():
# Load mentalRay if not yet active.
if 'Mayatomr' not in cmds.pluginInfo(q=True, listPlugins=True):
cmds.loadPlugin('Mayatomr')
# Set Renderer to MentalRay.
cmds.setAttr('defaultRenderGlobals.currentRenderer', 'mentalRay', type='string')
# Set quality settings.
# Kevin wanted "Production", but lets just straight to "FinalFrameEXR"
# (also because 'defaultRenderGlobals' has no "Production").
cmds.nodePreset(load=('defaultRenderGlobals', 'FinalFrameEXR'))
cmds.nodePreset(load=('miDefaultOptions', 'FinalFrameEXR'))
def setup_all():
for step in steps:
print '=>', step.__name__
step()
|
from rest_framework.mixins import (
CreateModelMixin, DestroyModelMixin, ListModelMixin
)
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from pydis_site.apps.api.models.bot import BumpedThread
from pydis_site.apps.api.serializers import BumpedThreadSerializer
class BumpedThreadViewSet(
GenericViewSet, CreateModelMixin, DestroyModelMixin, ListModelMixin
):
"""
View providing CRUD (Minus the U) operations on threads to be bumped.
## Routes
### GET /bot/bumped-threads
Returns all BumpedThread items in the database.
#### Response format
>>> list[int]
#### Status codes
- 200: returned on success
- 401: returned if unauthenticated
### GET /bot/bumped-threads/<thread_id:int>
Returns whether a specific BumpedThread exists in the database.
#### Status codes
- 204: returned on success
- 404: returned if a BumpedThread with the given thread_id was not found.
### POST /bot/bumped-threads
Adds a single BumpedThread item to the database.
#### Request body
>>> {
... 'thread_id': int,
... }
#### Status codes
- 201: returned on success
- 400: if one of the given fields is invalid
### DELETE /bot/bumped-threads/<thread_id:int>
Deletes the BumpedThread item with the given `thread_id`.
#### Status codes
- 204: returned on success
- 404: if a BumpedThread with the given `thread_id` does not exist
"""
serializer_class = BumpedThreadSerializer
queryset = BumpedThread.objects.all()
def retrieve(self, request: Request, *args, **kwargs) -> Response:
"""
DRF method for checking if the given BumpedThread exists.
Called by the Django Rest Framework in response to the corresponding HTTP request.
"""
self.get_object()
return Response(status=204)
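# Illustrative requests against the routes documented in the class docstring
# (the thread id below is a made-up value):
#
#     POST   /bot/bumped-threads   {"thread_id": 1234567890}   -> 201 Created
#     GET    /bot/bumped-threads/1234567890                    -> 204 No Content
#     DELETE /bot/bumped-threads/1234567890                    -> 204 No Content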
|
# coding: utf-8
import time
import random
import logging
import threading
from Node import node  # To import the class instead of the module
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S')
logger = logging.getLogger('Employee')
class Employee(threading.Thread):
def __init__(self, address=('localhost', 5001+3), id=3, init_address=('localhost', 5000), timeout=3):
threading.Thread.__init__(self)
self.node = node(address, self.__class__.__name__, id, init_address)
        self.deliveries = {}  # Dictionary to store the client's ID : ADDR
def run(self):
self.node.start()
while True:
o = self.node.IN_QUEUE.get()
logger.critical("[EMPLOYEE]: %s", o)
if o['method'] == 'PICKUP':
self.deliveries[o['cl_id']] = o['c_addr']
logger.debug("[EMPLOYEE]: PENDINGS (n=%s):%s", len(self.deliveries), self.deliveries)
            elif o['method'] == 'DONE':  # Since the Menu is usually finished right after the Pickup request, look up the client ADDR in self.deliveries to know whom to deliver the Menu to
msg = {}
to_cl_id = o['cl_id']
msg['args'] = (to_cl_id, o['menu'])
                to_cl_addr = self.deliveries.pop(to_cl_id)  # Remove and return the ADDR of the client with the given ID
time.sleep(random.gauss(2, 0.5))
                self.node.send(to_cl_addr, msg)  # Send the food to the Client
if __name__ == '__main__':  # TODO: take the ID from a command-line argument or similar
emp = Employee(('localhost', 5001+3), 3,('localhost', 5000))
    emp.start()  # TODO: later turn this into a function (e.g. its own life-cycle loop or similar)
|
from itertools import cycle
from json import load
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
with open('bgm_anime_dataset.json', 'r', encoding='utf8') as f:
data = load(f)
scores = np.array(
[bangumi['rating']['score'] for bangumi in data],
dtype=np.float64
)
count = scores.size
mean = np.mean(scores, dtype=np.float64)
median = np.median(scores)
min_score = np.min(scores)
max_score = np.max(scores)
n, bins = np.histogram(scores, bins=np.arange(-0.05, 10.05, 0.1))
hist = np.vstack((bins[:-1] + 0.05, n))
for i in range(hist.shape[1]):
print('%.1f %3d' % (hist[0, i], hist[1, i]))
rc('font', family='FZZhunYuan-M02', size=14)
fig, ax = plt.subplots()
for i, c in zip(range(20), cycle(('#fca2ae', '#f6c2d0'))):
ax.bar(hist[0, i*5:i*5+5], hist[1, i*5:i*5+5], width=0.05, color=c)
ax.annotate(
'最高:%d @ %.1f' % (hist[1, 66], hist[0, 66]),
(hist[0, 66], hist[1, 66]),
xycoords='data', xytext=(0.25, 0.56), textcoords='axes fraction',
arrowprops={'arrowstyle': '->'}
)
ax.annotate(
'局部最低:%d @ %.1f' % (hist[1, 69], hist[0, 69]),
(hist[0, 69], hist[1, 69]),
xycoords='data', xytext=(0.25, 0.43), textcoords='axes fraction',
arrowprops={'arrowstyle': '->'}
)
ax.annotate(
'平均:%.3f' % (mean),
(mean, 0),
xycoords='data', xytext=(0.58, -0.12), textcoords='axes fraction',
arrowprops={'arrowstyle': '->'}
)
ax.annotate(
'中位:%.1f' % (median),
(median, 0),
xycoords='data', xytext=(0.68, -0.12), textcoords='axes fraction',
arrowprops={'arrowstyle': '->'}
)
ax.annotate(
'最低:%.1f' % (min_score),
(min_score, 0),
xycoords='data', xytext=(0.125, -0.12), textcoords='axes fraction',
arrowprops={'arrowstyle': '->'}
)
ax.annotate(
'最高:%.1f' % (max_score),
(max_score, 0),
xycoords='data', xytext=(0.87, -0.12), textcoords='axes fraction',
arrowprops={'arrowstyle': '->'}
)
ax.grid(True, axis='y')
ax.set_xticks(np.arange(0, 10 + 0.1, 0.5))
ax.set_xlabel('评分')
ax.set_ylabel('动画数')
ax.set_title(f'Bangumi 动画评分分布 总计{count}部')
fig.tight_layout()
plt.show()
|
from seltest import Base, url, hide
window_size = [200, 200]
base_url = 'google.com'
class GoogleTest(Base):
def base(self, driver): pass
def title(self, driver):
assert driver.title == 'Google', 'title should be "Google"'
def bad_title(self, driver):
assert driver.title != 'Google', 'this assertion should fail!'
@hide('center')
def hidden(self, driver): pass
|
# -*- coding: utf-8 -*-
"""
@author: Austin Nicolai
"""
import sys
from sensor import Sensor
import math
from random import randint
class POI_Sensor(Sensor):
def __init__(self, sector, location, rover_heading, sensor_range, sensor_noise):
super(POI_Sensor, self).__init__(sector, location, rover_heading, sensor_range, sensor_noise)
def getPoiCount(self, POI_list, min_observation_dist):
        # accumulate the total POI value observed
POI_count = 0
        # loop over all POIs
for poi in POI_list:
            # determine the distance to the POI
distance = self.location - poi.location
# print 'Distance in poi count ', distance
# add sensor noise to the distance
random_noise = randint(-self.sensor_noise, self.sensor_noise)
distance = distance * (1. + random_noise/100.)
            # determine the angle to the POI
dx = poi.location.x - self.location.x
dy = poi.location.y - self.location.y
if dx == 0:
dx = sys.float_info.min
angle = math.atan2(dy, dx)
angle = angle * 180. / math.pi # convert to degrees
# ensure angle in range [0, 360]
if angle < 0:
angle += 360
# angle range is: [left_edge, right_edge)
# if distance is 0, the POI is on top of the rover and can be seen:
if distance == 0:
sum_dist = max(distance**2, min_observation_dist**2)
POI_count += (poi.V/sum_dist)
# if angle range straddles 0:
elif (self.left_edge < 90) and (self.right_edge > 270):
if (distance <= self.sensor_range) and ((0 <= angle <= self.left_edge) or (360 > angle > self.right_edge)):
sum_dist = max(distance**2, min_observation_dist**2)
POI_count += (poi.V/sum_dist)
# if angle range is typical:
elif (distance <= self.sensor_range) and (self.right_edge < angle <= self.left_edge):
sum_dist = max(distance**2, min_observation_dist**2)
POI_count += (poi.V/sum_dist)
return POI_count
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import json
import logging
from datetime import datetime, timedelta
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from urlparse import urljoin
import os
import re
from lxml import etree
import cgi
class BaseModel(ndb.Model):
added_on = ndb.DateTimeProperty(auto_now_add = True)
updated_on = ndb.DateTimeProperty(auto_now = True)
class Device(BaseModel):
name = ndb.StringProperty()
url = ndb.StringProperty()
class SiteInformation(BaseModel):
url = ndb.StringProperty()
favicon_url = ndb.StringProperty()
title = ndb.StringProperty()
description = ndb.StringProperty()
class ResolveScan(webapp2.RequestHandler):
def post(self):
input_data = self.request.body
input_object = json.loads(input_data) # Data is not sanitised.
metadata_output = []
output = {
"metadata": metadata_output
}
devices = []
if "objects" in input_object:
objects = input_object["objects"]
else:
objects = []
# Resolve the devices
for obj in objects:
key_id = None
url = None
force = False
if "id" in obj:
key_id = obj["id"]
elif "url" in obj:
key_id = obj["url"]
url = obj["url"]
if "force" in obj:
force = True
            # We need to go and fetch. We probably want to fetch asynchronously.
# We don't need RSSI yet.
#rssi = obj["rssi"]
# In this model we can only deal with one device with a given ID.
device = Device.get_or_insert(key_id, name = key_id, url = url)
device_data = {
"id": device.name
}
if force or device.url is not None:
# Really if we don't have the data we should not return it.
siteInfo = SiteInformation.get_by_id(device.url)
if force or siteInfo is None or siteInfo.updated_on < datetime.now() - timedelta(minutes=5):
# If we don't have the data or it is older than 5 minutes, fetch.
siteInfo = FetchAndStoreUrl(siteInfo, device.url)
if siteInfo is not None:
device_data["url"] = siteInfo.url
device_data["title"] = siteInfo.title
device_data["description"] = siteInfo.description
device_data["icon"] = siteInfo.favicon_url
device_data["favicon_url"] = siteInfo.favicon_url
else:
device_data["url"] = device.url
metadata_output.append(device_data)
logging.info(output);
# Resolve from DB based off key.
self.response.headers['Content-Type'] = 'application/json'
json_data = json.dumps(output);
self.response.write(json_data)
class SaveUrl(webapp2.RequestHandler):
def post(self):
name = self.request.get("name")
url = self.request.get("url")
title = ""
icon = "/favicon.ico"
device = Device.get_or_insert(name, name = name, url = url)
device.url = url
device.put()
# Index the page
        FetchAndStoreUrl(None, device.url)  # FetchAndStoreUrl expects (siteInfo, url); no cached SiteInformation here
self.redirect("/index.html")
def FetchAndStoreUrl(siteInfo, url):
# Index the page
result = urlfetch.fetch(url)
if result.status_code == 200:
encoding = GetContentEncoding(result.content)
final_url = result.final_url or url
return StoreUrl(siteInfo, url, final_url, result.content, encoding)
def GetContentEncoding(content):
encoding = None
parser = etree.HTMLParser(encoding='iso-8859-1')
htmltree = etree.fromstring(content, parser)
value = htmltree.xpath("//head//meta[@http-equiv='Content-Type']/attribute::content")
if encoding is None:
if (len(value) > 0):
content_type = value[0]
_, params = cgi.parse_header(content_type)
if 'charset' in params:
encoding = params['charset']
if encoding is None:
value = htmltree.xpath("//head//meta/attribute::charset")
if (len(value) > 0):
encoding = value[0]
if encoding is None:
try:
encoding = 'utf-8'
u_value = unicode(content, 'utf-8')
except UnicodeDecodeError:
encoding = 'iso-8859-1'
u_value = unicode(content, 'iso-8859-1')
return encoding
def StoreUrl(siteInfo, url, final_url, content, encoding):
title = None
description = None
icon = None
# parse the content
parser = etree.HTMLParser(encoding=encoding)
htmltree = etree.fromstring(content, parser)
value = htmltree.xpath("//head//title/text()");
if (len(value) > 0):
title = value[0]
# Try to use <meta name="description" content="...">.
value = htmltree.xpath("//head//meta[@name='description']/attribute::content")
if (len(value) > 0):
description = value[0]
if description is not None and len(description) == 0:
description = None
if description == title:
description = None
# Try to use <div class="content">...</div>.
if description is None:
value = htmltree.xpath("//body//*[@class='content']//*[not(*|self::script|self::style)]/text()")
description = ' '.join(value)
if len(description) == 0:
description = None
# Try to use <div id="content">...</div>.
if description is None:
value = htmltree.xpath("//body//*[@id='content']//*[not(*|self::script|self::style)]/text()")
description = ' '.join(value)
if len(description) == 0:
description = None
# Fallback on <body>...</body>.
if description is None:
value = htmltree.xpath("//body//*[not(*|self::script|self::style)]/text()")
description = ' '.join(value)
if len(description) == 0:
description = None
# Cleanup.
if description is not None:
description = description.strip()
description = description.replace("\r", " ");
description = description.replace("\n", " ");
description = description.replace("\t", " ");
description = description.replace("\v", " ");
description = description.replace("\f", " ");
while " " in description:
description = description.replace(" ", " ");
if description is not None and len(description) > 500:
description = description[:500]
if icon is None:
value = htmltree.xpath("//head//link[@rel='shortcut icon']/attribute::href");
if (len(value) > 0):
icon = value[0]
if icon is None:
value = htmltree.xpath("//head//link[@rel='icon']/attribute::href");
if (len(value) > 0):
icon = value[0]
if icon is None:
value = htmltree.xpath("//head//link[@rel='apple-touch-icon']/attribute::href");
if (len(value) > 0):
icon = value[0]
if icon is not None:
icon = urljoin(final_url, icon)
if icon is None:
icon = urljoin(final_url, "/favicon.ico")
if siteInfo is None:
siteInfo = SiteInformation.get_or_insert(url,
url = final_url,
title = title,
favicon_url = icon,
description = description)
else:
# update the data because it already exists
siteInfo.url = final_url
siteInfo.title = title
siteInfo.favicon_url = icon
siteInfo.description = description
siteInfo.put()
return siteInfo
class Index(webapp2.RequestHandler):
def get(self):
self.response.out.write("")
app = webapp2.WSGIApplication([
('/', Index),
('/resolve-scan', ResolveScan),
('/add-device', SaveUrl)
], debug=True)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pulp as lp
import re
from matplotlib import pyplot as plt
from queue import PriorityQueue
import pandas as pd
def user_definedProb(Charge_Freq, values):  # spread hourly charging frequencies over the 1440 minutes of a day and normalise them to a probability distribution
totalFreq = sum(Charge_Freq)
Charge_Prob = [x / totalFreq for x in Charge_Freq]
Charge_Prob_new = np.zeros(1440)
values_interval = np.int_([x * 60 for x in values])
j = 0
for j in range(16):
if j < 15:
index_bottom = values_interval[j]
index_upper = values_interval[j + 1]
Charge_Prob_new[index_bottom:index_upper] = Charge_Prob[j]
if j == 15:
index_bottom = values_interval[0]
Charge_Prob_new[0:index_bottom] = Charge_Prob[j]
index_upper = values_interval[j]
if index_upper < 1440:
Charge_Prob_new[index_upper:1440] = Charge_Prob[j]
totalFreq = sum(Charge_Prob_new)
Charge_Prob_new2 = [x / totalFreq for x in Charge_Prob_new]
return Charge_Prob_new2
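# Quick sanity check for user_definedProb (illustrative values only, not from the
# underlying charging-frequency study):
#   freqs = [1.0] * 16
#   hours = list(np.linspace(0.5, 23.5, 16))
#   probs = user_definedProb(freqs, hours)
#   assert len(probs) == 1440 and abs(sum(probs) - 1.0) < 1e-9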
def getArrivalTime(nVech): # starting time is defined by use cases
if useCase ==1: #residential
arrivalTime = np.random.normal(20, 2, nVech)
return arrivalTime
if useCase==2: #office
arrivalTime = np.random.lognormal(2.32274, 0.301833, nVech)
return arrivalTime
if useCase==3: #commercial
Charge_Freq = [507.377049200000, 422.885245900000, 401.639344300000, 420.262295100000, 508.688524600000,
634.918032800000, 898.52459000000, 1390.57377000000, 1913.80327900000, 2187.81967200000,
2363.32786900000, 2139.70491800000, 1821.21511500000, 1335.36065600000, 1311.39344300000,
603.377049180328]
values = [0.097654718, 1.146678171, 3.221429132, 5.887402934, 7.3409287, 9.046121264, 10.92744529, 13.06608361,
15.9770178100000, 18.0320025100000, 19.7054278800000, 21.7332339800000, 22.5238842300000,
23.1479331700000, 23.8559887000000, 24.0976547180171]
values_new = range(1440)
Charge_Prob_new2=user_definedProb(Charge_Freq,values)
arrivalTime = np.random.choice(values_new, nVech, p=list(Charge_Prob_new2))
return arrivalTime/60
if useCase==4: #public
Charge_Freq = [377.049180300000, 407.065573800000, 439.491803300000, 502.68852500000,
557.18032800000, 664.37704900000, 874.91803300000, 1109.42623000000, 1773.93442600000,
1974.59016400000, 2073.77049200000, 2098.36065600000, 2110.65573800000, 2116.80327900000,
2104.50819700000, 2086.06557400000, 2079.91803300000, 2055.32786900000, 1944.67213100000,
1840.16393400000, 1704.91803300000, 1606.55737700000, 1508.19672100000, 1348.36065600000,
1225.40983600000, 1108.60655700000, 979.508196700000, 881.147541000000, 367.131147500000]
values = [0.466860146000000, 0.971605616000000, 1.90838497100000, 2.84092870000000,
3.94501529500000, 4.96368342600000, 5.81645619300000, 6.58557533900000, 7.35469448600000,
8.64020707500000, 9.84312495100000, 11.8225743200000, 13.3721076200000, 14.9219938800000,
16.4729390500000, 18.1103616000000, 19.7040160000000, 21.3417915100000, 22.2955133700000,
22.7321358500000, 22.8690877700000, 22.9608596800000, 23.0956937800000, 23.2340575700000,
23.3272413500000, 23.3770099600000, 23.5136089100000, 23.5623186100000, 23.7035061600000]
values_new=range (1440)
Charge_Prob_new2=user_definedProb(Charge_Freq, values)
arrivalTime = np.random.choice(values_new, nVech, p=list(Charge_Prob_new2))
return arrivalTime/60
def time24(time):
condlist = [time< 2160, time >=2160]
choicelist= [time, -1]
time=np.select(condlist, choicelist)
condlist = [time < 1440, time >= 1440]
choicelist = [time, time - 1440]
time = np.select(condlist, choicelist)
return time
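# Illustration of time24 (added for clarity, not part of the original script):
# minutes past 1440 wrap into the next day, while anything at or beyond minute
# 2160 is flagged as -1 and later treated as "uncharged",
# e.g. time24(np.array([1500, 2200])) -> [60, -1].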
def getStartingSOC(isSlow): # SOC is defined by charging frequencies
if isSlow == 1: # slow charge: one charge per day
dist_list = np.random.lognormal(3.67017251945698, 0.532230403897875, n_slowVeh)
startingSOC = 1 - dist_list / batteryRange
return startingSOC
if isSlow > 1: # fast charge: multiple days per charge
        if useCase==1 or useCase==2: # residential & office: 'mileage traveled' approach
dist_list = np.random.lognormal(3.67017251945698, 0.532230403897875, n_fastVeh)
for i in range(n_fastVeh):
dist_list[i] = dist_list[i] * Charge_frequency
if dist_list[i] > batteryRange * 0.8: #cannot be below 20%SOC
while True:
dist_list[i] = np.random.lognormal(3.67017251945698, 0.532230403897875, 1)
dist_list[i] = dist_list[i] * Charge_frequency
if dist_list[i] <= batteryRange * 0.8:
break
startingSOC = 1 - dist_list / batteryRange
return startingSOC
if useCase>2: # commercial & public: 'predefined starting SOC':
startingSOC = np.random.beta(1.37722, 3.42153, n_fastVeh)
return startingSOC
def getStartingSOC_V2G(n_Veh): # SOC is defined by charging frequencies
    if useCase == 1 or useCase == 2: # residential & office: 'mileage traveled' approach
dist_list = np.random.lognormal(3.67017251945698, 0.532230403897875, n_Veh)
for i in range(n_Veh):
dist_list[i] = dist_list[i] * Charge_frequency
if dist_list[i] > batteryRange * 0.8: # cannot be below 20%SOC
while True:
dist_list[i] = np.random.lognormal(3.67017251945698, 0.532230403897875, 1)
dist_list[i] = dist_list[i] * Charge_frequency
if dist_list[i] <= batteryRange * 0.8:
break
startingSOC = 1 - dist_list / batteryRange
return startingSOC
if useCase > 2: # commercial & public: 'predefined starting SOC':
startingSOC = np.random.beta(1.37722, 3.42153, n_Veh)
return startingSOC
def Unmanaged_charge():
iterator=range(n_iter)
endSOC=1
#first distinguish vehicles that use slow charge from those that use fast charge
if n_slowVeh >0:
slow_Load_maxtrix = np.zeros((n_iter, Maxtime_Interval))
for it in iterator:
slow_Load=np.zeros(Maxtime_Interval)
startingSOC=getStartingSOC(1)
charge_Duration=(endSOC-startingSOC) * batteryCapacity/power_Slow/charge_efficiency*60
arrivalTime = getArrivalTime(n_slowVeh)
arrivalTime.sort()
arrivalTime = np.int_(np.round(arrivalTime * 60))
# calculate the charging starting time based on the queuing size, that is, limited number of chargers
startingTime = np.zeros(n_slowVeh)
endTime = np.zeros(n_slowVeh)
startingTime[:nslow_Chargers] = arrivalTime[:nslow_Chargers]
endTime[:nslow_Chargers] = np.int_(
np.around(startingTime[:nslow_Chargers] + charge_Duration[:nslow_Chargers]))
q = PriorityQueue()
for i in range(0, nslow_Chargers): # initiate the queue
q.put(endTime[i])
for i in range(nslow_Chargers, len(arrivalTime)):
non_available = [j for j in q.queue if j > arrivalTime[i]]
# print('que status', q.queue)
if len(non_available) == nslow_Chargers:
startingTime[i] = np.int_(min(non_available)) + 1
q.get()
else:
startingTime[i] = np.int_(arrivalTime[i])
q.get()
endTime[i] = np.int_(np.around(startingTime[i] + charge_Duration[i]))
q.put(endTime[i])
waitTime = startingTime - arrivalTime
startingTime = time24(startingTime)
endTime = time24(endTime)
startingTime = [int(i) for i in startingTime]
endTime = [int(i) for i in endTime]
for i in range(0, len(waitTime)):
if endTime[i] == -1:
waitTime[i] = 0
avgwaitSlow = np.mean(waitTime)
unChargedveh_slow = endTime.count(-1)
# print('arrival time', arrivalTime)
# print('starting time', startingTime)
# print('end time', endTime)
# print('wait time', waitTime)
# print('average wait time for slow chargers is %5.2f hours' %avgwaitSlow)
# print('%4d vehicles uncharged due to lack of slow chargers' %unChargedveh_slow)
for c in range(n_slowVeh):
if endTime[c] ==-1 or startingTime[c] ==-1:
break
if endTime[c]<=Maxtime_Interval:
iterator_time=np.arange(startingTime[c]-1,endTime[c],1)
for t in iterator_time:
slow_Load[t]= slow_Load[t]+power_Slow
if endTime[c]>Maxtime_Interval:
iterator_time=np.arange(startingTime[c]-1, Maxtime_Interval,1)
for t in iterator_time:
slow_Load[t]=slow_Load[t]+power_Slow
iterator_time=np.arange(0,endTime[c]-Maxtime_Interval,1)
for t in iterator_time:
slow_Load[t]=slow_Load[t]+power_Slow
slow_Load_maxtrix[it]=slow_Load
slow_Load_avg=slow_Load_maxtrix.mean(0)
slow_Load_var=slow_Load_maxtrix.std(0)
slow_Load_upper=slow_Load_avg+slow_Load_var*2
slow_Load_bottom=slow_Load_avg-slow_Load_var*2
iterator_time=range(24)
for t in iterator_time:
slow_Load_1h_avg[t]=np.average(slow_Load_avg[t*60:(t+1)*60])
slow_Load_1h_upper=np.zeros(24)
for t in iterator_time:
slow_Load_1h_upper[t]=np.average(slow_Load_upper[t*60:(t+1)*60])
slow_Load_1h_bottom=np.zeros(24)
for t in iterator_time:
slow_Load_1h_bottom[t]=np.average(slow_Load_bottom[t*60:(t+1)*60])
if n_fastVeh >0:
fast_Load_matrix = np.zeros((n_iter, Maxtime_Interval))
for it in iterator:
fast_Load=np.zeros(Maxtime_Interval)
startingSOC=getStartingSOC(2)
charge_Duration=(endSOC-startingSOC) * batteryCapacity/power_Fast/charge_efficiency*60
arrivalTime = getArrivalTime(n_fastVeh)
arrivalTime.sort()
arrivalTime = np.int_(np.round(arrivalTime * 60))
# calculate the charging starting time based on the queuing situation, that is, limited number of chargers
startingTime = np.zeros(n_fastVeh)
endTime = np.zeros(n_fastVeh)
startingTime[:nfast_Chargers] = arrivalTime[:nfast_Chargers]
endTime[:nfast_Chargers] = np.int_(
np.around(startingTime[:nfast_Chargers] + charge_Duration[:nfast_Chargers]))
q = PriorityQueue()
for i in range(0, nfast_Chargers): # initiate the queue
q.put(endTime[i])
for i in range(nfast_Chargers, len(arrivalTime)):
non_available = [j for j in q.queue if j > arrivalTime[i]]
#print('que status', q.queue)
if len(non_available) == nfast_Chargers:
startingTime[i] = np.int_(min(non_available)) + 1
q.get()
else:
startingTime[i] = np.int_(arrivalTime[i])
q.get()
endTime[i] = np.int_(np.around(startingTime[i] + charge_Duration[i]))
q.put(endTime[i])
# print('starting time before 24hours', startingTime)
# print('end time before 24hours', endTime)
waitTime = (startingTime - arrivalTime) / 60
startingTime = time24(startingTime)
endTime = time24(endTime)
startingTime = [int(i) for i in startingTime]
endTime = [int(i) for i in endTime]
for i in range(0, len(waitTime)):
if endTime[i] == -1:
waitTime[i] = 0
avgwaitfast = np.mean(waitTime)
unChargedveh_fast = endTime.count(-1)
# print('arrival time', arrivalTime)
# print('starting time', startingTime)
# print('end time', endTime)
# print('average wait time for fast chargers is %5.2f hours' %avgwaitfast)
# print('%4d vehicles uncharged due to lack of fast chargers' %unChargedveh_fast)
for c in range(n_fastVeh):
if endTime[c] ==-1 or startingTime[c] ==-1:
break
if endTime[c]>startingTime[c]:
iterator_time=np.arange(startingTime[c]-1,endTime[c],1)
for t in iterator_time:
fast_Load[t]= fast_Load[t]+power_Fast
else:
iterator_time=np.arange(startingTime[c]-1, Maxtime_Interval,1)
for t in iterator_time:
fast_Load[t]=fast_Load[t]+power_Fast
iterator_time=np.arange(0,endTime[c],1)
for t in iterator_time:
fast_Load[t]=fast_Load[t]+power_Fast
fast_Load_matrix[it]=fast_Load
fast_Load_avg=fast_Load_matrix.mean(0)
fast_Load_var=fast_Load_matrix.std(0)
fast_Load_upper=fast_Load_avg + fast_Load_var*2
fast_Load_bottom=fast_Load_avg - fast_Load_var*2
iterator_time=range(24)
for t in iterator_time:
fast_Load_1h_avg[t]=np.average(fast_Load_avg[t*60:(t+1)*60])
fast_Load_1h_upper=np.zeros(24)
for t in iterator_time:
fast_Load_1h_upper[t]=np.average(fast_Load_upper[t*60:(t+1)*60])
fast_Load_1h_bottom=np.zeros(24)
for t in iterator_time:
fast_Load_1h_bottom[t]=np.average(fast_Load_bottom[t*60:(t+1)*60])
def initiate_V2G(n_V2G_Veh):
# process startingTime and endingTime for each V2G vehicle
v2g_startingTime = getArrivalTime(n_V2G_Veh)
condlist = [v2g_startingTime < 24, v2g_startingTime >= 24]
choicelist = [v2g_startingTime, v2g_startingTime - 24]
startingTime1 = np.select(condlist, choicelist)
    v2g_startingTime = np.int_(np.round(startingTime1))  # would need to be multiplied by 4 if 15-minute intervals were used
temp_start=np.int_(np.round(startingTime1))
temp_park_random=np.zeros(n_V2G_Veh)
for i in range(n_V2G_Veh):
temp_park=parking_Duration[temp_start[i]-1]
temp_park_random[i]=np.random.normal(temp_park,0.5,1)
v2g_endingTime=temp_start+temp_park_random
condlist= [v2g_endingTime<24, v2g_endingTime>=24]
choicelist=[v2g_endingTime, v2g_endingTime-24]
endingTime1= np.select(condlist, choicelist)
    v2g_endingTime = np.int_(np.round(endingTime1))  # would need to be multiplied by 4 if 15-minute intervals were used
#process startingSOC and travel distance for each V2G vehicle
v2g_startingSOC=getStartingSOC_V2G(n_V2G_Veh)
#v2g_distance=np.random.lognormal(3.67017251945698, 0.532230403897875, n_V2G_Veh)
#v2g_startingSOC=sorted(v2g_startingSOC)
#v2g_distance=sorted(v2g_distance)
#for i in range(n_V2G_Veh):
# startSOC_temp=1- v2g_distance[i]/batteryRange
# if startSOC_temp< v2g_startingSOC[i]:
# v2g_startingSOC[i]=startSOC_temp
return v2g_startingTime, v2g_endingTime, v2g_startingSOC
def V2G_optimization():
# initiate the problem statement
model=lp.LpProblem('Minimize_Distribution_level_Peak_Valley_Difference', lp.LpMinimize)
#define optimization variables
veh_V2G=range(n_V2G_Veh)
time_Interval=range(24)
chargeprofiles=lp.LpVariable.dicts('charging_profiles', ((i,j) for i in veh_V2G for j in time_Interval), lowBound=0, upBound=power_managed_Uppper)
chargestates=lp.LpVariable.dicts('charging_states', ((i,j) for i in veh_V2G for j in time_Interval), cat='Binary')
dischargeprofiles=lp.LpVariable.dicts('discharging_profiles', ((i,j) for i in veh_V2G for j in time_Interval), lowBound=power_V2G_Lower, upBound=0)
dischargestates=lp.LpVariable.dicts('discharging_states', ((i,j) for i in veh_V2G for j in time_Interval), cat='Binary')
total_load=lp.LpVariable.dicts('total_load', time_Interval,lowBound=0)
max_load=lp.LpVariable('max_load', lowBound=0)
min_load=lp.LpVariable('min_load', lowBound=0)
# define objective function
#model += max_load - min_load
model +=max_load
# define constraints
for t in time_Interval: # constraint 1 & 2: to identify the max and min loads
model += total_load[t] <= max_load
#model += total_load[t] >= min_load
for t in time_Interval: # constraint 3: calculate the total load at each time interval t
model += lp.lpSum([chargeprofiles[i,t]] for i in veh_V2G) + lp.lpSum([dischargeprofiles[i,t]*discharge_efficiency for i in veh_V2G]) + base_Load[t] + unmanaged_Load[t] == total_load[t] #need to plus base loads
for i in veh_V2G: # constraint 4: constraint on charging powers for each EV: only optimize the charge profile between start and end charging time
temp_start=v2g_startingTime[i]
temp_end=v2g_endingTime[i]
if temp_start >= temp_end:
for t in range (temp_end):
model += chargestates[i,t] + dischargestates[i,t] <=1
model += chargeprofiles[i,t] <= chargestates[i,t] * power_managed_Uppper
model += chargeprofiles[i,t] >= chargestates[i,t] * power_managed_Lower
model += dischargeprofiles[i,t] <= dischargestates[i,t] * power_V2G_Upper
model += dischargeprofiles[i,t] >= dischargestates[i,t] * power_V2G_Lower
for t in range(temp_end, temp_start, 1):
model += chargeprofiles[i,t] == 0
model += chargestates[i,t] == 0
model += dischargeprofiles[i,t]==0
model += dischargestates[i,t]==0
for t in range(temp_start, 24, 1):
model += chargestates[i, t] + dischargestates[i, t] <= 1
model += chargeprofiles[i,t] <= chargestates[i,t] * power_managed_Uppper
model += chargeprofiles[i,t] >= chargestates[i,t] * power_managed_Lower
model += dischargeprofiles[i, t] <= dischargestates[i, t] * power_V2G_Upper
model += dischargeprofiles[i, t] >= dischargestates[i, t] * power_V2G_Lower
if temp_start < temp_end:
for t in range(temp_start):
model += chargeprofiles[i,t] == 0
model += chargestates[i,t] ==0
model += dischargeprofiles[i,t]==0
model += dischargestates[i,t]==0
for t in range(temp_start, temp_end, 1):
model += chargestates[i, t] + dischargestates[i, t] <= 1
model += chargeprofiles[i,t] <= chargestates[i,t] * power_managed_Uppper
model += chargeprofiles[i,t] >= chargestates[i,t] * power_managed_Lower
model += dischargeprofiles[i, t] <= dischargestates[i, t] * power_V2G_Upper
model += dischargeprofiles[i, t] >= dischargestates[i, t] * power_V2G_Lower
for t in range(temp_end, 24, 1):
model += chargeprofiles[i,t] == 0
model += chargestates[i,t]==0
model += dischargeprofiles[i,t]==0
model += dischargestates[i,t]==0
for i in veh_V2G: # constraint 5: SOC constraint, cannot be greater than 1, end_SOC must be above certain levels
temp_start=v2g_startingTime[i]
temp_end=v2g_endingTime[i]
temp_startSOC=v2g_startingSOC[i]
if temp_start >= temp_end:
for t in range(temp_start+1, 24, 1):
temp_timer = range (temp_start, t, 1)
model += temp_startSOC + lp.lpSum( [chargeprofiles[i,tn] * charge_efficiency/batteryCapacity] for tn in temp_timer) \
+ lp.lpSum( [dischargeprofiles[i,tn] *(1/batteryCapacity)] for tn in temp_timer) <=1 #need to divide 4!
for t in range (0, temp_end+1, 1):
temp_timer = range (0, t, 1)
model += temp_startSOC + lp.lpSum( [chargeprofiles[i,tn] * charge_efficiency/batteryCapacity] for tn in range(temp_start, 24,1)) + lp.lpSum( [chargeprofiles[i,tn] * charge_efficiency/batteryCapacity] for tn in temp_timer) \
+ lp.lpSum( [dischargeprofiles[i,tn] *(1/ batteryCapacity)] for tn in range(temp_start, 24,1)) + lp.lpSum( [dischargeprofiles[i,tn] *(1/batteryCapacity)] for tn in temp_timer) <=1 #need to divide 4
#if end_SOC == 1:
# incrementSOC=v2g_distance[i]/batteryRange
# model += lp.lpSum([chargeprofiles[i, tn] * charge_efficiency / batteryCapacity] for tn in range(temp_start, 24, 1)) + lp.lpSum([chargeprofiles[i, tn] * charge_efficiency / batteryCapacity] for tn in temp_timer) \
# + lp.lpSum([dischargeprofiles[i, tn] *(1/ batteryCapacity)] for tn in range(temp_start, 24, 1)) + lp.lpSum([dischargeprofiles[i, tn] *(1/ batteryCapacity)] for tn in temp_timer) >= incrementSOC # need to divide 4
if end_SOC ==2:
model += temp_startSOC + lp.lpSum([chargeprofiles[i, tn] * charge_efficiency / batteryCapacity] for tn in range(temp_start, 24, 1)) + lp.lpSum([chargeprofiles[i, tn] * charge_efficiency / batteryCapacity] for tn in temp_timer) \
+ lp.lpSum([dischargeprofiles[i, tn] *(1/ batteryCapacity)] for tn in range(temp_start, 24, 1)) + lp.lpSum([dischargeprofiles[i, tn] *(1/ batteryCapacity)] for tn in temp_timer) ==1
if temp_start < temp_end:
for t in range (temp_start+1, temp_end+1, 1):
temp_timer = range (temp_start, t, 1)
model += temp_startSOC + lp.lpSum( [chargeprofiles[i,tn] * charge_efficiency/batteryCapacity] for tn in temp_timer) \
+ lp.lpSum( [dischargeprofiles[i,tn] *(1/batteryCapacity)] for tn in temp_timer) <=1 #need to divide by 4
#if end_SOC == 1:
# incrementSOC=v2g_distance[i]/batteryRange
# model += lp.lpSum( [chargeprofiles[i,tn] * charge_efficiency/batteryCapacity] for tn in temp_timer)\
# +lp.lpSum( [dischargeprofiles[i,tn] *(1/batteryCapacity)] for tn in temp_timer) >= incrementSOC # need to divide 4
if end_SOC ==2:
model += temp_startSOC + lp.lpSum([chargeprofiles[i, tn] * charge_efficiency / batteryCapacity] for tn in range(temp_start, 24, 1)) + lp.lpSum([chargeprofiles[i, tn] * charge_efficiency / batteryCapacity] for tn in temp_timer)\
+ lp.lpSum([dischargeprofiles[i, tn] *(1/batteryCapacity)] for tn in range(temp_start, 24, 1)) + lp.lpSum([dischargeprofiles[i, tn] *(1/batteryCapacity)] for tn in temp_timer)==1
#print(model)
#status=model.solve(lp.COIN(maxSeconds=2500))
status=model.solve()
print(lp.LpStatus[status])
print(lp.value(max_load))
return chargeprofiles, dischargeprofiles, total_load
def loadAnalysis(chargeprofiles,dischargeprofiles, total_load):
opt_EVprofiles=np.zeros((n_V2G_Veh, 24))
for i in chargeprofiles.items():
name=i[1].name
index = re.findall(r'\d+', name)
index = [int(i) for i in index]
veh_index=index[0]
time_index=index[1]
opt_EVprofiles[veh_index][time_index]=i[1].varValue
for i in dischargeprofiles.items():
name=i[1].name
index = re.findall(r'\d+', name)
index = [int(i) for i in index]
veh_index=index[0]
time_index=index[1]
opt_EVprofiles[veh_index][time_index]= i[1].varValue + opt_EVprofiles[veh_index][time_index]
opt_EVload=np.zeros(24)
for i in range (24):
opt_EVload[i]=sum(row[i] for row in opt_EVprofiles)
opt_Totalload=np.zeros(24)
for i in total_load.items():
name=i[1].name
index = re.findall(r'\d+', name)
index = [int(i) for i in index]
time_index = index[0]
opt_Totalload[time_index]=i[1].varValue
#show the results
capacity_Line = np.full(24,transformer_capacity)
efficiency_Line =np.full(24,maxEfficiency*transformer_capacity)
x = np.arange(0, 24, 1)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
ax1.set_xlabel('Time of a day: hour')
ax1.set_ylabel('Load: kW')
ax1.fill_between(x, unmanaged_Load, color='whitesmoke', label='EV Load: Unmanaged Charging')
ax1.fill_between(x, opt_EVload, color='lavender', label='EV Load: V2G')
EV_load=np.zeros(24)
EV_load=opt_EVload+unmanaged_Load
ax1.plot(x, EV_load, color='orange', lw=3, label='EV Load: Total')
ax1.legend(loc='upper left')
ax1.set_ylim([-1000, 4500])
ax2.set_xlabel('Time of a day: hour')
ax2.set_ylabel('Load: kW')
ax2.set_ylim([-1000, 4500])
#ax2.set_ylabel('Load: kW')
ax2.plot(x,opt_Totalload,color='gray', label='Total load')
#ax2.plot(x, opt_EVload,'r--', label='EV load: V2G load')
#ax2.plot(x, unmanaged_Load, 'g--', label='EV: Unmanaged charge')
#ax2.plot(x, base_Load,'b-', label='Base load')
ax2.fill_between(x, base_Load, opt_Totalload, color='orange', label='EV Load')
ax2.fill_between(x, base_Load, color='lemonchiffon', label='Base Load')
ax2.plot(x, capacity_Line,'r:', label='Rated capacity')
ax2.plot(x, efficiency_Line, color='gold', linestyle='dashed', label='80% of Rated capacity')
ax2.legend(loc='upper left')
#plt.show()
max_Load = max(opt_Totalload)
max_BaseLoad = max(base_Load)
delta_Maxload = max_Load - max_BaseLoad
maxIndex = np.argmax(opt_Totalload)
maxTime = (maxIndex + 1) * 0.25
ev_toPeakLoad = (opt_EVload[maxIndex] + unmanaged_Load[maxIndex]) / max_Load
max_LoadFactor = max_Load / transformer_capacity
increasedCapacity = max_Load - maxEfficiency * transformer_capacity
min_Load = min(opt_Totalload)
peak_Valley_Diff = max_Load - min_Load
print('Peak load', np.round(max_Load,1))
print('Increases in peak load', np.round(delta_Maxload, 1))
print('Peak valley difference', np.round(peak_Valley_Diff, 1))
print('EV ratio in peak load', np.round(ev_toPeakLoad,2))
print('Utilization factor', np.round(max_LoadFactor,2))
print('Increased capacity required', np.round(increasedCapacity,0))
return EV_load
def writeExcel(evLoad, totalLoad):
    df = pd.DataFrame({'EV load': evLoad})
    # The ExcelWriter context manager saves and closes the workbook on exit,
    # so no explicit writer.save() call is needed.
    with pd.ExcelWriter('C:\\pyscript\\UPS_model\\data\\test.xlsx', mode='a', engine='openpyxl') as writer:
        df.to_excel(writer, sheet_name='Sheet1')
if __name__ == '__main__':
# define the inputs for vehicle to grid
    useCase = 1 # 1: residential, 2: office, 3: commercial, 4: public
# initiate the number of vehicles and enrolment of vehicles in V1G
n_vehicles = np.int_(800)
percentV2G = 0.2
percentFast = 0 #out of vehicles using unmanaged charging
n_V2G_Veh = np.int_(np.round(n_vehicles * percentV2G))
n_V0G_Veh = np.int_(np.round(n_vehicles * (1 - percentV2G)))
if useCase == 3 or useCase == 4:
percentFast = 1
n_fastVeh = np.int_(np.round(n_V0G_Veh * percentFast))
n_slowVeh = np.int_(np.round(n_V0G_Veh * (1 - percentFast)))
if useCase == 1 or useCase == 2:
if n_fastVeh > 0:
n_fastVeh = np.int_(np.round(n_fastVeh * 0.8))
#EV's technical specifications
batteryCapacity = 45
batteryRange = 300
#V2G charging power range
nslow_Chargers = 80 # number of level 1 chargers
nfast_Chargers = 0 # number of level 2 and DCF chargers
power_managed_Uppper=7
power_managed_Lower=3
power_V2G_Upper=-3
power_V2G_Lower=-7
power_Slow=7
power_Fast=20
charge_efficiency=0.9
discharge_efficiency=0.9
Charge_frequency = 1
end_SOC=2 # 2 is to charge fully
transformer_capacity=4000
maxEfficiency=0.8
if useCase==1 or useCase==2:
parking_Duration = [9.8,9,9.3,8.8,8.5,7.3,7.4,7.7,6.8,5.4, 5, 5.2, 5.1, 5.2, 5, 6.1, 7.2, 9.1, 9.8, 9.8, 10.7, 9.4, 10.1, 8.8]
if useCase==3 or useCase==4:
parking_Duration=np.zeros(24)
parking_Duration= parking_Duration +4
# transformers' status
if useCase==1:
base_Load = [1578.7, 1414.7, 1290.1, 1258.6, 1199.2, 1279.8, 1327.9, 1378, 1492.4, 1666.8, 1738.2, 1497.9, 1433.9,
1446.8, 1463.8, 1434.5, 1523.9, 1651.1, 1727.2, 1922, 2162.6, 2192.6, 1944.4, 1762.9]
if useCase==2:
base_Load =[1043.071385, 1009.268146, 980.293941, 985.1229752, 973.0503897, 1011.682663, 1294.181163, 1675.674865, 2087.350029,2290.169466, 2391.579184, 2125.982303,2237.050089, 1812.095081, 1746.903119,1733.623275, 1717.928914, 1800.022495,1558.570785, 1302.631973, 1154.139172,981.5011995, 874.0551888, 802.8269344]
#unmanaged model
n_iter=25
Maxtime_Interval=1440
slow_Load_1h_avg = np.zeros(24)
fast_Load_1h_avg = np.zeros(24)
Unmanaged_charge()
unmanaged_Load=np.zeros(24)
unmanaged_Load=slow_Load_1h_avg+fast_Load_1h_avg
    # initiate V2G vehicles' states
v2g_startingTime, v2g_endingTime,v2g_startingSOC=initiate_V2G(n_V2G_Veh)
#optimize EV charging profiles
chargeprofiles, dischargeprofiles, total_load=V2G_optimization()
#perform total load analysis
EV_load = loadAnalysis(chargeprofiles, dischargeprofiles, total_load)
writeExcel(EV_load,total_load)
|
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch import helpers
from requests_aws4auth import AWS4Auth
import os
AWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = os.environ['AWS_SECRET_KEY']
region = 'ap-northeast-2'
service = 'es'
awsauth = AWS4Auth(AWS_ACCESS_KEY, AWS_SECRET_KEY, region, service)
host = os.environ['AWS_ES_HOST']
# ex) tojung.search.net
es = Elasticsearch(
hosts = [{'host': host, 'port': 443}],
http_auth = awsauth,
use_ssl = True,
verify_certs = True,
connection_class = RequestsHttpConnection
)
def bulk_insert(datas):
actions = [
{
"_index": "koreabills",
"_type": "bill",
"_id": data['id'],
"_source": data
}
for data in datas
]
helpers.bulk(es, actions)
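# Minimal usage sketch; runs only when this file is executed directly. The document
# fields below are assumptions for illustration -- bulk_insert() itself only requires 'id'.
if __name__ == '__main__':
    sample_docs = [{'id': 'example-1', 'title': 'Example bill'}]
    bulk_insert(sample_docs)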
|
from main import app,permission
import flask,os,tempfile,json
from io import BytesIO
@app.route('/exportData')
@permission.ValidForLogged
def exportData():
encObj = app.config['DATA_CONTAINER']
buffer = BytesIO()
Data = {}
#---------- CONFIG -----
Data['CONFIG'] = {'PASSWORD':encObj.getPassword()}
#---------- password data ------------
#
kwDirct = [ {item['k']:item['v']} for item in encObj.getAllItems(tableName='keywords') ]
Data['PasswordManager'] = {
'keywords': kwDirct,
'data': encObj.getAllItems(tableName='PasswordManager'),
}
#---------- relation ------------
Data['Relations'] = encObj.getAllItems(tableName='Relations')
#---------- diary ------------
Data['Diary'] = encObj.getAllItems(tableName='Diary')
buffer.write( json.dumps(Data,indent = 4) .encode('utf-8') )
buffer.seek(0)
return flask.send_file(buffer, as_attachment=True,attachment_filename='export.json')#,mimetype='text/csv')
@app.route("/importData", methods=["POST"])
@permission.ValidForLogged
def importData():
if flask.request.files:
encObj = app.config['DATA_CONTAINER']
importedFile = flask.request.files["file"]
Data = json.loads(importedFile.read().decode())
#------- config --------
password = Data['CONFIG']['PASSWORD']
encObj.ResetPassword(password)
#------- PasswordManager ------
kwDict = [ {'k':list(d.keys())[0],'v':list(d.values())[0]} for d in Data['PasswordManager']['keywords'] ]
clsDict = Data['PasswordManager']['data']
RelDict = Data['Relations']
app.config['fun_FUM'].ResetTable(encObj,tableName='keywords',DictList=kwDict,key1='k',key2=None)
app.config['fun_FUM'].ResetTable(encObj,tableName='PasswordManager',DictList=clsDict,key1='class',key2='itemname')
app.config['fun_FUM'].ResetTable(encObj,tableName='Relations',DictList=RelDict,key1='id',key2=None)
app.config['fun_FUM'].ResetTable(encObj,tableName='Diary',DictList=Data['Diary'],key1='id',key2=None)
#---------------------
encObj.Save()
#---------------------
permission.SetLogout()
return flask.redirect(flask.url_for('index'))
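# Rough client-side sketch for the two endpoints above (host name and cookie handling
# are assumptions; a logged-in session is required by @permission.ValidForLogged):
#   curl -b cookies.txt -o export.json https://<host>/exportData
#   curl -b cookies.txt -F "file=@export.json" https://<host>/importData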
|
from . import kafka_interface
from . import zeroMQ_interface
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings_production')
from django.conf import settings
from celery import Celery
app = Celery(app='daisychain',
backend='amqp')
# This reads, e.g., CELERY_ACCEPT_CONTENT = ['json'] from settings.py:
app.config_from_object('django.conf:settings')
# For autodiscover to work, define your tasks in a file called 'tasks.py'.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
|
"""
This provides autorecruitment functionality into Ellis.
"""
import socket
import time
import json
from ellis import config, ellis_modules, ns
class Autorecruit(ellis_modules.EllisModule, module_name="Autorecruit"):
"""
    This is the module that provides the core functionality.
"""
# pylint: disable=abstract-method
def _auto_tg(self):
""" This is the actual logic for the recruiter. This is largely
designed to use the same mechanism as third-party applications so
that it can be split out easier, especially once I get the auto-loader
setup. Somes changes might be needed to detach it a bit further, but
otherwise it is independent. """
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_closed = False
recruited = []
nation = None
try:
ellis_modules.log.info("Connecting to Ellis...")
connection.connect(("localhost", 4526))
ellis_modules.log.info("Connected to Ellis!")
ellis_modules.log.debug("Entering AutoTG Loop!")
time.sleep(60)
while self.running:
ellis_modules.log.info("Getting Nation...")
connection.send("GET".encode('utf-8'))
nation = connection.recv(2048).decode('utf-8')
if nation.lower() == 'end':
server_closed = True
try:
nation = json.loads(nation)
except json.JSONDecodeError as ex:
ellis_modules.log.error(ex, exc_info=1)
continue
ellis_modules.log.info("Recieved Nation!")
ellis_modules.log.info("Recieved Nation: %s", nation)
ellis_modules.log.debug("Got Nation: %s", nation['name'])
ellis_modules.log.debug("Sending Telegram to %s!",
nation['name'])
with ns.lock:
self.ns_tg.send_telegram(nation['name'])
recruited.append(nation)
ellis_modules.log.info("Shutting Down!")
except BaseException as ex:
ellis_modules.log.error(ex, exc_info=1, stack_info=True)
raise
finally:
ellis_modules.log.info("Returning Nations...")
for nation in recruited:
connection.send(('RETURN {}'
).format(json.dumps(nation)).encode('utf-8'))
if not server_closed:
connection.send("END".encode('utf-8'))
connection.shutdown(socket.SHUT_RDWR)
connection.close()
ellis_modules.log.info("Goodbye...")
self.running = False
def start(self, *args, **kwargs):
# pylint: disable=attribute-defined-outside-init
self.running = True
try:
self.ns_tg = ns.NS_Telegram(ns.limit,
(config.Config['Autorecruit']
)['NS_TG_ID'],
(config.Config['Autorecruit']
)['NS_Secret_Key'],
(config.Config['Autorecruit']
)['NS_API_Key'])
except KeyError as ex:
config.add_module_config(self.module_name,
{
'NS_TG_ID': "Unknown",
'NS_Secret_Key': "Unknown",
'NS_API_Key': "Unknown"
})
raise SyntaxError("Failed to provide needed ID Keys.") from ex
while self.running:
try:
self._auto_tg()
except ConnectionRefusedError:
pass
except OSError as ex:
ellis_modules.log.error(ex, exc_info=1)
except BaseException as ex:
ellis_modules.log.error(ex, exc_info=1)
raise
def stop(self, *args, **kwargs):
self.running = False
|
# T'm "ok"
print('T\'m \"ok\"')
print("I\'m learning\nPython")
print('''line1
line2
line3''')
print(True)
print(3==2)
print(not True)
print(9 / 3) # 3.0
print(9 // 3) # 3
print(10 % 3) # 1
s3=r'Hello, "bart"'
print(s3)
# Hello, "bart"
s4=r'''hello,
Lisa!'''
print(s4)
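# Two further escape examples (added for illustration): \t inserts a tab, while an
# r'' raw string keeps the backslash literally.
print('a\tb')   # prints: a    b (tab between)
print(r'a\tb')  # prints: a\tb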
|
from urllib.parse import urlparse
from re import match
from django_filters.views import FilterView
from .filters import EquipmentFilter, ReportRequestToRepairFilter, EmployeesFilter
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import DetailView, CreateView, ListView, UpdateView
from django.views.generic.base import View
from django.core import serializers
from accounting_tech.forms import RoomForm, DepartamentForm, PositionForm, RequestForm
from .models import Equipment, Employees, RequestToRepair, \
Acquisition, ReportRequestToRepair
from .render_to_pdf import render_to_pdf
@method_decorator(login_required, name='dispatch')
class ListOfEquipment(FilterView):
"""
    This class works with the list of Equipment.
    Filtering uses the 3rd-party django_filters package.
"""
model = Equipment
template_name = 'accounting_tech/home.html'
paginate_by = 10
filterset_class = EquipmentFilter
@method_decorator(login_required, name='dispatch')
class EquipmentCreateView(CreateView):
"""In this class we're creating new kind od Equipment."""
model = Equipment
template_name = 'accounting_tech/create_equipment.html'
fields = '__all__'
def get_success_url(self):
return reverse_lazy('home')
@method_decorator(login_required, name='dispatch')
class EmployeesList(FilterView):
"""
    This class works with the list of Employees.
    Filtering uses the 3rd-party django_filters package.
"""
model = Employees
template_name = 'accounting_tech/employees_list.html'
filterset_class = EmployeesFilter
@method_decorator(login_required, name='dispatch')
class EmployeeCreateView(CreateView):
"""In this class we're creating new kind od Equipment."""
model = Employees
template_name = 'accounting_tech/create_employee.html'
fields = '__all__'
def get_success_url(self):
return reverse_lazy('employees_list')
class EmployeeRegisterView(CreateView): # register new Employee same as EmployeeCreateView
model = Employees
template_name = 'accounting_tech/register.html'
fields = '__all__'
def get_success_url(self):
return reverse_lazy('employees_list')
def list_equipment_employee(request, pk):
"""
    :param request: Comes from JavaScript
    :param pk: Employee id
    :return: All equipment that the employee has
"""
equipment_list_of_worker_query_obj = list(Equipment.objects.filter(worker=pk))
pin_code_of_worker_query_obj = list(Employees.objects.filter(id=pk))
equipment_list_of_worker = serializers.serialize(
'json', pin_code_of_worker_query_obj + equipment_list_of_worker_query_obj)
return HttpResponse(equipment_list_of_worker)
@method_decorator(login_required, name='dispatch')
class EmployeeUpdateView(UpdateView):
"""In this class we're updating employees."""
model = Employees
template_name = 'accounting_tech/edit_employee.html'
fields = '__all__'
def get_success_url(self):
return reverse_lazy('employees_list')
@method_decorator(login_required, name='dispatch')
class EquipmentUpdateView(UpdateView):
"""In this class we're updating equipment."""
model = Equipment
template_name = 'accounting_tech/edit_equipment.html'
fields = '__all__'
def get_success_url(self):
return reverse_lazy('home')
class RequestToRepairCreateView(CreateView):
"""In this class we're creating new request to repair."""
model = RequestToRepair
template_name = 'accounting_tech/create_request_to_repair.html'
fields = ['complainant', 'inventory_number',
'phone', 'location', 'description_failure']
def form_valid(self, form):
"""
:param form: Form data
        :return: If the form data is valid, redirect to success_url; otherwise redirect to home
"""
if match(r'^[0-9]-[0-9]{5}$', form.data['inventory_number']):
form.save()
return HttpResponseRedirect(self.get_success_url())
else:
return redirect('home')
def get_success_url(self):
"""
        It sends a notification if the data is valid
:return: redirect to home
"""
from notifications.signals import notify
from django.contrib.auth.models import User, Group
notify.send(User.objects.get(username='Admin'), recipient=Group.objects.get(name='notify'), verb='New request')
return reverse_lazy('home')
class ReportRequestToRepairCreateView(CreateView):
"""In this class we're creating new report."""
model = ReportRequestToRepair
template_name = 'accounting_tech/create_report_request_to_repair.html'
fields = '__all__'
def form_valid(self, form):
form.save()
request_to_repair = RequestToRepair.objects.get(id=int(form.data['request_to_repair']))
request_to_repair.is_repaired = True
request_to_repair.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse_lazy('list_request_to_repair')
@method_decorator(login_required, name='dispatch')
class RequestToRepairList(ListView):
""":returns list requests to repair"""
model = RequestToRepair
def get_queryset(self):
return self.model.objects.filter(is_repaired=False)
template_name = 'accounting_tech/list_request_to_repair.html'
@method_decorator(login_required, name='dispatch')
class RequestToRepairDetail(DetailView):
""":returns detail info about request to repair"""
model = RequestToRepair
template_name = 'accounting_tech/detail_request_to_repair.html'
class PrintRequestRepairToPdf(View):
"""
    This class is used to print the details of a repair request as a PDF.
"""
def get(self, request, *args, **kwargs):
requests = RequestToRepair.objects.filter(id=kwargs['pk'])
from django.conf import settings
data = {
'root': settings.PDF_FONT_PATH,
'requests': requests,
}
pdf = render_to_pdf('accounting_tech/print_request_to_repair.html', data)
return HttpResponse(pdf, content_type='application/pdf')
class AcquisitionCreateView(CreateView):
"""This create new one of request acquisition"""
model = Acquisition
template_name = 'accounting_tech/create_acquisition.html'
fields = '__all__'
def get_success_url(self):
return reverse_lazy('home')
@method_decorator(login_required, name='dispatch')
class AcquisitionList(ListView):
"""List all acquisitions"""
model = Acquisition
template_name = 'accounting_tech/list_acquisition.html'
class FormReturnRelativePath:
"""Because i repeated that code from time to time .
I create this class.
It return path relative of function, and save form.
"""
def __init__(self, request, model_form, template):
self.request = request
self.model_form = model_form
self.template = template
def return_relative(self):
if self.request.method == 'POST':
form = self.model_form(self.request.POST)
url = urlparse(self.request.POST['referrer'])
if form.is_valid():
form.save()
if url:
if reverse('create_employee') == url.path:
return HttpResponseRedirect(reverse('create_employee'))
elif reverse('register_employee') == url.path:
return HttpResponseRedirect(reverse('register_employee'))
else:
return HttpResponseRedirect(url.path)
else:
form = self.model_form()
return render(self.request, self.template, {'form': form})
def room_form(request):
template = 'accounting_tech/create_room.html'
    form_view = FormReturnRelativePath(request, RoomForm, template)
    return form_view.return_relative()
def departament_form(request):
template = 'accounting_tech/create_departament.html'
    form_view = FormReturnRelativePath(request, DepartamentForm, template)
    return form_view.return_relative()
def position_form(request):
template = 'accounting_tech/create_position.html'
    form_view = FormReturnRelativePath(request, PositionForm, template)
    return form_view.return_relative()
class ReportRequestToRepairListView(FilterView):
"""List all requests to repair"""
model = ReportRequestToRepair
template_name = 'accounting_tech/report_request_to_repair_list.html'
paginate_by = 10
ordering = ['-time_field']
filterset_class = ReportRequestToRepairFilter
|
"""
Script to generate data for testing FUV.
It must be run in a directory containing ut_FUV.m and
ut_constants.mat. ut_FUV.m needs to be modified to load
ut_constants.mat at the beginning, in addition to all the
intermediate points where it is already being loaded.
"""
import numpy as np
from scipy.io.matlab import savemat
from oct2py import octave, Oct2PyError
octave.convert_to_float = False
t0 = octave.datenum(1950., 1., 2.) # floats are required
print('t0 = ', t0)
t = np.linspace(t0, t0+300, 5)[:, None]
# Probably an octave oddity: the following *must* be a float
lat = 30.0
# Otherwise, in the expression "pi * lat / 180" or any variation
# of it, all operations seem to involve conversion of arguments
# to integers. This is not an oct2py problem; when lat is saved
# in a matfile, and then loaded in an interactive octave session,
# the problem persists.
linds = [1 + np.arange(146, dtype=int)[:, None],
[7, 8],
[12, 13, 14],
]
for ilind, lind in enumerate(linds):
shape = (7, len(t), len(lind))
Fo = np.zeros(shape, dtype=float)
Uo = Fo.copy()
Vo = Fo.copy()
flags = [[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 1],
[1, 0, 0, 0],
[0, 0, 1, 0],
[1, 0, 1, 0]]
for i, flag in enumerate(flags):
print(flag)
try:
F, U, V = octave.ut_FUV(t, t0, lind, lat, flag)
Fo[i] = F
Uo[i] = U
Vo[i] = V
except Oct2PyError:
print('failed')
save_args = dict(t=t, t0=t0, lat=lat, lind=lind, flags=flags,
Fo=Fo, Uo=Uo, Vo=Vo)
np.savez('FUV%d.npz' % ilind, **save_args)
savemat('FUV%d.mat' % ilind, save_args)
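# Sketch of how a test might consume the generated files (file and key names follow
# the save calls above; everything else is an assumption):
#   dat = np.load('FUV0.npz')
#   F_expected, U_expected, V_expected = dat['Fo'], dat['Uo'], dat['Vo']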
|
from nose import SkipTest
from funtests import transport
class test_sqla(transport.TransportCase):
transport = "sqlalchemy"
prefix = "sqlalchemy"
event_loop_max = 10
connection_options = {"hostname": "sqlite://"}
def before_connect(self):
try:
import sqlakombu # noqa
except ImportError:
raise SkipTest("kombu-sqlalchemy not installed")
|