content stringlengths 5 1.05M |
|---|
# Package installer configuration for EpiForecastStatMech.
import setuptools

setuptools.setup(
    name="EpiForecastStatMech",
    version="0.2",
    packages=setuptools.find_packages(),
    install_requires=[
        "absl-py",
        "numpy",
        "scipy",
        "pandas",
        "matplotlib",
        "seaborn",
        # "sklearn" is a deprecated dummy distribution on PyPI; the real
        # distribution name for the scikit-learn library is "scikit-learn".
        "scikit-learn",
        # "tensorflow",  # revert back to these when tfp 0.13 is released.
        # "tensorflow_probability",
        "tf-nightly",
        "tfp-nightly",
        "jax",
        "jaxlib",
        "flax",
        "dm-tree",
        "glmnet_py",
        "xarray",
    ],
)
|
# This file is part of Cryptography GUI, licensed under the MIT License.
# Copyright (c) 2020 Benedict Woo Jun Kai
# See LICENSE.md for more details.
######################################
# Import and initialize the librarys #
######################################
import logging
import webbrowser
from item_storage import *
from pygame_ess import pygame_ess
from common_objects import common_objects
logging.info('Loading credits...')

#########################
# Variables declaration #
#########################
logging.debug('Initialising credits variables...')

# Identifier used to locate this page's assets and objects.
page_name:str = 'credits'
# Registry of all interactive/display objects shown on the credits page.
credits_objects:dict = dict()

##############################
# Load credits objects #
##############################
logging.debug('Initialising credits objects...')

# Load the shared page objects (here only the 'back' button).
common_objects.load(credits_objects, page_name, ['back'])

# Button that opens the author's GitHub page in the default web browser.
credits_objects['check_it_out'] = item(name='check_it_out',
                                       type='button',
                                       images=pygame_ess.load.images([page_name, 'check_it_out']),
                                       frame=coord(
                                           266, 906,
                                           492, 67,
                                           266, 906),
                                       runclass=webbrowser.open,
                                       runclass_parameter='https://github.com/benwoo1110')

###################
# Generate window #
###################
# NOTE(review): this debug message says 'caesar cipher' but this is the
# credits window -- looks like a copy/paste leftover; confirm before changing.
logging.debug('Initialising caesar cipher window...')

# Load background and back button
credits_window:surface = surface(credits_objects, name=page_name,
                                 frame=coord(bx=0, by=0, w=1024, h=1102, scale=False))
################
# Credits Page #
################
class credits:
    '''Credits page'''

    def run():
        '''Display Credits page'''
        # Render the credits screen once, then only react to events.
        pygame_ess.display.screen(credits_window)
        logging.info('Loaded credits window.')

        # Event loop: poll selections until the user quits or goes back.
        while True:
            result:dict = pygame_ess.event.selection(credits_window, credits_objects)

            # Window closed or quit requested
            if result['action_result'] == 'quit' or pygame_ess.buffer(credits_window):
                return 'quit'

            # A button was pressed
            if result['object_type'] == 'button':
                # Go back to previous page
                if result['action_result'] == 'back':
                    return True
|
# Copyright (C) 2014-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
import dolfin
from dolfin import Function, Constant
from ocellaris.solver_parts import SlopeLimiter
from . import register_multi_phase_model, MultiPhaseModel
from ..convection import get_convection_scheme, StaticScheme, VelocityDGT0Projector
from ocellaris.utils import linear_solver_from_input
from .vof import VOFMixin
from .advection_equation import AdvectionEquation
# Default values, can be changed in the input file
CONVECTION_SCHEME = 'Upwind'
CONTINUOUS_FIELDS = False
CALCULATE_MU_DIRECTLY_FROM_COLOUR_FUNCTION = False
FORCE_STATIC = False
FORCE_BOUNDED = False
FORCE_SHARP = False
PLOT_FIELDS = False
NUM_SUBCYCLES = 1
# Default values, can be changed in the input file
SOLVER_OPTIONS = {
'use_ksp': True,
'petsc_ksp_type': 'gmres',
'petsc_pc_type': 'asm',
'petsc_ksp_initial_guess_nonzero': True,
'petsc_ksp_view': 'DISABLED',
'inner_iter_rtol': [1e-10] * 3,
'inner_iter_atol': [1e-15] * 3,
'inner_iter_max_it': [1000] * 3,
}
@register_multi_phase_model('BlendedAlgebraicVOF')
class BlendedAlgebraicVofModel(VOFMixin, MultiPhaseModel):
    description = 'A blended algebraic VOF scheme implementing HRIC/CICSAM type schemes'

    def __init__(self, simulation):
        """
        A blended algebraic VOF scheme works by using a specific
        convection scheme in the advection of the colour function
        that ensures a sharp interface.

        * The convection scheme should be the name of a convection
          scheme that is tailored for advection of the colour
          function, i.e "HRIC", "MHRIC", "RHRIC" etc,
        * The velocity field should be divergence free

        The colour function is unity when rho=rho0 and nu=nu0 and
        zero when rho=rho1 and nu=nu1
        """
        self.simulation = simulation
        simulation.log.info('Creating blended VOF multiphase model')

        # Define function space and solution function.
        # c is the colour at t^n, cp at t^{n-1}, cpp at t^{n-2}
        V = simulation.data['Vc']
        self.degree = V.ufl_element().degree()
        simulation.data['c'] = Function(V)
        simulation.data['cp'] = Function(V)
        simulation.data['cpp'] = Function(V)

        # The projected density and viscosity functions for the new time step can be made continuous
        self.continuous_fields = simulation.input.get_value(
            'multiphase_solver/continuous_fields', CONTINUOUS_FIELDS, 'bool'
        )
        if self.continuous_fields:
            simulation.log.info(' Using continuous rho and nu fields')
            mesh = simulation.data['mesh']
            V_cont = dolfin.FunctionSpace(mesh, 'CG', self.degree + 1)
            self.continuous_c = dolfin.Function(V_cont)
            self.continuous_c_old = dolfin.Function(V_cont)
            self.continuous_c_oldold = dolfin.Function(V_cont)

        # Optional postprocessing applied in get_colour_function():
        # clamping to [0, 1] and/or 0/1 thresholding at c = 0.5
        self.force_bounded = simulation.input.get_value(
            'multiphase_solver/force_bounded', FORCE_BOUNDED, 'bool'
        )
        self.force_sharp = simulation.input.get_value(
            'multiphase_solver/force_sharp', FORCE_SHARP, 'bool'
        )

        # Calculate mu from rho and nu (i.e mu is quadratic in c) or directly from c (linear in c)
        self.calculate_mu_directly_from_colour_function = simulation.input.get_value(
            'multiphase_solver/calculate_mu_directly_from_colour_function',
            CALCULATE_MU_DIRECTLY_FROM_COLOUR_FUNCTION,
            'bool',
        )

        # Get the physical properties
        self.set_physical_properties(read_input=True)

        # The convection blending function that counteracts numerical diffusion
        scheme = simulation.input.get_value(
            'convection/c/convection_scheme', CONVECTION_SCHEME, 'string'
        )
        simulation.log.info(' Using convection scheme %s for the colour function' % scheme)
        scheme_class = get_convection_scheme(scheme)
        self.convection_scheme = scheme_class(simulation, 'c')
        self.need_gradient = scheme_class.need_alpha_gradient

        # Create the equations when the simulation starts
        simulation.hooks.add_pre_simulation_hook(
            self.on_simulation_start, 'BlendedAlgebraicVofModel setup equations'
        )

        # Update the rho and nu fields before each time step
        simulation.hooks.add_pre_timestep_hook(
            self.update, 'BlendedAlgebraicVofModel - update colour field'
        )
        simulation.hooks.register_custom_hook_point('MultiPhaseModelUpdated')

        # Linear solver
        # This causes the MPI unit tests to fail in "random" places for some reason
        # Quick fix: lazy loading of the solver
        LAZY_LOAD_SOLVER = True
        if LAZY_LOAD_SOLVER:
            self.solver = None
        else:
            self.solver = linear_solver_from_input(
                self.simulation, 'solver/c', default_parameters=SOLVER_OPTIONS
            )

        # Subcycle the VOF calculation multiple times per Navier-Stokes time step
        # BUGFIX: this assignment previously also rebound the local variable
        # ``scheme`` (``self.num_subcycles = scheme = ...``), clobbering the
        # convection scheme name read above
        self.num_subcycles = simulation.input.get_value(
            'multiphase_solver/num_subcycles', NUM_SUBCYCLES, 'int'
        )
        if self.num_subcycles < 1:
            self.num_subcycles = 1

        # Time stepping based on the subcycled values
        if self.num_subcycles == 1:
            self.cp = simulation.data['cp']
            self.cpp = simulation.data['cpp']
        else:
            # With subcycling the previous sub-step values live in their own functions
            self.cp = dolfin.Function(V)
            self.cpp = dolfin.Function(V)

        # Plot density and viscosity fields for visualization
        self.plot_fields = simulation.input.get_value(
            'multiphase_solver/plot_fields', PLOT_FIELDS, 'bool'
        )
        if self.plot_fields:
            V_plot = V if not self.continuous_fields else V_cont
            self.rho_for_plot = Function(V_plot)
            self.nu_for_plot = Function(V_plot)
            self.rho_for_plot.rename('rho', 'Density')
            self.nu_for_plot.rename('nu', 'Kinematic viscosity')
            simulation.io.add_extra_output_function(self.rho_for_plot)
            simulation.io.add_extra_output_function(self.nu_for_plot)

        # Slope limiter in case we are using DG1, not DG0
        self.slope_limiter = SlopeLimiter(simulation, 'c', simulation.data['c'])
        simulation.log.info(' Using slope limiter: %s' % self.slope_limiter.limiter_method)
        self.is_first_timestep = True

    def on_simulation_start(self):
        """
        This runs when the simulation starts. It does not run in __init__
        since the solver needs the density and viscosity we define, and
        we need the velocity that is defined by the solver
        """
        sim = self.simulation
        beta = self.convection_scheme.blending_function

        # The time step (real value to be supplied later)
        self.dt = Constant(sim.dt / self.num_subcycles)

        # Setup the equation to solve
        c = sim.data['c']
        cp = self.cp
        cpp = self.cpp
        dirichlet_bcs = sim.data['dirichlet_bcs'].get('c', [])

        # Use backward Euler (BDF1) for timestep 1
        self.time_coeffs = Constant([1, -1, 0])

        if dolfin.norm(cpp.vector()) > 0 and self.num_subcycles == 1:
            # Use BDF2 from the start
            self.time_coeffs.assign(Constant([3 / 2, -2, 1 / 2]))
            sim.log.info('Using second order timestepping from the start in BlendedAlgebraicVOF')

        # Make sure the convection scheme has something useful in the first iteration
        c.assign(sim.data['cp'])

        if self.num_subcycles > 1:
            cp.assign(sim.data['cp'])

        # Plot density and viscosity
        self.update_plot_fields()

        # Define equation for advection of the colour function
        # ∂c/∂t + ∇⋅(c u) = 0
        Vc = sim.data['Vc']
        project_dgt0 = sim.input.get_value('multiphase_solver/project_uconv_dgt0', True, 'bool')
        if self.degree == 0 and project_dgt0:
            # For DG0 colour fields, optionally advect with a DGT0 projection
            # of the convecting velocity
            self.vel_dgt0_projector = VelocityDGT0Projector(sim, sim.data['u_conv'])
            self.u_conv = self.vel_dgt0_projector.velocity
        else:
            self.u_conv = sim.data['u_conv']
        forcing_zones = sim.data['forcing_zones'].get('c', [])
        self.eq = AdvectionEquation(
            sim,
            Vc,
            cp,
            cpp,
            self.u_conv,
            beta,
            time_coeffs=self.time_coeffs,
            dirichlet_bcs=dirichlet_bcs,
            forcing_zones=forcing_zones,
            dt=self.dt,
        )

        if self.need_gradient:
            # Reconstruct the gradient from the colour function DG0 field
            self.convection_scheme.initialize_gradient()

        # Notify listeners that the initial values are available
        sim.hooks.run_custom_hook('MultiPhaseModelUpdated')

    def get_colour_function(self, k):
        """
        Return the colour function on timestep t^{n+k}
        """
        if k == 0:
            if self.continuous_fields:
                c = self.continuous_c
            else:
                c = self.simulation.data['c']
        elif k == -1:
            if self.continuous_fields:
                c = self.continuous_c_old
            else:
                c = self.simulation.data['cp']
        elif k == -2:
            if self.continuous_fields:
                c = self.continuous_c_oldold
            else:
                c = self.simulation.data['cpp']
        else:
            # BUGFIX: previously any other k crashed with UnboundLocalError;
            # raise a clear error instead
            raise ValueError('Unsupported time step offset k=%r' % k)

        if self.force_bounded:
            # Clamp the colour function to the physically valid range [0, 1]
            c = dolfin.max_value(dolfin.min_value(c, Constant(1.0)), Constant(0.0))

        if self.force_sharp:
            # Threshold the colour function to a sharp 0/1 field
            c = dolfin.conditional(dolfin.ge(c, 0.5), Constant(1.0), Constant(0.0))

        return c

    def update_plot_fields(self):
        """
        These fields are only needed to visualise the rho and nu fields
        in xdmf format for Paraview or similar
        """
        if not self.plot_fields:
            return
        V = self.rho_for_plot.function_space()
        dolfin.project(self.get_density(0), V, function=self.rho_for_plot)
        dolfin.project(self.get_laminar_kinematic_viscosity(0), V, function=self.nu_for_plot)

    def update(self, timestep_number, t, dt):
        """
        Update the VOF field by advecting it for a time dt
        using the given divergence free velocity field
        """
        timer = dolfin.Timer('Ocellaris update VOF')
        sim = self.simulation

        # Get the functions
        c = sim.data['c']
        cp = sim.data['cp']
        cpp = sim.data['cpp']

        # Stop early if the free surface is forced to stay still
        force_static = sim.input.get_value('multiphase_solver/force_static', FORCE_STATIC, 'bool')
        if force_static:
            c.assign(cp)
            cpp.assign(cp)
            timer.stop()  # Stop timer before hook
            sim.hooks.run_custom_hook('MultiPhaseModelUpdated')
            self.is_first_timestep = False
            return

        if timestep_number != 1:
            # Update the previous values
            cpp.assign(cp)
            cp.assign(c)

            if self.degree == 0:
                self.vel_dgt0_projector.update()

        # Reconstruct the gradients
        if self.need_gradient:
            self.convection_scheme.gradient_reconstructor.reconstruct()

        # Update the convection blending factors
        is_static = isinstance(self.convection_scheme, StaticScheme)
        if not is_static:
            self.convection_scheme.update(dt / self.num_subcycles, self.u_conv)

        # Update global bounds in slope limiter
        if self.is_first_timestep:
            lo, hi = self.slope_limiter.set_global_bounds(lo=0.0, hi=1.0)
            if self.slope_limiter.has_global_bounds:
                sim.log.info(
                    'Setting global bounds [%r, %r] in BlendedAlgebraicVofModel' % (lo, hi)
                )

        # Solve the advection equations for the colour field
        if timestep_number == 1 or is_static:
            c.assign(cp)
        else:
            if self.solver is None:
                # Lazily created; see the LAZY_LOAD_SOLVER note in __init__
                sim.log.info('Creating colour function solver', flush=True)
                self.solver = linear_solver_from_input(
                    self.simulation, 'solver/c', default_parameters=SOLVER_OPTIONS
                )

            # Solve the advection equation
            A = self.eq.assemble_lhs()
            for _ in range(self.num_subcycles):
                b = self.eq.assemble_rhs()
                self.solver.inner_solve(A, c.vector(), b, 1, 0)
                self.slope_limiter.run()
                if self.num_subcycles > 1:
                    self.cpp.assign(self.cp)
                    self.cp.assign(c)

        # Optionally use a continuous predicted colour field
        if self.continuous_fields:
            Vcg = self.continuous_c.function_space()
            dolfin.project(c, Vcg, function=self.continuous_c)
            dolfin.project(cp, Vcg, function=self.continuous_c_old)
            dolfin.project(cpp, Vcg, function=self.continuous_c_oldold)

        # Report properties of the colour field
        sim.reporting.report_timestep_value('min(c)', c.vector().min())
        sim.reporting.report_timestep_value('max(c)', c.vector().max())

        # The next update should use the dt from this time step of the
        # main Navier-Stoke solver. The update just computed above uses
        # data from the previous Navier-Stokes solve with the previous dt
        self.dt.assign(dt / self.num_subcycles)
        if dt != sim.dt_prev:
            # Temporary switch to first order timestepping for the next
            # time step. This code is run before the Navier-Stokes solver
            # in each time step
            sim.log.info('VOF solver is first order this time step due to change in dt')
            self.time_coeffs.assign(Constant([1.0, -1.0, 0.0]))
        else:
            # Use second order backward time difference next time step
            self.time_coeffs.assign(Constant([3 / 2, -2.0, 1 / 2]))

        self.update_plot_fields()
        timer.stop()  # Stop timer before hook
        sim.hooks.run_custom_hook('MultiPhaseModelUpdated')
        self.is_first_timestep = False
|
# Nonebot plugin
# AutoWhiteList
# __init__.py
# Author: dixiatielu
from nonebot import on_command, CommandSession
from nonebot import on_natural_language, NLPSession, IntentCommand
from .data_source import get_whitelist

# Plugin metadata shown by NoneBot's built-in help. The values below are
# user-facing Chinese strings and are intentionally kept as-is.
__plugin_name__ = '自动白名单'
__plugin_usage__ = r'''
自动白名单
含有自然语言处理功能
用你最自然的语言,让我为你加上Minecraft服务器的白名单吧!
'''
@on_command('wl', aliases=('白名', '白名单', '加白名单', '添加白名单', '加个白名单'))
async def wl(session: CommandSession):
    # Ask for the player ID (prompting the user if it was not supplied with
    # the command), then run the whitelist command over RCON and reply with
    # the server's response.
    uid = session.get('uid', prompt='您在LittleSkin注册的角色的ID是?')
    ress = await get_whitelist(uid, session.bot.config.RCON_HOST, session.bot.config.RCON_PORT, session.bot.config.RCON_PWD)
    await session.send(ress)
# The wl.args_parser decorator declares this function as the argument parser
# for the 'wl' command. It turns the user's raw input into the data the
# command actually needs.
@wl.args_parser
async def _(session: CommandSession):
    # Strip leading/trailing whitespace from the message
    stripped_arg = session.current_arg_text.strip()

    if session.is_first_run:
        # First run of the command (first entry into this command session)
        if stripped_arg:
            # A non-empty argument on the first run means the user passed the
            # ID directly after the command name, e.g. "wl dixiatielu"
            session.state['uid'] = stripped_arg
        return

    if not stripped_arg:
        # The user sent only whitespace: ask them to re-enter the ID.
        # session.pause() sends the message and pauses the session
        # (code after this line is not executed).
        session.pause('您的ID不能为空呢,请重新输入')

    # The user answered a prompt with valid input: store it in the session
    # state under the key currently being asked for.
    session.state[session.current_key] = stripped_arg
# The on_natural_language decorator declares this function as a natural
# language processor.
# 'keywords' is any iterable of str holding the keywords to respond to;
# without it, every message not handled as a command would be processed.
@on_natural_language(keywords={'白名', 'whitelist'})
async def _(session: NLPSession):
    # Return an intent command; the first two arguments are required:
    # the confidence (0-100) and the intent command name.
    return IntentCommand(90.0, 'wl')
|
from ..hidro.basin import BasinApi
from ..hidro.stations import Stations
from ..hidro.serie_temporal import SerieTemporal
from ..hidro.entities import EntityApi

# Declare the package's public API. The original code assigned this list to
# ``__init__``, which has no special meaning at module level; ``__all__`` is
# the name Python consults for ``from package import *``.
__all__ = ["BasinApi", "Stations", "SerieTemporal", "EntityApi"]
|
import itertools
def Process2LineGraph(size, edge, label):
    """Derive node labels and edges of a two-hop/sibling graph transformation.

    Args:
        size: number of nodes in the original graph.
        edge: pair of parallel lists ``[sources, targets]`` describing edges.
        label: per-edge labels aligned with ``zip(*edge)``.

    Returns:
        Tuple ``(nLabel, nEdge)``: the new per-edge labels and the new edges
        as a ``[sources, targets]`` pair of parallel lists.
    """
    # Accumulate the label of every incoming edge onto its target node.
    node_text = ['' for _ in range(size)]
    for idx, (_, dst) in enumerate(zip(*edge)):
        node_text[dst] += label[idx]

    new_src, new_dst = [], []

    # Adjacency map: node -> list of direct successors (insertion order kept).
    successors = {}
    for src, dst in zip(*edge):
        successors.setdefault(src, []).append(dst)

    for dests in successors.values():
        # Connect every ordered pair of siblings (nodes sharing a parent).
        for a, b in itertools.permutations(dests, 2):
            new_src.append(a)
            new_dst.append(b)
        # Connect each child to its own successors (two-hop chains).
        for child in dests:
            for grandchild in successors.get(child, []):
                new_src.append(child)
                new_dst.append(grandchild)

    new_edges = [new_src, new_dst]
    # Each new edge is labelled with the concatenated texts of its endpoints.
    new_labels = [node_text[a] + node_text[b] for a, b in zip(*new_edges)]
    assert len(new_labels) == len(new_edges[0])
    return new_labels, new_edges
import boto3
import uuid
import json
from socket import gethostbyname, gaierror
from inspect import stack
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def blacklist_ip(ip_address):
    """Deny all traffic from ip_address by adding a DENY rule to every NACL.

    Inserts a deny entry for ``ip_address/32`` just below the current
    lowest-numbered ingress rule of each network ACL so it is evaluated
    before any allow rules.

    Returns True on success, False on any error.
    """
    try:
        client = boto3.client('ec2')
        nacls = client.describe_network_acls()
        for nacl in nacls["NetworkAcls"]:
            # Lowest-numbered ingress (non-egress) rule currently in this NACL
            min_rule_id = min(
                rule['RuleNumber'] for rule in nacl["Entries"] if not rule["Egress"]
            )
            # NACL rule numbers must be >= 1, so there must be room for
            # min_rule_id - 1 (the original check used "< 1", which could
            # never trigger and allowed an invalid RuleNumber of 0).
            if min_rule_id <= 1:
                raise Exception("Rule number is less than 1")
            client.create_network_acl_entry(
                CidrBlock='{}/32'.format(ip_address),
                Egress=False,
                NetworkAclId=nacl["NetworkAclId"],
                Protocol='-1',
                RuleAction='deny',
                RuleNumber=min_rule_id - 1,
            )
        # BUGFIX: the original format string had only one placeholder for two
        # arguments, so the IP address was dropped from the log message.
        logger.info("GDPatrol: Successfully executed action {} for {}".format(
            stack()[0][3], ip_address))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        # Consistent with the other action functions: report failure.
        return False
def whitelist_ip(ip_address):
    """Remove every NACL entry that targets ip_address.

    Scans all network ACLs and deletes any entry whose CIDR block matches
    ``ip_address/32``. Returns True on success, False on error.
    """
    try:
        ec2 = boto3.client('ec2')
        target_cidr = '{}/32'.format(ip_address)
        for nacl in ec2.describe_network_acls()["NetworkAcls"]:
            matching_rules = [r for r in nacl["Entries"] if r["CidrBlock"] == target_cidr]
            for rule in matching_rules:
                ec2.delete_network_acl_entry(
                    NetworkAclId=nacl["NetworkAclId"],
                    Egress=rule["Egress"],
                    RuleNumber=rule["RuleNumber"]
                )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], ip_address))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def quarantine_instance(instance_id, vpc_id):
    """Isolate an instance behind a fresh security group with no rules.

    Creates a new security group in ``vpc_id``, strips its default egress
    rule, and attaches it as the instance's only security group.
    Returns True on success, False on error.
    """
    try:
        ec2 = boto3.client('ec2')
        quarantine_sg = ec2.create_security_group(
            GroupName='Quarantine-{}'.format(str(uuid.uuid4().fields[-1])[:6]),
            Description='Quarantine for {}'.format(instance_id),
            VpcId=vpc_id
        )
        group_id = quarantine_sg["GroupId"]
        # A new SG allows all egress by default; revoke that rule so the
        # quarantined instance cannot reach anything.
        ec2.revoke_security_group_egress(
            GroupId=group_id,
            IpPermissions=[
                {
                    'IpProtocol': '-1',
                    'FromPort': 0,
                    'ToPort': 65535,
                    'IpRanges': [{'CidrIp': "0.0.0.0/0"}],
                }
            ]
        )
        # Replace the instance's security groups with the quarantine SG only.
        ec2.modify_instance_attribute(InstanceId=instance_id, Groups=[group_id])
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], instance_id))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def snapshot_instance(instance_id):
    """Snapshot every EBS volume attached to an instance (forensics aid).

    Returns True on success, False on error.
    """
    try:
        ec2 = boto3.client('ec2')
        described = ec2.describe_instances(InstanceIds=[instance_id])
        device_mappings = described['Reservations'][0]['Instances'][0]['BlockDeviceMappings']
        for mapping in device_mappings:
            ec2.create_snapshot(
                VolumeId=mapping["Ebs"]["VolumeId"],
                Description="Created by GDpatrol for {}".format(instance_id)
            )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], instance_id))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def disable_account(username):
    """Attach an inline deny-all policy to an IAM user.

    Returns True on success, False on error.
    """
    try:
        iam = boto3.client('iam')
        # Inline policy denying every action on every resource.
        iam.put_user_policy(
            UserName=username,
            PolicyName='BlockAllPolicy',
            PolicyDocument="{\"Version\":\"2012-10-17\", \"Statement\""
                           ":{\"Effect\":\"Deny\", \"Action\":\"*\", "
                           "\"Resource\":\"*\"}}"
        )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], username))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def disable_ec2_access(username):
    """Attach an inline policy denying all EC2 actions to an IAM user.

    Returns True on success, False on error.
    """
    try:
        iam = boto3.client('iam')
        # Inline policy denying every ec2:* action on every resource.
        iam.put_user_policy(
            UserName=username,
            PolicyName='BlockEC2Policy',
            PolicyDocument="{\"Version\":\"2012-10-17\", \"Statement\""
                           ":{\"Effect\":\"Deny\", \"Action\":\"ec2:*\" , "
                           "\"Resource\":\"*\"}}"
        )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], username))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def enable_ec2_access(username):
    """Restore EC2 access by removing the BlockEC2Policy inline policy.

    Returns True on success, False on error.
    """
    try:
        iam = boto3.client('iam')
        iam.delete_user_policy(
            UserName=username,
            PolicyName='BlockEC2Policy',
        )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], username))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def disable_sg_access(username):
    """Attach an inline policy denying security-group changes to an IAM user.

    Returns True on success, False on error.
    """
    try:
        iam = boto3.client('iam')
        # Inline policy denying all ingress/egress security group mutations.
        iam.put_user_policy(
            UserName=username,
            PolicyName='BlockSecurityGroupPolicy',
            PolicyDocument="{\"Version\":\"2012-10-17\", \"Statement\""
                           ":{\"Effect\":\"Deny\", \"Action\": [ "
                           "\"ec2:AuthorizeSecurityGroupIngress\", "
                           "\"ec2:RevokeSecurityGroupIngress\", "
                           "\"ec2:AuthorizeSecurityGroupEgress\", "
                           "\"ec2:RevokeSecurityGroupEgress\" ], "
                           "\"Resource\":\"*\"}}"
        )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], username))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def enable_sg_access(username):
    """Restore security-group access by removing BlockSecurityGroupPolicy.

    Returns True on success, False on error.
    """
    try:
        iam = boto3.client('iam')
        iam.delete_user_policy(
            UserName=username,
            PolicyName='BlockSecurityGroupPolicy',
        )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], username))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
def asg_detach_instance(instance_id):
    """Detach an instance from its Auto Scaling group, if it belongs to one.

    The desired capacity is not decremented, so the ASG will replace the
    detached instance. Returns True on success (including when the instance
    is in no ASG), False on error.
    """
    try:
        autoscaling = boto3.client('autoscaling')
        described = autoscaling.describe_auto_scaling_instances(
            InstanceIds=[instance_id],
            MaxRecords=1
        )
        asg_instances = described['AutoScalingInstances']
        group_name = asg_instances[0]['AutoScalingGroupName'] if asg_instances else None
        if group_name is not None:
            autoscaling.detach_instances(
                InstanceIds=[instance_id],
                AutoScalingGroupName=group_name,
                ShouldDecrementDesiredCapacity=False
            )
        logger.info("GDPatrol: Successfully executed action {} for {}".format(stack()[0][3], instance_id))
        return True
    except Exception as e:
        logger.error("GDPatrol: Error executing {} - {}".format(stack()[0][3], e))
        return False
class Config(object):
    """Playbook configuration for a single GuardDuty finding type.

    Reads ``config.json`` (in the working directory) and exposes the actions
    and reliability score configured for ``finding_type``.
    """

    def __init__(self, finding_type):
        self.finding_type = finding_type
        # Actions configured for this finding type ([] if not configured)
        self.actions = []
        # Reliability score for this finding type (0 if not configured)
        self.reliability = 0

    def _playbook_items(self):
        """Return the playbook entry list from config.json (re-read each call)."""
        with open('config.json', 'r') as config:
            return json.loads(config.read())['playbooks']['playbook']

    def get_actions(self):
        """Return the list of actions configured for this finding type."""
        for item in self._playbook_items():
            if item['type'] == self.finding_type:
                self.actions = item['actions']
        return self.actions

    def get_reliability(self):
        """Return the configured reliability score as an int."""
        for item in self._playbook_items():
            if item['type'] == self.finding_type:
                self.reliability = int(item['reliability'])
        return self.reliability
def lambda_handler(event, context):
    """Lambda entry point: run the playbook configured for a GuardDuty finding.

    Parses the finding JSON in ``event``, loads the actions and reliability
    configured for the finding type from config.json, and executes every
    configured action whose severity + reliability score exceeds 10.
    """
    # BUGFIX: the original format string had no placeholder, so the event was
    # silently dropped from this log line.
    logger.info("GDPatrol: Received JSON event - {}".format(event))
    try:
        finding_id = event['id']
        finding_type = event['type']
        logger.info("GDPatrol: Parsed Finding ID: {} - Finding Type: {}".format(finding_id, finding_type))
        config = Config(event['type'])
        severity = int(event['severity'])
        config_actions = config.get_actions()
        config_reliability = config.get_reliability()
        resource_type = event['resource']['resourceType']
    except KeyError as e:
        logger.error("GDPatrol: Could not parse the Finding fields correctly, please verify that the JSON is correct")
        exit(1)

    # Extract the identifiers the actions below may need. Depending on the
    # finding, some of these locals are intentionally left unset; an action
    # that requires an unset one fails with an unbound-variable error, exactly
    # as in the original code.
    if resource_type == 'Instance':
        instance = event['resource']['instanceDetails']
        instance_id = instance["instanceId"]
        vpc_id = instance['networkInterfaces'][0]['vpcId']
    elif resource_type == 'AccessKey':
        username = event['resource']['accessKeyDetails']['userName']
    if event['service']['action']['actionType'] == 'DNS_REQUEST':
        domain = event['service']['action']['dnsRequestAction']['domain']
    elif event['service']['action']['actionType'] == 'AWS_API_CALL':
        ip_address = event['service']['action']['awsApiCallAction']['remoteIpDetails']['ipAddressV4']
    elif event['service']['action']['actionType'] == 'NETWORK_CONNECTION':
        ip_address = event['service']['action']['networkConnectionAction']['remoteIpDetails']['ipAddressV4']
    elif event['service']['action']['actionType'] == 'PORT_PROBE':
        ip_address = event['service']['action']['portProbeAction']['portProbeDetails'][0]['remoteIpDetails']['ipAddressV4']

    known_actions = (
        'blacklist_ip', 'whitelist_ip', 'blacklist_domain', 'quarantine_instance',
        'snapshot_instance', 'disable_account', 'disable_ec2_access',
        'enable_ec2_access', 'disable_sg_access', 'enable_sg_access',
        'asg_detach_instance',
    )
    successful_actions = 0
    total_config_actions = len(config_actions)
    actions_to_be_executed = 0
    for action in config_actions:
        logger.info("GDPatrol: Action: {}".format(action))
        # Every action branch in the original used this same threshold check;
        # unknown action names are logged above and skipped, as before.
        if action not in known_actions or severity + config_reliability <= 10:
            continue
        actions_to_be_executed += 1
        logger.info("GDPatrol: Executing action {}".format(action))
        if action == 'blacklist_domain':
            # Resolve the domain first; a failed lookup only logs an error
            # and does not count as a failed action.
            try:
                ip_address = gethostbyname(domain)
            except gaierror as e:
                logger.error("GDPatrol: Error resolving domain {} - {}".format(domain, e))
                continue
            result = blacklist_ip(ip_address)
        elif action == 'blacklist_ip':
            result = blacklist_ip(ip_address)
        elif action == 'whitelist_ip':
            result = whitelist_ip(ip_address)
        elif action == 'quarantine_instance':
            result = quarantine_instance(instance_id, vpc_id)
        elif action == 'snapshot_instance':
            result = snapshot_instance(instance_id)
        elif action == 'disable_account':
            result = disable_account(username)
        elif action == 'disable_ec2_access':
            result = disable_ec2_access(username)
        elif action == 'enable_ec2_access':
            result = enable_ec2_access(username)
        elif action == 'disable_sg_access':
            result = disable_sg_access(username)
        elif action == 'enable_sg_access':
            result = enable_sg_access(username)
        elif action == 'asg_detach_instance':
            result = asg_detach_instance(instance_id)
        successful_actions += int(result)

    logger.info("GDPatrol: Total actions: {} - Actions to be executed: {} - Successful Actions: {} - Finding ID: {} - Finding Type: {}".format(
        total_config_actions, actions_to_be_executed, successful_actions, finding_id, finding_type))
|
#!/usr/bin/env python3
"""Find the 9 hashes needed for subscription local geo realtime updates"""
__author__ = "H. Martin"
__version__ = "0.1.0"
import argparse
import geohash
import json
import redis
from http.server import BaseHTTPRequestHandler, HTTPServer
from optparse import OptionParser
from urllib.parse import urlparse, parse_qs
# Shared Redis connection used to look up per-geohash point counters
# (keys of the form '<geohash>.count').
redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
class RequestHandler(BaseHTTPRequestHandler):
    """Serve the set of geohash cells around a point covering >= `min` points.

    Point counts per cell are read from Redis counters named
    '<geohash>.count'.
    """
    protocol_version = 'HTTP/1.1'

    def count_geohash(self, hashcode):
        # NOTE(review): assumes the counter key exists -- redis returns None
        # for a missing key and int(None) raises; confirm all cells are
        # pre-populated upstream.
        return int(redis_client.get(hashcode + '.count'))

    def should_zoom_up(self, hashcode, minimum):
        # True when even the parent cell (one precision level shorter)
        # holds fewer than `minimum` points.
        start_count = self.count_geohash(hashcode[:-1])
        return start_count < minimum

    def zoom_out_slightly(self, nbrs, minimum):
        """Grow the cell set by every cell's neighbours, recursing until the
        combined count reaches `minimum`."""
        zoomed_nbrs = {}
        zcount = 0
        for n in nbrs:
            znbrs = geohash.neighbors(n)
            if n not in zoomed_nbrs:
                zoomed_nbrs[n] = self.count_geohash(n)
                zcount += zoomed_nbrs[n]
            for zn in znbrs:
                if zn not in zoomed_nbrs:
                    zoomed_nbrs[zn] = self.count_geohash(zn)
                    zcount += zoomed_nbrs[zn]
        znbrs = list(zoomed_nbrs.keys())
        if zcount < minimum:
            return self.zoom_out_slightly(znbrs, minimum)
        else:
            return znbrs

    def zoom_out(self, hashcode, minimum):
        """Return a list of geohashes around `hashcode` whose combined count
        is at least `minimum`."""
        start_count = self.count_geohash(hashcode)
        if start_count > minimum:
            # The centre cell alone is enough
            print(str(start_count) + " > " + str(minimum))
            return [hashcode]
        nbrs = geohash.neighbors(hashcode)
        # Total over the centre cell and its 8 neighbours
        ncount = self.count_geohash(hashcode)
        for n in nbrs:
            i = self.count_geohash(n)
            ncount += i
        if ncount > minimum:
            nbrs.append(hashcode)
            return nbrs
        elif self.should_zoom_up(hashcode, minimum):
            # Retry one precision level up (a coarser, larger cell)
            return self.zoom_out(hashcode[:-1], minimum)
        else:
            return self.zoom_out_slightly(nbrs, minimum)

    def send_not_found(self, msg):
        self.send_error(404, message=msg)

    def do_GET(self):
        """Answer ?lat=&lon=&min= with a JSON body {'subs': [geohash, ...]}."""
        r = urlparse(self.path)
        query = parse_qs(r.query)
        gh = geohash.encode(float(query['lat'][0]), float(query['lon'][0]), precision=6)
        minimum = int(query['min'][0])
        subs = self.zoom_out(gh, minimum)
        resp_body = bytes(json.dumps({'subs':subs}), "utf8")
        self.send_response(200)
        self.send_header('content-length', len(resp_body))
        # BUGFIX: the original sent the invalid MIME type 'plain/text';
        # the body is JSON, so label it accordingly.
        self.send_header('content-type', 'application/json')
        self.end_headers()
        self.wfile.write(resp_body)

    do_POST = do_GET
    do_PUT = do_GET
def main():
    """Parse the listen port and serve requests forever."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-p', '--port', type=int, default=8080)
    options = arg_parser.parse_args()
    HTTPServer(('', options.port), RequestHandler).serve_forever()


if __name__ == "__main__":
    main()
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CVXPY wrappers."""
from typing import Any
from typing import Callable
from typing import Optional
from typing import Tuple
from dataclasses import dataclass
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src import implicit_diff as idf
from jaxopt._src import linear_solve
from jaxopt._src import tree_util
def _check_params(params_obj, params_eq=None, params_ineq=None):
    """Validate the shapes of QP parameters, raising ValueError on mismatch."""
    if params_obj is None:
        raise ValueError("params_obj should be a tuple (Q, c)")
    Q, c = params_obj
    dim = Q.shape[1]
    if Q.shape[0] != dim:
        raise ValueError("Q must be a square matrix.")
    if dim != c.shape[0]:
        raise ValueError("Q.shape[1] != c.shape[0]")
    if params_eq is not None:
        A, b = params_eq
        if A.shape[0] != b.shape[0]:
            raise ValueError("A.shape[0] != b.shape[0]")
        if A.shape[1] != dim:
            raise ValueError("Q.shape[1] != A.shape[1]")
    if params_ineq is not None:
        G, h = params_ineq
        if G.shape[0] != h.shape[0]:
            raise ValueError("G.shape[0] != h.shape[0]")
        if G.shape[1] != dim:
            raise ValueError("G.shape[1] != Q.shape[1]")
def _make_cvxpy_qp_optimality_fun():
    """Makes the optimality function for CVXPY quadratic programming.

    Returns:
      optimality_fun(params, params_obj, params_eq, params_ineq) where
        params = (primal_var, eq_dual_var, ineq_dual_var)
        params_obj = (Q, c)
        params_eq = (A, b) or None
        params_ineq = (G, h) or None
    """
    def obj_fun(primal_var, params_obj):
        # 0.5 x^T Q x + c^T x
        Q, c = params_obj
        quadratic = jnp.vdot(primal_var, jnp.dot(Q, primal_var))
        linear = jnp.vdot(primal_var, c)
        return 0.5 * quadratic + linear

    def eq_fun(primal_var, params_eq):
        # Equality residual A x - b, or None when unconstrained.
        if params_eq is None:
            return None
        A, b = params_eq
        return jnp.dot(A, primal_var) - b

    def ineq_fun(primal_var, params_ineq):
        # Inequality residual G x - h, or None when unconstrained.
        if params_ineq is None:
            return None
        G, h = params_ineq
        return jnp.dot(G, primal_var) - h

    return idf.make_kkt_optimality_fun(obj_fun, eq_fun, ineq_fun)
@dataclass(eq=False)
class CvxpyQP(base.Solver):
  """Wraps CVXPY's quadratic solver with implicit diff support.

  No support for matvec, pytrees, jit and vmap.
  Meant to be run on CPU. Provide high precision solutions.

  The objective function is::

    0.5 * x^T Q x + c^T x subject to Gx <= h, Ax = b.

  Attributes:
    solver: string specifying the underlying solver used by Cvxpy, in ``"OSQP", "ECOS", "SCS"`` (default: ``"OSQP"``).
    implicit_diff_solve: linear solver used for the implicit differentiation
      of the KKT conditions (None lets ``idf.custom_root`` pick its default).
  """
  solver: str = 'OSQP' #TODO(lbethune): "True" original OSQP implementation (not the one written in Jax). Confusing for user ?
  implicit_diff_solve: Optional[Callable] = None

  def run(self,
          init_params: Optional[jnp.ndarray], # unused
          params_obj: base.ArrayPair,
          params_eq: Optional[base.ArrayPair] = None,
          params_ineq: Optional[base.ArrayPair] = None) -> base.OptStep:
    """Runs the quadratic programming solver in CVXPY.

    The returned params contains both the primal and dual solutions.

    Args:
      init_params: ignored.
      params_obj: (Q, c).
      params_eq: (A, b) or None if no equality constraints.
      params_ineq: (G, h) or None if no inequality constraints.
    Returns:
      (params, state), ``params = (primal_var, dual_var_eq, dual_var_ineq)``
    Raises:
      ValueError: if the parameter shapes are inconsistent, or if CVXPY
        reports the problem as infeasible or unbounded.
    """
    # TODO(frostig,mblondel): experiment with `jax.experimental.host_callback`
    # to "support" other devices (GPU/TPU) in the interim, by calling into the
    # host CPU and running cvxpy there.
    #
    # TODO(lbethune): the interface of CVXPY could easily allow pytrees for constraints,
    # by populating `constraints` list with different Ai x = bi and Gi x <= hi.
    # Pytree support for x could be possible by creating a cp.Variable for each leaf in the pytree c.
    # Imported lazily so cvxpy is only required when this solver is used.
    import cvxpy as cp
    del init_params # no warm start
    _check_params(params_obj, params_eq, params_ineq)
    Q, c = params_obj
    x = cp.Variable(len(c))
    objective = 0.5 * cp.quad_form(x, Q) + c.T @ x
    constraints = []
    # The equality block (if present) is appended first, the inequality block
    # last; dual extraction below relies on this ordering.
    if params_eq is not None:
      A, b = params_eq
      constraints.append(A @ x == b)
    if params_ineq is not None:
      G, h = params_ineq
      constraints.append(G @ x <= h)
    pb = cp.Problem(cp.Minimize(objective), constraints)
    pb.solve(solver=self.solver)
    if pb.status in ["infeasible", "unbounded"]:
      raise ValueError("The problem is %s." % pb.status)
    # constraints[0] is the equality block, constraints[-1] the inequality
    # block (they coincide when only one kind of constraint exists).
    dual_eq = None if params_eq is None else jnp.array(pb.constraints[0].dual_value)
    dual_ineq = None if params_ineq is None else jnp.array(pb.constraints[-1].dual_value)
    sol = base.KKTSolution(primal=jnp.array(x.value),
                           dual_eq=dual_eq,
                           dual_ineq=dual_ineq)
    # TODO(lbethune): pb.solver_stats is a "state" the user might be interested in.
    return base.OptStep(params=sol, state=None)

  def l2_optimality_error(
      self,
      params: jnp.ndarray,
      params_obj: base.ArrayPair,
      params_eq: Optional[base.ArrayPair],
      params_ineq: Optional[base.ArrayPair]) -> base.OptStep:
    """Computes the L2 norm of the KKT residuals."""
    pytree = self.optimality_fun(params, params_obj, params_eq, params_ineq)
    return tree_util.tree_l2_norm(pytree)

  def __post_init__(self):
    # The KKT optimality function defines the fixed point that implicit
    # differentiation solves through.
    self.optimality_fun = _make_cvxpy_qp_optimality_fun()
    # Set up implicit diff: wrap run() so its output is differentiable
    # w.r.t. the QP parameters via the KKT conditions.
    decorator = idf.custom_root(self.optimality_fun, has_aux=True,
                                solve=self.implicit_diff_solve)
    # pylint: disable=g-missing-from-attributes
    self.run = decorator(self.run)
|
import sympy
import sympy as sp
from sympy.core.relational import Relational
from Abstract.equation import Equation
class SymEquation(Equation):
    """
    Concrete equation built purely on the sympy.core.relational.Relational class.

    NOTE(review): this class references ``LPPy.Abstract.equation`` constants
    (LEQ/GEQ/EEQ) but ``LPPy`` is not imported in this module's visible
    imports — confirm the import exists elsewhere or add it, otherwise those
    code paths raise NameError.
    """
    # The wrapped sympy relational (or a bare sympy expression when the
    # equation has no relational operator).
    symq: Relational

    def __init__(self, eq):
        """Wrap a sympy Relational (or plain expression) as an equation."""
        self.symq = eq

    def set_type(self, mode):
        """Rebuild the relation with the relational operator ``mode``."""
        self.symq = Relational(self.symq.lhs, self.symq.rhs, mode)

    def add_slack_variable(self, variables) -> Equation:
        """Return a new equation with a fresh slack variable that turns an
        inequality into an equality; equalities are returned unchanged.

        The slack is named ``y<i>`` for the smallest i whose symbol is not
        already present in ``variables``.
        """
        i = 1
        new_slack = sympy.symbols(f"y{i}")
        while new_slack in variables:
            i += 1
            new_slack = sympy.symbols(f"y{i}")
        if self.get_type() == LPPy.Abstract.equation.LEQ:
            return SymEquation(Relational(self.symq.lhs + new_slack, self.symq.rhs, LPPy.Abstract.equation.EEQ))
        elif self.get_type() == LPPy.Abstract.equation.GEQ:
            return SymEquation(Relational(self.symq.lhs - new_slack, self.symq.rhs, LPPy.Abstract.equation.EEQ))
        else:
            return self

    def __neg__(self):
        """Negate both sides, flipping the direction of an inequality."""
        if self.get_type() == LPPy.Abstract.equation.LEQ:
            new_type = LPPy.Abstract.equation.GEQ
        elif self.get_type() == LPPy.Abstract.equation.GEQ:
            new_type = LPPy.Abstract.equation.LEQ
        else:
            new_type = LPPy.Abstract.equation.EEQ
        return SymEquation(Relational(-self.symq.lhs, -self.symq.rhs, new_type))

    def get_array_form(self, mask):
        """Return the coefficient of each variable in ``mask``.

        Coefficients are read from the lhs for relational equations, or from
        the whole expression when there is no relational operator.
        """
        if self.get_type() is None:
            focus = self.symq
        else:
            focus = self.symq.lhs
        return [focus.coeff(item) for item in mask]

    def get_type(self):
        """Map the sympy relational class to the LPPy type constant
        (LEQ/GEQ/EEQ), or None for a bare expression."""
        symq_t = type(self.symq)
        if symq_t == sp.core.relational.Le:
            return LPPy.Abstract.equation.LEQ
        elif symq_t == sp.core.relational.Ge:
            return LPPy.Abstract.equation.GEQ
        elif symq_t == sp.core.relational.Eq:
            return LPPy.Abstract.equation.EEQ
        else:
            return None

    def get_lhs(self):
        """Left-hand side expression."""
        return self.symq.lhs

    def get_rhs(self):
        """Right-hand side expression."""
        return self.symq.rhs

    def substitute(self, old_var, new_var):
        """Replace ``old_var`` with ``new_var`` in place.

        Bug fix: sympy's ``subs`` returns a *new* expression instead of
        mutating, so the previous implementation discarded the result and
        this method was a no-op. The result is now assigned back.
        """
        self.symq = self.symq.subs(old_var, new_var)

    def __str__(self):
        """Human-readable form, e.g. ``x + y <= 3``."""
        if self.get_type() is not None:
            return self.symq.lhs.__str__() + " " + self.get_type() + " " + self.symq.rhs.__str__()
        else:
            return self.symq.__str__()

    def get_vars(self):
        """All free symbols appearing in the equation."""
        return list(self.symq.free_symbols)
|
import tensorflow as tf
# Inception module 1
def Inception_traditional(Inputs, nfilters_11=64, nfilters_11Before33=64,
                          nfilters_11Before55=48, nfilters_11After33Pool=32,
                          nfilters_33=96, nfilters_55=64, name=None):
    '''
    The most basic Inception module: concatenates convolution results with
    different receptive fields.
    The parameterization could be even finer-grained; here every convolution
    defaults to stride 1 and 'same' padding.
    :param Inputs: output of the previous layer, input to this module
    :param nfilters_11: number of kernels in the 1x1 convolution branch
    :param nfilters_11Before33: number of 1x1 reduction kernels before the 3x3 convolution
    :param nfilters_11Before55: number of 1x1 reduction kernels before the 5x5 convolution
    :param nfilters_11After33Pool: number of 1x1 kernels after the 3x3 pooling
    :param nfilters_33: number of kernels in the 3x3 convolution
    :param nfilters_55: number of kernels in the 5x5 convolution (implemented
        below as two stacked 3x3 convolutions, both using this kernel count)
    :param name: name of this layer (currently unused)
    :return: concatenation of all branch outputs along the channel axis
    '''
    # 1x1 convolution branch
    conv1 = tf.layers.conv2d(inputs=Inputs, filters=nfilters_11, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # 3x3 convolution branch (1x1 reduction first)
    conv2_1 = tf.layers.conv2d(inputs=Inputs, filters=nfilters_11Before33, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_2 = tf.layers.conv2d(inputs=conv2_1, filters=nfilters_33, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    # 5x5 convolution branch, realized as two stacked 3x3 convolutions
    conv3_1 = tf.layers.conv2d(inputs=Inputs, filters=nfilters_11Before55, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_2 = tf.layers.conv2d(inputs=conv3_1, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_3 = tf.layers.conv2d(inputs=conv3_2, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    # pooling + 1x1 convolution branch
    pool = tf.layers.average_pooling2d(inputs=Inputs, pool_size=3, strides=1, padding='same')
    conv4 = tf.layers.conv2d(inputs=pool, filters=nfilters_11After33Pool, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # concatenate all branch outputs along the channel axis
    outputs = tf.concat([conv1, conv2_2, conv3_3, conv4], axis=-1)
    return outputs
# Inception module 2 带不对称的卷积
def Inception_AsymmetricConv(Inputs, nfilters_11=192, nfilters_11Before7=128,
                             nfilters_11Before77=128, nfilters_11After33Pool=192,
                             nfilters_7=128, nfilters_77=128, name=None):
    '''
    Replaces each nxn convolution with consecutive 1xn and nx1 convolutions.
    The layer has more degrees of freedom than exposed here; these parameters
    capture the general idea. All strides default to 1.
    :param Inputs: input tensor
    :param nfilters_11: number of kernels in the 1x1 convolution branch
    :param nfilters_11Before7: number of 1x1 kernels before the 1x7 then 7x1 branch
    :param nfilters_11Before77: number of 1x1 kernels before the 7x1,1x7,7x1,1x7 branch
    :param nfilters_11After33Pool: number of 1x1 kernels after the 3x3 pooling
    :param nfilters_7: kernel count of the 1x7 then 7x1 convolutions
    :param nfilters_77: kernel count of the 7x1,1x7,7x1,1x7 convolutions
    :param name: name of this layer (currently unused)
    :return: concatenation of all branch outputs along the channel axis
    '''
    # 1x1 convolution branch
    conv1 = tf.layers.conv2d(Inputs, filters=nfilters_11, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # 1x7 then 7x1 convolution branch
    conv2_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before7, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_2 = tf.layers.conv2d(conv2_1, filters=nfilters_7, kernel_size=(1, 7), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_3 = tf.layers.conv2d(conv2_2, filters=nfilters_7, kernel_size=(7, 1), strides=1, padding='same',
                               activation=tf.nn.relu)
    # 7x1, 1x7 then again 7x1, 1x7 convolution branch.
    # Bug fix: the 1x1 reduction below was missing padding='same' and the
    # ReLU activation, unlike every other 1x1 reduction in this file (the
    # branch was silently linear at this step).
    conv3_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before77, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_2 = tf.layers.conv2d(conv3_1, filters=nfilters_77, kernel_size=(7, 1), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_3 = tf.layers.conv2d(conv3_2, filters=nfilters_77, kernel_size=(1, 7), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_4 = tf.layers.conv2d(conv3_3, filters=nfilters_77, kernel_size=(7, 1), strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_5 = tf.layers.conv2d(conv3_4, filters=nfilters_77, kernel_size=(1, 7), strides=1, padding='same',
                               activation=tf.nn.relu)
    # pooling + 1x1 convolution branch
    pool = tf.layers.average_pooling2d(Inputs, pool_size=3, strides=1, padding='same')
    conv4 = tf.layers.conv2d(pool, filters=nfilters_11After33Pool, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # concatenate all branch outputs along the channel axis
    outputs = tf.concat([conv1, conv2_3, conv3_5, conv4], axis=-1)
    return outputs
# Inception module 3 平行的不对称的卷积
def Inception_parallelAsymmetricConv(Inputs, nfilters_11=320, nfilters_11Before33=384,
                                     nfilters_11Before55=448, nfilters_11After33Pool=192,
                                     nfilters_33=384, nfilters_55=384, name=None):
    '''
    Runs the 1xn and nx1 convolutions in parallel and concatenates them.
    :param Inputs: input tensor
    :param nfilters_11: number of kernels in the 1x1 convolution branch
    :param nfilters_11Before33: number of 1x1 reduction kernels before the 3x3 branch
    :param nfilters_11Before55: number of 1x1 reduction kernels before the 5x5 branch
    :param nfilters_11After33Pool: number of 1x1 kernels after the 3x3 pooling
    :param nfilters_33: kernel count of the parallel 1x3 / 3x1 convolutions
    :param nfilters_55: kernel count for the branch of two stacked 3x3
        convolutions, where the second 3x3 is realized as parallel 1x3 and
        3x1 convolutions
    :param name: name of this layer (currently unused)
    :return: concatenation of all branch outputs along the channel axis
    '''
    # 1x1 convolution branch
    conv1 = tf.layers.conv2d(Inputs, filters=nfilters_11, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # 3x3 branch: 1x1 reduction, then parallel 1x3 and 3x1 convolutions
    conv2_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before33, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_21 = tf.layers.conv2d(conv2_1, filters=nfilters_33, kernel_size=(1, 3), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv2_22 = tf.layers.conv2d(conv2_1, filters=nfilters_33, kernel_size=(3, 1), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv2_3 = tf.concat([conv2_21, conv2_22], axis=-1)
    # double 3x3 branch: the second 3x3 is split into parallel 1x3 and 3x1
    conv3_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before55, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_2 = tf.layers.conv2d(conv3_1, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv3_31 = tf.layers.conv2d(conv3_2, filters=nfilters_55, kernel_size=(1, 3), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv3_32 = tf.layers.conv2d(conv3_2, filters=nfilters_55, kernel_size=(3, 1), strides=1, padding='same',
                                activation=tf.nn.relu)
    conv3_4 = tf.concat([conv3_31, conv3_32], axis=-1)
    # pooling + 1x1 convolution branch
    pool = tf.layers.average_pooling2d(Inputs, pool_size=3, strides=1, padding='same')
    conv4 = tf.layers.conv2d(pool, filters=nfilters_11After33Pool, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    # concatenate all branch outputs along the channel axis
    outputs = tf.concat([conv1, conv2_3, conv3_4, conv4], axis=-1)
    return outputs
# 池化和卷积并行的降特征图尺寸的方法
def reduction(Inputs, nfilters_11Before33=192, nfilters_11Before55=192,
              nfilters_33=320, nfilters_55=192, ):
    '''
    Downsamples the feature maps by running stride-2 convolutions and pooling
    in parallel and concatenating the results.
    Note: the last convolution of each branch (just before concatenation)
    uses stride 2.
    :param Inputs: input tensor
    :param nfilters_11Before33: number of 1x1 kernels before the 3x3 convolution
    :param nfilters_11Before55: number of 1x1 kernels before the double-3x3 branch
    :param nfilters_33: kernel count of the 3x3 convolution
    :param nfilters_55: kernel count of the two stacked 3x3 convolutions
    :return: concatenation of all branch outputs along the channel axis
    '''
    # 3x3 convolution branch (final conv has stride 2)
    conv1_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before33, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv1_2 = tf.layers.conv2d(conv1_1, filters=nfilters_33, kernel_size=3, strides=2, padding='same',
                               activation=tf.nn.relu)
    # double 3x3 convolution branch (second conv has stride 2)
    conv2_1 = tf.layers.conv2d(Inputs, filters=nfilters_11Before55, kernel_size=1, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_2 = tf.layers.conv2d(conv2_1, filters=nfilters_55, kernel_size=3, strides=1, padding='same',
                               activation=tf.nn.relu)
    conv2_3 = tf.layers.conv2d(conv2_2, filters=nfilters_55, kernel_size=3, strides=2, padding='same',
                               activation=tf.nn.relu)
    # stride-2 pooling branch
    pool = tf.layers.average_pooling2d(Inputs, pool_size=3, strides=2, padding='same')
    # concatenate along the channel axis
    outputs = tf.concat([conv1_2, conv2_3, pool], axis=-1)
    return outputs
# 模型初始的部分
def InitialPart(Inputs):
    '''
    Network stem: the plain convolutions and poolings applied before any
    Inception module, as in the paper's model.
    :param Inputs: the initial image input
    :return: feature map produced by the stem
    '''
    conv1 = tf.layers.conv2d(Inputs, filters=32, kernel_size=3, strides=2, padding='same',
                             activation=tf.nn.relu)
    conv2 = tf.layers.conv2d(conv1, filters=32, kernel_size=3, strides=1, padding='same',
                             activation=tf.nn.relu)
    conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=3, strides=1, padding='same',
                             activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv3, pool_size=3, strides=2, padding='same')
    conv4 = tf.layers.conv2d(pool1, filters=80, kernel_size=1, strides=1, padding='same',
                             activation=tf.nn.relu)
    conv5 = tf.layers.conv2d(conv4, filters=192, kernel_size=3, strides=1, padding='same',
                             activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv5, pool_size=3, strides=2, padding='same')
    return pool2
def multiChannelWeightLayer(Inputs, batchNormTraining,batchSize):
    '''
    Applies BatchNorm + relu + Wx_plus_b to the input.
    :param Inputs: input tensor; the channel (last) dimension and dim 1 must
        be statically known (read via get_shape().value below)
    :param batchNormTraining: `training` flag for the batch-norm layer; pass
        different values during training and inference
    :param batchSize: dynamic batch size used to tile the shared weight and
        bias across the batch dimension
    :return: tensor with the same layout as Inputs
    '''
    batchNorm = tf.layers.batch_normalization(Inputs, training=batchNormTraining)
    relu = tf.nn.relu(batchNorm)
    # Move channels next to the batch dim so the matmul below contracts over
    # the trailing two (spatial) dimensions.
    transposed = tf.transpose(relu, [0, 3, 1, 2])
    # NOTE(review): `.value` and tf.layers are TF1-era APIs; this also
    # assumes square spatial dims since only dim 1 is read — TODO confirm.
    num_channels = Inputs.get_shape()[-1].value
    size = Inputs.get_shape()[1].value
    # A single (size x size) weight matrix, shared by every channel and every
    # batch element via tiling (the parameters are not duplicated).
    weight = tf.Variable(tf.truncated_normal(shape=(size, size)), dtype=tf.float32, trainable=True)
    weight_expand = tf.expand_dims(weight, axis=0)
    weight_nchannels = tf.tile(weight_expand, tf.constant([num_channels, 1, 1]))
    batch_expand = tf.expand_dims(weight_nchannels, axis=0)
    # Tile across the (dynamic) batch dimension.
    weight_final = tf.tile(batch_expand, tf.concat([tf.stack([batchSize,1],axis=0),[1,1]],axis=0))
    WX = tf.matmul(transposed, weight_final)
    # Shared (size,) bias vector, expanded the same way as the weight.
    bias = tf.Variable(tf.truncated_normal(shape=(size,)), dtype=tf.float32, trainable=True)
    bias_expand = tf.expand_dims(bias, axis=0)
    bias_size = tf.tile(bias_expand, tf.constant([size, 1]))
    bias_channels_expand = tf.expand_dims(bias_size, axis=0)
    bias_channels = tf.tile(bias_channels_expand, tf.constant([num_channels, 1, 1]))
    bias_batch_expand = tf.expand_dims(bias_channels, axis=0)
    bias_final = tf.tile(bias_batch_expand,tf.concat([tf.stack([batchSize,1],axis=0),[1,1]],axis=0))
    WX_PLUS_B = WX + bias_final
    # Transpose back to the input layout.
    outputs = tf.transpose(WX_PLUS_B, [0, 2, 3, 1])
    return outputs
def ResNetBlock(Inputs, batchNormTraining, batchSize):
    '''
    Residual block: stacks two (BatchNorm + relu + Wx_plus_b) layers and adds
    the result back onto the input (identity shortcut).
    :param Inputs: input tensor
    :param batchNormTraining: `training` flag for the batch-norm layers; pass
        different values during training and inference
    :param batchSize: dynamic batch size forwarded to the weight layers
    :return: Inputs + F(Inputs), where F is two stacked weight layers
    '''
    residual = Inputs
    # Apply the (BatchNorm + relu + Wx_plus_b) layer twice in sequence.
    for _ in range(2):
        residual = multiChannelWeightLayer(residual, batchNormTraining=batchNormTraining, batchSize=batchSize)
    return tf.add(Inputs, residual)
|
#!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2009, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
#
# $Id$
#
#---------------------------------------------------------------------
import pdq
#
# Based on time_share.c
#
# Illustrates PDQ solver for closed uni-server queue. Compare with repair.c
#
##### Model specific variables #######################################
#pop = 200.0
pop = 100.0     # population of the closed workload (number of terminals)
think = 300.0   # think time between successive requests
servt = 0.63    # service demand at the CPU per request
##### Initialize the model giving it a name ##########################
pdq.Init("Time Share Computer")
pdq.SetComment("This is just a simple M/M/1 queue.");
##### Define the workload and circuit type ###########################
# Closed workload "compile" of terminal (TERM) type with the population and
# think time defined above.
pdq.streams = pdq.CreateClosed("compile", pdq.TERM, pop, think)
##### Define the queueing center #####################################
# Single FCFS queueing center named "CPU".
pdq.nodes = pdq.CreateNode("CPU", pdq.CEN, pdq.FCFS)
##### Define service demand ##########################################
pdq.SetDemand("CPU", "compile", servt)
##### Solve the model ################################################
# EXACT selects the exact MVA solution method.
pdq.Solve(pdq.EXACT)
pdq.Report()
#---------------------------------------------------------------------
|
import os
import json
import unittest
from unittest.mock import MagicMock
from py_client import Client, ResponseStatus
from py_client.models import LoginResponseModel
from py_client.modules.holdings_limits.models import *
from py_client.modules.holdings_limits import endpoints
from .common import create_login_model
from .mock import mock_post
class TestHoldingsLimits(unittest.TestCase):
    """
    Test holdings limits module
    """
    def setUp(self) -> None:
        """Create a client with a mocked transport and perform a fake login."""
        self.client = Client(os.getenv('API_URL'), os.getenv('SOCKET_URL'))
        # mock methods: wrap the datasource's post with the canned responder
        # and stub out the real login call.
        self.post_mock = MagicMock(wraps=mock_post)
        self.client._Client__hl.post = self.post_mock
        self.client.users.login = MagicMock(return_value=LoginResponseModel(susertoken='abcdefg'))
        # login
        self.credentials = create_login_model()
        self.token = self.client.login(self.credentials).susertoken
        assert self.token is not None

    def test_holdings(self):
        """holdings() should post the expected payload and parse the reply."""
        model = HoldingsRequestModel(
            uid=self.credentials.uid,
            actid=self.credentials.uid,
            prd='ABC123'
        )
        response = self.client.holdings(model)
        with self.subTest('request should be called with proper data'):
            expected_data = {
                "uid": self.credentials.uid,
                "actid": self.credentials.uid,
                "prd": 'ABC123'
            }
            expected_body = f'jData={json.dumps(expected_data)}&jKey={self.token}'
            self.post_mock.assert_called_with(endpoints.HOLDINGS, expected_body)
        with self.subTest('response should be parsed properly'):
            assert response is not None
            assert response.stat is not None
            if response.stat == ResponseStatus.OK:
                assert response.holdqty is not None
                assert response.colqty is not None
                assert response.btstqty is not None
                assert response.btstcolqty is not None
                assert response.usedqty is not None
                assert response.upldprc is not None
                assert response.exch_tsym is not None
                # Idiom fix: isinstance() instead of `type(x) == T` (PEP 8).
                assert isinstance(response.exch_tsym, list)
                if len(response.exch_tsym) > 0:
                    item = response.exch_tsym[0]
                    assert item.exch is not None
                    assert item.tsym is not None
                    assert item.token is not None
            else:
                assert response.emsg is not None
                assert isinstance(response.emsg, str)

    def test_limits(self):
        """limits() should post the expected payload and parse the reply."""
        model = LimitsRequestModel(
            uid=self.credentials.uid,
            actid=self.credentials.uid,
            prd='ABC123'
        )
        response = self.client.limits(model)
        with self.subTest('request should be called with proper data'):
            expected_data = {
                "uid": self.credentials.uid,
                "actid": self.credentials.uid,
                "prd": 'ABC123'
            }
            expected_body = f'jData={json.dumps(expected_data)}&jKey={self.token}'
            self.post_mock.assert_called_with(endpoints.LIMITS, expected_body)
        with self.subTest('response should be parsed properly'):
            assert response is not None
            assert response.stat is not None
            if response.stat == ResponseStatus.OK:
                assert response.cash is not None
                assert response.payin is not None
                assert response.payout is not None
                assert response.brkcollamt is not None
                assert response.unclearedcash is not None
                assert response.daycash is not None
                assert response.turnoverlmt is not None
                assert response.pendordvallmt is not None
                assert response.turnover is not None
                assert response.pendordval is not None
                assert response.marginused is not None
                assert response.mtomcurper is not None
                assert response.unmtom is not None
                assert response.grexpo is not None
                assert response.uzpnl_e_i is not None
                assert response.uzpnl_e_m is not None
                assert response.uzpnl_e_c is not None
            else:
                assert response.emsg is not None
                assert isinstance(response.emsg, str)
|
import os
import sys
import argparse
import logging
import time
import cProfile
from .builder import Builder
from .server import SampleServer
from .lexer import Lexer
from .parser import Parser
from .formatter import Formatter
from .transform import TransformMinifyScope
def makedirs(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original check-then-create, which
    could race (TOCTOU) with another process creating the same directory.
    """
    os.makedirs(path, exist_ok=True)
def copy_staticdir(staticdir, outdir):
    """Mirror `staticdir` into ``<outdir>/static``.

    Recreates the directory tree, then copies every file byte-for-byte.
    Silently does nothing when `staticdir` is falsy or does not exist.
    """
    if staticdir and os.path.exists(staticdir):
        dst_root = os.path.join(outdir, "static")
        for dirpath, dirnames, filenames in os.walk(staticdir):
            for dirname in dirnames:
                src_path = os.path.join(dirpath, dirname)
                dst_path = os.path.join(dst_root, os.path.relpath(src_path, staticdir))
                if not os.path.exists(dst_path):
                    os.makedirs(dst_path)
            for filename in filenames:
                src_path = os.path.join(dirpath, filename)
                dst_path = os.path.join(dst_root, os.path.relpath(src_path, staticdir))
                # Bug fix: files at the top level of staticdir were written
                # before <outdir>/static existed, crashing on open(); ensure
                # the destination directory exists first.
                os.makedirs(os.path.dirname(dst_path), exist_ok=True)
                with open(src_path, "rb") as rb:
                    with open(dst_path, "wb") as wb:
                        wb.write(rb.read())
def copy_favicon(builder, outdir):
    """Copy the favicon located by `builder` into ``<outdir>/favicon.ico``."""
    src = builder.find("favicon.ico")
    dst = os.path.join(outdir, "favicon.ico")
    with open(src, "rb") as rb, open(dst, "wb") as wb:
        wb.write(rb.read())
def build(outdir, index_js, staticdir=None, staticdata=None, paths=None, platform=None, minify=False, onefile=False):
    """Compile `index_js` with the Builder and write the site into `outdir`.

    :param outdir: destination directory (created if missing)
    :param index_js: entry-point source file resolved by the Builder
    :param staticdir: optional directory mirrored into <outdir>/static
    :param staticdata: extra static data passed through to the Builder
    :param paths: additional search paths for the Builder
    :param platform: target platform forwarded to the Builder
    :param minify: whether to minify the generated sources
    :param onefile: inline js/css into index.html instead of separate files
    """
    if paths is None:
        paths = []
    if staticdata is None:
        staticdata = {}
    html_path_output = os.path.join(outdir, "index.html")
    js_path_output = os.path.join(outdir, "static", "index.js")
    css_path_output = os.path.join(outdir, "static", "index.css")
    builder = Builder(paths, staticdata, platform=platform)
    css, js, html = builder.build(index_js, minify=minify, onefile=onefile)
    makedirs(outdir)
    with open(html_path_output, "w") as wf:
        # Record the exact command line as an HTML comment for provenance.
        cmd = 'daedalus ' + ' '.join(sys.argv[1:])
        wf.write("<!--%s-->\n" % cmd)
        wf.write(html)
    if not onefile:
        # Separate js/css assets are only emitted for multi-file builds.
        makedirs(os.path.join(outdir, 'static'))
        with open(js_path_output, "w") as wf:
            wf.write(js)
        with open(css_path_output, "w") as wf:
            wf.write(css)
    copy_staticdir(staticdir, outdir)
    copy_favicon(builder, outdir)
import fitbit
import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime
import os.path as osp
import os
# ************ UBC MIST Account *********
# CLIENT_ID ='22DF24'
# CLIENT_SECRET = '7848281e9151008de32698f7dd304c68'
# ************ Hooman's Account *********
# SECURITY NOTE(review): OAuth client credentials are hard-coded in plain
# text (including the commented-out pair above). They should be rotated and
# loaded from the environment or a secrets store instead of the source.
CLIENT_ID ='22D68G'
CLIENT_SECRET = '32e28a7e72842298fd5d97ce123104ca'
"""for obtaining Access-token and Refresh-token"""
# Opens a browser for the interactive OAuth2 consent flow, then extracts the
# resulting tokens from the session.
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
"""Authorization"""
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
# Date strings: the compact form (YYYYMMDD) is used for file names, the
# dashed form (YYYY-MM-DD) for the Fitbit API.
yesterday = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y%m%d")) # To avoid updating dates everyday
yesterday2 = str((datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d"))
today = str(datetime.datetime.now().strftime("%Y%m%d"))
# ****************************************************************
# ************* get heart rate data / for Yesterday ***************
# ****************************************************************
heart_rate_data_csv_address = 'Data/Heart/Heart_Rate_Data/'+ yesterday +'.csv'
if not osp.exists(osp.dirname(heart_rate_data_csv_address)):
    os.makedirs(osp.dirname(heart_rate_data_csv_address))
# Fetch at most once per day: skip if yesterday's CSV already exists.
if not osp.isfile(heart_rate_data_csv_address):
    fit_statsHR = auth2_client.intraday_time_series('activities/heart', base_date=yesterday2, detail_level='1sec') #collects data
    #put it in a readable format using Panadas
    time_list = []
    val_list = []
    for i in fit_statsHR['activities-heart-intraday']['dataset']:
        val_list.append(i['value'])
        time_list.append(i['time'])
    heartdf = pd.DataFrame({'Heart Rate':val_list,'Time':time_list})
    # saving the data locally
    heartdf.to_csv(heart_rate_data_csv_address, columns=['Time','Heart Rate'], header=True, index = False)
# ****************************************************************
# ************* Heart Rate Summary / for Today ***************
# ****************************************************************
# NOTE(review): "Heard_Summary" in this path looks like a typo for
# "Heart_Summary", but changing it would orphan previously saved files.
heart_rate_summary_csv_address = 'Data/Heart/Heard_Summary/' + today + '.csv'
if not osp.exists(osp.dirname(heart_rate_summary_csv_address)):
    os.makedirs(osp.dirname(heart_rate_summary_csv_address))
if not osp.isfile(heart_rate_summary_csv_address):
    fitbit_stats = auth2_client.intraday_time_series('activities/heart', base_date='today', detail_level='1sec')
    stats = fitbit_stats
    # get heart summary
    # TODO get the summary of calories burned in each heart rate zone and total. include it
    # Only the first heart-rate zone's max/min is recorded here.
    hsummarydf = pd.DataFrame({'Date': stats["activities-heart"][0]['dateTime'],
                               'HR max': stats["activities-heart"][0]['value']['heartRateZones'][0]['max'],
                               'HR min': stats["activities-heart"][0]['value']['heartRateZones'][0]['min']}, index=[0])
    # Appended without headers so repeated runs accumulate rows.
    hsummarydf.to_csv(heart_rate_summary_csv_address, header=False, index=False, mode='a')
# ****************************************************************
# ************* Sleep Data / for Today (last night) ***************
# ****************************************************************
sleep_data_csv_address = 'Data/Sleep/Sleep_Data/' + today + '.csv'
if not osp.exists(osp.dirname(sleep_data_csv_address)):
    os.makedirs(osp.dirname(sleep_data_csv_address))
if not osp.isfile(sleep_data_csv_address):
    # TODO Fix this part. can't get the sleep data and gets zero in return
    fit_statsSl = auth2_client.sleep(date='today')
    stime_list = []
    sval_list = []
    # for i in fit_statsSl['sleep'][0]['minuteData']:
    #     stime_list.append(i['dateTime'])
    #     sval_list.append(i['value'])
    # sleepdf = pd.DataFrame({'State':sval_list, 'Time':stime_list})
    # sleepdf['Interpreted'] = sleepdf['State'].map({'2':'Awake','3':'Very Awake','1':'Asleep'})
    #
    # sleepdf.to_csv(sleep_data_csv_address,
    #                columns = ['Time','State','Interpreted'], header=True , index = False)
# ****************************************************************
# ************* Sleep Summary / for Today (last night) ***************
# ****************************************************************
sleep_summary_csv_address = 'Data/Sleep/Sleep_Summary/' + today + '.csv'
if not osp.exists(osp.dirname(sleep_summary_csv_address)):
    os.makedirs(osp.dirname(sleep_summary_csv_address))
# if not osp.isfile(sleep_summary_csv_address):
# TODO Fix the sleep data collection from the previous TODO and fix this one after
#     fit_statsSum = fit_statsSl['sleep'][0]
#     ssummarydf = pd.DataFrame({'Date':fit_statsSum['dateOfSleep'],
#                                'MainSleep':fit_statsSum['isMainSleep'],
#                                'Efficiency':fit_statsSum['efficiency'],
#                                'Duration':fit_statsSum['duration'],
#                                'Minutes Asleep':fit_statsSum['minutesAsleep'],
#                                'Minutes Awake':fit_statsSum['minutesAwake'],
#                                'Awakenings':fit_statsSum['awakeCount'],
#                                'Restless Count':fit_statsSum['restlessCount'],
#                                'Restless Duration':fit_statsSum['restlessDuration'],
#                                'Time in Bed':fit_statsSum['timeInBed']}
#                               ,index=[0])
#
#
#     ssummarydf.to_csv(sleep_summary_csv_address)
#     # ssummarydf.to_csv('Data/Sleep/sleepsummary.csv', header=False, index=False, mode='a')
|
# Print the integers 1 through 20, one per line.
# Bug fix: `print n` is Python 2 statement syntax and is a SyntaxError under
# Python 3; use the print() function (valid in both).
n = 1
while n <= 20:
    print(n)
    n += 1
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Run: python3 cratesio-temporal-changes <path-to-crates.io-index> <path-to-dylib>
Return csv-file with resolved edges
"""
import sys
import json
from pathlib import Path
from ctypes import cdll, c_bool, c_void_p, cast, c_char_p, c_int32
# The script expects exactly two CLI arguments: the crates.io-index path and
# the path to the Rust helper dylib.
assert len(sys.argv) == 3
# Load the Rust dynamic library that implements semver matching/comparison.
RUST = cdll.LoadLibrary(sys.argv[2])
### is_match function
# is_match(req, ver): does version `ver` satisfy the requirement `req`?
# Both arguments are passed as raw (ascii-encoded) byte pointers.
RUST.is_match.argtypes = (c_void_p,c_void_p)
RUST.is_match.restype = c_bool
### cmp function
# cmp(a, b): three-way semver comparison returning a signed int32.
RUST.cmp.argtypes = (c_void_p,c_void_p)
RUST.cmp.restype = c_int32
### Helper function
def cmp_to_key(mycmp):
    '''Convert a cmp= function into a key= function.

    Delegates to the standard library: ``functools.cmp_to_key`` is the
    canonical (C-accelerated) implementation of exactly this wrapper class,
    so there is no need to hand-roll the comparison object. The returned
    key objects compare via ``mycmp`` just like the original ``K`` class.
    '''
    from functools import cmp_to_key as _stdlib_cmp_to_key
    return _stdlib_cmp_to_key(mycmp)
### Examples
# ver_list = "3.0.0|1.0.0|1.0.1|1.1.0|2.0.0|2.0.1|2.1.0|2.10.0|2.11.0|2.12.0|2.13.0|2.14.0|2.15.0|2.16.0|2.17.0|2.17.1|2.2.0|2.2.1|2.3.0|2.3.1|2.4.0|2.5.0|2.5.1|2.6.0|2.6.1|2.7.0|2.8.0|2.9.0".split('|')
# print(sorted(ver_list, key=cmp_to_key(RUST.cmp)))
# VER = "0.1.0"
# REQ = "^0.1.0"
# print('Does {} match {}? {}'.format(VER, REQ, RUST.is_match(REQ.encode('ascii'), VER.encode('ascii'))))
# Map package name -> list of published version strings (in index order).
version_registry= {}
# Every (package, version) index entry, in file order.
package_versions = list()
###
### Create version table
###
for path in Path(sys.argv[1]).glob('**/*'):
    # Skip the index's config.json and anything under the .git directory.
    if path.is_file() and "config.json" not in path.name and "crates.io-index/.git/" not in str(path):
        with path.open() as idx_fh:
            # Each line of an index file is one JSON document per version.
            for raw_entry in idx_fh.readlines():
                entry = json.loads(raw_entry)
                package_versions.append(entry)
                if entry['name'] not in version_registry:
                    version_registry[entry['name']] = list()
                version_registry[entry['name']].append(entry['vers'])
with open("resolved_graph.csv", "w") as graph_file:
    graph_file.write("source_name,source_version,target_name,target_version\n")
    for rev in package_versions:
        if 'deps' in rev:
            for dep in rev['deps']:
                # NOTE(review): crates.io dependency kinds are "normal",
                # "build" and "dev" — the "builds" literal here looks like a
                # typo for "build" and would never match; confirm intent.
                if dep['name'] in version_registry and 'kind' in dep and (dep['kind'] == 'normal' or dep['kind'] == 'builds'):
                    # All published versions satisfying the semver requirement,
                    # as judged by the Rust helper.
                    valid_vers = [ver for ver in version_registry[dep['name']] if RUST.is_match(dep['req'].encode('ascii'), ver.encode('ascii'))]
                    if len(valid_vers) > 0:
                        # NOTE(review): pop() takes the last match in index
                        # order, not necessarily the maximum version.
                        row = "{},{},{},{}\n".format(rev['name'],rev['vers'],dep['name'],valid_vers.pop())
                        graph_file.write(row)
|
import yaml
import argparse
from source.knn import create_knn
from source.cdp import cdp
def main():
    """Parse CLI args, merge the YAML config into them, and run KNN + CDP."""
    parser = argparse.ArgumentParser(description="CDP")
    parser.add_argument('--config', default='', type=str)
    args = parser.parse_args()
    with open(args.config) as f:
        # Bug fix: yaml.load() without an explicit Loader is unsafe on
        # untrusted input and raises TypeError on PyYAML >= 6; safe_load
        # parses plain configuration data correctly.
        config = yaml.safe_load(f)
    # Promote every config entry to an attribute on the args namespace.
    for k,v in config.items():
        setattr(args, k, v)
    assert isinstance(args.committee, list), "committee should be a list of strings"
    create_knn(args)
    cdp(args)
if __name__ == "__main__":
    main()
|
import subprocess
import configparser
import datetime
import tempfile
import shutil
import jinja2
import os
# Interactive helper: creates a bare git repository under /srv/git, seeds it
# with a README plus optional .gitignore/LICENSE, and optionally publishes it
# through the stagit web frontend.
repo_name = input("Input repository name: ").replace(" ", "_")
repo_path = os.path.join("/", "srv", "git", repo_name + ".git")
if os.path.exists(repo_path):
    print("ERROR: A repository with that name already exists. Please try another")
    exit()
os.mkdir(repo_path)
cwd = os.getcwd()
os.chdir(repo_path)
subprocess.run(["git", "init", "--bare"])
description = input("Input repository description: ")
with open(os.path.join(repo_path, "description"), "w") as f:
    f.write(description)
author = input("Input repository author: ")
with open(os.path.join(repo_path, "author"), "w") as f:
    f.write(author)
with open(os.path.join(repo_path, "url"), "w") as f:
    f.write("git@eda.gay:" + repo_name)
# Answering "n" marks the repository private (kept off the web frontend).
private = input("Would you like the repository to appear on the web version git.eda.gay? <y/n>: ").lower() == "n"
accessstr = "git@git.eda.gay:" + str(repo_path)
if input("Would you like the repository to remain bare? Useful for making mirrors of Github repos. <y/n>: ").lower() != "y":
    with tempfile.TemporaryDirectory() as tempdir:
        subprocess.run(["git", "clone", accessstr, tempdir])
        os.chdir(tempdir)
        with open("README.md", "w") as f:
            f.write("# %s\n\n%s\n" % (repo_name, description))
        gitignore_templates_dir = "/home/eden/gitignore/"
        templates = sorted([f[:-10] for f in os.listdir(gitignore_templates_dir) if f.endswith(".gitignore")])
        templates.insert(0, "[None]")
        # Menu entries are numbered from 1; entry 1 is the "[None]" placeholder.
        for i, template in enumerate(templates, 1):
            print("%3d: %-23s" % (i, template), end = "")
            if i % 4 == 0:
                print("")
        selected_index = int(input("\nSelect .gitignore template: "))
        # Bug fix: entry 1 is "[None]" — selecting it previously tried to copy
        # a nonexistent "[None].gitignore". Skip for 1 (and the old 0 shortcut).
        if selected_index > 1:
            shutil.copy(os.path.join(gitignore_templates_dir, templates[selected_index - 1]) + ".gitignore", ".gitignore", follow_symlinks = True)
        licenses_templates_dir = "/home/eden/license-templates/templates/"
        templates = sorted([f[:-4] for f in os.listdir(licenses_templates_dir) if not f.endswith("-header.txt")])
        templates.insert(0, "[None]")
        for i, template in enumerate(templates, 1):
            print("%2d: %-22s" % (i, template), end = "")
            if i % 4 == 0:
                print("")
        selected_index = int(input("\nSelect license template: "))
        # Same guard as above: entry 1 is the "[None]" placeholder.
        if selected_index > 1:
            with open(os.path.join(licenses_templates_dir, templates[selected_index - 1]) + ".txt", "r") as f:
                jinja_template = jinja2.Template(f.read())
            with open("LICENSE", "w") as f:
                f.write(jinja_template.render(**{
                    "year": str(datetime.datetime.today().year),
                    "organization": author,
                    "project": repo_name
                }))
        subprocess.run(["git", "add", "-A"])
        subprocess.run(["git", "commit", "-m", "Initialized repository"])
        subprocess.run(["git", "push", "origin", "master"])
# user input in an executed string? YIKES
# to run this you have to have ssh access anyway soo....
# still bad form though tbh
if not private:
    subprocess.run(["ssh", "git@192.168.1.92", "cd /media/git/html/ && mkdir %s && cd %s && stagit ../../%s.git/" % (repo_name, repo_name, repo_name)])
    with open(os.path.join(repo_path, "hooks", "post-receive"), "w") as f:
        f.write("#!/bin/sh\n\n")
        f.write("ssh git@192.168.1.92 'cd /media/git/html/%s && stagit ../../%s.git/'\n" % (repo_name, repo_name))
        f.write("python3 /home/git/remake_index.py\n")
else:
    with open("/home/git/git/private_repos.txt", "a") as f:
        f.write("%s.git\n" % repo_name)
os.chdir(cwd)
# Importing remake_index regenerates the index page as an import side effect.
import remake_index
subprocess.run(["ln", "-s", repo_path, repo_name])
subprocess.run(["ln", "-s", repo_path, repo_name + ".git"])
gitconf = configparser.ConfigParser()
gitconf.read("/srv/git/github.conf")
print("""
Repository created. You can now clone or add remote:
git remote add other %s
%s
git clone %s
And add github mirror (insecure method, keys are stored locally):
git remote add github https://%s:%s@github.com/%s/%s
""" % (accessstr, "git@eda.gay:" + repo_name, accessstr, gitconf.get("github", "user"), gitconf.get("github", "key"), gitconf.get("github", "user"),repo_name ))
|
import pygame as pg
import sys, time
import random
pg.init()
#initialization of certain variables
# mode 0 = edit (user toggles cells/starts run), mode 1 = simulation running
mode = 0
#can change screen size
size = (width, height) = (1280, 720)
# RGB colour constants used for cells, background and the mode button
sea = (46,139,87)
pink = (254,171,185)
Background = (0,144,158)
red = (255,0,0)
green = (0,255,0)
#Can change squareSize
squareSize = 4
#initialization of screen
screen = pg.display.set_mode(size)
pg.display.set_caption('Cellular Automata')
screen.fill(Background)
# grid dimensions in cells (leaves a 20px/40px margin for the button row)
screenX = (width-20)//squareSize
screenY = (height-40)//squareSize
#array to hold current state and screen state
# screenArray: current cell states; arrayMem: previous-generation snapshot
screenArray = {}
arrayMem = {}
for i in range(screenY):
    screenArray[i] = [0]*screenX
    arrayMem[i] = [0]*screenX
#draw button
# red button = edit mode; it is redrawn green while the simulation runs
pg.draw.rect(screen, red, pg.Rect(10, height-30, 20, 20) )
def updateAllSquares():
    """Redraw every grid cell from screenArray (dead = sea, alive = pink)."""
    for row_i in range(screenY):
        for col_i in range(screenX):
            color = sea if screenArray[row_i][col_i] == 0 else pink
            pg.draw.rect(screen, color, pg.Rect(squareSize+col_i*squareSize, squareSize+row_i*squareSize, squareSize, squareSize))
def updateSquare(j,i):
    """Redraw the single cell at column j, row i from screenArray."""
    color = sea if screenArray[i][j] == 0 else pink
    pg.draw.rect(screen, color, pg.Rect(squareSize+j*squareSize, squareSize+i*squareSize, squareSize, squareSize))
def randSquare():
    """Randomly initialise the grid (each cell alive with ~40% chance),
    then redraw the whole board and flip the display.

    Mutates the global screenArray in place.
    """
    # Seed once per call: the original reseeded from the OS inside the
    # inner loop for every cell, which is slow and adds nothing.
    random.seed()
    for i in range(screenY):
        for j in range(screenX):
            screenArray[i][j] = 1 if random.randint(0, 100) < 40 else 0
    updateAllSquares()
    pg.display.flip()
#random generation
randSquare()
#mainloop
while(1):
    for event in pg.event.get():
        if(event.type == pg.QUIT):
            sys.exit()
    #check click
    click = pg.mouse.get_pressed()
    #if mode is edit(which is 0), let user change mode or square state
    if(mode == 0):
        if(click[0]==True):
            position = pg.mouse.get_pos()
            # Click on the 20x20 button at the bottom-left starts the run mode.
            if(position[0]<=30) and (position[0]>=10):
                if(position[1]>=height-30) and (position[1]<=height-10):
                    mode = 1
                    pg.draw.rect(screen, green, pg.Rect(10, height-30, 20, 20) )
                    pg.display.flip()
                    time.sleep(0.1)
                    click = (0,0,0)
            # Click inside the grid toggles the clicked cell's state.
            if(position[0]<=(screenX*squareSize)+squareSize) and (position[0]>=squareSize):
                if(position[1]>=squareSize) and (position[1]<=(screenY*squareSize)+squareSize):
                    x = (position[0]-squareSize)//squareSize
                    y = (position[1]-squareSize)//squareSize
                    row = screenArray[y]
                    if(row[x] == 0):
                        row[x] = 1
                    else:
                        row[x] = 0
                    updateSquare(x,y)
                    pg.display.flip()
                    time.sleep(0.5)
                    click = (0,0,0)
    #while mode is run(which is 1), simulation is run, pressing any key stops simulation
    while(mode == 1):
        #copy screen state into array state
        #screen state changes before memory state so newborn cells do not affect current calculations
        for y in range(screenY):
            for x in range(screenX):
                arrayMem[y][x] = screenArray[y][x]
        for y in range(screenY):
            row = arrayMem[y]
            # NOTE(review): top/bottom are computed but never read below —
            # the neighbour sum uses Ctop/Cbot instead.
            top = arrayMem[(y-1)%(screenY)]
            bottom = arrayMem[(y+1)%(screenY)]
            for x in range(screenX):
                total = 0
                #calculate neighbouring cell locations
                # NOTE(review): these wrap with %(screenY-1)/%(screenX-1) while
                # top/bottom above wrap with %screenY — the last row/column is
                # effectively excluded from the toroidal wraparound; confirm
                # whether that is intended.
                Ctop = (y-1)%(screenY-1)
                Cbot = (y+1)%(screenY-1)
                Cwest = (x-1)%(screenX-1)
                Ceast = (x+1)%(screenX-1)
                #calculate total neighbours of each cell
                total = arrayMem[Ctop][x]+arrayMem[Cbot][x]+arrayMem[y][Ceast]+arrayMem[y][Cwest]+arrayMem[Ctop][Ceast]+arrayMem[Ctop][Cwest]+arrayMem[Cbot][Ceast]+arrayMem[Cbot][Cwest]
                #uncomment if desired
                """if total!=0:
                    print("row: %d column: %d total: %d"%(y,x,total))
                    print("N: %d %d S: %d %d E: %d %d W: %d %d NE: %d %d NW: %d %d SE: %d %d SW: %d %d "%(Ctop, x, Cbot, x, y, Ceast, y, Cwest, Ctop, Ceast, Ctop, Cwest, Cbot, Ceast, Cbot, Cwest))
                    print("Val: %d %d %d %d %d %d %d %d"%(arrayMem[Ctop][x],arrayMem[Cbot][x],arrayMem[y][Ceast],arrayMem[y][Cwest],arrayMem[Ctop][Ceast],arrayMem[Ctop][Cwest],arrayMem[Cbot][Ceast],arrayMem[Cbot][Cwest]))
                    print("calc: %d %d %d %d %d %d %d %d")
                    print("_________________________________")"""
                #implement the rules of Conway's Game of Life
                if(row[x]==1):
                    if(total<2) or (total>3):
                        screenArray[y][x] = 0
                if(row[x]==0) and total==3:
                    screenArray[y][x] = 1
        #display new state and check if user pressed space to pause
        updateAllSquares()
        #uncomment if simulation is too fast
        #time.sleep(0.1)
        pg.display.flip()
        events = pg.event.get()
        for event in events:
            if(event.type==pg.KEYDOWN):
                if event.key==pg.K_SPACE:
                    # Space returns to edit mode and restores the red button.
                    mode = 0
                    pg.draw.rect(screen, red, pg.Rect(10, height-30, 20, 20) )
                    pg.display.flip()
                    time.sleep(0.1)
import AMC
import time
# Demo script: drive one positioner axis of an attocube AMC controller,
# open-loop if an open-loop positioner is detected, closed-loop otherwise.
IP = "192.168.1.1"
# Setup connection to AMC
amc = AMC.Device(IP)
amc.connect()
# Activate axis 1
# Internally, axes are numbered 0 to 2
axis = 0 # Axis 1
amc.control.setControlOutput(axis, True)
# Check if open loop positioner is connected
OL = 1  # status value reported for an open-loop positioner
if amc.status.getOlStatus(axis) == OL:
    # Continuous open loop drive forward for 1 second
    # Start
    amc.move.setControlContinuousFwd(axis, True)
    time.sleep(1)
    # Stop
    amc.move.setControlContinuousFwd(axis, False)
    # Stepwise open loop drive forward
    nSteps = 10 # Number of steps, /PRO-feature required for nSteps > 1
    backwards = False
    # Perform nSteps steps
    amc.move.setNSteps(axis, backwards, nSteps)
else:
    # Closed loop drive 10000nm in forward direction
    position = amc.move.getPosition(axis)
    amc.move.setControlTargetPosition(axis, position + 10000)
    amc.control.setControlMove(axis, True)
    # Poll until the controller reports the target window has been reached
    while not amc.status.getStatusTargetRange(axis):
        # Read out position in nm
        position = amc.move.getPosition(axis)
        print(position)
        time.sleep(0.1)
    # Stop approach
    amc.control.setControlMove(axis, False)
# Deactivate axis
amc.control.setControlOutput(axis, False)
# Close connection
amc.close()
|
"""Strategy Design Pattern
Here we design strategies as classes.
"""
class MyClass:
    """Context object that delegates its work to a pluggable strategy.

    A strategy is any object exposing ``method1(name)`` and ``method2(name)``.
    """

    class _BasicStrategy:
        """Fallback strategy used when no strategy is supplied."""

        def method1(self, name):
            print("Dummy1", name)

        def method2(self, name):
            print("Dummy2", name)

    def __init__(self, strategy=None):
        """Store *strategy*, or a fresh fallback instance when omitted.

        Args:
            strategy: object with ``method1``/``method2``; defaults to the
                built-in dummy strategy.
        """
        # Fix: the original re-defined the BasicStrategy class inside
        # __init__ on every instantiation; it is now defined once at
        # class scope. The explicit None check also avoids silently
        # discarding a falsy (but valid) strategy object.
        self.strategy = strategy if strategy is not None else self._BasicStrategy()
        self.name = "Strategy Example!"

    def run(self):
        """Invoke both strategy methods with this context's name."""
        self.strategy.method1(self.name)
        self.strategy.method2(self.name)
class StrategyA:
    """Concrete strategy variant A: prints messages tagged 'StrategyA'."""
    def method1(self, name):
        print("StrategyA, method1:", name)
    def method2(self, name):
        print("StrategyA, method2:", name)
class StrategyB:
    """Concrete strategy variant B: prints messages tagged 'StrategyB'."""
    def method1(self, name):
        print("StrategyB, method1:", name)
    def method2(self, name):
        print("StrategyB, method2:", name)
if __name__ == "__main__":
    # Demonstrate swapping strategies at construction time: the default
    # dummy strategy, then variants A and B.
    for label, ctx in (
        ("strat0", MyClass()),
        ("strat1", MyClass(StrategyA())),
        ("strat2", MyClass(StrategyB())),
    ):
        print(label + ":")
        ctx.run()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-24 17:08
# pylint: skip-file
from django.db import migrations
class Migration(migrations.Migration):
    # Renames DataServerMetadata to the grammatically singular
    # DataServerMetadatum; RenameModel preserves the existing rows.
    dependencies = [
        ('passive_data_kit', '0026_dataservermetadata'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='DataServerMetadata',
            new_name='DataServerMetadatum',
        ),
    ]
|
import numpy as np
def best_values(series, nb=None, min_count=None):
    """Return unique values in series ordered by most to less occurrences"""
    counts = series.value_counts()
    if min_count:
        # Keep only values seen strictly more than min_count times.
        counts = counts[counts > min_count]
    limit = len(counts) if nb is None else nb
    return list(counts.index[:limit])
def required_values(series, target_ratio):
    """Return unique values required to cover ratio of usage in series

    Args:
        series (pandas.Series): list values
        target_ratio (float): ratio of total usage to reach

    nans are not counted as 'usage'
    for instance in [1, numpy.nan, 2], 1 represents 50% of usage
    """
    item_occurrences = series.value_counts()
    total_occurrences = sum(item_occurrences)
    target = target_ratio * total_occurrences
    items, usage = [], 0
    # Greedily take the most frequent values until the target is exceeded.
    # Fix: Series.iteritems() was removed in pandas 2.0; items() is the
    # supported equivalent.
    for item, occurrences in item_occurrences.items():
        usage += occurrences
        items.append(item)
        if usage > target:
            break
    prc = 100 * len(items) / len(item_occurrences)
    t_prc = 100 * (usage / total_occurrences)
    print(
        f". {len(items)} / {len(item_occurrences)} ({prc:0.2f}%)"
        f" items required to reach {t_prc:0.2f}% of usage"
    )
    return items
def required_weights(weights, target_ratio, as_ratio=False):
    """Return weights required to reach target_ratio"""
    selected = weights[required_weights_i(weights, target_ratio, as_ratio=as_ratio)]
    coverage_prc = 100 * len(selected) / len(weights)
    # When weights are already ratios their sum is the reached fraction;
    # otherwise normalise by the total weight.
    reached_prc = 100 * sum(selected) if as_ratio else 100 * (sum(selected) / sum(weights))
    unit = 'ratio' if as_ratio else 'weight'
    suffix = "" if as_ratio else " of total weight"
    print(
        f". {len(selected)} / {len(weights)} ({coverage_prc:0.2f}%)"
        f" {unit}s required to reach {reached_prc:0.2f}%" + suffix
    )
    return selected
def required_weights_i(weights, target_ratio, as_ratio=False):
    """Return indexes of weights required to reach target_ratio"""
    assert (weights >= 0).all()
    # Interpret weights as ratios directly, or normalise them first.
    ratios = weights if as_ratio else weights / sum(weights)
    order = np.argsort(-ratios)  # heaviest first
    selected = []
    covered = 0
    for idx in order:
        if covered >= target_ratio:
            break
        covered += ratios[idx]
        selected.append(idx)
    return np.array(selected)
|
# Generated by Django 2.2.9 on 2019-12-28 17:00
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    # Initial migration: creates ViettelUser (unique phone number) and
    # Shake (JSON payload linked to a user; PROTECT prevents deleting a
    # user that still has shakes).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ViettelUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Shake',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='shake.ViettelUser')),
            ],
        ),
    ]
|
from . import Choice, Result_T
from .boolean import BooleanPrompt
from .select import SelectPrompt
from .text import TextPrompt
class FChoice(Choice[Result_T]):
    """Choice whose display name is simply the string form of its value."""

    def __init__(self, value: Result_T):
        """Keep *value* as the payload and label the choice with str(value)."""
        self.data = value
        self.name = str(self.data)
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from blocks_utils import *
from model.vgg_bn import *
from utils.en_decode import *
from utils.prior_box import *
from utils.config import priorbox_config, detect_config
from utils.multibox_loss import *
from utils.box_utils import *
class Refinedet(object):
    """RefineDet detector: a VGG-BN backbone plus extra SSD layers feed the
    anchor refinement module (ARM, binary object/background), and transfer
    connection blocks (TCB) build a top-down path for the object detection
    module (ODM, full num_class classification) over shared prior boxes.
    """
    def __init__(self, num_class, data_format='channels_last', priorbox_config=priorbox_config, detect_config=detect_config):
        """Build sub-modules and precompute prior boxes.

        Args:
            num_class: number of object classes (background included).
            data_format: 'channels_last' or 'channels_first'.
            priorbox_config: prior-box geometry configuration.
            detect_config: detection post-processing configuration.
        """
        super(Refinedet, self).__init__()
        self._num_class = num_class
        self._data_format = data_format
        self._priorbox_config = priorbox_config
        self._priorboxes_obj = Priorbox(self._priorbox_config.input_shape, self._priorbox_config.feature_shapes,
                                        self._priorbox_config.min_sizes, self._priorbox_config.max_sizes,
                                        self._priorbox_config.aspect_ratios, self._priorbox_config.steps,
                                        self._priorbox_config.offset)
        self._priorboxes = self._priorboxes_obj()
        self._variances = self._priorbox_config.variances
        self._num_pirors_depth_per_layer = self._priorboxes_obj.num_priors_depth_per_layer
        self._detect_config = detect_config
        self._feature_extracter = VGG_BN_Backbone(data_format=self._data_format)
        self._extra_layers = self.__extra_layers(data_format=self._data_format, name='extra_layers')
        self._tcb_layers = self.__tcb_layers(data_format=self._data_format, name='tcb')
        # ARM scores object-vs-background (2 classes); ODM classifies the
        # refined anchors over all num_class classes.
        self._arm_heads = self.__multibox_heads(2, self._num_pirors_depth_per_layer, data_format=self._data_format, name='arm')
        self._odm_heads = self.__multibox_heads(self._num_class, self._num_pirors_depth_per_layer, data_format=self._data_format, name='odm')
    def __extra_layers(self, data_format='channels_last', name=None):
        """Extra SSD conv-bn blocks (conv8/conv9) appended after the backbone."""
        extra_layers = []
        with tf.variable_scope(name) as scope:
            extra_layers.append(ssd_conv_bn_block(256, 2, data_format=data_format, name='conv8'))
            extra_layers.append(ssd_conv_bn_block(128, 2, data_format=data_format, name='conv9'))
        return extra_layers
    def __tcb_layers(self, data_format='channels_last', name=None):
        """Transfer connection blocks, one per ARM feature map; the deepest
        block (conv9) has no deconv input from a higher level."""
        tcb_layers = []
        with tf.variable_scope(name) as scope:
            tcb_layers.append(transfer_connection_block(data_format=data_format, name='conv4_bn_block_tcb'))
            tcb_layers.append(transfer_connection_block(data_format=data_format, name='fc7_m_tcb'))
            tcb_layers.append(transfer_connection_block(data_format=data_format, name='conv8_tcb'))
            tcb_layers.append(transfer_connection_block(data_format=data_format, has_deconv_layer=False, name='conv9_tcb'))
        return tcb_layers
    def __multibox_heads(self, num_class, num_pirors_depth_per_layer, padding='same', data_format='channels_last', use_bias=True, name=None):
        """Per-feature-map 3x3 conv prediction heads.

        Returns:
            (cls_pred, loc_pred): lists of Conv2D layers, one pair per
            feature map, predicting num_prior*num_class scores and
            num_prior*4 box offsets respectively.
        """
        cls_pred = []
        loc_pred = []
        with tf.variable_scope(name) as scope:
            for idx, num_prior in enumerate(num_pirors_depth_per_layer):
                cls_pred.append(tf.layers.Conv2D(filters=num_prior * num_class, kernel_size=3, strides=1, padding=padding,
                                                 data_format=data_format, activation=None, use_bias=use_bias,
                                                 kernel_initializer=conv_initializer(),
                                                 bias_initializer=tf.zeros_initializer(),
                                                 name='cls_{}'.format(idx), _scope='cls_{}'.format(idx),
                                                 _reuse=None))
                loc_pred.append(tf.layers.Conv2D(filters=num_prior * 4, kernel_size=3, strides=1, padding=padding,
                                                 data_format=data_format, activation=None, use_bias=use_bias,
                                                 kernel_initializer=conv_initializer(),
                                                 bias_initializer=tf.zeros_initializer(),
                                                 name='loc_{}'.format(idx), _scope='loc_{}'.format(idx),
                                                 _reuse=None))
        return cls_pred, loc_pred
    def __odm_fpn(self, arm_features, tcb_layers, use_bn_tcb=False, training=False):
        """Top-down FPN-style pass: run each ARM feature through its TCB,
        adding the upsampled deeper ODM feature where a deconv exists.

        Returns the ODM features in the same (shallow-to-deep) order as
        arm_features.
        """
        assert len(arm_features) == len(tcb_layers), 'arm detect head number must equal to odm detect head number!'
        odm_features = []
        # Index of the last TCB layer applied before the elementwise add.
        concat_idx = 2 if not use_bn_tcb else 4
        # Walk deepest-first so the upsampled feature is already available.
        for feature, tcb_block in list(zip(arm_features, tcb_layers))[::-1]:
            if 'upsample' not in tcb_block.keys():
                feature = forward_block(feature, tcb_block['tcb'], training=training)
            else:
                tcb_block_concat_before = tcb_block['tcb'][:concat_idx + 1]
                feature_concat_before = forward_block(feature, tcb_block_concat_before, training=training)
                upsample_block = tcb_block['upsample']
                feature_up_concat_before = forward_block(odm_features[-1], upsample_block, training=training)
                feature_concat = feature_concat_before + feature_up_concat_before
                tcb_block_concat_after = tcb_block['tcb'][concat_idx + 1:]
                feature = forward_block(feature_concat, tcb_block_concat_after, training=training)
            odm_features.append(feature)
        return odm_features[::-1]
    def __pred_head(self, features, heads, training=False):
        """Apply the (cls, loc) head pair to each feature map."""
        pred_cls = []
        pred_loc = []
        head_cls, head_loc = heads
        forward_predict = lambda x: forward_block(x[0], x[1], training=training)
        for idx, feature in enumerate(features):
            pred_cls.append(forward_predict([feature, head_cls[idx]]))
            pred_loc.append(forward_predict([feature, head_loc[idx]]))
        return pred_cls, pred_loc
    def forward(self, inputs, training=False):
        """Full forward pass.

        Returns:
            (arm_pred_cls, arm_pred_loc, odm_pred_cls, odm_pred_loc),
            each a list of per-feature-map prediction tensors.
        """
        arm_features = []
        extract_features = self._feature_extracter.forward(inputs, training=training)
        # The backbone returns two feature maps; the deeper one feeds the
        # extra layers below.
        _, inputs = extract_features
        arm_features.extend(extract_features)
        for extra_layer_block in self._extra_layers:
            inputs = forward_block(inputs, extra_layer_block, training=training)
            arm_features.append(inputs)
        odm_features = self.__odm_fpn(arm_features, self._tcb_layers, training=training)
        arm_pred_cls, arm_pred_loc = self.__pred_head(arm_features, self._arm_heads, training=training)
        odm_pred_cls, odm_pred_loc = self.__pred_head(odm_features, self._odm_heads, training=training)
        return arm_pred_cls, arm_pred_loc, odm_pred_cls, odm_pred_loc
    def predictions_transform(self, features, num_class):
        """Flatten per-map predictions to one (batch, num_anchors, num_class)
        tensor, converting to channels_last first if necessary."""
        if self._data_format == 'channels_first':
            features = [tf.transpose(feature, [0, 2, 3, 1]) for feature in features]
        features = [tf.reshape(feature, [tf.shape(feature)[0], -1, num_class]) for feature in features]
        return tf.concat(features, axis=1)
    def refinedet_multibox_loss(self, predictions, targets, ohem=True, negative_ratio=3):
        """Compute the ARM and ODM multibox losses.

        Args:
            predictions: (arm_cls, arm_loc, odm_cls, odm_loc) lists from forward().
            targets: (gt_labels, gt_bboxes, num_reals).
            ohem: unused here — presumably handled inside multibox_loss; TODO confirm.
            negative_ratio: hard-negative ratio used to rescale the conf losses.

        Returns:
            (arm_conf_loss, arm_loc_loss, odm_conf_loss, odm_loc_loss).
        """
        gt_labels, gt_bboxes, num_reals = targets
        arm_cls, arm_loc, odm_cls, odm_loc = predictions
        arm_cls = self.predictions_transform(arm_cls, 2)
        arm_loc = self.predictions_transform(arm_loc, 4)
        odm_cls = self.predictions_transform(odm_cls, self._num_class)
        odm_loc = self.predictions_transform(odm_loc, 4)
        with tf.name_scope('arm_loss'):
            arm_conf_loss, arm_loc_loss = multibox_loss([arm_cls, arm_loc], gt_labels, gt_bboxes, num_reals, self._num_class, self._priorboxes, self._variances)
        with tf.name_scope('odm_loss'):
            # The ODM loss also receives the ARM outputs (use_arm=True) so it
            # can filter/refine anchors before matching.
            odm_conf_loss, odm_loc_loss = multibox_loss([arm_cls, arm_loc, odm_cls, odm_loc], gt_labels, gt_bboxes, num_reals, self._num_class, self._priorboxes, self._variances, iou_threshold=0.65, use_arm=True)
        # NOTE(review): conf losses are scaled by (1 + negative_ratio) —
        # presumably to compensate for averaging over pos+neg samples; confirm
        # against the loss implementation.
        return arm_conf_loss * (1 + negative_ratio), arm_loc_loss, odm_conf_loss * (1 + negative_ratio), odm_loc_loss
    def detect(self, predictions):
        """Decode raw predictions into per-image detections.

        Returns:
            (detection_scores, detection_classes, detection_bboxes), each
            concatenated across the batch.
        """
        arm_cls, arm_loc, odm_cls, odm_loc = predictions
        arm_cls = self.predictions_transform(arm_cls, 2)
        arm_loc = self.predictions_transform(arm_loc, 4)
        odm_cls = self.predictions_transform(odm_cls, self._num_class)
        odm_loc = self.predictions_transform(odm_loc, 4)
        # Named identity ops so the tensors are addressable in a frozen graph.
        tf.identity(arm_cls, name='arm_cls')
        tf.identity(arm_loc, name='arm_loc')
        tf.identity(odm_cls, name='odm_cls')
        tf.identity(odm_loc, name='odm_loc')
        # Default arguments bind the instance config once, so the closure is
        # safe to hand to tf.map_fn.
        def decode_fn(arm_cls, arm_loc, odm_cls, odm_loc, priorboxes=self._priorboxes, variances=self._variances, num_class=self._num_class, config=self._detect_config):
            #arm_loc = transform_yx2xy(arm_loc)
            #odm_loc = transform_yx2xy(odm_loc)
            # Two-step decode: ARM offsets refine the priors, then ODM offsets
            # decode boxes relative to the refined priors.
            priorboxes_refine = to_center(decode(arm_loc, priorboxes, variances))
            decode_bboxes = decode(odm_loc, priorboxes_refine, variances)
            arm_conf_pred = tf.nn.softmax(arm_cls)
            odm_conf_pred = tf.nn.softmax(odm_cls)
            # Zero out ODM scores for anchors the ARM considers background.
            arm_filter_mask = arm_conf_pred[:, 1] > config.filter_obj_score
            arm_filter_mask = tf.matmul(tf.cast(tf.reshape(arm_filter_mask, shape=[-1, 1]), dtype=tf.float32), tf.ones(shape=[1, num_class]))
            odm_conf_pred = tf.multiply(odm_conf_pred, arm_filter_mask)
            # Drop the background column before NMS/post-processing.
            odm_conf_pred = tf.slice(odm_conf_pred, [0, 1], [tf.shape(odm_conf_pred)[0], num_class - 1])
            detection_scores, detection_classes, detection_bboxes = detect_post_process(odm_conf_pred, decode_bboxes, self._num_class, config)
            return detection_scores, detection_classes, detection_bboxes
        lamb_decode_fn = lambda x: decode_fn(x[0], x[1], x[2], x[3])
        det_results = tf.map_fn(lamb_decode_fn, (arm_cls, arm_loc, odm_cls, odm_loc), dtype=(tf.float32, tf.int64, tf.float32), back_prop=False)
        detection_scores = tf.concat(det_results[0], axis=0)
        detection_classes = tf.concat(det_results[1], axis=0)
        detection_bboxes = tf.concat(det_results[2], axis=0)
        return detection_scores, detection_classes, detection_bboxes
|
import logging
from typing import Union
import discord
from discord.ext import commands
log = logging.getLogger(__name__)
class DebugMessageLogger(commands.Cog):
    """Cog that logs every message the bot can see, for debugging."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message):
        """Log '[guild#channel] author: content' for each incoming message."""
        # Fix: a one-element Union[...] annotation is meaningless — use the
        # plain type directly.
        channel: discord.abc.Messageable = message.channel
        channel_str: str = str(channel)
        if isinstance(channel, discord.abc.GuildChannel):
            # Prefix guild channels with the guild name for disambiguation.
            channel_str = str(channel.guild) + '#' + channel_str
        author: discord.abc.User = message.author
        # %-style args are formatted lazily, only when the record is emitted.
        log.info('[%s] %s: %s', channel_str, author, message.content)
def setup(bot: commands.Bot):
    """Extension entry point: attach the debug message logger cog to *bot*."""
    cog = DebugMessageLogger(bot)
    bot.add_cog(cog)
def teardown(bot: commands.Bot):
    """Extension unload hook; nothing to clean up."""
|
## Import the required modules
# Check time required
import time
time_start = time.time()
import sys
import os
import argparse as ap
import math
import imageio
from moviepy.editor import *
import numpy as np
# Make the repository root importable (this script lives one level down).
sys.path.append(os.path.dirname(__file__) + "/../")
# NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2; this
# pins an old SciPy version — confirm the target environment.
from scipy.misc import imread, imsave
from config import load_config
from dataset.factory import create as create_dataset
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
# Font used for the on-frame statistics overlay.
font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 12)
import random
# for object-tracker
import dlib
import video_pose
####################
# Load the multi-person pose config and build the detection pipeline.
cfg = load_config("demo/pose_cfg_multi.yaml")
dataset = create_dataset(cfg)
sm = SpatialModel(cfg)
sm.load()
draw_multi = PersonDraw()
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
##########
## Get the source of video
parser = ap.ArgumentParser()
parser.add_argument('-f', "--videoFile", help="Path to Video File")
parser.add_argument('-w', "--videoWidth", help="Width of Output Video")
parser.add_argument('-o', "--videoType", help="Extension of Output Video")
args = vars(parser.parse_args())
if args["videoFile"] is not None:
    video_name = args["videoFile"]
else:
    print("You have to input videoFile name")
    sys.exit(1)
# Output basename: input filename with its extension stripped.
video_output_name = video_name.split('.')[0]
video = video_pose.read_video(video_name)
print("Input video size: [" + str(video.size[0]) + ", " + str(video.size[1]) + "]")
# Optionally resize to the requested width (height scales proportionally).
if args["videoWidth"] is not None:
    video_width = int(args["videoWidth"])
    video = video.resize(width = video_width)
    print("Changed video size: [" + str(video.size[0]) + ", " + str(video.size[1]) + "]")
if args["videoType"] is not None:
    video_type = args["videoType"]
else:
    video_type = "mp4"
    print("Output video type: " + video_type)
##########
## Define some functions to mark at image
def ellipse_set(person_conf_multi, people_i, point_i):
    """Return the (x0, y0, x1, y1) bounding box of the marker drawn for one
    keypoint, using the global point radius point_r."""
    x = person_conf_multi[people_i][point_i][0]
    y = person_conf_multi[people_i][point_i][1]
    return (x - point_r, y - point_r, x + point_r, y + point_r)
##########
video_frame_number = int(video.duration * video.fps) ## duration: second / fps: frame per second
video_frame_ciphers = math.ceil(math.log(video_frame_number, 10)) ## ex. 720 -> 3
pose_frame_list = []
point_r = 3 # radius of points
point_min = 14 # threshold of points - If there are more than point_min points in person, we define he/she is REAL PERSON
point_num = 17 # There are 17 points in 1 person
# Cumulative count of distinct people ever tracked, and the tracker-list
# length seen on the previous frame (used to detect newly added trackers).
tracking_people_count = 0
tracker_len_prev = 0
##########
# for object-tracker
target_points = [] # format: [(minx, miny, maxx, maxy), (minx, miny, maxx, maxy) ... ]
tracker = []
# Main loop: for each frame, run pose estimation, update/cull dlib trackers,
# draw keypoints and stats, and save the annotated frame.
for i in range(0, video_frame_number):
    # Save i-th frame as image
    image = video.get_frame(i/video.fps)
    ##########
    ## By pose-tensorflow
    image_batch = data_to_input(image)
    # Compute prediction with the CNN
    outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
    scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)
    detections = extract_detections(cfg, scmap, locref, pairwise_diff)
    unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
    person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
    #####
    # Add library to draw image
    image_img = Image.fromarray(image)
    # Prepare saving image with points of pose
    draw = ImageDraw.Draw(image_img)
    #####
    people_num = 0
    people_real_num = 0
    # person_conf_multi holds (x, y) pairs for point_num keypoints per person.
    people_num = person_conf_multi.size / (point_num * 2)
    people_num = int(people_num)
    #####
    # Update existing trackers; drop any whose box touches the frame border.
    if i != 0:
        tracker_left = []
        for k in range(len(tracker)):
            tracker[k].update(image)
            rect = tracker[k].get_position()
            if int(rect.left()) <= 0 or int(rect.top()) <= 0 or int(rect.right()) >= video.size[0] or int(rect.bottom()) >= video.size[1]:
                # object left(leave)
                print('Object GONE!')
                tracker_left.append(k)
            else:
                draw.rectangle([rect.left(), rect.top(), rect.right(), rect.bottom()], outline='red')
                print('Object ' + str(k) + ' tracked at [' + str(int(rect.left())) + ',' + str(int(rect.top())) + ', ' + str(int(rect.right())) + ',' + str(int(rect.bottom())) + ']')
        if len(tracker_left) != 0:
            # Delete from the back so earlier indexes remain valid.
            for j in range(len(tracker_left)):
                del tracker[tracker_left[len(tracker_left) - 1 - j]]
    #####
    # Draw each detected person and register new people with the tracker.
    for people_i in range(0, people_num):
        point_color_r = random.randrange(0, 256)
        point_color_g = random.randrange(0, 256)
        point_color_b = random.randrange(0, 256)
        point_color = (point_color_r, point_color_g, point_color_b, 255)
        point_list = []
        point_count = 0
        point_i = 0 # index of points
        # To find rectangle which include that people - list of points x, y coordinates
        people_x = []
        people_y = []
        for point_i in range(0, point_num):
            if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
                point_count = point_count + 1
                point_list.append(point_i)
        if point_count >= point_min:
            people_real_num = people_real_num + 1
            for point_i in range(0, point_num):
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
                    draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
                    people_x.append(person_conf_multi[people_i][point_i][0])
                    people_y.append(person_conf_multi[people_i][point_i][1])
            if i == 0:
                target_points.append((int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y))))
            else:
                # A person is "new" if their centroid is outside every
                # existing tracker box.
                is_new_person = True
                for k in range(len(tracker)):
                    rect = tracker[k].get_position()
                    if np.mean(people_x) < rect.right() and np.mean(people_x) > rect.left() and np.mean(people_y) < rect.bottom() and np.mean(people_y) > rect.top():
                        is_new_person = False
                if is_new_person == True:
                    tracker.append(dlib.correlation_tracker())
                    print('is_new_person!')
                    rect_temp = []
                    rect_temp.append((int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y))))
                    # NOTE(review): the comprehension's i shadows the frame
                    # index only inside the comprehension (Python 3 scoping),
                    # so the outer loop variable is unaffected.
                    [tracker[i+len(tracker)-1].start_track(image, dlib.rectangle(*rect)) for i, rect in enumerate(rect_temp)]
    ##########
    if i == 0:
        # Initial co-ordinates of the object to be tracked
        # Create the tracker object
        tracker = [dlib.correlation_tracker() for _ in range(len(target_points))]
        # Provide the tracker the initial position of the object
        [tracker[i].start_track(image, dlib.rectangle(*rect)) for i, rect in enumerate(target_points)]
    #####
    # Grow the cumulative people count by however many trackers were added.
    if tracker_len_prev < int(len(tracker)):
        tracking_people_count = tracking_people_count + int(len(tracker)) - tracker_len_prev
    tracker_len_prev = int(len(tracker))
    draw.text((0, 0), 'People(this frame): ' + str(len(tracker)), (0,0,0), font=font)
    draw.text((0, 18), 'People(cumulative): ' + str(tracking_people_count), (0,0,0), font=font)
    draw.text((0, 36), 'Frame: ' + str(i) + '/' + str(video_frame_number), (0,0,0), font=font)
    draw.text((0, 54), 'Total time required: ' + str(round(time.time() - time_start, 1)) + 'sec', (0,0,0), font=font)
    print('People(this frame): ' + str(len(tracker)))
    print('People(cumulative): ' + str(tracking_people_count))
    print('Frame: ' + str(i) + "/" + str(video_frame_number))
    print('Time required: ' + str(round(time.time() - time_start, 1)) + 'sec')
    image_img_numpy = np.asarray(image_img)
    pose_frame_list.append(image_img_numpy)
    if not (os.path.isdir("testset/" + video_output_name)):
        os.mkdir("testset/" + video_output_name)
    image_name = "testset/" + video_output_name + "/" + str(i).zfill(10) + "_" + str(int(video.fps)) + "_" + str(len(tracker)) + ".jpg"
    print(image_name)
    image_img.save(image_name)
# NOTE(review): this rebinds the imported module name `video_pose` to a clip
# object — harmless here since the module is no longer used, but confusing.
video_pose = ImageSequenceClip(pose_frame_list, fps=video.fps)
video_pose.write_videofile("testset/" + video_output_name + "_tracking." + video_type, fps=video.fps, progress_bar=False)
os.system('zip ' + './testset/' + video_output_name + '/' + video_output_name + ' ./testset/' + video_output_name + '/*')
print("Time(s): " + str(time.time() - time_start))
print("Output video size: [" + str(video.size[0]) + ", " + str(video.size[1]) + "]")
|
"""Conversions between different network parameters (S, Z, Y, T, ABCD)."""
import numpy as np
# S-parameters to ... --------------------------------------------------------
# def s_to_zparam(sparam, z0):
# """Convert S-parameters to Z-parameters.
# Args:
# sparam (ndarray): S-parameters
# z0 (ndarray): port impedance
# Returns:
# ndarray: Z-parameters
# """
# # Method 1:
# # https://en.wikipedia.org/wiki/Impedance_parameters#Relation_to_S-parameters
# # zparam = np.empty_like(sparam)
# # _, _, npts = sparam.shape
# # for idx in range(npts):
# # zsqrt = np.matrix([[np.sqrt(z0[0,idx]), 0],
# # [0, np.sqrt(z0[1,idx])]])
# # s = np.matrix(sparam[:,:,idx])
# # i = np.matrix([[1., 0.],
# # [0., 1.]])
# # zparam[:,:,idx] = zsqrt * (i + s) * np.linalg.inv(i - s) * zsqrt
# # Method 2:
# # DOI: 10.1109/22.275248
# z01 = z0[0]
# z02 = z0[1]
# z01c = np.conj(z01)
# z02c = np.conj(z02)
# r01 = z01.real
# r02 = z02.real
# s11 = sparam[0,0]
# s21 = sparam[1,0]
# s12 = sparam[0,1]
# s22 = sparam[1,1]
# denom = (1 - s11) * (1 - s22) - s12 * s21
# z11 = ((z01c + s11 * z01) * (1 - s22) + s12 * s21 * z01) / denom
# z12 = (2 * s12 * (r01 * r02)**0.5) / denom
# z21 = (2 * s21 * (r01 * r02)**0.5) / denom
# z22 = ((z02c + s22 * z02) * (1 - s11) + s12 * s21 * z02) / denom
# zparam = np.empty_like(sparam)
# zparam[0,0] = z11
# zparam[1,0] = z21
# zparam[0,1] = z12
# zparam[1,1] = z22
# return zparam
def s_to_tparam(sparam):
    """Convert S-parameters to T-parameters.

    Args:
        sparam (ndarray): S-parameters

    Returns:
        ndarray: T-parameters
    """
    s11, s12 = sparam[0,0], sparam[0,1]
    s21, s22 = sparam[1,0], sparam[1,1]
    tparam = np.empty_like(sparam)
    # -(det S)/s21 written with the sign folded into the numerator.
    tparam[0,0] = (s12 * s21 - s11 * s22) / s21
    tparam[0,1] = s11 / s21
    tparam[1,0] = -s22 / s21
    tparam[1,1] = 1 / s21
    return tparam
def s_to_zparam(sparam, z0):
    """Convert S-parameters to Z-parameters.

    Uses the generalized (possibly complex) reference-impedance relations.

    Args:
        sparam (ndarray): 2x2 S-parameter matrix.
        z0 (ndarray): per-port reference impedance; z0[0] for port 1,
            z0[1] for port 2.

    Returns:
        ndarray: 2x2 Z-parameter matrix.
    """
    s11, s12 = sparam[0, 0], sparam[0, 1]
    s21, s22 = sparam[1, 0], sparam[1, 1]
    z01, z02 = z0[0], z0[1]
    # The off-diagonal terms are scaled by the geometric mean of the
    # port resistances (real parts of the reference impedances).
    geo = np.sqrt(z01.real * z02.real)
    denom = (1 - s11) * (1 - s22) - s12 * s21
    zparam = np.empty_like(sparam)
    zparam[0, 0] = ((np.conj(z01) + s11 * z01) * (1 - s22) + s12 * s21 * z01) / denom
    zparam[0, 1] = 2 * s12 * geo / denom
    zparam[1, 0] = 2 * s21 * geo / denom
    zparam[1, 1] = ((np.conj(z02) + s22 * z02) * (1 - s11) + s12 * s21 * z02) / denom
    return zparam
def s_to_abcd(sparam, z0):
    """Convert S-parameters to ABCD-parameters.

    Uses the generalized reference-impedance relations (Frickey,
    IEEE Trans. MTT, DOI: 10.1109/22.275248), consistent with
    abcd_to_sparam below.

    Args:
        sparam (ndarray): 2x2 S-parameter matrix.
        z0 (ndarray): per-port reference impedance; z0[0] for port 1,
            z0[1] for port 2.

    Returns:
        ndarray: 2x2 ABCD-parameter matrix.
    """
    z01 = z0[0]
    z02 = z0[1]
    z01c = np.conj(z01)
    z02c = np.conj(z02)
    r01 = z01.real
    r02 = z02.real
    s11 = sparam[0,0]
    s21 = sparam[1,0]
    s12 = sparam[0,1]
    s22 = sparam[1,1]
    # BUG FIX: the normalization uses the geometric mean of BOTH port
    # resistances, sqrt(r01 * r02); the previous code used sqrt(r01 * r01),
    # which silently gave wrong results whenever the two ports differed.
    denom = 2 * s21 * np.sqrt(r01 * r02)
    A = ((z01c + s11 * z01) * (1 - s22) + s12 * s21 * z01) / denom
    B = ((z01c + s11 * z01) * (z02c + s22 * z02) - s12 * s21 * z01 * z02) / denom
    C = ((1 - s11) * (1 - s22) - s12 * s21) / denom
    D = ((1 - s11) * (z02c + s22 * z02) + s12 * s21 * z02) / denom
    abcd = np.empty_like(sparam)
    abcd[0, 0] = A
    abcd[0, 1] = B
    abcd[1, 0] = C
    abcd[1, 1] = D
    return abcd
# ... to S-parameters --------------------------------------------------------
def z_to_sparam(zparam, z0):
    """Convert Z-parameters to S-parameters.

    Args:
        zparam (ndarray): 2x2 Z-parameter matrix.
        z0 (ndarray): per-port reference impedance; z0[0] for port 1,
            z0[1] for port 2.

    Returns:
        ndarray: 2x2 S-parameter matrix.
    """
    z11, z12 = zparam[0, 0], zparam[0, 1]
    z21, z22 = zparam[1, 0], zparam[1, 1]
    z01, z02 = z0[0], z0[1]
    # Geometric mean of the port resistances normalizes the transmission terms.
    geo = np.sqrt(z01.real * z02.real)
    denom = (z11 + z01) * (z22 + z02) - z12 * z21
    sparam = np.empty_like(zparam)
    sparam[0, 0] = ((z11 - np.conj(z01)) * (z22 + z02) - z12 * z21) / denom
    sparam[0, 1] = 2 * z12 * geo / denom
    sparam[1, 0] = 2 * z21 * geo / denom
    sparam[1, 1] = ((z11 + z01) * (z22 - np.conj(z02)) - z12 * z21) / denom
    return sparam
def abcd_to_sparam(abcd, z0):
    """Convert ABCD-parameters to S-parameters.

    Args:
        abcd (ndarray): 2x2 ABCD-parameter matrix.
        z0 (ndarray): per-port reference impedance; z0[0] for port 1,
            z0[1] for port 2.

    Returns:
        ndarray: 2x2 S-parameter matrix.
    """
    A, B = abcd[0, 0], abcd[0, 1]
    C, D = abcd[1, 0], abcd[1, 1]
    z01, z02 = z0[0], z0[1]
    geo = np.sqrt(z01.real * z02.real)
    denom = A * z02 + B + C * z01 * z02 + D * z01
    sparam = np.empty_like(abcd)
    sparam[0, 0] = (A * z02 + B - C * np.conj(z01) * z02 - D * np.conj(z01)) / denom
    # s12 carries the ABCD determinant (reciprocal networks have A*D - B*C == 1).
    sparam[0, 1] = 2 * (A * D - B * C) * geo / denom
    sparam[1, 0] = 2 * geo / denom
    sparam[1, 1] = (-A * np.conj(z02) + B - C * z01 * np.conj(z02) + D * z01) / denom
    return sparam
def t_to_sparam(tparam):
    """Convert T-parameters back to S-parameters (inverse of s_to_tparam).

    Args:
        tparam (ndarray): 2x2 T-parameter matrix, indexed [row, col].

    Returns:
        ndarray: 2x2 S-parameter matrix.
    """
    t11, t12 = tparam[0, 0], tparam[0, 1]
    t21, t22 = tparam[1, 0], tparam[1, 1]
    sparam = np.empty_like(tparam)
    # Standard T -> S relations; every entry is normalized by t22.
    sparam[0, 0] = t12 / t22
    sparam[0, 1] = (t11 * t22 - t12 * t21) / t22
    sparam[1, 0] = 1 / t22
    sparam[1, 1] = -t21 / t22
    return sparam
|
from ._common import ParticleArray
import numpy as np
import pandas as pd
from softnanotools.logger import Logger
logger = Logger(__name__)
class Salt(ParticleArray):
    """A charge-neutral collection of free ions placed uniformly in the box."""

    def __init__(self, item, **kwargs):
        ParticleArray.__init__(self, item, **kwargs)
        # Valence of each ionic species, read from the item specification.
        self.anion = float(self._item['anion'])
        self.cation = float(self._item['cation'])
        self._atoms = self.generate_atoms()

    def generate_atoms(self):
        """
        Returns dataframe with the following columns
        """
        charges = self.generate_charges()
        logger.debug(f'Generated charges for Salt object:\n{charges}')
        n_ions = charges.shape[0]
        # Uniformly random coordinates scaled to the simulation box.
        coords = np.random.uniform(size=(n_ions, 3)) * self._kwargs['box']
        return pd.DataFrame({
            'mol': [self.mol] * n_ions,
            'type': [self.types['atom']] * n_ions,
            'x': coords[:, 0],
            'y': coords[:, 1],
            'z': coords[:, 2],
            'q': charges,
        })

    def generate_charges(self):
        """Return the per-ion charge array; overall the array is charge-neutral."""
        # Smallest charge unit that both valences divide evenly.
        unit = np.lcm(abs(int(self.cation)), abs(int(self.anion)))
        conc = self._item['concentration']
        n_anions = int(conc * unit / abs(self.anion))
        n_cations = int(conc * unit / abs(self.cation))
        # Anions first (charge -anion), then cations (charge +cation).
        return np.concatenate([
            np.full(n_anions, -self.anion),
            np.full(n_cations, self.cation),
        ])
# -*- coding: utf-8 -*-
import os
from collections import OrderedDict
import yaml
from restapi.utilities.logs import log
PROJECTS_DEFAULTS_FILE = 'projects_defaults.yaml'
PROJECT_CONF_FILENAME = 'project_configuration.yaml'
def read_configuration(
        default_file_path, base_project_path, projects_path, submodules_path):
    """Load, validate and merge the project configuration.

    Returns a tuple (configuration, extended_project, extend_path); the last
    two are None when the project does not extend another one.
    """
    custom_configuration = load_yaml_file(
        PROJECT_CONF_FILENAME, path=base_project_path, keep_order=True
    )

    # The custom configuration must carry a 'project' section...
    project = custom_configuration.get('project')
    if project is None:
        raise AttributeError("Missing project configuration")

    # ...with every mandatory key filled in.
    for required in ('title', 'description', 'version', 'rapydo'):
        if project.get(required) is None:
            log.exit(
                "Project not configured, missing key '{}' in file {}/{}",
                required,
                base_project_path,
                PROJECT_CONF_FILENAME,
            )

    if default_file_path is None:
        base_configuration = {}
    else:
        base_configuration = load_yaml_file(
            file=PROJECTS_DEFAULTS_FILE, path=default_file_path, keep_order=True)

    extended_project = project.get('extends')
    if extended_project is None:
        # No inheritance: just mix defaults with the custom configuration.
        return mix(base_configuration, custom_configuration), None, None

    extends_from = project.get('extends-from', 'projects')
    if extends_from == "projects":
        extend_path = projects_path
    elif extends_from.startswith("submodules/"):
        repository_name = extends_from.split("/")[1].strip()
        if repository_name == '':
            log.exit('Invalid repository name in extends-from, name is empty')
        extend_path = submodules_path
    else:
        suggest = "Expected values: 'projects' or 'submodules/${REPOSITORY_NAME}'"
        log.exit("Invalid extends-from parameter: {}.\n{}", extends_from, suggest)

    if not os.path.exists(extend_path):
        log.exit("From project not found: {}", extend_path)

    extended_configuration = load_yaml_file(
        file="extended_{}".format(PROJECT_CONF_FILENAME),
        path=extend_path,
        keep_order=True,
    )
    # Merge order: defaults < extended project < custom project.
    merged = mix(base_configuration, extended_configuration)
    return mix(merged, custom_configuration), extended_project, extend_path
def mix(base, custom):
    """Recursively merge `custom` into `base` (in place) and return `base`.

    Dicts are merged key-wise, lists are concatenated, scalars in `custom`
    override those in `base`; a None value in `custom` never overrides.
    """
    if base is None:
        base = {}

    for key, value in custom.items():
        if key not in base:
            base[key] = value
            continue

        if value is None:
            # Refuse to wipe an existing mapping with an empty value.
            if isinstance(base[key], dict):
                log.warning("Cannot replace {} with empty list", key)
            continue

        if isinstance(value, dict):
            mix(base[key], value)
        elif isinstance(value, list):
            base[key].extend(value)
        else:
            base[key] = value

    return base
class OrderedLoader(yaml.SafeLoader):
    """
    A 'workaround' good enough for ordered loading of dictionaries
    https://stackoverflow.com/a/21912744
    NOTE: This was created to skip dependencies.
    Otherwise this option could be considered:
    https://pypi.python.org/pypi/ruamel.yaml
    """
    # Intentionally empty: this subclass exists only as a registration
    # target. load_yaml_file() attaches construct_mapping() to this class
    # (not to SafeLoader itself) so ordered-mapping loading does not leak
    # into other users of PyYAML in the same process.
    pass
def construct_mapping(loader, node):
    """Build an OrderedDict from a YAML mapping node, preserving key order."""
    loader.flatten_mapping(node)
    pairs = loader.construct_pairs(node)
    return OrderedDict(pairs)
def load_yaml_file(file, path, keep_order=False):
    """Read a YAML file and return its first document.

    Raises AttributeError when the file is missing, empty or unparsable.
    When keep_order is True, mappings are loaded as OrderedDict.
    """
    filepath = os.path.join(path, file)
    log.verbose("Reading file {}", filepath)

    if not os.path.exists(filepath):
        raise AttributeError("YAML file does not exist: {}".format(filepath))

    with open(filepath) as fh:
        try:
            if keep_order:
                # Route every mapping through construct_mapping -> OrderedDict.
                OrderedLoader.add_constructor(
                    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                    construct_mapping
                )
                documents = list(yaml.load_all(fh, OrderedLoader))
            else:
                documents = list(yaml.load_all(fh, yaml.loader.Loader))

            # NOTE(review): this raise sits inside the try, so the message is
            # re-wrapped by the except below ("Failed to read file ...") —
            # preserved as-is since callers may match on that text.
            if len(documents) == 0:
                raise AttributeError("YAML file is empty: {}".format(filepath))
            return documents[0]
        except Exception as e:
            raise AttributeError("Failed to read file {}: {}".format(filepath, e))
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
import detect_lincense
def split_lincense_horizontally(image):
    """Split a detected licence-plate image into per-character binary images.

    Args:
        image: either a path to an image file, or an already-loaded BGR
            image array (e.g. the plate returned by detect_lincense).

    Returns:
        list of 2-D binary images (black background, white glyph), one per
        character; the separator dot between characters 2 and 3 is removed.
    """
    if isinstance(image, str):
        img = cv2.imread(image)
    else:
        img = image
    # Convert to grayscale; note OpenCV loads images in BGR order, not the
    # more common RGB.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # gray_GB = cv2.GaussianBlur(gray, (3, 3), 0)
    # edges = cv2.Canny(gray_GB, 60, 120) # img2tfrds can use the borders too
    # Threshold: gray values > 90 become 255, the rest 0; `thr` is the
    # resulting binary image.
    ret, thr = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
    # A plate is known to hold 7 characters plus a "·" separator after the
    # second one.
    character_num = 7
    # The recognition network was trained on black-background / white-glyph
    # characters. If more than half of the topmost row is white, the image
    # is assumed to be white-background and is inverted.
    if np.sum(thr[:1, :]) * 2 > 255 * thr.shape[1]:
        thr = 255 - thr
    # smooth bottom and top
    # The two loops below trim the redundant black bands at the bottom and
    # top of the plate. Idea: scan a row left-to-right; many value jumps
    # (black<->white transitions) mean real glyph content, few jumps mean
    # the row is background and can be dropped.
    for i in range(thr.shape[0]-1, -1, -1):
        jump_counter = 0
        prev_value = thr[i][0]  # start from the first pixel of this row
        is_jump = 0  # the step must be larger than the criteria
        for j in thr[i]:
            is_jump += 1
            if j != prev_value:
                # Current pixel differs from the previous run's value
                # (black -> white or white -> black).
                if is_jump > 3:  # if the step is satisfied to the condition
                    # Require runs longer than 3 pixels so that jagged
                    # glyph edges (rapid black/white flicker) don't count
                    # as separate transitions.
                    jump_counter += 1  # one more transition in this row
                prev_value = j
                is_jump = 0
        if jump_counter < 12:
            thr = thr[:i]
        else:
            # First row that must be kept: everything further in is content.
            break
    thr = thr[::-1]
    # Same trimming for the other edge: flip vertically, trim from the
    # bottom again, then flip back (after the second loop below).
    for i in range(thr.shape[0]-1, -1, -1):
        jump_counter = 0
        prev_value = thr[i][0]
        is_jump = 0  # the step must be larger than the criteria
        for j in thr[i]:
            is_jump += 1
            if j != prev_value:
                if is_jump > 3:  # if the step is satisfied to the condition
                    jump_counter += 1
                prev_value = j
                is_jump = 0
        if jump_counter < 12:
            thr = thr[:i]
        else:
            break
    # floodFill to delete the vertical white space on both sides
    thr = thr[::-1]
    if thr[:, 0].all():
        # A fully-white first column cannot belong to a character (not even
        # a Chinese glyph), so it must be a redundant white margin: flood
        # fill from (0, 0) with black to erase it.
        cv2.floodFill(
            thr, np.zeros((thr.shape[0]+2, thr.shape[1]+2), dtype=np.uint8),
            (0, 0), 0)
    if thr[:, -1].all():
        # Same for a fully-white last column, seeded at the top-right corner.
        cv2.floodFill(
            thr, np.zeros((thr.shape[0]+2, thr.shape[1]+2), dtype=np.uint8),
            (thr.shape[1]-1, 0), 0)
    plt.imshow(thr, cmap="gray")
    plt.title("floodFill")
    plt.show()
    # Morphology
    # Morphological dilation + closing makes each character one solid blob.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3))
    kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    thr_without_circle = cv2.morphologyEx(
        cv2.dilate(thr, kernel), cv2.MORPH_CLOSE, kernel1)
    # cross line to kick out dot
    # Draw a white line to remove the "·" separator from the plate.
    thr_without_circle[5, :] = 255
    # Row 5 is high enough that the line misses the dot but connects all
    # real characters into one large connected component. Small components
    # are then erased: real glyphs survive (they belong to the big
    # component) while the unconnected "·" is a small contour and gets
    # filled with background.
    # NOTE(review): the 3-value unpack below matches the OpenCV 3.x
    # findContours signature; OpenCV 4.x returns only 2 values — confirm
    # the pinned OpenCV version.
    _, cnts, _ = cv2.findContours(thr_without_circle,
                                  cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    small_area = [i for i in cnts if cv2.contourArea(i) < 100]
    cv2.fillPoly(thr_without_circle, small_area, 0)  # Denoising
    # The three lines above produced a dot-free mask; AND-ing the (dilated)
    # mask with the original binary image recovers the plate without "·".
    kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 5))
    thr_without_circle_and = cv2.bitwise_and(
        cv2.dilate(thr_without_circle, kernel2), thr)
    # fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(12, 6))
    # ax0.imshow(thr_without_circle, cmap="gray")
    # ax1.imshow(thr_without_circle_and, cmap="gray")
    # plt.show()
    # cut out the all-black borders
    # Trim the all-black columns on the far left and far right so the image
    # starts and ends on glyph pixels.
    for i in range(thr_without_circle_and.shape[1]):
        if sum(thr_without_circle_and[:, i]) > 0:
            thr_without_circle_and = thr_without_circle_and[:, i:]
            break
    for i in range(thr_without_circle_and.shape[1]-1, -1, -1):
        if sum(thr_without_circle_and[:, i]) > 0:
            thr_without_circle_and = thr_without_circle_and[:, :i+1]
            break
    # get split lines
    # Analogous to the top/bottom trimming: scan left-to-right for the
    # start of each inter-character black gap, and right-to-left for its
    # end. Zipping the two lists yields ((x0_min, x0_max), ...,
    # (x6_min, x6_max)), which delimits the cut points.
    vertical_lines_0 = []
    vertical_lines_1 = []
    is_append = 1
    for i in range(thr_without_circle_and.shape[1]):
        if sum(thr_without_circle_and[:, i]) == 0:
            if is_append:
                vertical_lines_0.append(i)
                is_append = 0
        else:
            is_append = 1
    is_append = 1
    for i in range(thr_without_circle_and.shape[1] - 1, -1, -1):
        if sum(thr_without_circle_and[:, i]) == 0:
            if is_append:
                vertical_lines_1.append(i)
                is_append = 0
        else:
            is_append = 1
    # split out the individual character into split_imgs
    black_spaces = list(zip(vertical_lines_0, sorted(vertical_lines_1)))
    # NOTE(review): the next assignment is immediately overwritten — the
    # .copy() is a dead store kept for byte-compatibility.
    white_spaces = black_spaces.copy()
    white_spaces = np.array(black_spaces).flatten().tolist()
    white_spaces.append(thr_without_circle_and.shape[1])
    white_spaces.insert(0, 0)
    # print("white_spaces:", white_spaces)
    split_imgs = []
    # Even/odd pairs of boundaries are the white (glyph) spans.
    for i in range(0, len(white_spaces), 2):
        split_imgs.append(
            thr_without_circle_and[:, white_spaces[i]:white_spaces[i+1]])
    return split_imgs
def main():
    """Detect the plate in a sample image and display each split character."""
    sample = "./images/cars/car_0.jpg"
    plate = detect_lincense.detect_lincense(sample)
    for character in split_lincense_horizontally(plate):
        plt.imshow(character, cmap="gray")
        plt.show()


if __name__ == '__main__':
    main()
|
from django.urls import path, include
from django.conf.urls import url
from .views import PostViewSet
from rest_framework.routers import DefaultRouter
import dedicated_page.views as dedicated_page_views
from django.views.generic import TemplateView
# DRF router generating the standard routes for PostViewSet.
router = DefaultRouter()
router.register('post', PostViewSet, basename='post')
urlpatterns = [
    # Router-generated REST endpoints under /api/.
    path('api/', include(router.urls)),
    # NOTE(review): this re-includes the SAME router under an extra
    # 'api/<int:pk>/' prefix, duplicating every route — looks unintentional;
    # confirm against the API consumers before removing.
    path('api/<int:pk>/', include(router.urls)),
    # Legacy regex route for the dedicated-page view.
    url(r'^dedicated_page/(?P<pk>\d+)/$', dedicated_page_views.dedicated_page, name='dedicated_page'),
    # Explicit action mapping: POST /create_post/ -> PostViewSet.create_post.
    path('create_post/', PostViewSet.as_view({"post": "create_post"}), name='new_post'),
]
import logging
import boto3
import os
import json
from random import shuffle
from mail_handlers import ses_handler, mailgun_handler
# TODO
# Should using the deadletter queue, at the moment the naive approach
# is adding a message, when it has been 'taken' three times from the queue.
def setup_handlers():
    """Build the list of mail handlers for the worker, in random order.

    Shuffling spreads the load between the SES and Mailgun providers
    across worker restarts.
    """
    mail_handlers = [ses_handler.SESHandler(), mailgun_handler.MailgunHandler()]
    shuffle(mail_handlers)
    return mail_handlers
def process_messages():
    """Poll the SQS queue forever and hand each mail message to the handlers.

    Each message is tried against the handlers in turn until one reports
    success or declares the message dead; dead messages are copied to the
    dead-letter queue. The message is deleted from the main queue in both
    cases.
    """
    logging.info('Starting processing messages')
    sqs = boto3.resource('sqs', region_name=os.environ.get('REGION'))
    queue = sqs.get_queue_by_name(QueueName=os.environ.get('QUEUE'))
    deadletter_queue = sqs.get_queue_by_name(QueueName=os.environ.get('DEADLETTERQUEUE'))
    handlers = setup_handlers()

    while True:
        logging.info('Waiting for messages')
        for message in queue.receive_messages(WaitTimeSeconds=20, MaxNumberOfMessages=10):
            body = json.loads(message.body)
            delivered = False
            dead_message = False
            idx = 0
            # Try each handler until the mail is sent or given up on.
            while not delivered and not dead_message and idx < len(handlers):
                delivered, dead_message = handlers[idx].send_mail(body)
                idx += 1
            if dead_message:
                # Park undeliverable mail on the dead-letter queue, then
                # remove it from the main queue.
                deadletter_queue.send_message(MessageGroupId='1', MessageBody=json.dumps(body))
                message.delete()
            if delivered:
                message.delete()


if __name__ == '__main__':
    process_messages()
|
#A PushBuffer is an iterable that contains a fixed number of items
#When a new item is pushed into a PushBuffer, it will be placed in the front (lowest index)
# and all other items are shifted one place higher
#If pushing a new item would require more space than the size allows,
# the oldest item (highest index) is discarded.
#PushBuffers can use get() and set() to read and write items by index (although each item's index changes upon a push)
#Could I have emulated a container type using __getitem__ and __setitem__ instead? Yes. Would that be better? Probably also yes.
class PushBuffer:
    """A fixed-size buffer with push-to-front semantics.

    New items are pushed to the front (index 0), shifting everything else
    one place toward the back; the oldest item (highest index) is discarded
    when pushing would exceed the size. Unused slots hold None.
    """

    def __init__(self, size=100, items=None):
        """Create a buffer of `size` slots, optionally pre-filled from `items`.

        `items` are pushed in reverse so that items[0] ends up at index 0
        (order is preserved).

        Raises:
            ValueError: if size is not strictly positive.
        """
        if size <= 0:
            raise ValueError('Size must be greater than zero')
        self.size = size
        # Fixed-length backing list; empty slots are None.
        self.__memory = [None] * self.size
        if items is not None:  # idiom fix: was `items != None`
            for item in reversed(items):
                self.push(item)

    def push(self, newItem):
        """Insert newItem at the front, discarding the oldest (last) item."""
        self.__memory.pop()
        self.__memory.insert(0, newItem)

    def get(self, index):
        """Return the item stored at `index`."""
        return self.__memory[index]

    def set(self, index, newValue):
        """Overwrite the item stored at `index`."""
        self.__memory[index] = newValue

    def flush(self):
        """Clear all values, resetting every slot to None."""
        self.__memory = [None] * self.size

    def getValues(self):
        """Return all stored values (including empty None slots) as a tuple."""
        return tuple(self.__memory)

    def getNonNoneValues(self):
        """Return only the non-None values, front to back, as a tuple."""
        # idiom fix: identity check and a comprehension replace the manual
        # `!= None` accumulation loop.
        return tuple(value for value in self.__memory if value is not None)

    def __str__(self):
        return str(self.__memory)

    def __repr__(self):
        return "PushBuffer(%d, %s)" % (self.size, str(self.__memory))

    def __iter__(self):
        return iter(self.__memory)
|
"""Start a hyperoptimization from a single node"""
import sys
import numpy as np
import pickle as pkl
import hyperopt
from hyperopt import hp, fmin, tpe, Trials
import pysr
import time
import contextlib
@contextlib.contextmanager
def temp_seed(seed):
    """Temporarily seed numpy's global RNG, restoring its state on exit."""
    saved_state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        # Restore even if the body raised, so callers never leak the seed.
        np.random.set_state(saved_state)
# Change the following code to your file
################################################################################
# Directory where every worker drops/reads its pickled hyperopt trials.
TRIALS_FOLDER = "trials"
# How many new evaluations each fmin() call in the driver loop performs.
NUMBER_TRIALS_PER_RUN = 1
def run_trial(args):
    """Evaluate the model loss using the hyperparams in args

    :args: A dictionary containing all hyperparameters
    :returns: Dict with status and loss from cross-validation
    """
    print("Running on", args)
    # Fixed (non-searched) settings; these override anything in `args`.
    args["niterations"] = 100
    args["npop"] = 100
    args["ncyclesperiteration"] = 1000
    args["topn"] = 10
    args["parsimony"] = 0.0
    args["useFrequency"] = True
    args["annealing"] = True
    # NOTE(review): dead guard — npop/ncyclesperiteration were hard-coded
    # just above, so this branch can no longer trigger; kept in case the
    # overrides are removed.
    if args["npop"] < 20 or args["ncyclesperiteration"] < 3:
        print("Bad parameters")
        return {"status": "ok", "loss": np.inf}
    args["weightDoNothing"] = 1.0
    ntrials = 3

    # Deterministic input data, independent of the global RNG state.
    with temp_seed(0):
        X = np.random.randn(100, 10) * 3

    # Ground-truth expressions evaluated against X to produce targets.
    eval_str = [
        "np.sign(X[:, 2])*np.abs(X[:, 2])**2.5 + 5*np.cos(X[:, 3]) - 5",
        "np.exp(X[:, 0]/2) + 12.0 + np.log(np.abs(X[:, 0])*10 + 1)",
        "(np.exp(X[:, 3]) + 3)/(np.abs(X[:, 1]) + np.cos(X[:, 0]) + 1.1)",
        "X[:, 0] * np.sin(2*np.pi * (X[:, 1] * X[:, 2] - X[:, 3] / X[:, 4])) + 3.0",
    ]
    print("Starting", str(args))
    try:
        local_trials = []
        for i in range(len(eval_str)):
            print(f"Starting test {i}")
            for j in range(ntrials):
                print(f"Starting trial {j}")
                y = eval(eval_str[i])
                trial = pysr.pysr(
                    X,
                    y,
                    procs=4,
                    populations=20,
                    binary_operators=["plus", "mult", "pow", "div"],
                    unary_operators=["cos", "exp", "sin", "logm", "abs"],
                    maxsize=25,
                    constraints={"pow": (-1, 1)},
                    **args,
                )
                if len(trial) == 0:
                    raise ValueError
                # BUG FIX: normalize the RMSE by the std of the CURRENT
                # target, eval_str[i]; the old code used eval_str[i - 1],
                # which normalized against the previous test's target (and,
                # for i == 0, against the last one).
                local_trials.append(
                    np.min(trial["MSE"]) ** 0.5 / np.std(eval(eval_str[i]))
                )
                print(f"Test {i} trial {j} with", str(args), f"got {local_trials[-1]}")
    except ValueError:
        # An empty hall-of-fame means the run broke: report infinite loss.
        print("Broken", str(args))
        return {"status": "ok", "loss": np.inf}  # or 'fail' if nan loss
    loss = np.average(local_trials)
    print(f"Finished with {loss}", str(args))
    return {"status": "ok", "loss": loss}  # or 'fail' if nan loss
# Hyperopt search space: every PySR parameter is sampled log-normally
# around the log of its default value with unit sigma.
space = {
    "alpha": hp.lognormal("alpha", np.log(10.0), 1.0),
    "fractionReplacedHof": hp.lognormal("fractionReplacedHof", np.log(0.1), 1.0),
    "fractionReplaced": hp.lognormal("fractionReplaced", np.log(0.1), 1.0),
    "perturbationFactor": hp.lognormal("perturbationFactor", np.log(1.0), 1.0),
    "weightMutateConstant": hp.lognormal("weightMutateConstant", np.log(4.0), 1.0),
    "weightMutateOperator": hp.lognormal("weightMutateOperator", np.log(0.5), 1.0),
    "weightAddNode": hp.lognormal("weightAddNode", np.log(0.5), 1.0),
    "weightInsertNode": hp.lognormal("weightInsertNode", np.log(0.5), 1.0),
    "weightDeleteNode": hp.lognormal("weightDeleteNode", np.log(0.5), 1.0),
    "weightSimplify": hp.lognormal("weightSimplify", np.log(0.05), 1.0),
    "weightRandomize": hp.lognormal("weightRandomize", np.log(0.25), 1.0),
}
################################################################################
def merge_trials(trials1, trials2_slice):
    """Append the trial documents in `trials2_slice` onto `trials1`.

    Each incoming document gets a fresh tid above trials1's current maximum
    so that trial ids never collide.

    :trials1: the primary hyperopt Trials object (modified in place)
    :trials2_slice: a list of trial documents, e.g. trials2.trials[:10]
    :returns: trials1, now containing the merged documents
    """
    highest_tid = 0
    if trials1.trials:
        highest_tid = max(doc["tid"] for doc in trials1.trials)
    for doc in trials2_slice:
        new_tid = doc["tid"] + highest_tid + 1
        # new_trial_docs gives a correctly-shaped one-element document list
        # that we then overwrite with the (re-tid'ed) incoming document.
        placeholder = Trials().new_trial_docs(
            tids=[None], specs=[None], results=[None], miscs=[None]
        )
        placeholder[0] = doc
        placeholder[0]["tid"] = new_tid
        placeholder[0]["misc"]["tid"] = new_tid
        for key in placeholder[0]["misc"]["idxs"]:
            placeholder[0]["misc"]["idxs"][key] = [new_tid]
        trials1.insert_trial_docs(placeholder)
        trials1.refresh()
    return trials1
# Files already merged into the in-memory `trials`, to avoid re-loading.
loaded_fnames = []
trials = None
# Run new hyperparameter trials until killed
while True:
    # Re-randomize the global RNG (the previous fmin call may have seeded it).
    np.random.seed()
    # Load up all runs:
    # NOTE(review): importing inside the loop is harmless (modules are
    # cached) but unconventional; left as-is.
    import glob
    path = TRIALS_FOLDER + "/*.pkl"
    for fname in glob.glob(path):
        if fname in loaded_fnames:
            continue
        # Each pickle holds {'trials': Trials, 'n': number of new trials}.
        # NOTE(review): unpickling assumes these files are produced only by
        # trusted sibling workers (pickle is unsafe on untrusted input).
        trials_obj = pkl.load(open(fname, "rb"))
        n_trials = trials_obj["n"]
        trials_obj = trials_obj["trials"]
        # First file becomes the base; later files contribute only their
        # last n_trials (the new ones from that worker's run).
        if len(loaded_fnames) == 0:
            trials = trials_obj
        else:
            print("Merging trials")
            trials = merge_trials(trials, trials_obj.trials[-n_trials:])
        loaded_fnames.append(fname)
    print("Loaded trials", len(loaded_fnames))
    if len(loaded_fnames) == 0:
        trials = Trials()
    n = NUMBER_TRIALS_PER_RUN
    try:
        # max_evals is relative to the already-loaded trial count, so each
        # pass adds exactly n new evaluations.
        # NOTE(review): rstate=np.random.RandomState is the legacy hyperopt
        # API; newer releases expect a numpy Generator — confirm the pinned
        # hyperopt version.
        best = fmin(
            run_trial,
            space=space,
            algo=tpe.suggest,
            max_evals=n + len(trials.trials),
            trials=trials,
            verbose=1,
            rstate=np.random.RandomState(np.random.randint(1, 10 ** 6)),
        )
    except hyperopt.exceptions.AllTrialsFailed:
        continue
    print("current best", best)
    hyperopt_trial = Trials()
    # Merge with empty trials dataset:
    # Only the n newest trials are persisted; other workers merge them back.
    save_trials = merge_trials(hyperopt_trial, trials.trials[-n:])
    # Random + timestamp suffix makes the filename collision-safe across
    # concurrent workers.
    new_fname = (
        TRIALS_FOLDER
        + "/"
        + str(np.random.randint(0, sys.maxsize))
        + str(time.time())
        + ".pkl"
    )
    pkl.dump({"trials": save_trials, "n": n}, open(new_fname, "wb"))
    loaded_fnames.append(new_fname)
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide various utility/container types needed by Endpoints Framework.
Putting them in this file makes it easier to avoid circular imports,
as well as keep from complicating tests due to importing code that
uses App Engine apis.
"""
from __future__ import absolute_import
# import attr
__all__ = [
'OAuth2Scope', 'Issuer', 'LimitDefinition', 'Namespace',
]
# @attr.s(frozen=True, slots=True)
class OAuth2Scope(object):
    """Value object pairing an OAuth2 scope string with its description.

    Hand-rolled replacement for the attrs-based definition (kept in the
    comments) to avoid the third-party dependency. Equality and hashing
    consider only `scope`; `description` is informational.
    """
    # scope = attr.ib(validator=attr.validators.instance_of(basestring))
    # description = attr.ib(validator=attr.validators.instance_of(basestring))
    def __init__(self, scope, description, **kwargs):
        s = kwargs.get("scope", scope)
        d = kwargs.get("description", description)
        assert isinstance(s, basestring), "invalid type"
        assert isinstance(d, basestring), "invalid type"
        self.scope = s
        self.description = d
        # fix me for immutable
        # NOTE(review): assigning __setattr__ on the INSTANCE does not make
        # it immutable — special methods are looked up on the type, so this
        # hook is never triggered by attribute assignment.
        self.__setattr__ = self.setLate__setattr__
    def __eq__(self, other):
        # Scope-only equality, matching __hash__ below.
        if type(other) is type(self):
            return self.scope == other.scope
        else:
            return False
    def __str__(self):
        return "OAuth2Scope: s:{0} d:{1}".format(self.scope, self.description)
    def __repr__(self):
        return str(self)
    def setLate__setattr__(self, *args):
        # BUG FIX: `raise Exception.message(...)` was not a valid raise
        # (Exception.message is not callable); raise a real exception with
        # the intended message instead.
        raise Exception("instance is immutable")
    def __getstate__(self):
        # being pickled; required to support pickling.
        # BUG FIX: this class defines no __slots__, so the previous
        # `self.__slots__` lookup raised AttributeError during pickling;
        # enumerate the two real attributes explicitly.
        return {slot: getattr(self, slot) for slot in ('scope', 'description')}
    def __setstate__(self, d):
        # being hydrated
        for slot in d:
            setattr(self, slot, d[slot])
    @property
    def __key(self):
        return self.scope  # description intentionally excluded; see __eq__
    def __hash__(self):
        return hash(self.__key)
    @classmethod
    def convert_scope(cls, scope):
        "Convert string scopes into OAuth2Scope objects."
        if isinstance(scope, cls):
            return scope
        return cls(scope=scope, description=scope)
    @classmethod
    def convert_list(cls, values):
        "Convert a list of scopes into a list of OAuth2Scope objects."
        if values is not None:
            return [cls.convert_scope(value) for value in values]
# Issuer = attr.make_class('Issuer', ['issuer', 'jwks_uri'])
class Issuer(object):
def __init__(self, issuer, jwks_uri, **kwargs):
s = kwargs.get("issuer", issuer)
j = kwargs.get("jwks_uri", jwks_uri)
self.issuer = s
self.jwks_uri = j
def __str__(self):
return "Issuer: s:{0} d:{1}".format(self.issuer, self.jwks_uri)
def __repr__(self):
return str(self)
# LimitDefinition = attr.make_class('LimitDefinition', ['metric_name',
# 'display_name',
# 'default_limit'])
class LimitDefinition(object):
def __init__(self, metric_name, display_name, default_limit, **kwargs):
m = kwargs.get("metric_name", metric_name)
dn = kwargs.get("display_name", display_name)
dl = kwargs.get("default_limit", default_limit)
self.metric_name = m
self.display_name = dn
self.default_limit = dl
def __str__(self):
return "LimitDefinition: mn:{0} dn:{1} dl:{2}".format(self.metric_name, self.display_name, self.default_limit)
def __repr__(self):
return str(self)
# Namespace = attr.make_class('Namespace', ['owner_domain', 'owner_name', 'package_path'])
class Namespace(object):
def __init__(self, owner_domain, owner_name, package_path, **kwargs):
m = kwargs.get("owner_domain", owner_domain)
dn = kwargs.get("owner_name", owner_name)
dl = kwargs.get("package_path", package_path)
self.owner_domain = m
self.owner_name = dn
self.package_path = dl
def __str__(self):
return "Namespace: od:{0} on:{1} pp:{2}".format(self.owner_domain, self.owner_name, self.package_path)
def __repr__(self):
return str(self) |
from .parser import CContext, CEnum, CBitmask, CStruct, CType, CHandle, CMember, CCommand, CAlias
from . import typeconversion as tc
from typing import Optional, Tuple, List, Dict
import re
class SwiftEnum(CEnum):
    """A C enum imported into Swift, carrying its chosen raw type."""

    def __init__(self, c_enum: CEnum, raw_type: str, error: bool = False, **kwargs):
        super().__init__(**kwargs)
        # `error` marks enums surfaced as Swift error types (set for VkResult
        # by the importer below).
        self.error = error
        self.raw_type = raw_type
        self.c_enum = c_enum
class SwiftOptionSet(CEnum):
    """A C bitmask imported as a Swift OptionSet with a fixed raw type."""

    def __init__(self, c_bitmask: CBitmask, raw_type: str, **kwargs):
        super().__init__(**kwargs)
        self.raw_type = raw_type
        self.c_bitmask = c_bitmask
class SwiftMember:
    """A single member (name and Swift type) of a generated Swift struct."""

    def __init__(self, name: str, type_: str, is_closure: bool = False):
        # `type_` avoids shadowing the builtin; stored/exposed as `.type`.
        self.is_closure = is_closure
        self.type = type_
        self.name = name
class SwiftStruct:
    """A Swift struct generated from a C struct, plus conversion metadata."""

    def __init__(self, c_struct: CStruct, name: str,
                 members: List[SwiftMember], member_conversions: tc.MemberConversions,
                 convertible_from_c_struct: bool = True, parent_class: 'SwiftClass' = None):
        self.c_struct = c_struct
        self.name = name
        self.members = members
        # Per-member C <-> Swift conversion recipes.
        self.member_conversions: tc.MemberConversions = member_conversions
        # False when the struct cannot be rebuilt from its raw C value
        # (propagated from child structs by the importer below).
        self.convertible_from_c_struct = convertible_from_c_struct
        self.parent_class = parent_class
class SwiftCommand:
    """A Swift wrapper for a C command, with parameter/return conversions."""

    def __init__(self, c_command: CCommand, name: str, return_type: str, throws: bool,
                 class_params: Dict[str, 'SwiftClass'], params: List[SwiftMember],
                 param_conversions: tc.MemberConversions, return_conversion: tc.Conversion,
                 output_param: str = None, output_param_implicit_type: str = None, unwrap_output_param: bool = False,
                 enumeration_pointer_param: str = None, enumeration_count_param: str = None,
                 dispatcher: 'SwiftClass' = None):
        # Identity / signature of the generated Swift function.
        self.c_command = c_command
        self.name = name
        self.return_type = return_type
        self.throws = throws
        # Parameters and their C <-> Swift conversion recipes.
        self.class_params = class_params
        self.params = params
        self.param_conversions = param_conversions
        self.return_conversion = return_conversion
        # Output/enumeration parameter bookkeeping — presumably describes
        # C out-pointer and count/pointer enumeration idioms; confirm
        # against the code-generation templates.
        self.output_param = output_param
        self.output_param_implicit_type = output_param_implicit_type
        self.unwrap_output_param = unwrap_output_param
        self.enumeration_pointer_param = enumeration_pointer_param
        self.enumeration_count_param = enumeration_count_param
        self.dispatcher = dispatcher
class DispatchTable:
    """A named table of C commands resolved through a loader function."""

    def __init__(self, name: str, loader: Tuple[str, str], param: Tuple[str, str] = None,
                 commands: List[CCommand] = None):
        # `commands or []` keeps the mutable default out of the signature.
        self.commands = commands or []
        self.param = param
        self.loader = loader
        self.name = name
class SwiftClass:
    """A Swift class wrapping a C handle, with its dispatch metadata."""

    def __init__(self, name: str, reference_name: str, c_handle: CHandle = None, parent: 'SwiftClass' = None,
                 dispatch_table: DispatchTable = None, dispatcher: 'SwiftClass' = None,
                 commands: List[SwiftCommand] = None):
        self.name = name
        self.reference_name = reference_name
        self.c_handle = c_handle
        self.parent = parent
        self.dispatch_table = dispatch_table
        self.dispatcher = dispatcher
        # Avoid a shared mutable default list.
        self.commands = commands or []

    @property
    def ancestors(self) -> List['SwiftClass']:
        """The parent chain, nearest first; does not include self."""
        chain: List[SwiftClass] = []
        node = self
        while node.parent:
            node = node.parent
            chain.append(node)
        return chain
class SwiftAlias:
    """A Swift typealias generated from a C alias."""

    def __init__(self, c_alias: CAlias, name: str, alias: str):
        self.alias = alias
        self.name = name
        self.c_alias = c_alias
class SwiftContext:
    """Accumulates every generated Swift declaration for output."""

    def __init__(self):
        self.dispatch_tables: List[DispatchTable] = []
        self.aliases: List[SwiftAlias] = []
        self.classes: List[SwiftClass] = []
        self.structs: List[SwiftStruct] = []
        self.option_sets: List[SwiftOptionSet] = []
        self.enums: List[SwiftEnum] = []
class Importer:
def __init__(self, c_context: CContext):
self.c_context = c_context
self.swift_context = SwiftContext()
self.imported_enums: Dict[str, str] = {}
self.imported_option_sets: Dict[str, str] = {}
self.imported_option_set_bits: Dict[str, str] = {}
self.imported_structs: Dict[str, SwiftStruct] = {}
self.imported_classes: Dict[str, SwiftClass] = {}
self.imported_aliases: Dict[str, SwiftAlias] = {}
self.pointer_types = [handle.name for handle in c_context.handles] + [alias.name for alias in c_context.aliases]
self.c_structs = {c_struct.name: c_struct for c_struct in c_context.structs}
def import_all(self) -> SwiftContext:
for enum in self.c_context.enums:
self.import_enum(enum)
for bitmask in self.c_context.bitmasks:
self.import_bitmask(bitmask)
for handle in self.c_context.handles:
if not handle.protect:
self.import_handle(handle)
for alias in self.c_context.aliases:
if not alias.protect:
self.import_alias(alias)
for struct in self.c_context.structs:
if struct.name not in ('VkBaseInStructure', 'VkBaseOutStructure'):
self.import_struct(struct)
for command in self.c_context.commands:
self.import_command(command)
return self.swift_context
def import_enum(self, c_enum: CEnum) -> SwiftEnum:
swift_enum = SwiftEnum(
name=remove_vk_prefix(c_enum.name),
cases=[],
c_enum=c_enum,
raw_type='UInt32',
error=c_enum.name == 'VkResult'
)
prefix, enum_tag = self.pop_extension_tag(swift_enum.name)
starts_with_digit = False
for case in c_enum.cases:
name = remove_vk_prefix(case.name)
name, tag = self.pop_extension_tag(name)
name = snake_to_pascal(name)
if name.startswith(prefix):
name = name[len(prefix):]
if not name:
name = tag.lower()
elif tag and tag != enum_tag:
name += tag
name = name[0].lower() + name[1:]
if name[0].isdigit():
starts_with_digit = True
try:
if int(case.value) < 0:
swift_enum.raw_type = 'Int32'
except ValueError:
pass
swift_enum.cases.append(SwiftEnum.Case(name=name, value=case.value))
if starts_with_digit:
for case in swift_enum.cases:
case.name = 'type' + case.name[0].upper() + case.name[1:]
self.swift_context.enums.append(swift_enum)
self.imported_enums[c_enum.name] = swift_enum.name
return swift_enum
def import_bitmask(self, c_bitmask: CBitmask) -> SwiftOptionSet:
    """Convert a C flag-bits enum into a SwiftOptionSet.

    Mirrors import_enum's case renaming, additionally stripping the
    'Flags' suffix from the prefix and the 'Bit' suffix from each case.
    """
    option_set = SwiftOptionSet(
        name=remove_vk_prefix(c_bitmask.name),
        cases=[],
        c_bitmask=c_bitmask,
        raw_type='UInt32'
    )
    # Some bitmask typedefs have no backing enum (reserved-for-future flags).
    if c_bitmask.enum:
        prefix, enum_tag = self.pop_extension_tag(option_set.name)
        if prefix.endswith('Flags'):
            # Case names repeat the type name without the 'Flags' suffix.
            prefix = prefix[:-5]
        starts_with_digit = False
        for case in c_bitmask.enum.cases:
            name = remove_vk_prefix(case.name)
            name, tag = self.pop_extension_tag(name)
            name = snake_to_pascal(name)
            if name.startswith(prefix):
                name = name[len(prefix):]
            if name.endswith('Bit'):
                name = name[:-3]
            if not name:
                # Case reduced to nothing: fall back to the tag itself.
                name = tag.lower()
            elif tag and tag != enum_tag:
                name += tag
            name = name[0].lower() + name[1:]
            if name[0].isdigit():
                starts_with_digit = True
            option_set.cases.append(SwiftOptionSet.Case(name=name, value=case.value))
        if starts_with_digit:
            # Swift identifiers cannot begin with a digit.
            for case in option_set.cases:
                case.name = 'type' + case.name[0].upper() + case.name[1:]
        self.imported_option_set_bits[c_bitmask.enum.name] = option_set.name
    self.swift_context.option_sets.append(option_set)
    self.imported_option_sets[c_bitmask.name] = option_set.name
    return option_set
def import_struct(self, c_struct: CStruct) -> SwiftStruct:
    """Convert a C struct to a SwiftStruct, importing member structs first.

    Tracks whether the Swift value can be rebuilt from the raw C struct
    (convertible_from_c_struct) and which wrapper class, if any, must act
    as a parent when members reference handle types.
    """
    if c_struct.name in self.imported_structs:
        return self.imported_structs[c_struct.name]
    name = remove_vk_prefix(c_struct.name)
    convertible_from_c_struct = True
    parent_class: SwiftClass = None
    for member in c_struct.members:
        type_name = member.type.type_name
        if type_name in self.c_structs:
            # Recurse into member structs; inherit their convertibility
            # and any parent class they require.
            child_struct = self.import_struct(self.c_structs[type_name])
            if convertible_from_c_struct:
                convertible_from_c_struct = child_struct.convertible_from_c_struct
            if child_struct.parent_class:
                parent_class = child_struct.parent_class
        elif convertible_from_c_struct:
            if type_name in self.imported_aliases:
                # Resolve the alias to the underlying handle name.
                type_name = self.imported_aliases[type_name].c_alias.alias
            if type_name in ('VkPhysicalDevice', 'VkDisplayKHR', 'VkDisplayModeKHR'):
                # These handles can be wrapped given their parent object.
                parent_class = self.imported_classes[type_name].parent
            elif type_name in self.imported_classes:
                # Other class-typed members cannot be rebuilt from plain C data.
                convertible_from_c_struct = False
    members, conversions = self.get_member_conversions(c_struct.members, c_struct=c_struct)
    struct = SwiftStruct(c_struct=c_struct,
                         name=name,
                         members=members,
                         member_conversions=conversions,
                         convertible_from_c_struct=convertible_from_c_struct,
                         parent_class=parent_class)
    self.swift_context.structs.append(struct)
    self.imported_structs[c_struct.name] = struct
    return struct
def import_entry(self) -> SwiftClass:
    """Create (or return the cached) synthetic Entry/Loader class pair.

    Entry is the root wrapper from which instances are created; Loader is
    its parent and also serves as Entry's command dispatcher.
    """
    cached = self.imported_classes.get('entry')
    if cached is not None:
        return cached
    table = DispatchTable('EntryDispatchTable', ('vkGetInstanceProcAddr', 'PFN_vkGetInstanceProcAddr'))
    loader_cls = SwiftClass(name='Loader', reference_name='loader')
    entry_cls = SwiftClass(name='Entry', reference_name='entry', parent=loader_cls,
                           dispatch_table=table, dispatcher=loader_cls)
    self.swift_context.dispatch_tables.append(table)
    self.swift_context.classes.append(entry_cls)
    self.imported_classes['entry'] = entry_cls
    return entry_cls
def import_handle(self, handle: CHandle) -> SwiftClass:
    """Convert a Vulkan handle into a wrapper SwiftClass.

    Wires up the parent chain (Entry -> Instance -> ...) and creates the
    dispatch tables for VkInstance and VkDevice.
    """
    if handle.name in self.imported_classes:
        return self.imported_classes[handle.name]
    name = remove_vk_prefix(handle.name)
    # Property name on the parent: drop the vendor tag, lowercase first letter.
    reference_name, _ = self.pop_extension_tag(name)
    reference_name = reference_name[0].lower() + reference_name[1:]
    if handle.name == 'VkInstance':
        # Instance's conceptual parent is the synthetic Entry class.
        parent = self.import_entry()
    elif handle.name == 'VkSwapchainKHR':
        # Special case: the swapchain is owned by the device rather than
        # by the surface it is created from.
        parent = self.imported_classes['VkDevice']
    else:
        parent = self.import_handle(handle.parent) if handle.parent else None
    if handle.name == 'VkInstance':
        dispatch_table = DispatchTable('InstanceDispatchTable',
                                       ('vkGetInstanceProcAddr', 'PFN_vkGetInstanceProcAddr'),
                                       ('instance', 'VkInstance'))
        # Instance-level commands are loaded through the Loader (Entry's parent).
        dispatcher = self.imported_classes['entry'].parent
    elif handle.name == 'VkDevice':
        dispatch_table = DispatchTable('DeviceDispatchTable',
                                       ('vkGetDeviceProcAddr', 'PFN_vkGetDeviceProcAddr'),
                                       ('device', 'VkDevice'))
        dispatcher = self.imported_classes['VkInstance']
    else:
        # All other handles dispatch through their owning instance/device.
        dispatch_table = None
        dispatcher = None
    if dispatch_table:
        self.swift_context.dispatch_tables.append(dispatch_table)
    cls = SwiftClass(
        c_handle=handle,
        name=name,
        reference_name=reference_name,
        parent=parent,
        dispatch_table=dispatch_table,
        dispatcher=dispatcher
    )
    self.swift_context.classes.append(cls)
    self.imported_classes[handle.name] = cls
    return cls
def import_command(self, c_command: CCommand) -> SwiftCommand:
    """Convert a C command into a SwiftCommand attached to its class.

    Derives the Swift method name, turns VkResult returns into `throws`,
    detects the single-output and enumeration (count + array) parameter
    patterns, and records the command in its dispatch table.
    """
    class_params_and_classes = self.get_class_params(c_command)
    # Receiver = innermost leading class parameter; free functions go on Entry.
    current_class = class_params_and_classes[-1][1] if class_params_and_classes \
        else self.imported_classes['entry']
    class_name_without_extension, _ = self.pop_extension_tag(current_class.name)
    name = remove_vk_prefix(c_command.name)
    # Drop the class name embedded in the command name (e.g. DestroyDevice -> destroy).
    name = re.sub(f'({class_name_without_extension})([A-Z]\w*)?$', r'\2', name)
    name = name[0].lower() + name[1:]
    if name.startswith('enumerate'):
        name = 'get' + name[9:]
    c_return_type = c_command.return_type
    throws = False
    if c_return_type.name == 'VkResult':
        # VkResult maps to Swift `throws`; the command then returns Void
        # unless an output parameter is promoted below.
        throws = True
        c_return_type = CType(name='void')
    return_type, return_conversion = self.get_type_conversion(c_return_type, force_optional=True)
    output_param: str = None
    output_param_implicit_type: str = None
    unwrap_output_param = False
    enumeration_pointer_param: str = None
    enumeration_count_param: str = None
    if c_return_type.name == 'void':
        output_params = get_output_params(c_command)
        if len(output_params) == 1:
            # Promote a single writable pointer parameter to the return value.
            if c_command.name == 'vkEnumerateInstanceVersion':
                output_param = output_params[0].name
                return_type, return_conversion = 'Version', tc.version_conversion
                output_param_implicit_type = 'UInt32'
            elif is_array_convertible(output_params[0].type, ignore_const=True):
                output_param = output_params[0].name
                return_type, return_conversion = self.get_array_conversion(output_params[0].type)
                output_param_implicit_type, _ = self.get_type_conversion(output_params[0].type.pointer_to,
                                                                         implicit_only=True, force_optional=True)
            elif not output_params[0].type.length:
                output_param = output_params[0].name
                return_type, return_conversion = self.get_type_conversion(output_params[0].type.pointer_to,
                                                                          force_optional=False)
                output_param_implicit_type, _ = self.get_type_conversion(output_params[0].type.pointer_to,
                                                                         implicit_only=True, force_optional=False)
                unwrap_output_param = self.is_pointer_type(output_params[0].type.pointer_to)
        elif len(output_params) == 2 and output_params[1].type.length == output_params[0].name:
            # Enumeration pattern: (count pointer, array pointer) pair.
            enumeration_pointer_param = output_params[1].name
            enumeration_count_param = output_params[0].name
            return_type, return_conversion = self.get_array_conversion(output_params[1].type, force_optional=False)
    class_params = [param for param, _ in class_params_and_classes]
    output_params = (output_param, enumeration_pointer_param, enumeration_count_param)
    # Inputs = everything that is neither an output nor a leading class param.
    c_input_params = [param for param in c_command.params if param.name not in output_params]
    c_input_params = class_params + c_input_params[len(class_params):]
    params, conversions = self.get_member_conversions(c_input_params, c_command=c_command)
    dispatcher = self.get_dispatcher(c_command)
    if dispatcher.dispatch_table:
        dispatcher.dispatch_table.commands.append(c_command)
    command = SwiftCommand(
        c_command=c_command,
        name=remove_vk_prefix(name),
        return_type=return_type,
        throws=throws,
        class_params={param.name: cls for param, cls in class_params_and_classes},
        params=params[len(class_params):],
        param_conversions=conversions,
        return_conversion=return_conversion,
        output_param=output_param,
        output_param_implicit_type=output_param_implicit_type,
        unwrap_output_param=unwrap_output_param,
        enumeration_pointer_param=enumeration_pointer_param,
        enumeration_count_param=enumeration_count_param,
        dispatcher=dispatcher
    )
    current_class.commands.append(command)
    return command
def import_alias(self, c_alias: CAlias) -> SwiftAlias:
    """Register a Swift typealias for an aliased C handle name."""
    target_class_name = self.imported_classes[c_alias.alias].name
    swift_alias = SwiftAlias(c_alias, remove_vk_prefix(c_alias.name), target_class_name)
    self.swift_context.aliases.append(swift_alias)
    self.imported_aliases[c_alias.name] = swift_alias
    return swift_alias
def get_dispatcher(self, command: CCommand) -> SwiftClass:
    """Pick the class whose dispatch table loads *command*.

    The two proc-address loaders are special-cased; otherwise the first
    parameter's class chain is searched for an instance/device, falling
    back to the Entry class for global commands.
    """
    command_name = command.name
    if command_name == 'vkGetInstanceProcAddr':
        return self.imported_classes['entry'].parent
    if command_name == 'vkGetDeviceProcAddr':
        return self.imported_classes['VkInstance']
    first_param = command.params[0] if command.params else None
    if first_param is not None:
        type_name = first_param.type.name
        if type_name and type_name in self.imported_classes:
            candidate = self.imported_classes[type_name]
            # Walk the candidate itself, then its ancestor chain.
            for owner in [candidate, *candidate.ancestors]:
                if owner.c_handle.name in ('VkInstance', 'VkDevice'):
                    return owner
    return self.imported_classes['entry']
def get_class_params(self, command: CCommand) -> List[Tuple[CMember, SwiftClass]]:
    """Return the leading run of handle parameters that map to wrapper classes.

    Walks params from the front and stops at the first parameter that is
    not a known class or that breaks the ancestor chain. Optional handles
    are skipped, except for destroy/free commands where the handle is the
    object being acted on.
    """
    class_params: List[Tuple[CMember, SwiftClass]] = []
    previous_class: SwiftClass = None
    for param in command.params:
        if param.type.name and param.type.name in self.imported_classes:
            if not param.type.optional or command.name.startswith(('vkDestroy', 'vkFree')):
                cls = self.imported_classes[param.type.name]
                # Each class param must descend from the previous one.
                if not previous_class or previous_class in cls.ancestors:
                    previous_class = cls
                    class_params.append((CMember(param.name, CType(param.type.name)), cls))
                    continue
        # First non-qualifying parameter ends the leading run.
        break
    return class_params
def get_member_conversions(self, c_members: List[CMember], c_struct: CStruct = None, c_command: CCommand = None
                           ) -> Tuple[List[SwiftMember], tc.MemberConversions]:
    """Map C struct members / command params to Swift members + conversions.

    Array-length members are folded into their arrays, members with a
    single legal value (sType) and pNext/pAllocator are pinned to static
    values, and a few structs with fixed-size arrays get hand-written
    conversions.
    """
    members: List[SwiftMember] = []
    conversions = tc.MemberConversions()
    # Names of members that only exist to carry an array length; they are
    # implied by the Swift arrays and dropped from the interface.
    lengths: List[str] = []
    for c_member in c_members:
        if is_array_convertible(c_member.type):
            lengths.append(c_member.type.length)
    if c_struct:
        # why aren't these specified in the Vulkan spec?
        if c_struct.name == 'VkPhysicalDeviceMemoryProperties':
            lengths += ['memoryTypeCount', 'memoryHeapCount']
        elif c_struct.name == 'VkPhysicalDeviceGroupProperties':
            lengths.append('physicalDeviceCount')
    for c_member in c_members:
        if c_member.name in lengths:
            continue
        if len(c_member.values) == 1:
            # Single legal value (e.g. sType): hard-code it.
            conversions.add_static_value(c_member.name, c_member.values[0])
            continue
        if c_command and c_member.name == 'pAllocator':
            # Custom host allocators are not exposed to Swift callers.
            conversions.add_static_value(c_member.name, 'nil')
            continue
        if c_struct and c_member.name == 'pNext':
            # Extension chains are not exposed to Swift callers.
            conversions.add_static_value(c_member.name, 'nil')
            continue
        if (c_struct and (
                (c_struct.name in ('VkPhysicalDeviceProperties', 'VkApplicationInfo')
                 and c_member.name == 'apiVersion')
                or (c_struct.name == 'VkLayerProperties' and c_member.name == 'specVersion')
        )):
            # Packed version numbers get the dedicated Version type.
            swift_type, conversion = 'Version', tc.version_conversion
        elif c_struct and c_struct.name == 'VkPhysicalDeviceMemoryProperties' and c_member.name == 'memoryTypes':
            # Fixed-size C array whose valid length lives in a count member.
            swift_type, conversion = 'Array<MemoryType>', tc.tuple_array_conversion(
                tc.struct_array_conversion('MemoryType', 'memoryTypeCount'), 'VkMemoryType', c_member.type.length)
        elif c_struct and c_struct.name == 'VkPhysicalDeviceMemoryProperties' and c_member.name == 'memoryHeaps':
            swift_type, conversion = 'Array<MemoryHeap>', tc.tuple_array_conversion(
                tc.struct_array_conversion('MemoryHeap', 'memoryHeapCount'), 'VkMemoryHeap', c_member.type.length)
        elif c_struct and c_struct.name == 'VkPhysicalDeviceGroupProperties' and c_member.name == 'physicalDevices':
            swift_type, conversion = 'Array<PhysicalDevice>', tc.tuple_array_conversion(
                tc.array_mapped_conversion(
                    tc.class_conversion('PhysicalDevice', 'instance'), 'physicalDeviceCount'
                ), 'VkPhysicalDevice?', c_member.type.length
            )
        else:
            # Generic path; command params convert fixed arrays to pointers.
            swift_type, conversion = self.get_type_conversion(c_member.type,
                                                             convert_array_to_pointer=c_command is not None)
        swift_name = get_member_name(c_member.name, c_member.type)
        # Required PFN_ members are exposed as Swift closures.
        is_closure = c_member.type.name and c_member.type.name.startswith('PFN_') and not c_member.type.optional
        member = SwiftMember(name=swift_name, type_=swift_type, is_closure=is_closure)
        members.append(member)
        conversions.add_conversion(c_member.name, swift_name, conversion)
    return members, conversions
def get_type_conversion(self, c_type: CType, implicit_only: bool = False, force_optional: bool = None,
                        convert_array_to_pointer: bool = False) -> Tuple[str, tc.Conversion]:
    """Map a C type to a (Swift type string, value conversion) pair.

    implicit_only restricts results to types needing no wrapping (used
    when spelling raw pointer element types); force_optional overrides the
    C type's own optionality; convert_array_to_pointer emits
    tuple-to-pointer conversions for fixed-size array command parameters.
    """
    optional = force_optional if force_optional is not None else c_type.optional
    if c_type.name:
        # Named (non-pointer, non-array) type.
        if c_type.name in tc.IMPLICIT_TYPE_MAP:
            return tc.IMPLICIT_TYPE_MAP[c_type.name], tc.implicit_conversion
        if not implicit_only:
            if c_type.name == 'VkBool32':
                return 'Bool', tc.bool_conversion
            if c_type.name in self.imported_enums:
                swift_enum = self.imported_enums[c_type.name]
                return swift_enum, tc.enum_conversion(c_type.name, swift_enum)
            if c_type.name in self.imported_option_sets:
                option_set = self.imported_option_sets[c_type.name]
                return option_set, tc.option_set_conversion(option_set)
            if c_type.name in self.imported_option_set_bits:
                option_set = self.imported_option_set_bits[c_type.name]
                return option_set, tc.option_set_bit_conversion(c_type.name, option_set)
            if c_type.name in self.imported_structs:
                swift_struct = self.imported_structs[c_type.name]
                parent_name = swift_struct.parent_class.reference_name if swift_struct.parent_class else None
                return swift_struct.name, tc.struct_conversion(swift_struct.name, parent_name)
            # Resolve handle aliases to the underlying class, but keep the
            # alias's Swift name as the declared type.
            alias = self.imported_aliases.get(c_type.name)
            c_name = alias.c_alias.alias if alias else c_type.name
            if c_name in self.imported_classes:
                cls = self.imported_classes[c_name]
                cls_name = alias.name if alias else cls.name
                parent_name = cls.parent.reference_name if cls.parent else None
                if optional:
                    return cls_name + '?', tc.optional_class_conversion(cls_name, parent_name)
                else:
                    return cls_name, tc.class_conversion(cls_name, parent_name)
        # Fallback: pass the C name through unchanged.
        swift_type = c_type.name
        if self.is_pointer_type(c_type) and optional:
            swift_type += '?'
        return swift_type, tc.implicit_conversion
    elif c_type.pointer_to:
        if c_type.pointer_to.name == 'void':
            # void* maps to raw pointers; constness picks the (im)mutable form.
            swift_type = 'UnsafeRawPointer' if c_type.pointer_to.const else 'UnsafeMutableRawPointer'
            if optional:
                swift_type += '?'
            return swift_type, tc.implicit_conversion
        if not implicit_only and c_type.pointer_to.const:
            if is_string_convertible(c_type):
                if optional:
                    return 'String?', tc.optional_string_conversion
                else:
                    return 'String', tc.string_conversion
            if is_array_convertible(c_type):
                return self.get_array_conversion(c_type)
            if c_type.pointer_to.name and not c_type.length and c_type.pointer_to.name in self.imported_structs:
                # Pointer to a single struct (no length attribute).
                swift_struct = self.imported_structs[c_type.pointer_to.name]
                parent_name = swift_struct.parent_class.reference_name if swift_struct.parent_class else None
                if optional:
                    return swift_struct.name + '?', tc.optional_struct_conversion(swift_struct.name, parent_name)
                else:
                    return swift_struct.name, tc.struct_pointer_conversion(swift_struct.name, parent_name)
        # Anything else stays a typed Unsafe(Mutable)Pointer.
        to_type, _ = self.get_type_conversion(c_type.pointer_to, implicit_only=True, force_optional=True)
        swift_type = f'UnsafePointer<{to_type}>' if c_type.pointer_to.const else f'UnsafeMutablePointer<{to_type}>'
        if optional:
            swift_type += '?'
        return swift_type, tc.implicit_conversion
    elif c_type.array_of:
        # Fixed-size C arrays are imported as Swift tuples.
        if c_type.array_of.name == 'char':
            return 'String', tc.char_array_conversion
        of_type, _ = self.get_type_conversion(c_type.array_of, implicit_only=True, force_optional=True)
        swift_type = f'({", ".join([of_type] * c_type.length)})'
        if convert_array_to_pointer:
            return swift_type, tc.tuple_pointer_conversion(of_type)
        else:
            return swift_type, tc.implicit_conversion
def get_array_conversion(self, c_type: CType, force_optional: bool = None) -> Tuple[str, tc.ArrayConversion]:
    """Map a C pointer-with-length type to a (Swift Array type, conversion).

    Tries, in order: arrays of strings, arrays of imported structs, arrays
    of elements with a non-trivial conversion, then arrays of implicitly
    convertible elements.
    """
    optional = force_optional if force_optional is not None else c_type.optional
    if is_string_convertible(c_type.pointer_to) and not optional:
        return 'Array<String>', tc.string_array_conversion(c_type.length)
    if c_type.pointer_to.name and c_type.pointer_to.name in self.imported_structs:
        swift_struct = self.imported_structs[c_type.pointer_to.name]
        parent_name = swift_struct.parent_class.reference_name if swift_struct.parent_class else None
        if not optional:
            return f'Array<{swift_struct.name}>', \
                tc.struct_array_conversion(swift_struct.name, c_type.length, parent_name)
        else:
            return f'Array<{swift_struct.name}>?', \
                tc.optional_struct_array_conversion(swift_struct.name, c_type.length, parent_name)
    if c_type.pointer_to.name:
        # Elements with a custom conversion are mapped element by element.
        element_type, element_conversion = self.get_type_conversion(c_type.pointer_to)
        if element_conversion != tc.implicit_conversion:
            if not optional:
                return f'Array<{element_type}>', \
                    tc.array_mapped_conversion(element_conversion, c_type.length)
            else:
                return f'Array<{element_type}>?', \
                    tc.optional_array_mapped_conversion(element_conversion, c_type.length)
    # Implicitly convertible elements: copy the buffer straight across.
    element_type, _ = self.get_type_conversion(c_type.pointer_to, implicit_only=True, force_optional=True)
    if not optional:
        return f'Array<{element_type}>', tc.array_conversion(c_type.length)
    else:
        return f'Array<{element_type}>?', tc.optional_array_conversion(c_type.length)
def is_pointer_type(self, c_type: CType) -> bool:
    """Whether *c_type* is pointer-like: an explicit pointer, a known
    handle/alias name, or a PFN_ function-pointer typedef.

    Always returns a real bool; the original `and`/`or` expression could
    leak None or '' through short-circuiting despite the -> bool
    annotation (truthiness is unchanged).
    """
    if c_type.pointer_to is not None:
        return True
    name = c_type.name
    return bool(name) and (name in self.pointer_types or name.startswith('PFN_'))
def pop_extension_tag(self, string: str) -> Tuple[str, Optional[str]]:
    """Split a trailing vendor/extension tag (KHR, EXT, ...) off *string*.

    Returns (base, tag) for the first matching tag, with separating
    underscores stripped from the base; (string, None) when none matches.
    """
    tag = next((t for t in self.c_context.extension_tags if string.endswith(t)), None)
    if tag is None:
        return string, None
    return string[:-len(tag)].rstrip('_'), tag
def get_output_params(command: 'CCommand') -> 'List[CMember]':
    """Return the parameters a command writes through: non-const,
    non-void pointer parameters.

    Uses a comprehension instead of the original append loop; annotations
    are forward references so the function defines cleanly even before
    the C model types are imported.
    """
    return [
        param for param in command.params
        if param.type.pointer_to
        and not param.type.pointer_to.const
        and param.type.pointer_to.name != 'void'
    ]
def is_string_convertible(type_: 'CType') -> bool:
    """True if *type_* is a const char* with 'null-terminated' length,
    i.e. it maps to a Swift String.

    Normalized with bool() so the function honors its -> bool annotation
    (the original chained expression could return None or another
    non-bool falsy value); truthiness is unchanged.
    """
    return bool(type_.pointer_to and type_.pointer_to.name == 'char' and type_.length == 'null-terminated'
                and type_.pointer_to.const)
def is_array_convertible(type_: 'CType', ignore_const: bool = False) -> bool:
    """True if *type_* is a pointer-with-length that maps to a Swift Array:
    a (const, unless ignore_const) pointer to non-void with a plain length
    attribute (not 'null-terminated', not a latexmath expression).

    Normalized with bool() so the function honors its -> bool annotation
    (the original chained expression could return None/'' through
    short-circuiting); truthiness is unchanged.
    """
    return bool(type_.pointer_to and (type_.pointer_to.const or ignore_const)
                and type_.length and type_.length != 'null-terminated' and 'latexmath' not in type_.length
                and type_.pointer_to.name != 'void')
def remove_vk_prefix(string: str) -> str:
    """Strip a leading 'Vk'/'VK'/'vk' prefix, then any leading underscores."""
    if string[:2].lower() == 'vk':
        return string[2:].lstrip('_')
    return string.lstrip('_')
def snake_to_pascal(string: str) -> str:
    """Convert SNAKE_CASE / snake_case to PascalCase.

    Empty segments produced by doubled, leading or trailing underscores
    are skipped instead of raising IndexError (the original indexed
    part[0] unconditionally).
    """
    return ''.join(part[0].upper() + part[1:]
                   for part in string.lower().split('_') if part)
def snake_to_camel(string: str) -> str:
    """Convert SNAKE_CASE / snake_case to lowerCamelCase."""
    # Pascal-case each underscore-separated word, then lower the first letter.
    words = [word[0].upper() + word[1:] for word in string.lower().split('_')]
    pascal = ''.join(words)
    return pascal[0].lower() + pascal[1:]
def get_member_name(c_name: str, c_type: CType) -> str:
    """Drop Hungarian 'p' prefixes, one per pointer level (pNext -> next)."""
    name, node = c_name, c_type
    # Iterative form of the original recursion: peel one 'p' per pointer level.
    while node.pointer_to and name.startswith('p'):
        name = name[1].lower() + name[2:]
        node = node.pointer_to
    return name
|
def calculate_area(rc):
    """Return the area of a rectangle-like object exposing width and height."""
    width, height = rc.width, rc.height
    return width * height
|
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset, sampler
from scipy.ndimage.filters import gaussian_filter
class MRIDataset(Dataset):
    """Dataset of MRI organized in a CAPS folder."""

    def __init__(self, img_dir, data_file, preprocessing='linear', transform=None):
        """
        Args:
            img_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
            preprocessing (string): Defines the path to the data in CAPS
                ('linear' or 'mni').
            transform (callable, optional): Optional transform to be applied on a sample.
        """
        self.img_dir = img_dir
        self.transform = transform
        # Binary label mapping: sMCI grouped with CN (0), pMCI/MCI with AD (1),
        # unlabeled samples marked -1.
        self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1, 'unlabeled': -1}
        self.data_path = preprocessing
        # Check the format of the tsv file here
        if isinstance(data_file, str):
            self.df = pd.read_csv(data_file, sep='\t')
        elif isinstance(data_file, pd.DataFrame):
            self.df = data_file
        else:
            raise Exception('The argument datafile is not of correct type.')
        if ('diagnosis' not in list(self.df.columns.values)) or ('session_id' not in list(self.df.columns.values)) or \
                ('participant_id' not in list(self.df.columns.values)):
            raise Exception("the data file is not in the correct format."
                            "Columns should include ['participant_id', 'session_id', 'diagnosis']")
        # NOTE(review): this loads the first image from disk just to record the
        # flattened voxel count -- confirm the extra I/O at construction is intended.
        self.size = self[0]['image'].numpy().size

    def __len__(self):
        # One sample per row of the tsv.
        return len(self.df)

    def __getitem__(self, idx):
        img_name = self.df.loc[idx, 'participant_id']
        img_label = self.df.loc[idx, 'diagnosis']
        sess_name = self.df.loc[idx, 'session_id']
        # Not in BIDS but in CAPS
        if self.data_path == "linear":
            image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
                                   't1', 'preprocessing_dl',
                                   img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
        elif self.data_path == "mni":
            image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
                                   't1', 'spm', 'segmentation', 'normalized_space',
                                   img_name + '_' + sess_name + '_space-Ixi549Space_T1w.pt')
        else:
            raise NotImplementedError("The data path %s is not implemented" % self.data_path)
        # Images are stored as serialized torch tensors (.pt files).
        image = torch.load(image_path)
        label = self.diagnosis_code[img_label]
        if self.transform:
            image = self.transform(image)
        sample = {'image': image, 'label': label, 'participant_id': img_name, 'session_id': sess_name,
                  'image_path': image_path}
        return sample

    def session_restriction(self, session):
        """
        Allows to generate a new MRIDataset using some specific sessions only (mostly used for evaluation of test)

        :param session: (str) the session wanted. Must be 'all' or 'ses-MXX'
        :return: (DataFrame) the dataset with the wanted sessions
        """
        from copy import copy
        # Shallow copy: shares img_dir/transform with self, but gets its own
        # filtered df below.
        data_output = copy(self)
        if session == "all":
            return data_output
        else:
            df_session = self.df[self.df.session_id == session]
            df_session.reset_index(drop=True, inplace=True)
            data_output.df = df_session
            if len(data_output) == 0:
                raise Exception("The session %s doesn't exist for any of the subjects in the test data" % session)
            return data_output
class GaussianSmoothing(object):
    """Transform that replaces NaNs in place and gaussian-smooths sample['image']."""

    def __init__(self, sigma):
        # Standard deviation of the gaussian kernel, forwarded to scipy.
        self.sigma = sigma

    def __call__(self, sample):
        raw = sample['image']
        # Scrub NaNs in place first; scipy's filter would propagate them.
        np.nan_to_num(raw, copy=False)
        sample['image'] = gaussian_filter(raw, sigma=self.sigma)
        return sample
class ToTensor(object):
    """Convert image type to Tensor and diagnosis to diagnosis code"""

    def __call__(self, image):
        # Replace NaNs in place, then promote to float64 before conversion.
        np.nan_to_num(image, copy=False)
        as_float = image.astype(float)
        # Prepend a channel dimension and downcast to float32.
        return torch.from_numpy(as_float[np.newaxis, :]).float()
class MinMaxNormalization(object):
    """Normalizes a tensor between 0 and 1"""

    def __call__(self, image):
        lo = image.min()
        hi = image.max()
        return (image - lo) / (hi - lo)
def load_data(train_val_path, diagnoses_list, split, n_splits=None, baseline=True):
    """Load the train and validation DataFrames for a set of diagnoses.

    Args:
        train_val_path (str): Root folder containing the train/validation tsv files.
        diagnoses_list (list of str): Diagnoses whose tsv files are concatenated.
        split (int): Index of the cross-validation split (ignored when n_splits is None).
        n_splits (int, optional): Number of CV splits; None selects the flat
            train/validation layout.
        baseline (bool): If True, training uses only baseline sessions.

    Returns:
        (pd.DataFrame, pd.DataFrame): train and validation data, re-indexed.
    """
    if n_splits is None:
        train_path = path.join(train_val_path, 'train')
        valid_path = path.join(train_val_path, 'validation')
    else:
        train_path = path.join(train_val_path, 'train_splits-' + str(n_splits),
                               'split-' + str(split))
        valid_path = path.join(train_val_path, 'validation_splits-' + str(n_splits),
                               'split-' + str(split))
    print("Train", train_path)
    print("Valid", valid_path)
    train_frames = []
    valid_frames = []
    for diagnosis in diagnoses_list:
        # Training may use all sessions or baseline only; validation is
        # always evaluated on baseline sessions.
        train_suffix = '_baseline.tsv' if baseline else '.tsv'
        train_frames.append(pd.read_csv(path.join(train_path, diagnosis + train_suffix), sep='\t'))
        valid_frames.append(pd.read_csv(path.join(valid_path, diagnosis + '_baseline.tsv'), sep='\t'))
    # Concatenate once at the end: appending to a DataFrame inside the loop
    # copies the accumulated frame on every iteration (quadratic). Guard
    # against an empty diagnosis list, where pd.concat([]) would raise.
    train_df = pd.concat(train_frames, ignore_index=True) if train_frames else pd.DataFrame()
    valid_df = pd.concat(valid_frames, ignore_index=True) if valid_frames else pd.DataFrame()
    return train_df, valid_df
def load_data_test(test_path, diagnoses_list):
    """Load the test DataFrame (baseline sessions only) for a set of diagnoses.

    Args:
        test_path (str): Folder containing '<diagnosis>_baseline.tsv' files.
        diagnoses_list (list of str): Diagnoses to concatenate.

    Returns:
        pd.DataFrame: concatenated test data, re-indexed from 0.
    """
    frames = [pd.read_csv(path.join(test_path, diagnosis + '_baseline.tsv'), sep='\t')
              for diagnosis in diagnoses_list]
    # Single concat avoids quadratic copying; an empty diagnosis list yields
    # an empty frame instead of pd.concat([]) raising.
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
|
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from typing import Iterator
from pycatia.space_analyses_interfaces.inertia import Inertia
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.collection import Collection
from pycatia.types.general import cat_variant
class Inertias(Collection):
    """
    .. note::
        :class: toggle

        CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
            | System.IUnknown
            |     System.IDispatch
            |         System.CATBaseUnknown
            |             System.CATBaseDispatch
            |                 System.Collection
            |                     Inertias
            |
            | A collection of all inertia objects currently managed by the
            | application.
            |
            | WARNING: this collection will be DEPRECATED in the next release. It is
            | recommended to use the method GetTechnologicalObject("Inertia") on the product
            | to analyze, to retrieve an Inertia object.
    """

    def __init__(self, com_object):
        # Items retrieved from the collection are wrapped as Inertia objects.
        super().__init__(com_object, child_object=Inertia)
        # Keep a direct reference to the underlying COM collection.
        self.inertias = com_object

    def add(self, i_object: AnyObject) -> Inertia:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Func Add(AnyObject iObject) As Inertia
                |
                | Creates an Inertia object from an object and adds it to the Inertias
                | collection.
                |
                | Parameters:
                |
                |     iObject
                |         The Object
                |
                | Returns:
                |     The created Inertia
                | Example:
                |
                |     This example creates a new Inertia from a product
                |     TheProduct
                |     in the TheInertias collection.
                |
                |
                |     Dim NewInertia As Inertia
                |     Set NewInertia = TheInertias.Add(TheProduct)

        :param AnyObject i_object:
        :return: Inertia
        :rtype: Inertia
        """
        # Forward the wrapped COM object, then wrap the COM result.
        return Inertia(self.inertias.Add(i_object.com_object))

    def item(self, i_index: cat_variant) -> Inertia:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Func Item(CATVariant iIndex) As Inertia
                |
                | Returns an Inertia object using its index or its name from the Inertias
                | collection.
                |
                | Parameters:
                |
                |     iIndex
                |         The index or the name of the Inertia to retrieve from the
                |         collection of inertias. As a numerics, this index is the rank of the Inertia in
                |         the collection. The index of the first Inertia in the collection is 1, and the
                |         index of the last Inertia is Count. As a string, it is the name you assigned to
                |         the Inertia.
                |
                | Example:
                |
                |     This example retrieves in ThisInertia the ninth
                |     Inertia,
                |     and in ThatInertia the Inertia named
                |     Inertia Of MyProduct from the TheInertias collection.
                |
                |
                |
                |     Dim ThisInertia As Inertia
                |     Set ThisInertia = TheInertias.Item(9)
                |     Dim ThatInertia As Inertia
                |     Set ThatInertia = TheInertias.Item("Inertia Of MyProduct")

        :param cat_variant i_index:
        :return: Inertia
        :rtype: Inertia
        """
        # COM collections are 1-indexed; i_index may be a rank or a name.
        return Inertia(self.inertias.Item(i_index))

    def remove(self, i_index: cat_variant) -> None:
        """
        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Sub Remove(CATVariant iIndex)
                |
                | Removes an Inertia object from the Inertias collection.
                |
                | Parameters:
                |
                |     iIndex
                |         The index or the name of the Inertia to remove from the collection
                |         of inertias. As a numerics, this index is the rank of the Inertia in the
                |         collection. The index of the first Inertia in the collection is 1, and the
                |         index of the last Inertia is Count. As a string, it is the name you assigned to
                |         the Inertia.
                |
                | Example:
                |
                |     The following example removes the tenth Inertia and the Inertia
                |     named
                |     Inertia Of MyProduct from the TheInertias
                |     collection.
                |
                |
                |     TheInertias.Remove(10)
                |     TheInertias.Remove("Inertia Of MyProduct")

        :param cat_variant i_index:
        :return: None
        :rtype: None
        """
        return self.inertias.Remove(i_index)

    def __getitem__(self, n: int) -> Inertia:
        # Collections are 1-indexed in COM; raising StopIteration lets the
        # legacy __getitem__ iteration protocol terminate cleanly.
        if (n + 1) > self.count:
            raise StopIteration
        # NOTE(review): calls `item` (lowercase) on the COM object while
        # item()/remove() above use `Item`/`Remove`; COM name resolution is
        # typically case-insensitive, but confirm this resolves correctly.
        return Inertia(self.inertias.item(n + 1))

    def __iter__(self) -> Iterator[Inertia]:
        # Wrap each COM item (1-indexed) in the configured child_object type.
        for i in range(self.count):
            yield self.child_object(self.com_object.item(i + 1))

    def __repr__(self):
        return f'Inertias(name="{self.name}")'
|
"""
The util module contains various utility functions.
"""
from __future__ import print_function
import argparse
import binascii
import re
import sys
from six.moves import range
import pwnypack.codec
import pwnypack.main
__all__ = [
'cycle',
'cycle_find',
'reghex',
]
def deBruijn(n, k):
    """
    An implementation of the FKM algorithm for generating the de Bruijn
    sequence containing all k-ary strings of length n, as described in
    "Combinatorial Generation" by Frank Ruskey.

    Yields the sequence one integer symbol (0..k-1) at a time.
    """
    # Working buffer: a[0] is a sentinel, a[1..n] holds the current word.
    a = [ 0 ] * (n + 1)

    def gen(t, p):
        # t: current 1-based position being filled; p: current period length.
        if t > n:
            # Word complete: emit the symbols of the current period.
            for v in a[1:p + 1]:
                yield v
        else:
            # Extend periodically first...
            a[t] = a[t - p]
            for v in gen(t + 1, p):
                yield v
            # ...then try every larger symbol, which starts a new period t.
            for j in range(a[t - p] + 1, k):
                a[t] = j
                for v in gen(t + 1, t):
                    yield v
    return gen(1, 1)
def cycle(length, width=4):
    """
    Generate a de Bruijn sequence of a given length (and width). A de Bruijn
    sequence is a set of varying repetitions where each sequence of *n*
    characters is unique within the sequence. This type of sequence can be
    used to easily find the offset to the return pointer when exploiting a
    buffer overflow.

    Args:
        length(int): The length of the sequence to generate.
        width(int): The width of each element in the sequence.

    Returns:
        str: The sequence.

    Example:
        >>> from pwny import *
        >>> cycle(80)
        AAAABAAACAAADAAAEAAAFAAAGAAAHAAAIAAAJAAAKAAALAAAMAAANAAAOAAAPAAAQAAARAAASAAATAAA
    """
    # `sequence` instead of the original `iter`, which shadowed the builtin.
    sequence = deBruijn(width, 26)
    # Map each 0..25 symbol onto 'A'..'Z' and take exactly `length` of them.
    return ''.join(chr(ord('A') + next(sequence)) for _ in range(length))
def cycle_find(key, width=4):
    """
    Given an element of a de Bruijn sequence, find its index in that sequence.

    Args:
        key(str): The piece of the de Bruijn sequence to find.
        width(int): The width of each element in the sequence.

    Returns:
        int: The index of ``key`` in the de Bruijn sequence.
    """
    key_len = len(key)
    stream = deBruijn(width, 26)
    # Prime a sliding window with the first key_len characters.
    window = ''
    for _ in range(key_len):
        window += chr(ord('A') + next(stream))
        if window == key:
            return 0
    # Slide one character at a time over the rest of the sequence.
    for offset, symbol in enumerate(stream):
        window = window[1:] + chr(ord('A') + symbol)
        if window == key:
            return offset + 1
    return -1
# Token grammar for "reghex" patterns: hex byte pairs, wildcard bytes
# (`?` / `.`, optionally repeated as `{n}`), `*`/`+` gap markers, whitespace.
REGHEX_PATTERN = r'(([a-fA-F0-9]{2})|(([?.])(\{\d+\})?)|(\*|\+)|\s+)'
# Validator (one or more tokens, anchored at the start by `match`) vs. the
# per-token scanner used to translate a pattern.
reghex_check = re.compile(REGHEX_PATTERN + '+')
reghex_regex = re.compile(REGHEX_PATTERN)
def reghex(pattern):
    """
    Compile a regular hexpression (a short form regular expression subset
    specifically designed for searching for binary strings).

    A regular hexpression consists of hex tuples interspaced with control
    characters. The available control characters are:

    - ``?``: Any byte (optional).
    - ``.``: Any byte (required).
    - ``?{n}``: A set of 0 up to *n* bytes.
    - ``.{n}``: A set of exactly *n* bytes.
    - ``*``: Any number of bytes (or no bytes at all).
    - ``+``: Any number of bytes (at least one byte).

    Args:
        pattern(str): The reghex pattern.

    Returns:
        regexp: A regular expression as returned by ``re.compile()``.
    """
    # Validate the pattern's leading tokens (note: `match` only anchors at
    # the start of the string).
    if not reghex_check.match(pattern):
        raise SyntaxError('Invalid reghex pattern.')
    b_pattern = b''
    for match in reghex_regex.finditer(pattern):
        _, match_hex, _, match_char, match_char_len, match_star_plus = match.groups()
        if match_hex:
            # Literal byte pair: decode and regex-escape it.
            b_pattern += re.escape(pwnypack.codec.dehex(match_hex))
        elif match_char:
            if match_char == '?':
                # Optional byte(s): `?` -> at most one, `?{n}` -> lazy 0..n.
                if match_char_len is None:
                    b_pattern += b'.?'
                else:
                    b_pattern += ('.{0,%d}?' % int(match_char_len[1:-1])).encode('ascii')
            else:
                # Required byte(s): `.` -> exactly one, `.{n}` -> exactly n.
                if match_char_len is None:
                    b_pattern += b'.'
                else:
                    b_pattern += b'.' * int(match_char_len[1:-1])
        elif match_star_plus:
            # Gap marker: lazy any-byte repetition (`.*?` or `.+?`).
            b_pattern += b'.' + match_star_plus.encode('ascii') + b'?'
    try:
        return re.compile(b_pattern)
    except (TypeError, binascii.Error, re.error):
        raise SyntaxError('Invalid reghex pattern.')
@pwnypack.main.register('cycle')
def cycle_app(parser, cmd, args): # pragma: no cover
    """
    Generate a de Bruijn sequence of a given length.
    """
    parser.add_argument('-w', '--width', type=int, default=4, help='the length of the cycled value')
    parser.add_argument('length', type=int, help='the cycle length to generate')
    args = parser.parse_args(args)
    # The returned string is presumably emitted by the pwnypack.main driver
    # -- TODO confirm against pwnypack.main.register.
    return cycle(args.length, args.width)
@pwnypack.main.register('cycle-find')
def cycle_find_app(_parser, cmd, args): # pragma: no cover
    """
    Find the first position of a value in a de Bruijn sequence.
    """
    # Build a fresh parser (reusing prog/description) instead of the one
    # passed in; the value argument is optional and may come from stdin.
    parser = argparse.ArgumentParser(
        prog=_parser.prog,
        description=_parser.description,
    )
    parser.add_argument('-w', '--width', type=int, default=4, help='the length of the cycled value')
    parser.add_argument('value', help='the value to determine the position of, read from stdin if missing', nargs='?')
    args = parser.parse_args(args)
    index = cycle_find(pwnypack.main.string_value_or_stdin(args.value), args.width)
    if index == -1:
        print('Not found.')
        # Non-zero exit status signals the miss to shell callers.
        sys.exit(1)
    else:
        print('Found at position: %d' % index)
|
# import turtle
import time
import datetime
import pytz
#
# # noinspection PyUnresolvedReferences
# # comment above was used to remove the warning due to a bug in the turtle module
#
# turtle.forward(150)
# turtle.right(250)
# turtle.forward(150)
#
# time.sleep(5)
# ====================================
# import random
# import webbrowser
#
# # webbrowser.open("https://www.python.org/")
#
# help(webbrowser)
# print("-" * 50)
# help(random)
# print("-" * 50)
# help(random.randint)
# ====================================
# import time (already imported above)
print("=" * 10)
# time.gmtime(0) shows the platform epoch (1970-01-01 on POSIX systems).
print(time.gmtime(0))
print(time.localtime())
print(time.time())
# ====================================
# import pytz (already imported above)
# import datetime (already imported above)
print("=" * 50)
# Render the current time in a specific IANA timezone using pytz.
country = "Europe/Moscow"
tz_to_display = pytz.timezone(country)
local_time = datetime.datetime.now(tz=tz_to_display)
print("The time in {} is {}".format(country, local_time))
# NOTE(review): datetime.utcnow() returns a *naive* datetime and is deprecated
# in Python 3.12+; datetime.datetime.now(datetime.timezone.utc) is the aware
# replacement -- confirm before changing, it alters the printed format.
print("UTC is {}".format(datetime.datetime.utcnow()))
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing stage tests."""
import time
import numpy as np
import tensorflow.compat.v2 as tf
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_stage
from keras.layers.preprocessing import preprocessing_test_utils
from keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class PreprocessingStageTest(
    test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
    """Tests for preprocessing_stage.PreprocessingStage."""
    def test_adapt(self):
        """adapt() must visit each layer front-to-back, once per call."""
        class PL(base_preprocessing_layer.PreprocessingLayer):
            # Minimal preprocessing layer that records when and how often
            # adapt() was invoked, and adds 1.0 in call().
            def __init__(self, **kwargs):
                self.adapt_time = None
                self.adapt_count = 0
                super().__init__(**kwargs)
            def adapt(self, data, reset_state=True):
                self.adapt_time = time.time()
                self.adapt_count += 1
            def call(self, inputs):
                return inputs + 1.0
        # Test with NumPy array
        stage = preprocessing_stage.PreprocessingStage(
            [
                PL(),
                PL(),
                PL(),
            ]
        )
        stage.adapt(np.ones((3, 4)))
        self.assertEqual(stage.layers[0].adapt_count, 1)
        self.assertEqual(stage.layers[1].adapt_count, 1)
        self.assertEqual(stage.layers[2].adapt_count, 1)
        # Layers are adapted in order, so the recorded timestamps must be
        # non-decreasing.
        self.assertLessEqual(
            stage.layers[0].adapt_time, stage.layers[1].adapt_time
        )
        self.assertLessEqual(
            stage.layers[1].adapt_time, stage.layers[2].adapt_time
        )
        # Check call
        y = stage(tf.ones((3, 4)))
        # Three stacked layers each add 1.0 to the input.
        self.assertAllClose(y, np.ones((3, 4)) + 3.0)
        # Test with dataset
        adapt_data = tf.data.Dataset.from_tensor_slices(np.ones((3, 10)))
        adapt_data = adapt_data.batch(2)  # 3 samples -> 2 batches (sizes 2 and 1)
        stage.adapt(adapt_data)
        # Counts accumulate across adapt() calls: 1 from above + 1 here.
        self.assertEqual(stage.layers[0].adapt_count, 2)
        self.assertEqual(stage.layers[1].adapt_count, 2)
        self.assertEqual(stage.layers[2].adapt_count, 2)
        self.assertLess(stage.layers[0].adapt_time, stage.layers[1].adapt_time)
        self.assertLess(stage.layers[1].adapt_time, stage.layers[2].adapt_time)
        # Test error with bad data
        with self.assertRaisesRegex(ValueError, "requires a "):
            stage.adapt(None)
if __name__ == "__main__":
    tf.test.main()  # discover and run the test cases defined above
|
from __future__ import print_function
import re
import sys
import api
import config
_VALID_SORTS = [
'Number',
'YearFrom',
'YearFromDESC',
'Pieces',
'PiecesDESC',
'Minifigs',
'MinifigsDESC',
'Rating',
'UKRetailPrice',
'UKRetailPriceDESC',
'USRetailPrice',
'USRetailPriceDESC',
'CARetailPrice',
'CARetailPriceDESC',
'DERetailPrice',
'DERetailPriceDESC',
'FRRetailPrice',
'FRRetailPriceDESC',
'UKPricePerPiece',
'UKPricePerPieceDESC',
'USPricePerPiece',
'USPricePerPieceDESC',
'CAPricePerPiece',
'CAPricePerPieceDESC',
'DEPricePerPiece',
'DEPricePerPieceDESC',
'FRPricePerPiece',
'FRPricePerPieceDESC',
'Theme',
'Subtheme',
'Name',
'Random',
'QtyOwned',
'QtyOwnedDESC',
'OwnCount',
'OwnCountDESC',
'WantCount',
'WantCountDESC',
'UserRating'
'CollectionID'
]
def get_sets(
    query,
    id,
    set_number,
    theme,
    subtheme,
    year,
    tag,
    owned,
    wanted,
    updated_since,
    limit,
    order_by,
    extended,
    id_only,
    count
):
    """Query the getSets endpoint and print the matching sets.

    Every filter argument is optional; falsy values are simply skipped.
    List-valued filters (set_number, theme, subtheme, year) are joined into
    comma-separated strings.  `updated_since` and `order_by` are validated
    by helpers that call sys.exit() on invalid input, so validation happens
    before any API request is made.  When `count` is true only the number
    of matches is printed; otherwise each set is printed via _print_set
    (IDs only when `id_only` is set).
    """
    # NOTE: userHash is always required despite documentation saying it is optional unless using collection filters
    params = {}
    if query:
        params['query'] = query
    if id:
        params['setID'] = id
    if set_number:
        params['setNumber'] = ','.join(set_number)
    if theme:
        params['theme'] = ','.join(theme)
    if subtheme:
        params['subtheme'] = ','.join(subtheme)
    if year:
        params['year'] = ','.join(year)
    if tag:
        params['tag'] = tag
    if owned:
        params['owned'] = 1
    if wanted:
        params['wanted'] = 1
    if updated_since and _is_iso8601_date(updated_since):
        params['updatedSince'] = updated_since
    if order_by and _is_valid_order_by(order_by):
        params['orderBy'] = order_by
    if extended:
        params['extendedData'] = 1
    # TODO: validate limit
    if count:
        # pageSize=0 presumably asks the API for just the match count -- confirm.
        params['pageSize'] = 0
    else:
        params['pageSize'] = limit
    sets_json = api.execute_api_request('getSets', include_hash=True, params=params)
    # Cache the returned sets so later ID <-> set-number lookups can avoid
    # extra API calls (see _get_set_number / _get_id).
    config.update_cache(sets_json['sets'])
    if count:
        print(sets_json['matches'])
    else:
        [_print_set(lego_set, id_only) for lego_set in sets_json['sets']]
def update_set(id, owned, wanted, notes, rating):
    """Update the collection status of one set via the setCollection endpoint.

    Only arguments that are not None (or truthy, for notes) are sent.
    """
    params = {}
    if owned is not None:
        # NOTE(review): owned == 1 sets the boolean 'own' flag while any other
        # quantity goes to 'qtyOwned' -- so owning exactly one copy never
        # updates qtyOwned.  Presumably matches the API contract; confirm.
        if owned == 1:
            params['own'] = owned
        else:
            params['qtyOwned'] = owned
    if wanted is not None:
        params['want'] = 1 if wanted else 0
    if notes:
        params['notes'] = notes
    if rating is not None:
        params['rating'] = rating
    api.execute_api_request('setCollection', include_hash=True, setID=id, params=params)
def _id_to_set_number_generator(ids):
for i in ids:
yield _get_set_number(i)
def _set_number_to_id_generator(set_numbers):
for n in set_numbers:
yield _get_id(n)
def get_instructions(id, directory, set_number=None):
    """Fetch (and optionally download) building instructions for sets.

    One of *id* / *set_number* may be missing; the absent side is resolved
    lazily through the cache-backed generator helpers above.  When
    *directory* is given the instruction files are downloaded into it,
    otherwise each instruction's URL is printed.
    """
    # Pair every set ID with its set number, resolving whichever side was
    # not supplied by the caller.
    ids = id if id is not None else _set_number_to_id_generator(set_number)
    set_numbers = set_number if set_number is not None else _id_to_set_number_generator(id)
    # for set_id in ids:
    for set_id, cur_set_number in zip(ids, set_numbers):
        if not set_id:
            print('No instructions found for set number {}'.format(cur_set_number))
            continue
        if not cur_set_number:
            print('No instructions found for set ID {}'.format(set_id))
            continue
        # getting the set number will increase key usage and may result in hitting the API limit
        # cur_set_number = set_number if set_number is not None else _get_set_number(set_id)
        instructions_json = api.execute_api_request('getInstructions', setID=set_id)
        if not instructions_json['instructions']:
            print('No instructions found for {} ({})'.format(cur_set_number, set_id))
            if directory:
                # Empty marker file -- presumably lets later runs skip sets
                # already known to have no instructions; confirm.
                with open('{}/{}_noinstructions.txt'.format(directory, cur_set_number), 'wb'):
                    pass  # don't actually write anything to the file
            continue
        instructions = instructions_json['instructions']
        if directory:
            [api.download_instruction(directory, cur_set_number, i) for i in instructions]
        else:
            [_print_instruction(cur_set_number, i) for i in instructions]
def get_themes(theme):
    """Print every theme, optionally filtered by a case-insensitive regex."""
    themes_json = api.execute_api_request('getThemes')
    for entry in themes_json['themes']:
        # Skip entries that do not match the optional filter pattern.
        if theme is not None and not re.search(theme, entry['theme'], flags=re.IGNORECASE):
            continue
        print('{} ({}-{}): {} set(s), {} subtheme(s)'.format(
            entry['theme'], entry['yearFrom'], entry['yearTo'], entry['setCount'], entry['subthemeCount']
        ))
def get_subthemes(theme, subtheme=None):
    """Print the subthemes of *theme*, optionally filtered by a regex."""
    subthemes_json = api.execute_api_request('getSubthemes', Theme=theme)
    for entry in subthemes_json['subthemes']:
        # Skip entries that do not match the optional filter pattern.
        if subtheme is not None and not re.search(subtheme, entry['subtheme'], flags=re.IGNORECASE):
            continue
        print('{} ({}-{}): {} set(s)'.format(
            entry['subtheme'], entry['yearFrom'], entry['yearTo'], entry['setCount']
        ))
def get_years(theme):
    """Print, per year, how many sets of *theme* were released."""
    years_json = api.execute_api_request('getYears', Theme=theme)
    for entry in years_json['years']:
        line = '{}: {}'.format(entry['year'], entry['setCount'])
        print(line)
def _is_iso8601_date(updated_since):
if not re.compile('^\\d{4}-\\d{2}-\\d{2}$').match(updated_since):
sys.exit('ERROR: updated_since must have format yyyy-MM-dd')
return True
def _print_set(lego_set, id_only):
    """Print a one-line summary of *lego_set* (or just its ID when id_only)."""
    if id_only:
        print(lego_set['setID'])
        return
    # Collect ownership annotations appended in parentheses below.
    collection_details = []
    if lego_set['collection']['owned']:
        collection_details.append(str(lego_set['collection']['qtyOwned']) + ' owned')
    if lego_set['collection']['wanted']:
        collection_details.append('wanted')
    # NOTE(review): .encode('UTF-8') prints a bytes repr (b'...') under
    # Python 3; this looks like a Python 2 leftover (the module imports
    # print_function) -- confirm the target interpreter before changing.
    set_details = '{}-{} {} {}'.format(
        lego_set['number'],
        lego_set['numberVariant'],
        lego_set['setID'],
        lego_set['name'].encode('UTF-8')
    )
    if collection_details:
        set_details = set_details + ' (' + ', '.join(collection_details) + ')'
    print(set_details)
def _is_valid_order_by(order_by):
    """Return True if *order_by* case-insensitively equals a known sort key; exit otherwise."""
    # Keep the regex-based comparison (rather than a plain lower() check) so
    # the original matching semantics are preserved exactly.
    if any(re.compile('^{}$'.format(candidate), flags=re.IGNORECASE).match(order_by)
           for candidate in _VALID_SORTS):
        return True
    sys.exit('ERROR: invalid sort option')
def _get_set_number(set_id):
    """Resolve *set_id* to a set number, fetching from the API on a cache miss."""
    cache = config.get_cache()
    if set_id not in cache['sets']:
        sets_json = api.execute_api_request('getSets', include_hash=True, params={'setID': set_id})
        config.update_cache(sets_json['sets'])
        # NOTE(review): unlike _get_id below, the local `cache` is NOT
        # re-bound after update_cache(), so this lookup only succeeds if
        # update_cache mutates the same dict in place -- confirm; otherwise
        # a fresh fetch still returns None here.
    return cache['sets'].get(set_id, None)
def _get_id(set_number):
    """Resolve *set_number* to its setID, fetching from the API on a cache miss."""
    cache = config.get_cache()
    if set_number not in cache['sets']:
        sets_json = api.execute_api_request('getSets', include_hash=True, params={'setNumber': set_number})
        # Re-bind to the refreshed cache returned by update_cache before the lookup.
        cache = config.update_cache(sets_json['sets'], cache)
    return cache['sets'].get(set_number, None)
def _print_instruction(set_number, instruction):
print('{}: "{}" {}'.format(set_number, instruction['description'], instruction['URL']))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import lxml
import lxml.html
import os
import hashlib
import jieba
from gensim.models import Word2Vec
import content_extract as ce
import text_util
def cache_dir():
    """Return the .cache directory beside this module, creating it on first use."""
    here = os.path.dirname(os.path.realpath(__file__))
    path = here + "/.cache"
    if not os.path.isdir(path):
        os.makedirs(path)
    return path
def get(url, enc='utf-8', cache=True):
    """Fetch *url* (Python 2 urllib2), optionally caching the raw bytes on disk.

    The cache key is the MD5 hex digest of the URL.  Returns the page decoded
    with *enc* (decode errors ignored), or None when the fetch fails.
    """
    if cache:
        md5 = hashlib.md5(url).hexdigest()
        f = "%s/%s" % (cache_dir(), md5)
        if os.path.isfile(f):
            return open(f).read().decode(enc, 'ignore')
    # Browser-like request headers -- presumably to avoid being rejected as a
    # bot by the target site; confirm.
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3 QQDownload/1.7')
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
    req.add_header('Accept-Language', 'zh-cn,zh;q=0.5')
    try:
        response = urllib2.urlopen(req)
        content = response.read()
        if cache:
            # Raw (undecoded) bytes are written to the cache file.
            out = open(f, "w")
            out.write(content)
            out.close()
        content = content.decode(enc, 'ignore')
        return content
    except:
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt); narrowing to urllib2.URLError would be safer.
        return None
def crawl_report_list():
    '''
    Crawl the index page listing government work reports and return the
    linked report URLs.
    Source page: http://www.gov.cn/guoqing/2006-02/16/content_2616810.htm
    '''
    content = get("http://www.gov.cn/guoqing/2006-02/16/content_2616810.htm")
    if content is None: return []
    doc = lxml.html.document_fromstring(content)
    # Every <a> href found inside table cells on the page.
    return doc.xpath("*//td//a/@href")
def build_model(cache=True):
    """Build (or load from disk cache) a Word2Vec model over the crawled reports.

    Each report page is fetched, split into sentences, tokenized with jieba,
    and the token lists are fed to gensim's Word2Vec.
    """
    if cache:
        f = "%s/word2vec.model" % cache_dir()
        if os.path.isfile(f):
            return Word2Vec.load(f)
    texts = []
    for url in crawl_report_list():
        html = get(url)
        # ce.parse returns (enc, time, title, text); only text is used here.
        enc, time, title, text = ce.parse(url, html)
        sentences = text_util.get_sentences(text)
        for s in sentences:
            texts.append([w for w in jieba.cut(s)])
    b = Word2Vec(texts)
    if cache:
        b.save(f)
    return b
if __name__ == "__main__":
    model = build_model()
    # Print the 50 words most similar to the query term.
    # (Python 2 print-statement syntax below.)
    ret = model.most_similar(u'税收', topn=50)
    for i in ret:
        print i[0],i[1]
|
import sys
#import GRASP to use its thread safe print function
#sys.path.insert(0, 'graspy/')
#from grasp import tprint
from _thread import start_new_thread
from queue import Queue
#import all the modules
import GRASP_RegServer
#import TLS_Server
import NETCONF_client
import REST_Server
def main(args):
    """Start the REST and GRASP registration servers in background threads,
    then spawn a NETCONF client thread for every device IP received on the
    shared queue.  Blocks forever.

    Args:
        args: command-line arguments (currently unused).
    """
    print("starting main Thread")
    # Queue through which REST_Server hands device IPs to the loop below.
    devicesQueue = Queue()
    """
    threads = []
    threads.append(REST_Server.REST_Server_Thread(devicesQueue))
    for t in threads:
        t.start()
    """
    start_new_thread(REST_Server.main,(devicesQueue,))
    #start_new_thread(TLS_Server.main,())
    start_new_thread(GRASP_RegServer.main, (1,))
    #start_new_thread(NETCONF_client.main, (,))
    while(True):
        # get() blocks until the REST server enqueues a device IP.
        tmpIP = devicesQueue.get()
        print("received ", tmpIP, "starting NETCONF therad")
        start_new_thread(NETCONF_client.main, (tmpIP,))
        pass
if __name__ == '__main__':
    main(sys.argv[1:])
|
from django.contrib import admin
from donations.models import CreditCard, Donation, RecurringDonation
# Expose the donation models in the Django admin with default ModelAdmin options.
admin.site.register(CreditCard)
admin.site.register(Donation)
admin.site.register(RecurringDonation)
|
from .optimizer import Optimizer
from .propellers import DatabaseFitProp, DatabaseDataProp, BladeElementProp
from .electronics import Battery, ESC, Motor
from .propulsion_unit import PropulsionUnit
from .exceptions import ThrottleNotFoundError, MaxCurrentExceededError, DatabaseRecordNotFoundError, TorquesNotMatchedError, InvalidRuntimeError, PropDataBoundsError
from .special_functions import create_component_from_database
from .helpers import to_rads, to_rpm |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'filter.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated UI for the data-filter dialog (filter.ui).

    NOTE: generated code -- regenerate from filter.ui rather than editing
    widget layout here by hand.
    """
    def setupUi(self, Dialog):
        """Create and lay out all widgets on *Dialog*."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(648, 338)
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        self.gridLayout_2 = QtWidgets.QGridLayout()
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label_3 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.gridLayout_2.addWidget(self.label_3, 2, 0, 1, 1)
        self.label_12 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_12.setFont(font)
        self.label_12.setObjectName("label_12")
        self.gridLayout_2.addWidget(self.label_12, 2, 2, 1, 1)
        # Price range row: min ~ max line edits.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.price_min = QtWidgets.QLineEdit(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.price_min.setFont(font)
        self.price_min.setObjectName("price_min")
        self.horizontalLayout.addWidget(self.price_min)
        self.label_16 = QtWidgets.QLabel(Dialog)
        self.label_16.setMinimumSize(QtCore.QSize(16, 33))
        self.label_16.setMaximumSize(QtCore.QSize(16, 33))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_16.setFont(font)
        self.label_16.setObjectName("label_16")
        self.horizontalLayout.addWidget(self.label_16)
        self.price_max = QtWidgets.QLineEdit(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.price_max.setFont(font)
        self.price_max.setObjectName("price_max")
        self.horizontalLayout.addWidget(self.price_max)
        self.gridLayout_2.addLayout(self.horizontalLayout, 2, 3, 1, 1)
        # Purchase-date range row: enable checkbox + min ~ max date edits.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.purchase_date_chk = QtWidgets.QCheckBox(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.purchase_date_chk.setFont(font)
        self.purchase_date_chk.setText("")
        self.purchase_date_chk.setObjectName("purchase_date_chk")
        self.horizontalLayout_2.addWidget(self.purchase_date_chk)
        self.purchase_date_min = QtWidgets.QDateEdit(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.purchase_date_min.setFont(font)
        self.purchase_date_min.setCalendarPopup(True)
        self.purchase_date_min.setObjectName("purchase_date_min")
        self.horizontalLayout_2.addWidget(self.purchase_date_min)
        self.label_8 = QtWidgets.QLabel(Dialog)
        self.label_8.setMinimumSize(QtCore.QSize(16, 33))
        self.label_8.setMaximumSize(QtCore.QSize(16, 33))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_8.setFont(font)
        self.label_8.setObjectName("label_8")
        self.horizontalLayout_2.addWidget(self.label_8)
        self.purchase_date_max = QtWidgets.QDateEdit(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.purchase_date_max.setFont(font)
        self.purchase_date_max.setCalendarPopup(True)
        self.purchase_date_max.setObjectName("purchase_date_max")
        self.horizontalLayout_2.addWidget(self.purchase_date_max)
        self.horizontalLayout_5.addLayout(self.horizontalLayout_2)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem)
        self.gridLayout_2.addLayout(self.horizontalLayout_5, 3, 1, 1, 3)
        self.label_11 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_11.setFont(font)
        self.label_11.setObjectName("label_11")
        self.gridLayout_2.addWidget(self.label_11, 1, 2, 1, 1)
        # Editable combo boxes for the item/category filter fields.
        self.subcategory = QtWidgets.QComboBox(Dialog)
        self.subcategory.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.subcategory.setFont(font)
        self.subcategory.setEditable(True)
        self.subcategory.setObjectName("subcategory")
        self.gridLayout_2.addWidget(self.subcategory, 0, 3, 1, 1)
        self.label = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        self.label_4 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
        self.brand = QtWidgets.QComboBox(Dialog)
        self.brand.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.brand.setFont(font)
        self.brand.setEditable(True)
        self.brand.setObjectName("brand")
        self.gridLayout_2.addWidget(self.brand, 1, 3, 1, 1)
        self.spec = QtWidgets.QComboBox(Dialog)
        self.spec.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.spec.setFont(font)
        self.spec.setEditable(True)
        self.spec.setObjectName("spec")
        self.gridLayout_2.addWidget(self.spec, 2, 1, 1, 1)
        self.label_7 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.gridLayout_2.addWidget(self.label_7, 6, 0, 1, 1)
        self.use_department = QtWidgets.QComboBox(Dialog)
        self.use_department.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.use_department.setFont(font)
        self.use_department.setEditable(True)
        self.use_department.setObjectName("use_department")
        self.gridLayout_2.addWidget(self.use_department, 6, 1, 1, 1)
        self.label_2 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.gridLayout_2.addWidget(self.label_2, 3, 0, 1, 1)
        self.label_6 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.gridLayout_2.addWidget(self.label_6, 5, 0, 1, 1)
        self.label_5 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        self.gridLayout_2.addWidget(self.label_5, 4, 0, 1, 1)
        self.name = QtWidgets.QComboBox(Dialog)
        self.name.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.name.setFont(font)
        self.name.setEditable(True)
        self.name.setObjectName("name")
        self.gridLayout_2.addWidget(self.name, 1, 1, 1, 1)
        self.category = QtWidgets.QComboBox(Dialog)
        self.category.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.category.setFont(font)
        self.category.setEditable(True)
        self.category.setObjectName("category")
        self.gridLayout_2.addWidget(self.category, 0, 1, 1, 1)
        self.label_10 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_10.setFont(font)
        self.label_10.setObjectName("label_10")
        self.gridLayout_2.addWidget(self.label_10, 0, 2, 1, 1)
        self.keep_department = QtWidgets.QComboBox(Dialog)
        self.keep_department.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.keep_department.setFont(font)
        self.keep_department.setEditable(True)
        self.keep_department.setObjectName("keep_department")
        self.gridLayout_2.addWidget(self.keep_department, 5, 1, 1, 1)
        self.keeper = QtWidgets.QComboBox(Dialog)
        self.keeper.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.keeper.setFont(font)
        self.keeper.setEditable(True)
        self.keeper.setObjectName("keeper")
        self.gridLayout_2.addWidget(self.keeper, 6, 3, 1, 1)
        self.label_14 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_14.setFont(font)
        self.label_14.setObjectName("label_14")
        self.gridLayout_2.addWidget(self.label_14, 6, 2, 1, 1)
        self.label_13 = QtWidgets.QLabel(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_13.setFont(font)
        self.label_13.setObjectName("label_13")
        self.gridLayout_2.addWidget(self.label_13, 5, 2, 1, 1)
        self.place = QtWidgets.QComboBox(Dialog)
        self.place.setMinimumSize(QtCore.QSize(200, 0))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.place.setFont(font)
        self.place.setEditable(True)
        self.place.setObjectName("place")
        self.gridLayout_2.addWidget(self.place, 5, 3, 1, 1)
        # Acquire-date range row: enable checkbox + min ~ max date edits.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.acquire_date_chk = QtWidgets.QCheckBox(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.acquire_date_chk.setFont(font)
        self.acquire_date_chk.setText("")
        self.acquire_date_chk.setObjectName("acquire_date_chk")
        self.horizontalLayout_3.addWidget(self.acquire_date_chk)
        self.acquire_date_min = QtWidgets.QDateEdit(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.acquire_date_min.setFont(font)
        self.acquire_date_min.setCalendarPopup(True)
        self.acquire_date_min.setObjectName("acquire_date_min")
        self.horizontalLayout_3.addWidget(self.acquire_date_min)
        self.label_9 = QtWidgets.QLabel(Dialog)
        self.label_9.setMinimumSize(QtCore.QSize(16, 33))
        self.label_9.setMaximumSize(QtCore.QSize(16, 33))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.label_9.setFont(font)
        self.label_9.setObjectName("label_9")
        self.horizontalLayout_3.addWidget(self.label_9)
        self.acquire_date_max = QtWidgets.QDateEdit(Dialog)
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.acquire_date_max.setFont(font)
        self.acquire_date_max.setCalendarPopup(True)
        self.acquire_date_max.setObjectName("acquire_date_max")
        self.horizontalLayout_3.addWidget(self.acquire_date_max)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.gridLayout_2.addLayout(self.horizontalLayout_3, 4, 1, 1, 3)
        self.verticalLayout.addLayout(self.gridLayout_2)
        # Bottom button row: cancel + submit, right-aligned by a spacer.
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem2)
        self.cancelBtn = QtWidgets.QPushButton(Dialog)
        self.cancelBtn.setMaximumSize(QtCore.QSize(75, 35))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.cancelBtn.setFont(font)
        self.cancelBtn.setAutoDefault(False)
        self.cancelBtn.setObjectName("cancelBtn")
        self.horizontalLayout_4.addWidget(self.cancelBtn)
        self.submitBtn = QtWidgets.QPushButton(Dialog)
        self.submitBtn.setMaximumSize(QtCore.QSize(75, 35))
        font = QtGui.QFont()
        font.setFamily("微軟正黑體")
        font.setPointSize(16)
        self.submitBtn.setFont(font)
        self.submitBtn.setDefault(True)
        self.submitBtn.setObjectName("submitBtn")
        self.horizontalLayout_4.addWidget(self.submitBtn)
        self.verticalLayout.addLayout(self.horizontalLayout_4)
        self.retranslateUi(Dialog)
        # Cancel rejects the dialog; submit handling is connected elsewhere.
        self.cancelBtn.clicked.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        Dialog.setTabOrder(self.category, self.subcategory)
        Dialog.setTabOrder(self.subcategory, self.name)
        Dialog.setTabOrder(self.name, self.brand)
        Dialog.setTabOrder(self.brand, self.spec)
        Dialog.setTabOrder(self.spec, self.price_min)
        Dialog.setTabOrder(self.price_min, self.price_max)
        Dialog.setTabOrder(self.price_max, self.purchase_date_chk)
        Dialog.setTabOrder(self.purchase_date_chk, self.purchase_date_min)
        Dialog.setTabOrder(self.purchase_date_min, self.purchase_date_max)
        Dialog.setTabOrder(self.purchase_date_max, self.acquire_date_chk)
        Dialog.setTabOrder(self.acquire_date_chk, self.acquire_date_min)
        Dialog.setTabOrder(self.acquire_date_min, self.acquire_date_max)
        Dialog.setTabOrder(self.acquire_date_max, self.keep_department)
        Dialog.setTabOrder(self.keep_department, self.use_department)
        Dialog.setTabOrder(self.use_department, self.keeper)
        Dialog.setTabOrder(self.keeper, self.cancelBtn)
        Dialog.setTabOrder(self.cancelBtn, self.submitBtn)
    def retranslateUi(self, Dialog):
        """Set all user-visible (Traditional Chinese) widget texts."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "請設定資料過濾條件"))
        self.label_3.setText(_translate("Dialog", "規格:"))
        self.label_12.setText(_translate("Dialog", "單價:"))
        self.label_16.setText(_translate("Dialog", "~"))
        self.label_8.setText(_translate("Dialog", "~"))
        self.label_11.setText(_translate("Dialog", "品牌:"))
        self.label.setText(_translate("Dialog", "物品大類:"))
        self.label_4.setText(_translate("Dialog", "物品名稱:"))
        self.label_7.setText(_translate("Dialog", "使用單位:"))
        self.label_2.setText(_translate("Dialog", "購置日期:"))
        self.label_6.setText(_translate("Dialog", "保管單位:"))
        self.label_5.setText(_translate("Dialog", "取得日期:"))
        self.label_10.setText(_translate("Dialog", "物品細目:"))
        self.label_14.setText(_translate("Dialog", "保管人:"))
        self.label_13.setText(_translate("Dialog", "存置地點:"))
        self.label_9.setText(_translate("Dialog", "~"))
        self.cancelBtn.setText(_translate("Dialog", "取消"))
        self.submitBtn.setText(_translate("Dialog", "確定"))
|
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.urls.base import reverse
from axes.utils import reset
from bluebottle.bluebottle_dashboard.forms import AxesCaptchaForm
from bluebottle.utils.utils import get_client_ip
@csrf_protect
def locked_out(request):
    """Render the django-axes lockout page; a valid captcha resets the lockout.

    On POST with a valid AxesCaptchaForm, the lockout record for the client's
    IP is reset and the user is redirected to the admin login.  Otherwise the
    (possibly bound, with errors) form is re-rendered.
    """
    # NOTE(review): truthiness of request.POST is checked rather than
    # request.method == 'POST', so a POST with an empty body falls through
    # to the unbound-form branch -- confirm whether that is intended.
    if request.POST:
        form = AxesCaptchaForm(request.POST)
        if form.is_valid():
            ip = get_client_ip(request)
            reset(ip=ip)  # clear this IP's failed-login lockout in django-axes
            return HttpResponseRedirect(
                reverse('admin:login')
            )
    else:
        form = AxesCaptchaForm()
    return render(
        request,
        'admin/locked_out.html',
        dict(form=form),
    )
|
from setuptools import setup, find_packages
def read(f):
    """Return the contents of text file *f* decoded as UTF-8.

    Uses a context manager so the file handle is closed promptly (the
    original left the handle open, relying on GC and emitting
    ResourceWarning under -W default).
    """
    with open(f, 'r', encoding='utf-8') as fh:
        return fh.read()
# Distribution metadata for the ajelastic package; long_description is read
# from README.md so the package index renders it as markdown.
setup(
    name='ajelastic',
    version='1.0.2',
    packages=find_packages(exclude=["tests*"]),
    url='https://github.com/aasaanjobs/ajelastic-sdk',
    license='MIT',
    author='Sohel Tarir',
    author_email='sohel.tarir@aasaanjobs.com',
    description='Python - Elasticsearch Integration (for Aasaanjobs Internal Usage)',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    zip_safe=False,
    # Installs the reindex management command as a console script.
    entry_points={
        'console_scripts': [
            'aj-es-reindex=ajelastic.commands.reindex:main'
        ],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ]
)
|
from __future__ import absolute_import, division, print_function
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.eager as tfe
def parse_csv(line):
    """Parse one CSV line of the Iris dataset into (features, label) tensors.

    Args:
        line: scalar string tensor holding one data row.
    Returns:
        features: float tensor of shape (4,) -- the first four columns.
        label: scalar integer tensor -- the last column (species index).
    """
    example_defaults = [[0.], [0.], [0.], [0.], [0]]  # sets field types
    parsed_line = tf.decode_csv(line, example_defaults)
    # First 4 fields are features, combine into single tensor
    features = tf.reshape(parsed_line[:-1], shape=(4,))
    # Last field is the label
    label = tf.reshape(parsed_line[-1], shape=())
    return features, label
# Eager-mode data pipeline for the Iris training CSV (TensorFlow 1.x API).
tf.enable_eager_execution()
print("TensorFlow version: {}".format(tf.VERSION))
print("Eager execution: {}".format(tf.executing_eagerly()))
train_dataset_url = "http://download.tensorflow.org/data/iris_training.csv"
# Downloads (and caches) the CSV into Keras' dataset cache directory.
train_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),
                                           origin=train_dataset_url)
print("Local copy of the dataset file: {}".format(train_dataset_fp))
train_dataset = tf.data.TextLineDataset(train_dataset_fp)
train_dataset = train_dataset.skip(1)  # skip the first header row
train_dataset = train_dataset.map(parse_csv)  # parse each row
train_dataset = train_dataset.shuffle(buffer_size=1000)  # randomize
train_dataset = train_dataset.batch(32)
# View a single example entry from a batch.
# Renamed from `iter` -- the original name shadowed the `iter` builtin.
dataset_iter = train_dataset.__iter__()
features, label = dataset_iter.get_next()
#iter = train_dataset.make_initializable_iterator()
#el = iter.get_next()
print("example features:", features[0])
print("example label:", label[0])
|
#!/usr/bin/env python3
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import glob
import io
import time

import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
class cameraSpooferNODE():
    # ROS node that replays videos from a folder on the camera image topic,
    # so downstream nodes can be tested without real camera hardware.
    def __init__(self, ext = '.h264'):
        """Node used for spoofing a camera/ publishing a video stream from a folder
        with videos

        Args:
            ext: file extension of the videos to replay (default '.h264').
        """
        # params
        self.videoSize = (640,480)  # (width, height) every frame is resized to
        self.videoDir = "path/to/videos/directory"  # TODO: placeholder -- point at a real folder
        self.videos = self.open_files(self.videoDir, ext = ext)
        rospy.init_node('cameraSpooferNODE', anonymous=False)
        self.image_publisher = rospy.Publisher("/automobile/image_raw", Image, queue_size=1)
        self.bridge = CvBridge()  # converts OpenCV frames to ROS Image messages
    #================================ RUN ================================================
    def run(self):
        """Apply the initializing methods and start the thread.
        """
        rospy.loginfo("starting camaeraSpooferNODE")
        # Blocks forever: _play_video loops over the video list endlessly.
        self._play_video(self.videos)
    # ===================================== INIT VIDEOS ==================================
    def open_files(self, inputDir, ext):
        """Open all files with the given path and extension
        """
        files = glob.glob(inputDir + '/*' + ext)
        return files
    # ===================================== PLAY VIDEO ===================================
    def _play_video(self, videos):
        """Iterate through each video in the folder, open a cap and publish the frames.
        """
        # Outer loop restarts the whole playlist once every video has played.
        while True:
            for video in videos:
                cap = cv2.VideoCapture(video)
                while True:
                    ret, frame = cap.read()
                    stamp = time.time()  # NOTE(review): unused; header uses rospy.Time.now()
                    if ret:
                        frame = cv2.resize(frame, self.videoSize)
                        # output image and time stamp
                        # Note: The sending process can be blocked when no consumer
                        # process exists and the queue reaches its size limit.
                        try:
                            imageObject = self.bridge.cv2_to_imgmsg(frame, "bgr8")
                            imageObject.header.stamp = rospy.Time.now()
                            self.image_publisher.publish(imageObject)
                        except CvBridgeError as e:
                            print(e)
                    else:
                        # End of this video: release the capture, move to the next.
                        break
                cap.release()
# Instantiate the spoofer node and start publishing when run as a script.
if __name__ == "__main__":
    camNod = cameraSpooferNODE()
    camNod.run()
|
"""
A simple example for Reinforcement Learning using table lookup Q-learning method.
An agent "o" is on the left of a 1 dimensional world, the treasure is on the rightmost location.
Run this program and to see how the agent will improve its strategy of finding the treasure.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import sys
"""
CC: 这个线性agent因为可以撰写环境反馈程序,所以简单的增加指数级的ACTION并更新环境反馈函数就行了;
比如cartpole这个agent因为是通过物理环境反馈的小车速度和位置,杆子的角度和速度,所以无法更新环境反馈代码,所以需要直接设计一个外挂的指数级别的ACTION积累函数,其实就是长短记忆!!!
看是否能利用神经网络引入指数效果?神经网络的框架还是不错的。
"""
import numpy as np
import pandas as pd
import time
from contents.tool.utils import display_progress
np.random.seed(2)  # reproducible

N_STATES = 500   # the length of the 1 dimensional world
END_POS = N_STATES - 1  # positions are numbered 0..N_STATES-1, so the goal marker is N_STATES-1
BEGIN_POS = 0           # positions are numbered 0..N_STATES-1, so the start marker is 0
#ACTIONS = ['left', 'right']     # available actions
ACTIONS = ['l1', 'r1']     # available actions: one cell left / right
ACTIONS_EXP = ['l1', 'r1','l2', 'r2','l4', 'r4','l8', 'r8','l16', 'r16','l32', 'r32','l64', 'r64','l128', 'r128']     # exponentially sized steps in both directions
EPSILON = 0.9   # greedy police (probability of exploiting the best known action)
ALPHA = 0.1     # learning rate
GAMMA = 0.9    # discount factor
MAX_EPISODES = 60   # maximum episodes
FRESH_TIME = 0.01    # fresh time for one move (render delay, seconds)
#steps_table = []  # save steps used every EPISODE
display_steps = False # display steps in procedure, it make program VERY SLOW!
def build_q_table(n_states, actions):
    """Create an all-zero Q-table: one row per state, one column per action."""
    return pd.DataFrame(np.zeros((n_states, len(actions))), columns=actions)
def choose_action(state, q_table, actions):
    """Epsilon-greedy action selection from the Q-table row for *state*."""
    row = q_table.iloc[state, :]
    # Explore when the random draw exceeds EPSILON or nothing was learned yet.
    explore = (np.random.uniform() > EPSILON) or (row == 0).all()
    if explore:
        return np.random.choice(actions)
    # Exploit: idxmax (not argmax, whose meaning changed in newer pandas).
    return row.idxmax()
def get_env_feedback_orig(S, A):
    """Single-step environment: reward 1 only when stepping onto the goal."""
    if A == 'r1':  # move right
        if S == N_STATES - 2:  # next cell is the treasure
            return 'terminal', 1
        return S + 1, 0
    # move left: stay put when already at the wall (position 0)
    if S == 0:
        return S, 0
    return S - 1, 0
def get_env_feedback(S, A):
    """Environment step supporting multi-step actions like 'r4' or 'l16'.

    Parameters
    ----------
    S : int
        Current position in [BEGIN_POS, END_POS].
    A : str
        Direction letter ('l' or 'r') followed by a step count.

    Returns
    -------
    tuple
        (next_state, reward); next_state is 'terminal' on an exact hit of
        END_POS (reward 1), BEGIN_POS on overshoot/wall (reward 0).

    Raises
    ------
    ValueError
        If the action starts with neither 'l' nor 'r'.
    """
    if A[0] == 'r':  # move right
        step_nbr = int(A[1:])  # get step number
        if S + step_nbr > END_POS:  # beyond the end pos: new round from begin pos
            return BEGIN_POS, 0
        if S + step_nbr == END_POS:  # reached the goal exactly
            return 'terminal', 1
        return S + step_nbr, 0  # moved right, goal not reached yet
    if A[0] == 'l':  # move left
        step_nbr = int(A[1:])  # get step number
        if S - step_nbr <= BEGIN_POS:  # hit or passed the wall: restart
            return BEGIN_POS, 0
        return S - step_nbr, 0
    # BUGFIX: was print(...) + exit(-1); raising makes the failure visible to
    # callers and keeps the function safe outside interactive sessions.
    raise ValueError("Bad action: '{0}'!".format(A))
def update_env(S, episode, step_counter):
    """Render the 1-D world on one terminal line, or the episode summary."""
    if S == 'terminal':
        interaction = 'Episode %s: total_steps = %s' % (episode+1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(FRESH_TIME)
        print('\r                                ', end='')
    else:
        # '---------T' style environment; the agent cell shows its position.
        cells = ['-']*(N_STATES-1) + ['T']
        cells[S] = str(S)
        print('\r{}'.format(''.join(cells)), end='')
        time.sleep(FRESH_TIME)
def rl(get_env_feedback_func, actions):
    """Main tabular Q-learning loop.

    Parameters
    ----------
    get_env_feedback_func : str or callable
        Name of a module-level feedback function (backward compatible) or
        the function itself.
    actions : list of str
        Action set used to build the Q-table.

    Returns
    -------
    tuple
        (q_table, steps_table) — learned table and steps used per episode.
    """
    steps_table = []
    q_table = build_q_table(N_STATES, actions)
    # BUGFIX: was eval(get_env_feedback_func) inside the inner loop; resolve
    # the function once, without eval, and accept a callable directly.
    if callable(get_env_feedback_func):
        feedback = get_env_feedback_func
    else:
        feedback = globals()[get_env_feedback_func]
    for episode in range(MAX_EPISODES):
        step_counter = 0
        S = 0
        is_terminated = False
        if not display_steps:
            display_progress(int(episode / MAX_EPISODES * 100))  # CLI progress bar
        while not is_terminated:
            A = choose_action(S, q_table, actions)
            S_, R = feedback(S, A)  # take action & get next state and reward
            q_predict = q_table.loc[S, A]
            if S_ != 'terminal':
                q_target = R + GAMMA * q_table.iloc[S_, :].max()  # next state is not terminal
            else:
                q_target = R  # next state is terminal
                is_terminated = True  # terminate this episode
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update
            S = S_  # move to next state
            if display_steps:
                update_env(S, episode, step_counter+1)
            step_counter += 1
        steps_table.append(step_counter)  # save steps used
    return q_table, steps_table
if __name__ == "__main__":
    """
    func_list = [get_env_feedback, get_env_feedback2]
    for func in func_list:
        func_name = func.__name__
        print('RUN: {0}'.format(func_name))
        q_table, steps_table = rl(func_name,ACTIONS)
        print('\r\nSteps-table: \n{0}'.format(steps_table))
        print('\r\nQ-table:\n{0}'.format(q_table))
        print('\r\n')
    """
    env_feedback_func = [get_env_feedback]
    #actions_list = [ACTIONS, ACTIONS_EXP]
    actions_list = [ACTIONS_EXP]
    # Run one training pass per action set and dump the results.
    for actions in actions_list:
        print('RUN in actions mode: {0}'.format(actions))
        q_table, steps_table = rl(env_feedback_func[0].__name__,actions)
        print('\r\nSteps-table: \n{0}'.format(steps_table))
        print('\r\nQ-table:\n{0}'.format(q_table))
        print('\r\n')

# TODO: add timing statistics; step statistics; build a table for display
# TODO: put decreasing steps vs. rounds into a 2-D table; compare both approaches dynamically
# TODO: showing the timing on a chart would be even better
# TODO: add multi-core support (multiprocessing for pure python; try threads for keras/tf frameworks)
# TODO 研究cartpole立杆子和开小车的GYM游戏 |
# This file contains basic statistics functionality. All runtime values are stored
# and can be referenced by name for further usage.
# Author: Stefan Kahl, 2018, Chemnitz University of Technology
import sys
sys.path.append("..")
import copy
import time
import config as cfg
from utils import log
def clearStats(clear_all=False):
    """Reset cfg.STATS. 'permanent' values always survive; 'static' values
    survive unless clear_all is True. Surviving section entries are
    re-exposed as top-level stats."""
    # Make sure both special sections exist before snapshotting them.
    if 'permanent' not in cfg.STATS:
        cfg.STATS['permanent'] = {}
    if 'static' not in cfg.STATS:
        cfg.STATS['static'] = {}
    p = copy.deepcopy(cfg.STATS['permanent'])
    s = copy.deepcopy(cfg.STATS['static'])

    cfg.STATS = {'static': {} if clear_all else s, 'permanent': p}

    # Copy section values back to the top level (static may override permanent).
    for section in ('permanent', 'static'):
        for name, value in cfg.STATS[section].items():
            cfg.STATS[name] = value
def tic(name):
    """Start (or restart) the named wall-clock timer."""
    cfg.STATS.setdefault('times', {})[name] = time.time()
def toc(name):
    """Store the elapsed seconds since tic(name), truncated to 2 decimals."""
    elapsed = abs(time.time() - cfg.STATS['times'][name])
    setValue(name, int(elapsed * 100) / 100.0)
def setValue(name, v, mode='replace', static=False, permanent=False):
    """Record a stat value.

    mode: 'replace' overwrites, 'add' accumulates (+=), 'append' keeps a list.
    static/permanent additionally mirror the value into those sections so it
    survives clearStats().
    """
    if name not in cfg.STATS:
        # First write: 'append' starts a list, everything else stores v as-is.
        cfg.STATS[name] = v if mode in ('replace', 'add') else [v]
    elif mode == 'append':
        cfg.STATS[name].append(v)
    elif mode == 'add':
        cfg.STATS[name] += v
    else:
        cfg.STATS[name] = v

    if static:
        cfg.STATS['static'][name] = cfg.STATS[name]
    if permanent:
        cfg.STATS['permanent'][name] = cfg.STATS[name]
def getValue(name, default=-1):
    """Return the recorded stat, or *default* when it was never set."""
    return cfg.STATS.get(name, default)
last_update = -1  # last 5%-step already drawn, so each '=' prints only once


def showProgress(epoch, done=False):
    """Render a textual epoch progress bar via log.p.

    Prints 'EPOCH n [' on the first batch, one '=' per 5% step afterwards,
    and ']' when done=True.
    """
    global last_update

    # Number of batches processed so far (0 on the first call of an epoch).
    if not 'batch_count' in cfg.STATS:
        bcnt = 0
    else:
        bcnt = cfg.STATS['batch_count']

    # Calculate number of batches to train
    total_batches = cfg.STATS['sample_count'] // cfg.BATCH_SIZE + 1

    if done:
        log.p(']', new_line=False)
        return

    if bcnt == 0:
        log.p(('EPOCH', epoch, '['), new_line=False)
    else:
        # BUGFIX: use integer division — under Python 3 true division made
        # `p % 5` almost never 0, so the bar never advanced.
        p = bcnt * 100 // total_batches
        if not p % 5 and not p == last_update:
            log.p('=', new_line=False)
            last_update = p
# Clear on first load (clear_all=True also drops static values so every
# import starts from an empty slate; permanent values survive regardless)
clearStats(True)
|
"""
Atividade 02
"""
def mes_do_Ano(dia, mes, ano):
    """Print the date with the Portuguese month name, e.g. '25 de Dezembro de 2021'.

    Prints nothing when *mes* is outside 1..12 (same as the original
    if/elif chain, which had no final else branch).
    """
    # Table lookup replaces the 12-branch if/elif chain with duplicated prints.
    nomes = {
        1: 'Janeiro', 2: 'Fevereiro', 3: 'Março', 4: 'Abril',
        5: 'Maio', 6: 'Junho', 7: 'Julho', 8: 'Agosto',
        9: 'Setembro', 10: 'Outubro', 11: 'Novembro', 12: 'Dezembro',
    }
    if mes in nomes:
        mes = nomes[mes]
        print(f"{dia} de {mes} de {ano}")
# Prompt for the date parts, echo them numerically, then print with the
# month name via mes_do_Ano.
dia = int(input("Informe o dia: "))
mes = int(input("Informe o Mês: "))
ano = int(input("Informe o Ano: "))
print(f"{dia}/{mes}/{ano}")
mes_do_Ano(dia,mes,ano) |
from django.contrib.auth.hashers import make_password, check_password
from django.db import models
# Create your models here.
from werkzeug.security import check_password_hash
class AdminUser(models.Model):
    """Back-office account storing a hashed password and soft-delete flags."""

    a_username = models.CharField(max_length=15, unique=True)
    a_password = models.CharField(max_length=256)
    is_delete = models.BooleanField(default=False)
    is_super = models.BooleanField(default=False)

    def set_password(self, password):
        """Hash the raw password and store the digest."""
        self.a_password = make_password(password)

    def check_admin_password(self, password):
        """Return True when the raw password matches the stored hash."""
        return check_password(password, self.a_password)

    def has_permission(self, permission_name):
        """Return True when any linked Permission carries this name."""
        return any(permission_name == perm.p_name
                   for perm in self.permission_set.all())
class Permission(models.Model):
    """Named permission grantable to many AdminUsers (reverse accessor
    `permission_set` is what AdminUser.has_permission iterates)."""
    p_name = models.CharField(max_length=32, unique=True)
    # Removed a stray trailing '|' token that broke the statement.
    p_users = models.ManyToManyField(AdminUser)
# as_rwGPS.py Asynchronous device driver for GPS devices using a UART.
# Supports a limited subset of the PMTK command packets employed by the
# widely used MTK3329/MTK3339 chip.
# Sentence parsing based on MicropyGPS by Michael Calvin McCoy
# https://github.com/inmcm/micropyGPS
# Copyright (c) 2018 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
import as_drivers.as_GPS as as_GPS
try:
    from micropython import const
except ImportError:
    # Running under CPython: const() is a MicroPython bytecode optimization
    # only, so fall back to an identity function.
    const = lambda x : x

# Command identifiers accepted by GPS.command(); each maps to a fixed PMTK
# sentence in GPS.fixed_commands.
HOT_START = const(1)
WARM_START = const(2)
COLD_START = const(3)
FULL_COLD_START = const(4)
STANDBY = const(5)
DEFAULT_SENTENCES = const(6)
VERSION = const(7)
ENABLE = const(8)
ANTENNA = const(9)
NO_ANTENNA = const(10)
# Return CRC of a bytearray.
def _crc(sentence):
x = 1
crc = 0
while sentence[x] != ord('*'):
crc ^= sentence[x]
x += 1
return crc # integer
class GPS(as_GPS.AS_GPS):
    """Read/write GPS driver: extends the AS_GPS sentence parser with the
    ability to send PMTK command packets (MTK3329/MTK3339) and to parse
    the chip's responses (version, enabled sentences, antenna status)."""

    # Command packets whose checksums never change, pre-computed.
    fixed_commands = {HOT_START: b'$PMTK101*32\r\n',
                      WARM_START: b'$PMTK102*31\r\n',
                      COLD_START: b'$PMTK103*30\r\n',
                      FULL_COLD_START: b'$PMTK104*37\r\n',
                      STANDBY: b'$PMTK161,0*28\r\n',
                      DEFAULT_SENTENCES: b'$PMTK314,-1*04\r\n',
                      VERSION: b'$PMTK605*31\r\n',
                      ENABLE: b'$PMTK414*33\r\n',
                      ANTENNA: b'$PGCMD,33,1*6C',
                      NO_ANTENNA: b'$PGCMD,33,0*6D',
                      }

    def __init__(self, sreader, swriter, local_offset=0,
                 fix_cb=lambda *_ : None, cb_mask=as_GPS.RMC, fix_cb_args=(),
                 msg_cb=lambda *_ : None, msg_cb_args=()):
        """Construct on top of AS_GPS; swriter sends commands, msg_cb fires
        on parsed PMTK/PGTOP responses with msg_cb_args appended."""
        super().__init__(sreader, local_offset, fix_cb, cb_mask, fix_cb_args)
        self._swriter = swriter
        self.version = None  # Response to VERSION query
        self.enabled = None  # Response to ENABLE query
        self.antenna = 0  # Response to ANTENNA.
        self._msg_cb = msg_cb
        self._msg_cb_args = msg_cb_args

    async def _send(self, sentence):
        """Patch the checksum placeholder of a mutable sentence and write it."""
        # BUGFIX: '{:2x}' space-pads checksums below 0x10 (e.g. ' f'), which
        # corrupts the NMEA sentence; '{:02x}' always yields two hex digits.
        bcrc = '{:02x}'.format(_crc(sentence)).encode()
        sentence[-4] = bcrc[0]  # Fix up CRC bytes
        sentence[-3] = bcrc[1]
        await self._swriter.awrite(sentence)

    async def baudrate(self, value=9600):
        """Request a new UART baudrate on the GPS side."""
        if value not in (4800,9600,14400,19200,38400,57600,115200):
            raise ValueError('Invalid baudrate {:d}.'.format(value))
        sentence = bytearray('$PMTK251,{:d}*00\r\n'.format(value))
        await self._send(sentence)

    async def update_interval(self, ms=1000):
        """Request a new fix update interval (100..10000 ms)."""
        if ms < 100 or ms > 10000:
            raise ValueError('Invalid update interval {:d}ms.'.format(ms))
        sentence = bytearray('$PMTK220,{:d}*00\r\n'.format(ms))
        await self._send(sentence)
        self._update_ms = ms  # Save for timing driver

    async def enable(self, *, gll=0, rmc=1, vtg=1, gga=1, gsa=1, gsv=5, chan=0):
        """Select which sentence types (and their rates) the chip emits."""
        fstr = '$PMTK314,{:d},{:d},{:d},{:d},{:d},{:d},0,0,0,0,0,0,0,0,0,0,0,0,{:d}*00\r\n'
        sentence = bytearray(fstr.format(gll, rmc, vtg, gga, gsa, gsv, chan))
        await self._send(sentence)

    async def command(self, cmd):
        """Send one of the pre-built fixed_commands packets."""
        if cmd not in self.fixed_commands:
            # BUGFIX: '{:s}' raised a formatting error for int commands;
            # '{}' reports the offending value instead.
            raise ValueError('Invalid command {}.'.format(cmd))
        await self._swriter.awrite(self.fixed_commands[cmd])

    # Should get 705 from VERSION 514 from ENABLE
    def parse(self, segs):
        """Handle PMTK/PGTOP response sentences; return True when consumed."""
        if segs[0] == 'PMTK705':  # Version response
            self.version = segs[1:]
            segs[0] = 'version'
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True

        if segs[0] == 'PMTK514':  # Enabled-sentences response
            print('enabled segs', segs)
            self.enabled = {'gll': segs[1], 'rmc': segs[2], 'vtg': segs[3],
                            'gga': segs[4], 'gsa': segs[5], 'gsv': segs[6],
                            'chan': segs[19]}
            segs = ['enabled', self.enabled]
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True

        if segs[0] == 'PGTOP':  # Antenna status
            self.antenna = segs[2]
            segs = ['antenna', self.antenna]
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True

        if segs[0][:4] == 'PMTK':  # Any other PMTK acknowledgement
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True
        return False
|
#!/usr/bin/env python
"""Read the minikube kubeconfig from the host, inline the CA/client cert
data (base64) so the config is self-contained, point the server URL at the
host VM, and store the result as a Cloudify runtime property."""
from cloudify import ctx
import os
import yaml


def _sudo_read_b64(path):
    """Return the base64 encoding of *path* as a single line.

    The minikube files are root-only readable, hence `sudo cat` via a shell.
    """
    stream = os.popen('sudo cat ' + path + ' | base64')
    return ''.join(line.rstrip('\n') for line in stream.readlines())


kubeconfig_path = '/home/docker/.kube/config'
kubeconfig_raw = os.popen('sudo cat ' + kubeconfig_path)
kube_config_dict = yaml.safe_load(kubeconfig_raw.read())

# Inline the certificate material instead of local file references.
kube_config_dict['clusters'][0]['cluster']['certificate-authority-data'] = \
    _sudo_read_b64('/home/docker/.minikube/ca.crt')
# NOTE(review): assumes HOST_VM_IP is set; os.getenv returns None otherwise
# and the concatenation below would raise — confirm with the caller.
kube_config_dict['clusters'][0]['cluster']['server'] = \
    'https://' + os.getenv('HOST_VM_IP') + ':8443'
kube_config_dict['users'][0]['user']['client-certificate-data'] = \
    _sudo_read_b64('/home/docker/.minikube/profiles/minikube/client.crt')
kube_config_dict['users'][0]['user']['client-key-data'] = \
    _sudo_read_b64('/home/docker/.minikube/profiles/minikube/client.key')

# Drop the file-path variants now that the data is inlined.
del kube_config_dict['clusters'][0]['cluster']['certificate-authority']
del kube_config_dict['users'][0]['user']['client-certificate']
del kube_config_dict['users'][0]['user']['client-key']

ctx.instance.runtime_properties['config'] = kube_config_dict
|
import pandas as pd
import tweepy
import webbrowser
import os
import urllib.request
from urllib.error import URLError, HTTPError
def get_twitter_url(LinkCorpus_file):
    """Filter the link corpus down to twitter.com URLs.

    Parameters
    ----------
    LinkCorpus_file : str
        CSV with columns 'Original Link URL', 'claim_id',
        'relevant_document_id', 'Snopes URL'.

    Returns
    -------
    tuple of lists
        (tweet_urls, claim_ids, relevant_doc_ids, snopes_url), index-aligned.
    """
    tweet_urls = []
    claim_ids = []
    relevant_doc_ids = []
    snopes_url = []
    data = pd.read_csv(LinkCorpus_file)
    urls = data['Original Link URL']
    ids = data['claim_id']
    r_ids = data['relevant_document_id']
    snops = data['Snopes URL']
    for index, url in enumerate(urls):
        words = url.split('/')
        # BUGFIX-adjacent cleanup: the 'www.twitter.com' and 'twitter.com'
        # branches had identical duplicated bodies; merged into one.
        if 'twitter.com' in words or 'www.twitter.com' in words:
            tweet_urls.append(url)
            claim_ids.append(ids[index])
            relevant_doc_ids.append(r_ids[index])
            snopes_url.append(snops[index])
    print(len(tweet_urls))  # same count the old running counter printed
    return tweet_urls, claim_ids, relevant_doc_ids, snopes_url
def get_status_id(tweet_urls, claim_ids, relevant_doc_ids, snopes_url):
    """Extract tweet status ids from URLs, keeping the metadata lists aligned.

    A path segment of 18 or 19 characters is treated as a status id.
    """
    from urllib.parse import urlparse
    status_id = []
    new_claim_ids = []
    new_rel_doc_ids = []
    new_snoops = []
    new_tweet_urls = []
    for idx, url in enumerate(tweet_urls):
        for segment in urlparse(url).path.split('/'):
            # Tweet status ids are currently 18-19 decimal digits long.
            if len(segment) in (18, 19):
                status_id.append(segment)
                new_claim_ids.append(claim_ids[idx])
                new_rel_doc_ids.append(relevant_doc_ids[idx])
                new_snoops.append(snopes_url[idx])
                new_tweet_urls.append(tweet_urls[idx])
    print(f"{len(status_id)}, {len(new_claim_ids)}")
    return status_id, new_tweet_urls, new_claim_ids, new_rel_doc_ids, new_snoops
def get_auth():
    """Build a tweepy API handle via the OAuth out-of-band (PIN) flow.

    SECURITY: the consumer key/secret were hard-coded in source. They can now
    be supplied via TWITTER_CONSUMER_KEY / TWITTER_CONSUMER_SECRET env vars;
    the old literals remain only as backward-compatible defaults and should
    be rotated and removed from version control.
    """
    consumer_key = os.environ.get('TWITTER_CONSUMER_KEY',
                                  "kSsK1G38xcyMHnhXOXyurxrOH")
    consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET',
                                     "SiVcbtucDekrZtlBndFkNRsxj5p2AF6hli3rrabd3Shced7BYd")
    callback_uri = 'oob'  # out-of-band: user pastes a PIN instead of a redirect
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback_uri)
    redirect_url = auth.get_authorization_url()  # network call; kept for parity
    # webbrowser.open(redirect_url)
    # user_pin_input = input('whats the pin value ')
    # auth.get_access_token(user_pin_input)
    api = tweepy.API(auth)
    return api
def fetch_tweets(parent_dir,api,status_id,new_tweet_urls,new_claim_ids,new_rel_doc_ids,new_snoops):
    """Download each tweet by status id, parse/store it, and pickle the raw data.

    Successfully fetched tweets are parsed immediately via parse_one_tweet;
    failures are logged and skipped (deliberate best-effort: API errors for
    deleted/protected tweets must not stop the crawl). Sleeps 1s per request
    to stay under rate limits. Returns the metadata lists for fetched tweets.
    """
    import pickle
    import time
    tweet_data = []
    claim_ids = []
    rel_doc_ids = []
    snoopes_url = []
    twitter_url = []
    for idx, id in enumerate(status_id):
        try:
            data = api.get_status(id , tweet_mode='extended')._json
            tweet_data.append(data)
            claim_ids.append(new_claim_ids[idx])
            rel_doc_ids.append(new_rel_doc_ids[idx])
            snoopes_url.append(new_snoops[idx])
            twitter_url.append(new_tweet_urls[idx])
            parse_one_tweet(parent_dir,data,new_claim_ids[idx],new_rel_doc_ids[idx],new_snoops[idx],new_tweet_urls[idx],idx)
            time.sleep(1)
        except Exception as e:
            # Best-effort: report the failing id and continue with the rest.
            print(e)
            print(id)
            print('-'*20)
            time.sleep(1)
        if idx %100==0:
            print(idx)  # coarse progress indicator
    # Persist the raw JSON payloads of everything fetched so far.
    with open(os.path.join(parent_dir,'tweets.pickle'), 'wb') as handle:
        pickle.dump(tweet_data, handle)
    return claim_ids,rel_doc_ids,snoopes_url,twitter_url
def stop_handled(cursor):
    """Yield items from a tweepy-style cursor until it raises StopIteration."""
    try:
        while True:
            yield cursor.next()
    except StopIteration:
        return
import csv
def write_csv(parent_dir, data):
    """Append *data* as one row to twitter_data_new.csv inside *parent_dir*."""
    target = os.path.join(parent_dir, 'twitter_data_new.csv')
    with open(target, 'a') as outfile:
        csv.writer(outfile).writerow(data)
import pickle
import json
import pandas as pd
def parse_one_tweet(parent_dir,tweet,claim_id,relevant_doc_id,snoop,tweet_url,idx):
    """Extract text and media from one tweet JSON dict, download the media,
    and append a row to the CSV via write_csv.

    Media files are saved under parent_dir/images and parent_dir/videos with
    names '<claim>-<reldoc>-<count>-<tweet_id>.<ext>'. Download failures are
    logged but the URL is still recorded. The first five tweets (idx < 5)
    are echoed for manual spot-checking.
    """
    image_dir = parent_dir+'/images'
    videos_dir =parent_dir+'/videos'
    text = tweet['full_text']
    id = tweet['id_str']
    # zero-padded "<claim_id>-<relevant_doc_id>-" filename prefix
    prefix=str(claim_id).zfill(5)+"-"+str(relevant_doc_id).zfill(5)+"-"
    photo_url = []
    video_url = []
    media_type = None  # stays None when the tweet carries no media
    photo_count = 0
    video_count = 0
    if 'extended_entities' in tweet:
        for item in tweet['extended_entities']['media']:
            media_type = item['type']
            if media_type == 'photo':
                url = item['media_url_https']
                path = os.path.join(image_dir,prefix+ str(photo_count) +"-"+ id) + ".jpg"
                photo_count+=1
                try:
                    urllib.request.urlretrieve(url,path)
                except HTTPError as e:
                    # Download failed; the URL is still appended below.
                    print('&'*20)
                    print(url)
                    print('&'*20)
                photo_url.append(url)
            elif media_type == 'video':
                variants = item['video_info']['variants']
                for data in variants:
                    if data['content_type'] == 'video/mp4':
                        # NOTE(review): only these two bitrates are kept —
                        # presumably mid-quality renditions; confirm intent.
                        if data["bitrate"] == 832000 or data["bitrate"] == 632000:
                            url = data['url']
                            path = os.path.join(videos_dir,prefix+ str(video_count) +"-"+ id) + ".mp4"
                            video_count+=1
                            try:
                                urllib.request.urlretrieve(url,path)
                            except HTTPError as e:
                                # do something
                                print('&'*20)
                                print(url)
                                print('&'*20)
                            video_url.append(url)
                            continue
    # One CSV row per tweet; media columns depend on the last media_type seen.
    if media_type == None:
        dat = [str(id), claim_id, relevant_doc_id, snoop, tweet_url, text, 'None', 'None']
        write_csv(parent_dir,dat)
    elif media_type == 'photo':
        dat = [str(id), claim_id, relevant_doc_id, snoop, tweet_url, text, media_type, photo_url]
        write_csv(parent_dir,dat)
    elif media_type == 'video':
        dat = [str(id), claim_id, relevant_doc_id, snoop, tweet_url, text, media_type, video_url]
        write_csv(parent_dir,dat)
    if idx<5:
        # Spot-check output for the first few tweets.
        print("Text:", text)
        print("CLAIM ID:", claim_id)
        print("REL DOC ID", relevant_doc_id)
        print("Media type:", media_type)
        print("Photo_url:", photo_url )
        print("Video url:", video_url)
        print(dat)
        print('-'*100)
def main():
    """Pipeline entry point: filter twitter links from the corpus, extract
    status ids, authenticate, then fetch and parse the tweets."""
    LinkCorpus_file = 'final_corpus/politifact_v1/LinkCorpus.csv'
    parent_dir="final_corpus/politifact_v1/twitter"
    tweet_urls,claim_ids,relevant_doc_ids,snopes_url=get_twitter_url(LinkCorpus_file)
    status_id,new_tweet_urls,new_claim_ids,new_rel_doc_ids,new_snoops=get_status_id(tweet_urls,claim_ids,relevant_doc_ids,snopes_url)
    api=get_auth()
    claim_ids,rel_doc_ids,snoopes_url,twitter_url=fetch_tweets(parent_dir,api,status_id,new_tweet_urls,new_claim_ids,new_rel_doc_ids,new_snoops)
    # parse_tweet(parent_dir,claim_ids,rel_doc_ids,snoopes_url,twitter_url)

# NOTE(review): executes on import — there is no `if __name__ == "__main__"`
# guard; adding one would change the module's observable behavior.
main()
|
import curses, curses.ascii, os
from subprocess import run

# Forward local keystrokes to an `mbc` device as raw scancode sequences.
# Runs top-level: grabs the terminal via curses, loops on getch() and shells
# out to `mbc raw_seq <seq>` for every key until Ctrl+D.
os.environ['TERM'] = 'xterm'
screen = curses.initscr()
screen.keypad(True)
screen.notimeout(True)
os.environ.setdefault('ESCDELAY', '0')
os.environ.setdefault('MBC_SEQUENCE_WAIT', '0')
exitmsg = "Ctrl+D to exit \n"
screen.addstr(exitmsg)
screen.refresh()

# Legacy name->sequence table kept for reference; it was never consulted by
# the old 40-branch if/elif chain (its keys are strings, not key codes).
keydict = {
    "curses.KEY_UP": ":67",
    "curses.KEY_DOWN": ":6C",
    "curses.KEY_LEFT": ":69",
    "curses.KEY_RIGHT": ":6A",
    "curses.KEY_HOME": ":66",
    "curses.KEY_F1": ":3B",
    "curses.KEY_F2": ":3C",
    "curses.KEY_F3": ":3D",
    "curses.KEY_F4": ":3E",
    "curses.KEY_F5": ":3F",
    "curses.KEY_F6": ":40",
    "curses.KEY_F7": ":41",
    "curses.KEY_F8": ":42",
    "curses.KEY_F9": ":43",
    "curses.KEY_F10": ":44",
    "curses.KEY_F11": ":57",
    "curses.KEY_F12": ":58",
    "curses.ascii.SP": ":39",
    "curses.ascii.NL": ":1C",
    "curses.ascii.ESC": ":01",
    "curses.KEY_DC": ":6F",
    "curses.KEY_IC": ":6E",
    "curses.KEY_NPAGE": ":6D",
    "curses.KEY_PPAGE": ":68",
    "curses.KEY_END": ":6B",
    "curses.ascii.BS": ":0E"
}

# Key code -> (label shown on screen, mbc raw sequence). Replaces the old
# if/elif chain (which also had a duplicated, unreachable KEY_HOME branch).
special_keys = {
    curses.KEY_UP: ("Up", ":67"),
    curses.KEY_DOWN: ("Down", ":6C"),
    curses.KEY_LEFT: ("Left", ":69"),
    curses.KEY_RIGHT: ("Right", ":6A"),
    curses.ascii.ESC: ("Esc", ":01"),
    curses.ascii.NL: ("Enter", ":1C"),
    curses.KEY_HOME: ("Home", ":66"),
    curses.KEY_F1: ("F1", ":3B"),
    curses.KEY_F2: ("F2", ":3C"),
    curses.KEY_F3: ("F3", ":3D"),
    curses.KEY_F4: ("F4", ":3E"),
    curses.KEY_F5: ("F5", ":3F"),
    curses.KEY_F6: ("F6", ":40"),
    curses.KEY_F7: ("F7", ":41"),
    curses.KEY_F8: ("F8", ":42"),
    curses.KEY_F9: ("F9", ":43"),
    curses.KEY_F10: ("F10", ":44"),
    curses.KEY_F11: ("F11", ":57"),
    curses.KEY_F12: ("F12", ":58"),
    curses.ascii.SP: ("Space", ":39"),
    curses.KEY_DC: ("Delete", ":6F"),
    curses.KEY_NPAGE: ("Page Down", ":6D"),
    curses.KEY_PPAGE: ("Page Up", ":68"),
    curses.KEY_END: ("End", ":6B"),
}

while True:
    c = screen.getch()
    screen.clear()
    if c == curses.ascii.EOT:  # Ctrl+D exits (no branch below matches 4)
        break
    if c in (curses.ascii.BS, 8, 127):
        # Terminals send different backspace codes (BS == 8, or DEL == 127).
        label, seq = "Backspace", ":0E"
    elif c in special_keys:
        label, seq = special_keys[c]
    else:
        # Any other key: echo it and forward the literal character.
        label, seq = chr(c), chr(c)
    screen.addstr(exitmsg + label)
    run(['mbc', 'raw_seq', seq])
    screen.refresh()
curses.endwin()
|
# -*- coding: utf-8 -*-
"""
trace_simexp._version
*********************
Module with version number unified across project, used in the module,
setup.py, and other command line interfaces.
"""
# Single source of truth for the package version string.
__version__ = "0.5.0"
|
"""
Implement numerical drop imputer.
"""
from typing import Any, Union, List, Optional
import dask.dataframe as dd
from dask.dataframe import from_pandas
import pandas as pd
class DropImputer:
    """Drop a column entirely when it contains any configured null value.

    Attributes
    ----------
    null_values
        Specified null values which should be recognized (None disables).
    isdrop
        Set by fit(); True when the column should be dropped.
    """

    def __init__(self, null_values: Optional[List[Any]]) -> None:
        """
        Initiate the drop imputer.

        Parameters
        ----------
        null_values
            Specified null values which should be recognized.
        """
        self.null_values = null_values
        self.isdrop = False

    def fit(self, col_df: dd.Series) -> Any:
        """
        Check if the provided column needs to be dropped.

        Parameters
        ----------
        col_df
            Provided data column.
        """
        self.isdrop = True in col_df.map(self.check_isdrop).values
        return self

    def transform(self, col_df: dd.Series) -> dd.Series:
        """
        Return the column unchanged, or an empty series when fit()
        marked it for dropping.

        Parameters
        ----------
        col_df
            Provided data column.
        """
        if not self.isdrop:
            return col_df
        # FIX: pin the dtype — pd.Series([]) without one is deprecated and
        # its default dtype may change between pandas versions; float64 is
        # the historical default this code relied on.
        return from_pandas(pd.Series([], dtype="float64"), npartitions=2)

    def fit_transform(self, col_df: dd.Series) -> dd.Series:
        """
        fit() then transform() on the same column.

        Parameters
        ----------
        col_df
            Data column.
        """
        return self.fit(col_df).transform(col_df)

    def check_isdrop(self, val: Union[int, float]) -> bool:
        """
        Return True when *val* matches one of the configured null values.

        Parameters
        ----------
        val
            Current value needs to be checked.
        """
        # Idiom fix: `is not None` replaces the precedence-unfriendly
        # `not ... is None`.
        if self.null_values is not None:
            return val in self.null_values
        return False
|
from PIL import Image
import numpy as np
import time
def make_img_label_dic(_path):
    """Parse a dataset txt file into a list of {image_name: label} dicts.

    Each non-blank line is expected to look like "<image_name> <label>".
    """
    dic_list = []
    # FIX: the original leaked the file handle; `with` guarantees closing.
    with open(_path) as file:
        for item in file.readlines():
            item = item.strip()
            key = item.split(' ')[0]
            value = item.split(' ')[1]
            dic_list.append({key: value})
    print('readed items from txt:', len(dic_list))
    return dic_list
def seperate_img_lebel(_dic_list):
    """Split [{img: label}, ...] into (image_names, integer_labels)."""
    imgs_list = [list(entry.keys())[0] for entry in _dic_list]
    labels_list = [int(list(entry.values())[0]) for entry in _dic_list]
    return imgs_list, labels_list
def imgs_to_array(_img_list):
    """Load images by name and return a float32 array of pixel data.

    Input: list of image names (files live at PATH_imgs + name + '.jpg').
    Output shape: (len(_img_list), IMG_SIZE, IMG_SIZE, IMG_CHANNEL).
    Pixel values are normalized into [0, 1].
    """
    pixels = [np.array(Image.open(PATH_imgs + name + '.jpg')) / 255
              for name in _img_list]
    return np.array(pixels, dtype='float32').reshape(
        (len(_img_list), IMG_SIZE, IMG_SIZE, IMG_CHANNEL))
'''Convert a label list to a numpy array.'''
def labels_to_array(_label_list):
    return np.array(_label_list)
def classify_with_labels(_dic_list):
    """Group consecutive {img: label} entries that share the same label.

    Assumes entries with equal labels are adjacent (as read from the
    dataset txt). Returns a list of groups (lists of dicts).
    """
    classified_list = []
    current = [_dic_list[0]]
    for entry in _dic_list[1:]:
        if list(entry.values())[0] == list(current[-1].values())[0]:
            current.append(entry)
        else:
            classified_list.append(current)
            current = [entry]
    classified_list.append(current)

    total_imags = sum(len(cluster) for cluster in classified_list)
    print('classified total imags:', total_imags)
    return classified_list
def make_train_test(_classified_list, _SCALE):
    """Split each label cluster into train/test parts by ratio _SCALE.

    Deterministic: as long as dataset.txt and _SCALE are unchanged, the
    same test set is produced every run.
    """
    train_dic_list = []
    test_dic_list = []
    for cluster in _classified_list:
        cut = int(len(cluster) * _SCALE)
        train_dic_list.extend(cluster[:cut])
        test_dic_list.extend(cluster[cut:])
    return train_dic_list, test_dic_list
'''External data-loading entry point; returns a dict.'''
def load_data():
    # Split the dataset (from PATH_txt) into a training set and a test set.
    dic_list = make_img_label_dic(PATH_txt)
    classified_list = classify_with_labels(dic_list)
    train_set,test_set = make_train_test(classified_list,SCALE)
    # ##
    # total number of classes
    #CLUSTER_NUM = len(classified_list)
    return {'train_set':train_set,'test_set':test_set}
def get_img_path_from_path(_name):
    """Build the image file path from a bare image name."""
    return '{}{}.jpg'.format(PATH_imgs, _name)
time_start = time.time()
#------------------------------------
CLUSTER_NUM = 2  # total number of label classes
PATH_imgs = 'data/'
PATH_txt = 'dataset_'+str(CLUSTER_NUM)+'c.txt'
SCALE = 0.8  # fraction of the image set used for training
IMG_SIZE = 224  # side length of the loaded images
IMG_CHANNEL = 3
#train = []  # training dict
#test = []  # test dict
#dic_list = make_img_label_dic(PATH_txt)
#classified_list = classify_with_labels(dic_list)
#train,test = make_train_test(classified_list,SCALE)
#img_list,la = seperate_img_lebel(test)
#array_ = imgs_to_array(img_list[:100])
#result_dic = load_data()
#------------------------------------
time_end = time.time()
epis = time_end - time_start
print('used time:',int(epis/60),'mins',int(epis%60),'secs')
|
import pylab, math
def showGrowth(lower, upper):
    """Plot successive pairs of growth functions over [lower, upper]:
    log vs linear, linear vs log-linear, log-linear vs quadratic,
    quadratic vs exponential (linear and semilog axes)."""
    ns = range(lower, upper + 1)
    log = [math.log(n, 2) for n in ns]
    linear = [n for n in ns]
    logLinear = [n * math.log(n, 2) for n in ns]
    quadratic = [n ** 2 for n in ns]
    exponential = [2 ** n for n in ns]

    pylab.plot(log, label='log')
    pylab.plot(linear, label='linear')
    pylab.legend(loc='upper left')
    pylab.figure()

    pylab.plot(linear, label='linear')
    pylab.plot(logLinear, label='log linear')
    pylab.legend(loc='upper left')
    pylab.figure()

    pylab.plot(logLinear, label='log linear')
    pylab.plot(quadratic, label='quadratic')
    pylab.legend(loc='upper left')
    pylab.figure()

    pylab.plot(quadratic, label='quadratic')
    pylab.plot(exponential, label='exponential')
    pylab.legend(loc='upper left')
    pylab.figure()

    # Same pair again, but on a log-scaled y axis.
    pylab.plot(quadratic, label='quadratic')
    pylab.plot(exponential, label='exponential')
    pylab.semilogy()
    pylab.legend(loc='upper left')
    return
# NOTE(review): upper=1000 makes `exponential` hold integers around 2**1000;
# converting those to floats for plotting overflows — confirm the intent.
showGrowth(1, 1000)
pylab.show()
|
#!/usr/bin/env python
class P4Dirs(object):
|
# Read a line, lowercase it, drop the vowels (a, o, y, e, u, i) and prefix
# every remaining character with a dot before printing.
vowels = 'aoyeui'
string = input().lower()
result = ''.join('.' + ch for ch in string if ch not in vowels)
print(result)
"""
This module defines APIs for multitasking and queueing
"""
from __future__ import print_function
# http://stackoverflow.com/a/2740494
# http://stackoverflow.com/a/6319267
# http://stackoverflow.com/a/15144765
from future import standard_library
standard_library.install_aliases()
#from builtins import str
from builtins import range
from builtins import object
import multiprocessing
import multiprocessing.dummy
import functools
try:
import threading as threading
except ImportError:
import dummy_threading as threading # ensures threading exists
import queue
from time import time as _time
from multiprocessing.managers import SyncManager
from advutils import BaseCreation
# http://stackoverflow.com/a/33764672/5288758
# https://pymotw.com/3/multiprocessing/communication.html
from itertools import count
Empty = queue.Empty
class MultiProcessingAPI(object):
    """Uniform factory over process-backed primitives (spawn=True) and
    thread/in-process equivalents (spawn=False, the default)."""

    def __init__(self, spawn=False):
        # True -> real OS processes; False -> threads in this process.
        self.spawn = spawn

    def Process(self, *args, **kwargs):
        """Create a multiprocessing.Process or threading.Thread."""
        factory = multiprocessing.Process if self.spawn else threading.Thread
        return factory(*args, **kwargs)

    Thread = Process  # alias: same factory whichever name the caller uses

    def Pool(self, *args, **kwargs):
        """Worker pool backed by processes or by threads (dummy module)."""
        factory = multiprocessing.Pool if self.spawn else multiprocessing.dummy.Pool
        return factory(*args, **kwargs)

    ################################################

    def Queue(self, *args, **kwargs):
        """IPC-safe multiprocessing.Queue or in-process queue.Queue."""
        factory = multiprocessing.Queue if self.spawn else queue.Queue
        return factory(*args, **kwargs)

    def Event(self):
        """Event usable across processes or threads respectively."""
        factory = multiprocessing.Event if self.spawn else threading.Event
        return factory()

    def Semaphore(self, *args, **kwargs):
        """Semaphore matching the configured backend."""
        factory = multiprocessing.Semaphore if self.spawn else threading.Semaphore
        return factory(*args, **kwargs)

    def Lock(self):
        """Lock matching the configured backend."""
        factory = multiprocessing.Lock if self.spawn else threading.Lock
        return factory()

    def RLock(self):
        """Re-entrant lock matching the configured backend."""
        factory = multiprocessing.RLock if self.spawn else threading.RLock
        return factory()

    ################################################

    def decorate(self, obj):
        """Decorator form of manage(): wraps *obj* so calls go through it."""
        @functools.wraps(obj)
        def wrapper(*args, **kwargs):
            return self.manage(obj, *args, **kwargs)
        return wrapper

    def manage(self, obj, *args, **kwargs):
        """Instantiate *obj*, proxied through a SyncManager when spawning
        processes; direct construction otherwise."""
        if self.spawn:
            SyncManager.register('temp', obj)
            manager = SyncManager()
            manager.start()
            return manager.temp(*args, **kwargs)
        return obj(*args, **kwargs)
# Module-level default instance (thread mode, spawn=False); used by the
# queue classes and helpers below as a shared concurrency factory.
api = MultiProcessingAPI() # adds a global manager
def heappush(l, item):
    """
    Insert ``item`` into the list queue, keeping it ordered from the
    biggest to the smallest entry (equal entries stay FIFO because a
    new item is placed before existing equal ones).

    :param l: list queue
    :param item: Event
    """
    # TODO see if this method is in queue package and select better
    pos = 0
    for existing in l:
        if item >= existing:
            l.insert(pos, item)
            return
        pos += 1
    l.append(item)
def heappop(l):
    """
    Remove and return the tail item of the list queue (given that
    ``heappush`` keeps the list in descending order, this is the
    lowest-ordered entry).

    :param l: list queue
    :return: last item from list
    """
    return l.pop(-1)
class PriorityQueue(queue.PriorityQueue):
    """
    Variant of Queue.PriorityQueue in that FIFO rule is kept inside
    the same priority number groups.
    Entries are typically tuples of the form: (priority number, data).
    """
    def _init(self, maxsize):
        # Backing store is a plain list created through the module-level
        # ``api`` so it can be wrapped in a SyncManager proxy when the
        # api is in process (spawn) mode.
        self.queue = api.manage(list)
    def _put(self, item, heappush=heappush):
        # Default argument freezes the module-level ``heappush`` at
        # definition time (same micro-optimisation the stdlib uses).
        heappush(self.queue, item)
    def _get(self, heappop=heappop):
        # Pop from the tail of the descending-ordered list.
        return heappop(self.queue)
class Designator(queue.PriorityQueue):
    """
    Task designator: a closable priority queue.

    Behaves like :class:`PriorityQueue` (priority order via the module
    ``heappush``/``heappop`` helpers, backed by a list obtained from the
    module-level ``api``) but adds a close operation: once closed the
    queue rejects new items and wakes up consumers blocked in
    :meth:`get` so they can observe the closed state.
    """

    def _init(self, maxsize):
        """
        Create the backing store and mark the queue as open.

        :param maxsize: unused here; enforced by queue.Queue machinery
        """
        self.queue = api.manage(list)
        self._isOpen = True

    def _put(self, item, heappush=heappush):
        """
        Insert ``item`` preserving priority order.

        :param item: entry to enqueue
        :param heappush: insertion helper bound at definition time
        :raises Exception: if the queue has been closed
        """
        if self._isOpen:
            heappush(self.queue, item)
        else:
            raise Exception("Queue is closed")

    def _get(self, heappop=heappop):
        """
        Remove and return the tail entry of the backing list.

        :param heappop: removal helper bound at definition time
        """
        return heappop(self.queue)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        Unlike queue.Queue.get, a blocking call also returns (raising
        Empty) as soon as the queue is closed and drained.
        """
        self.not_empty.acquire()
        try:
            if not block or not self._isOpen:  # do not block when closed
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while self._isOpen and not self._qsize():
                    self.not_empty.wait()
                if not self._isOpen and not self._qsize():
                    # queue was closed while waiting and nothing is left
                    raise Empty
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def close(self):
        """
        Close the queue: further puts raise, and all consumers blocked
        in :meth:`get` are woken so they can see the closed state.
        """
        self._isOpen = False
        self.not_empty.acquire()
        # notify_all() is the non-deprecated spelling of notifyAll().
        self.not_empty.notify_all()
        self.not_empty.release()

    def isOpen(self):
        """
        :return: True while the queue still accepts new items
        """
        return self._isOpen

    def __iter__(self):
        # Iterate entries from tail to head (consumption order).
        # The mutex release is in a finally block so that an abandoned
        # (not fully consumed) generator cannot leave the queue locked
        # forever -- the original released only after a complete loop.
        self.mutex.acquire()
        try:
            queue = self.queue
            for i in range(len(queue) - 1, -1, -1):
                yield queue[i]
        finally:
            self.mutex.release()

    def __getitem__(self, index):
        return self.queue[index]

    def __setitem__(self, index, value):
        self.queue[index] = value

    def __delitem__(self, index):
        del self.queue[index]

    def __len__(self):
        return len(self.queue)
# Sentinel priority bounds used by QueueCarrier instances.
HIGHEST_PRIORITY = float("Inf") # highest priority for QueueCarrier
LOWEST_PRIORITY = float("-Inf") # lowest priority for QueueCarrier
@functools.total_ordering  # derive the remaining comparisons from __eq__/__lt__
class QueueCarrier(BaseCreation):
    """
    Base carrier used to convey data reliably in PriorityQueues.

    Ordering compares ``(priority, creation_time)`` so carriers with
    equal priority fall back to creation order; equality is object
    identity (two distinct carriers are never equal).
    """
    HIGHEST_PRIORITY = HIGHEST_PRIORITY
    LOWEST_PRIORITY = LOWEST_PRIORITY

    def __init__(self, priority):
        super(QueueCarrier, self).__init__()
        self.priority = priority

    def __eq__(self, other):
        # Identity equality (complements functools.total_ordering).
        return self is other

    def __lt__(self, other):
        # If A was created before B with the same priority, A sorts lower.
        return ((self.priority, self.creation_time)
                < (other.priority, other.creation_time))
class IterDecouple(object):
    """
    Decouple iterator from main thread and with processes.
    """
    def __init__(self, iterable, processes=None, buffsize=0, handler=None):
        """
        Get values from an iterable in a different thread. if the process that uses
        the items from the iterator is busy it keeps buffering values until they
        are requested. It enhances performance by reducing the waiting time taken
        by the retrieving items from an iterator used in the for loop.
        # given the following iterable
        iterable = not_processed_data() # e.g. generator
        # problem case: process that wastes idle time
        for i in iterable: # retrieving item from iterable takes time
            busy_process(i) # idle time to retrieve next i item
        # Usage: reduces wasted time by decoupling
        for i in decoupled_for(iterable): # for has been decoupled from iterable
            busy_process(i) # meanwhile next i items are been retrieved
        :param iterable: any object usable in a for loop
        :param processes: Number of processes to spawn
        :param buffsize: size of buffer to retrieve items ahead
        :param handler: handle function to process item from iterable
            and generate data. Notice that processing times from handler
            functions are detached from main.
        :param spawn: True to create new process, False to create new Thread
        Note: processes only support pickable objects.
        """
        self.iterable = iterable
        self.processes = processes
        self.call_func = handler
        self.buffsize = buffsize
        # Initialize variables
        self.queue = None # buffer queue filled by the worker
        self._finish_signal = None # Event set when the worker has finished
        self.thread = None # worker thread feeding the queue
        self._running = None # knows it has never been initialized if None
    def start(self):
        """
        Start generating data from self.iterable
        to be consumable from self.queue
        """
        if self._running is True:
            raise Exception("Already running")
        def worker(queue, iterable):
            # Runs in a separate thread; fills `queue` with items, either
            # raw, processed by call_func, or processed in spawned workers.
            if self.processes is not None and self.call_func is not None:
                # call call_func inside processes and
                # synchronously put results into queue
                def process_func(previous_lock, next_lock, id, data):
                    # NOTE(review): parameter `id` shadows the builtin.
                    def stop_func(force=False):
                        """
                        function to clean up locks and processes
                        :param force: force to clean and notify to close.
                        :return: True to close else False
                        """
                        if self._running and not force:
                            return False # do not finish
                        if id is not None:
                            del processes_memo[id] # release this process
                        next_lock.release() # release for next task
                        # by releasing next task they can finish
                        # without putting data in queue
                        return True # it can finish before starting to put data
                    if stop_func(): # close if iteration stopped
                        return
                    # process data
                    value = self.call_func(data)
                    # wait previous answers
                    if previous_lock is not None:
                        previous_lock.acquire()
                    if stop_func(): # close if iteration stopped
                        return
                    # put answer after previous answers
                    queue.put(value)
                    stop_func(force=True) # clean up
                # initialize variables
                it = iter(iterable)
                processes_memo = {} # list of processes
                # start first task
                previous_lock = api.Lock()
                previous_lock.acquire()
                id_time = _time() # create id of process
                p = api.Process(target=process_func, args=(
                    None, previous_lock, id_time, next(it)))
                processes_memo[id_time] = p
                p.start()
                # keep filling processes with tasks
                while True:
                    if not self._running:
                        # execute just this routine
                        if len(processes_memo) == 0:
                            break # ensures all processes are finished
                        else:
                            continue
                    try:
                        # fill processes with tasks
                        while len(processes_memo) < self.processes:
                            next_lock = api.Lock()
                            next_lock.acquire()
                            id_time = _time()
                            p = api.Process(target=process_func, args=(
                                previous_lock, next_lock, id_time, next(it)))
                            processes_memo[id_time] = p
                            p.start()
                            # update lock for next task
                            previous_lock = next_lock
                    except StopIteration:
                        if len(processes_memo) == 0:
                            break
            elif self.call_func is not None:
                # call call_func and put into queue
                for i in iterable:
                    if not self._running:
                        break # it can finish before starting to put data
                    queue.put(self.call_func(i))
            else:
                # just place values into queue
                for i in iterable:
                    if not self._running:
                        break # it can finish before starting to put data
                    queue.put(i) # put data into queue
            self._finish_signal.set() # decoupled for is finished
        self.queue = queue = api.Queue(
            self.buffsize) # gets values from worker
        self._finish_signal = sig = api.Event() # handles finishing signal
        # NOTE(review): uses threading.Thread directly rather than
        # api.Process, so the feeder is always a thread -- confirm intended.
        self.thread = thread = threading.Thread(target=worker,
                                                args=(queue, self.iterable))
        self._running = True
        thread.start()
    def close(self):
        # Signal the worker loop to stop; producers drain on their own.
        self._running = False
    def join(self):
        """
        Wait until data is generated and consumed from self.iterable
        """
        if not self._running:
            raise Exception("Not running")
        self.thread.join()
        self._running = False
    def __iter__(self):
        """
        Iterate over detached data from self.iterable
        """
        if not self._running:
            # start if not running
            self.start()
        return self
    def generator(self):
        """
        Generate detached data from self.iterable
        """
        # NOTE(review): this loop busy-waits while the queue is empty and
        # the worker is still producing -- consider a blocking get().
        while True:
            if self.queue.empty():
                # if tasks are done and queue was consumed then break
                if self._finish_signal.is_set() and self.queue.empty():
                    break
            else:
                # do not read if queue is empty
                # NOTE(review): task_done() exists on queue.Queue but not on
                # multiprocessing.Queue -- verify behaviour in spawn mode.
                value = self.queue.get()
                self.queue.task_done()
                yield value
        if self._running:
            self.join()
    def __next__(self):
        # Delegate to the generator; raise StopIteration when drained.
        for i in self.generator():
            return i
        raise StopIteration
    next = __next__ # compatibility with python 2
def use_pool(func, iterable, workers=4, chunksize=1):
    """
    Lazily map ``func`` over ``iterable`` using a pool of workers.

    :param func: function to use in processing
    :param iterable: iterable object
    :param workers: number of workers
    :param chunksize: number of chunks to process per thread
    :return: lazy iterator over the mapped results (``pool.imap``)
    """
    worker_pool = api.Pool(workers)  # Make the Pool of workers
    return worker_pool.imap(func, iterable, chunksize)
|
"""The logger class supporting the crawler"""
# Author: Honglin Yu <yuhonglin1986@gmail.com>
# License: BSD 3 clause
import os
import time
class Logger(object):
    """Record the crawling status, error and warnings.

    Keeps one append-mode file handle per log key plus a ``key.done``
    file listing the keys that have already been crawled.

    NOTE(review): the 'a+r' open mode is a Python-2 quirk; Python 3
    rejects it -- confirm the target interpreter before porting.
    """

    def __init__(self, outputDir=""):
        """
        Arguments:
        - `outputDir`: directory that will contain the log files
        """
        self._output_dir = outputDir
        self._log_file_dict = {'log' : open(self._output_dir + '/log', 'a+r')}
        #self._mutex_done = threading.Lock()
        #self._mutex_log = threading.Lock()
        self._done_file = open(self._output_dir + '/key.done', 'a+r')

    def add_log(self, d):
        """Register additional log files.

        Arguments:
        - `d`: log_file_key : log_file_name
        """
        # .items() instead of the Python-2-only .iteritems(): identical
        # iteration behaviour in py2, and forward-compatible with py3.
        for i, j in d.items():
            self._log_file_dict[i] = open(self._output_dir + j, 'a+r')

    def get_key_done(self, lfkl):
        """get the keys that have been crawled

        Arguments:
        - `lfkl`: additional list that want to put input done list
        """
        r = []
        for i in lfkl:
            tmp = self._log_file_dict[i]
            for l in tmp:
                # each log line is a stringified [timestamp, key, message];
                # NOTE: eval() trusts the log file contents -- acceptable only
                # because these files are written by this class itself.
                r.append( eval(l)[1] )
        return r + [x.rstrip('\n') for x in self._done_file]

    def log_done(self, k):
        """ this function is thread safe

        Arguments:
        - `k`: the key that has finished crawling
        """
        #self._mutex_done.acquire()
        self._done_file.write( '%s\n' % k )
        self._done_file.flush()
        #self._mutex_done.release()

    def log_warn(self, k, m, lfk='log'):
        """log message as warning

        Arguments:
        - `k` : the key
        - `m`: the message
        - `lfk`: log_file_key
        """
        #self._mutex_log.acquire()
        self._log_file_dict[lfk].write( str([time.strftime('%Y_%m_%d_%m_%H_%M'), k, m ]) + '\n' )
        self._log_file_dict[lfk].flush()
        #self._mutex_log.release()
|
#!/usr/bin/env python
#
# Public Domain 2014-2017 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
from suite_subprocess import suite_subprocess
from wtscenario import make_scenarios
import wiredtiger, wttest
# test_huffman02.py
# Huffman key and value configurations test.
class test_huffman02(wttest.WiredTigerTestCase, suite_subprocess):
    """Exercise huffman_key/huffman_value create-time configurations.

    Scenarios cross file/table URIs with valid ('english', 'none') and
    invalid ('bad') huffman settings; invalid ones must fail creation.
    """
    huffkey = [
        ('bad', dict(keybad=1,huffkey=',huffman_key=bad')),
        ('english', dict(keybad=0,huffkey=',huffman_key=english')),
        ('none', dict(keybad=0,huffkey=',huffman_key=none')),
    ]
    huffval = [
        ('bad', dict(valbad=1,huffval=',huffman_value=bad')),
        ('english', dict(valbad=0,huffval=',huffman_value=english')),
        # Fixed: this scenario previously reused ',huffman_value=english',
        # so the 'none' value configuration was never actually exercised.
        ('none', dict(valbad=0,huffval=',huffman_value=none')),
    ]
    type = [
        ('file', dict(uri='file:huff')),
        ('table', dict(uri='table:huff')),
    ]
    scenarios = make_scenarios(type, huffkey, huffval)

    def test_huffman(self):
        """Create the object; expect failure iff key or value config is bad."""
        if self.keybad or self.valbad:
            msg = '/Invalid argument/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda:
                self.session.create(self.uri, self.huffkey + self.huffval), msg)
        else:
            self.session.create(self.uri, self.huffkey + self.huffval)
# Allow running this test file standalone; the harness collects scenarios.
if __name__ == '__main__':
    wttest.run()
|
import attr
from datetime import datetime
from typing import Optional
@attr.dataclass(frozen=True, slots=True)
class Jurusan:
    """Immutable record describing a 'jurusan' (study major/department).
    Field meanings are inferred from the Indonesian names -- TODO confirm
    against the upstream schema.
    """
    jurusan_id: int  # primary identifier
    nama_jurusan: str  # display name of the major
    untuk_sma: str  # applicability flag per school type -- presumably; verify
    untuk_smk: str
    untuk_pt: str
    untuk_slb: str
    untuk_smklb: str
    jurusan_induk: Optional[str]  # parent major id, if any
    level_bidang_id: str  # raw id of the related level-bidang record
    create_date: datetime
    last_update: datetime
    expired_date: Optional[datetime]  # None while the record is active
    last_sync: datetime
    @property
    def level_bidang(self):
        # TODO API: currently echoes the raw id; should resolve to the
        # related level-bidang object once the API call exists.
        return self.level_bidang_id
    def __str__(self):
        return self.nama_jurusan
|
# Print the first character of every line in test.txt.
# (Lines from file iteration always contain at least '\n', so [0] is safe.)
with open("test.txt") as handle:
    for row in handle:
        print(row[0])
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file maps.py
# @author Joerg Schweizer
# @date
import os
import numpy as np
import wx
import urllib
from collections import OrderedDict
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
from agilepy.lib_base.processes import Process
#import timeit
#t = timeit.Timer()
#t_start = t.timer()
#from matplotlib import pyplot as plt
IS_MAPSUPPORT = True
try:
from PIL import ImageFilter, Image, ImageChops, ImagePath, ImageDraw
except:
print "WARNING: Maps requires PIL module."
IS_MAPSUPPORT = False
try:
import pyproj
except:
try:
from mpl_toolkits.basemap import pyproj
except:
print "WARNING: Maps requires pyproj module."
IS_MAPSUPPORT = False
# print __doc__
# raise
# Base endpoint of the Google Static Maps API; query params are appended.
URL_GOOGLEMAP = "http://maps.googleapis.com/maps/api/staticmap?"
def download_googlemap(filepath, bbox, proj, size=640, filetype='gif', maptype='satellite'):
    """Download one static-map tile covering ``bbox`` into ``filepath``.

    :param filepath: destination image file
    :param bbox: ((x_sw, y_sw), (x_ne, y_ne)) in projected map coordinates
    :param proj: pyproj projection used to convert to lon/lat
    :param size: requested tile edge in pixels (halved below, scale=2)
    :param filetype: image format understood by the Static Maps API
    :param maptype: map style, e.g. 'satellite'
    :return: numpy array [[lon_sw, lat_sw], [lon_ne, lat_ne]]
    """
    # https://developers.google.com/maps/documentation/static-maps/intro#Paths
    x_sw, y_sw = bbox[0]
    x_ne, y_ne = bbox[1]
    # Only the SW (00) and NE (11) corners are needed for the request;
    # the unused 10/01 corner conversions were removed (dead locals).
    lon00, lat00 = proj(x_sw, y_sw, inverse=True)
    lon11, lat11 = proj(x_ne, y_ne, inverse=True)
    # scale=2 doubles pixel density, hence the halved request size
    size_x = size_y = size/2
    urllib.urlretrieve(URL_GOOGLEMAP+"size=%dx%d&visible=%.6f,%.6f|%.6f,%.6f&format=%s&maptype=%s&scale=2"
                       % (size_x, size_y, lat00, lon00, lat11, lon11, filetype.upper(), maptype), filepath)
    bbox_lonlat = np.array([[lon00, lat00], [lon11, lat11]])
    return bbox_lonlat
def download_googlemap_bb(filepath, bbox, proj, size=640, filetype='gif', maptype='satellite', color="0xff0000ff"):
    """Download a static-map tile with the bounding box drawn as a path.

    :param filepath: destination image file
    :param bbox: ((x_sw, y_sw), (x_ne, y_ne)) in projected map coordinates
    :param proj: pyproj projection used to convert to lon/lat
    :param color: path color of the drawn rectangle
    :return: numpy array [[lon_sw, lat_sw], [lon_ne, lat_ne]]
    """
    # https://developers.google.com/maps/documentation/static-maps/intro#Paths
    x_sw, y_sw = bbox[0]
    x_ne, y_ne = bbox[1]
    # corner layout:
    # 01 11
    # 00 10
    lon00, lat00 = proj(x_sw, y_sw, inverse=True)
    lon10, lat10 = proj(x_ne, y_sw, inverse=True)
    lon11, lat11 = proj(x_ne, y_ne, inverse=True)
    lon01, lat01 = proj(x_sw, y_ne, inverse=True)
    # scale=2 doubles pixel density, hence the halved request size
    size_x = size_y = size/2
    url = URL_GOOGLEMAP + \
        "size=%dx%d&format=%s&maptype=%s&scale=2&path=color:%s|weight:1" % (
            size_x, size_y, filetype.upper(), maptype, color)
    # closed polygon: 00 -> 10 -> 11 -> 01 -> back to 00
    for lat, lon in ((lat00, lon00), (lat10, lon10), (lat11, lon11),
                     (lat01, lon01), (lat00, lon00)):
        url += "|%.6f,%.6f" % (lat, lon)
    urllib.urlretrieve(url, filepath)
    bbox_lonlat = np.array([[lon00, lat00], [lon11, lat11]])
    return bbox_lonlat
def estimate_angle(filepath,
                   rect=[(72, 36), (1243, 69), (1210, 1244), (39, 1211)],
                   ):
    """
    Estimate the rotation angle of a downloaded map tile.
    Correlates the red channel of the image with a synthetic rectangle
    outline rotated between -2 and +2 degrees; the angle with the best
    overlap is taken as the tile rotation.
    :param filepath: image file of the tile (with the frame path drawn on it)
    :param rect: measured pixel corners of the drawn rectangle
        (NOTE(review): mutable default argument, but it is only read)
    :return: (-angle_opt, bbox) where bbox is the centred, axis-aligned
        closed rectangle in pixel coordinates
    """
    im = Image.open(filepath).convert("RGB")
    print 'estimate_angle image', filepath, "%dx%d" % im.size, im.mode, im.getbands()
    imr, img, imb = im.split()
    # calculate width and height of bbox in pixel from measured rectangle
    wr = int(np.sqrt((rect[1][0]-rect[0][0])**2+(rect[1][1]-rect[0][1])**2))
    #wr_check = int(np.sqrt((rect[2][0]-rect[3][0])**2+(rect[2][1]-rect[3][1])**2))
    hr = int(np.sqrt((rect[3][0]-rect[0][0])**2+(rect[3][1]-rect[0][1])**2))
    #h_check = int(np.sqrt((rect[2][0]-rect[1][0])**2+(rect[2][1]-rect[1][1])**2))
    # centre of the image in pixels
    xcb = im.size[0]/2
    ycb = im.size[1]/2
    # closed rectangle centred on the image with the measured extent
    bbox = [(xcb-wr/2, ycb-hr/2), (xcb+wr/2, ycb-hr/2), (xcb+wr/2, ycb+hr/2), (xcb-wr/2, ycb+hr/2), (xcb-wr/2, ycb-hr/2)]
    # black image with only the rectangle outline drawn in white
    im_bbox = ImageChops.constant(im, 0)
    draw = ImageDraw.Draw(im_bbox)
    draw.line(bbox, fill=255)
    del draw
    # scan candidate angles; score = overlap of rotated outline with red channel
    angles = np.arange(-2.0, 2.0, 0.01)
    matches = np.zeros(len(angles))
    for i in xrange(len(angles)):
        im_bbox_rot = im_bbox.rotate(angles[i]) # gimp 1.62
        im_corr = ImageChops.multiply(imr, im_bbox_rot)
        # im_corr.show()
        im_corr_arr = np.asarray(im_corr)
        matches[i] = np.sum(im_corr_arr)/255
        # print ' angles[i],matches[i]',angles[i],matches[i]
    angle_opt = angles[np.argmax(matches)]
    print ' angle_opt', angle_opt
    # negated because the image must be rotated back by this amount
    return -angle_opt, bbox
    # im_box.show()
# im_box.show()
class MapsImporter(Process):
    """Interactive process that downloads background map tiles for a Maps
    object. Exposes the download options (tile width/size, cleanup flag)
    through attrsman so they can be edited in a process dialog.
    """
    def __init__(self, maps, logger=None, **kwargs):
        # maps: the Maps array object that will receive the downloaded tiles
        print 'MapsImporter.__init__', maps, maps.parent.get_ident()
        self._init_common('mapsimporter', name='Background maps importer',
                          logger=logger,
                          info='Downloads and converts background maps.',
                          )
        self._maps = maps
        attrsman = self.set_attrsman(cm.Attrsman(self))
        #self.net = attrsman.add( cm.ObjConf( network.Network(self) ) )
        # self.status = attrsman.add(cm.AttrConf(
        #     'status', 'preparation',
        #     groupnames = ['_private','parameters'],
        #     perm='r',
        #     name = 'Status',
        #     info = 'Process status: preparation-> running -> success|error.'
        #     ))
        # real-world edge length of one quadratic tile, in meters
        self.width_tile = attrsman.add(cm.AttrConf('width_tile', kwargs.get('width_tile', 500.0),
                                                   groupnames=['options'],
                                                   choices=OrderedDict([("500", 500.0),
                                                                        ("1000", 1000.0),
                                                                        ("2000", 2000.0),
                                                                        ("4000", 4000.0),
                                                                        ("8000", 8000.0),
                                                                        ]),
                                                   perm='rw',
                                                   name='Tile width',
                                                   unit='m',
                                                   info='Tile width in meter of quadratic tile. This is the real width of one tile that will be downloaded.',
                                                   ))
        # pixel edge length of one downloaded tile (API maximum is 1280)
        self.size_tile = attrsman.add(cm.AttrConf('size_tile', kwargs.get('size_tile', 1280),
                                                  groupnames=['options'],
                                                  perm='rw',
                                                  name='Tile size',
                                                  info='Tile size in pixel. This is the size of one tile that will be downloaded and determins the map resolution. Maximum is 1280.',
                                                  ))
        # derived, read-only: number of tiles implied by the network extent
        self.n_tiles = attrsman.add(cm.FuncConf('n_tiles', 'get_n_tiles', 0,
                                                groupnames=['options'],
                                                name='Number of tiles',
                                                #info = 'Delete a row.',
                                                ))
        # self.add_option( 'maptype',kwargs.get('maptype','satellite'),
        #                choices = ['satellite',]
        #                perm='rw',
        #                name = 'Map type',
        #                info = 'Type of map to be downloaded.',
        #                )
        # self.add_option( 'filetype',kwargs.get('filetype','png'),
        #                choices = ['png',]
        #                perm='rw',
        #                name = 'File type',
        #                info = 'Image file format to be downloaded.',
        #                )
        # self.add_option( 'mapserver',kwargs.get('mapserver','google'),
        #                choices = ['google',]
        #                perm='rw',
        #                name = 'Map server',
        #                info = 'Map server from where to download. Some servers require username and password.',
        #                )
        # self.add_option( 'username',kwargs.get('username',''),
        #                perm='rw',
        #                name = 'User',
        #                info = 'User name of map server (if required).',
        #                )
        # self.add_option( 'password',kwargs.get('password',''),
        #                perm='rw',
        #                name = 'User',
        #                info = 'User name of map server (if required).',
        #                )
        self.is_remove_orig = attrsman.add(cm.AttrConf('is_remove_orig', kwargs.get('is_remove_orig', True),
                                                       groupnames=['options'],
                                                       perm='rw',
                                                       name='Remove originals',
                                                       info='Remove original files. Original, untransformed files are not necessary, but can be kept.',
                                                       ))
    def get_n_tiles(self):
        """
        The number of tiles to be downloaded. Please do not download more than 300 tiles, otherwise map server is likely to be offended.
        """
        return self._maps.get_n_tiles(self.width_tile)
    def do(self):
        """Run the import: download all tiles with the configured options."""
        self.update_params()
        print 'MapsImporter.do'
        # self._maps.download(maptype = self.maptype, mapserver = self.mapserver,
        #                filetype = 'png', rootfilepath = None,
        #                width_tile = self.width_tile, size_tile = self.size_tile,
        #                is_remove_orig = True):
        self._maps.download(maptype='satellite', mapserver='google',
                            filetype='png', rootfilepath=None,
                            width_tile=self.width_tile, size_tile=self.size_tile,
                            is_remove_orig=self.is_remove_orig)
        #import_xml(self, rootname, dirname, is_clean_nodes = True)
        # self.run_cml(cml)
        # if self.status == 'success':
        return True
    def update_params(self):
        """
        Make all parameters consistent.
        example: used by import OSM to calculate/update number of tiles
        from process dialog
        """
        pass
class Maps(am.ArrayObjman):
    """Array object holding background map tiles: one row per tile with
    its bounding box (network coordinates) and image file name.
    """
    def __init__(self, landuse, **kwargs):
        self._init_objman(ident='maps',
                          parent=landuse,
                          name='Maps',
                          info='Information on background maps.',
                          **kwargs)
        self._init_attributes()
    def _init_attributes(self):
        """Declare the per-tile columns (bbox, filename); drop legacy attrs."""
        # print 'maps._init_attributes'
        # self.add(cm.AttrConf(  'width_tile',500,
        #                     groupnames = ['state'],
        #                     perm='r',
        #                     name = 'Tile width',
        #                     unit = 'm',
        #                     info = 'Tile width in meter of quadratic tile. This is the real wdith of one tile that will be downloaded.',
        #                     ))
        # self.add(cm.AttrConf(  'size_tile',1280,
        #                     groupnames = ['state'],
        #                     perm='r',
        #                     name = 'Tile size',
        #                     info = 'Tile size in pixel. This is the size of one tile that will be downloaded.',
        #                     ))
        if self.has_attrname('width_tile'):
            # no longer attributes
            self.delete('width_tile')
            self.delete('size_tile')
        # put r/w permissione to older version
        # self.get_config('width_tile').set_perm('rw')
        # self.get_config('size_tile').set_perm('rw')
        self.add_col(am.ArrayConf('bboxes', np.zeros((2, 2), dtype=np.float32),
                                  groupnames=['state'],
                                  perm='r',
                                  name='BBox',
                                  unit='m',
                                  info='Bounding box of map in network coordinate system (lower left coord, upper right coord).',
                                  is_plugin=True,
                                  ))
        self.add_col(am.ArrayConf('filenames', None,
                                  dtype=np.object,
                                  groupnames=['state'],
                                  perm='rw',
                                  metatype='filepath',
                                  name='File',
                                  info='Image file name.',
                                  ))
    def write_decals(self, fd, indent=4, rootdir=None):
        """Write one SUMO <decal> XML element per stored tile to ``fd``."""
        print 'write_decals', len(self)
        net = self.parent.get_net()
        if rootdir is None:
            rootdir = os.path.dirname(net.parent.get_rootfilepath())
        #proj = pyproj.Proj(str(net.get_projparams()))
        #offset = net.get_offset()
        #width_tile = self.width_tile.value
        #size_tile = self.size_tile.value
        for filename, bbox in zip(self.filenames.get_value(), self.bboxes.get_value()):
            #x0, y0 = proj(bbox_lonlat[0][0], bbox_lonlat[0][1])
            #x1, y1 = proj(bbox_lonlat[1][0],bbox_lonlat[1][1])
            #bbox = np.array([[x0, y0, 0.0],[x1, y1 ,0.0]],np.float32)
            #bbox_tile = [[x_sw,y_sw ],[x_ne,y_ne]]
            #x0,y0 = bbox_abs[0]+offset
            #x1,y1 = bbox_abs[1]+offset
            #bbox = np.array([[x0, y0, 0.0],[x1, y1 ,0.0]],np.float32)
            # print ' bbox decal',bbox
            # decal is centred on the tile; tiles are quadratic
            xc, yc = 0.5*(bbox[0]+bbox[1])
            zc = 0.0
            width_tile = bbox[1, 0] - bbox[0, 0]
            # print ' xc,yc',xc,yc
            # print ' width_tile',width_tile,bbox
            if filename == os.path.basename(filename):
                # filename does not contain path info
                filepath = filename # os.path.join(rootdir,filename)
            else:
                # filename contains path info (can happen if interactively inserted)
                filepath = filename
            calxml = '<decal filename="%s" centerX="%.2f" centerY="%.2f" centerZ="0.00" width="%.2f" height="%.2f" altitude="0.00" rotation="0.00" tilt="0.00" roll="0.00" layer="0.00"/>\n' % (
                filepath, xc, yc, width_tile, width_tile)
            fd.write(indent*' '+calxml)
    def clear_all(self):
        """
        Remove all map information.
        """
        self.clear_rows()
        # here we could also delete files ??
    def update_netoffset(self, deltaoffset):
        """
        Called when network offset has changed.
        Children may need to adjust their coordinates.
        """
        bboxes = self.bboxes.get_value()
        bboxes[:, :, :2] = bboxes[:, :, :2] + deltaoffset
    def get_n_tiles(self, width_tile):
        """
        Estimates number of necessary tiles.
        """
        net = self.parent.get_net()
        bbox_sumo, bbox_lonlat = net.get_boundaries()
        x0 = bbox_sumo[0] # -0.5*width_tile
        y0 = bbox_sumo[1] # -0.5*width_tile
        width = bbox_sumo[2]-x0
        height = bbox_sumo[3]-y0
        nx = int(width/width_tile+0.5)
        ny = int(height/width_tile+0.5)
        return nx*ny
    def download(self, maptype='satellite', mapserver='google',
                 filetype='png', rootfilepath=None,
                 width_tile=1000.0, size_tile=1280,
                 is_remove_orig=True):
        """
        Download all tiles covering the network bounding box, rotate and
        crop them, and register each as a row (filename + bbox).
        The first tile is downloaded twice: once with a visible frame to
        estimate the imagery rotation angle, then again with a faint frame.
        :return: list of ids of the added map rows
        """
        self.clear_rows()
        net = self.parent.get_net()
        if rootfilepath is None:
            rootfilepath = net.parent.get_rootfilepath()
        bbox_sumo, bbox_lonlat = net.get_boundaries()
        offset = net.get_offset()
        # latlon_sw=np.array([bbox_lonlat[1],bbox_lonlat[0]],np.float32)
        # latlon_ne=np.array([bbox_lonlat[3],bbox_lonlat[2]],np.float32)
        x0 = bbox_sumo[0] # -0.5*width_tile
        y0 = bbox_sumo[1] # -0.5*width_tile
        width = bbox_sumo[2]-x0
        height = bbox_sumo[3]-y0
        print 'download to', rootfilepath
        # '+proj=utm +zone=32 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
        #params_proj="+proj=utm +zone=32 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
        params_proj = net.get_projparams()
        proj = pyproj.Proj(str(params_proj))
        # print ' params_proj',params_proj,IS_MAPSUPPORT
        # these values are measured manually and are only valid for size_tile = 256/640
        # width_tile_eff= width_tile#*float(2*size_tile)/238.0#500m 1205.0*width_tile#m 1208
        # height_tile_eff = width_tile#*float(2*size_tile)/238.0# 500m1144.0*width_tile#m 1140
        nx = int(width/width_tile+0.5)+1
        ny = int(height/width_tile+0.5)+1
        print ' offset', offset
        print ' bbox_sumo', bbox_sumo
        print ' width_tile', width_tile, 'm'
        print ' Will download %dx%d= %d maps' % (nx, ny, nx*ny)
        #latlon_tile = np.array([(latlon_ne[0]-latlon_sw[0])/ny, (latlon_ne[1]-latlon_sw[1])/nx])
        #filepaths = []
        #centers = []
        #
        #
        # 0 1
        # 3 2
        #
        # 0 1 2 3
        angle = None
        bbox = None
        ids_map = []
        for ix in xrange(nx):
            for iy in xrange(ny):
                # tile in SUMO network coords. These are the saved coords
                x_tile = x0+ix*width_tile
                y_tile = y0+iy*width_tile
                print ' x_tile,y_tile', x_tile, y_tile
                bb = np.array([[x_tile, y_tile], [x_tile+width_tile, y_tile+width_tile]], np.float32)
                # tile in absolute coordinates. Coords used for download
                x_sw = x_tile-offset[0]
                y_sw = y_tile-offset[1]
                x_ne = x_sw+width_tile
                y_ne = y_sw+width_tile
                bbox_tile = [[x_sw, y_sw], [x_ne, y_ne]]
                filepath = rootfilepath+'_map%04dx%04d.%s' % (ix, iy, filetype)
                # print ' filepath=',filepath
                if angle is None:
                    # first tile: download with a visible frame so the
                    # rotation of the imagery can be estimated
                    download_googlemap_bb(filepath, bbox_tile, proj,
                                          size=size_tile,
                                          filetype=filetype, maptype=maptype)
                    angle, bbox = estimate_angle(filepath)
                bbox_tile_lonlat = download_googlemap_bb(filepath, bbox_tile, proj,
                                                         size=size_tile, filetype=filetype,
                                                         maptype=maptype, color="0x0000000f")
                print ' bbox_tile', bbox_tile
                print ' bbox_tile_lonlat', bbox_tile_lonlat
                im = Image.open(filepath).convert("RGB")
                if 1:
                    print ' downloaded image', filepath, "%dx%d" % im.size, im.mode, im.getbands()
                    # print ' x_sw,y_sw',x_sw,y_sw
                    # print ' x_ne,y_ne',x_ne,y_ne
                    # print ' start rotation'
                    # undo the estimated rotation, then crop to the frame
                    im_rot = im.rotate(angle) # gimp 1.62
                    # im_rot.show()
                    region = im_rot.crop([bbox[0][0], bbox[0][1], bbox[2][0], bbox[2][1]])
                    regsize = region.size
                    # print ' regsize',regsize
                    im_crop = Image.new('RGB', (regsize[0], regsize[1]), (0, 0, 0))
                    im_crop.paste(region, (0, 0, regsize[0], regsize[1]))
                    # normalise all tiles to 1024x1024 pixels
                    im_tile = im_crop.resize((1024, 1024))
                    # im_crop.show()
                    outfilepath = rootfilepath+'_rot%04dx%04d.%s' % (ix, iy, filetype)
                    # print 'save ',outfilepath,"%dx%d" % im_crop.size,im_crop.getbands()
                    im_tile.save(outfilepath, filetype.upper())
                    # print ' bb_orig=',bb
                    #lon0, lat0 = proj(x_tile-offset[0], y_tile-offset[1])
                    #lon1, lat1 = proj(x_tile+width_tile-offset[0], y_tile+width_tile-offset[1])
                    # print ' bb',bb.shape,bb
                    # print ' outfilepath',outfilepath,os.path.basename(outfilepath)
                    # print ' saved bbox',np.array([[x_tile-offset[0], y_tile-offset[1]],[x_tile+width_tile-offset[0], y_tile+width_tile-offset[1]]],np.float32)
                    # print ' saved bbox',bbox_tile_lonlat
                    id_map = self.add_row(filenames=os.path.basename(outfilepath),
                                          # bbox_tile,#bbox_tile_lonlat#np.array([[lon0, lat0],[lon1, lat1]],np.float32),
                                          bboxes=bb,
                                          )
                    ids_map.append(id_map)
                    if is_remove_orig:
                        # remove original file
                        os.remove(filepath)
        return ids_map
# This module has no standalone behaviour; it is used as a library.
if __name__ == '__main__':
    ############################################################################
    ###
    pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 11:55:00 2019
@author: github.com/sahandv
"""
import sys
import time
import gc
import collections
import json
import re
import os
import pprint
from random import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering, AffinityPropagation
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from yellowbrick.cluster import KElbowVisualizer
import scipy.cluster.hierarchy as sch
from scipy import spatial,sparse,sign
from bokeh.io import push_notebook, show, output_notebook, output_file
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, LabelSet
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
import fasttext
from gensim.models import FastText as fasttext_gensim
from gensim.test.utils import get_tmpfile
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
stop_words = set(stopwords.words("english"))
from sciosci.assets import keyword_assets as kw
from sciosci.assets import generic_assets as sci
from sciosci.assets import advanced_assets as aa
# Tag selecting which period / cluster-count combination to process; it is
# embedded in both the input and output file paths below.
period_cluster = '2017-2018 13'
# Read cluster centers
# NOTE(review): all paths below are absolute, machine-specific paths — this
# script must be edited before it can run anywhere else.
cluster_centers = pd.read_csv('/home/sahand/GoogleDrive/Data/FastText doc clusters - SIP/50D/cluster_centers/agglomerative ward '+period_cluster,index_col=0)
# Read and make keyword list
keywords = pd.read_csv('/home/sahand/GoogleDrive/Data/Author keywords - 29 Oct 2019/1990-2018 keyword frequency',names=['keyword','frequency'])
# Keep only keywords appearing more than 20 times in the corpus.
keywords = keywords[keywords['frequency']>20]
keywords_list = keywords['keyword'].values.tolist()
# Get keyword embeddings
gensim_model_address = '/home/sahand/GoogleDrive/Data/FastText Models/50D/fasttext-scopus_wos-merged-310k_docs-gensim 50D.model'
model = fasttext_gensim.load(gensim_model_address)
# Save in a list: one FastText vector per kept keyword.
keyword_vectors = []
for token in tqdm(keywords_list[:],total=len(keywords_list[:])):
    keyword_vectors.append(model.wv[token])
# Cosine distance of the cluster centers and keywords to find the closest keywords to clusters
names = []
names.append('clusters')
sim_A_to_B = []
for idx_A,vector_A in cluster_centers.iterrows():
    inner_similarity_scores = []
    inner_similarity_scores.append(idx_A)
    for idx_B,vector_B in enumerate(keyword_vectors):
        distance_tmp = spatial.distance.cosine(vector_A.values, vector_B)
        # NOTE(review): despite the name, this stores the cosine DISTANCE —
        # the similarity conversion is deliberately commented out.
        similarity_tmp = distance_tmp#1 - distance_tmp
        # inner_similarity_scores.append(keywords_list[idx_B])
        inner_similarity_scores.append(similarity_tmp)
        # Build the column-name header once, on the first cluster row only.
        if idx_A == 0:
            # names.append('keyword_'+str(idx_B))
            names.append(keywords_list[idx_B])
    sim_A_to_B.append(inner_similarity_scores)
    # print('cluster of A:',idx_A,'to cluster of B:',idx_B,'similarity',similarity_tmp)
sim_A_to_B = pd.DataFrame(sim_A_to_B,columns=names)
sim_A_to_B.to_csv('/home/sahand/GoogleDrive/Data/FastText doc clusters - SIP/50D/cluster_center to author_keyword similarity/'+period_cluster+' - distance.csv',index=False)
from legacy.crawler import crawler
import os
url = "https://www.wired.com"
output_dir = os.path.join('.', 'data', 'wired.com')
headers = {
'Accept':'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding':'*',
'Accept-Language':'zh-CN,zh;q=0.8',
'Cookie': 'pay_ent_smp=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsInZlciI6MX0.eyJ1cmxzIjpbIi8yMDAxLzAzL2hvbmV5cG90cy1iYWl0LWZvci10aGUtY3JhY2tlciJdLCJjbnQiOjEsIm1heCI6NCwiZXhwIjoyMDE4MTB9.SvGXeHRGxLna6rX9Kr9qTXrSU-pSbIkrDE_hQwQ60O4; CN_xid=d75239c8-659c-476c-9f5d-ea0547275e55; _sdsat_landing_page=https://www.wired.com/2001/03/honeypots-bait-for-the-cracker/|1540044766455; _sdsat_session_count=1; _sdsat_traffic_source=https://www.google.com.hk/; visitedCount_jwt=1; AMCVS_F7093025512D2B690A490D44%40AdobeOrg=1; CN_sp=fa58760d-4e72-4ce3-b443-2c9b42243770; CN_su=d19800c0-59da-4191-a88c-5c4148ebb692; CN_segments=; _ga=GA1.2.802886981.1540044768; fpcid=2456436582056134046_FP; v30=google.com.hk; v39=google.com.hk; s_cc=true; __gads=ID=f8c89cd159acd50f:T=1540044768:S=ALNI_MainR0wk-mflQoNeN_UO7dory-7gQ; aamconde=conde%3Dsv%3BCN%3D764985; aam_optimizely=aam%3D226821; aam_uuid=26426712967223226083480932539320192677; _sdsat_lt_pages_viewed=2; _sdsat_pages_viewed=2; _sdsat_AAM_UUID=26426712967223226083480932539320192677; CN_visits_m=1541001600572%26vn%3D2; CN_in_visit_m=true; sID=2955f7c3-91dd-4bc7-abc7-999968ecee3c; pID=1d3e3648-9926-466b-beec-6a2e98c8702c; AMCV_F7093025512D2B690A490D44%40AdobeOrg=1099438348%7CMCIDTS%7C17834%7CMCMID%7C26574736731012813853459705698242364028%7CMCAAMLH-1541404553%7C3%7CMCAAMB-1541404553%7CRKhpRz8krg2tLO6pguXWp5olkAcUniQYPHaMWWgdJ3xzPWQmdj0y%7CMCOPTOUT-1540806953s%7CNONE%7CMCAID%7CNONE%7CvVersion%7C2.1.0; s_vnum_m=1541001600667%26vn%3D2; sinvisit_m=true; s_depth=1; timeSpent=1540799753187; s_ppn=https%3A%2F%2Fwww.wired.com%2Fcategory%2Fsecurity%2Fthreatlevel%2F; s_pct=Index; s_nr=1540799753188-Repeat; sailthru_pageviews=1; bounceClientVisit2825v=N4IgNgDiBcIBYBcEQM4FIDMBBNAmAYnvgO6kB0xAlgE4CmAJmQMYD2AtkUwIYK0DmLagE8iCOHR5haAN1pgiIADQhqMECAC+QA; 
_polar_tu=*_%22mgtn%22_@2Q_u_@_97f78f97-5c77-4716-b78f-a0dccc974ab0_Q_n_@3Q_s_@2Q_sc_@*_v_@1Q_a_@1+Q_ss_@_%22phcop7_Q_sl_@_%22phcop7_Q_sd_@*+Q_v_@nullQ_vc_@*_e_@0+Q_vs_@_%22phcop7_Q_vl_@_%22phcop7_Q_vd_@*+Q_vu_@_555fdf068442e929ddada46236b2ea5b_Q_vf_@_%22jnu0e179_+; _parsely_session={%22sid%22:2%2C%22surl%22:%22https://www.wired.com/category/threatlevel/%22%2C%22sref%22:%22%22%2C%22sts%22:1540799755083%2C%22slts%22:1540044768270}; _parsely_visitor={%22id%22:%22c609b887-dad6-414c-906e-f6a107dbb880%22%2C%22session_count%22:2%2C%22last_session_ts%22:1540799755083}; sailthru_content=e43720c11f5345e88d86bc1d5be31f74e2553d06f8d9ea3b9cb7420abe100f46; sailthru_visitor=1cb98baf-6809-4646-a0f8-aa82685e000a; AMP_TOKEN=%24NOT_FOUND; _gid=GA1.2.368334264.1540799760',
'Connection':'keep-alive',
'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/70.0.3538.67 Chrome/70.0.3538.67 Safari/537.36',
'X-Requested-With':'XMLHttpRequest'
}
# Entry point: crawl the configured WordPress site into output_dir.
if __name__ == "__main__":
    from legacy import common_crawl
    wpc = crawler.WordPressCrawler(url, headers, output_dir)
    # NOTE(review): `common_crawl` is imported from the `legacy` package and
    # called directly — presumably a callable exported there; verify it is
    # not a module (calling a module object would raise TypeError).
    common_crawl(wpc)
|
import jwt
from django.contrib.auth import get_user_model
from rest_framework.authentication import BaseAuthentication
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from restui.models.annotations import UeMappingStatus
from restui.models.annotations import UeMappingComment
from restui.models.annotations import UeMappingLabel
from restui.models.annotations import CvUeStatus
from restui.models.annotations import UeUnmappedEntryLabel
from restui.models.annotations import UeUnmappedEntryComment
from restui.models.annotations import UeUnmappedEntryStatus
class CvUeStatusSerializer(serializers.ModelSerializer):
    """Serialize a CvUeStatus record, exposing all model fields."""
    class Meta:
        model = CvUeStatus
        fields = '__all__'
class StatusHistorySerializer(serializers.Serializer):
    """
    Serialize a status history record
    """
    status = serializers.CharField()          # status label at this point in time
    time_stamp = serializers.DateTimeField()  # when this status was recorded
    user = serializers.CharField()            # who set the status
class MappingStatusSerializer(serializers.ModelSerializer):
    """
    mapping/:id/status endpoint

    Serialize a status record attached to a mapping.
    """

    def create(self, validated_data):
        """Create and persist a new UeMappingStatus from the validated payload."""
        return UeMappingStatus.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Copy any provided fields onto *instance*, then persist it."""
        for attr in ('time_stamp', 'user_stamp', 'status', 'mapping'):
            setattr(instance, attr, validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance

    class Meta:
        model = UeMappingStatus
        fields = '__all__'
class UnmappedEntryStatusSerializer(serializers.ModelSerializer):
    """
    unmapped/:id/status endpoint

    Serialize a status record attached to an unmapped entry.
    """

    def create(self, validated_data):
        """Create and persist a new UeUnmappedEntryStatus from the payload."""
        return UeUnmappedEntryStatus.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Copy any provided fields onto *instance*, then persist it."""
        for attr in ('time_stamp', 'user_stamp', 'status', 'uniprot'):
            setattr(instance, attr, validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance

    class Meta:
        model = UeUnmappedEntryStatus
        fields = '__all__'
class MappingCommentSerializer(serializers.ModelSerializer):
    """
    mapping/:id/comments endpoint

    Serialize a comment attached to a mapping.
    """

    # Write-only soft-delete flag; never returned on reads.
    deleted = serializers.BooleanField(write_only=True)

    def validate_comment(self, value):
        """
        Reject comments that contain no visible text.

        Fix: the previous check removed only spaces and tabs, so a comment
        made entirely of newlines (or other whitespace) slipped through;
        str.strip() covers all whitespace characters.
        """
        if not value.strip():
            raise serializers.ValidationError("Comment is empty")
        return value

    def create(self, validated_data):
        """Create and persist a new UeMappingComment."""
        return UeMappingComment.objects.create(**validated_data)

    class Meta:
        model = UeMappingComment
        fields = '__all__'
class UnmappedEntryCommentSerializer(serializers.ModelSerializer):
    """
    unmapped/:id/comments endpoint

    Serialize a comment attached to an unmapped entry.
    """

    # Write-only soft-delete flag; never returned on reads.
    deleted = serializers.BooleanField(write_only=True)

    def validate_comment(self, value):
        """
        Reject comments that contain no visible text.

        Fix: the previous check removed only spaces and tabs, so a comment
        made entirely of newlines (or other whitespace) slipped through;
        str.strip() covers all whitespace characters.
        """
        if not value.strip():
            raise serializers.ValidationError("Comment is empty")
        return value

    def create(self, validated_data):
        """Create and persist a new UeUnmappedEntryComment."""
        return UeUnmappedEntryComment.objects.create(**validated_data)

    class Meta:
        model = UeUnmappedEntryComment
        fields = '__all__'
class MappingLabelSerializer(serializers.ModelSerializer):
    """
    mapping/:id/labels endpoint

    Serialize label associated to mapping
    """
    def create(self, validated_data):
        # Persist a new label attached to a mapping.
        return UeMappingLabel.objects.create(**validated_data)
    class Meta:
        model = UeMappingLabel
        fields = '__all__'
class UnmappedEntryLabelSerializer(serializers.ModelSerializer):
    """
    unmapped/<int:mapping_view_id>/labels/<label_id>/ endpoint

    Serialize label associated to unmapped entry
    """
    def create(self, validated_data):
        # Persist a new label attached to an unmapped entry.
        return UeUnmappedEntryLabel.objects.create(**validated_data)
    class Meta:
        model = UeUnmappedEntryLabel
        fields = '__all__'
class LabelSerializer(serializers.Serializer):
    """
    Serializer for an individual label
    """
    label = serializers.CharField()      # human-readable label text
    id = serializers.IntegerField()      # label identifier
    status = serializers.BooleanField()  # label state flag; semantics set by the caller
class LabelsSerializer(serializers.Serializer):
    """
    For nested serialization of user label for a mapping in call to
    (mapping|unmapped)/<id>/labels endpoint.
    """
    labels = LabelSerializer(many=True)  # list of individual label records
|
"""
Notebook rendering classes
Currently, only an HTMLRenderer class is available. Eventually this could be
extended to include something like a MarkdownRenderer class for hosting on
Github, etc.
"""
import os
import shutil
from jinja2 import Environment, PackageLoader
from pkg_resources import resource_filename, Requirement
class Renderer(object):
    """Base notebook Renderer class.

    Holds notebook metadata (author, title, email, date), the entries to
    render, output options, and the Jinja2 template selected by *theme*.
    Subclasses implement render() for a concrete output format.
    """
    def __init__(self, author, title, email, date, entries,
                 output_file, user_css, user_js, theme='default'):
        # Notebook metadata and content
        self.author = author
        self.title = title
        self.email = email
        self.date = date
        self.entries = entries
        self.output_file = output_file
        # Optional user-supplied stylesheet / script
        self.user_css = user_css
        self.user_js = user_js
        # Theme name maps directly to a template filename, e.g. 'default.html'
        self.theme = '%s.html' % theme
        # Load Jinja2 template from the labnote package's templates directory
        env = Environment(loader=PackageLoader('labnote', 'templates'))
        self.template = env.get_template(self.theme)
    def render(self):
        """Abstract method for rendering the notebook"""
        pass
class HTMLRenderer(Renderer):
    """HTML notebook renderer.

    Renders the notebook to a standalone HTML file and copies the bundled
    img/ and css/ resources next to it.
    """
    def __init__(self, author, title, email, date, entries, output_file,
                 user_css, user_js, template):
        # NOTE(review): the positional *template* argument is forwarded into
        # the base class's *theme* parameter — confirm callers pass a theme
        # name here rather than a template filename.
        super().__init__(author, title, email, date, entries, output_file,
                         user_css, user_js, template)
    def render(self):
        """Renders notebook"""
        # Fill the Jinja2 template with notebook metadata and entries.
        html = self.template.render(author=self.author, title=self.title,
                                    email=self.email, date=self.date,
                                    user_css=self.user_css,
                                    user_js=self.user_js,
                                    entries=self.entries)
        print("- Generating notebook HTML")
        # Output notebook
        with open(self.output_file, 'w') as fp:
            fp.write(html)
        print("- Saving notebook to %s" % self.output_file)
        # Path to resources/ directory bundled with the installed package
        resources = resource_filename(Requirement.parse('labnote'),
                                      os.path.join('labnote', 'resources'))
        img_resources = os.path.join(resources, 'img')
        css_resources = os.path.join(resources, 'css')
        # Copy CSS and image resources to output directory
        output_base = os.path.join(os.path.dirname(self.output_file),
                                   'resources')
        output_img = os.path.join(output_base, 'img')
        output_css = os.path.join(output_base, 'css')
        # Remove existing img/ and css/ directories so copytree can recreate them
        for x in [output_img, output_css]:
            if os.path.isdir(x):
                shutil.rmtree(x)
        ignore_pattern = shutil.ignore_patterns("__init__.py", "__pycache__")
        shutil.copytree(img_resources, output_img, ignore=ignore_pattern)
        shutil.copytree(css_resources, output_css, ignore=ignore_pattern)
|
from cli import *
from sim_commands import *
import string, os
# Known floppy media sizes (label, e.g. "1.44" = 1.44 MB) mapped to disk
# geometry as [tracks, sectors per track, heads].
disk_types = { "320" : [40, 8, 2],
               "360" : [40, 9, 2],
               "720" : [80, 9, 2],
               "1.2" : [80, 15, 2],
               "1.44" : [80, 18, 2],
               "2.88" : [80, 36, 2]}
def floppy_drive_get_info(obj):
    """The floppy drive has no static configuration info to report."""
    return None
def floppy_drive_get_status(obj):
    # Build [tracks, sectors/track, heads] for the current geometry and map
    # it back to a media-size label via the disk_types table.
    floppy_size = [obj.tracks, obj.sectors_per_track, obj.heads]
    try:
        # Python 2 idiom: .values() and .keys() return parallel lists, so an
        # index found in values() selects the matching label in keys().
        idx = disk_types.values().index(floppy_size)
        floppy_type = disk_types.keys()[idx]
    except:
        # Geometry does not match any known media size.
        floppy_type = "Unknown"
    drive = ("Drive",
             [ ("Busy", obj.drive_busy),
               ("Seek in progress", obj.seek_in_progress),
               ("Disk changed", obj.disk_changed),
               ("Motor", iff(obj.motor_on, "on", "off")),
               ("Data rate", obj.data_rate),
               ("Current head", obj.cur_head),
               ("Current sector", obj.cur_sector) ] )
    # Only report media details when an image is actually inserted.
    if obj.image:
        floppy = ("Floppy",
                  [ ("Floppy type", floppy_type),
                    ("Write protect", obj.write_protect),
                    ("Tracks", obj.tracks),
                    ("Sectors per track", obj.sectors_per_track),
                    ("Heads", obj.heads),
                    ("Image object", obj.image.name) ] )
    else:
        floppy = ("Floppy",
                  [ ("No floppy", "") ])
    return [ drive, floppy ]
# Register the info/status CLI commands for the 'floppy-drive' device class.
new_info_command('floppy-drive', floppy_drive_get_info)
new_status_command('floppy-drive', floppy_drive_get_status)
def i82077_get_info(obj):
    # NOTE(review): bare `join` comes from a star-import (cli/sim_commands);
    # presumably string.join semantics — confirm which separator it applies.
    drive_list = join([d.name for d in obj.drives])
    if drive_list == '':
        drive_list = 'None'
    # Static configuration of the i82077 controller for the CLI info command.
    return [ (None,
              [ ("IRQ device", obj.irq_dev),
                ("IRQ number", obj.irq_level),
                ("DMA device", obj.dma_dev),
                ("DMA channel", obj.dma_channel),
                ("Drives", drive_list) ] ) ]
def i82077_get_status(obj):
    # Snapshot of the i82077 controller's dynamic state for the CLI status
    # command; iff() comes from the star-imports at the top of the file.
    return [ (None,
              [ ("Enabled", iff(obj.enabled, "yes", "no")),
                ("DMA enabled", iff(obj.dma_enabled, "yes", "no")),
                ("FIFO enabled", iff(obj.fifo_enabled, "yes", "no")),
                ("Poll enabled", iff(obj.poll_enabled, "yes", "no")),
                # obj.state indexes into the controller's phase names
                ("State", ["idle", "command", "execute", "result"][obj.state]),
                ("Step rate", obj.step_rate),
                ("Selected drive", obj.drive_select),
                ("Command busy", obj.command_busy),
                ("Poll change", obj.poll_change),
                ("Current command", "0x%x" % obj.cmd_id),
                ("Implied seek", obj.implied_seek),
                ("ST0 register", obj.st0),
                ("ST1 register", obj.st1),
                ("ST2 register", obj.st2) ] ) ]
# Register the info/status CLI commands for the 'i82077' controller class.
new_info_command('i82077', i82077_get_info)
new_status_command('i82077', i82077_get_status)
#
# -------------------- insert-floppy --------------------
#
# Counter used to generate unique backing-image object names across inserts.
floppy_count = 0
def insert_floppy_cmd(obj, drive, floppy_file, rw, size):
global floppy_count
drive = string.upper(drive)
if not drive in ('A', 'B'):
print "Incorrect drive-letter, use one of A and B"
SIM_command_has_problem()
return
try:
if drive == 'A' and len(obj.drives) < 1:
raise Exception
elif len(obj.drives) < 2:
raise Exception
except:
print ("No drive '%s' connected to controller %s. "
"Cannot insert floppy." % (drive, obj.name))
SIM_command_has_problem()
return
try:
disk_size = disk_types[size]
except:
print "Unknown disk size %s." % size
SIM_command_has_problem()
return
if drive == 'A':
fd = obj.drives[0]
else:
fd = obj.drives[1]
if fd.image:
print "Floppy already inserted into drive %s." % drive
SIM_command_has_problem()
return
fd.disk_changed = 1
fd.tracks = disk_size[0]
fd.sectors_per_track = disk_size[1]
fd.heads = disk_size[2]
# simply replace the old image object
# make sure we use a unique name (e.g. after a checkpoint)
unique = 0
while not unique:
image_name = 'fd_image_%s_%d' % (drive, floppy_count)
floppy_count += 1
try:
SIM_get_object(image_name)
except:
unique = 1
im_size = disk_size[0] * disk_size[1] * disk_size[2] * 512
SIM_create_object('image', image_name,
[['queue', SIM_current_processor()],
['size', im_size]])
fd.image = SIM_get_object(image_name)
filesize = os.stat(floppy_file)[6]
if filesize == 0:
filesize = fd.image.size
print "Image %s reported zero size, assuming special file." % (
floppy_file)
rw_str = iff(rw == 1, 'rw', 'ro')
fd.image.files = [[floppy_file, rw_str, 0, filesize]]
print "Floppy inserted in drive '%s:'. (File %s)." % (drive, floppy_file)
if size != '1.44':
print "Remember to set the floppy size in the CMOS as well."
new_command("insert-floppy", insert_floppy_cmd,
[arg(str_t, "drive-letter"),
arg(filename_t(exist = 1, simpath = 1), "floppy-image"),
arg(flag_t, "-rw"),
arg(str_t, "size", "?", "1.44")],
alias = "",
type = "i82077 commands",
short = "insert floppy in drive",
namespace = "i82077",
doc = """
Insert the file <arg>floppy-image</arg> as a floppy in the disk drive specified by
<arg>drive-letter</arg>. For floppies with a different size than 1.44 MB, the size
must be specified explicitly.
The <arg>-rw</arg> flag uses <arg>floppy-image</arg> in read-write mode, meaning
that no save or save-diff-file command to the associated image object need to be
used in order to save data written by the target software.
""", filename="/mp/simics-3.0/src/devices/82077/commands.py", linenumber="149")
def eject_floppy_cmd(obj, drive):
drive = string.upper(drive)
if not drive in ('A', 'B'):
print "Incorrect drive-letter, use one of A and B"
SIM_command_has_problem()
return
try:
if drive == 'A' and len(obj.drives) < 1:
raise Exception
elif len(obj.drives) < 2:
raise Exception
except:
print ("No drive '%s' connected to controller %s. "
"Cannot insert floppy." % (drive, obj.name))
SIM_command_has_problem()
return
if drive == 'A':
fd = obj.drives[0]
else:
fd = obj.drives[1]
if fd.image == None:
print "No floppy in drive %s." % drive
SIM_command_has_problem()
return
fd.disk_changed = 1
fd.image = None
print "Floppy ejected from drive '%s:'." % (drive)
new_command("eject-floppy", eject_floppy_cmd,
[arg(str_t, "drive-letter")],
type = "i82077 commands",
short = "eject floppy",
namespace = "i82077",
doc = """
Eject the media from the disk drive specified by <i>drive-letter</i>.
""", filename="/mp/simics-3.0/src/devices/82077/commands.py", linenumber="196")
|
from .flop_benchmark import get_model_infos
|
# Import external libraries
import os
from pathlib import Path
from glob import glob
import shutil
import numpy as np
import nibabel as nib
import cv2
from pystackreg import StackReg
# Directory helper: create a directory tree, reporting the outcome.
def create_dir(path_of_dir):
    """Create *path_of_dir* (including parents) and print what happened."""
    try:
        os.makedirs(path_of_dir)
    except OSError as err:
        if isinstance(err, FileExistsError):
            message = "Directory %s already exists" % path_of_dir
        else:
            message = "Creation of the directory %s failed" % path_of_dir
    else:
        message = "Successfully created the directory %s " % path_of_dir
    print(message)
# Directory helper: recursively remove a directory tree, reporting the outcome.
def delete_dir(path_of_dir):
    """Recursively delete *path_of_dir* and print what happened."""
    try:
        shutil.rmtree(path_of_dir)
    except FileNotFoundError:
        message = 'Directory %s or its path doesn\'t exist' % path_of_dir
    else:
        message = 'Directory %s has been deleted' % path_of_dir
    print(message)
# Create QC transform matrices and warped seg. masks and store in .npy format
def create_qc():
    """Build QC rigid-registration transforms and warped segmentation masks.

    Reads paired CT / LAV-label NIfTI volumes from the module-level
    DATA_PATH, extracts labelled slices to .npy files, rigidly registers
    selected reference slices against each other with pystackreg, and
    stores the transform matrices and warped masks as two .npy archives
    under DATA_DESTINATION_FOLDER_NAME (created next to DATA_PATH).

    Relies on module-level globals DATA_PATH and DATA_DESTINATION_FOLDER_NAME
    being assigned before the call (done in the __main__ guard).
    """
    # Create New Data Directories
    path_root, path_dir = os.path.split(DATA_PATH)
    DATA_DESTINATION_PATH = os.path.join(path_root, DATA_DESTINATION_FOLDER_NAME)
    create_dir(DATA_DESTINATION_PATH)
    # Define naming formats:
    ct_data_nii_gz = '_CT.nii.gz'
    lav_data_nii_gz = '_LAV-label.nii.gz'
    # create list of data files in path
    data_ct_nii_gz_list = glob(os.path.join(DATA_PATH, '*' + ct_data_nii_gz))
    data_lav_nii_gz_list = glob(os.path.join(DATA_PATH, '*' + lav_data_nii_gz))
    # Order using sort so CT and LAV lists stay pairwise aligned
    data_ct_nii_gz_list.sort()
    data_lav_nii_gz_list.sort()
    # Select Data to Use for QC: Build Atlas with Several Patient Data
    no_studies = 5 # No of patients to build an atlas from
    for index in range(no_studies):
        ct = data_ct_nii_gz_list[index]
        lav = data_lav_nii_gz_list[index]
        # Get the image arrays
        img_arr_ct = nib.load(ct).get_fdata()
        img_arr_lav = nib.load(lav).get_fdata()
        j = 0  # per-study counter of slices that contain label data
        img_shape_lav = img_arr_lav.shape[2]
        par, par_dir = os.path.split(Path(ct))
        for i in range(img_shape_lav):
            lav_image_i = img_arr_lav[..., i]
            ct_image_i = img_arr_ct[..., i]
            # Keep only slices whose label mask is non-empty
            if ((lav_image_i).any() > 0):
                # Save Both CT and LAV for IMAGE Segmentation Task (i.e. CT Segmentation Mask):
                # -----------------------------------------------------------------------------
                npy_ct_seg_file_name = os.path.join(DATA_DESTINATION_PATH,
                    par_dir.replace(ct_data_nii_gz, '_') + str(f'{j:04}') + '_pixel_array_data.npy')
                np.save(npy_ct_seg_file_name, np.array(ct_image_i))
                npy_lav_seg_file_name = os.path.join(DATA_DESTINATION_PATH,
                    par_dir.replace(ct_data_nii_gz, '_') + str(f'{j:04}') + '_seg_data.npy')
                np.save(npy_lav_seg_file_name, np.array(lav_image_i, dtype=np.int32)) # Must only 'integize' segmentation mask image!
                j += 1
    # create list of *npy data files in path
    data_list_img = glob(DATA_DESTINATION_PATH + '/*_pixel_array_data.npy')
    data_list_seg = glob(DATA_DESTINATION_PATH + '/*_seg_data.npy')
    # Order using sort
    data_list_seg.sort()
    data_list_img.sort()
    # Main Program
    list_transforms = []
    list_warped_segmentations = []
    skip = 25  # take every 25th slice as a registration reference
    refslices = [i for i in range(0, len(data_list_img), skip)]
    for refslice_index in refslices:
        # Load reference ('fixed') image
        # NOTE(review): load_images is not defined or imported anywhere in
        # this file — confirm it is provided elsewhere, otherwise this line
        # raises NameError at runtime.
        img_ct_ref, seg_ct_ref = load_images(DATA_DESTINATION_PATH, refslice_index, data_list_seg, data_list_img)
        for index in refslices:
            if index != refslice_index:
                print('Index: ', index)
                # Load 'moving' image
                img_ct_moving, seg_ct_moving = load_images(DATA_DESTINATION_PATH, index, data_list_seg, data_list_img)
                # Rigid Body transformation
                sr = StackReg(StackReg.RIGID_BODY)
                reg_image = sr.register_transform(img_ct_ref, img_ct_moving)
                transform_matrix = sr.get_matrix()
                # Compute Warped transform images
                height, width = seg_ct_moving.shape
                warped_seg = cv2.warpPerspective(seg_ct_moving.astype('float32'),
                                                 transform_matrix, (width, height))
                list_transforms.append(transform_matrix)
                list_warped_segmentations.append(warped_seg)
            else:
                print('skipping slice: ', index)
    print('Length of Transformation/Warped Segmentation Matrices: ', len(list_transforms))
    # Delete all *.npy files and recreate the folder
    delete_dir(DATA_DESTINATION_PATH)
    create_dir(DATA_DESTINATION_PATH)
    # Save Transformation Matrix in *.npy
    npy_ct_file_name1 = os.path.join(DATA_DESTINATION_PATH, 'QC_LAV_Transform_Matrix.npy')
    np.save(npy_ct_file_name1, list_transforms)
    # Save Warped Segmentation Masks in *.npy
    npy_ct_file_name2 = os.path.join(DATA_DESTINATION_PATH, 'QC_LAV_Warped_Seg_Masks.npy')
    np.save(npy_ct_file_name2, list_warped_segmentations)
if __name__ == "__main__":
    # Define Source Data Directory
    # NOTE(review): placeholder — point this at the real dataset folder
    # before running; create_qc() reads these module-level globals.
    DATA_PATH = 'path/to/dataset/folder'
    # Define Destination Directory Name (*.npy files are saved next to DATA_PATH under this name)
    DATA_DESTINATION_FOLDER_NAME = 'Data_Quality_Control_LAV'
    # Create QC transform matrices and warped seg. masks and store in .npy format:
    create_qc()
|
# ##### BEGIN MIT LICENSE BLOCK #####
#
# MIT License
#
# Copyright (c) 2020 Steven Garcia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
import bpy
from ..global_functions import global_functions
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper
)
from bpy.types import (
Operator,
Panel,
PropertyGroup
)
from bpy.props import (
BoolProperty,
EnumProperty,
FloatProperty,
PointerProperty,
StringProperty
)
class ExportGR2(Operator, ExportHelper):
    """Write a Granny file"""
    bl_idname = "export_scene.gr2"
    bl_label = "Export GR2"
    filename_ext = '.GR2'
    # Restrict the file browser to .gr2 files; the property itself is hidden.
    filter_glob: StringProperty(
        default="*.gr2",
        options={'HIDDEN'},
        )
    def execute(self, context):
        # Deferred import: file_gr2 is only needed when the operator runs.
        from ..file_gr2 import export_gr2
        # run_code presumably evaluates the snippet with shared error
        # handling/reporting — see global_functions for the contract.
        return global_functions.run_code("export_gr2.write_file(context, self.filepath, self.report)")
class ImportGR2(Operator, ImportHelper):
    """Import a Granny file"""
    bl_idname = "import_scene.gr2"
    bl_label = "Import GR2"
    filename_ext = '.GR2'
    # Restrict the file browser to .gr2 files; the property itself is hidden.
    filter_glob: StringProperty(
        default="*.gr2",
        options={'HIDDEN'},
        )
    def execute(self, context):
        # Deferred import: file_gr2 is only needed when the operator runs.
        from ..file_gr2 import import_gr2
        # run_code presumably evaluates the snippet with shared error
        # handling/reporting — see global_functions for the contract.
        return global_functions.run_code("import_gr2.load_file(context, self.filepath, self.report)")
def menu_func_export(self, context):
    """Menu hook: add the GR2 exporter entry to a Blender export menu."""
    self.layout.operator(ExportGR2.bl_idname, text="Halo Granny V2 (.GR2)")
def menu_func_import(self, context):
    """Menu hook: add the GR2 importer entry to a Blender import menu."""
    self.layout.operator(ImportGR2.bl_idname, text="Halo Granny V2 (.GR2)")
# Operator classes to (un)register with Blender.
classeshalo = (
    ExportGR2,
    ImportGR2,
)
def register():
    """Register all Halo GR2 operator classes with Blender."""
    for operator_cls in classeshalo:
        bpy.utils.register_class(operator_cls)
    #bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
    #bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
    """Remove all Halo GR2 operator classes from Blender."""
    #bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
    #bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
    for operator_cls in classeshalo:
        bpy.utils.unregister_class(operator_cls)
# Allow running the addon directly (e.g. from Blender's text editor).
if __name__ == '__main__':
    register()
|
import sympy as sy
import numpy as np
import random
from sympy import *
def threePointCubicApprox(x,y,xSlopePoint,yPrime):
    """Fit a cubic C0 + C1*x + C2*x^2 + C3*x^3 through three points.

    The curve passes through (x[0],y[0]), (x[1],y[1]), (x[2],y[2]) and has
    slope yPrime at x[0].  NOTE: xSlopePoint is accepted for interface
    compatibility but is not used by the computation.

    Returns [C0, C1, C2, C3].
    """
    dx10 = x[1] - x[0]
    dx20 = x[2] - x[0]
    dx21 = x[2] - x[1]
    dy10 = y[1] - y[0]
    dy20 = y[2] - y[0]
    c3 = dy20/(dx21*dx20**2) - dy10/(dx21*dx10**2) + yPrime/(dx10*dx20)
    c2 = ((dy10/dx10) - yPrime)/dx10 - c3*(2*x[0] + x[1])
    c1 = yPrime - 2*c2*x[0] - 3*c3*x[0]**2
    c0 = y[0] - c1*x[0] - c2*x[0]**2 - c3*x[0]**3
    return [c0, c1, c2, c3]
def threePointQuadraticApprox(x, y):
    """Fit a parabola C0 + C1*x + C2*x^2 through three (x, y) points.

    x and y must be equal-length sequences of at least three values.
    Returns [C0, C1, C2].
    """
    slope20 = (y[2]-y[0])/(x[2]-x[0])
    slope10 = (y[1]-y[0])/(x[1]-x[0])
    c2 = (slope20 - slope10)/(x[2]-x[1])
    c1 = (y[1] - y[0])/(x[1]-x[0]) - c2*(x[0]+x[1])
    c0 = y[0] - c1*x[0] - c2*x[0]**2
    return [c0, c1, c2]
def twoPointLinearApprox(x, y):
    """Fit a line C0 + C1*x through two (x, y) points.

    Returns [C0, C1] (intercept first, then slope).
    """
    slope = (y[1] - y[0])/(x[1]-x[0])
    intercept = y[0] - slope*x[0]
    return [intercept, slope]
def getValueOfPoly(c, x):
    """Evaluate the polynomial c[0] + c[1]*x + c[2]*x**2 + ... at x.

    Generalized: the previous implementation only handled degrees 0-3 and
    fell through to an error message plus the sentinel 99999999 for longer
    coefficient lists.  Horner's method now evaluates any number of
    coefficients; the error path is kept only for an empty list, preserving
    the old sentinel behaviour for callers that check it.
    """
    if not c:
        print("Polynomial could not be calculated. Check getValueOfPoly function.")
        return 99999999
    # Horner's method: fold from the highest-order coefficient down.
    y = 0
    for coefficient in reversed(c):
        y = y*x + coefficient
    return y
#!/usr/bin/env python
# File created on 20 Jul 2014
from __future__ import division
__author__ = "Yoshiki Vazquez Baeza"
__copyright__ = "Copyright 2013, The Emperor Project"
__credits__ = ["Yoshiki Vazquez Baeza"]
__license__ = "BSD"
__version__ = "0.9.3-dev"
__maintainer__ = "Yoshiki Vazquez Baeza"
__email__ = "yoshiki89@gmail.com"
__status__ = "Development"
from unittest import TestCase, main
from StringIO import StringIO
from skbio.math.stats.ordination import OrdinationResults
from emperor.core import Emperor
class TopLevelTests(TestCase):
    """Tests for the top-level Emperor object (construction and accessors)."""
    def setUp(self):
        # Parse the serialized PCoA fixture into an OrdinationResults object
        or_f = StringIO(PCOA_STRING)
        self.ord_res = OrdinationResults.from_file(or_f)
        # Mapping-file rows: one metadata row per sample in the fixture
        self.data = [['PC.354', 'Control', '20061218', 'Ctrol_mouse_I.D._354'],
                     ['PC.355', 'Control', '20061218', 'Control_mouse_I.D._355'],
                     ['PC.356', 'Control', '20061126', 'Control_mouse_I.D._356'],
                     ['PC.481', 'Control', '20070314', 'Control_mouse_I.D._481'],
                     ['PC.593', 'Control', '20071210', 'Control_mouse_I.D._593'],
                     ['PC.607', 'Fast', '20071112', 'Fasting_mouse_I.D._607'],
                     ['PC.634', 'Fast', '20080116', 'Fasting_mouse_I.D._634'],
                     ['PC.635', 'Fast', '20080116', 'Fasting_mouse_I.D._635'],
                     ['PC.636', 'Fast', '20080116', 'Fasting_mouse_I.D._636']]
        self.headers = ['SampleID', 'Treatment', 'DOB', 'Description']
    def test_str(self):
        # str(Emperor) must reproduce the full expected HTML document
        emp = Emperor(self.ord_res, self.data, self.headers)
        self.assertEqual(HTML_STRING, str(emp))
    def test_ids(self):
        # ids must expose the identifiers of all nine samples
        emp = Emperor(self.ord_res, self.data, self.headers)
        self.assertEqual(['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593',
                          'PC.607', 'PC.634', 'PC.635', 'PC.636'],
                         emp.ids)
PCOA_STRING = """Eigvals 9
0.479412119045 0.29201495623 0.247449246064 0.201496072404 0.180076127632\
0.147806772727 0.135795927213 0.112259695609 0.0
Proportion explained 9
0.266887048633 0.162563704022 0.137754129161 0.11217215823 0.10024774995\
0.0822835130237 0.0755971173665 0.0624945796136 0.0
Species 0 0
Site 9 9
PC.636 -0.276542163845 -0.144964375408 0.0666467344429 -0.0677109454288\
0.176070269506 0.072969390136 -0.229889463523 -0.0465989416581\
-0.0
PC.635 -0.237661393984 0.0460527772512 -0.138135814766 0.159061025229\
-0.247484698646 -0.115211468101 -0.112864033263 0.0647940729676\
-0.0
PC.356 0.228820399536 -0.130142097093 -0.287149447883 0.0864498846421\
0.0442951919304 0.20604260722 0.0310003571386 0.0719920436501 -0.0
PC.481 0.0422628480532 -0.0139681511889 0.0635314615517 -0.346120552134\
-0.127813807608 0.0139350721063 0.0300206887328 0.140147849223 -0.0
PC.354 0.280399117569 -0.0060128286014 0.0234854344148 -0.0468109474823\
-0.146624450094 0.00566979124596 -0.0354299634191\
-0.255785794275 -0.0
PC.593 0.232872767451 0.139788385269 0.322871079774 0.18334700682\
0.0204661596818 0.0540589147147 -0.0366250872041 0.0998235721267\
-0.0
PC.355 0.170517581885 -0.194113268955 -0.0308965283066 0.0198086158783\
0.155100062794 -0.279923941712 0.0576092515759 0.0242481862127 -0.0
PC.607 -0.0913299284215 0.424147148265 -0.135627421345 -0.057519480907\
0.151363490722 -0.0253935675552 0.0517306152066 -0.038738217609\
-0.0
PC.634 -0.349339228244 -0.120787589539 0.115274502117 0.0694953933826\
-0.0253722182853 0.067853201946 0.244447634756 -0.0598827706386\
-0.0
Biplot 0 0
Site constraints 0 0
"""
# Static Emperor (PCoA visualization) page: the full HTML document with the
# ordination data inlined as JavaScript globals. Kept verbatim — this string
# is runtime output and must not be reformatted.
HTML_STRING = """<!doctype html>
<html lang="en">
<head>
<title>Emperor</title>
<meta charset="utf-8">
<link rel="shortcut icon" href="emperor_required_resources/img/favicon.ico" />
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<link rel="stylesheet" type="text/css" href="emperor_required_resources/emperor/css/emperor.css">
<link rel="stylesheet" type="text/css" href="emperor_required_resources/css/jquery-ui2.css">
<link rel="stylesheet" type="text/css" href="emperor_required_resources/css/colorPicker.css">
<link rel="stylesheet" type="text/css" href="emperor_required_resources/css/spectrum.css">
<link rel="stylesheet" type="text/css" href="emperor_required_resources/css/d3.parcoords.css">
<table id="logotable" style="vertical-align:middle;text-align:center;height:100%;width:100%;margin:0;padding:0;border:0;">
<tr><td><img src="emperor_required_resources/img/emperor.png" alt="Emperor" id="logo"/></td></tr>
</table>
<script type="text/javascript" src="emperor_required_resources/js/d3.v3.min.js"></script>
<script type="text/javascript" src="emperor_required_resources/js/d3.parcoords.js"></script>
<script type="text/javascript" src="emperor_required_resources/js/jquery-1.7.1.min.js"></script>
<script type="text/javascript" src="emperor_required_resources/js/jquery-ui-1.8.17.custom.min.js"></script>
<script src="emperor_required_resources/js/jquery.colorPicker.js"></script>
<script src="emperor_required_resources/js/spectrum.js"></script>
<script src="emperor_required_resources/js/Three.js"></script>
<script src="emperor_required_resources/js/js/Detector.js"></script>
<script src="emperor_required_resources/js/js/RequestAnimationFrame.js"></script>
<script src="emperor_required_resources/emperor/js/emperor.js"></script>
<script type="text/javascript" src="emperor_required_resources/js/THREEx.screenshot.js"></script>
<script type="text/javascript" src="emperor_required_resources/js/FileSaver.min.js"></script>
<script type="text/javascript">
var g_mappingFileHeaders = ['SampleID','Treatment','DOB','Description'];
var g_mappingFileData = { 'PC.636': ['PC.636','Fast','20080116','Fasting_mouse_I.D._636'],'PC.355': ['PC.355','Control','20061218','Control_mouse_I.D._355'],'PC.607': ['PC.607','Fast','20071112','Fasting_mouse_I.D._607'],'PC.634': ['PC.634','Fast','20080116','Fasting_mouse_I.D._634'],'PC.635': ['PC.635','Fast','20080116','Fasting_mouse_I.D._635'],'PC.593': ['PC.593','Control','20071210','Control_mouse_I.D._593'],'PC.356': ['PC.356','Control','20061126','Control_mouse_I.D._356'],'PC.481': ['PC.481','Control','20070314','Control_mouse_I.D._481'],'PC.354': ['PC.354','Control','20061218','Ctrol_mouse_I.D._354'] };
var g_spherePositions = new Array();
g_spherePositions['PC.354'] = { 'name': 'PC.354', 'color': 0, 'x': -0.276542, 'y': -0.144964, 'z': 0.066647, 'P1': -0.276542, 'P2': -0.144964, 'P3': 0.066647, 'P4': -0.067711, 'P5': 0.176070, 'P6': 0.072969, 'P7': -0.229889, 'P8': -0.046599 };
g_spherePositions['PC.355'] = { 'name': 'PC.355', 'color': 0, 'x': -0.237661, 'y': 0.046053, 'z': -0.138136, 'P1': -0.237661, 'P2': 0.046053, 'P3': -0.138136, 'P4': 0.159061, 'P5': -0.247485, 'P6': -0.115211, 'P7': -0.112864, 'P8': 0.064794 };
g_spherePositions['PC.356'] = { 'name': 'PC.356', 'color': 0, 'x': 0.228820, 'y': -0.130142, 'z': -0.287149, 'P1': 0.228820, 'P2': -0.130142, 'P3': -0.287149, 'P4': 0.086450, 'P5': 0.044295, 'P6': 0.206043, 'P7': 0.031000, 'P8': 0.071992 };
g_spherePositions['PC.481'] = { 'name': 'PC.481', 'color': 0, 'x': 0.042263, 'y': -0.013968, 'z': 0.063531, 'P1': 0.042263, 'P2': -0.013968, 'P3': 0.063531, 'P4': -0.346121, 'P5': -0.127814, 'P6': 0.013935, 'P7': 0.030021, 'P8': 0.140148 };
g_spherePositions['PC.593'] = { 'name': 'PC.593', 'color': 0, 'x': 0.280399, 'y': -0.006013, 'z': 0.023485, 'P1': 0.280399, 'P2': -0.006013, 'P3': 0.023485, 'P4': -0.046811, 'P5': -0.146624, 'P6': 0.005670, 'P7': -0.035430, 'P8': -0.255786 };
g_spherePositions['PC.607'] = { 'name': 'PC.607', 'color': 0, 'x': 0.232873, 'y': 0.139788, 'z': 0.322871, 'P1': 0.232873, 'P2': 0.139788, 'P3': 0.322871, 'P4': 0.183347, 'P5': 0.020466, 'P6': 0.054059, 'P7': -0.036625, 'P8': 0.099824 };
g_spherePositions['PC.634'] = { 'name': 'PC.634', 'color': 0, 'x': 0.170518, 'y': -0.194113, 'z': -0.030897, 'P1': 0.170518, 'P2': -0.194113, 'P3': -0.030897, 'P4': 0.019809, 'P5': 0.155100, 'P6': -0.279924, 'P7': 0.057609, 'P8': 0.024248 };
g_spherePositions['PC.635'] = { 'name': 'PC.635', 'color': 0, 'x': -0.091330, 'y': 0.424147, 'z': -0.135627, 'P1': -0.091330, 'P2': 0.424147, 'P3': -0.135627, 'P4': -0.057519, 'P5': 0.151363, 'P6': -0.025394, 'P7': 0.051731, 'P8': -0.038738 };
g_spherePositions['PC.636'] = { 'name': 'PC.636', 'color': 0, 'x': -0.349339, 'y': -0.120788, 'z': 0.115275, 'P1': -0.349339, 'P2': -0.120788, 'P3': 0.115275, 'P4': 0.069495, 'P5': -0.025372, 'P6': 0.067853, 'P7': 0.244448, 'P8': -0.059883 };
var g_ellipsesDimensions = new Array();
var g_segments = 8, g_rings = 8, g_radius = 0.007557;
var g_xAxisLength = 0.629738;
var g_yAxisLength = 0.618260;
var g_zAxisLength = 0.610021;
var g_xMaximumValue = 0.280399;
var g_yMaximumValue = 0.424147;
var g_zMaximumValue = 0.322871;
var g_xMinimumValue = -0.349339;
var g_yMinimumValue = -0.194113;
var g_zMinimumValue = -0.287149;
var g_maximum = 0.424147;
var g_pc1Label = "PC1 (0.27 %)";
var g_pc2Label = "PC2 (0.16 %)";
var g_pc3Label = "PC3 (0.14 %)";
var g_number_of_custom_axes = 0;
var g_fractionExplained = [0.002669, 0.001626, 0.001378, 0.001122, 0.001002, 0.000823, 0.000756, 0.000625];
var g_fractionExplainedRounded = [0.27, 0.16, 0.14, 0.11, 0.10, 0.08, 0.08, 0.06];
var g_taxaPositions = new Array();
var g_comparisonPositions = new Array();
var g_isSerialComparisonPlot = true;
var g_vectorPositions = new Array();
document.getElementById("logo").style.display = 'none';
document.getElementById("logotable").style.display = 'none';
</script>
</head>
<body>
<div id="overlay">
<div>
<img src="emperor_required_resources/img/emperor.png" alt="Emperor" id="small-logo"/>
<h1>WebGL is not enabled!</h1>
<p>Emperor's visualization framework is WebGL based, it seems that your system doesn't have this resource available. Here is what you can do:</p>
<p id="explanation"><strong>Chrome:</strong> Type "chrome://flags/" into the address bar, then search for "Disable WebGL". Disable this option if you haven't already. <em>Note:</em> If you follow these steps and still don't see an image, go to "chrome://flags/" and then search for "Override software rendering list" and enable this option.</p>
<p id="explanation"><strong>Safari:</strong> Open Safari's menu and select Preferences. Click on the advanced tab, and then check "Show Developer" menu. Then open the "Developer" menu and select "Enable WebGL".</p>
<p id="explanation"><strong>Firefox:</strong> Go to Options through Firefox > Options or Tools > Options. Go to Advanced, then General. Check "Use hardware acceleration when available" and restart Firefox.</p>
<p id="explanation"><strong>Other browsers:</strong> The only browsers that support WebGL are Chrome, Safari, and Firefox. Please switch to these browsers when using Emperor.</p>
<p id="explanation"><em>Note:</em> Once you went through these changes, reload the page and it should work!</p>
<p id="source">Sources: Instructions for <a href="https://www.biodigitalhuman.com/home/enabling-webgl.html">Chrome and Safari</a>, and <a href="http://www.infewbytes.com/?p=144">Firefox</a></p>
</div>
</div>
<div id="emperor-plot-toggle">
<form>
<div id="plottype">
<input id="pcoa" type="radio" id="pcoa" name="plottype" checked="checked" /><label for="pcoa">PCoA</label>
<input id="parallel" type="radio" id="parallel" name="plottype" /><label for="parallel">Parallel</label>
</div>
</form>
</div>
<div id="pcoaPlotWrapper" class="emperor-plot-wrapper">
<label id="pointCount" class="ontop">
</label>
<div id="finder" class="arrow-right">
</div>
<div id="labels" class="unselectable">
</div>
<div id="taxalabels" class="unselectable">
</div>
<div id="axislabels" class="axis-labels">
</div>
<div id="main-plot">
</div>
</div>
<div id="parallelPlotWrapper" class="emperor-plot-wrapper">
</div>
<div id="emperor-separator" class="emperor-separator" ondblclick="separatorDoubleClick()"></div>
<div id="emperor-menu">
<div id="emperor-menu-tabs">
<ul>
<li><a href="#keytab">Key</a></li>
<li><a href="#colorby">Colors</a></li>
<li><a href="#showby">Visibility</a></li>
<li><a href="#scalingby">Scaling</a></li>
<li><a href="#labelby">Labels</a></li>
<li><a href="#axes">Axes</a></li>
<li><a href="#options">Options</a></li>
</ul>
<div id="keytab" class="emperor-tab-div">
<form name="keyFilter">
<label>Filter </label><input name="filterBox" id="searchBox" type="text" onkeyup="filterKey()"></input>
</form>
<div id="key">
</div>
</div>
<div id="colorby" class="emperor-tab-div">
<input type="checkbox" onchange="toggleContinuousAndDiscreteColors(this)" id="discreteorcontinuouscolors" name="discreteorcontinuouscolors"> Use gradient colors</input>
<br><br>
<select id="colorbycombo" onchange="colorByMenuChanged()" size="3" class="emperor-tab-drop-down">
</select>
<div class="list" id="colorbylist">
</div>
</div>
<div id="showby" class="emperor-tab-div">
<table class="emperor-tab-table">
<tr>
<td align="center">
<select id="showbycombo" onchange="showByMenuChanged()" class="emperor-tab-drop-down">
</select>
</td>
</tr>
<tr>
<td>
<div class="list" id="showbylist" style="height:100%%;width:100%%">
</div>
</td>
</tr>
<tr>
<td style="padding-left: 12px; padding-right:12px;">
<hr class='section-break'>
<br>
<label for="sphereopacity" class="text">Global Sphere Opacity</label>
<label id="sphereopacity" class="slidervalue"></label>
<div id="sopacityslider" class="slider-range-max"></div>
</td>
</tr>
<tr>
<td align="center">
<button id="toggle-visibility-selection-button" onClick="toggleVisibleCategories()">Invert Selected</button>
</td>
</tr>
</table>
</div>
<div id="scalingby" class="emperor-tab-div">
<table class="emperor-tab-table">
<tr>
<td align="center">
<select id="scalingbycombo" onchange="scalingByMenuChanged()" class="emperor-tab-drop-down">
</select>
</td>
</tr>
<tr>
<td>
<div class="list" id="scalingbylist" style="height:100%%;width:100%%">
</div>
</td>
</tr>
<tr>
<td style="padding-left: 12px; padding-right:12px;">
<hr class='section-break'>
<br>
<label for="sphereradius" class="text">Global Sphere Scale</label>
<label id="sphereradius" class="slidervalue"></label>
<div id="sradiusslider" class="slider-range-max"></div>
</td>
</tr>
</table>
</div>
<div id="labelby" class="emperor-tab-div">
<div id="labels-top">
<form name="plotoptions">
<input type="checkbox" onClick="toggleLabels()">Samples Label Visibility</input>
</form>
<br>
<label for="labelopacity" class="text">Label Opacity</label>
<label id="labelopacity" class="slidervalue"></label>
<div id="lopacityslider" class="slider-range-max"></div>
<div id="label-color-holder clearfix">
<table class="emperor-tab-table">
<tr><td><div id="labelColor" class="colorbox"></div></td><td><label>Master Label Color</label></td></tr>
<br><br>
</table></div>
</div>
<br>
<select id="labelcombo" onchange="labelMenuChanged()" class="emperor-tab-drop-down">
</select>
<div class="list" id="label-list">
</div>
</div>
<div id="axes" class="emperor-tab-div">
<div id="pcoaaxes">
<div class="list" id="axeslist">
</div>
</div>
</div>
<div id="options" class="emperor-tab-div">
<table class="emperor-tab-table">
<tr><td><div id="axeslabelscolor" class="colorbox" name="axeslabelscolor"></div></td><td title="Axes Labels Color">Axes Labels Color</td></tr>
<tr><td><div id="axescolor" class="colorbox" name="axescolor"></div></td><td title="Axes Color Title">Axes Color</td></tr>
<tr><td><div id="rendererbackgroundcolor" class="colorbox" name="rendererbackgroundcolor"></div></td><td title="Background Color Title">Background Color</td></tr>
<tr><td colspan="2">
<div id="pcoaviewoptions" class="">
<form name="settingsoptionscolor">
</form>
<div id="pcoaoptions" class="">
<form name="settingsoptions">
<input type="checkbox" onchange="toggleScaleCoordinates(this)" id="scale_checkbox" name="scale_checkbox">Scale coords by percent explained</input>
</form>
</div>
<br><input id="reset" class="button" type="submit" value="Recenter Camera" style="" onClick="resetCamera()">
<br><br>
<hr class='section-break'>
<br>Filename <small>(only letters, numbers, ., - and _)</small>:
<br><input name="saveas_name" id="saveas_name" value="screenshot" type="text"/>
<br><input id="saveas_legends" class="checkbox" type="checkbox" style=""> Create legend
<input id="saveas" class="button" type="submit" value="Save as SVG" style="" onClick="saveSVG()"/>
<br><br>For a PNG, simply press 'ctrl+p'.
<div id="paralleloptions" class="">
</div>
</div>
<br>
</td></tr>
</table>
</div>
</div>
</div>
</body>
</html>
"""
# NOTE(review): main() is not defined anywhere in this module — running the
# file directly raises NameError. Confirm the intended entry point.
if __name__ == "__main__":
    main()
|
# Module for logging config
import logging
import sys
# Send INFO-and-above records from all loggers to stderr.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
# Default logger for the application entry point.
log = logging.getLogger("main")
def get_logger(lgr):
    """Return the logger registered under the name *lgr*."""
    named_logger = logging.getLogger(lgr)
    return named_logger
if __name__ == '__main__':
    # Manual smoke test: show the concrete type of the module-level logger.
    print(type(log))
|
"""
sentry.tagstore.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import re
from sentry.constants import TAG_LABELS
from sentry.utils.services import Service
# Valid pattern for tag key names: alphanumerics plus underscore, dot,
# colon and hyphen; no whitespace.
TAG_KEY_RE = re.compile(r'^[a-zA-Z0-9_\.:-]+$')
# These tags are special and are used in pairing with `sentry:{}`
# they should not be allowed to be set via data ingest due to ambiguity
INTERNAL_TAG_KEYS = frozenset(
    ('release', 'dist', 'user', 'filename', 'function'))
# TODO(dcramer): pull in enum library
class TagKeyStatus(object):
    """Visibility/deletion states for a tag key."""
    VISIBLE = 0
    PENDING_DELETION = 1
    DELETION_IN_PROGRESS = 2
class TagStorage(Service):
    """Abstract service interface for tag storage backends.

    Concrete backends must implement every method listed in ``__all__``;
    the base implementations below all raise ``NotImplementedError``.
    Validation and labelling helpers (``is_valid_key`` etc.) are concrete.
    """
    __all__ = (
        'is_valid_key',
        'is_valid_value',
        'is_reserved_key',
        'prefix_reserved_key',
        'get_standardized_key',
        'get_tag_key_label',
        'get_tag_value_label',
        'create_tag_key',
        'get_or_create_tag_key',
        'create_tag_value',
        'get_or_create_tag_value',
        'create_group_tag_key',
        'get_or_create_group_tag_key',
        'create_group_tag_value',
        'get_or_create_group_tag_value',
        'create_event_tag',
        'get_tag_key',
        'get_tag_keys',
        'get_tag_value',
        'get_tag_values',
        'get_group_tag_key',
        'get_group_tag_keys',
        'get_group_tag_value',
        'get_group_tag_values',
        'delete_tag_key',
        'delete_group_tag_key',
        'delete_all_group_tag_keys',
        'delete_all_group_tag_values',
        'get_group_values_seen',
        'get_group_event_ids',
        'get_tag_value_qs',
        'get_group_tag_value_qs',
        'get_group_tag_value_count',
        'get_top_group_tag_values',
        'get_first_release',
        'get_last_release',
        'incr_tag_key_values_seen',
        'incr_tag_value_times_seen',
        'incr_group_tag_key_values_seen',
        'incr_group_tag_value_times_seen',
        'update_project_for_group',
        'get_group_ids_for_users',
        'get_group_tag_values_for_users',
        'get_tags_for_search_filter',
        'get_event_tag_qs',
    )
    def is_valid_key(self, key):
        """Return True if *key* matches the allowed tag-key pattern."""
        return bool(TAG_KEY_RE.match(key))
    def is_valid_value(self, value):
        """Return True if *value* contains no newline characters."""
        return '\n' not in value
    def is_reserved_key(self, key):
        """Return True if *key* is one of the internal reserved tag keys."""
        return key in INTERNAL_TAG_KEYS
    def prefix_reserved_key(self, key):
        # XXX(dcramer): kill sentry prefix for internal reserved tags
        if self.is_reserved_key(key):
            return 'sentry:{0}'.format(key)
        else:
            return key
    def get_standardized_key(self, key):
        """Strip the internal ``sentry:`` prefix from *key*, if present."""
        if key.startswith('sentry:'):
            return key.split('sentry:', 1)[-1]
        return key
    def get_tag_key_label(self, key):
        """Return a human-readable label for *key* (known label or titlecased)."""
        return TAG_LABELS.get(key) or key.replace('_', ' ').title()
    def get_tag_value_label(self, key, value):
        """Return a display label for *value*, unwrapping known prefixes."""
        label = value
        if key == 'sentry:user':
            if value.startswith('id:'):
                label = value[len('id:'):]
            elif value.startswith('email:'):
                label = value[len('email:'):]
            elif value.startswith('username:'):
                label = value[len('username:'):]
            elif value.startswith('ip:'):
                label = value[len('ip:'):]
        elif key == 'sentry:release':
            from sentry.models import Release
            label = Release.get_display_version(value)
        return label
    def create_tag_key(self, project_id, key, **kwargs):
        """
        >>> create_tag_key(1, "key1")
        """
        raise NotImplementedError
    def get_or_create_tag_key(self, project_id, key, **kwargs):
        """
        >>> get_or_create_tag_key(1, "key1")
        """
        raise NotImplementedError
    def create_tag_value(self, project_id, key, value, **kwargs):
        """
        >>> create_tag_value(1, "key1", "value1")
        """
        raise NotImplementedError
    def get_or_create_tag_value(self, project_id, key, value, **kwargs):
        """
        >>> get_or_create_tag_value(1, "key1", "value1")
        """
        raise NotImplementedError
    def create_group_tag_key(self, project_id, group_id, key, **kwargs):
        """
        >>> create_group_tag_key(1, 2, "key1")
        """
        raise NotImplementedError
    def get_or_create_group_tag_key(self, project_id, group_id, key, **kwargs):
        """
        >>> get_or_create_group_tag_key(1, 2, "key1")
        """
        raise NotImplementedError
    def create_group_tag_value(self, project_id, group_id, key, value, **kwargs):
        """
        >>> create_group_tag_value(1, 2, "key1", "value1")
        """
        raise NotImplementedError
    def get_or_create_group_tag_value(self, project_id, group_id, key, value, **kwargs):
        """
        >>> get_or_create_group_tag_value(1, 2, "key1", "value1")
        """
        raise NotImplementedError
    def create_event_tag(self, project_id, group_id, event_id, key_id, value_id):
        """
        >>> create_event_tag(1, 2, 3, 4, 5)
        """
        raise NotImplementedError
    def get_tag_key(self, project_id, key, status=TagKeyStatus.VISIBLE):
        """
        >>> get_tag_key(1, "key1")
        """
        raise NotImplementedError
    def get_tag_keys(self, project_ids, keys=None, status=TagKeyStatus.VISIBLE):
        """
        >>> get_tag_keys([1, 2], ["key1", "key2"])
        >>> get_tag_keys(1, ["key1", "key2"])
        """
        raise NotImplementedError
    def get_tag_value(self, project_id, key, value):
        """
        >>> get_tag_value(1, "key1", "value1")
        """
        raise NotImplementedError
    def get_tag_values(self, project_ids, key, values=None):
        """
        >>> get_tag_values([1, 2], "key1", ["value1", "value2"])
        >>> get_tag_values(1, "key1", ["value1", "value2"])
        """
        raise NotImplementedError
    def get_group_tag_key(self, group_id, key):
        """
        >>> get_group_tag_key(1, "key1")
        """
        raise NotImplementedError
    def get_group_tag_keys(self, group_ids, keys=None, limit=None):
        """
        >>> get_group_tag_keys([1, 2], ["key1", "key2"])
        >>> get_group_tag_keys(1, ["key1", "key2"])
        """
        raise NotImplementedError
    def get_group_tag_value(self, group_id, key, value):
        """
        >>> get_group_tag_value(1, "key1", "value1")
        """
        raise NotImplementedError
    def get_group_tag_values(self, group_ids, keys=None, values=None):
        """
        >>> get_group_tag_values([1, 2], ["key1", "key2"], ["value1", "value2"])
        >>> get_group_tag_values(1, ["key1", "key2"], ["value1", "value2"])
        """
        raise NotImplementedError
    def delete_tag_key(self, project_id, key):
        """
        >>> delete_tag_key(1, "key1")
        """
        raise NotImplementedError
    def delete_group_tag_key(self, group_id, key):
        """
        >>> delete_group_tag_key(1, "key1")
        """
        raise NotImplementedError
    def delete_all_group_tag_keys(self, group_id):
        """
        >>> delete_all_group_tag_keys(1)
        """
        raise NotImplementedError
    def delete_all_group_tag_values(self, group_id):
        """
        >>> delete_all_group_tag_values(1)
        """
        raise NotImplementedError
    def incr_tag_key_values_seen(self, project_id, key, count=1):
        """
        >>> incr_tag_key_values_seen(1, "key1")
        """
        raise NotImplementedError
    def incr_tag_value_times_seen(self, project_id, key, value, extra=None, count=1):
        """
        >>> incr_tag_value_times_seen(1, "key1", "value1")
        """
        raise NotImplementedError
    def incr_group_tag_key_values_seen(self, project_id, group_id, key, count=1):
        """
        >>> incr_group_tag_key_values_seen(1, 2, "key1")
        """
        raise NotImplementedError
    def incr_group_tag_value_times_seen(self, group_id, key, value, extra=None, count=1):
        """
        >>> incr_group_tag_value_times_seen(1, "key1", "value1")
        """
        raise NotImplementedError
    def get_group_event_ids(self, project_id, group_id, tags):
        """
        >>> get_group_event_ids(1, 2, {'key1': 'value1', 'key2': 'value2'})
        """
        raise NotImplementedError
    def get_tag_value_qs(self, project_id, key, query=None):
        """
        >>> get_tag_value_qs(1, 'environment', query='prod')
        """
        raise NotImplementedError
    def get_group_tag_value_qs(self, group_id, key):
        """
        >>> get_group_tag_value_qs(1, 'environment')
        """
        raise NotImplementedError
    def get_group_values_seen(self, group_ids, key):
        """
        >>> get_group_values_seen([1, 2], 'key1')
        """
        raise NotImplementedError
    def get_group_tag_value_count(self, group_id, key):
        """
        >>> get_group_tag_value_count(1, 'key1')
        """
        raise NotImplementedError
    def get_top_group_tag_values(self, group_id, key, limit=3):
        """
        >>> get_top_group_tag_values(1, 'key1')
        """
        raise NotImplementedError
    def get_first_release(self, group_id):
        """
        >>> get_first_release(1)
        """
        raise NotImplementedError
    def get_last_release(self, group_id):
        """
        >>> get_last_release(1)
        """
        raise NotImplementedError
    def update_project_for_group(self, group_id, old_project_id, new_project_id):
        """
        >>> update_project_for_group(1, 2, 3)
        """
        raise NotImplementedError
    def get_group_ids_for_users(self, project_ids, event_users, limit=100):
        """
        >>> get_group_ids_for_users([1,2], [EventUser(1), EventUser(2)])
        """
        raise NotImplementedError
    def get_group_tag_values_for_users(self, event_users, limit=100):
        """
        >>> get_group_tag_values_for_users([EventUser(1), EventUser(2)])
        """
        raise NotImplementedError
    def get_tags_for_search_filter(self, project_id, tags):
        """
        >>> get_tags_for_search_filter(1, [('key1', 'value1'), ('key2', 'value2')])
        """
        raise NotImplementedError
    def get_event_tag_qs(self, **kwargs):
        """
        >>> get_event_tag_qs(event_id=1, key_id=2)
        """
        raise NotImplementedError
|
import requests
from bs4 import BeautifulSoup
from lxml import html
import pandas as pd
import os.path
import json

# --- Log in to Gradescope --------------------------------------------------
session_requests = requests.session()
login_url = "https://www.gradescope.com/login"
result = session_requests.get(login_url)
tree = html.fromstring(result.text)
# Gradescope embeds a CSRF token in the login form; it must be echoed back.
authenticity_token = list(set(tree.xpath("//input[@name='authenticity_token']/@value")))[0]
payload = {
    "session[email]": "<Username>",
    "session[password]": "<Password>",
    "authenticity_token": authenticity_token,
}
result = session_requests.post(
    login_url,
    data=payload,
    headers=dict(referer=login_url),
)

# --- Fetch the account page that lists all courses -------------------------
url = 'https://www.gradescope.com/account'
result = session_requests.get(
    url,
    headers=dict(referer=url),
)
soup = BeautifulSoup(result.content, features="lxml")

# Course short names, taken straight from the tag text.
# BUG FIX: the original used str.lstrip/rstrip with the tag markup, but those
# strip *character sets*, not prefixes/suffixes, and could eat letters of the
# course name itself.
course_names = [tag.get_text() for tag in
                soup.find_all('h3', {"class": "courseBox--shortname"})]

# Unique course ids, parsed from each course link's href ("/courses/<id>").
# BUG FIX: the original sliced a fixed character range of the tag's string
# representation, which breaks as soon as the id length or markup changes.
course_numbers = [anchor['href'].rstrip('/').split('/')[-1]
                  for anchor in soup.find_all('a', {"class": "courseBox"})]

# One URL per course.
course_urls = ["https://www.gradescope.com/courses/" + number
               for number in course_numbers]

# For every course page, build {assignment name: due date}.
assignments = []
for course_url in course_urls:
    result = session_requests.get(
        course_url,
        headers=dict(referer=course_url),
    )
    soup = BeautifulSoup(result.content, features="lxml")
    table = soup.find('table', {"class": "table"})
    assignment_lists = pd.read_html(str(table))[0]
    names = assignment_lists['Name'].tolist()
    due_dates = assignment_lists['Due Date'].tolist()
    assignments.append({str(name): str(due)
                        for name, due in zip(names, due_dates)})

# "<short name> | <course id>" keys mapped to that course's assignments.
list_of_courses = [name + " | " + number
                   for name, number in zip(course_names, course_numbers)]
course_with_assignments = dict(zip(list_of_courses, assignments))

# Serialise the result as JSON for the JS front end.
# os.path.join keeps this portable (the original hard-coded Windows "\\"
# separators in the relative path).
save_path = os.path.abspath(os.path.join(".", "App", "scripts"))
completeName = os.path.join(save_path, 'gsParser_results.txt')
with open(completeName, 'w') as file:
    file.write(json.dumps(course_with_assignments))
|
# -*- coding: utf-8 -*-
import time
import re
import os
import sys
import numpy as np
from bs4 import BeautifulSoup
# reload(sys)
# sys.setdefaultencoding('utf-8')
# sys.path.append("/usr/bin/pdf2txt")
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
# Require exactly one command-line argument (-t for txt output, -x for xml).
if len(sys.argv) != 2:
    sys.exit("Erreur argument manquant !")
def convertPdfToTxt(name):
    """Convert *name* (a PDF file) to a UTF-8 .txt file via pdftotext."""
    base = name.split(".")[0]
    command = "pdftotext -enc UTF-8 '{}' '{}.txt'".format(name, base)
    os.system(command)
def convertPdfToHtml(item):
    """Convert *item* (a PDF file) to HTML with pdf2txt, replacing old output."""
    base = str(item.split(".")[0])
    print("Convert " + base + " to HTML")
    # Remove a stale conversion before regenerating it.
    os.system('rm -f {}.html'.format(base))
    os.system("pdf2txt -t html -o {}.html {}.pdf".format(base, base))
def getResume(data):
    """Extract the abstract paragraph from raw article text.

    Returns an empty string when no "Abstract"/"ABSTRACT" marker is found.
    """
    body = ""
    if re.search("Abstract", data):
        body = data.split("Abstract")[1]
    elif re.search("ABSTRACT", data):
        body = data.split("ABSTRACT")[1]
    # Keep only the first paragraph after the marker.
    paragraph = body.split("\n\n")[0]
    # Cut anything after a "Keywords" marker.
    paragraph = paragraph.split("Keywords")[0]
    # Collapse line breaks into spaces.
    paragraph = paragraph.replace("\n", " ")
    # Scrub non-alphabetic noise from the first five characters only.
    return re.sub('[^a-zA-Z ]+', '', paragraph[:5]) + paragraph[5:]
def getTitle(txtFile, title):
    """Guess the article title from the HTML conversion of the paper.

    Heuristic: collect every <span> whose font-size lies in 10-35px and whose
    text is longer than 8 characters, then concatenate the texts rendered at
    the largest such size. Falls back to the first line of *txtFile* when the
    HTML text contains unmapped glyphs ("(cid:" artefacts from pdf2txt).

    txtFile -- full plain-text content of the article (fallback source)
    title   -- file base name; reads <title>.html produced by convertPdfToHtml
    """
    data = open(title + ".html","r")
    # print("getTitle for " + nameFile + ".html")
    soup = BeautifulSoup(data, "html.parser")
    spans = soup.find_all("span")
    highestItem = []
    highestSize = 0
    fontSize = 0
    content = ""
    if len(spans) <= 0:
        return "No title found"
    else:
        for span in spans:
            # NOTE(review): span["style"] raises KeyError for spans without a
            # style attribute — pdf2txt output appears to always set one, but
            # confirm before reusing this on other HTML.
            s = str(span["style"])
            start = "font-size:"
            end = "px;"
            if s.find(start) >= 0:
                # Slice the numeric part out of "... font-size:NNpx; ...".
                fontSize = s[s.find(start)+len(start):s.rfind(end)-1]
                # print("style : ")
                fontSize = int(fontSize)
                content = span.text
                # print(content + " : " + str(fontSize))
                # print("-----")
            # Candidate title fragments: plausible heading size, non-trivial text.
            # (fontSize/content deliberately carry over from the last styled span.)
            if (fontSize >= 10 and fontSize <= 35 and len(content) > 8):
                highestItem.append((fontSize,content));
        if len(highestItem) > 0:
            # Largest font size first; join every fragment rendered at it.
            highestItem.sort(key=lambda x: x[0], reverse=True)
            noDuplicated = list(set(highestItem))
            highestSize = highestItem[0][0]
            res = ""
            for elem in noDuplicated:
                if elem[0] == highestSize:
                    res += elem[1]
            res = res.replace('\n', ' ')
            if res.find('(cid:') >= 0:
                # Unmapped glyphs: fall back to the first plain-text line.
                res = txtFile.split("\n")[0]
                print("----------------" + res)
            return res
        return "Empty"
def getReferences(data):
    """Extract the references/bibliography section from raw article text.

    Returns an empty string when no references marker is found.
    """
    if re.search("References\n", data):
        # One paper has a stray "References" earlier in its body; skip it.
        if re.search("So far, we only", data):
            rslt = data.split("References\n")[2]
        else:
            rslt = data.split("References\n")[1]
    elif re.search("REFERENCES", data):
        rslt = data.split("REFERENCES")[1]
    else:
        # BUG FIX: rslt was previously unbound here, raising NameError.
        return ""
    # Cut trailing journal footer if present.
    if re.search("IEEE TRANSACTIONS", rslt):
        rslt = rslt.split("IEEE TRANSACTIONS")[0]
    # Keep only lines that look like reference entries: non-trivial length
    # and not made purely of brackets/digits/dots/pipes (page furniture).
    # BUG FIX: the original mutated the list while iterating (skipping the
    # element after each removal) and tested re.match("", line), which is
    # truthy for every line.
    kept = [line for line in rslt.split("\n")
            if len(line) > 15 and not re.match(r"^[\[\]0-9\.\ \|]+$", line)]
    return "\n".join(kept)
def getAuthors(data):
    """Extract the author line from the HTML conversion of the article.

    data -- file base name (without extension); reads <data>.html.
    Returns the first line of the concatenated candidate spans.
    """
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original leaked the open file object).
    with open(data + ".html", "r") as handle:
        soup = BeautifulSoup(handle, "html.parser")
    # Author names are typically rendered at 11-13px in pdf2txt output.
    font_spans = soup.find_all("span", attrs={"style": re.compile("font-size:1[1-3]px")})
    rslt = ""
    if font_spans != []:
        for span in font_spans:
            rslt += span.text
    else:
        # Fallback: take the 4th-6th spans of the document.
        for span in soup.find_all("span")[3:6]:
            rslt += span.text
    # Keep only the first line of the collected text.
    rslt = rslt.split('\n')[0]
    return rslt
def getConclusion(data):
    """Extract the conclusion section from raw article text.

    Returns an empty string when no conclusion marker is found.
    """
    if re.search("Conclusion", data):
        rslt = data.split('Conclusion')[1]
    elif re.search("CONCLUSION", data):
        rslt = data.split("CONCLUSION")[1]
    else:
        return ""
    # Cut off trailing acknowledgements in any common spelling/case.
    # BUG FIX: the original searched for the misspelling "ACKNOWLEDGEMNTS"
    # while splitting on "ACKNOWLEDGMENTS", and tested the whole document
    # instead of the extracted section.
    for marker in ("Acknowledgments", "ACKNOWLEDGMENTS",
                   "Acknowledgements", "ACKNOWLEDGEMENTS"):
        if marker in rslt:
            rslt = rslt.split(marker)[0]
    # The conclusion ends where the references begin.
    for marker in ("References", "REFERENCES"):
        if marker in rslt:
            rslt = rslt.split(marker)[0]
    lines = rslt.split("\n")
    # Drop a leading fragment that does not start with a capitalised word.
    if lines and not re.match(r"([A-Z])\w+", lines[0]):
        lines = lines[1:]
    # Keep only meaningful lines: long enough and not page furniture.
    # BUG FIX: the original deleted items while iterating with a manually
    # maintained index, which skipped lines after each deletion.
    kept = [line for line in lines
            if len(line) > 15
            and not re.match(r"^[\[\]0-9\.\ \|]+$", line)
            and not re.match(r"^[0-9]+$", line)
            and not re.search(r".0x0c.", line)]
    return "\n".join(kept)
def getIntroduction(data):
    """Extract the introduction section from raw article text.

    Returns an empty string when no introduction marker is found.
    """
    splitted = ""
    if re.search("Introduction", data):
        splitted = data.split("Introduction")[1]
    elif re.search("INTRODUCTION", data):
        splitted = data.split("INTRODUCTION")[1]
    # Stop at the start of the next section ("II" roman or "2" arabic),
    # mirroring the original heuristics (markers tested on the whole text).
    if re.search("\nII", data):
        splitted = splitted.split("II")[0]
    if re.search("2\n\n", data):
        splitted = splitted.split("2\n\n")[0]
    if re.search("\n\n2", data):
        splitted = splitted.split("\n\n2")[0]
    lines = splitted.split("\n")
    # Drop a leading fragment that does not start with a capitalised word.
    if lines and not re.match(r"([A-Z])\w+", lines[0]):
        lines = lines[1:]
    # BUG FIX: the original deleted items while iterating with a manually
    # maintained index, which skipped lines; filter into a new list instead.
    kept = [line for line in lines
            if line != "\n"
            and not re.match(r"^[\[\]0-9\.\ \|]+$", line)
            and not re.match(r"^[0-9]+$", line)
            and not re.search(r".0x0c.", line)]
    return "\n".join(kept)
def getDiscution(data):
    """Extract the discussion section from raw article text.

    Returns an empty string when no discussion marker is found.
    """
    if re.search("Discussion\n", data):
        rslt = data.split('Discussion\n')[1]
    elif re.search("DISCUSSION", data):
        rslt = data.split("DISCUSSION")[1]
    elif re.search("Discussion:", data):
        rslt = data.split("Discussion:")[1]
    else:
        return ""
    # The discussion ends where the conclusion begins.
    if re.search("Conclusion", rslt):
        rslt = rslt.split("Conclusion")[0]
    elif re.search("CONCLUSION", rslt):
        rslt = rslt.split("CONCLUSION")[0]
    # Keep only meaningful lines.
    # BUG FIX: the original removed items from the list while iterating it,
    # which skips the element following each removal.
    kept = [line for line in rslt.split("\n")
            if len(line) > 15 and not re.match(r"^[\[\]0-9\.\ \|]+$", line)]
    return "\n".join(kept)
def getCorps(data):
    """Return the article body: the text between the introduction heading and
    the conclusion, falling back to the references marker when that slice is
    too short to be plausible."""
    marker = 'introduction\n'
    lowered = data.lower()
    body = data[lowered.find(marker) + len(marker):lowered.rfind('conclusion')]
    if len(body) <= 10:
        inner = body.lower()
        body = body[inner.rfind(marker) + len(marker):inner.rfind('references\n')]
    return body
def showChoices(ls):
    """Print the selectable documents and return the chosen [(index, name)] list.

    ls -- output of ``ls *.pdf`` split on newlines; its final element is the
    empty string left by the trailing newline, hence the [:-1] below.
    Re-prompts recursively when an out-of-range index is entered.
    """
    id = 0
    for item in ls[:-1]:
        print("[{}] {}".format(str(id),item))
        id += 1
    print("Veuillez saisir la liste des documents:")
    choices = str(input())
    # Comma-separated indices, e.g. "0,2,3".
    choices = np.array(choices.split(","))
    rslt = []
    # For each index selected
    for choice in choices.astype(int):
        # If he enter a bad index
        if (choice < len(ls) - 1):
            rslt.append((choice, ls[choice]))
        elif (choice > len(ls) - 1):
            # Out of range: start the whole prompt over.
            print("Document inexistant !")
            return showChoices(ls)
        # NOTE(review): choice == len(ls) - 1 (the trailing empty entry) is
        # silently ignored here — confirm this is intentional.
    return rslt
# Clean up artefacts from any previous run.
os.system("rm -f *.txt")
os.system("rm -f *.xml")
# Delete spaces in files names
os.system("""
for file in *.pdf; do mv "$file" "$(echo $file | sed 's/ /_/g')"; done
""")
# List all the PDF available
ls = "ls *.pdf"
c = os.popen(ls).read()
splitted = c.split("\n")
# Read the args
if sys.argv[1] == "-t":
    out = 'txt'
elif sys.argv[1] == "-x":
    out = 'xml'
else:
    # NOTE(review): 'out' stays unbound on this branch, so the loop below
    # raises NameError instead of exiting — confirm intended behaviour.
    print("argument incorrect")
# For each selected files
for file in showChoices(splitted):
    choice, item = file
    # Convert each PDF to TXT
    convertPdfToTxt(item)
    # Cut the extension and keep the name only
    nameFile = item.split(".")[0]
    try:
        # Open the converted PDF
        with open(nameFile + ".txt", 'r') as f:
            convertPdfToHtml(item)
            data = f.read()
            # data = f.read().decode('utf-8')
            # Documents Informations
            fileName = nameFile
            title = getTitle(data, nameFile)
            author = getAuthors(nameFile)
            resume = getResume(data)
            bibliographie = getReferences(data)
            conclusion = getConclusion(data)
            introduction = getIntroduction(data)
            # introduction = "rien"
            discution = getDiscution(data)
            corps = getCorps(data)
            # If the user want to export as TXT
            if out == "txt":
                # Create the output file
                os.system('touch resultat.txt')
                # Write informations inside the file
                with open("resultat.txt", 'a') as res:
                    res.write("\n")
                    res.write("File name: " + str(fileName))
                    res.write("\n\n")
                    res.write("Authors: " + str(author))
                    res.write("\n\n")
                    res.write("Title: " + str(title))
                    res.write("\n\n")
                    res.write("Résumé: " + str(resume))
                    res.write("\n\n")
                    res.write("Introduction: " + str(introduction))
                    res.write("\n\n")
                    res.write("Corps:" + str(corps))
                    res.write("\n\n")
                    res.write("Discution: " + str(discution))
                    res.write("\n\n")
                    res.write("Conclusion: \n" + str(conclusion))
                    res.write("\n\n")
                    res.write("Biblio: " + str(bibliographie))
                    res.write("\n\n")
                    res.write("\n\n-------------------------------------------------------------\n")
                    # Redundant: the with-statement already closes 'res'.
                    res.close()
            # If the user want to export as XML
            elif out == "xml":
                # Build one <article> element per document.
                root = Element('article')
                preamble = SubElement(root, 'preamble')
                preamble.text = str(fileName)
                titre = SubElement(root, 'titre')
                titre.text = str(title)
                auteur = SubElement(root, 'auteur')
                auteur.text = str(author)
                abstract = SubElement(root, 'abstract')
                abstract.text = str(resume)
                abstract = SubElement(root, 'introduction')
                abstract.text = str(introduction)
                abstract = SubElement(root, 'corps')
                abstract.text = str(corps)
                abstract = SubElement(root, 'conclusion')
                abstract.text = str(conclusion)
                abstract = SubElement(root, 'discussion')
                abstract.text = str(discution)
                biblio = SubElement(root, 'biblio')
                biblio.text = str(bibliographie)
                xml = tostring(root)
                # Create the output file
                os.system('touch resultat.xml')
                # Write the XML object inside
                with open("resultat.xml", 'a') as res:
                    # NOTE(review): tostring() returns bytes; writing them to a
                    # text-mode file raises TypeError on Python 3 — likely needs
                    # tostring(root, encoding="unicode"). Confirm and fix.
                    res.write(xml)
                    # Redundant: the with-statement already closes 'res'.
                    res.close()
            # Redundant: the outer with-statement already closes 'f'.
            f.close()
    except Exception as e:
        print(nameFile + ".txt n'a pas pu être convertie !")
        print(e)
# Remove intermediate HTML files, if any were produced.
os.system("rm -f *.html") if len([o for o in os.listdir('.') if o.endswith('.html')]) > 0 else None
print("#####################################")
print("####### Conversion réussite ! #######")
print("#####################################")
from . import scraper
__all__ = ["scraper"]
|
class EverythingEquals:
    """Test double whose instances compare equal to absolutely anything."""

    def __eq__(self, other):
        # The comparand is irrelevant: every comparison matches.
        del other
        return True


# Shared wildcard instance for use in equality-based assertions.
mock_equal = EverythingEquals()
def assert_equal_dicts(d1, d2, ignore_keys=()):
    """Return True if *d1* and *d2* hold the same keys and values.

    Keys listed in *ignore_keys* are skipped on both sides.  On the first
    mismatch the offending key/value(s) are printed (debug aid) and False
    is returned.

    Fixes vs. the previous version: the failure-path print used ``d2[k1]``
    and raised KeyError when the key was absent from ``d2``; the mutable
    default ``ignore_keys=[]`` is now an immutable tuple; the needless
    inner function was flattened.
    """
    ignored = set(ignore_keys)
    for key, value in d1.items():
        if key in ignored:
            continue
        if key not in d2 or d2[key] != value:
            # d2.get avoids the KeyError the old code hit on a missing key.
            print(key, value, d2.get(key))
            return False
    for key, value in d2.items():
        if key not in ignored and key not in d1:
            print(key, value)
            return False
    return True
|
from django.contrib import admin
from .models import Comment
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for blog comments."""

    list_display = ('target', 'content', 'nickname', 'website', 'created_time')
    fields = ('target', 'content', 'nickname', 'website')

    def save_model(self, request, obj, form, change):
        """Stamp the comment with the logged-in user before delegating."""
        obj.owner = request.user
        return super().save_model(request, obj, form, change)
# Register your models here.
|
'''
/**
* created by cicek on 09.04.2018 16:44
*/
''' |
'''
This module contains generic structures that fit various needs.
These structures are not meant to be used as is(except void_desc)
and need to be included in a descriptor before it is sanitized.
Critical keys will be missing if they aren't sanitized.
'''
import supyr_struct
from supyr_struct.defs.frozen_dict import FrozenDict
# Descriptor for a zero-size placeholder node: it defines no fields and
# parses/serializes nothing.  Frozen so the one instance can be shared.
void_desc = FrozenDict(
    NAME='voided', TYPE=supyr_struct.field_types.Void, NAME_MAP={}
    )
def remaining_data_length(node=None, parent=None, attr_index=None,
                          rawdata=None, new_value=None, *args, **kwargs):
    '''
    Size getter for the amount of data left in the rawdata
    starting at kwargs['offset'] + kwargs['root_offset']
    If not provided, offset and root_offset default to 0.
    '''
    if new_value is not None:
        # An open-ended data stream has no size to set.
        return None
    if rawdata is not None:
        # Initial read: everything left in the raw byte stream.
        offset = kwargs.get('offset', 0)
        root_offset = kwargs.get('root_offset', 0)
        # NOTE(review): root_offset is ADDED to the remainder while the
        # docstring implies the data starts at offset + root_offset —
        # confirm the intended sign.
        return len(rawdata) - offset + root_offset
    if parent is not None:
        # Data already exists; report its length when it has one.
        remainder = parent[attr_index]
        try:
            return len(remainder)
        except Exception:
            pass
    return 0
# used when you just want to read the rest of the rawdata into a bytes object
# Field whose SIZE callback (remaining_data_length) consumes everything
# left in the stream from the current offset onward.
remaining_data = supyr_struct.field_types.BytearrayRaw(
    "remaining_data", SIZE=remaining_data_length
    )
# DEPRECATED. Remove when possible
def no_case(*a, **kw):
    """Do-nothing placeholder kept only for backward compatibility."""
    return None
|
from rest_framework import serializers
from .models import Funcionario
from .models import Registro
class FuncionarioSerializer(serializers.ModelSerializer):
    """Serializer exposing id, matricula and nome of a Funcionario."""

    class Meta:
        model = Funcionario
        fields = ['id', 'matricula', 'nome']
class FuncionarioSimpleSerializer(serializers.ModelSerializer):
    """Serializer exposing id, matricula and nome of a Funcionario.

    NOTE(review): currently identical to FuncionarioSerializer — confirm
    whether the two are meant to diverge or one can be removed.
    """

    class Meta:
        model = Funcionario
        fields = ['id', 'matricula', 'nome']
class RegistroSerializer(serializers.ModelSerializer):
    """Serializer for a Registro (daily time-clock record) with its four
    punch fields."""

    class Meta:
        model = Registro
        fields = ['id', 'funcionario', 'dia', 'primeiro_registro', 'segundo_registro', 'terceiro_registro', 'quarto_registro']
"""Blog administration."""
from django.contrib import admin
from .models import Post, Tag
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin panel for blog posts."""

    # Columns shown in the changelist; filters appear in the sidebar.
    list_display = ("__str__", "created", "modified", "published")
    list_filter = ("created", "published")
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    """Admin panel for tags (default ModelAdmin behaviour, no overrides)."""
|
import os,pty,serial,pymysql,time, datetime
from serial import SerialException
# Module-level connections: MySQL for sensor history, serial for the probe.
# NOTE(review): positional arguments to pymysql.connect (host, user,
# password, db) are deprecated in newer PyMySQL releases — consider
# switching to keyword arguments.
db = pymysql.connect("localhost", "flowerbot", "mycroft", "sensordata")
curs=db.cursor()
z1baudrate = 57600
z1port = '/dev/ttyACM0'  # set the correct port before run it
z1serial = serial.Serial(port=z1port, baudrate=z1baudrate)
# Read timeout in seconds for serial reads.
z1serial.timeout = 2
def getCurrentSensorData():
    """Poll the serial probe until a ' moist_...' line arrives and return
    a (moist, lux, pressure, hum, temp) tuple of floats.

    Returns the string "no data" after >100 non-matching lines, or
    "serial not open" when the port is closed.  Unparseable fields fall
    back to 0.
    """
    errorcount = 0
    if z1serial.is_open:
        while True:
            size = z1serial.inWaiting()
            if (size):
                try:
                    data = z1serial.readline()
                except SerialException as e:
                    # NOTE(review): on a SerialException `data` is left
                    # unbound and data.decode() below raises NameError on
                    # the first iteration — confirm intended handling.
                    print("serial exception")
                dataString = ""
                try:
                    dataString = data.decode();
                except UnicodeDecodeError as e:
                    print("decode error")
                # Expected line shape: " moist_X lux_X press_X hum_X temp_X"
                if dataString.startswith(" moist_"):
                    pieces = data.split()
                    if len(pieces) > 0:
                        moistPieces = pieces[0].decode().split("_")
                        if len(moistPieces) > 0:
                            moistString = moistPieces[1]
                    if len(pieces) > 1:
                        luxPieces = pieces[1].decode().split("_")
                        if len(luxPieces) > 0:
                            luxString = luxPieces[1]
                    if len(pieces) > 2:
                        pressurePieces = pieces[2].decode().split("_")
                        if len(pressurePieces) > 0:
                            pressureString = pressurePieces[1]
                    if len(pieces) > 3:
                        humPieces = pieces[3].decode().split("_")
                        if len(humPieces) > 0:
                            humString = humPieces[1]
                    if len(pieces) > 4:
                        tempPieces = pieces[4].decode().split("_")
                        if len(tempPieces) > 0:
                            tempString = tempPieces[1]
                    # NOTE(review): if a field above was missing, the
                    # corresponding *String name is unbound and float(...)
                    # raises NameError (not the ValueError caught here).
                    try:
                        moist = float(moistString)
                    except ValueError as er:
                        print("moisture not readable")
                        moist = 0
                    try:
                        lux = float(luxString)
                    except ValueError as er:
                        print("lux not readable")
                        lux = 0
                    try:
                        pressure = float(pressureString)
                    except ValueError as er:
                        print("pressure not readable")
                        pressure = 0
                    try:
                        hum = float(humString)
                    except ValueError as er:
                        print("humidity not readable")
                        hum = 0
                    try:
                        temp = float(tempString)
                    except ValueError as er:
                        print("temperature not readable")
                        temp = 0
                    return (moist, lux, pressure, hum, temp)
                else:
                    errorcount+=1
                    if errorcount > 100:
                        return "no data"
            # Discard any backlog and poll again after ~60 ms.
            z1serial.flushInput()
            time.sleep(60.0/1000.0)
    else:
        return "serial not open"
def getSensorData(time):
    """Return the sensor row whose date is closest to *time*.

    Result is a (moisture, lux, pressure, humidity, temperature) tuple,
    or None when the history table is empty.  Note: the parameter name
    shadows the module-level ``time`` import inside this function.
    """
    query = ("SELECT moisture, lux, pressure, humidity, temperature "
             "FROM history ORDER BY ABS(DATEDIFF(date, %s))")
    with db:
        curs.execute(query, time)
        row = curs.fetchone()
    return row
def getLastWatered():
    """Return the number of whole days since the most recent watering."""
    query = "SELECT date FROM watering ORDER BY date DESC"
    with db:
        curs.execute(query)
        newest = curs.fetchone()
    elapsed = datetime.datetime.now() - newest[0]
    return elapsed.days
|
#!/usr/bin/python
#############################################################################
# Licensed Materials - Property of HCL*
# (C) Copyright HCL Technologies Ltd. 2017, 2018 All rights reserved.
# * Trademark of HCL Technologies Limited
#############################################################################
import waconn
import argparse
# Script flow: (1) look up the job-definition id by name/workstation,
# (2) submit that definition into the target job stream instance,
# (3) submit the resulting in-plan job ad hoc, printing the new id.
parser = argparse.ArgumentParser(description='Add a job in to the model')
parser.add_argument('-jn','--jobName', help='job name', required=True, metavar="JOB_NAME")
parser.add_argument('-jw','--jobWorkstationName', help='job workstation name', required=True, metavar="JOB_WORKSTATION_NAME")
parser.add_argument('-jsw','--jsWorkstationName', help='job stream workstation name', required=False, metavar="JS_WORKSTATION_NAME")
parser.add_argument('-id','--jsInternalIdentifier', help='job stream internal id', required=True, metavar="JS_ID")
parser.add_argument('-ja','--jobAlias', help='job alias', required=True, metavar="JOB_ALIAS")
args = parser.parse_args()
conn = waconn.WAConn('waconn.ini','/twsd')
# first rest call to get the jd id
url = '/model/jobdefinition/header/query'
filters = {
    "filters": {
        "jobDefinitionFilter": {
            "jobDefinitionName": args.jobName,
            "workstationName":args.jobWorkstationName
        }
    }
}
# we get the first result ('How-Many: 1' caps the response to one row)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'How-Many': '1'}
print('Connecting to '+url)
resp = conn.post(url, json=filters, headers=headers)
r = resp.json()
# NOTE(review): if the query returns no rows, jobId is never bound and the
# print below raises NameError — confirm whether that is acceptable here.
for jd in r:
    jobId=jd["id"]
print("the jd id is: " + jobId)
# The job-stream workstation defaults to the job's workstation.
jsWorkstationName=args.jobWorkstationName
if args.jsWorkstationName:
    jsWorkstationName = args.jsWorkstationName
# '%3B' is a URL-encoded ';' separating workstation and internal id.
url = '/plan/current/jobstream/' + jsWorkstationName + '%3B' + args.jsInternalIdentifier + '/action/submit_job'
filters = {
    "jobDefinitionId": jobId,
    "alias": args.jobAlias
}
# we get the first result
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
print('Connecting to '+url)
resp = conn.post(url, json=filters, headers=headers)
jobInplanInstance = resp.json()
# now we can submit the job into the js
url = '/plan/current/job/action/submit_ad_hoc_job'
filters = {
    "job": jobInplanInstance
}
# we get the first result
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
print ('Connecting to '+url)
resp = conn.post(url, json=filters, headers=headers)
r = resp.json()
print ('Submitted '+r["id"])
|
"""
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-10-07 03:37:23
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2021-10-14 00:27:33
"""
from typing import Dict, List, Optional
import numpy as np
import torch
from pyutils.general import logger
from pyutils.torch_train import set_torch_deterministic
from torch import nn
from .layers import SuperBlockConv2d, SuperBlockLinear
from .layers.super_mesh import SuperCRLayer, SuperDCFrontShareLayer, super_layer_name_dict
from .layers.utils import GradientMask
__all__ = ["SuperModel_CLASS_BASE"]
class SuperModel_CLASS_BASE(nn.Module):
    """Base nn.Module for SuperMesh-style searchable photonic models.

    Owns a shared ``super_layer`` (created in ``build_super_layer``) whose
    DC/CR/PS sub-layers realize the weights of every SuperBlockConv2d /
    SuperBlockLinear in the model.  Provides parameter partitioning
    (architecture vs. weight parameters), area/crossing penalty losses with
    ALM-style multipliers, and utilities to sample and fix a feasible
    sub-mesh under an area budget.
    """

    # Layer types whose transfer matrices come from the shared super mesh.
    _conv_linear = (
        SuperBlockConv2d,
        SuperBlockLinear,
    )

    def __init__(
        self,
        *args,
        super_layer_name: str = "ps_dc_cr",
        super_layer_config: Dict = {},
        device=torch.device("cuda:0"),
        **kwargs,
    ):
        # NOTE(review): super_layer_name / super_layer_config / device are
        # accepted but not stored here; presumably subclasses consume them
        # before calling this __init__ — confirm.
        super().__init__(*args, **kwargs)
        # ALM multiplier and auxiliary (slack) variable for the area
        # constraint; frozen Parameters so they move with the model.
        self.area_multiplier = nn.Parameter(torch.zeros(1), requires_grad=False)
        self.area_aux_variable = nn.Parameter(torch.zeros(1), requires_grad=False)
        self.area = torch.tensor(0)

    def adjust_min_max_blocks(self, arch: Dict = {}):
        """Clamp arch["n_blocks"] / arch["n_front_share_blocks"] in place so
        the searchable depth range respects the area upper bound."""
        ps_weight = arch["device_cost"]["ps_weight"]
        dc_weight = arch["device_cost"]["dc_weight"]
        cr_weight = arch["device_cost"]["cr_weight"]
        upper_bound = arch["device_cost"]["area_upper_bound"]
        ## we assume the min area for each layer consists of k PS, 1 DC, and 0 CR
        n_waveguides = self.block_list[0]
        min_area_per_layer = ps_weight * n_waveguides + dc_weight * 1 + cr_weight * 0
        max_area_per_layer = (
            ps_weight * n_waveguides
            + dc_weight * (n_waveguides // 2)
            + cr_weight * ((n_waveguides // 2) * (n_waveguides // 2 - 1) // 2)
        )
        ## calculate max block
        n_blocks = int(upper_bound / min_area_per_layer)
        n_blocks = n_blocks if n_blocks % 2 == 0 else n_blocks + 1  # cast to next even number
        ## calculate min block
        n_front_share_blocks = int(upper_bound * 0.7 / max_area_per_layer)
        n_front_share_blocks = (
            n_front_share_blocks if n_front_share_blocks % 2 == 0 else n_front_share_blocks - 1
        )
        old_n_blocks = arch["n_blocks"]
        old_n_front_share_blocks = arch["n_front_share_blocks"]
        n_blocks = min(n_blocks, old_n_blocks)
        n_front_share_blocks = min(n_blocks, max(n_front_share_blocks, old_n_front_share_blocks))
        arch["n_blocks"] = n_blocks
        arch["n_front_share_blocks"] = n_front_share_blocks
        logger.info(
            f"Max block number 'n_blocks' is adjusted from {old_n_blocks} to {n_blocks} due to area constraint: A <= {upper_bound}, min block size = {min_area_per_layer}"
        )
        logger.info(
            f"Min block number 'n_front_share_blocks' is adjusted from {old_n_front_share_blocks} to {n_front_share_blocks} due to area constraint: A <= {upper_bound}, max block size = {max_area_per_layer}"
        )

    def build_super_layer(self, name: str, *args, **kwargs):
        """Instantiate the shared super layer and attach it (plus per-layer
        PS layers) to every conv/linear module in the model."""
        ## must be called after build_layers()
        # only one super layer since we need to share DC and CR layers
        self.adjust_min_max_blocks(kwargs["arch"])
        self.super_layer = super_layer_name_dict[name](*args, **kwargs)
        self.super_layer.build_sampling_coefficients()
        for m in self.super_layer.super_layers_all:
            if isinstance(m, SuperCRLayer) and m.weight.requires_grad:
                m.reset_parameters(alg=kwargs["arch"]["cr_layer_init_alg"])
        for m in self.modules():
            ## build independent ps layers for each CONV/Linear layer
            if isinstance(m, (self._conv_linear)):
                m.super_layer = self.super_layer
                m.super_ps_layers = self.super_layer.build_ps_layers(m.grid_dim_x, m.grid_dim_y)
        self._total_trainable_parameters = set([p for p in self.parameters() if p.requires_grad])
        self.partition_parameters()

    def partition_parameters(self, arch_param_list=["theta"]):
        """Split trainable parameters into self.arch_params (architecture)
        and self.weight_params (everything else)."""
        ## collect architecture parameters
        self.arch_params = []
        if "theta" in arch_param_list:
            self.arch_params.append(self.super_layer.sampling_coeff)
        if "perm" in arch_param_list:
            for layer in self.super_layer.super_layers_all:
                if isinstance(layer, (SuperCRLayer,)):
                    if layer.weight.requires_grad:
                        self.arch_params.append(layer.weight)
        if "dc" in arch_param_list:
            for layer in self.super_layer.super_layers_all:
                if isinstance(layer, (SuperDCFrontShareLayer,)):
                    if layer.weight.requires_grad:
                        self.arch_params.append(layer.weight)
        self.weight_params = list(self._total_trainable_parameters - set(self.arch_params))

    def set_super_layer_transfer_matrix(self):
        """Evaluate the mesh on an identity to get the U/V transfer matrices
        (second/first half of the blocks) and push them into each layer."""
        x = torch.eye(self.super_layer.n_waveguides, device=self.device, dtype=torch.cfloat)
        n_blocks = self.super_layer.n_blocks
        V = self.super_layer.forward(x, start_block=0, end_block=n_blocks // 2)
        U = self.super_layer.forward(x, start_block=n_blocks // 2, end_block=n_blocks)
        for m in self.modules():
            if isinstance(m, self._conv_linear):
                m.set_super_layer_transfer_matrices(U, V)

    def set_sample_arch(self, sample_arch: List) -> None:
        """Forward a sampled architecture to the super layer (if built)."""
        if getattr(self, "super_layer", None):
            self.super_layer.set_sample_arch(sample_arch)

    @property
    def arch_space(self) -> List:
        """Per-layer architecture choices plus the sampled-block-count axis."""
        space = [layer.arch_space for layer in self.super_layer.super_layers_all]
        # for the number of sampled blocks
        space.append(
            list(range(self.super_layer.n_front_share_blocks, self.super_layer.n_blocks + 1, 2))
        )  # n_sample_block must be even number
        return space

    def get_parameters(self, name_list=[]):
        """Collect parameter groups by symbolic name: theta (sampling
        coefficients), weight (conv/linear/BN), ps_phi, dc_t, cr_p."""
        params = []
        for name in name_list:
            if name == "theta":
                params.append(self.super_layer.sampling_coeff)
            elif name == "weight":
                for m in self.modules():
                    if isinstance(m, self._conv_linear):
                        params.append(m.weight)
                        if m.bias is not None:
                            params.append(m.bias)
                    elif isinstance(m, torch.nn.BatchNorm2d):
                        params.append(m.weight)
                        params.append(m.bias)
            elif name == "ps_phi":
                for m in self.modules():
                    if isinstance(m, self._conv_linear):
                        params += [i.weight for i in m.super_ps_layers]
            elif name == "dc_t":
                for layer in self.super_layer.super_layers_all:
                    if isinstance(layer, SuperDCFrontShareLayer) and layer.trainable:
                        params.append(layer.weight)
            elif name == "cr_p":
                for layer in self.super_layer.super_layers_all:
                    if isinstance(layer, SuperCRLayer) and layer.trainable:
                        params.append(layer.weight)
        return params

    def get_perm_loss(self):
        """Return the per-layer permutation losses as plain floats."""
        loss = []
        for layer in self.super_layer.super_layers_all:
            if hasattr(layer, "get_perm_loss"):
                loss.append(layer.get_perm_loss().detach().data.item())
        return loss

    def get_alm_perm_loss(self, rho: float = 0.1):
        """Sum the augmented-Lagrangian permutation losses over all layers."""
        loss = 0
        for layer in self.super_layer.super_layers_all:
            if hasattr(layer, "get_alm_perm_loss"):
                loss = loss + layer.get_alm_perm_loss(rho=rho)
        return loss

    def update_alm_multiplier(self, rho: float = 0.1, max_lambda: Optional[float] = None):
        """Propagate an ALM multiplier update to every layer that has one."""
        for layer in self.super_layer.super_layers_all:
            if hasattr(layer, "update_alm_multiplier"):
                layer.update_alm_multiplier(rho=rho, max_lambda=max_lambda)

    def get_alm_multiplier(self):
        """Return the mean ALM multiplier of each layer that defines one."""
        return [
            layer.alm_multiplier.data.mean().item()
            for layer in self.super_layer.super_layers_all
            if hasattr(layer, "alm_multiplier")
        ]

    def _find_first_active_block(self, mask):
        """Return the index of the first active block to penalize, or None
        when no block is active (no penalty)."""
        # first active block to be penalized is either in U or V , depends on which unitary has deeper structures
        with torch.no_grad():
            n_blk = int(np.ceil(mask.size(0) / 2))
            blk_U = mask[:n_blk]
            blk_V = mask[n_blk:]
            blk_U_sum = blk_U.sum()
            blk_V_sum = blk_V.sum()
            if blk_U_sum > blk_V_sum:  # penalize first blk in U
                i = 0
                while i < n_blk:
                    if mask[i] > 0.5:  # 1 in boolean mask, larger than 0.5 in soft mask
                        return i
                    i += 1
            elif blk_U_sum < blk_V_sum:  # penalize first blk in V
                i = n_blk
                while i < mask.size(0):
                    if mask[i] > 0.5:
                        return i
                    i += 1
            else:
                if blk_U_sum == 0:  # no active block
                    return None  # no penalty
                i = 0
                while i < n_blk:  # same depth, penalize U
                    if mask[i] > 0.5:
                        return i
                    i += 1

    def get_crossing_density_loss(self, margin=1):
        """Penalize CR layers whose crossing count exceeds the maximum
        density times *margin*, pushing them toward the identity."""
        permutation_list = []
        n_crossings = []
        n_waveguides = self.block_list[0]
        max_crossing_density = (n_waveguides // 2) * (n_waveguides // 2 - 1) // 2
        for layer in self.super_layer.super_layers_all[:-1]:  # remove the last pseudo-permutation
            if isinstance(layer, SuperCRLayer):
                permutation_list.append(layer.build_weight())
                n_crossings.append(layer.get_num_crossings())
        self.num_crossings = n_crossings
        loss = torch.zeros(1, device=self.device)
        eye = torch.eye(n_waveguides, dtype=torch.float, device=self.device)
        for n_cr, p in zip(n_crossings, permutation_list):
            if n_cr > max_crossing_density * margin:
                loss = loss + torch.nn.functional.mse_loss(p, eye)
        return loss

    def get_crossing_loss(
        self,
        alg: str = "mse",
        crossing_weight: float = 1.0,
        arch_mask=None,
        first_active_block_idx: Optional[int] = None,
    ):
        """Arch-mask-weighted sum of per-layer crossing losses.

        Only alg in {"kl", "mse"} is supported; anything else raises
        NotImplementedError.
        """
        if alg in {"kl", "mse"}:
            loss = []
            arch_mask = arch_mask[:-1]
            for layer in self.super_layer.super_layers_all[:-1]:  # remove the last permutation
                if hasattr(layer, "get_crossing_loss"):
                    loss.append(layer.get_crossing_loss(alg=alg))
            loss = torch.stack(loss).dot(arch_mask)
            return loss
        else:
            raise NotImplementedError(f"Only support alg = (kl, mse), but got {alg}")

    def get_dc_loss(
        self, dc_weight: float = 1.0, arch_mask=None, first_active_block_idx: Optional[int] = None
    ):
        """Arch-mask-weighted DC usage penalty scaled by *dc_weight*."""
        # first active block:
        # int >=0 : only penalize the first active block
        # -1: no penalty
        # None: penalize all
        weight_list = []
        for layer in self.super_layer.super_layers_all:
            if isinstance(layer, SuperDCFrontShareLayer):
                weight_list.append(
                    layer.weight_quantizer(layer.weight).mul(2 / (2 ** 0.5 - 2)).add(2 / (2 - 2 ** 0.5)).sum()
                )  # {sqrt(2)/2, 1} -> {1, 0}
        weight_list = torch.stack(weight_list)
        # arch_mask = self.super_layer.arch_mask[:, 1]
        # arch_mask = GradientMask.apply(arch_mask, first_active_block_idx)
        return weight_list.dot(arch_mask).mul(dc_weight)

    def get_ps_loss(
        self, ps_weight: float = 1.0, arch_mask=None, first_active_block_idx: Optional[int] = None
    ):
        """Phase-shifter area penalty: active blocks x waveguides x weight."""
        # arch_mask = self.super_layer.arch_mask[:, 1]
        # arch_mask = GradientMask.apply(arch_mask, first_active_block_idx)
        return arch_mask.sum().mul(self.block_list[0] * ps_weight)

    def get_area_bound_loss(
        self,
        ps_weight: float = 1.0,
        dc_weight: float = 1.0,
        cr_weight: float = 1.0,
        upper_bound: float = 100,
        lower_bound: float = 70,
        first_active_block: bool = False,
    ):
        """Penalty that grows when the (soft) mesh area leaves the band
        [lower_bound * 1.05, upper_bound * 0.95]; zero inside the band.

        Side effect: stores the current soft area estimate in self.area.
        """
        if first_active_block:
            first_active_block_idx = self._find_first_active_block(self.super_layer.arch_mask.data[:, 1])
        else:
            first_active_block_idx = None
        arch_mask = GradientMask.apply(
            self.super_layer.arch_mask[..., 1],
            first_active_block_idx,
            self.super_layer.arch_mask.size(0),  # scale the penalty
        )
        ps_loss = self.get_ps_loss(ps_weight, arch_mask=arch_mask)
        dc_loss = self.get_dc_loss(dc_weight, arch_mask=arch_mask)  # .detach()
        cr_loss = self.get_crossing_loss(alg="mse", crossing_weight=cr_weight, arch_mask=arch_mask)
        with torch.no_grad():
            cr_area_soft = self.get_num_crossings_soft(arch_mask=arch_mask)[1] * cr_weight
            self.area = ps_loss.data + dc_loss.data + cr_area_soft
        area_loss = ps_loss + dc_loss + 100 * cr_loss
        if self.area.item() > upper_bound * 0.95:
            loss = area_loss / (upper_bound * 0.95) - 1  # penalize area violation with a margin
            return loss
        elif self.area.item() < lower_bound * 1.05:
            loss = 1 - area_loss / (lower_bound * 1.05)
            return loss
        else:
            return torch.zeros_like(area_loss)  # accelerate BP

    def update_area_aux_variable(
        self,
        ps_weight: float = 1.0,
        dc_weight: float = 1.0,
        cr_weight: float = 1.0,
        upper_bound: float = 100,
        rho: float = 0.1,
    ):
        """ALM bookkeeping: refresh the area slack variable from the current
        area margin."""
        with torch.no_grad():
            ps_loss = self.get_ps_loss(ps_weight).detach()
            dc_loss = self.get_dc_loss(dc_weight).detach()
            # NOTE(review): get_crossing_loss only accepts alg in
            # {"kl", "mse"} and raises NotImplementedError for "nn", so this
            # call cannot succeed as written — confirm the intended alg.
            # Also note get_ps_loss/get_dc_loss are called without arch_mask
            # here (defaults to None) — confirm.
            cr_loss = self.get_crossing_loss(alg="nn", crossing_weight=cr_weight).detach()
            self.updated_area_margin = ps_loss + dc_loss + cr_loss - upper_bound
            self.area_aux_variable.data.fill_(
                max(0, -(self.updated_area_margin + self.area_multiplier / rho).item())
            )

    def update_area_multiplier(self, rho: float = 0.1):
        """ALM dual update for the area constraint multiplier."""
        self.area_multiplier.data += rho * (self.updated_area_margin + self.area_aux_variable)

    def get_area_multiplier(self):
        """Return the raw area-constraint multiplier tensor."""
        return self.area_multiplier.data

    def build_sampling_coefficient(self):
        """Delegate (re)building of sampling coefficients to the super layer."""
        self.super_layer.build_sampling_coefficients()

    def set_gumbel_temperature(self, T: float = 5.0):
        """Set the Gumbel-softmax temperature used for arch sampling."""
        self.super_layer.set_gumbel_temperature(T)

    def build_arch_mask(self, mode="random", batch_size: int = 32):
        """(Re)sample the architecture mask in the super layer."""
        self.super_layer.build_arch_mask(mode=mode, batch_size=batch_size)

    def get_num_crossings(self):
        """Return (per-layer crossing counts, total), using the cached value
        from get_crossing_density_loss when available."""
        if getattr(self, "num_crossings", None) is not None:
            return self.num_crossings, sum(self.num_crossings)
        n_cr = []
        for layer in self.super_layer.super_layers_all:
            if hasattr(layer, "get_num_crossings"):
                n_cr.append(layer.get_num_crossings())
        total = sum(n_cr)
        return n_cr, total

    def get_num_crossings_soft(self, arch_mask=None):
        """Like get_num_crossings but the total is weighted by arch_mask."""
        n_cr = []
        for layer in self.super_layer.super_layers_all:
            if hasattr(layer, "get_num_crossings"):
                n_cr.append(layer.get_num_crossings())
        total = sum(i * j for i, j in zip(n_cr, arch_mask))
        return n_cr, total

    def get_perm_matrix(self):
        """Return the detached permutation matrices of all ALM layers."""
        with torch.no_grad():
            return [
                layer.build_weight().detach().data
                for layer in self.super_layer.super_layers_all
                if hasattr(layer, "alm_multiplier")
            ]

    def get_num_dc(self):
        """Return (per-layer DC counts, total); a negative weight marks an
        active directional coupler."""
        n_dc = [
            int((layer.weight.data < 0).float().sum().item())
            for layer in self.super_layer.super_layers_all
            if isinstance(layer, SuperDCFrontShareLayer)
        ]
        return n_dc, sum(n_dc)

    def reset_parameters(self, random_state: int = None) -> None:
        """Re-initialize conv/linear layers (optionally seeded) and reset
        affine BatchNorm layers to identity."""
        for name, m in self.named_modules():
            if isinstance(m, self._conv_linear):
                if random_state is not None:
                    # deterministic seed, but different for different layer, and controllable by random_state
                    set_torch_deterministic(random_state + sum(map(ord, name)))
                m.reset_parameters()
            elif isinstance(m, nn.BatchNorm2d) and m.affine:
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def train(self, mode: bool = True):
        """Standard nn.Module.train passthrough (no extra behavior)."""
        super().train(mode)

    def enable_arch_params(self):
        """Make architecture parameters trainable."""
        for p in self.arch_params:
            p.requires_grad_(True)

    def freeze_arch_params(self):
        """Stop gradients through architecture parameters."""
        for p in self.arch_params:
            p.requires_grad_(False)

    def enable_weight_params(self):
        """Make non-architecture (weight) parameters trainable."""
        for p in self.weight_params:
            p.requires_grad_(True)

    def freeze_weight_params(self):
        """Stop gradients through non-architecture (weight) parameters."""
        for p in self.weight_params:
            p.requires_grad_(False)

    def set_phase_noise(self, noise_std: float = 0.0):
        """Inject phase noise into every PS layer of each conv/linear."""
        for m in self.modules():
            if isinstance(m, self._conv_linear):
                for layer in m.super_ps_layers:
                    layer.set_phase_noise(noise_std=noise_std)

    def set_dc_noise(self, noise_std: float = 0.0):
        """Inject transmission noise into every DC layer of the mesh."""
        for m in self.super_layer.super_layers_all:
            if isinstance(m, SuperDCFrontShareLayer):
                m.set_dc_noise(noise_std=noise_std)

    def load_arch_solution(self, checkpoint):
        """Load only the super-layer weights out of a full checkpoint."""
        logger.info(f"Loading architecture solution from {checkpoint} ...")
        state_dict = torch.load(checkpoint, map_location=self.device)
        state_dict = state_dict.get("state_dict", state_dict)
        state_dict_new = {}
        for name, p in state_dict.items():
            if name.startswith("super_layer."):
                # strip the "super_layer." prefix (12 characters)
                state_dict_new[name[12:]] = p
        self.super_layer.load_state_dict(state_dict_new)

    def fix_arch_solution(self):
        """Freeze the searched DC/CR layers and block mask, then rebuild a
        near-hard (low-temperature softmax) arch mask."""
        logger.info("Fix DC and CR layer solution...")
        self.super_layer.fix_layer_solution()
        logger.info("Fix Block solution...")
        self.super_layer.fix_block_solution()
        self.super_layer.set_gumbel_temperature(0.001)
        self.super_layer.build_arch_mask(mode="softmax")

    def sample_submesh(
        self,
        n_samples: int = 100,
        ps_weight: float = 1.0,
        dc_weight: float = 1.0,
        cr_weight: float = 1.0,
        upper_bound: float = 100,
        lower_bound: float = 80,
    ):
        """Randomly sample up to *n_samples* block selections from the learned
        theta distribution and keep the first one whose area lies inside
        [lower_bound, upper_bound], then freeze the block solution."""
        logger.info("Fix DC and CR layer solution...")
        self.super_layer.fix_layer_solution()
        with torch.no_grad():
            num_crossings = self.get_num_crossings()
            num_dc = self.get_num_dc()
        logger.info(f"num CRs: {num_crossings}")
        logger.info(f"num DCs: {num_dc}")
        logger.info("Search feasible theta...")
        solution = None
        import tqdm

        with torch.no_grad():
            area_list = []
            theta = self.super_layer.sampling_coeff.data.clone()
            distribution = torch.softmax(theta, dim=-1).cpu().numpy()
            n_blocks = distribution.shape[0]
            n_ops = theta.size(-1)
            for i in tqdm.tqdm(range(n_samples)):
                # Sample one op per block and force a (near) one-hot theta.
                for j in range(n_blocks):
                    op = np.random.choice(n_ops, p=distribution[j, :])
                    self.super_layer.sampling_coeff.data[j] = -1000
                    self.super_layer.sampling_coeff.data[j, op] = 1000
                self.super_layer.set_gumbel_temperature(2 ** (i / n_samples))
                self.super_layer.build_arch_mask(mode="softmax")
                area_loss = (
                    self.get_area_bound_loss(
                        ps_weight=ps_weight,
                        dc_weight=dc_weight,
                        cr_weight=cr_weight,
                        upper_bound=upper_bound,
                        lower_bound=lower_bound,
                        first_active_block=False,
                    )
                    .detach()
                    .data.item()
                )
                area_list.append(self.area.item())
                # if area_loss < 1e-8: # meet area bound constraints
                if lower_bound <= self.area.item() <= upper_bound:  # meet area bound constraints
                    solution = (
                        self.area.item(),
                        self.super_layer.sampling_coeff.data.argmax(dim=-1).cpu().numpy().tolist(),
                        num_crossings,
                        num_dc,
                    )
                    break
                # Restore the original theta before the next sample.
                self.super_layer.sampling_coeff.data.copy_(theta)
            else:
                logger.info(
                    f"No feasible submesh found. Area ranges: [{np.min(area_list), np.max(area_list)}], which violates area constraints [{lower_bound}, {upper_bound}]"
                )
                solution = (None, None, None, None)
        # NOTE(review): when no feasible submesh was found, solution holds
        # Nones and the arithmetic below raises TypeError — confirm whether
        # an early return/raise is intended in that case.
        total_cr = int(sum(i * j for i, j in zip(solution[2][0], solution[1])))
        total_dc = int(sum(i * j for i, j in zip(solution[3][0], solution[1])))
        ps_solution = [self.super_layer.n_waveguides] * len(solution[1])
        total_ps = int(sum(i * j for i, j in zip(ps_solution, solution[1])))
        logger.info(f"Found possible solution: \n")
        logger.info(f"\t Area = {solution[0]:.4f}")
        logger.info(f"\tBlock mask = {solution[1]}")
        logger.info(
            f"\t CR = {solution[2][0]}, total #CR = {total_cr:4d}, total CR area = {total_cr*cr_weight:.4f}"
        )
        logger.info(
            f"\t DC = {solution[3][0]}, total #DC = {total_dc:4d}, total DC area = {total_dc*dc_weight:.4f}"
        )
        logger.info(
            f"\t PS = {ps_solution}, total #PS = {total_ps:4d}, total PS area = {total_ps*ps_weight:.4f}"
        )
        logger.info("Fix arch solution and enable fast mode")
        # Sanity check: recomputed area must agree with the sampled area.
        assert (solution[0] - (total_cr * cr_weight + total_dc * dc_weight + total_ps * ps_weight)) / (
            solution[0]
        ) < 0.01
        self.super_layer.fix_block_solution()

    def check_perm(self):
        """Return per-CR-layer validity flags for the hardened permutations."""
        with torch.no_grad():
            res = []
            for m in self.super_layer.super_layers_all:
                if isinstance(m, SuperCRLayer):
                    res.append(m.check_perm(m.build_weight().detach().data.argmax(dim=-1)))
            return res

    def sinkhorn_perm(self, n_step=10, t_min=0.1, noise_std=0.01, svd=True, legal_mask=None):
        """Project each CR layer's weight onto (a relaxation of) a
        permutation via the layer's unitary_projection routine.

        NOTE(review): the `if True:  # not legal:` guard is hard-coded, so
        `legal` is currently unused and every CR layer is projected; the
        commented-out lines are earlier sinkhorn experiments left in place.
        """
        with torch.no_grad():
            i = 0
            for m in self.super_layer.super_layers_all:
                if isinstance(m, SuperCRLayer):
                    legal = legal_mask[i]
                    if True:  # not legal:
                        w = m.build_weight().data.abs()
                        # logger.info(f"Layer {i}: {w}")
                        # w = sinkhorn(w, n_step=n_step, t_min=t_min, noise_std=0, svd=False)
                        # w = w.div(0.01).softmax(dim=-1)
                        # logger.info(f"Layer {i}: {w}")
                        # logger.info(w.sum(dim=-2))
                        # logger.info(w.sum(dim=-1))
                        # logger.info(w)
                        # w = sinkhorn(w, n_step=n_step, t_min=t_min, noise_std=0.1, svd=True)
                        # w = sinkhorn(w, n_step=n_step, t_min=t_min, noise_std=0, svd=False)
                        w = m.unitary_projection(w, n_step=n_step, t=t_min, noise_std=noise_std)
                        # logger.info(f"Layer {i}: {w}")
                        # logger.info(w)
                        # logger.info(w.sum(dim=-2))
                        # logger.info(w.sum(dim=-1))
                        m.weight.data.copy_(w)
                    i += 1

    def forward(self, x):
        """Abstract: subclasses must implement the actual forward pass."""
        raise NotImplementedError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.