content stringlengths 5 1.05M |
|---|
"""Ajout population suppression pathologie respi et allergie
Revision ID: e741811f9af0
Revises: ca7c02fcb035
Create Date: 2021-03-10 17:45:26.560460
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e741811f9af0'
down_revision = 'ca7c02fcb035'
branch_labels = None
depends_on = None
def upgrade():
    """Collapse the allergie_pollen / pathologie_respiratoire boolean columns
    into a single string-array column 'population' on table 'inscription'."""
    op.add_column('inscription', sa.Column('population', postgresql.ARRAY(sa.String()), nullable=True))
    # Order matters: rows with both flags set are rewritten by the third
    # UPDATE, which overwrites the single-element arrays written by the
    # first two. Rows with neither flag keep population = NULL.
    op.execute("UPDATE inscription SET population = ARRAY['allergie_pollens'] WHERE allergie_pollen = true;")
    op.execute("UPDATE inscription SET population = ARRAY['pathologie_respiratoire'] WHERE pathologie_respiratoire = true;")
    op.execute("UPDATE inscription SET population = ARRAY['allergie_pollens', 'pathologie_respiratoire'] WHERE allergie_pollen = true AND pathologie_respiratoire = true;")
    op.drop_column('inscription', 'allergie_pollen')
    op.drop_column('inscription', 'pathologie_respiratoire')
def downgrade():
    """Restore the two boolean columns from the 'population' array.

    NOTE(review): rows whose array lacks the tag are left NULL rather than
    set to false — acceptable for a nullable boolean, but not a byte-exact
    inverse of upgrade() if the columns previously held explicit false.
    """
    op.add_column('inscription', sa.Column('pathologie_respiratoire', sa.BOOLEAN(), autoincrement=False, nullable=True))
    op.execute("UPDATE inscription SET pathologie_respiratoire = true WHERE 'pathologie_respiratoire' = ANY(population)")
    op.add_column('inscription', sa.Column('allergie_pollen', sa.BOOLEAN(), autoincrement=False, nullable=True))
    op.execute("UPDATE inscription SET allergie_pollen = true WHERE 'allergie_pollens' = ANY(population)")
    op.drop_column('inscription', 'population')
|
#!/usr/bin/env python
#
# Copyright (c) 2016, SICS, Swedish ICT
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Joakim Eriksson, joakime@sics.se
#
# Read out trailer info from a flash file. Or generate a trailer info
# and append to a file (to create a flash file).
#
import sys, binascii,datetime,argparse
magic = 0x2144DF1C
inc = 0
def get_version(inc):
    """Build the 8-character version field: a fixed 4-byte prefix
    (00:90:da:02) followed by a 32-bit word encoding today's date and the
    low 6 bits of *inc*."""
    today = datetime.datetime.now()
    word = 0x80000000
    word |= (today.year & 0x1fff) << 17
    word |= (today.month & 0xf) << 13
    word |= (today.day & 0x1f) << 8
    word |= 0x40
    word |= inc & 0x3f
    prefix = [0x00, 0x90, 0xda, 0x02]
    payload = [(word >> shift) & 0xff for shift in (24, 16, 8, 0)]
    return ''.join(chr(b) for b in prefix + payload)
def get_crc(data):
    """Return the CRC-32 of *data* as an unsigned 32-bit integer."""
    # Mask to force an unsigned result regardless of platform/Python version.
    return binascii.crc32(data) & 0xffffffff
def rev32(crc32):
    """Byte-swap a 32-bit value (reverse its endianness)."""
    b0 = crc32 & 0xff
    b1 = (crc32 >> 8) & 0xff
    b2 = (crc32 >> 16) & 0xff
    b3 = (crc32 >> 24) & 0xff
    return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
def clean_hex(data):
    """Strip an optional '0x' prefix and 'ULL' suffix from a hex literal
    string (as copied from C source) and return the bare digits."""
    prefix, suffix = '0x', 'ULL'
    if data.startswith(prefix):
        data = data[len(prefix):]
    if data.endswith(suffix):
        data = data[:-len(suffix)]
    return data
# ---------------------------------------------------------------------------
# Command-line driver (Python 2): either parse and print an existing trailer
# (-v* flags) or, when -T/-V/-A/-P are all given, append a new trailer + CRC
# to the input and emit the resulting flash image on stdout.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Parse and create trailers.')
parser.add_argument("-vV", action="store_true", help="print version")
parser.add_argument("-vT", action="store_true", help="print type")
parser.add_argument("-vS", action="store_true", help="print image start")
parser.add_argument("-vC", action="store_true", help="print CRC")
parser.add_argument("-vP", action="store_true", help="print product type")
parser.add_argument("-v", action="store_true", help="verbose output")
parser.add_argument("-V", help="image version");
parser.add_argument("-I", help="image increment");
parser.add_argument("-T", help="image type");
parser.add_argument("-A", help="image start address");
parser.add_argument("-P", help="product type");
parser.add_argument("-i", help="input file");
args = parser.parse_args()
if (args.i):
    file = open(args.i, 'r')
else:
    file = sys.stdin
# Each optional argument, when present, defines a module-level name; their
# presence in globals() below decides whether a trailer can be created.
if args.P:
    product = binascii.unhexlify(clean_hex(args.P))
if args.T:
    image_type = binascii.unhexlify(clean_hex(args.T))
if args.I:
    inc = int(args.I)
if args.V:
    # NOTE(review): only the literal value "now" produces a version; any
    # other -V value leaves 'version' undefined — confirm this is intended.
    if args.V == "now":
        version = get_version(inc)
if args.A:
    image_start = binascii.unhexlify(clean_hex(args.A))
data = file.read()
# only add if multiple things are set so that trailer can be created
add = ('image_start' in globals()) & ('product' in globals()) & ('version' in globals())
if args.v:
    print >> sys.stderr, "Read ",len(data), "bytes", " will add:", add
if not add:
    # Parse mode: walk the 40-byte tail of the image and print the requested
    # fields. 'pos' is a negative offset from the end of the file.
    if (args.v):
        print "Trailer:", binascii.hexlify(data[-40:])
    # product type before the trailer
    pos = -40
    if (args.vP):
        print binascii.hexlify(data[pos : pos + 8]).upper()
    pos = pos + 8
    # type first in the trailer
    if (args.vT):
        print binascii.hexlify(data[pos : pos + 8]).upper()
    pos = pos + 8
    # version
    if (args.vV):
        print binascii.hexlify(data[pos : pos + 8]).upper()
    pos = pos + 8
    # start of image
    if (args.vS):
        print "0x" + (binascii.hexlify(data[pos : pos + 4]).upper())
    pos = pos + 4
    # ignore...
    pos = pos + 4
    # print "Magic : " + binascii.hexlify(data[pos : pos + 4])
    pos = pos + 4
    # Last 4 bytes: big-endian stored CRC.
    fc = data[pos :]
    filecrc = (ord(fc[0]) << 24) | (ord(fc[1]) << 16) | (ord(fc[2]) << 8) | ord(fc[3])
    if (args.vC):
        print "0x" + (hex(rev32(filecrc))[2:]).upper()
    if(args.v):
        print "CRC from file:", binascii.hexlify(fc), hex(filecrc)
        print "Total CRC:", hex(get_crc(data))
        print "calc CRC:", hex(rev32(get_crc(data[:-4])))
#print "CRC32 :", hex(get_crc(data)), "=", hex(filecrc)
#version = get_version(0)
#print binascii.hexlify(version)
if add:
    # Create mode: pad to a 4-byte boundary, append product + trailer, then
    # the byte-swapped CRC of everything so far.
    if len(data) % 4 > 0:
        if args.v:
            print >> sys.stderr, "Padded with: ", 4 - (len(data) % 4), "bytes."
        data = data + '\0' * (4 - (len(data) % 4))
    # add image type and version and start address (8 + 8 + 4 bytes)
    trailer = '' + image_type + version + image_start
    # add the config and magic CRC value 4 + 4 bytes
    trailer = trailer + binascii.unhexlify('0000040851597466')
    if args.v:
        print >> sys.stderr, "Trailer:", binascii.hexlify(trailer), len(trailer)
    data = data + product + trailer
    crc = rev32(get_crc(data))
    data = data + chr((crc >> 24) & 0xff) + chr((crc >> 16) & 0xff) + chr((crc >> 8) & 0xff) + chr((crc >> 0) & 0xff)
    if args.v:
        print >> sys.stderr, "Trailer:", binascii.hexlify(data[-40:]), " CRC:", hex(get_crc(data)), " Size: ", len(data)
    # Emit the completed flash image on stdout.
    sys.stdout.write(data)
|
import mimetypes
import os
from .posts import Post
# Register Markdown extensions as text/plain so that mimetypes-based
# discovery (see Storage.find_posts below) also picks up .md/.markdown files.
extra_content_types = (
    ('text/plain', ['.md', '.markdown']),
)
for type_, ext_list in extra_content_types:
    for ext in ext_list:
        mimetypes.add_type(type_, ext, strict=False)
class Storage(object):
    """Filesystem-backed post storage: walks *path* and loads every file whose
    extension maps to text/plain (including the .md/.markdown extensions
    registered above)."""

    content_types = ('text/plain',)

    def __init__(self, path='.'):
        # BUG FIX: the previous default was the bytes literal b'.'. With a
        # bytes path os.walk() yields bytes file names, and their bytes
        # extensions never compare equal to the str extensions returned by
        # mimetypes.guess_all_extensions(), so find_posts() silently found
        # nothing. Normalize any bytes path to str up front.
        self.path = os.fsdecode(path)

    def find_posts(self):
        """Return a Post for every text file found under self.path."""
        text_exts = set(mimetypes.guess_all_extensions('text/plain', strict=False))
        all_text_files = []
        for root, dirs, files in os.walk(self.path):
            all_text_files.extend(
                os.path.join(root, name)
                for name in files
                if os.path.splitext(name)[1] in text_exts
            )
        return [Post.from_file(filename) for filename in all_text_files]
|
from __future__ import division
from cctbx.array_family import flex
import mmtbx.f_model
import mmtbx.f_model
from scitbx import lbfgs as scitbx_lbfgs
from libtbx import adopt_init_args
import random
from mmtbx import bulk_solvent
def lbfgs_run(target_evaluator,
              min_iterations=0,
              max_iterations=None,
              traditional_convergence_test=1,
              use_curvatures=False):
    """Drive the scitbx low-level LBFGS minimizer (Python 2 code).

    target_evaluator must expose .n (parameter count) and be callable as
    target_evaluator(requests_f_and_g=..., requests_diag=...) returning
    (x, f, g, d). Returns the minimizer with .error and .n_calls set.
    """
    ext = scitbx_lbfgs.ext
    minimizer = ext.minimizer(target_evaluator.n)
    minimizer.error = None
    if (traditional_convergence_test):
        is_converged = ext.traditional_convergence_test(target_evaluator.n)
    else:
        # The non-traditional path is disabled: the raise makes the
        # assignment below unreachable.
        raise RuntimeError
        is_converged = ext.drop_convergence_test(min_iterations)
    try:
        icall = 0   # counts function/gradient evaluations only
        requests_f_and_g = True
        requests_diag = use_curvatures
        while 1:
            if (requests_f_and_g):
                icall += 1
            x, f, g, d = target_evaluator(
                requests_f_and_g=requests_f_and_g,
                requests_diag=requests_diag)
            #if (requests_diag):
            #  print "x,f,d:", tuple(x), f, tuple(d)
            #else:
            #  print "x,f:", tuple(x), f
            if (use_curvatures):
                # A missing diagonal is replaced by zeros of matching size.
                if (d is None): d = flex.double(x.size())
                have_request = minimizer.run(x, f, g, d)
            else:
                have_request = minimizer.run(x, f, g)
            if (have_request):
                requests_f_and_g = minimizer.requests_f_and_g()
                requests_diag = minimizer.requests_diag()
                continue
            assert not minimizer.requests_f_and_g()
            assert not minimizer.requests_diag()
            if (traditional_convergence_test):
                if (minimizer.iter() >= min_iterations and is_converged(x, g)): break
            else:
                if (is_converged(f)): break
            if (max_iterations is not None and minimizer.iter() >= max_iterations):
                break
            if (use_curvatures):
                have_request = minimizer.run(x, f, g, d)
            else:
                have_request = minimizer.run(x, f, g)
            if (not have_request): break
            requests_f_and_g = minimizer.requests_f_and_g()
            requests_diag = minimizer.requests_diag()
    # Python 2 except syntax: the minimizer error is recorded, not re-raised.
    except RuntimeError, e:
        minimizer.error = str(e)
    minimizer.n_calls = icall
    return minimizer
class refinement_flags(object):
    """Selects which bulk-solvent parameters to refine: k_sol, b_sol,
    both (kb), or the anisotropic scale u_star."""
    def __init__(self, refine_k=False, refine_b=False, refine_kb=False,
                 refine_u=False):
        # adopt_init_args copies each constructor argument onto self.
        adopt_init_args(self, locals())
class minimizer:
    """Adapter between a tgc target/gradient calculator and lbfgs_run: it is
    the target_evaluator callable that the LBFGS driver invokes."""
    def __init__(self, tgc, min_iterations=0, max_iterations=25):
        adopt_init_args(self, locals())
        self.x = self.tgc.x()      # parameter vector, mutated in place by LBFGS
        self.n = self.x.size()

    def run(self, use_curvatures=0):
        """Minimize, then evaluate once more so tgc reflects the final x."""
        self.minimizer = lbfgs_run(
            target_evaluator=self,
            min_iterations=self.min_iterations,
            max_iterations=self.max_iterations,
            use_curvatures=use_curvatures)
        self(requests_f_and_g=True, requests_diag=False)
        return self

    def __call__(self, requests_f_and_g, requests_diag):
        """LBFGS callback: push x into the model, then return (x, f, g, d)."""
        self.tgc.update(x=self.x)
        # If neither was requested, compute everything.
        if (not requests_f_and_g and not requests_diag):
            requests_f_and_g = True
            requests_diag = True
        if (requests_f_and_g):
            self.f = self.tgc.target()
            self.g = self.tgc.gradients()
            self.d = None
        if (requests_diag):
            self.d = self.tgc.curvatures()
            #assert self.d.all_ne(0)
            # An all-zero curvature diagonal is treated as "no diagonal";
            # otherwise LBFGS expects the inverse curvatures.
            if(self.d.all_eq(0)): self.d=None
            else:
                self.d = 1 / self.d
        return self.x, self.f, self.g, self.d
class tgc(object):
    """Target/gradient/curvature calculator for bulk-solvent and anisotropic
    scale refinement (Python 2 code; uses xrange and print statements).

    Holds an mmtbx manager_kbu model and exposes the x()/target()/gradients()/
    curvatures()/update() protocol expected by the minimizer class above,
    switching parameterization via the set_refine_* methods.
    """
    def __init__(self,
                 f_obs,
                 f_calc,
                 f_masks,
                 ss,
                 k_sols=None,
                 b_sols=None,
                 ps=None,
                 # NOTE(review): mutable default list shared across instances.
                 u_star=[0,0,0,0,0,0], b_max=300, b_min=0, k_min=0.001, k_max=50):
        # Either ps is given (and k_sols/b_sols are not), or both k/b are given.
        if(ps is not None): assert [k_sols, b_sols].count(None) == 2
        else: assert [k_sols, b_sols].count(None) == 0
        adopt_init_args(self, locals())
        self.kbu = mmtbx.f_model.manager_kbu(
            f_obs = self.f_obs,
            f_calc = self.f_calc,
            f_masks = self.f_masks,
            ss = self.ss,
            k_sols = self.k_sols,
            b_sols = self.b_sols,
            u_star = self.u_star)
        self.t_g_c = None        # set by update(); holds the ls_kbp_sol_u_star result
        self.use_scale=None
        # Exactly one refine_* flag is active at a time (see set_refine_*).
        self.refine_kb=False
        self.refine_k=False
        self.refine_b=False
        self.refine_u=False
        self.refine_p=False
        self.space_group = self.f_obs.space_group()
        self.adp_constraints = self.space_group.adp_constraints()

    def set_refine_kb(self):
        # Refine k_sols and b_sols jointly.
        self.refine_kb=True
        self.refine_u=False
        self.refine_k=False
        self.refine_b=False
        self.refine_p=False

    def set_refine_k(self):
        self.refine_k=True
        self.refine_b=False
        self.refine_kb=False
        self.refine_u=False
        self.refine_p=False

    def set_refine_b(self):
        self.refine_k=False
        self.refine_b=True
        self.refine_kb=False
        self.refine_u=False
        self.refine_p=False

    def set_refine_u(self):
        self.refine_k=False
        self.refine_b=False
        self.refine_kb=False
        self.refine_u=True
        self.refine_p=False
        # Symmetrize u_star before refining under the ADP constraints.
        u_star = self.space_group.average_u_star(u_star = self.kbu.u_star())
        self.kbu.update(u_star = u_star)
        assert self.adp_constraints.n_independent_params() <= 6

    def set_refine_p(self):
        self.refine_kb=False
        self.refine_u=False
        self.refine_k=False
        self.refine_b=False
        self.refine_p=True

    def set_use_scale(self, value):
        assert value in [True, False]
        self.use_scale=value

    def normalize(self, parameters, p_min, p_max):
        """Clamp each parameter into [p_min, p_max]."""
        result = flex.double()
        for p in parameters:
            if(p < p_min): p = p_min
            if(p > p_max): p = p_max
            result.append(p)
        return result

    def x(self):
        """Current parameter vector for the active refinement mode."""
        if(self.refine_k):
            return self.normalize(self.kbu.k_sols(), self.k_min, self.k_max)
        if(self.refine_b):
            return self.normalize(self.kbu.b_sols(), self.b_min, self.b_max)
        if(self.refine_kb):
            x = self.normalize(self.kbu.k_sols(), self.k_min, self.k_max)
            x.extend(self.normalize(self.kbu.b_sols(), self.b_min, self.b_max))
            return x
        if(self.refine_u):
            #return flex.double(self.kbu.u_star())
            return flex.double(
                self.adp_constraints.independent_params(self.kbu.u_star()))

    def target(self):
        return self.t_g_c.target()

    def gradients(self):
        """Gradient vector matching the layout of x()."""
        if(self.refine_k): return self.t_g_c.grad_k_sols()
        if(self.refine_b): return self.t_g_c.grad_b_sols()
        if(self.refine_kb):
            g=self.t_g_c.grad_k_sols()
            g.extend(self.t_g_c.grad_b_sols())
            return g
        if(self.refine_u):
            #return flex.double(self.t_g_c.grad_u_star())
            return flex.double(
                self.adp_constraints.independent_gradients(all_gradients=self.t_g_c.grad_u_star()))

    def curvatures(self):
        # No curvatures are provided for u_star refinement.
        if(self.refine_k): return self.t_g_c.curv_k_sols()
        if(self.refine_b): return self.t_g_c.curv_b_sols()
        if(self.refine_kb):
            d = self.t_g_c.curv_k_sols()
            d.extend(self.t_g_c.curv_b_sols())
            return d

    def update(self, x):
        """Write x back into the model and recompute target/gradients."""
        if(self.refine_k): self.kbu.update(k_sols=x)
        if(self.refine_b): self.kbu.update(b_sols=x)
        if(self.refine_kb):
            # First half of x is k_sols, second half b_sols.
            self.kbu.update(
                k_sols=x[:len(x)//2],
                b_sols=x[len(x)//2:])
        if(self.refine_u):
            #u_star = x
            u_star = self.adp_constraints.all_params(list(x))
            self.kbu.update(u_star = list(u_star))
        if(self.use_scale):
            sc = bulk_solvent.scale(self.f_obs.data(), self.kbu.data.f_model)
        else:
            sc = 1.0
        self.t_g_c = bulk_solvent.ls_kbp_sol_u_star(
            f_model = self.kbu.data,
            f_obs = self.f_obs.data(),
            scale = sc,
            kb_sol_grad = self.refine_k or self.refine_b or self.refine_kb,
            p_sol_grad = False,
            u_star_grad = self.refine_u,
            kb_sol_curv = self.refine_k or self.refine_b or self.refine_kb,
            p_sol_curv = False)

    def minimize_k_once(self, use_curvatures):
        self.set_refine_k()
        self.set_use_scale(value = True)
        return minimizer(tgc = self).run(use_curvatures=use_curvatures)

    def minimize_b_once(self, use_curvatures):
        self.set_refine_b()
        return minimizer(tgc = self).run(use_curvatures=use_curvatures)

    def minimize_kb_sequential(self, use_curvatures_options=[False, True],
                               n_cycles=5):
        """Alternate k-only and b-only minimizations for n_cycles."""
        #print "start r:", self.kbu.r_factor()
        for use_curvatures in use_curvatures_options*n_cycles:
            self.set_use_scale(value = True)
            m = self.minimize_k_once(use_curvatures=use_curvatures)
            #print "k_sols r:", self.kbu.r_factor(), "curv:", use_curvatures
            m = self.minimize_b_once(use_curvatures=use_curvatures)
            #print "b_sols r:", self.kbu.r_factor(), "curv:", use_curvatures

    def minimize_kbu_sequential(self, use_curvatures_options=[False, True],
                                n_cycles=5):
        """Like minimize_kb_sequential, plus joint kb and u_star passes."""
        #print "start r:", self.kbu.r_factor()
        for use_curvatures in use_curvatures_options*n_cycles:
            self.set_use_scale(value = True)
            m = self.minimize_k_once(use_curvatures=use_curvatures)
            #print "k_sols r:", self.kbu.r_factor(), "curv:", use_curvatures
            m = self.minimize_b_once(use_curvatures=use_curvatures)
            #print "b_sols r:", self.kbu.r_factor(), "curv:", use_curvatures
            m = self.minimize_kb_once(use_curvatures=use_curvatures)
            #print "kb_sols r:", self.kbu.r_factor(), "curv:", use_curvatures
            m = self.minimize_u_once()
            #print "u_star r:", self.kbu.r_factor(), "curv:", use_curvatures

    def minimize_kb_once(self, use_curvatures):
        self.set_refine_kb()
        return minimizer(tgc = self).run(use_curvatures=use_curvatures)

    def minimize_u_once(self):
        self.set_refine_u()
        return minimizer(tgc = self).run(use_curvatures=False)

    def minimize_u(self, n_cycles=5):
        """Refine u_star, rolling back any cycle that worsens R with a
        runaway b_cart (|b| > 100)."""
        #print "minimize_u, r:", self.kbu.r_factor()
        for it in xrange(n_cycles):
            start_r = self.kbu.r_factor()
            save_b_cart = self.kbu.b_cart()
            self.set_refine_u()
            self.set_use_scale(value = True)
            minimizer(tgc = self).run(use_curvatures=False)
            #print "  minimize_u, r:", self.kbu.r_factor()
            r = self.kbu.r_factor()
            bc = list(flex.abs(flex.double(self.kbu.b_cart())))
            if(r>start_r and r>1.e-2 and max(bc)>100):
                self.kbu.update(b_cart = save_b_cart)
                break

    def minimize_kb(self, use_curvatures_options,
                    set_use_scale_options=[True, False], n_cycles=5):
        """Refine k_sols/b_sols jointly; revert a cycle if R worsens with
        out-of-range solvent parameters."""
        #print "minimize_kb, r:", self.kbu.r_factor()
        for use_curvatures in use_curvatures_options*n_cycles:
            start_r = self.kbu.r_factor()
            save_k_sols = self.kbu.k_sols()
            save_b_sols = self.kbu.b_sols()
            #self.set_use_scale(value = random.choice(set_use_scale_options))
            self.set_use_scale(value = True)
            m = self.minimize_kb_once(use_curvatures=use_curvatures)
            r = self.kbu.r_factor()
            if(r>start_r and r>1.e-2 and (flex.min(self.kbu.k_sols())<0 or
               flex.max(self.kbu.k_sols())>1 or flex.min(self.kbu.b_sols())<0 or
               flex.max(self.kbu.k_sols())>100.)):
                self.kbu.update(k_sols = save_k_sols, b_sols = save_b_sols)
            #print "  minimize_kb, r:", self.kbu.r_factor()
            # assert m.minimizer.n_calls == m.minimizer.nfun()

    def minimize_kbu(self, n_cycles=10):
        """Alternate joint kb refinement and u_star refinement, with the
        same rollback guards as minimize_kb/minimize_u."""
        #print "minimize_kbu start r:", self.kbu.r_factor()
        for use_curvatures in [False, True]*n_cycles:
            #print "  minimize_kbu r:", self.kbu.r_factor()
            start_r = self.kbu.r_factor()
            save_k_sols = self.kbu.k_sols()
            save_b_sols = self.kbu.b_sols()
            save_b_cart = self.kbu.b_cart()
            #self.set_use_scale(value = random.choice([True, False]))
            self.set_use_scale(value = True)
            m = self.minimize_kb_once(use_curvatures=use_curvatures)
            r = self.kbu.r_factor()
            if(r>start_r and r>1.e-2 and (flex.min(self.kbu.k_sols())<0 or
               flex.max(self.kbu.k_sols())>1 or flex.min(self.kbu.b_sols())<0 or
               flex.max(self.kbu.k_sols())>100.)):
                self.kbu.update(k_sols = save_k_sols, b_sols = save_b_sols)
            # assert m.minimizer.n_calls == m.minimizer.nfun()
            m = self.minimize_u_once()
            # assert m.minimizer.n_calls == m.minimizer.nfun()
            r = self.kbu.r_factor()
            bc = list(flex.abs(flex.double(self.kbu.b_cart())))
            if(r>start_r and r>1.e-2 and max(bc)>100):
                self.kbu.update(b_cart = save_b_cart)
                break

    def show_k_sols(self):
        # Python 2 print statement.
        print "k_sols:", [round(k,3) for k in self.kbu.k_sols()], self.kbu.r_factor()

    def show_kbu(self):
        # Python 2 print statements.
        print "k_sols:", [round(k,3) for k in self.kbu.k_sols()]
        print "b_sols:", [round(b,3) for b in self.kbu.b_sols()]
        print "b_cart:", [round(b,3) for b in self.kbu.b_cart()]
|
import asyncio
import logging
import types
from collections import defaultdict
from typing import Dict
logger = logging.getLogger(__name__)
class EventManager:
    """Process-wide publish/subscribe registry keyed by event class.

    Subscribers are async callables; trigger() runs every subscriber
    registered for type(event) concurrently.
    """

    __subscribers: Dict = defaultdict(set)

    @classmethod
    def subscribe(cls, subscriber, event):
        """Register *subscriber* for events of class *event*."""
        cls.__subscribers[event].add(subscriber)

    @classmethod
    async def trigger(cls, event):
        """Dispatch *event* to all subscribers of its class and await them all."""
        pending = [
            asyncio.create_task(subscriber(event))
            for subscriber in cls.__subscribers.get(event.__class__, [])
        ]
        await asyncio.gather(*pending)

    @classmethod
    def _reset(cls):
        """Never call this outside tests!"""
        cls.__subscribers = defaultdict(set)

    @classmethod
    def subscribers(cls) -> Dict:
        """Return a plain-dict snapshot of the current registry."""
        return dict(cls.__subscribers)
|
import os
from ossid.datasets.utils import getSampler
import numpy as np
from numpy.lib.type_check import imag
import torch
import time
import torchvision.transforms as transforms
from scipy.spatial.transform import Rotation as R
def expandBox(x1, y1, x2, y2, img_h, img_w, expand_ratio):
    """Scale the box (x1, y1, x2, y2) about its center by expand_ratio,
    clamping the result to the image bounds [0, img_w-1] x [0, img_h-1]."""
    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2
    half_w = (x2 - x1) / 2 * expand_ratio
    half_h = (y2 - y1) / 2 * expand_ratio
    new_x1 = max(0, center_x - half_w)
    new_x2 = min(img_w - 1, center_x + half_w)
    new_y1 = max(0, center_y - half_h)
    new_y2 = min(img_h - 1, center_y + half_h)
    return new_x1, new_y1, new_x2, new_y2
def quatAngularDiffBatch(Q1, Q2):
    '''
    Q1 is of shape (M, 4) and Q2 is of shape (N, 4)
    return a matrix of (M, N) containing angular difference between them
    '''
    # NOTE(review): a second, numpy-only quatAngularDiffBatch defined later
    # in this module rebinds this name at import time.
    M = Q1.shape[0]
    N = Q2.shape[0]
    left = torch.from_numpy(Q1).reshape((M, 4, 1))
    right = torch.from_numpy(Q2).T.reshape((1, 4, N))
    # |dot| folds q and -q together (same rotation); clamp below 1 so acos
    # stays finite under rounding.
    dots = torch.abs((left * right).sum(axis=1))
    limit = torch.ones_like(dots) * 1 - 1e-7
    angles = 2 * torch.acos(torch.min(dots, limit))
    return angles.detach().cpu().numpy()
def normalizeImageRange(img):
    '''
    Standardize a (B, 3, H, W) tensor with the ImageNet channel mean/std.
    '''
    mean = img.new_tensor((0.485, 0.456, 0.406)).reshape(1, 3, 1, 1)
    std = img.new_tensor((0.229, 0.224, 0.225)).reshape(1, 3, 1, 1)
    return (img - mean) / std
# def denormalizeImageRange(img):
# '''
# image: ndarray of size (3, H, W)
# '''
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
# mean = np.asarray(mean).reshape((3, 1, 1))
# std = np.asarray(std).reshape((3, 1, 1))
# img = img * std + mean
# return img
def normalizeImage(img):
    '''
    Arguments:
        img: image of shape (3, H, W), range (0, 255)
    Returns the image as float32 scaled to [0, 1] (same as ToTensor).
    '''
    return img.astype(np.float32) / 255.0
def tensor_to_PIL(image):
    """
    converts a tensor normalized image (imagenet mean & std) into a PIL RGB image
    will not work with batches (if batch size is 1, squeeze before using this)
    Input:
        image: torch.Tensor of size (3, H, W), normalized by the mean and variance from ImageNet
    """
    # Inverse of Normalize(mean, std): x -> x * std + mean, expressed as
    # another Normalize with mean' = -mean/std and std' = 1/std.
    # BUG FIX: the blue-channel std was mistyped as 0.255; the ImageNet std
    # used everywhere else in this module is 0.225.
    inv_normalize = transforms.Normalize(
        mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
        std=[1/0.229, 1/0.224, 1/0.225],
    )
    inv_tensor = inv_normalize(image)
    inv_tensor = torch.clamp(inv_tensor, 0, 1)
    original_image = transforms.ToPILImage()(inv_tensor).convert("RGB")
    return original_image
def perturbTrans(mat, n_perturb = 500):
    """Sample n_perturb random small rigid-body perturbations of the 4x4
    pose *mat*: rotations with N(0, 0.2) axis-angle magnitude composed on the
    left, plus N(0, 0.01) translation offsets. Returns (n_perturb, 4, 4)."""
    # RNG draw order is part of the contract (reproducibility under a seed):
    # magnitudes, then axes, then translation offsets.
    magnitudes = np.random.normal(0, 0.2, n_perturb)
    axes = np.random.normal(0, 1.0, (n_perturb, 3))
    axes = axes / np.linalg.norm(axes, ord=2, axis=1, keepdims=True)
    rot_mats = R.from_rotvec(axes * magnitudes[:, None]).as_matrix()
    offsets = np.random.normal(0, 0.01, (n_perturb, 3))
    perturbed = np.repeat(mat.copy()[None], n_perturb, axis=0)
    perturbed[:, :3, :3] = np.einsum("ijk,ikl->ijl", rot_mats, perturbed[:, :3, :3])
    perturbed[:, :3, 3] += offsets
    return perturbed
def randRotMat(Z_max=90, X_max=30, Y_max=30):
    """Random 3x3 rotation matrix from uniform ZXY Euler angles bounded by
    +/-Z_max, +/-X_max, +/-Y_max degrees respectively."""
    # Draw order (Z, X, Y) is preserved for seeded reproducibility.
    angle_z = np.random.uniform(-Z_max, Z_max, None)
    angle_x = np.random.uniform(-X_max, X_max, None)
    angle_y = np.random.uniform(-Y_max, Y_max, None)
    return R.from_euler('ZXY', [angle_z, angle_x, angle_y], degrees=True).as_matrix()
def estimateRigidBodyTransform(P, Q):
    '''
    Compute the rigid body transformation R and t given two set of
    N corresponding points in 3D (Kabsch/Umeyama via SVD).
    Inputs:
        P - a (3, N) matrix containing the before-transform points
        Q - a (3, N) matrix containing the after-transform points
    Outputs:
        R, t such that Q ~= R @ P + t
    '''
    dim, _ = P.shape
    p_centroid = P.mean(axis=1).reshape((dim, 1))
    q_centroid = Q.mean(axis=1).reshape((dim, 1))
    X = P - p_centroid
    Y = Q - q_centroid
    U, _, Vt = np.linalg.svd(X.dot(Y.T))
    V = Vt.T
    # Correction matrix guards against reflections (det = -1 solutions).
    correction = np.eye(dim)
    correction[-1, -1] = np.linalg.det(V.dot(U.T))
    R = V.dot(correction).dot(U.T)
    t = q_centroid - R.dot(p_centroid)
    return R, t
def meta2K(meta_data):
    """Assemble a 3x3 camera intrinsics matrix from a metadata dict whose
    camera_* entries may be plain scalars or torch tensors."""
    if type(meta_data['camera_fx']) is torch.Tensor:
        fx = meta_data['camera_fx'].item()
        fy = meta_data['camera_fy'].item()
        cx = meta_data['camera_cx'].item()
        cy = meta_data['camera_cy'].item()
    else:
        fx = meta_data['camera_fx']
        fy = meta_data['camera_fy']
        cx = meta_data['camera_cx']
        cy = meta_data['camera_cy']
    return np.asarray([
        [fx, 0, cx],
        [0, fy, cy],
        [0, 0, 1],
    ])
def K2meta(cam_K):
    """Inverse of meta2K: extract fx/fy/cx/cy from a 3x3 intrinsics matrix
    into a metadata dict (camera_scale is always 1.0)."""
    return {
        "camera_fx": cam_K[0, 0],
        "camera_fy": cam_K[1, 1],
        "camera_cx": cam_K[0, 2],
        "camera_cy": cam_K[1, 2],
        "camera_scale": 1.0,
    }
def dict_to(dictionary, device):
    """Move every value that is exactly a torch.Tensor onto *device*,
    mutating *dictionary* in place. Returns None."""
    for key, value in dictionary.items():
        # Exact type check (not isinstance) mirrors the original contract.
        if type(value) is torch.Tensor:
            dictionary[key] = value.to(device)
def torch_norm_fast(tensor, axis):
    """L2 norm of *tensor* along *axis* (no keepdim, no epsilon)."""
    squared_sum = (tensor ** 2).sum(axis)
    return torch.sqrt(squared_sum)
def to_np(x):
    """Convert a tensor to a numpy array; pass numpy arrays and plain
    Python numbers through unchanged."""
    if type(x) in (np.ndarray, float, int):
        return x
    if torch.is_tensor(x):
        return x.detach().cpu().numpy()
    # Fallback for tensor-like objects exposing .detach().data.
    return x.detach().data.cpu().numpy()
def torch2Img(img, normalized = False):
    """Convert a (B, 3, H, W) or (3, H, W) tensor to an (H, W, 3) numpy
    image; if *normalized*, undo the ImageNet mean/std standardization."""
    array = to_np(img)
    if len(array.shape) == 4:
        array = array[0]  # drop the batch dimension, keep the first sample
    array = array.transpose((1, 2, 0))
    if normalized:
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        array = array * std + mean
    return array
class TorchTimer:
    """Context manager timing its body on both GPU (CUDA events) and CPU
    wall clock; requires a CUDA-capable torch build to record events.

    Results land in .interval (GPU seconds) and .interval_cpu (seconds),
    optionally appended to *agg_list* and/or printed under *heading*.
    """
    def __init__(self, heading = None, agg_list = None, verbose = True):
        self.verbose = verbose
        if not self.verbose:
            # Disabled timer: skip all setup; __enter__/__exit__ no-op.
            return
        if(agg_list is None and heading is None):
            heading = ""
        self.agg_list = agg_list
        self.heading = heading
        self.start = torch.cuda.Event(enable_timing=True)
        self.end = torch.cuda.Event(enable_timing=True)

    def __enter__(self):
        if not self.verbose:
            return self
        self.start.record()
        self.start_cpu = time.time()
        return self

    def __exit__(self, *args):
        if not self.verbose:
            return
        self.end.record()
        # Events are asynchronous; synchronize before reading elapsed time.
        torch.cuda.synchronize()
        self.interval_cpu = time.time() - self.start_cpu
        # elapsed_time returns milliseconds; convert to seconds.
        self.interval = self.start.elapsed_time(self.end)/1000.0
        if(self.agg_list is not None):
            if(self.heading is not None):
                self.agg_list.append((self.heading, self.interval, self.interval_cpu))
            else:
                self.agg_list.append((self.interval, self.interval_cpu))
        if (self.heading is not None and self.verbose):
            print('{} GPU:{}, CPU:{}'.format(self.heading, self.interval, self.interval_cpu))
class Timer:
    """Wall-clock timer context manager; on exit prints '<heading> <seconds>'
    and exposes .start/.end/.interval. verbose=False disables everything."""
    def __init__(self, heading = "", agg_list = None, verbose = True):
        self.verbose = verbose
        if not self.verbose:
            return
        self.heading = heading

    def __enter__(self):
        if self.verbose:
            self.start = time.time()
        return self

    def __exit__(self, *args):
        if not self.verbose:
            return
        self.end = time.time()
        self.interval = self.end - self.start
        print(self.heading, self.interval)
def depth2xyz(depth, cam_K):
    """Back-project an (H, W) depth map into an (H, W, 3) point map using the
    3x3 intrinsics cam_K. Rightward is +x and downward is +y in the camera
    frame, so pixel columns drive x and rows drive y."""
    h, w = depth.shape
    cols, rows = np.meshgrid(np.arange(w), np.arange(h))
    z = depth
    x = (cols - cam_K[0, 2]) * z / cam_K[0, 0]
    y = (rows - cam_K[1, 2]) * z / cam_K[1, 1]
    return np.stack([x, y, z], axis=2)
def kpts2cloud(kpts, depth, cam_K):
    # Deliberately disabled: the author suspects the x/y indexing below is
    # transposed (depth[x, y] indexes rows with x). Everything after the
    # raise is unreachable and kept only as a reference implementation.
    raise Exception("This function seems wrong (about x and y)")
    x = kpts[:, 0]
    y = kpts[:, 1]
    z = depth[x, y]
    x = (x - cam_K[0,2]) * z / cam_K[0,0]
    y = (y - cam_K[1,2]) * z / cam_K[1,1]
    P_w = np.vstack((x, y, z)).T
    return P_w
def projCloud(pts, cam_K):
    '''
    Project a point cloud in 3D into 2D image plane
    Note in the camera coordinate, rightward is the positive x, and downward is the positive y
    pts: (n, 3) points in 3D, relative to the camera coordinate frame
    cam_K: matrix of camera intrinsics, with the entry [2,2] being 1
    Returns an (n, 2) array of (row, col) image coordinates.
    '''
    x, y, z = pts[:, 0], pts[:, 1], pts[:, 2]
    # Image coordinates: px runs down (rows), py runs right (columns).
    py = cam_K[0, 0] * x / z + cam_K[0, 2]
    px = cam_K[1, 1] * y / z + cam_K[1, 2]
    return np.vstack((px, py)).T
def torch_norm_fast(tensor, axis):
    """Euclidean norm along *axis*. (Duplicate: this redefinition shadows
    the identical torch_norm_fast declared earlier in this module.)"""
    return torch.sqrt(tensor.pow(2).sum(axis))
def dict_to(data, device):
    """In-place: move exact-torch.Tensor values of *data* onto *device*.
    (Duplicate: shadows the identical dict_to declared earlier in this module.)"""
    for key in list(data.keys()):
        value = data[key]
        if type(value) is torch.Tensor:
            data[key] = value.to(device)
def move_to(obj, device):
    """Recursively move tensors inside nested dicts/lists onto *device*,
    returning new containers. Raises TypeError for unsupported types."""
    if torch.is_tensor(obj):
        return obj.to(device)
    if isinstance(obj, dict):
        return {key: move_to(value, device) for key, value in obj.items()}
    if isinstance(obj, list):
        return [move_to(item, device) for item in obj]
    raise TypeError("Invalid type for move_to")
def cosSim(mdesc0, mdesc1, axis=1):
    """Pairwise cosine similarity between two descriptor batches.

    axis=1: inputs are (B, D, N) and (B, D, M); axis=2: (B, N, D) and
    (B, M, D). Returns a (B, N, M) similarity matrix.
    """
    assert mdesc0.dim() == 3
    assert mdesc1.dim() == 3
    assert axis in [1, 2]
    einsum_expr = 'bdn,bdm->bnm' if axis == 1 else 'bnd,bmd->bnm'
    dot = torch.einsum(einsum_expr, mdesc0, mdesc1)
    norms0 = torch_norm_fast(mdesc0, axis).unsqueeze(2)
    norms1 = torch_norm_fast(mdesc1, axis).unsqueeze(1)
    return dot / (norms0 * norms1)
# Q1 is of shape (M, 4) and Q2 is of shape (N, 4)
# return a matrix of (M, N) containing angular difference between them
def quatAngularDiffBatch(Q1, Q2):
    """Angular differences between quaternion sets, numpy version:
    Q1 (M, 4) and Q2 (N, 4) -> (M, N) matrix of angles in radians.
    (This redefinition shadows the torch-based version earlier in the module.)"""
    M = Q1.shape[0]
    N = Q2.shape[0]
    left = Q1.reshape((M, 4, 1))
    right = Q2.T.reshape((1, 4, N))
    # |dot| identifies q with -q; clamp below 1 keeps arccos finite.
    dots = np.absolute((left * right).sum(axis=1))
    limit = np.ones_like(dots) * 1 - 1e-7
    return 2 * np.arccos(np.minimum(dots, limit))
def makeDir(path):
    """Ensure the parent directory of *path* exists.

    Fixes two defects in the original: (1) for a bare filename,
    os.path.dirname() returns '' and os.makedirs('') raises; (2) the
    exists()/makedirs() pair was racy — exist_ok=True makes creation
    idempotent even under concurrent callers.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
def robustCrop(image, x1, x2, y1, y2):
    """Crop image[x1:x2, y1:y2] where x indexes rows and y indexes columns;
    regions falling outside the image are zero-padded, so negative or
    oversized bounds are allowed."""
    assert x2 > x1
    assert y2 > y1
    src_h, src_w = image.shape[:2]
    out_h, out_w = x2 - x1, y2 - y1
    result = np.zeros((out_h, out_w, *image.shape[2:]), dtype=image.dtype)
    # Intersection of the requested window with the source image…
    src_x1, src_y1 = max(0, x1), max(0, y1)
    src_x2, src_y2 = min(src_h, x2), min(src_w, y2)
    # …and where that intersection lands inside the output buffer.
    dst_x1, dst_y1 = max(0, -x1), max(0, -y1)
    dst_x2, dst_y2 = min(out_h, src_h - x1), min(out_w, src_w - y1)
    result[dst_x1:dst_x2, dst_y1:dst_y2] = image[src_x1:src_x2, src_y1:src_y2]
    return result
def heatmapGaussain(img_h, img_w, cx, cy, sigma, normalize=False):
    """Render an (img_h, img_w) Gaussian heatmap centered at pixel (cx, cy)
    with standard deviation *sigma*; if *normalize*, scale it to sum to 1."""
    img_h, img_w = int(round(img_h)), int(round(img_w))
    xs, ys = np.meshgrid(np.arange(img_w), np.arange(img_h))
    dist = np.sqrt((xs - cx) ** 2 + (ys - cy) ** 2)
    heatmap = np.exp(-(dist ** 2 / (2.0 * sigma ** 2)))
    if normalize:
        heatmap = heatmap / heatmap.sum()
    return heatmap
"""Assortment of layers for use in models.py.
Refer to StackGAN paper: https://arxiv.org/pdf/1612.03242.pdf
for variable names and working.
Authors:
Abhiraj Tiwari (abhirajtiwari@gmail.com)
Sahil Khose (sahilkhose18@gmail.com)
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
def conv3x3(in_channels, out_channels):
    """3x3 convolution with 'same' padding, stride 1, and no bias
    (bias is redundant before the BatchNorm layers used throughout)."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False,
    )
class ResBlock(nn.Module):
    """Residual block: two 3x3 conv+BN layers with an identity skip
    connection, followed by a ReLU."""
    def __init__(self, channel_num):
        super(ResBlock, self).__init__()
        self.block = nn.Sequential(
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
            nn.ReLU(True),
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # y = relu(F(x) + x): the skip path is the untouched input.
        out = self.block(x)
        out += x
        return self.relu(out)
def _downsample(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(0.2, inplace=True)
)
def _upsample(in_channels, out_channels):
return nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
class CAug(nn.Module):
    """Module for conditional augmentation.
    Takes input as bert embeddings of annotations and sends output to Stage 1 and 2 generators.
    """
    def __init__(self, emb_dim=768, n_g=128, device="cuda"):  #! CHANGE THIS TO CUDA
        """
        @param emb_dim (int) : Size of annotation embeddings.
        @param n_g (int) : Dimension of mu, epsilon and c_0_hat
        @param device (torch.device) : cuda/cpu (kept for interface
            compatibility; sampling now follows the input tensor's device)
        """
        super(CAug, self).__init__()
        self.emb_dim = emb_dim
        self.n_g = n_g
        # One linear layer emits both mu and log-variance, hence n_g*2.
        self.fc = nn.Linear(self.emb_dim, self.n_g*2, bias=True)
        self.relu = nn.ReLU()
        self.device = device

    def forward(self, text_emb):
        """
        @param text_emb (torch.tensor): Text embedding. (batch, emb_dim)
        @returns c_0_hat (torch.tensor): Gaussian conditioning variable. (batch, n_g)
        """
        enc = self.relu(self.fc(text_emb)).squeeze(1)  # (batch, n_g*2)
        mu = enc[:, :self.n_g]       # (batch, n_g)
        logvar = enc[:, self.n_g:]   # (batch, n_g)
        sigma = (logvar * 0.5).exp_()  # exp(logvar * 0.5) = sqrt(var) = std
        # Reparameterization trick. FIX: replaces the deprecated
        # Variable(torch.FloatTensor(...).normal_()) pattern, which sampled
        # on the CPU and then moved to self.device — randn_like samples
        # directly with sigma's device and dtype, so the module also works
        # on CPU regardless of the 'device' constructor argument.
        epsilon = torch.randn_like(sigma)
        c_0_hat = epsilon * sigma + mu  # (batch, n_g)
        return c_0_hat, mu, logvar
######################### STAGE 1 #########################
class Stage1Generator(nn.Module):
    """
    Stage 1 generator.
    Takes in input from Conditional Augmentation and outputs 64x64 image to Stage1Discrimantor.
    """
    def __init__(self, n_g=128, n_z=100, emb_dim=768):
        """
        @param n_g (int) : Dimension of c_0_hat.
        @param n_z (int) : Dimension of noise vector.
        @param emb_dim (int) : Size of annotation embeddings.
        """
        super(Stage1Generator, self).__init__()
        self.n_g = n_g
        self.n_z = n_z
        self.emb_dim = emb_dim
        self.inp_ch = self.n_g * 8

        # Conditioning augmentation: (batch, emb_dim) -> (batch, n_g).
        self.caug = CAug(emb_dim=self.emb_dim)
        # Project [c_0_hat ; z] up to a flattened 4x4 map of inp_ch channels.
        self.fc = nn.Sequential(
            nn.Linear(self.n_g + self.n_z, self.inp_ch * 4 * 4, bias=False),
            nn.BatchNorm1d(self.inp_ch * 4 * 4),
            nn.ReLU(True),
        )
        # Four upsampling stages: 4 -> 8 -> 16 -> 32 -> 64, halving channels.
        self.up1 = _upsample(self.inp_ch, self.inp_ch // 2)
        self.up2 = _upsample(self.inp_ch // 2, self.inp_ch // 4)
        self.up3 = _upsample(self.inp_ch // 4, self.inp_ch // 8)
        self.up4 = _upsample(self.inp_ch // 8, self.inp_ch // 16)
        # Final 3x3 conv down to RGB, tanh-squashed to [-1, 1].
        self.img = nn.Sequential(
            conv3x3(self.inp_ch // 16, 3),
            nn.Tanh(),
        )

    def forward(self, text_emb, noise):
        """
        @param text_emb (torch.tensor) : Annotation embedding (batch, emb_dim)
        @param noise (torch.tensor) : Noise vector (batch, n_z)
        @returns (None, fake_img, mu, logvar) with fake_img of shape
            (batch, 3, 64, 64); the leading None keeps the return shape
            aligned with later-stage generators.
        """
        c_0_hat, mu, logvar = self.caug(text_emb)
        latent = torch.cat((c_0_hat, noise), dim=1)        # (batch, n_g + n_z)
        feat = self.fc(latent).view(-1, self.inp_ch, 4, 4)  # (batch, inp_ch, 4, 4)
        for stage in (self.up1, self.up2, self.up3, self.up4):
            feat = stage(feat)  # spatial size doubles, channels halve
        fake_img = self.img(feat)  # (batch, 3, 64, 64)
        return None, fake_img, mu, logvar
class Stage1Discriminator(nn.Module):
    """
    Stage 1 discriminator: scores 64x64 images conditioned on a text embedding.
    """
    def __init__(self, n_d=128, m_d=4, emb_dim=768, img_dim=64):
        super(Stage1Discriminator, self).__init__()
        self.n_d = n_d
        self.m_d = m_d
        self.emb_dim = emb_dim

        # Compress the text embedding to n_d channels for spatial tiling.
        self.fc_for_text = nn.Linear(self.emb_dim, self.n_d)

        # Image encoder: 64x64 -> 4x4, channels growing to img_dim * 8.
        self.down_sample = nn.Sequential(
            nn.Conv2d(3, img_dim, kernel_size=4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            _downsample(img_dim, img_dim*2),
            _downsample(img_dim*2, img_dim*4),
            _downsample(img_dim*4, img_dim*8),
        )

        # Fuse image code with tiled text code, then squash to one probability.
        self.out_logits = nn.Sequential(
            conv3x3(img_dim*8 + self.n_d, img_dim*8),
            nn.BatchNorm2d(img_dim*8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(img_dim*8, 1, kernel_size=4, stride=4),
            nn.Sigmoid(),
        )

    def forward(self, text_emb, img):
        """Return a flat (batch,) vector of real/fake probabilities."""
        img_code = self.down_sample(img)
        # Tile the compressed embedding over the m_d x m_d feature map.
        txt_code = self.fc_for_text(text_emb)
        txt_code = txt_code.unsqueeze(2).unsqueeze(3).repeat(1, 1, self.m_d, self.m_d)
        fused = torch.cat((img_code, txt_code), dim=1)
        score = self.out_logits(fused)
        return score.view(-1)
######################### STAGE 2 #########################
class Stage2Generator(nn.Module):
    """
    Stage 2 generator.

    Refines the frozen Stage 1 generator's 64x64 output into a 256x256 image
    which is scored by Stage2Discriminator.
    """
    def __init__(self, stage1_gen, n_g=128, n_z=100, ef_size=128, n_res=4, emb_dim=768):
        """
        @param stage1_gen (Stage1Generator) : Stage 1 generator (frozen here).
        @param n_g (int) : Dimension of c_0_hat.
        """
        super(Stage2Generator, self).__init__()
        self.n_g = n_g
        self.n_z = n_z
        self.ef_size = ef_size
        self.n_res = n_res
        self.emb_dim = emb_dim
        self.stage1_gen = stage1_gen
        # Stage 1 weights stay fixed while Stage 2 trains.
        for param in self.stage1_gen.parameters():
            param.requires_grad = False

        # Text embedding -> conditioning vector (batch, n_g).
        self.caug = CAug(emb_dim=self.emb_dim)

        # Encode the 64x64 Stage 1 image down to (n_g*4, 16, 16).
        self.encoder = nn.Sequential(
            conv3x3(3, n_g),
            nn.LeakyReLU(0.2, inplace=True),  #? Paper: leaky, code: relu
            _downsample(n_g, n_g*2),
            _downsample(n_g*2, n_g*4),
        )

        # Fuse the tiled conditioning vector with the image code:
        # (batch, ef_size + n_g*4, 16, 16) -> (batch, n_g*4, 16, 16).
        self.cat_conv = nn.Sequential(
            conv3x3(self.ef_size + self.n_g * 4, self.n_g * 4),
            nn.BatchNorm2d(self.n_g * 4),
            nn.ReLU(inplace=True),
        )

        # n_res residual blocks at (n_g*4, 16, 16).
        self.residual = nn.Sequential(
            *[ResBlock(self.n_g * 4) for _ in range(self.n_res)]
        )

        # Four doubling stages: 16 -> 32 -> 64 -> 128 -> 256.
        self.up1 = _upsample(n_g * 4, n_g * 2)
        self.up2 = _upsample(n_g * 2, n_g)
        self.up3 = _upsample(n_g, n_g // 2)
        self.up4 = _upsample(n_g // 2, n_g // 4)

        # Final RGB projection with tanh into [-1, 1].
        self.img = nn.Sequential(
            conv3x3(n_g // 4, 3),
            nn.Tanh()
        )

    def forward(self, text_emb, noise):
        """
        @param text_emb (torch.tensor) : Text embedding (batch, emb_dim)
        @param noise (torch.tensor) : Noise vector (batch, n_z)
        @returns (stage1_img, fake_img, mu, logvar) with fake_img (batch, 3, 256, 256)
        """
        _, stage1_img, _, _ = self.stage1_gen(text_emb, noise)
        stage1_img = stage1_img.detach()  # block gradients into Stage 1
        img_code = self.encoder(stage1_img)
        c_0_hat, mu, logvar = self.caug(text_emb)
        # Broadcast the conditioning vector over the 16x16 feature map.
        cond = c_0_hat.unsqueeze(2).unsqueeze(3).repeat(1, 1, 16, 16)
        fused = self.cat_conv(torch.cat((img_code, cond), dim=1))
        feat = self.residual(fused)
        for stage in (self.up1, self.up2, self.up3, self.up4):
            feat = stage(feat)
        fake_img = self.img(feat)  # (batch, 3, 256, 256)
        return stage1_img, fake_img, mu, logvar
class Stage2Discriminator(nn.Module):
    """
    Stage 2 discriminator: scores 256x256 images conditioned on a text embedding.
    """
    def __init__(self, n_d=128, m_d=4, emb_dim=768, img_dim=256):
        super(Stage2Discriminator, self).__init__()
        self.n_d = n_d
        self.m_d = m_d
        self.emb_dim = emb_dim

        # Compress the text embedding to n_d channels for spatial tiling.
        self.fc_for_text = nn.Linear(self.emb_dim, self.n_d)

        # Image encoder: 256x256 -> 4x4, then two 3x3 convs shrink channels
        # back down to img_dim*2 before fusion with the text code.
        self.down_sample = nn.Sequential(
            nn.Conv2d(3, img_dim//4, kernel_size=4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            _downsample(img_dim//4, img_dim//2),
            _downsample(img_dim//2, img_dim),
            _downsample(img_dim, img_dim*2),
            _downsample(img_dim*2, img_dim*4),
            _downsample(img_dim*4, img_dim*8),
            conv3x3(img_dim*8, img_dim*4),
            nn.BatchNorm2d(img_dim*4),
            nn.LeakyReLU(0.2, inplace=True),
            conv3x3(img_dim * 4, img_dim * 2),
            nn.BatchNorm2d(img_dim * 2),
            nn.LeakyReLU(0.2, inplace=True),
        )

        # Fuse image code with tiled text code, then squash to one probability.
        self.out_logits = nn.Sequential(
            conv3x3(img_dim*2 + self.n_d, img_dim*2),
            nn.BatchNorm2d(img_dim*2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(img_dim*2, 1, kernel_size=4, stride=4),
            nn.Sigmoid(),
        )

    def forward(self, text_emb, img):
        """Return a flat (batch,) vector of real/fake probabilities."""
        img_code = self.down_sample(img)
        txt_code = self.fc_for_text(text_emb)
        txt_code = txt_code.unsqueeze(2).unsqueeze(3).repeat(1, 1, self.m_d, self.m_d)
        fused = torch.cat((img_code, txt_code), dim=1)
        score = self.out_logits(fused)
        return score.view(-1)
######################### #########################
if __name__ == "__main__":
    # Shape smoke test: run both GAN stages end-to-end on random inputs.
    batch_size = 2
    n_z = 100
    emb_dim = 1024 # 768
    emb = torch.randn((batch_size, emb_dim))
    noise = torch.empty((batch_size, n_z)).normal_()
    generator1 = Stage1Generator(emb_dim=emb_dim)
    generator2 = Stage2Generator(generator1, emb_dim=emb_dim)
    discriminator1 = Stage1Discriminator(emb_dim=emb_dim)
    discriminator2 = Stage2Discriminator(emb_dim=emb_dim)
    # Stage 1: coarse 64x64 image from embedding + noise.
    _, gen1, _, _ = generator1(emb, noise)
    print("output1 image dimensions :", gen1.size()) # (batch_size, 3, 64, 64)
    assert gen1.shape == (batch_size, 3, 64, 64)
    print()
    disc1 = discriminator1(emb, gen1)
    print("output1 discriminator", disc1.size()) # (batch_size)
    # assert disc1.shape == (batch_size)
    print()
    # Stage 2: refined 256x256 image built on the Stage 1 output.
    _, gen2, _, _ = generator2(emb, noise)
    print("output2 image dimensions :", gen2.size()) # (batch_size, 3, 256, 256)
    assert gen2.shape == (batch_size, 3, 256, 256)
    print()
    disc2 = discriminator2(emb, gen2)
    print("output2 discriminator", disc2.size()) # (batch_size)
    # assert disc2.shape == (batch_size)
    print()
    # Conditional augmentation exercised in isolation (explicit cpu device).
    ca = CAug(emb_dim=emb_dim, n_g=128, device='cpu')
    out_ca, _, _ = ca(emb)
    print("Conditional Aug output size: ", out_ca.size()) # (batch_size, 128)
    assert out_ca.shape == (batch_size, 128)
    ###* Checking init weights
    # import engine
    # netG = Stage1Generator()
    # netG.apply(engine.weights_init)
    pass
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""The core plotting utilities."""
from dataclasses import dataclass, replace
from typing import Optional, Tuple
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
# flake8: noqa: E402
from staticchar.basic_types import TimePeriod
def get_figure_and_axes(ax: Optional[plt.Axes]) -> Tuple[Optional[plt.Figure], plt.Axes]:
    """Return a (figure, axes) pair, creating both only if *ax* was not provided.

    When the caller already supplies axes, no new figure is created and the
    figure slot of the returned tuple is None.
    """
    if ax is not None:
        return None, ax
    return plt.subplots()
def mark_phase(
    ax: plt.Axes,
    point: Optional[float] = None,
    interval: Optional[TimePeriod] = None,
    color: str = "C3",
    alpha: float = 0.3,
) -> None:
    """Shade the vertical region *interval* and draw a vertical line at *point*.

    Args:
        ax: axes to draw on (mutated in place)
        point: X-axis position of the vertical line; skipped when None
        interval: X-axis extent of the shaded region; skipped when None
        color: color shared by the line and the region
        alpha: opacity of the shaded region only

    Note:
        This function is not pure: it modifies *ax*.
    """
    # Region first, then the line, so the line is drawn on top of the shading.
    if interval is not None:
        ax.axvspan(interval.tmin, interval.tmax, alpha=alpha, color=color)
    if point is not None:
        ax.axvline(point, color=color)
@dataclass
class AnnotationSpec:
    """Flags selecting which annotations `apply` writes onto an axes."""
    legend: bool = False
    title: bool = False
    xlabel: bool = False
    ylabel: bool = False
    def copy(self) -> "AnnotationSpec":  # pragma: no cover
        # Shallow copy via dataclasses.replace with no overrides.
        return replace(self)
    def apply(
        self, ax: plt.Axes, title: Optional[str] = None, xlabel: Optional[str] = None, ylabel: Optional[str] = None
    ):
        """Apply the enabled annotations to *ax*; None-valued texts are skipped."""
        if self.legend:
            ax.legend()
        if self.title and title is not None:
            ax.set_title(title)
        if self.xlabel and xlabel is not None:
            ax.set_xlabel(xlabel)
        if self.ylabel and ylabel is not None:
            ax.set_ylabel(ylabel)
|
class IParamProvider(object):
    """Interface: infer the type of a single function parameter."""
    def __call__(self, pyfunc, param_name):
        """Resolve the type of parameter *param_name* of *pyfunc*.

        :type pyfunc: rope.base.pyobjectsdef.PyFunction
        :type param_name: str
        :rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
        """
        raise NotImplementedError
class IReturnProvider(object):
    """Interface: infer the return type of a function.

    :type resolve: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
    """
    # Set by the framework to a resolver instance before use.
    resolve = None
    def __call__(self, pyfunc):
        """Resolve the return type of *pyfunc*.

        :type pyfunc: rope.base.pyobjectsdef.PyFunction
        :rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
        """
        raise NotImplementedError
class IAssignmentProvider(object):
    """Interface: infer the type of an assigned name.

    :type resolve: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
    """
    # Set by the framework to a resolver instance before use.
    resolve = None
    def __call__(self, pyname):
        """Resolve the type of the assignment *pyname*.

        :type pyname: rope.base.pynamesdef.AssignedName
        :rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
        """
        raise NotImplementedError
|
#!/usr/bin/env python
import asyncio
import argparse
import io
import re
import aiohttp
import pandas as pd
import requests
import pytrthree
from apscheduler.schedulers.asyncio import AsyncIOScheduler
TRTH_HTTP_LIST = 'http://tickhistory.thomsonreuters.com/HttpPull/List'
TRTH_HTTP_DWLD = 'https://tickhistory.thomsonreuters.com/HttpPull/Download'
class Downloader:
    """Concurrent downloader for TRTH result files served over HTTP.

    Lists the available result files once at construction time, then streams
    the files matching ``--regex`` to disk with at most ``args.max``
    simultaneous downloads, logging per-file progress every five seconds.
    """

    def __init__(self, args):
        self.args = args
        self.api = pytrthree.TRTH(config=args.config)
        self.credentials = {'user': self.api.config['credentials']['username'],
                            'pass': self.api.config['credentials']['password']}
        self.results = self.list_results()
        z = zip(self.results['name'].apply(self.parse_fname), self.results['size'])
        # Per-file progress: bytes downloaded, expected total, and state
        # (None = pending, 'D' = downloading, 'C' = complete).
        self.progress = {fname: dict(downloaded=0, total=total, state=None) for fname, total in z}
        # Request id -> list of local file names belonging to that request.
        self.requests = {group: data['name'].apply(self.parse_fname).tolist()
                         for group, data in self.results.groupby('id')}
        self.loop = asyncio.get_event_loop()
        self.scheduler = AsyncIOScheduler()
        self.scheduler.add_job(self.print_progress, 'interval', seconds=5)
        # Caps the number of simultaneous downloads at args.max.
        self.semaphore = asyncio.Semaphore(args.max)

    def start(self):
        """Download (or dry-run) every listed file whose name matches --regex."""
        # Fix: previously read the module-level `args` global instead of the
        # arguments this instance was constructed with, breaking library use.
        files = [f for f in self.results['name'] if re.search(self.args.regex, f)]
        file_list = '\n'.join([f.split('/')[-1] for f in files])
        self.api.logger.info(f'Downloading {len(files)} files:\n{file_list}')
        if not self.args.dryrun:
            fut = asyncio.gather(*[self.download(f) for f in files])
            self.scheduler.start()
            self.loop.run_until_complete(fut)

    @staticmethod
    def parse_fname(x):
        """Strip the first two dash-separated tokens from a result name."""
        return '-'.join(x.split('-')[2:])

    def list_results(self):
        """Fetch the /api-results CSV listing and return it as a DataFrame."""
        params = {'dir': '/api-results', 'mode': 'csv', **self.credentials}
        r = requests.get(TRTH_HTTP_LIST, params=params)
        df = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
        df.columns = ['type', 'name', 'size', 'date']
        types = df['name'].apply(pytrthree.utils.parse_rid_type).apply(pd.Series)
        df['id'] = types[0]
        # Files with an empty type suffix are the first part of a multipart result.
        df['type'] = types[1].replace('', 'part000')
        return df

    async def download(self, file):
        """Stream one file, holding a semaphore slot for the whole transfer."""
        async with aiohttp.ClientSession() as session:
            params = {'file': file, **self.credentials}
            async with self.semaphore, session.get(TRTH_HTTP_DWLD, params=params, timeout=None) as resp:
                await self.save_stream(resp, file)

    async def save_stream(self, resp, file):
        """Write the response body to disk in 256 KiB chunks, tracking progress."""
        filename = self.parse_fname(file)
        # Fix: the log messages had lost their placeholder (they logged the
        # literal text "(unknown)"); presumably the file name was intended.
        self.api.logger.info(f'Downloading {filename}')
        self.progress[filename]['state'] = 'D'
        with open(filename, 'wb') as f:
            while True:
                chunk = await resp.content.read(256*1024)
                self.progress[filename]['downloaded'] += len(chunk)
                if not chunk:
                    break
                f.write(chunk)
        # Bytes received may disagree slightly with the listed size; snap to total.
        self.progress[filename]['downloaded'] = self.progress[filename]['total']
        self.progress[filename]['state'] = 'C'
        self.api.logger.info(f'Finished downloading {filename}')
        if self.args.cancel:
            self.maybe_cancel_request(filename)

    def maybe_cancel_request(self, filename):
        """Cancel the originating request once all of its parts have completed."""
        rid = pytrthree.utils.parse_rid_type(filename)[0]
        completed = [self.progress[fname]['state'] == 'C' for fname in self.requests[rid]]
        report = [pytrthree.utils.parse_rid_type(fname)[1] == 'report' for fname in self.requests[rid]]
        if all(completed) and any(report):
            self.api.logger.info(f'Canceling {rid}')
            # TODO: actual cancellation is still disabled.
            # self.api.cancel_request()
            # api.cancel

    def print_progress(self):
        """Log percent progress of in-flight downloads plus a completion count."""
        completed = 0
        for fname, progress in self.progress.items():
            if progress['state'] == 'D':
                pct = progress['downloaded'] / progress['total']
                self.api.logger.info(f'{fname}: {pct:.1%}')
            elif progress['state'] == 'C':
                completed += 1
        if completed:
            self.api.logger.info(f'Completed: {completed}')
if __name__ == '__main__':
    # CLI entry point: parse arguments, then run the downloader synchronously.
    parser = argparse.ArgumentParser(description='Download TRTH files from HTTP.')
    parser.add_argument('--config', action='store', type=argparse.FileType('r'), required=True,
                        help='TRTH API configuration (YAML file)')
    parser.add_argument('--max', action='store', type=int, default=10,
                        help='Maximum number of concurrent downloads. Default: 10.')
    parser.add_argument('--regex', action='store', type=str, default='.*',
                        help='Option regular expression to filter which files to download.')
    parser.add_argument('--cancel', action='store_true',
                        help='Whether or not to cancel requests after all parts have been downloaded.')
    parser.add_argument('--dryrun', action='store_true',
                        help='Dry run mode. Use this to test which files will be downloaded.')
    args = parser.parse_args()
    downloader = Downloader(args)
    downloader.start()
|
#!/usr/bin/env python
u"""
test_legendre.py (11/2021)
"""
import numpy as np
import gravity_toolkit
#-- PURPOSE: test unnormalized Legendre polynomials
def test_unnormalized(l=3, x=[-1.0, -0.9, -0.8]):
    """Check unnormalized Legendre polynomials against reference values."""
    obs = gravity_toolkit.legendre(l, x)
    expected = np.array([
        [-1.00000, -0.47250, -0.08000],
        [0.00000, -1.99420, -1.98000],
        [0.00000, -2.56500, -4.32000],
        [0.00000, -1.24229, -3.24000]
    ])
    # np.allclose(atol=1e-05) is equivalent to np.isclose(...).all() here.
    assert np.allclose(obs, expected, atol=1e-05)
#-- PURPOSE: test fully-normalized Legendre polynomials
def test_normalized(l=3, x=[-1.0, -0.9, -0.8]):
    """Check fully-normalized Legendre polynomials against reference values."""
    obs = gravity_toolkit.legendre(l, x, NORMALIZE=True)
    expected = np.array([
        [-2.64575, -1.25012, -0.21166],
        [-0.00000, 2.15398, 2.13864],
        [0.00000, -0.87611, -1.47556],
        [-0.00000, 0.17323, 0.45180]
    ])
    # np.allclose(atol=1e-05) is equivalent to np.isclose(...).all() here.
    assert np.allclose(obs, expected, atol=1e-05)
#-- PURPOSE: test fully-normalized zonal Legendre polynomials
def test_zonal(l=3, x=[-1.0, -0.9, -0.8]):
    """Check fully-normalized zonal Legendre polynomials against reference values."""
    obs, _ = gravity_toolkit.legendre_polynomials(l, x)
    expected = np.array([
        [1.00000, 1.00000, 1.00000],
        [-1.73205, -1.55885, -1.38564],
        [2.23607, 1.59879, 1.02859],
        [-2.64575, -1.25012, -0.21166],
    ])
    # np.allclose(atol=1e-05) is equivalent to np.isclose(...).all() here.
    assert np.allclose(obs, expected, atol=1e-05)
#-- PURPOSE: compare fully-normalized Legendre polynomials
def test_plms(l=240, x=0.1):
    """Cross-validate four implementations of fully-normalized Legendre polynomials."""
    obs = gravity_toolkit.legendre(l, x, NORMALIZE=True)
    #-- calculate associated Legendre polynomials with each algorithm
    plm_holmes, _ = gravity_toolkit.plm_holmes(l, x)
    plm_colombo, _ = gravity_toolkit.plm_colombo(l, x)
    plm_mohlenkamp = gravity_toolkit.plm_mohlenkamp(l, x)
    #-- all implementations must agree (default isclose/allclose tolerances)
    assert np.allclose(obs, plm_holmes[l, :])
    assert np.allclose(plm_holmes, plm_colombo)
    assert np.allclose(plm_holmes, plm_mohlenkamp)
|
import numpy as np
import os
import os.path as osp
import shutil
from mmcv.utils import check_file_exist, is_str, mkdir_or_exist
import argparse
import cv2
from mmcv.fileio import FileClient
import mmcv
def reorder_image(img, input_order='HWC'):
    """Return *img* in 'HWC' layout.

    (h, w) inputs gain a trailing channel axis; (c, h, w) inputs are
    transposed; (h, w, c) inputs pass through unchanged.

    Args:
        img (ndarray): Input image.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            If the input image shape is (h, w), input_order will not have
            effects. Default: 'HWC'.

    Returns:
        ndarray: reordered image.

    Raises:
        ValueError: for an unrecognised *input_order*.
    """
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    if img.ndim == 2:
        # Grayscale: add a singleton channel axis.
        return img[..., None]
    if input_order == 'CHW':
        return img.transpose(1, 2, 0)
    return img
def psnr(img1, img2, crop_border=0, input_order='HWC'):
    """Calculate PSNR (Peak Signal-to-Noise Ratio).

    Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edges of an image. These
            pixels are not involved in the PSNR calculation. Default: 0.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.

    Returns:
        float: psnr result (inf for identical images).
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    # Fix: compute in float64 -- uint8 inputs previously wrapped around on
    # subtraction, producing a wrong MSE and hence a wrong PSNR.
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    if crop_border != 0:
        # Fix: `...` keeps the channel axis; the earlier `None` index inserted
        # a spurious new axis (harmless for the mean but a wrong shape).
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    mse = np.mean((img1 - img2)**2)
    if mse == 0:
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse))
if __name__ == "__main__":
    # Self-ensemble merge: average four model outputs -- presumably produced
    # from the original frames and their flipped variants (suffixes _h, _w,
    # _wh); the flips are undone before averaging. TODO confirm against the
    # generation script.
    filepath_train = '/root/cwt1/ntire2021/work_dirs/edvr_g8_600k_large_s2_l_compress/results_600k_train/'
    filepath_train_h = '/root/cwt1/ntire2021/work_dirs/edvr_g8_600k_large_s2_l_compress/results_600k_train_h/'
    filepath_train_w = '/root/cwt1/ntire2021/work_dirs/edvr_g8_600k_large_s2_l_compress/results_600k_train_w/'
    filepath_train_wh = '/root/cwt1/ntire2021/work_dirs/edvr_g8_600k_large_s2_l_compress/results_600k_train_wh/'
    filepath_merge = '/root/cwt1/ntire2021/work_dirs/edvr_g8_600k_large_s2_l_compress/results_600k_train_merge/'
    filepath_gt = '/root/cwt1/ntire2021/data/video_compress_track2/images/train_raw/'
    # Sequence folders are numbered 001..020.
    frame_names = range(1, 21)
    count = 0
    psnr_mean = []
    for frame in frame_names:
        print("frame:", frame)
        _train_path = os.path.join(filepath_train, f'{frame:03d}')
        _train_path_h = os.path.join(filepath_train_h, f'{frame:03d}')
        _train_path_w = os.path.join(filepath_train_w, f'{frame:03d}')
        _train_path_wh = os.path.join(filepath_train_wh, f'{frame:03d}')
        _gt_path = os.path.join(filepath_gt, f'{frame:03d}')
        _save_path = os.path.join(filepath_merge, f'{frame:03d}')
        mkdir_or_exist(_save_path)
        imagenames = os.listdir(_train_path)
        for imagename in imagenames:
            count += 1
            if count % 100 == 0:
                print(count)
            # if count > 10:
            # break
            img_train = cv2.imread(os.path.join(_train_path, imagename))
            img_train_h = cv2.imread(os.path.join(_train_path_h, imagename))
            img_train_w = cv2.imread(os.path.join(_train_path_w, imagename))
            img_train_wh = cv2.imread(os.path.join(_train_path_wh, imagename))
            # Undo the flips (0=vertical, 1=horizontal, -1=both) and average
            # the four aligned predictions in float32 to avoid uint8 overflow.
            img_train = img_train.astype(np.float32)
            img_train_h = cv2.flip(img_train_h, 0).astype(np.float32)
            img_train_w = cv2.flip(img_train_w, 1).astype(np.float32)
            img_train_wh = cv2.flip(img_train_wh, -1).astype(np.float32)
            image_save = (img_train + img_train_w + img_train_h +
                          img_train_wh) / 4
            image_save = image_save.astype(np.uint8)
            imagename_save_path = os.path.join(_save_path, imagename)
            cv2.imwrite(imagename_save_path, image_save)
|
from __future__ import absolute_import
import subprocess
import sys
from typing import List
from colorama import Fore
import kslurm.text as txt
from kslurm.args import print_help
from kslurm.models import SlurmModel
from kslurm.slurm import SlurmCommand
def kbatch(script: str, args: List[str]):
    """Submit a job using sbatch
    Supports scripts (e.g. ./script.sh) or direct commands (e.g. cp dir/file.txt dir2)
    When your command contains bash interpreted elements such as $VARIABLES and
    $(subshells), these will be immediately expanded. Normally, this behaviour
    is fine, but sometimes they should only be interpretted on the allocated cluster.
    For instance, $SLURM_TMPDIR will evaluate to "" unless it is interpretted later.
    To force this behaviour, wrap the $VARIABLE or $(subshell) in quotes:
        '$SLURM_TMPDIR'
        '$(hostname)'
    """
    # NOTE: the docstring above doubles as runtime help text (passed to
    # print_help below), so its wording is user-facing -- do not edit casually.
    slurm = SlurmCommand(args, SlurmModel())
    if slurm.help:
        print_help(script, SlurmModel(), kbatch.__doc__)  # type: ignore
        exit()
    # If no command was parsed, show a red error string in the command slot.
    command = slurm.command if slurm.command else f"{Fore.RED}Must provide a command"
    print(txt.KBATCH_MSG.format(slurm_args=slurm.slurm_args, command=command))
    if slurm.command:
        # Run sbatch via the shell, merging stderr into stdout for one report.
        proc = subprocess.run(
            slurm.batch, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        out = proc.stdout.decode()
        if proc.returncode != 0:
            # sbatch failed: surface its output and propagate the exit code.
            print(Fore.WHITE + out)
            return proc.returncode
        if slurm.test:
            # output will be the issued command, so we print it
            print(Fore.WHITE + out)
        else:
            # We subtract the last character of the output
            # to remove the final "\n" character and get the
            # job_id
            slurmid = out[:-1]
            print(
                f"""Scheduled job {slurmid}.
                To cancel, run:
                    scancel {slurmid}
                """
            )
def main():
    # argv[0] (the invoked program name) is used for the help text; the
    # remaining argv entries are forwarded as slurm/command arguments.
    kbatch(sys.argv[0], sys.argv[1:])
|
class Solution(object):
    def minTaps(self, n, ranges):
        """
        :type n: int
        :type ranges: List[int]
        :rtype: int

        Greedy interval cover: convert each tap to the interval it waters,
        then repeatedly open, among all taps reaching the already-watered
        prefix, the one that extends coverage furthest to the right.
        """
        intervals = [(i - r, i + r) for i, r in enumerate(ranges)]
        # Sort descending so the smallest left endpoints sit at the tail
        # and can be consumed cheaply with pop().
        intervals.sort(reverse=True)
        covered = 0
        taps = 0
        while intervals:
            reachable = []
            while intervals and intervals[-1][0] <= covered:
                reachable.append(intervals.pop()[1])
            if not reachable:
                # Gap that no remaining tap can bridge.
                return -1
            covered = max(reachable)
            taps += 1
            if covered >= n:
                return taps
        return taps
# Smoke test: the expected minimum number of taps for this garden is 3
# (the return value is discarded here).
abc = Solution()
abc.minTaps(7, [1,2,1,0,2,1,0,1])
|
class OpenTAXIIAuthAPI(object):
    '''Abstract class that represents OpenTAXII Authentication API.
    This class defines required methods that need to exist in
    a specific Authentication API implementation.
    '''
    def init_app(self, app):
        # Hook for binding this API to an application object; no-op by default.
        pass
    def authenticate(self, username, password):
        '''Authenticate a user.
        :param str username: username
        :param str password: password
        :return: auth token
        :rtype: string
        '''
        raise NotImplementedError()
    def get_account(self, token):
        '''Get account for auth token.
        :param str token: auth token
        :return: an account entity
        :rtype: `opentaxii.entities.Account`
        '''
        raise NotImplementedError()
    def create_account(self, account, password):
        '''Create an account.
        :param account: an account entity to create
        :param str password: password
        :return: an account entity
        :rtype: `opentaxii.entities.Account`
        '''
        raise NotImplementedError()
    def update_account(self, obj, password=None):
        '''Update an account.
        :param AccountEntity obj: an updated user entity (old one matched by username)
        :param str password: a new password
        :return: an account entity
        :rtype: `opentaxii.entities.Account`
        '''
        raise NotImplementedError()
    def get_accounts(self):
        '''Return all account entities (presumably; confirm with implementations).'''
        raise NotImplementedError()
    def delete_account(self, username):
        '''Delete the account matching *username*.'''
        raise NotImplementedError()
|
"""
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom of the triangle below:
"""
# Triangle data for Project Euler problem 18 (15 rows).
inputdata = """
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
"""
# Drop the empty first entry created by the leading newline in the literal.
lines = inputdata.splitlines()[1:]
# Parse into a ragged 2D list of ints (row i has i+1 entries).
arr = [[int(word) for word in line.split()] for line in lines]
print arr
# simple solution
# Bottom-up dynamic programming: collapse the triangle row by row, adding to
# each cell the larger of its two children; the apex ends up with the answer.
for i in range(len(arr)-2, -1, -1):
    for j in range(0, i+1):
        arr[i][j] += max(arr[i+1][j], arr[i+1][j+1])
print arr[0][0]
# Below doesn't work, becomes complicated if it should be
# tried to be fixed....
#
# attempt with Djikstras shortest path variant
# Seems to work on simple cases that I can work out by hand. Finds a
# solution that is 10 less than the correct solution, unknown why
class Node:
    """A node is an entry in the triangle tree.
    The graph is a 2D array"""
    def __init__(self, vertex, value, graph):
        # vertex: (row, col) position; value: the triangle entry; graph: owning Graph.
        self.vertex, self.value, self.graph = vertex, value, graph
        self.dist = 0      # working distance used by the Dijkstra variant
        self.prev = None   # best predecessor on the path from the apex
        self.next = None   # forward link, set by setPrev on the predecessor
    def neighbors(self):
        """Return the (up to two) child Nodes on the row below this one."""
        x, y = self.vertex
        neighbor = []
        if x < len(self.graph.array) - 1:
            neighbor.append((x+1, y))
            neighbor.append((x+1, y+1))
        neighbor = [self.graph.nodes[vertex] for vertex in neighbor]
        return neighbor
    def __str__(self):
        x, y = self.vertex
        return "(%d, %d): dist = %d" % (x, y, self.dist)
    def setPrev(self, prevNode):
        """Record prevNode as this node's predecessor and link it forward to self."""
        self.prev = prevNode
        prevNode.next = self
    def addToDistAndPropagate(self, addon):
        """Add `addon` to dist and recurse into children already removed from Q."""
        self.dist += addon
        for v in self.neighbors():
            if v.vertex not in self.graph.Q:
                v.addToDistAndPropagate(addon)
    def getdist(self):
        """Sum the node values along the recorded prev-chain back to the apex."""
        total = self.value
        n = self.prev
        while n is not None:
            total += n.value
            n = n.prev
        return total
class Graph:
    """The graph represents the whole triangle.

    Q maps vertex -> Node for the not-yet-visited set consumed by Dijkstra;
    `nodes` keeps a (shallow-copied) lookup of every node for later inspection.
    """
    def __init__(self, array):
        self.array = array
        self.Q = {}
        for i, row in enumerate(array): # Initialization
            for j, v in enumerate(row):
                n = Node((i, j), v, self)
                self.Q[n.vertex] = n
        self.nodes = self.Q.copy()
    def maxNode(self, nodeDict):
        """Return the node with the largest `dist` in nodeDict."""
        mn = nodeDict.itervalues().next()  # Python 2 idiom; any seed value works
        for node in nodeDict.values():
            if node.dist > mn.dist:
                mn = node
        return mn
    def __str__(self):
        # Fix: previously iterated the module-level global `graph` instead of
        # this instance, so printing any other Graph showed the wrong nodes.
        s = ""
        for node in self.nodes.values():
            s += str(node) + "\n"
        return s[:-1]
def Dijkstra(arr):
    """Longest-path variant of Dijkstra over the triangle.
    NOTE(review): known to be broken -- the comments above record that it
    comes out 10 short of the correct answer; kept for reference only.
    """
    graph = Graph(arr) # Initialization
    graph.Q[(0, 0)].dist = graph.Q[(0, 0)].value # dist from source to source
    while graph.Q: # while not empty
        # Visit the unvisited node with the largest working distance.
        u = graph.maxNode(graph.Q)
        for v in u.neighbors():
            # Candidate distance via u, recomputed from the prev-chain.
            alt = u.getdist() + v.value
            if alt > v.getdist():
                # v.addToDistAndPropagate(alt - v.dist)
                v.setPrev(u)
        graph.Q.pop(u.vertex)
    return graph
# works for all of these
# NOTE(review): each assignment below overwrites the previous one; only the
# final `arr` (rebuilt from the problem data) is actually solved.
arr = [[10],
       [8, 9],
       [7, 6, 5],
       [1, 2, 3, 4]]
arr = [[5],
       [1, 4],
       [2, 1, 1],
       [5, 3, 1, 1]]
arr = [[3],
       [7, 4],
       [2, 4, 6],
       [8, 5, 9, 3]]
arr = [[1],
       [1, 1],
       [1, 1, 1],
       [1, 1, 1, 1]]
#
# get 10 short for the given triangle
arr = [[int(word) for word in line.split()] for line in lines]
# solver
graph = Dijkstra(arr)
print graph
# Trace the recorded prev-chain back from the best final node to the apex.
u = graph.maxNode(graph.nodes)
road = []
while u.prev is not None:
    road.append(u)
    u = u.prev
road.append(u)
road = road[::-1]
print "Road:"
total = sum([v.value for v in road])
for node in road:
    print node
u = graph.maxNode(graph.nodes)
print "Max final node:", total
|
from ..utils import Object
class GetLogVerbosityLevel(Object):
    """
    Returns current verbosity level of the internal logging of TDLib. This is an offline method. Can be called before authorization. Can be called synchronously

    Attributes:
        ID (:obj:`str`): ``GetLogVerbosityLevel``

    No parameters required.

    Returns:
        LogVerbosityLevel

    Raises:
        :class:`telegram.Error`
    """
    ID = "getLogVerbosityLevel"

    def __init__(self, extra=None, **kwargs):
        # No TDLib parameters: only the optional client-side `extra` tag is kept.
        self.extra = extra

    @staticmethod
    def read(q: dict, *args) -> "GetLogVerbosityLevel":
        # The request carries no fields, so the payload `q` is ignored.
        return GetLogVerbosityLevel()
|
import stringrnautils
import argparse
import os
import logging
logger = logging.getLogger(os.path.basename(__file__))
logging.basicConfig(level=logging.INFO)
def is_valid_path(arg):
    """argparse ``type=`` callable: return *arg* unchanged if it exists on disk.

    Raises argparse.ArgumentTypeError otherwise, which argparse converts into
    the usual usage error and exit.  (The original called the module-level
    ``parser`` directly, coupling this function to a global that only exists
    when the file runs as a script.)
    """
    if not os.path.exists(arg):
        raise argparse.ArgumentTypeError(
            "Given master file path %s does not exist." % arg)
    return arg
def main():
    # Combine the two experiment master files into `experiments.tsv`,
    # benchmarking against the gold standard given on the command line.
    # NOTE(review): reads the module-level `args` parsed under __main__.
    stringrnautils.combine_masterfiles(("miRTarBase_NPInter_SBexcluded.tsv", "starbase.tsv"),
                                       'experiments.tsv', args.gold_standard_file, 'experiments', 40,
                                       negative_evidence=False,rebenchmark_everything=True,
                                       ignore_fraction=0.3)
if __name__ == '__main__':
    logger.info("Integrating Experiments channel.")
    # `args` is left at module scope on purpose: main() reads it as a global.
    parser = argparse.ArgumentParser()
    parser.add_argument('gold_standard_file', type=is_valid_path,
                        help='The gold standard file to benchmark against.')
    args = parser.parse_args()
    main()
    logger.info('Done.' + os.linesep + os.linesep)
|
#!/usr/bin/env python
import os
def test(testSuite):
    """Run each demo script in ../demo; return 1 if all succeed, 0 on first failure.

    `testSuite` is accepted for harness compatibility but is unused.
    """
    demos = [
        # "dom_from_html_file.py employee_table.html",
        "dom_from_xml_file.py addr_book1.xml",
        # "generate_html1.py",
        "iterator1.py addr_book1.xml",
        "visitor1.py addr_book1.xml",
        "trace_ns.py book_catalog1.xml",
        "xll_replace.py addr_book1.xml",
        "xpointer_query.py root\(\).child\(1\) addr_book1.xml",
    ]
    for demo in demos:
        # os.system returns the shell's exit status; non-zero means failure.
        rt = os.system("cd ../demo && python " + demo)
        if rt:
            return 0
    return 1
|
#Needs rewriting, and splitting into funcs
from collections import deque
# def create_bomb(material_sum, target):
# if material_sum == target:
# if target == 40:
# detura_b += 1
# if target == 60:
# cherry_b += 1
# if target == 120:
# smoke_b += 1
# bombs.popleft()
# Bomb effects are consumed from the left end; casings from the right end.
bombs = deque([int(x) for x in input().split(", ")])
casings = [int(x) for x in input().split(", ")]
# Completed-bomb counters (sums: Datura=40, Cherry=60, Smoke Decoy=120).
detura_b = 0
cherry_b = 0
smoke_b = 0
while True:
    # NOTE(review): assumes both input lines are non-empty -- an empty line
    # would raise IndexError here before the emptiness check below runs.
    current_bomb = bombs[0]
    current_casing = casings[-1]
    sum_b_c = current_bomb + current_casing
    if sum_b_c == 40:
        detura_b += 1
        bombs.popleft()
        casings.pop()
    elif sum_b_c == 60:
        cherry_b += 1
        bombs.popleft()
        casings.pop()
    elif sum_b_c == 120:
        smoke_b += 1
        bombs.popleft()
        casings.pop()
    else:
        # No bomb can be made: grind 5 off the current casing and retry.
        casings[-1] -= 5
    if not bombs or not casings:
        print("You don't have enough materials to fill the bomb pouch.")
        break
    if detura_b >= 3 and cherry_b >= 3 and smoke_b >= 3:
        print("Bene! You have successfully filled the bomb pouch!")
        break
# Final report: leftover materials and per-type bomb counts.
if bombs:
    print(f"Bomb Effects: {', '.join(map(str, bombs))}")
else:
    print(f"Bomb Effects: empty")
if casings:
    print(f"Bomb Casings: {', '.join(map(str, casings))}")
else:
    print(f"Bomb Casings: empty")
print(f"Cherry Bombs: {cherry_b}")
print(f"Datura Bombs: {detura_b}")
print(f"Smoke Decoy Bombs: {smoke_b}")
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Drift Monitoring Lab
# MAGIC
# MAGIC In this lab, you will look at simulated data for an ice cream shop. This data contains a first and second period of data, like we saw in the previous lesson. Your job is to use the techniques you just learned to identify any potential drift occurring across the two time periods.
# MAGIC
# MAGIC The data contains the following columns:
# MAGIC
# MAGIC **Numeric:**
# MAGIC * `temperature`: The temperature on the given day
# MAGIC * `number_of_cones_sold`: The number of ice cream cones sold on the given day
# MAGIC * `number_bowls_sold`: The number of bowls sold, as opposed to cones
# MAGIC * `total_store_sales`: The total amount of money in sales done by the other, non ice cream products at the shop.
# MAGIC * `total_sales_predicted`: Our imaginary model's prediction for the total_store_sales that day.
# MAGIC
# MAGIC **Categorical:**
# MAGIC * `most_popular_ice_cream_flavor`: The most popular ice cream flavor on a given day
# MAGIC * `most_popular_sorbet_flavor`: The most popular sorbet flavor on a given day
# MAGIC
# MAGIC
# MAGIC In this situation, we have an imaginary model attempting to predict the total sales at the store of other, non ice cream items at the store, such as t-shirts or other merchandise.
# MAGIC Given the first and second time period of simulated data, identify any potential drift and analyze how you might handle it.
# COMMAND ----------
# MAGIC %run "../../Includes/Drift-Monitoring-Setup"
# COMMAND ----------
# MAGIC %md Let's take a look at the first time period ice cream dataframe!
# COMMAND ----------
df.head()
# COMMAND ----------
# MAGIC %md You will try to identify the forms of simulated drift in this dataset. The dataset was changed in the following ways:
# MAGIC
# MAGIC 1. An upstream data management error converted Fahrenheit to Celsius
# MAGIC 2. The number of cones sold stayed constant
# MAGIC 3. The most popular flavor of ice cream distribution changed, but no nulls were introduced
# MAGIC 4. Bowls became more popular, and the number of bowls sold increased
# MAGIC 5. The most popular sorbet flavors had nulls introduced, and although they are still evenly distributed, the counts thus changed
# MAGIC 6. The `total_store_sales` of other, non ice cream merchandise, increased
# MAGIC 7. The prediction of `total_store_sales` decreased
# MAGIC
# MAGIC Keep these changes in mind and see how we would detect them using the tools we have learned.
# COMMAND ----------
# MAGIC %md Let's take a look at the second time period ice cream dataframe!
# COMMAND ----------
df2.head()
# COMMAND ----------
# MAGIC %md We have defined a `Monitor` class for you. Please invoke it below to answer the following questions.
# COMMAND ----------
import pandas as pd
import seaborn as sns
from scipy import stats
import numpy as np
from scipy.spatial import distance
class Monitor():
    """Compare two time windows of the same dataset and flag drifted features.

    ``run()`` checks numeric columns with the Jensen-Shannon distance and
    categorical columns with a two-way chi-squared test (Bonferroni
    corrected).  A KS-test variant for numeric columns is provided but not
    wired into ``run()``.
    """
    def __init__(self, pdf1, pdf2, cat_cols, num_cols, alpha=.05):
        """
        Pass in two pandas dataframes with the same columns for two time windows
        List the categorical and numeric columns, and optionally provide an alpha level
        """
        # NOTE(review): ``assert`` is stripped under ``python -O``; raising
        # ValueError would be a sturdier input check.
        assert (pdf1.columns == pdf2.columns).all(), "Columns do not match"
        self.pdf1 = pdf1
        self.pdf2 = pdf2
        self.alpha = alpha
        self.categorical_columns = cat_cols
        self.continuous_columns = num_cols
    def run(self):
        """
        Call to run drift monitoring
        """
        # Uses the JS-distance check for numeric columns; handle_numeric_ks
        # is available as an alternative but is deliberately not called here.
        self.handle_numeric_js()
        self.handle_categorical()
    def handle_numeric_ks(self):
        """
        Handle the numeric features with the Two-Sample Kolmogorov-Smirnov (KS) Test with Bonferroni Correction
        """
        # Bonferroni: divide alpha by the number of comparisons being made.
        corrected_alpha = self.alpha / len(self.continuous_columns)
        for num in self.continuous_columns:
            ks_stat, ks_pval = stats.ks_2samp(self.pdf1[num], self.pdf2[num], mode="asymp")
            if ks_pval <= corrected_alpha:
                self.on_drift(num)
    def handle_numeric_js(self):
        """Flag numeric features whose Jensen-Shannon distance between the
        two windows is at or above the fixed 0.3 threshold."""
        for num in self.continuous_columns:
            # Run test comparing old and new for that attribute
            range_min = min(self.pdf1[num].min(), self.pdf2[num].min())
            range_max = max(self.pdf1[num].max(), self.pdf2[num].max())
            # Histogram both windows over a shared range so the bins align.
            base = np.histogram(self.pdf1[num], bins=20, range=(range_min, range_max))
            comp = np.histogram(self.pdf2[num], bins=20, range=(range_min, range_max))
            # jensenshannon normalizes the count vectors; base=2 bounds the
            # distance to [0, 1].
            js_stat = distance.jensenshannon(base[0], comp[0], base=2)
            if js_stat >= 0.3:
                self.on_drift(num)
    def handle_categorical(self):
        """
        Handle the Categorical features with Two-Way Chi-Squared Test with Bonferroni Correction
        """
        corrected_alpha = self.alpha / len(self.categorical_columns)
        for feature in self.categorical_columns:
            # Align category counts across windows; categories missing from
            # one window get a zero count via the outer join + fillna.
            pdf_count1 = pd.DataFrame(self.pdf1[feature].value_counts()).sort_index().rename(columns={feature:"pdf1"})
            pdf_count2 = pd.DataFrame(self.pdf2[feature].value_counts()).sort_index().rename(columns={feature:"pdf2"})
            pdf_counts = pdf_count1.join(pdf_count2, how="outer").fillna(0)
            obs = np.array([pdf_counts["pdf1"], pdf_counts["pdf2"]])
            _, p, _, _ = stats.chi2_contingency(obs)
            if p < corrected_alpha:
                self.on_drift(feature)
    def generate_null_counts(self, palette="#2ecc71"):
        """
        Generate the visualization of percent null counts of all features
        Optionally provide a color palette for the visual
        """
        cm = sns.light_palette(palette, as_cmap=True)
        return pd.concat([100 * self.pdf1.isnull().sum() / len(self.pdf1),
                          100 * self.pdf2.isnull().sum() / len(self.pdf2)], axis=1,
                         keys=["pdf1", "pdf2"]).style.background_gradient(cmap=cm, text_color_threshold=0.5, axis=1)
    def generate_percent_change(self, palette="#2ecc71"):
        """
        Generate visualization of percent change in summary statistics of numeric features
        Optionally provide a color palette for the visual
        """
        cm = sns.light_palette(palette, as_cmap=True)
        summary1_pdf = self.pdf1.describe()[self.continuous_columns]
        summary2_pdf = self.pdf2.describe()[self.continuous_columns]
        # The 1e-100 term guards against division by zero for all-zero stats.
        percent_change = 100 * abs((summary1_pdf - summary2_pdf) / (summary1_pdf + 1e-100))
        return percent_change.style.background_gradient(cmap=cm, text_color_threshold=0.5, axis=1)
    def on_drift(self, feature):
        """
        Complete this method with your response to drift. Options include:
        - raise an alert
        - automatically retrain model
        """
        print(f"Drift found in {feature}!")
# COMMAND ----------
# MAGIC %md Create a `Monitor` object based on our first and second period of ice cream data to identify drift.
# COMMAND ----------
# Instantiate the monitor over the two time windows; ``df`` and ``df2`` come
# from the setup notebook.  The column lists match the lab schema above.
drift_monitor = Monitor(
    df,
    df2,
    cat_cols = ["most_popular_ice_cream_flavor", "most_popular_sorbet_flavor"],
    num_cols = ["temperature", "number_of_cones_sold", "number_bowls_sold", "total_store_sales", "total_sales_predicted"]
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Summary Statistics
# MAGIC
# MAGIC Look over and compare some of the data and their summary stats. Use the `drift_monitor` class to generate the null counts and percent changes. Does anything jump out at you?
# COMMAND ----------
# TODO
# COMMAND ----------
# TODO
# COMMAND ----------
# MAGIC %md ### Statistical Tests
# MAGIC
# MAGIC Now let's try the Jensen Shannon and Two-Way Chi-Squared Test with Bonferroni Correction.
# MAGIC
# MAGIC Both of these are implemented for you when you call `drift_monitor.run()`. It will print a feature name if a statistically significant p-value was found by the respective test on that feature or if the JS stat is above our predetermined threshold.
# MAGIC
# MAGIC Examine the results and compare them to the changes we made.
# COMMAND ----------
# TODO
# COMMAND ----------
# MAGIC %md ### Closer Look
# MAGIC
# MAGIC ***Using these summary statistics and statistical tests were we able to catch all of our drift?***
# MAGIC
# MAGIC Imagine you were running this ice cream shop:
# MAGIC * ***How would you handle each situation?***
# MAGIC * ***How would it affect our model or the business?***
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2021 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
|
import pathlib
from dataclasses import asdict
import jinja2
import structlog
import yaml
from scenario_player.utils.configuration.nodes import NodesConfig
from scenario_player.utils.configuration.scenario import ScenarioConfig
from scenario_player.utils.configuration.settings import EnvironmentConfig, SettingsConfig
from scenario_player.utils.configuration.token import TokenConfig
log = structlog.get_logger(__name__)
class ScenarioDefinition:
    """Interface for a Scenario `.yaml` file.
    Takes care of loading the yaml from the given `yaml_path`, and validates
    its contents.
    """
    def __init__(
        self,
        yaml_path: pathlib.Path,
        data_path: pathlib.Path,
        environment: EnvironmentConfig,
    ) -> None:
        """Render the scenario file as a Jinja2 template, parse the YAML and
        build the typed sub-configs.

        Args:
            yaml_path: The scenario ``.yaml`` file.
            data_path: Root directory for scenario-player data.
            environment: Environment settings, injected both into the
                template rendering and the sub-config constructors.
        """
        self._scenario_dir = None  # created lazily by the scenario_dir property
        self.path = yaml_path
        # Use the scenario file as jinja template and only parse the yaml, afterwards.
        # StrictUndefined turns any missing template variable into an error
        # instead of silently rendering an empty string.
        with yaml_path.open() as f:
            yaml_template = jinja2.Template(f.read(), undefined=jinja2.StrictUndefined)
            rendered_yaml = yaml_template.render(**asdict(environment))
            self._loaded = yaml.safe_load(rendered_yaml)
        # Order matters: settings (with sp_root_dir) must exist before
        # scenario_dir — used by the token config below — can resolve.
        self.settings = SettingsConfig(self._loaded, environment)
        self.settings.sp_root_dir = data_path
        self.token = TokenConfig(self._loaded, self.scenario_dir.joinpath("token.info"))
        # No token address in the config means a token must be deployed.
        deploy_token = self.token.address is None
        self.nodes = NodesConfig(self._loaded, environment="development" if deploy_token else None)
        self.scenario = ScenarioConfig(self._loaded)
        # If the environment sets a list of matrix servers, the nodes must not
        # choose other servers, so let's set the first server from the list as
        # default.
        self.nodes.dict["default_options"]["matrix-server"] = environment.matrix_servers[0]
        self.nodes.dict["default_options"]["environment-type"] = environment.environment_type
    @property
    def name(self) -> str:
        """Return the name of the scenario file, sans extension."""
        return self.path.stem
    @property
    def scenario_dir(self) -> pathlib.Path:
        """Per-scenario data directory; resolved once and created on access."""
        if not self._scenario_dir:
            self._scenario_dir = self.settings.sp_scenario_root_dir.joinpath(self.name)
        assert self._scenario_dir
        # mkdir is idempotent; it guarantees the directory exists every time.
        self._scenario_dir.mkdir(exist_ok=True, parents=True)
        return self._scenario_dir
    @property
    def snapshot_dir(self) -> pathlib.Path:
        """Snapshot directory below scenario_dir; created on demand."""
        snapshot_dir = self.scenario_dir.joinpath("snapshot")
        snapshot_dir.mkdir(parents=True, exist_ok=True)
        return snapshot_dir
|
import ctypes, sys, subprocess, threading, traceback
Kernel32 = ctypes.windll.Kernel32
class Console():
    """Helper that gives a piped process a real, visible Windows console.

    With ``ischild=True`` (running inside the spawned console process) it
    allocates a console and shuttles bytes between the console and the
    parent's pipes.  With ``ischild=False`` (parent side) it spawns that
    child process and exposes write/read* wrappers over the child's pipes.
    """
    def __init__(self, ischild=True, outputonly=True):
        """
        Args:
            ischild: True when running as the console child process.
            outputonly: Parent side only — when True the child's stdout is
                not piped back, so read/readline/readlines are unusable.
        """
        if ischild:
            # detach console if it is attached
            Kernel32.FreeConsole()
            # try allocate new console
            result = Kernel32.AllocConsole()
            if result > 0:
                # if we succeed open handle to the console output
                self.stdout = open('CONOUT$', mode='w')
                # check if sys.stdout is a pipe instead of a tty
                if not sys.stdout.isatty():
                    # is pipe so open handle to console input
                    self.stdin = open('CONIN$', mode='r')
                    # start thread to read from console input
                    self.cin_thread = threading.Thread(target=self._cin_loop)
                    self.cin_thread.daemon = True
                    self.cin_thread.start()
                # Pump the parent's piped stdin to the visible console until
                # EOF; this call blocks for the lifetime of the child.
                self._cout_loop()
        else:
            # if frozen we assume its names Console.exe
            if hasattr(sys, 'frozen'):
                args = ['Console.exe']
            else: # otherwise use python
                args = [sys.executable, __file__]
            kwargs = {'stdin':subprocess.PIPE}
            if not outputonly:
                kwargs['stdout'] = subprocess.PIPE
            self.p = subprocess.Popen(args, **kwargs)
    def _cin_loop(self):
        '''reads from the visible console and writes
        it to the pipe back to parent process'''
        while True:
            data = self.stdin.read(1)
            if not data:
                break
            sys.stdout.write(data)
            sys.stdout.flush()
    def _cout_loop(self):
        '''reads from the pipe from parent process
        and writes it to the visible console'''
        while True:
            data = sys.stdin.read(1)
            if not data:
                break
            # NOTE(review): no flush here — console output may lag until the
            # buffer fills; confirm CONOUT$ buffering behavior in practice.
            self.stdout.write(data)
    def write(self, data):
        """Parent side: send *data* to the child console's stdin pipe."""
        self.p.stdin.write(data)
        self.p.stdin.flush()
    def read(self, size=None):
        """Parent side: read from the child's stdout pipe (needs outputonly=False)."""
        return self.p.stdout.read(size)
    def readline(self):
        """Parent side: read one line from the child's stdout pipe."""
        return self.p.stdout.readline()
    def readlines(self):
        """Parent side: read all lines from the child's stdout pipe (blocks until EOF)."""
        return self.p.stdout.readlines()
if (__name__ == '__main__'):
    # Executed as the spawned console child: allocate a console and pump IO.
    p = Console()
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
from celery.schedules import crontab
# Point Celery at the Django settings module before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mindynode_service.settings')
app = Celery('mindynode_service')
# All Celery configuration keys are read from Django settings, using the
# CELERY_ prefix (namespace).
app.config_from_object('django.conf:settings', namespace='CELERY')
# Auto-discover tasks.py modules in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# @app.on_after_configure.connect
# def setup_periodic_tasks(sender, **kwargs):
# from .mindynode_nltk.tasks import task_update_feeds
# sender.add_periodic_task(10.0, task_update_feeds.s(), name='update every 10 second')
# Periodic task schedule for celery beat.
app.conf.beat_schedule = {
    'update-feeds': {
        'task': 'mindynode_nltk.tasks.task_update_feeds',
        # Bug fix: crontab(hour='*/1') leaves `minute` at its default '*',
        # which fires the task every *minute* of every hour.  Pinning
        # minute=0 runs it once per hour, at the top of the hour, which is
        # what hour='*/1' was evidently meant to express.
        'schedule': crontab(minute=0, hour='*/1')
    }
}
"""Tests for Adam."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras import optimizers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow_riemopt.optimizers.riemannian_adam import RiemannianAdam
def get_beta_accumulators(opt, dtype):
    """Return (beta_1**t, beta_2**t) for optimizer *opt* at its next step t.

    t is ``opt.iterations + 1``; both hyperparameters are cast to *dtype*
    before exponentiation so the result matches the variables' precision.
    """
    step = math_ops.cast(opt.iterations + 1, dtype)
    powers = []
    for hyper_name in ("beta_1", "beta_2"):
        beta = math_ops.cast(opt._get_hyper(hyper_name), dtype)
        powers.append(math_ops.pow(beta, step))
    return tuple(powers)
class RiemannianAdamOptimizerTest(test.TestCase, parameterized.TestCase):
    """Checks RiemannianAdam against the reference Keras Adam optimizer.

    Every test runs both optimizers in lockstep on plain (Euclidean)
    variables and asserts that the updated variables and the beta-power
    accumulators agree, for half/float32/float64 and (where exercised)
    with and without AMSGrad.
    """
    @combinations.generate(combinations.combine(mode=["graph", "eager"]))
    def testBasic(self):
        # Dense gradients, both graph and eager execution.
        for i, dtype in enumerate(
            [dtypes.half, dtypes.float32, dtypes.float64]
        ):
            for amsgrad in [False, True]:
                with self.cached_session(use_gpu=True):
                    var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                    grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                    var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                    grads1_np = np.array(
                        [0.01, 0.01], dtype=dtype.as_numpy_dtype
                    )
                    var0 = variables.Variable(var0_np, name="var0_%d" % i)
                    var1 = variables.Variable(var1_np, name="var1_%d" % i)
                    # *_ref variables receive the reference Adam updates.
                    var0_ref = variables.Variable(
                        var0_np, name="var0_ref_%d" % i
                    )
                    var1_ref = variables.Variable(
                        var1_np, name="var1_ref_%d" % i
                    )
                    grads0 = constant_op.constant(grads0_np)
                    grads1 = constant_op.constant(grads1_np)
                    learning_rate = 0.001
                    beta1 = 0.9
                    beta2 = 0.999
                    epsilon = 1e-8
                    opt = RiemannianAdam(
                        learning_rate=learning_rate,
                        beta_1=beta1,
                        beta_2=beta2,
                        epsilon=epsilon,
                        amsgrad=amsgrad,
                    )
                    opt_ref = adam.Adam(
                        learning_rate=learning_rate,
                        beta_1=beta1,
                        beta_2=beta2,
                        epsilon=epsilon,
                        amsgrad=amsgrad,
                    )
                    # In graph mode the update ops are built once and then
                    # evaluated per step; in eager mode apply_gradients is
                    # called directly inside the step loop below.
                    if not context.executing_eagerly():
                        update = opt.apply_gradients(
                            zip([grads0, grads1], [var0, var1])
                        )
                        update_ref = opt_ref.apply_gradients(
                            zip([grads0, grads1], [var0_ref, var1_ref])
                        )
                    self.evaluate(variables.global_variables_initializer())
                    # Run 3 steps
                    for t in range(3):
                        beta_1_power, beta_2_power = get_beta_accumulators(
                            opt, dtype
                        )
                        self.assertAllCloseAccordingToType(
                            beta1 ** (t + 1), self.evaluate(beta_1_power)
                        )
                        self.assertAllCloseAccordingToType(
                            beta2 ** (t + 1), self.evaluate(beta_2_power)
                        )
                        if not context.executing_eagerly():
                            self.evaluate(update)
                            self.evaluate(update_ref)
                        else:
                            opt.apply_gradients(
                                zip([grads0, grads1], [var0, var1])
                            )
                            opt_ref.apply_gradients(
                                zip([grads0, grads1], [var0_ref, var1_ref])
                            )
                    # Validate updated params
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var0_ref),
                        self.evaluate(var0),
                        rtol=1e-4,
                        atol=1e-4,
                    )
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var1_ref),
                        self.evaluate(var1),
                        rtol=1e-4,
                        atol=1e-4,
                    )
    def testSparse(self):
        # Sparse (IndexedSlices) gradients, graph mode only.
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            for amsgrad in [False, True]:
                with ops.Graph().as_default(), self.cached_session(
                    use_gpu=True
                ):
                    var0_np = np.array(
                        [1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype
                    )
                    grads0_np = np.array(
                        [0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype
                    )
                    var1_np = np.array(
                        [3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype
                    )
                    grads1_np = np.array(
                        [0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype
                    )
                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)
                    var0_ref = variables.Variable(var0_np)
                    var1_ref = variables.Variable(var1_np)
                    # Only indices 0 and 2 carry gradient values; index 1 is
                    # absent from the IndexedSlices on purpose.
                    grads0_np_indices = np.array([0, 2], dtype=np.int32)
                    grads0 = ops.IndexedSlices(
                        constant_op.constant(grads0_np[grads0_np_indices]),
                        constant_op.constant(grads0_np_indices),
                        constant_op.constant([3]),
                    )
                    grads1_np_indices = np.array([0, 2], dtype=np.int32)
                    grads1 = ops.IndexedSlices(
                        constant_op.constant(grads1_np[grads1_np_indices]),
                        constant_op.constant(grads1_np_indices),
                        constant_op.constant([3]),
                    )
                    opt = RiemannianAdam(amsgrad=amsgrad)
                    update = opt.apply_gradients(
                        zip([grads0, grads1], [var0, var1])
                    )
                    opt_ref = adam.Adam(amsgrad=amsgrad)
                    update_ref = opt_ref.apply_gradients(
                        zip([grads0, grads1], [var0_ref, var1_ref])
                    )
                    self.evaluate(variables.global_variables_initializer())
                    beta_1_power, beta_2_power = get_beta_accumulators(
                        opt, dtype
                    )
                    # Run 3 steps
                    for t in range(3):
                        self.assertAllCloseAccordingToType(
                            0.9 ** (t + 1), self.evaluate(beta_1_power)
                        )
                        self.assertAllCloseAccordingToType(
                            0.999 ** (t + 1), self.evaluate(beta_2_power)
                        )
                        update.run()
                        update_ref.run()
                    # Validate updated params
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var0_ref), self.evaluate(var0)
                    )
                    self.assertAllCloseAccordingToType(
                        self.evaluate(var1_ref), self.evaluate(var1)
                    )
    def testSharing(self):
        # One RiemannianAdam instance builds two update ops that are applied
        # alternately; the reference uses two separate Adam instances, each
        # updating one variable every step.
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
                var0 = variables.Variable(var0_np)
                var1 = variables.Variable(var1_np)
                var0_ref = variables.Variable(var0_np)
                var1_ref = variables.Variable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)
                opt = RiemannianAdam()
                update1 = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1])
                )
                update2 = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1])
                )
                update1_ref = adam.Adam().apply_gradients(
                    zip([grads0], [var0_ref])
                )
                update2_ref = adam.Adam().apply_gradients(
                    zip([grads1], [var1_ref])
                )
                self.evaluate(variables.global_variables_initializer())
                beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
                # Run 3 steps
                for t in range(3):
                    self.assertAllCloseAccordingToType(
                        0.9 ** (t + 1), self.evaluate(beta_1_power)
                    )
                    self.assertAllCloseAccordingToType(
                        0.999 ** (t + 1), self.evaluate(beta_2_power)
                    )
                    if t % 2 == 0:
                        update1.run()
                    else:
                        update2.run()
                    update1_ref.run()
                    update2_ref.run()
                # Validate updated params
                self.assertAllCloseAccordingToType(
                    self.evaluate(var0_ref), self.evaluate(var0)
                )
                self.assertAllCloseAccordingToType(
                    self.evaluate(var1_ref), self.evaluate(var1)
                )
if __name__ == "__main__":
    # Run the test suite via TensorFlow's test runner.
    test.main()
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: features/base.py
# Purpose: Feature extractors base classes.
#
# Authors: Christopher Ariza
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2011-2014 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
from __future__ import print_function
import unittest
import os
from music21 import common
from music21 import converter
from music21 import corpus
from music21 import exceptions21
from music21 import stream
from music21 import text
from music21 import environment
# Module identifier used by the environment-aware debug/warning machinery.
_MOD = 'features/base.py'
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
class FeatureException(exceptions21.Music21Exception):
    '''Exception raised for errors in Feature objects and feature data.'''
    pass
class Feature(object):
    '''
    An object representation of a feature, capable of presentation in a variety of formats,
    and returned from FeatureExtractor objects.
    Feature objects are simple. It is FeatureExtractors that store all metadata and processing
    routines for creating Feature objects.
    '''
    def __init__(self):
        # All attributes are populated by the FeatureExtractor that builds
        # this Feature; they start out unset.
        self.dimensions = None  # number of dimensions
        self.vector = None  # data storage; possibly use numpy array
        self.name = None  # string name representation
        self.description = None  # string description
        self.isSequential = None  # True or False
        self.discrete = None  # is discrete or continuous

    def _getVectors(self):
        '''Return a zero-filled list sized to this feature's dimensions.'''
        return [0] * self.dimensions

    def prepareVectors(self):
        '''Reset the stored vector to all zeros.'''
        self.vector = self._getVectors()

    def normalize(self):
        '''Normalize the vector between 0 and 1, assuming there is more than one value.

        Scalar features (dimensions == 1) and all-zero vectors are left
        untouched.
        '''
        if self.dimensions == 1:
            return  # do nothing
        peak = max(self.vector)
        if peak == 0:
            return  # do nothing
        factor = 1. / peak  # floating point scalar for speed
        self.vector = [value * factor for value in self.vector]
#-------------------------------------------------------------------------------
class FeatureExtractorException(exceptions21.Music21Exception):
    '''Exception raised for errors during feature extraction.'''
    pass
class FeatureExtractor(object):
    '''A model of process that extracts a feature from a Music21 Stream. The main public interface is the extract() method.
    The extractor can be passed a Stream or a reference to a DataInstance. All Stream's are internally converted to a DataInstance if necessary. Usage of a DataInstance offers significant performance advantages, as common forms of the Stream are cached for easy processing.
    '''
    def __init__(self, dataOrStream=None, *arguments, **keywords):
        self.stream = None # the original Stream, or None
        self.data = None # a DataInstance object: use to get data
        self.setData(dataOrStream)
        self._feature = None # Feature object that results from processing
        # Subclasses normally define the following as class attributes; set
        # defaults only where they have not.
        if not hasattr(self, "name"):
            self.name = None # string name representation
        if not hasattr(self, "description"):
            self.description = None # string description
        if not hasattr(self, "isSequential"):
            self.isSequential = None # True or False
        if not hasattr(self, "dimensions"):
            self.dimensions = None # number of dimensions
        if not hasattr(self, "discrete"):
            self.discrete = True # default
        if not hasattr(self, "normalize"):
            self.normalize = False # default is no

    def setData(self, dataOrStream):
        '''Set the data that this FeatureExtractor will process. Either a Stream or a DataInstance object can be provided.
        '''
        if dataOrStream is not None:
            if (hasattr(dataOrStream, 'classes') and 'Stream' in
                dataOrStream.classes):
                #environLocal.printDebug(['creating new DataInstance: this should be a Stream:', dataOrStream])
                # if we are passed a Stream, create a DataInstance to manage
                # its data; this is less efficient than passing a
                # DataInstance directly but is good for testing
                self.stream = dataOrStream
                self.data = DataInstance(self.stream)
            else:
                # assume a DataInstance was passed: store it directly
                self.stream = None
                self.data = dataOrStream

    def getAttributeLabels(self):
        '''Return a list of string in a form that is appropriate for data storage.

        >>> fe = features.jSymbolic.AmountOfArpeggiationFeature()
        >>> fe.getAttributeLabels()
        ['Amount_of_Arpeggiation']

        >>> fe = features.jSymbolic.FifthsPitchHistogramFeature()
        >>> fe.getAttributeLabels()
        ['Fifths_Pitch_Histogram_0', 'Fifths_Pitch_Histogram_1', 'Fifths_Pitch_Histogram_2', 'Fifths_Pitch_Histogram_3', 'Fifths_Pitch_Histogram_4', 'Fifths_Pitch_Histogram_5', 'Fifths_Pitch_Histogram_6', 'Fifths_Pitch_Histogram_7', 'Fifths_Pitch_Histogram_8', 'Fifths_Pitch_Histogram_9', 'Fifths_Pitch_Histogram_10', 'Fifths_Pitch_Histogram_11']
        '''
        post = []
        # One label for scalar features, an indexed label per dimension
        # otherwise; spaces are replaced to keep labels storage-safe.
        if self.dimensions == 1:
            post.append(self.name.replace(' ', '_'))
        else:
            for i in range(self.dimensions):
                post.append('%s_%s' % (self.name.replace(' ', '_'), i))
        return post

    def _fillFeatureAttributes(self, feature=None):
        '''Fill the attributes of a Feature with the descriptors in the FeatureExtractor.
        '''
        # operate on passed-in feature or self._feature
        if feature is None:
            feature = self._feature
        feature.name = self.name
        feature.description = self.description
        feature.isSequential = self.isSequential
        feature.dimensions = self.dimensions
        feature.discrete = self.discrete
        return feature

    def _prepareFeature(self):
        '''Prepare a new Feature object for data acquisition.

        >>> s = stream.Stream()
        >>> fe = features.jSymbolic.InitialTimeSignatureFeature(s)
        >>> fe._prepareFeature()
        >>> fe._feature.name
        'Initial Time Signature'
        >>> fe._feature.dimensions
        2
        >>> fe._feature.vector
        [0, 0]
        '''
        self._feature = Feature()
        self._fillFeatureAttributes() # will fill self._feature
        self._feature.prepareVectors() # will fill vector with necessary zeros

    def _process(self):
        '''Do processing necessary, storing result in _feature.
        '''
        # do work in subclass, calling on self.data
        pass

    def extract(self, source=None):
        '''Extract the feature and return the result.

        If *source* is provided it replaces the current data before
        extraction.
        '''
        if source is not None:
            # Bug fix: previously only self.stream was reassigned here, so a
            # newly supplied source was ignored by _process(), which reads
            # self.data.  Routing through setData() updates both and also
            # handles DataInstance arguments correctly.
            self.setData(source)
        # preparing the feature always sets self._feature to a new instance
        self._prepareFeature()
        self._process() # will set Feature object to _feature
        # assume we always want to normalize?
        if self.normalize:
            self._feature.normalize()
        return self._feature

    def getBlankFeature(self):
        '''Return a properly configured plain feature as a place holder

        >>> from music21 import features
        >>> fe = features.jSymbolic.InitialTimeSignatureFeature()
        >>> fe.getBlankFeature().vector
        [0, 0]
        '''
        f = Feature()
        self._fillFeatureAttributes(f)
        f.prepareVectors() # will fill vector with necessary zeros
        return f
#-------------------------------------------------------------------------------
class StreamForms(object):
'''A dictionary-like wrapper of a Stream, providing
numerous representations, generated on-demand, and cached.
A single StreamForms object can be created for an
entire Score, as well as one for each Part and/or Voice.
A DataSet object manages one or more StreamForms
objects, and exposes them to FeatureExtractors for usage.
'''
def __init__(self, streamObj, prepareStream=True):
self.stream = streamObj
if self.stream is not None:
if prepareStream:
self._base = self._prepareStream(self.stream)
else: # possibly make a copy?
self._base = self.stream
else:
self._base = None
# basic data storage is a dictionary
self._forms = {}
def keys(self):
# will only return forms that are established
return self._forms.keys()
def _prepareStream(self, streamObj):
'''
Common routines done on Streams prior to processing. Return a new Stream
'''
# this causes lots of deepcopys, but an inPlace operation loses
# accuracy on feature extractors
streamObj = streamObj.stripTies(retainContainers=True)
return streamObj
def __getitem__(self, key):
'''Get a form of this Stream, using a cached version if available.
'''
# first, check for cached version
if key in self._forms:
return self._forms[key]
# else, process, store, and return
elif key in ['flat']:
self._forms['flat'] = self._base.flat
return self._forms['flat']
elif key in ['flat.pitches']:
self._forms['flat.pitches'] = self._base.flat.pitches
return self._forms['flat.pitches']
elif key in ['flat.notes']:
self._forms['flat.notes'] = self._base.flat.notes
return self._forms['flat.notes']
elif key in ['getElementsByClass.Measure']:
# need to determine if should concatenate
# measure for all parts if a score?
if 'Score' in self._base.classes:
post = stream.Stream()
for p in self._base.parts:
# insert in overlapping offset positions
for m in p.getElementsByClass('Measure'):
post.insert(m.getOffsetBySite(p), m)
else:
post = self._base.getElementsByClass('Measure')
self._forms['getElementsByClass.Measure'] = post
return self._forms['getElementsByClass.Measure']
elif key in ['flat.getElementsByClass.TimeSignature']:
self._forms['flat.getElementsByClass.TimeSignature'] = self._base.flat.getElementsByClass('TimeSignature')
return self._forms['flat.getElementsByClass.TimeSignature']
elif key in ['flat.getElementsByClass.KeySignature']:
self._forms['flat.getElementsByClass.KeySignature'] = self._base.flat.getElementsByClass('KeySignature')
return self._forms['flat.getElementsByClass.KeySignature']
elif key in ['flat.getElementsByClass.Harmony']:
self._forms['flat.getElementsByClass.Harmony'] = self._base.flat.getElementsByClass('Harmony')
return self._forms['flat.getElementsByClass.Harmony']
elif key in ['metronomeMarkBoundaries']: # already flat
self._forms['metronomeMarkBoundaries'] = self._base.metronomeMarkBoundaries()
return self._forms['metronomeMarkBoundaries']
# some methods that return new streams
elif key in ['chordify']:
if 'Score' in self._base.classes:
# options here permit getting part information out
# of chordified representation
self._forms['chordify'] = self._base.chordify(
addPartIdAsGroup=True, removeRedundantPitches=False)
else: # for now, just return a normal Part or Stream
self._forms['chordify'] = self._base
return self._forms['chordify']
elif key in ['chordify.getElementsByClass.Chord']:
# need flat here, as chordify might return Measures
x = self.__getitem__('chordify').flat.getElementsByClass('Chord')
self._forms['chordify.getElementsByClass.Chord'] = x
return self._forms['chordify.getElementsByClass.Chord']
# create a Part in a Score for each Instrument
elif key in ['partitionByInstrument']:
from music21 import instrument
x = instrument.partitionByInstrument(self._base)
self._forms['partitionByInstrument'] = x
return self._forms['partitionByInstrument']
# create a dictionary of encountered set classes and a count
elif key in ['chordifySetClassHistogram']:
histo = {}
for c in self.__getitem__('chordify.getElementsByClass.Chord'):
key = c.forteClassTnI
if key not in histo:
histo[key] = 0
histo[key] += 1
self._forms['chordifySetClassHistogram'] = histo
return self._forms['chordifySetClassHistogram']
# a dictionary of pitch class sets
elif key in ['chordifyPitchClassSetHistogram']:
histo = {}
for c in self.__getitem__('chordify.getElementsByClass.Chord'):
key = c.orderedPitchClassesString
if key not in histo:
histo[key] = 0
histo[key] += 1
self._forms['chordifyPitchClassSetHistogram'] = histo
return self._forms['chordifyPitchClassSetHistogram']
# dictionary of common chord types
elif key in ['chordifyTypesHistogram']:
histo = {}
# keys are methods on Chord
keys = ['isTriad', 'isSeventh', 'isMajorTriad', 'isMinorTriad', 'isIncompleteMajorTriad', 'isIncompleteMinorTriad', 'isDiminishedTriad', 'isAugmentedTriad', 'isDominantSeventh', 'isDiminishedSeventh', 'isHalfDiminishedSeventh']
for c in self.__getitem__('chordify.getElementsByClass.Chord'):
for key in keys:
if key not in histo:
histo[key] = 0
# get the function attr, call it, check bool
if getattr(c, key)():
histo[key] += 1
# not breaking here means that we may get multiple
# hits for the same chord
self._forms['chordifyTypesHistogram'] = histo
return self._forms['chordifyTypesHistogram']
# a dictionary of intervals
#self.flat.melodicIntervals(skipRests=True, skipChords=False, skipGaps=True)
# a dictionary of quarter length values
elif key in ['noteQuarterLengthHistogram']:
histo = {}
for n in self.__getitem__('flat.notes'):
key = n.quarterLength
if key not in histo:
histo[key] = 0
histo[key] += 1
self._forms['noteQuarterLengthHistogram'] = histo
return self._forms['noteQuarterLengthHistogram']
# data lists / histograms
elif key in ['pitchClassHistogram']:
histo = [0] * 12
for p in self.__getitem__('flat.pitches'): # recursive call
histo[p.pitchClass] += 1
self._forms['pitchClassHistogram'] = histo
return self._forms['pitchClassHistogram']
elif key in ['midiPitchHistogram']:
histo = [0] * 128
for p in self.__getitem__('flat.pitches'): # recursive call
histo[p.midi] += 1
self._forms['midiPitchHistogram'] = histo
return self._forms['midiPitchHistogram']
# bins for all abs spans between adjacent melodic notes
elif key in ['midiIntervalHistogram']:
# note that this does not optimize and cache part presentations
histo = [0] * 128
# if we have parts, must add one at a time
if self._base.hasPartLikeStreams():
parts = self._base.parts
else:
parts = [self._base] # emulate a list
for p in parts:
# will be flat
# edit June 2012:
# was causing millions of deepcopy calls
# so I made it inPlace, but for some reason
# code errored with 'p =' not present
# also, this part has measures...so should retainContains be True?
p = p.stripTies(retainContainers=False, inPlace=True)
# noNone means that we will see all connections, even w/ a gap
post = p.findConsecutiveNotes(skipRests=True,
skipChords=True, skipGaps=True, noNone=True)
for i, n in enumerate(post):
if i < len(post) - 1: # if not last
iNext = i + 1
nNext = post[iNext]
try:
histo[abs(n.midi - nNext.midi)] += 1
except AttributeError:
pass # problem with not having midi
self._forms['midiIntervalHistogram'] = histo
return self._forms['midiIntervalHistogram']
elif key in ['contourList']:
# list of all directed half steps
cList = []
# if we have parts, must add one at a time
if self._base.hasPartLikeStreams():
parts = self._base.parts
else:
parts = [self._base] # emulate a list
for p in parts:
# this may be unnecessary but we cannot accessed cached part data
# edit June 2012:
# was causing lots of deepcopy calls, so I made
# it inPlace=True, but errors when 'p =' no present
# also, this part has measures...so should retainContains be True?
p = p.stripTies(retainContainers=False, inPlace=True) # will be flat
# noNone means that we will see all connections, even w/ a gap
post = p.findConsecutiveNotes(skipRests=True,
skipChords=False, skipGaps=True, noNone=True)
for i, n in enumerate(post):
if i < (len(post) - 1): # if not last
iNext = i + 1
nNext = post[iNext]
if n.isChord:
ps = n.sortDiatonicAscending().pitches[-1].midi
else: # normal note
ps = n.midi
if nNext.isChord:
psNext = nNext.sortDiatonicAscending().pitches[-1].midi
else: # normal note
psNext = nNext.midi
cList.append(psNext - ps)
#environLocal.printDebug(['contourList', cList])
self._forms['contourList'] = cList
return self._forms['contourList']
elif key in ['flat.analyzedKey']:
# this will use default weightings
self._forms['analyzedKey'] = self.__getitem__('flat').analyze(
method='key')
return self._forms['analyzedKey']
elif key in ['flat.tonalCertainty']:
# this will use default weightings
foundKey = self.__getitem__('flat.analyzedKey')
self._forms['flat.tonalCertainty'] = foundKey.tonalCertainty()
return self._forms['flat.tonalCertainty']
elif key in ['metadata']:
self._forms['metadata'] = self._base.metadata
return self._forms['metadata']
elif key in ['secondsMap']:
secondsMap = self.__getitem__('flat').secondsMap
post = []
# filter only notes; all elements would otherwise be gathered
for bundle in secondsMap:
if 'GeneralNote' in bundle['element'].classes:
post.append(bundle)
self._forms['secondsMap'] = post
return self._forms['secondsMap']
elif key in ['assembledLyrics']:
self._forms['assembledLyrics'] = text.assembleLyrics(self._base)
return self._forms['assembledLyrics']
else:
raise AttributeError('no such attribute: %s' % key)
#-------------------------------------------------------------------------------
class DataInstance(object):
    '''
    A data instance for analysis. This object prepares a Stream
    (by stripping ties, etc.) and stores
    multiple commonly-used stream representations once, providing rapid processing.
    '''
    def __init__(self, streamObj=None, id=None): #@ReservedAssignment
        self.stream = streamObj

        # store an id for the source stream: file path url, corpus url
        # or metadata title
        if id is not None:
            self._id = id
        elif hasattr(self.stream, 'metadata'):
            # NOTE(review): this stores the Metadata object itself, not a
            # string title; getId() assumes a string, so confirm callers pass
            # an explicit id when relying on getId() with metadata present
            self._id = self.stream.metadata # may be None
        else:
            # previously left unset in this case, which made getId()
            # raise AttributeError; default to None explicitly
            self._id = None

        # the attribute name in the data set for this label
        self._classLabel = None
        # store the class value for this data instance
        self._classValue = None

        # store a dictionary of StreamForms
        self._forms = StreamForms(self.stream)

        # if parts exist, store a StreamForms for each part
        self._formsByPart = []
        if hasattr(self.stream, 'parts'):
            self.partsCount = len(self.stream.parts)
            for p in self.stream.parts:
                # note that this will join ties and expand rests again
                self._formsByPart.append(StreamForms(p))
        else:
            self.partsCount = 0

        # a list of voices, extracted from each part;
        # presently this will only work on a measure stream
        self._formsByVoice = []
        if hasattr(self.stream, 'voices'):
            for v in self.stream.voices:
                # bug fix: voices were previously appended to _formsByPart,
                # leaving _formsByVoice permanently empty
                self._formsByVoice.append(StreamForms(v))

    def setClassLabel(self, classLabel, classValue=None):
        '''Set the class label, as well as the class value if known. The class label is the attribute name used to define the class of this data instance.

        >>> #_DOCS_SHOW s = corpus.parse('bwv66.6')
        >>> s = stream.Stream() #_DOCS_HIDE
        >>> di = features.DataInstance(s)
        >>> di.setClassLabel('Composer', 'Bach')
        '''
        self._classLabel = classLabel
        self._classValue = classValue

    def getClassValue(self):
        '''Return the stored class value, or an empty string if none is set.'''
        if self._classValue is None:
            return ''
        return self._classValue

    def getId(self):
        '''Return the stored id with spaces replaced by underscores, or an
        empty string if no id is set.'''
        if self._id is None:
            return ''
        # make sure there are no spaces
        return self._id.replace(' ', '_')

    def __getitem__(self, key):
        '''Get a form of this Stream, using a cached version if available.

        >>> s = corpus.parse('bwv66.6')
        >>> di = features.DataInstance(s)
        >>> len(di['flat'])
        193
        >>> len(di['flat.pitches'])
        163
        >>> len(di['flat.notes'])
        163
        >>> len(di['getElementsByClass.Measure'])
        40
        >>> len(di['getElementsByClass.Measure'])
        40
        >>> len(di['flat.getElementsByClass.TimeSignature'])
        4
        '''
        if key in ['parts']:
            # return a list of Forms for each part
            return self._formsByPart
        elif key in ['voices']:
            # bug fix: the attribute was misspelled _formsByVoices here,
            # which raised AttributeError on every 'voices' lookup
            return self._formsByVoice
        # try to create by calling the attribute
        # will raise an attribute error if there is a problem
        return self._forms[key]
#-------------------------------------------------------------------------------
class OutputFormatException(exceptions21.Music21Exception):
    '''Exception raised for problems encountered while producing DataSet output.'''
class OutputFormat(object):
    '''Provide output for a DataSet, passed as an initial argument.
    '''
    def __init__(self, dataSet=None):
        # store a file extension if necessary; subclasses set this
        self._ext = None
        # pass a data set object
        self._dataSet = dataSet

    def getHeaderLines(self):
        '''Get the header as a list of lines.
        '''
        pass # define in subclass

    def write(self, fp=None, includeClassLabel=True, includeId=True):
        '''Write the file. If no file path is given, a temporary file will be written.

        Returns the file path written. Raises OutputFormatException if the
        given path does not carry this format's extension.
        '''
        if fp is None:
            fp = environLocal.getTempFile(suffix=self._ext)
        if not fp.endswith(self._ext):
            # was a bare `raise` outside an except block, which fails with
            # "No active exception to re-raise" instead of a useful error
            raise OutputFormatException(
                'file path %s does not end with required extension %s' % (fp, self._ext))
        # context manager guarantees the handle is closed even if getString fails
        with open(fp, 'w') as f:
            f.write(self.getString(includeClassLabel=includeClassLabel,
                                   includeId=includeId))
        return fp
class OutputTabOrange(OutputFormat):
    '''Tab delimited file format used with Orange.

    http://orange.biolab.si/doc/reference/Orange.data.formats/
    '''
    def __init__(self, dataSet=None):
        OutputFormat.__init__(self, dataSet=dataSet)
        self._ext = '.tab'

    def getHeaderLines(self, includeClassLabel=True, includeId=True):
        '''Get the header as a list of lines.

        >>> f = [features.jSymbolic.ChangesOfMeterFeature]
        >>> ds = features.DataSet()
        >>> ds.addFeatureExtractors(f)
        >>> of = features.OutputTabOrange(ds)
        >>> for x in of.getHeaderLines(): print(x)
        ['Identifier', 'Changes_of_Meter']
        ['string', 'discrete']
        ['meta', '']

        >>> ds = features.DataSet(classLabel='Composer')
        >>> ds.addFeatureExtractors(f)
        >>> of = features.OutputTabOrange(ds)
        >>> for x in of.getHeaderLines(): print(x)
        ['Identifier', 'Changes_of_Meter', 'Composer']
        ['string', 'discrete', 'discrete']
        ['meta', '', 'class']
        '''
        # first row: attribute names
        headerRows = [self._dataSet.getAttributeLabels(
            includeClassLabel=includeClassLabel, includeId=includeId)]
        # second row: value types; None marks a string (identifier) column,
        # True a discrete feature, anything else is continuous
        typeRow = ['string' if flag is None
                   else 'discrete' if flag is True
                   else 'continuous'
                   for flag in self._dataSet.getDiscreteLabels(
                       includeClassLabel=includeClassLabel, includeId=includeId)]
        headerRows.append(typeRow)
        # third row: column roles; None marks the id (meta) column,
        # True the class column, everything else is blank
        roleRow = ['meta' if flag is None
                   else 'class' if flag is True
                   else ''
                   for flag in self._dataSet.getClassPositionLabels(includeId=includeId)]
        headerRows.append(roleRow)
        return headerRows

    def getString(self, includeClassLabel=True, includeId=True, lineBreak=None):
        '''Get the complete DataSet as a string with the appropriate headers.
        '''
        if lineBreak is None:
            lineBreak = '\n'
        allRows = self.getHeaderLines(
            includeClassLabel=includeClassLabel,
            includeId=includeId) + self._dataSet.getFeaturesAsList(
                includeClassLabel=includeClassLabel)
        # each row becomes one tab-separated line
        return lineBreak.join('\t'.join(str(cell) for cell in row)
                              for row in allRows)
class OutputCSV(OutputFormat):
    '''Comma-separated value list.
    '''
    def __init__(self, dataSet=None):
        OutputFormat.__init__(self, dataSet=dataSet)
        self._ext = '.csv'

    def getHeaderLines(self, includeClassLabel=True, includeId=True):
        '''Get the header as a list of lines.

        >>> f = [features.jSymbolic.ChangesOfMeterFeature]
        >>> ds = features.DataSet(classLabel='Composer')
        >>> ds.addFeatureExtractors(f)
        >>> of = features.OutputCSV(ds)
        >>> of.getHeaderLines()[0]
        ['Identifier', 'Changes_of_Meter', 'Composer']
        '''
        # CSV has a single header row of attribute names
        return [self._dataSet.getAttributeLabels(
            includeClassLabel=includeClassLabel, includeId=includeId)]

    def getString(self, includeClassLabel=True, includeId=True, lineBreak=None):
        '''Render the header plus all feature rows as comma-separated text.
        '''
        if lineBreak is None:
            lineBreak = '\n'
        allRows = self.getHeaderLines(
            includeClassLabel=includeClassLabel,
            includeId=includeId) + self._dataSet.getFeaturesAsList(
                includeClassLabel=includeClassLabel, includeId=includeId)
        # each row becomes one comma-separated line
        return lineBreak.join(','.join(str(cell) for cell in row)
                              for row in allRows)
class OutputARFF(OutputFormat):
    '''An ARFF (Attribute-Relation File Format) file.

    See http://weka.wikispaces.com/ARFF+%28stable+version%29 for more details

    >>> oa = features.OutputARFF()
    >>> oa._ext
    '.arff'
    '''
    def __init__(self, dataSet=None):
        OutputFormat.__init__(self, dataSet=dataSet)
        self._ext = '.arff'

    def getHeaderLines(self, includeClassLabel=True, includeId=True):
        '''Get the header as a list of lines.

        >>> f = [features.jSymbolic.ChangesOfMeterFeature]
        >>> ds = features.DataSet(classLabel='Composer')
        >>> ds.addFeatureExtractors(f)
        >>> of = features.OutputARFF(ds)
        >>> for x in of.getHeaderLines(): print(x)
        @RELATION Composer
        @ATTRIBUTE Identifier STRING
        @ATTRIBUTE Changes_of_Meter NUMERIC
        @ATTRIBUTE class {}
        @DATA
        '''
        # three parallel lists describe each column
        attrs = self._dataSet.getAttributeLabels(
            includeClassLabel=includeClassLabel, includeId=includeId)
        discreteLabels = self._dataSet.getDiscreteLabels(
            includeClassLabel=includeClassLabel, includeId=includeId)
        classLabels = self._dataSet.getClassPositionLabels(includeId=includeId)

        lines = ['@RELATION %s' % self._dataSet.getClassLabel()]
        for attrLabel, discrete, isClass in zip(attrs, discreteLabels, classLabels):
            if isClass:
                values = self._dataSet.getUniqueClassValues()
                lines.append('@ATTRIBUTE class {%s}' % ','.join(values))
            elif discrete is None: # an identifier column
                lines.append('@ATTRIBUTE %s STRING' % attrLabel)
            else:
                # NOTE: both discrete and continuous features are declared
                # NUMERIC; a NOMINAL type might better suit discrete features
                lines.append('@ATTRIBUTE %s NUMERIC' % attrLabel)
        # start of the data section
        lines.append('@DATA')
        return lines

    def getString(self, includeClassLabel=True, includeId=True, lineBreak=None):
        '''Render the ARFF header followed by comma-separated data rows.
        '''
        if lineBreak is None:
            lineBreak = '\n'
        msg = list(self.getHeaderLines(includeClassLabel=includeClassLabel,
                                       includeId=includeId))
        for row in self._dataSet.getFeaturesAsList(
                includeClassLabel=includeClassLabel):
            msg.append(','.join(str(cell) for cell in row))
        return lineBreak.join(msg)
#-------------------------------------------------------------------------------
class DataSetException(exceptions21.Music21Exception):
    '''Exception raised for problems configuring or populating a DataSet.'''
class DataSet(object):
    '''
    A set of features, as well as a collection of data to operate on

    Multiple DataInstance objects, a FeatureSet, and an OutputFormat.

    >>> ds = features.DataSet(classLabel='Composer')
    >>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature, features.jSymbolic.InitialTimeSignatureFeature]
    >>> ds.addFeatureExtractors(f)
    >>> ds.addData('bwv66.6', classValue='Bach')
    >>> ds.addData('bach/bwv324.xml', classValue='Bach')
    >>> ds.process()
    >>> ds.getFeaturesAsList()[0]
    ['bwv66.6', 0.0, 1.0, 0.375, 0.03125, 0.5, 0.1875, 0.90625, 0.0, 0.4375, 0.6875, 0.09375, 0.875, 0, 4, 4, 'Bach']
    >>> ds.getFeaturesAsList()[1]
    ['bach/bwv324.xml', 0.12, 0.0, 1.0, 0.12, 0.56..., 0.0, ..., 0.52..., 0.0, 0.68..., 0.0, 0.56..., 0, 4, 4, 'Bach']
    >>> ds = ds.getString()

    By default, all exceptions are caught and printed if debug mode is on.

    Set ds.failFast = True to not catch them.

    Set ds.quiet = False to print them regardless of debug mode.
    '''

    def __init__(self, classLabel=None, featureExtractors=()):
        # assume a two dimensional array
        self.dataInstances = []
        self.streams = []
        # order of feature extractors is the order used in the presentations
        self._featureExtractors = []
        # the label of the class
        self._classLabel = classLabel
        # store a multidimensional storage of all features
        self._features = []
        self.failFast = False
        self.quiet = True
        # set extractors
        self.addFeatureExtractors(featureExtractors)

    def getClassLabel(self):
        '''Return the class label assigned at construction time.'''
        return self._classLabel

    def addFeatureExtractors(self, values):
        '''Add one or more FeatureExtractor objects, either as a list or as an individual object.
        '''
        # features are instantiated here
        # however, they do not have a data assignment
        if not common.isListLike(values):
            values = [values]
        # need to create instances
        for sub in values:
            self._featureExtractors.append(sub())

    def getAttributeLabels(self, includeClassLabel=True,
        includeId=True):
        '''Return a list of all attribute labels. Optionally add a class label field and/or an id field.

        >>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature]
        >>> ds = features.DataSet(classLabel='Composer', featureExtractors=f)
        >>> ds.getAttributeLabels(includeId=False)
        ['Pitch_Class_Distribution_0', 'Pitch_Class_Distribution_1', 'Pitch_Class_Distribution_2', 'Pitch_Class_Distribution_3', 'Pitch_Class_Distribution_4', 'Pitch_Class_Distribution_5', 'Pitch_Class_Distribution_6', 'Pitch_Class_Distribution_7', 'Pitch_Class_Distribution_8', 'Pitch_Class_Distribution_9', 'Pitch_Class_Distribution_10', 'Pitch_Class_Distribution_11', 'Changes_of_Meter', 'Composer']
        '''
        post = []
        # place ids first
        if includeId:
            post.append('Identifier')
        for fe in self._featureExtractors:
            post += fe.getAttributeLabels()
        if self._classLabel is not None and includeClassLabel:
            post.append(self._classLabel.replace(' ', '_'))
        return post

    def getDiscreteLabels(self, includeClassLabel=True, includeId=True):
        '''Return column labels for discrete status.

        >>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature]
        >>> ds = features.DataSet(classLabel='Composer', featureExtractors=f)
        >>> ds.getDiscreteLabels()
        [None, False, False, False, False, False, False, False, False, False, False, False, False, True, True]
        '''
        post = []
        if includeId:
            post.append(None) # just a spacer
        for fe in self._featureExtractors:
            # need as many statements of discrete as there are dimensions
            post += [fe.discrete] * fe.dimensions
        # class label is assumed always discrete
        if self._classLabel is not None and includeClassLabel:
            post.append(True)
        return post

    def getClassPositionLabels(self, includeId=True):
        '''Return column labels for the presence of a class definition

        >>> f = [features.jSymbolic.PitchClassDistributionFeature, features.jSymbolic.ChangesOfMeterFeature]
        >>> ds = features.DataSet(classLabel='Composer', featureExtractors=f)
        >>> ds.getClassPositionLabels()
        [None, False, False, False, False, False, False, False, False, False, False, False, False, False, True]
        '''
        post = []
        if includeId:
            post.append(None) # just a spacer
        for fe in self._featureExtractors:
            # need as many statements of discrete as there are dimensions
            post += [False] * fe.dimensions
        # class label is assumed always discrete
        if self._classLabel is not None:
            post.append(True)
        return post

    def addData(self, dataOrStreamOrPath, classValue=None, id=None): #@ReservedAssignment
        '''Add a Stream, DataInstance, or path to a corpus or local file to this data set.

        The class value passed here is assumed to correspond to the classLabel assigned at startup.
        '''
        if self._classLabel is None:
            raise DataSetException('cannot add data unless a class label for this DataSet has been set.')

        s = None
        if isinstance(dataOrStreamOrPath, DataInstance):
            di = dataOrStreamOrPath
            s = di.stream
        elif common.isStr(dataOrStreamOrPath):
            # could be corpus or file path
            if os.path.exists(dataOrStreamOrPath) or dataOrStreamOrPath.startswith('http'):
                s = converter.parse(dataOrStreamOrPath)
            else: # assume corpus
                s = corpus.parse(dataOrStreamOrPath)
            # assume we can use this string as an id
            di = DataInstance(s, id=dataOrStreamOrPath)
        else:
            # for now, assume all else are streams
            s = dataOrStreamOrPath
            di = DataInstance(dataOrStreamOrPath, id=id)

        di.setClassLabel(self._classLabel, classValue)
        self.dataInstances.append(di)
        self.streams.append(s)

    def process(self):
        '''Process all Data with all FeatureExtractors. Processed data is stored internally as numerous Feature objects.
        '''
        # clear features
        self._features = []
        for data in self.dataInstances:
            row = []
            for fe in self._featureExtractors:
                fe.setData(data)
                # in some cases there might be a problem; do not fail
                try:
                    fReturned = fe.extract()
                except Exception as e: # for now take any error # pylint: disable=broad-except
                    fList = ['failed feature extractor:', fe, str(e)]
                    if self.quiet is True:
                        environLocal.printDebug(fList)
                    else:
                        environLocal.warn(fList)
                    if self.failFast is True:
                        raise e
                    # provide a blank feature in place of the failed one
                    fReturned = fe.getBlankFeature()
                row.append(fReturned) # get feature and store
            # rows will align with the order of DataInstances
            self._features.append(row)

    def getFeaturesAsList(self, includeClassLabel=True, includeId=True, concatenateLists=True):
        '''Get processed data as a list of lists, merging any sub-lists in multi-dimensional features.
        '''
        post = []
        for i, row in enumerate(self._features):
            v = []
            di = self.dataInstances[i]

            if includeId:
                v.append(di.getId())

            for f in row:
                if concatenateLists:
                    v += f.vector
                else:
                    v.append(f.vector)
            if includeClassLabel:
                v.append(di.getClassValue())
            post.append(v)

        if not includeClassLabel and not includeId:
            # NOTE(review): only the first row is returned in this case;
            # allFeaturesAsList() depends on this behavior -- confirm before
            # changing
            return post[0]
        return post

    def getUniqueClassValues(self):
        '''Return a list of unique class values, in first-seen order.
        '''
        post = []
        for di in self.dataInstances:
            v = di.getClassValue()
            if v not in post:
                post.append(v)
        return post

    def _getOutputFormat(self, featureFormat):
        '''Return an OutputFormat instance for the given format name, or None
        if the name is unrecognized. None defaults to tab/Orange.
        '''
        if featureFormat is None:
            # None is documented as tab; previously .lower() was called on
            # None and raised AttributeError before the membership test
            featureFormat = 'tab'
        featureFormat = featureFormat.lower()
        if featureFormat in ('tab', 'orange', 'taborange'):
            return OutputTabOrange(dataSet=self)
        elif featureFormat in ('csv', 'comma'):
            return OutputCSV(dataSet=self)
        elif featureFormat in ('arff', 'attribute'):
            return OutputARFF(dataSet=self)
        return None

    def _getOutputFormatFromFilePath(self, fp):
        '''Get an output format from a file path if possible, otherwise return None.

        >>> ds = features.DataSet()
        >>> ds._getOutputFormatFromFilePath('test.tab')
        <music21.features.base.OutputTabOrange object at ...>
        >>> ds._getOutputFormatFromFilePath('test.csv')
        <music21.features.base.OutputCSV object at ...>
        >>> ds._getOutputFormatFromFilePath('junk') is None
        True
        '''
        # get format from fp if possible; _getOutputFormat returns None for
        # unrecognized extensions
        if '.' not in fp:
            return None
        return self._getOutputFormat(fp.split('.')[-1])

    def getString(self, outputFmt='tab'):
        '''Get a string representation of the data set in a specific format.
        '''
        # pass reference to self to output
        outputFormat = self._getOutputFormat(outputFmt)
        return outputFormat.getString()

    def write(self, fp=None, format=None, includeClassLabel=True): #@ReservedAssignment
        '''
        Write the data set to `fp` using the output format given by `format`,
        or, failing that, the format implied by the file path's extension.

        Raises DataSetException when no output format can be determined.
        '''
        if format is None and fp is not None:
            outputFormat = self._getOutputFormatFromFilePath(fp)
        else:
            outputFormat = self._getOutputFormat(format)
        # bug fix: previously compared the OutputFormat *class* to None
        # (never true), so the intended exception was never raised and
        # unknown formats crashed later with an opaque error
        if outputFormat is None:
            raise DataSetException('no output format could be defined from file path %s or format %s' % (fp, format))

        outputFormat.write(fp=fp, includeClassLabel=includeClassLabel)
def allFeaturesAsList(streamInput):
    '''
    Returns a tuple containing ALL currently implemented feature extractors' results.
    The first element holds the jSymbolic vectors, the second the native vectors.
    Vectors are NOT nested.

    streamInput can be a Stream, DataInstance, or path to a corpus or local file.

    >>> #_DOCS_SHOW s = corpus.parse('bwv66.6')
    >>> s = converter.parse('tinynotation: 4/4 c4 d e2') #_DOCS_HIDE
    >>> f = features.allFeaturesAsList(s)
    >>> f[1][0:3]
    [[1], [0.6899992497638124], [2]]
    >>> len(f[0]) > 65
    True
    >>> len(f[1]) > 20
    True
    '''
    from music21.features import jSymbolic, native

    dataSet = DataSet(classLabel='')
    dataSet.addFeatureExtractors(list(jSymbolic.featureExtractors))
    dataSet.addData(streamInput)
    dataSet.process()
    jSymbolicVectors = dataSet.getFeaturesAsList(
        includeClassLabel=False, includeId=False, concatenateLists=False)
    # reset the extractors and results, then run again with the native set
    dataSet._featureExtractors = []
    dataSet._features = []
    dataSet.addFeatureExtractors(list(native.featureExtractors))
    dataSet.process()
    nativeVectors = dataSet.getFeaturesAsList(
        includeClassLabel=False, includeId=False, concatenateLists=False)
    return (jSymbolicVectors, nativeVectors)
#-------------------------------------------------------------------------------
def extractorsById(idOrList, library=('jSymbolic', 'native')):
    '''Given one or more :class:`~music21.features.FeatureExtractor` ids, return the appropriate subclass. An optional `library` argument can be added to define which module is used. Current options are jSymbolic and native.

    >>> [x.id for x in features.extractorsById('p20')]
    ['P20']
    >>> [x.id for x in features.extractorsById(['p19', 'p20'])]
    ['P19', 'P20']

    >>> [x.id for x in features.extractorsById(['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21'])]
    ['R31', 'R32', 'R33', 'R34', 'R35', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14', 'P15', 'P16', 'P19', 'P20', 'P21']

    Get all feature extractors from all libraries

    >>> y = [x.id for x in features.extractorsById('all')]
    >>> y[0:3], y[-3:-1]
    (['M1', 'M2', 'M3'], ['MD1', 'MC1'])
    '''
    from music21.features import jSymbolic
    from music21.features import native

    if not common.isListLike(library):
        library = [library]

    featureExtractors = []
    for libraryName in library:
        # independent tests, not elif: library='all' must include BOTH
        # libraries (the previous elif silently dropped the native set)
        if libraryName.lower() in ('jsymbolic', 'all'):
            featureExtractors += jSymbolic.featureExtractors
        if libraryName.lower() in ('native', 'all'):
            featureExtractors += native.featureExtractors

    if not common.isListLike(idOrList):
        idOrList = [idOrList]

    flatIds = []
    for featureId in idOrList:
        # normalize ids: lowercase, strip whitespace, drop hyphens/spaces.
        # str.replace returns a new string, so its result must be
        # re-assigned (the previous code discarded it, leaving ids such as
        # 'p-20' unmatched)
        featureId = featureId.strip().lower()
        featureId = featureId.replace('-', '')
        featureId = featureId.replace(' ', '')
        flatIds.append(featureId)

    post = []
    if not flatIds:
        return post
    for fe in featureExtractors:
        if fe.id.lower() in flatIds or flatIds[0] == 'all':
            post.append(fe)
    return post
def extractorById(idOrList, library=('jSymbolic', 'native')):
    '''Get the first feature matched by extractorsById().

    >>> s = stream.Stream()
    >>> s.append(note.Note('A4'))
    >>> fe = features.extractorById('p20')(s) # call class
    >>> fe.extract().vector
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
    '''
    matches = extractorsById(idOrList=idOrList, library=library)
    if not matches:
        return None # no match
    return matches[0]
def vectorById(streamObj, vectorId, library=('jSymbolic', 'native')):
    '''Utility function to get a vector from an extractor

    >>> s = stream.Stream()
    >>> s.append(note.Note('A4'))
    >>> features.vectorById(s, 'p20')
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
    '''
    # bug fix: the extractor class was previously called before the None
    # check, so an unmatched id raised TypeError (None is not callable)
    # instead of returning None; the library argument was also ignored
    feClass = extractorById(vectorId, library=library)
    if feClass is None:
        return None # could raise exception
    fe = feClass(streamObj) # instantiate the extractor with the stream
    return fe.extract().vector
def getIndex(featureString, extractorType=None):
    '''
    Returns the list index of the given feature extractor and the feature extractor
    category (jsymbolic or native). If feature extractor string is not in either
    jsymbolic or native feature extractors, returns None

    optionally include the extractorType ('jsymbolic' or 'native') if known
    and searching will be made more efficient

    >>> features.getIndex('Range')
    (59, 'jsymbolic')
    >>> features.getIndex('Ends With Landini Melodic Contour')
    (19, 'native')
    >>> features.getIndex('abrandnewfeature!')
    >>> features.getIndex('Fifths Pitch Histogram','jsymbolic')
    (68, 'jsymbolic')
    >>> features.getIndex('Tonal Certainty','native')
    (1, 'native')
    '''
    from music21.features import jSymbolic, native

    # use `is None` for the singleton test and enumerate instead of a
    # manually-maintained counter
    if extractorType is None or extractorType == 'jsymbolic':
        for index, feature in enumerate(jSymbolic.featureExtractors):
            if feature().name == featureString:
                return (index, 'jsymbolic')
    if extractorType is None or extractorType == 'native':
        for index, feature in enumerate(native.featureExtractors):
            if feature().name == featureString:
                return (index, 'native')
    return None
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
    def runTest(self):
        # no-op entry point retained for older unittest runners that invoke
        # runTest() directly
        pass
# def testGetAllExtractorsMethods(self):
# '''
# ahh..this test taks a realy long time....
# '''
# from music21 import stream, features, pitch
# s = corpus.parse('bwv66.6').measures(1,5)
# self.assertEqual( len(features.alljSymbolicFeatures(s)), 70)
# self.assertEqual(len (features.allNativeFeatures(s)),21)
# self.assertEqual(str(features.alljSymbolicVectors(s)[1:5]),
#'[[2.6630434782608696], [2], [2], [0.391304347826087]]')
# self.assertEqual(str(features.allNativeVectors(s)[0:4]),
#'[[1], [1.0328322202181006], [2], [1.0]]')
    def testStreamFormsA(self):
        # exercises the cached DataInstance stream forms on a corpus score:
        # flat counts, chordify-based histograms, and per-part forms
        from music21 import features
        s = corpus.parse('corelli/opus3no1/1grave')
        di = features.DataInstance(s)
        self.assertEqual(len(di['flat']), 291)
        self.assertEqual(len(di['flat.notes']), 238)
        #di['chordify'].show('t')
        self.assertEqual(len(di['chordify']), 20)
        self.assertEqual(len(di['chordify.getElementsByClass.Chord']), 144)
        # Forte set-class names mapped to occurrence counts
        self.assertEqual(di['chordifySetClassHistogram'], {'2-2': 6, '2-3': 12, '2-4': 21, '2-5': 5,
                                                           '3-10': 4, '3-11': 33, '3-2': 3, '3-4': 7,
                                                           '3-6': 7, '3-7': 9, '3-8': 6, '3-9': 16,
                                                           '1-1': 15})
        self.maxDiff = None
        # counts per Chord classification predicate; a single chord can
        # increment several of these
        self.assertEqual(di['chordifyTypesHistogram'], {'isMinorTriad': 8, 'isAugmentedTriad': 0,
                                                        'isTriad': 37, 'isSeventh': 0, 'isDiminishedTriad': 4,
                                                        'isDiminishedSeventh': 0, 'isIncompleteMajorTriad': 21,
                                                        'isHalfDiminishedSeventh': 0, 'isMajorTriad': 25,
                                                        'isDominantSeventh': 0, 'isIncompleteMinorTriad': 12})
        # quarter-length values mapped to occurrence counts
        self.assertEqual(di['noteQuarterLengthHistogram'], {0.5: 116, 1.0: 39, 1.5: 27, 2.0: 31, 3.0: 2, 4.0: 3,
                                                            0.75: 4, 0.25: 16})
        # can access parts by index
        self.assertEqual(len(di['parts']), 3)
        # stored in parts are StreamForms instances, caching their results
        self.assertEqual(len(di['parts'][0]['flat.notes']), 71)
        self.assertEqual(len(di['parts'][1]['flat.notes']), 66)
        # getting a measure by part
        self.assertEqual(len(di['parts'][0]['getElementsByClass.Measure']), 19)
        self.assertEqual(len(di['parts'][1]['getElementsByClass.Measure']), 19)
        self.assertEqual(di['parts'][0]['pitchClassHistogram'], [9, 1, 11, 0, 9, 13, 0, 11, 0, 12, 5, 0])
        # the sum of the two arrays is the pitch class histogram of the complete
        # work
        self.assertEqual(di['pitchClassHistogram'], [47, 2, 25, 0, 25, 42, 0, 33, 0, 38, 22, 4])
def testStreamFormsB(self):
from music21 import features, note
s = stream.Stream()
for p in ['c4', 'c4', 'd-4', 'd#4', 'f#4', 'a#4', 'd#5', 'a5']:
s.append(note.Note(p))
di = features.DataInstance(s)
self.assertEqual(di['midiIntervalHistogram'], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# # in most cases will want to get a vector for each part
# s = corpus.parse('corelli/opus3no1/1grave')
# di = features.DataInstance(s)
# self.assertEqual(di['parts'][0]['midiIntervalHistogram'], [9, 1, 4, 3, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
#
# self.assertEqual(di['parts'][1]['midiIntervalHistogram'], [0, 1, 3, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    def testStreamFormsC(self):
        # the secondsMap form filters the flat secondsMap down to
        # GeneralNote bundles; at the default tempo each quarter note
        # lasts 0.5 seconds
        from pprint import pformat
        from music21 import features, note
        s = stream.Stream()
        for p in ['c4', 'c4', 'd-4', 'd#4', 'f#4', 'a#4', 'd#5', 'a5']:
            s.append(note.Note(p))
        di = features.DataInstance(s)
        self.assertEqual(pformat(di['secondsMap']), """[{'durationSeconds': 0.5,
  'element': <music21.note.Note C>,
  'endTimeSeconds': 0.5,
  'offsetSeconds': 0.0,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note C>,
  'endTimeSeconds': 1.0,
  'offsetSeconds': 0.5,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note D->,
  'endTimeSeconds': 1.5,
  'offsetSeconds': 1.0,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note D#>,
  'endTimeSeconds': 2.0,
  'offsetSeconds': 1.5,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note F#>,
  'endTimeSeconds': 2.5,
  'offsetSeconds': 2.0,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note A#>,
  'endTimeSeconds': 3.0,
  'offsetSeconds': 2.5,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note D#>,
  'endTimeSeconds': 3.5,
  'offsetSeconds': 3.0,
  'voiceIndex': None},
 {'durationSeconds': 0.5,
  'element': <music21.note.Note A>,
  'endTimeSeconds': 4.0,
  'offsetSeconds': 3.5,
  'voiceIndex': None}]""", pformat(di['secondsMap']))
    def testDataSetOutput(self):
        # End-to-end check of DataSet processing and of the CSV output format,
        # with and without the Identifier column.
        from music21 import features
        # test just a few features
        featureExtractors = features.extractorsById(['ql1', 'ql2', 'ql4'], 'native')
        # need to define what the class label will be
        ds = features.DataSet(classLabel='Composer')
        ds.addFeatureExtractors(featureExtractors)
        # add works, defining the class value
        ds.addData('bwv66.6', classValue='Bach')
        ds.addData('corelli/opus3no1/1grave', classValue='Corelli')
        ds.process()
        # manually create an output format and get output
        of = OutputCSV(ds)
        post = of.getString(lineBreak='//')
        self.assertEqual(post, 'Identifier,Unique_Note_Quarter_Lengths,Most_Common_Note_Quarter_Length,Range_of_Note_Quarter_Lengths,Composer//bwv66.6,3,1.0,1.5,Bach//corelli/opus3no1/1grave,8,0.5,3.75,Corelli')
        # without id
        post = of.getString(lineBreak='//', includeId=False)
        self.assertEqual(post, 'Unique_Note_Quarter_Lengths,Most_Common_Note_Quarter_Length,Range_of_Note_Quarter_Lengths,Composer//3,1.0,1.5,Bach//8,0.5,3.75,Corelli')
        # smoke-test the three on-disk writers; output location is the
        # DataSet's default scratch path
        ds.write(format='tab')
        ds.write(format='csv')
        ds.write(format='arff')
def testFeatureFail(self):
from music21 import features
from music21 import base
featureExtractors = ['p10', 'p11', 'p12', 'p13']
featureExtractors = features.extractorsById(featureExtractors,
'jSymbolic')
ds = features.DataSet(classLabel='Composer')
ds.addFeatureExtractors(featureExtractors)
# create problematic streams
s = stream.Stream()
#s.append(None) # will create a wrapper -- NOT ANYMORE
s.append(base.ElementWrapper(None))
ds.addData(s, classValue='Monteverdi')
ds.addData(s, classValue='Handel')
# process with all feature extractors, store all features
ds.process()
#---------------------------------------------------------------------------
# silent tests
    def xtestComposerClassificationJSymbolic(self):
        '''Demonstrating writing out data files for feature extraction. Here, features are used from the jSymbolic library.
        '''
        from music21 import features
        # rhythm (r*) and pitch (p*) extractor ids from the jSymbolic set
        featureExtractors = ['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21']
        # will return a list
        featureExtractors = features.extractorsById(featureExtractors,
                                'jSymbolic')
        #worksBach = corpus.getBachChorales()[100:143] # a middle range
        worksMonteverdi = corpus.getMonteverdiMadrigals()[:43]
        worksBach = corpus.getBachChorales()[:5]
        # worksMonteverdi = corpus.getMonteverdiMadrigals()[:5]
        # need to define what the class label will be
        ds = features.DataSet(classLabel='Composer')
        ds.addFeatureExtractors(featureExtractors)
        # add works, defining the class value
        # for w in worksBach:
        #     ds.addData(w, classValue='Bach')
        for w in worksMonteverdi:
            ds.addData(w, classValue='Monteverdi')
        for w in worksBach:
            ds.addData(w, classValue='Bach')
        # process with all feature extractors, store all features
        ds.process()
        # write one data file per supported format
        ds.write(format='tab')
        ds.write(format='csv')
        ds.write(format='arff')
    def xtestRegionClassificationJSymbolicA(self):
        '''Demonstrating writing out data files for feature extraction. Here, features are used from the jSymbolic library.
        '''
        from music21 import features
        featureExtractors = features.extractorsById(['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21'],
                                'jSymbolic')
        # two Chinese and two Central European folk-song collections from the
        # Essen corpus provide the two region classes
        oChina1 = corpus.parse('essenFolksong/han1')
        oChina2 = corpus.parse('essenFolksong/han2')
        oMitteleuropa1 = corpus.parse('essenFolksong/boehme10')
        oMitteleuropa2 = corpus.parse('essenFolksong/boehme20')
        ds = features.DataSet(classLabel='Region')
        ds.addFeatureExtractors(featureExtractors)
        # add works, defining the class value
        for o, name in [(oChina1, 'han1'),
                        (oChina2, 'han2')]:
            for w in o.scores:
                songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
                ds.addData(w, classValue='China', id=songId)
        for o, name in [(oMitteleuropa1, 'boehme10'),
                        (oMitteleuropa2, 'boehme20')]:
            for w in o.scores:
                songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
                ds.addData(w, classValue='Mitteleuropa', id=songId)
        # process with all feature extractors, store all features
        ds.process()
        # NOTE(review): getString() does not accept a 'format' keyword (hence the
        # pylint suppressions) — these calls look like they would raise; confirm
        # whether this silent test is intentionally exercising that failure.
        ds.getString(format='tab') # pylint: disable=unexpected-keyword-arg
        ds.getString(format='csv') # pylint: disable=unexpected-keyword-arg
        ds.getString(format='arff') # pylint: disable=unexpected-keyword-arg
def xtestRegionClassificationJSymbolicB(self):
'''Demonstrating writing out data files for feature extraction. Here, features are used from the jSymbolic library.
'''
from music21 import features
# features common to both collections
featureExtractors = features.extractorsById(['r31', 'r32', 'r33', 'r34', 'r35', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p19', 'p20', 'p21'],
'jSymbolic')
# first bundle
ds = features.DataSet(classLabel='Region')
ds.addFeatureExtractors(featureExtractors)
oChina1 = corpus.parse('essenFolksong/han1')
oMitteleuropa1 = corpus.parse('essenFolksong/boehme10')
# add works, defining the class value
for o, name in [(oChina1, 'han1')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='China', id=songId)
for o, name in [(oMitteleuropa1, 'boehme10')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='Mitteleuropa', id=songId)
# process with all feature extractors, store all features
ds.process()
ds.write('/_scratch/chinaMitteleuropaSplit-a.tab')
ds.write('/_scratch/chinaMitteleuropaSplit-a.csv')
ds.write('/_scratch/chinaMitteleuropaSplit-a.arff')
# create second data set from alternate collections
ds = features.DataSet(classLabel='Region')
ds.addFeatureExtractors(featureExtractors)
oChina2 = corpus.parse('essenFolksong/han2')
oMitteleuropa2 = corpus.parse('essenFolksong/boehme20')
# add works, defining the class value
for o, name in [(oChina2, 'han2')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='China', id=songId)
for o, name in [(oMitteleuropa2, 'boehme20')]:
for w in o.scores:
songId = 'essenFolksong/%s-%s' % (name, w.metadata.number)
ds.addData(w, classValue='Mitteleuropa', id=songId)
# process with all feature extractors, store all features
ds.process()
ds.write('/_scratch/chinaMitteleuropaSplit-b.tab')
ds.write('/_scratch/chinaMitteleuropaSplit-b.csv')
ds.write('/_scratch/chinaMitteleuropaSplit-b.arff')
def xtestOrangeBayesA(self):
'''Using an already created test file with a BayesLearner.
'''
import orange # @UnresolvedImport
data = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/bachMonteverdi-a/bachMonteverdi-a.tab')
classifier = orange.BayesLearner(data)
for i in range(len(data)):
c = classifier(data[i])
print("original", data[i].getclass(), "BayesLearner:", c)
    def xtestClassifiersA(self):
        '''Using an already created test file with a BayesLearner.
        '''
        import orange, orngTree # @UnresolvedImport
        # two disjoint halves of the same data set; each classifier is trained
        # and evaluated on every (train, test) combination, including itself
        data1 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b1.tab')
        data2 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b2.tab')
        majority = orange.MajorityLearner
        bayes = orange.BayesLearner
        tree = orngTree.TreeLearner
        knn = orange.kNNLearner
        for classifierType in [majority, bayes, tree, knn]:
            print('')
            for classifierData, classifierStr, matchData, matchStr in [
                (data1, 'data1', data1, 'data1'),
                (data1, 'data1', data2, 'data2'),
                (data2, 'data2', data2, 'data2'),
                (data2, 'data2', data1, 'data1'),
            ]:
                # train with data1
                classifier = classifierType(classifierData)
                # count how many examples the trained classifier mislabels
                mismatch = 0
                for i in range(len(matchData)):
                    c = classifier(matchData[i])
                    if c != matchData[i].getclass():
                        mismatch += 1
                print('%s %s: misclassified %s/%s of %s' % (classifierStr, classifierType, mismatch, len(matchData), matchStr))
            # if classifierType == orngTree.TreeLearner:
            #     orngTree.printTxt(classifier)
    def xtestClassifiersB(self):
        '''Using an already created test file with a BayesLearner.
        '''
        import orange, orngTree # @UnresolvedImport
        data1 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b1.tab')
        # reuse data1's domain so both halves share one schema before merging
        data2 = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/chinaMitteleuropa-b/chinaMitteleuropa-b2.tab', use = data1.domain)
        data1.extend(data2)
        data = data1
        majority = orange.MajorityLearner
        bayes = orange.BayesLearner
        tree = orngTree.TreeLearner
        knn = orange.kNNLearner
        # 10-fold cross-validation per classifier type
        folds = 10
        for classifierType in [majority, bayes, tree, knn]:
            print('')
            cvIndices = orange.MakeRandomIndicesCV(data, folds)
            for fold in range(folds):
                # negate=1 selects everything NOT in this fold as training data
                train = data.select(cvIndices, fold, negate=1)
                test = data.select(cvIndices, fold)
                for classifierData, classifierStr, matchData, matchStr in [
                    (train, 'train', test, 'test'),
                ]:
                    # train with data1
                    classifier = classifierType(classifierData)
                    # count mislabeled examples in the held-out fold
                    mismatch = 0
                    for i in range(len(matchData)):
                        c = classifier(matchData[i])
                        if c != matchData[i].getclass():
                            mismatch += 1
                    print('%s %s: misclassified %s/%s of %s' % (classifierStr, classifierType, mismatch, len(matchData), matchStr))
    def xtestOrangeClassifiers(self):
        '''This test shows how to compare four classifiers; replace the file path with a path to the .tab data file.
        '''
        import orange, orngTree # @UnresolvedImport
        data = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/bachMonteverdi-a/bachMonteverdi-a.tab')
        # setting up the classifiers
        majority = orange.MajorityLearner(data)
        bayes = orange.BayesLearner(data)
        tree = orngTree.TreeLearner(data, sameMajorityPruning=1, mForPruning=2)
        knn = orange.kNNLearner(data, k=21)
        # display names used in the printed table header
        majority.name="Majority"
        bayes.name="Naive Bayes"
        tree.name="Tree"
        knn.name="kNN"
        classifiers = [majority, bayes, tree, knn]
        # print the head
        print("Possible classes:", data.domain.classVar.values)
        print("Original Class", end=' ')
        for l in classifiers:
            print("%-13s" % (l.name), end=' ')
        print()
        # one row per example: actual class followed by each classifier's
        # probability for the first class
        for example in data:
            print("(%-10s) " % (example.getclass()), end=' ')
            for c in classifiers:
                p = c([example, orange.GetProbabilities])
                print("%5.3f " % (p[0]), end=' ')
            print("")
def xtestOrangeClassifierTreeLearner(self):
import orange, orngTree # @UnresolvedImport
data = orange.ExampleTable('/Volumes/xdisc/_sync/_x/src/music21Ext/mlDataSets/bachMonteverdi-a/bachMonteverdi-a.tab')
tree = orngTree.TreeLearner(data, sameMajorityPruning=1, mForPruning=2)
#tree = orngTree.TreeLearner(data)
for i in range(len(data)):
p = tree(data[i], orange.GetProbabilities)
print("%d: %5.3f (originally %s)" % (i+1, p[1], data[i].getclass()))
orngTree.printTxt(tree)
#-------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = [FeatureExtractor]

if __name__ == "__main__":
    # run this module's Test suite through music21's standard test runner
    #import sys
    #sys.argv.append('StreamFormsA')
    import music21
    music21.mainTest(Test)

#------------------------------------------------------------------------------
# eof
|
import argparse
import random
from score import parse
# inp is an input file as a single string
# return your output as a string
def solve(seed, inp, log):
    """Stub solver: seed the RNG, parse the input, and return a placeholder.

    NOTE(review): `log` is currently unused and the parsed result is not yet
    consumed — the real solution is still TODO.
    """
    random.seed(seed)
    parsed = parse(inp)  # TODO: Solve the problem using `parsed`
    return '0'
|
#pylint: disable=import-error
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Concatenate, Convolution1D, GlobalMaxPooling1D, Embedding, Dropout, Lambda, Flatten, Add
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.callbacks import TensorBoard
import sys
import numpy as np
from tensorflow.keras import backend as K
import rb.processings.diacritics.utils as utils
import functools
class BertCNN_hugging(object):
    """
    Class to implement simple Bert + Character Level Convolutional Neural Network
    The model is used to classify diacritics
    """

    def __init__(self, window_size, alphabet_size, embedding_size, conv_layers, fc_hidden_size, num_of_classes, batch_max_sentences, batch_max_windows,
                 bert_trainable, cnn_dropout_rate, bert_wrapper, learning_rate, init_model, optimizer='adam', loss='categorical_crossentropy'):
        """
        Initialization for the Bert + Character Level CNN model.
        Args:
            CNN side
                window_size (int): Size of window
                alphabet_size (int): Size of alphabets to create embeddings for
                embedding_size (int): Size of embeddings
                conv_layers (list[list[int]]): List of Convolution layers for model
                cnn_dropout_rate (float): Dropout Rate for CNN
            Bert side
                bert_wrapper (obj): Bert wrapper
                bert_trainable (bool): Whether to train BERT or not
                batch_max_sentences (int): Maximum sentences in batch
                batch_max_windows (int): Maximum windows in batch
            init_model (string): Name of model to start training from
            fc_hidden_size (int): Size of hidden layer between features and prediction
            num_of_classes (int): Number of classes in data
            optimizer (str): Training optimizer
            loss (str): Loss function
            learning_rate (float): Learning rate to use for training
        """
        self.window_size = window_size
        self.alphabet_size = alphabet_size
        self.embedding_size = embedding_size
        self.conv_layers = conv_layers
        # total filters across all conv branches; used to shape the CNN output
        self.total_number_of_filters = functools.reduce(lambda x, y: x + y[0], conv_layers, 0)
        self.num_of_classes = num_of_classes
        self.cnn_dropout_rate = cnn_dropout_rate
        self.learning_rate = learning_rate
        self.fc_hidden_size = fc_hidden_size
        self.bert_wrapper = bert_wrapper
        self.bert_wrapper.bert_layer.trainable = bert_trainable
        self.batch_max_sentences = batch_max_sentences
        self.batch_max_windows = batch_max_windows
        self.init_model = init_model
        # NOTE: only 'adam' and 'categorical_crossentropy' are recognized; any
        # other value leaves self.optimizer / self.loss unset
        if optimizer == "adam":
            self.optimizer = keras.optimizers.Adam(lr=self.learning_rate)
        if loss == "categorical_crossentropy":
            self.loss = keras.losses.CategoricalCrossentropy(from_logits=False)
        self._build_model()  # builds self.model variable

    def _build_embedding_mask(self):
        """Build a (alphabet_size, num_of_classes) 0/1 matrix restricting which
        diacritic classes are legal for each center character.

        NOTE(review): row indices 2/10/13/16 are assumed to be a/s/t/i in the
        alphabet encoding used by the data pipeline — confirm against the
        char_to_id dictionary.
        """
        embedding_mask_weights = np.ones((self.alphabet_size, self.num_of_classes))
        # a -> a, ă, â
        embedding_mask_weights[2] = [1, 1, 1, 0, 0]
        # s -> s, ș
        embedding_mask_weights[10] = [1, 0, 0, 1, 0]
        # t -> t, ț
        embedding_mask_weights[13] = [1, 0, 0, 0, 1]
        # i -> i, î
        embedding_mask_weights[16] = [1, 0, 1, 0, 0]
        return embedding_mask_weights

    def _build_model(self):
        """
        Build and compile the Bert + Character Level CNN model
        Returns: None
        """
        # Input layers
        input_bert_ids = Input(shape=(self.batch_max_sentences, self.bert_wrapper.max_seq_len), name='bert_input_ids', dtype='int32')
        input_bert_att = Input(shape=(self.batch_max_sentences, self.bert_wrapper.max_seq_len), name='bert_attention_ids', dtype='int32')
        input_bert_seg = Input(shape=(self.batch_max_sentences, self.bert_wrapper.max_seq_len), name='bert_segment_ids', dtype='int32')
        input_token_ids = Input(shape=(self.batch_max_windows,), name='token_ids', dtype='int32')
        input_sent_ids = Input(shape=(self.batch_max_windows,), name='sent_ids', dtype='int32')
        input_mask = Input(shape=(self.batch_max_windows,), name='mask', dtype='float32')
        input_char_windows = Input(shape=(self.batch_max_windows, self.window_size), name='char_windows', dtype='int32')
        keras_internal_batch_size = K.shape(input_token_ids)[0]
        ##########################################################################
        ######################   Bert   ############################################
        # collapse the sentence axis so BERT sees (batch * max_sent, max_seq_len)
        input_bert_ids_reshaped = tf.reshape(input_bert_ids, shape=(-1, self.bert_wrapper.max_seq_len), name="reshape_input_bert_ids")
        input_bert_att_reshaped = tf.reshape(input_bert_att, shape=(-1, self.bert_wrapper.max_seq_len), name="reshape_attention_bert_ids")
        input_bert_seg_reshaped = tf.reshape(input_bert_seg, shape=(-1, self.bert_wrapper.max_seq_len), name="reshape_input_bert_seg")
        # shape = (?batch_size x max_sent, max_seq_len)
        bert_output = self.bert_wrapper.bert_layer([input_bert_ids_reshaped, input_bert_att_reshaped, input_bert_seg_reshaped])[0]
        # print(bert_output)
        # bert_output = self.bert_wrapper.bert_layer([input_bert_ids_reshaped, input_bert_seg_reshaped])[0]
        # bert_output = (?batch_size x max_sent, bert_max_seq_len, bert_hidden_size)
        bert_output = tf.reshape(bert_output, shape=(-1, self.batch_max_sentences, self.bert_wrapper.max_seq_len, self.bert_wrapper.hidden_size), name="bert_output")
        # bert_output = (?batch_size, max_sent, bert_max_seq_len, bert_hidden_size)
        # print(bert_output)
        # sys.exit()
        ##########################################################################
        ##########################################################################
        ######################   CharCNN   #########################################
        embedding_mask_weights = self._build_embedding_mask()
        input_char_windows_reshaped = tf.reshape(input_char_windows, shape=(-1, self.window_size), name="reshape_input_char_windows")
        # shape = (?batch_size x max_windows, window_size)
        # char mask: per-window legal-class mask looked up from the window's
        # center character (frozen embedding holding the 0/1 rows built above)
        char_mask = Embedding(self.alphabet_size, self.num_of_classes, input_length=1, trainable=False, weights=[embedding_mask_weights], name="mask_embedding")(input_char_windows_reshaped[:, (self.window_size-1)//2])
        char_mask = tf.reshape(char_mask, (-1, self.batch_max_windows, self.num_of_classes), name="reshape_char_mask")
        # Embedding layer
        x = Embedding(self.alphabet_size, self.embedding_size, input_length=self.window_size, trainable=True, name="sequence_embedding")(input_char_windows_reshaped)
        # x = (?batch_size, window_size, embedding_size)
        middle_char_embedding = x[:, (self.window_size-1)//2]
        # Convolution layers
        convolution_output = []
        for num_filters, filter_width in self.conv_layers:
            conv = Conv1D(filters=num_filters, kernel_size=filter_width, activation='tanh',
                          name='Conv1D_{}_{}'.format(num_filters, filter_width))(x)
            # conv = (?batch_size, window_size-filter_size+1, num_filters)
            pool = GlobalMaxPooling1D(name='MaxPoolingOverTime_{}_{}'.format(num_filters, filter_width))(conv)
            # pool = (?batch_size, num_filters)
            convolution_output.append(pool)
        if convolution_output != []:
            x = Concatenate()(convolution_output)
            # x = (?batch_size, total_number_of_filters)
            x = Dropout(rate=self.cnn_dropout_rate)(x)
            # concatenate middle char
            x = Concatenate()([x, middle_char_embedding])
            self.total_number_of_filters = self.total_number_of_filters + self.embedding_size
        else:
            # no conv branches configured: fall back to the raw window embedding
            x = Flatten()(x)
            self.total_number_of_filters = self.window_size * self.embedding_size
        char_cnn_output = Dropout(rate=self.cnn_dropout_rate)(x)
        char_cnn_output = tf.reshape(char_cnn_output, shape=(-1, self.batch_max_windows, self.total_number_of_filters), name="char_cnn_output")
        # char_cnn_output = (?batch_size, max_windows, total_filters)
        ##########################################################################
        # get bert tokens coresponding to sent_ids and token_ids
        batch_indexes = tf.range(0, keras_internal_batch_size, name="range_batch_indexes")
        batch_indexes = tf.reshape(batch_indexes, (-1, 1), name="reshape_batch_indexes")
        batch_indexes = tf.tile(batch_indexes, (1, self.batch_max_windows), name="tile_batch_indexes")
        indices = tf.stack([batch_indexes, input_sent_ids, input_token_ids], axis=2)
        bert_tokens = tf.gather_nd(bert_output, indices, name="bert_tokens")
        # apply bert dropout here?
        # bert_tokens = (?batch_size, max_windows, bert_hidden_size)
        bert_cnn_concatenation = Concatenate()([bert_tokens, char_cnn_output])
        # bert_cnn_concatenation = char_cnn_output
        # hidden layer
        hidden = Dense(self.fc_hidden_size, activation='relu')(bert_cnn_concatenation)
        # Output layer
        predictions = Dense(self.num_of_classes, activation='softmax')(hidden)
        # mask predictions based on middle char
        masked_predictions = keras.layers.multiply([predictions, char_mask])
        input_mask_reshaped = tf.reshape(input_mask, (-1, 1), name="reshape_input_mask")
        # mask prediction based on window mask
        # extended_mask = tf.reshape(input_mask, (-1, self.batch_max_windows, 1))
        # extended_mask = tf.tile(extended_mask, [1, 1, self.num_of_classes])
        # masked_predictions = keras.layers.multiply([masked_predictions, extended_mask])
        flatten_masked_predictions = tf.reshape(masked_predictions, shape=(-1, self.num_of_classes), name="resh_flatmaskpred")
        # flatten_masked_predictions = masked_predictions
        # flatten_masked_prediction = (?batch_size x max_windows, num_of_classes)
        # Build and compile model
        model = Model(inputs=[input_bert_ids, input_bert_att, input_bert_seg, input_token_ids, input_sent_ids, input_mask, input_char_windows], outputs=[flatten_masked_predictions, input_mask_reshaped])
        weights = np.ones(self.num_of_classes)
        model.compile(optimizer=self.optimizer, loss=[weighted_categorical_crossentropy(weights, self.num_of_classes).loss, None], metrics=[categorical_acc])
        # model.compile(optimizer=self.optimizer, loss=[self.loss, None], metrics=[tf.keras.metrics.categorical_accuracy])
        # self.bert_wrapper.load_weights()
        # FIX: compare against None with 'is not', not '!='
        if self.init_model is not None:
            # TODO: make this automatic from main
            # model.load_weights("rb/processings/diacritics/rotransformers/bert_models/" + self.init_model)
            loaded_model = tf.keras.models.load_model("rb/processings/diacritics/rotransformers/bert_models/" + self.init_model,
                           custom_objects={'loss': weighted_categorical_crossentropy(np.ones(5), 5).loss, 'categorical_acc': categorical_acc}, compile=False)
            # weights = loaded_model.get_weights()
            # model = Model(inputs=[input_bert_ids, input_bert_att, input_bert_seg, input_token_ids, input_sent_ids, input_mask, input_char_windows], outputs=[flatten_masked_predictions, input_mask_reshaped])
            model.compile(optimizer=self.optimizer, loss=[weighted_categorical_crossentropy(weights, self.num_of_classes).loss, None], metrics=[categorical_acc])
            # copy weights layer-by-layer, skipping the (already pretrained)
            # transformer layer
            weights = [layer.get_weights() for layer in loaded_model.layers]
            print(len(loaded_model.layers), len(model.layers))
            for layer, weight in zip(model.layers, weights):
                print(layer.name)
                if layer.name == "tf_roberta_model":  # or layer.name == "tf_bert_model":
                    continue
                layer.set_weights(weight)
            # sys.exit()
        self.model = model
        print("Bert+CharCNN model built: ")
        self.model.summary()

    def train(self, train_dataset, train_batch_size, train_size, dev_dataset, dev_batch_size, dev_size, epochs, file_evalname, char_to_id_dict, model_filename):
        """Train for *epochs*, evaluating on dev each epoch and checkpointing
        (SavedModel format) whenever dia-word accuracy improves."""
        best_wa_dia = -1
        best_wa_all = -1
        best_ca_dia = -1
        best_ca_all = -1
        best_epoch = -1
        dev_steps = (dev_size // dev_batch_size) + 1
        if dev_batch_size == 1:
            dev_steps += 1
        for i in range(epochs):
            print("EPOCH ", (i+1))
            self.model.fit(train_dataset, steps_per_epoch=train_size//train_batch_size, epochs=1, verbose=1)
            wa_dia, wa_all, ca_dia, ca_all, _ = utils.evaluate_model(self.model, file_evalname, dev_dataset, dev_steps)
            # wa_dia, wa_all, ca_dia, ca_all, = 0, 0, 0, 0
            # model selection criterion: word accuracy on diacritic positions
            if wa_dia > best_wa_dia:
                best_wa_dia = wa_dia
                best_wa_all = wa_all
                best_ca_dia = ca_dia
                best_ca_all = ca_all
                best_epoch = i+1
                self.model.save(model_filename, save_format='tf')
            print("Best model: epoch =", best_epoch, "best word_accuracy_dia =", format(best_wa_dia, '.4f'), "best word_accuracy_all =", format(best_wa_all, '.4f'),
                  "best char_accuracy_dia =", format(best_ca_dia, '.4f'), "best char_accuracy_all =", format(best_ca_all, '.4f'))
            print("---------------")
def categorical_acc(y_true, y_pred):
    """Categorical accuracy for labels that arrive flattened.

    The model emits predictions of shape (batch * windows, num_classes);
    reshape y_true to y_pred's shape instead of hard-coding the class count
    (resolves the previous TODO about the magic number 5).
    """
    y_true = tf.reshape(y_true, shape=tf.shape(y_pred), name="reshape_acc")
    return keras.metrics.categorical_accuracy(y_true, y_pred)
class weighted_categorical_crossentropy(object):
    """
    A weighted version of keras.objectives.categorical_crossentropy
    Variables:
        weights: numpy array of shape (C,) where C is the number of classes
    Usage:
        loss = weighted_categorical_crossentropy(weights).loss
        model.compile(loss=loss,optimizer='adam')
    """

    def __init__(self, weights, num_of_classes):
        # K.variable places the weight vector in the backend graph
        self.weights = K.variable(weights)
        self.num_of_classes = num_of_classes

    def loss(self, y_true, y_pred):
        # labels arrive flattened; restore (batch * windows, num_of_classes)
        y_true = tf.reshape(y_true, shape=(-1, self.num_of_classes), name="reshape_loss")
        # scale preds so that the class probas of each sample sum to 1
        y_pred = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        # clip to avoid log(0)
        y_pred = K.clip(y_pred, K.epsilon(), 1)
        # calc: per-class cross-entropy scaled by the class weights,
        # summed over the class axis
        loss = y_true * K.log(y_pred) * self.weights
        loss = -K.sum(loss, -1)
        return loss
|
import cv2
def main():
    """Show the default camera feed with a text and rectangle overlay until Esc.

    Fixes over the original: checks the read() result, and releases the
    camera / closes the window on exit (the capture handle was leaked before).
    """
    cam = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX  # loop-invariant, hoisted
    try:
        while True:
            retval, img = cam.read()
            if not retval:
                # camera unavailable or frame grab failed — stop cleanly
                break
            cv2.putText(img, 'QWERTY', (10, 200), font, 1, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.rectangle(img, (200, 200), (400, 400), (255, 255, 255), 3)
            cv2.imshow('Name', img)
            if cv2.waitKey(1) == 27:  # Esc
                break
    finally:
        cam.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
|
__author__ = 'admiral0'
import os.path as path
import re
from .Exceptions import JsonNotValid, ModDoesNotExist, ModJsonDoesNotExist, ModVersionDoesNotExistInRepo
from .Common import *
def validate_version(ver):
    """Validate a mod version string; return a list of error messages
    (empty when valid).

    Allowed characters: letters, digits, underscore, dot, hyphen, parentheses.
    """
    assert type(ver) is str
    # raw strings so '\.' and '\-' reach the regex engine instead of being
    # (deprecation-warned) invalid string escapes
    if not re.match(r'^[a-zA-Z_0-9\.\-()]+$', ver):
        return ['Version ' + ver + r' must match ^[a-zA-Z_0-9\.\-()]+$']
    return []
def validate_minecraft(mver, vv):
    """Validate the 'minecraft' entry of version *vv*.

    Returns a list of error messages, one per non-matching entry (the
    original stopped at the first bad entry; reporting all of them matches
    how validate_versions accumulates errors).
    """
    # isinstance instead of a type identity check; also accepts list subclasses
    if not isinstance(mver, list):
        return ['Minecraft version must be an array for version ' + vv]
    return ['Minecraft version ' + mv + r' does not match ^\d+\.\d+(\.\d+)?$ pattern in version ' + vv
            for mv in mver if not re.match(minecraft_version_regex, mv)]
def validate_versions(d, m):
    """Validate the 'versions' mapping of mod *m*.

    Checks each version key's format, that the referenced file exists inside
    the mod folder, the 'minecraft' entry, and the optional 'type' entry.
    Returns the accumulated list of error messages (empty when valid).
    """
    if not isinstance(d, dict):
        return ['Versions is not a dict!']
    error = []
    for ver, vdata in d.items():
        error.extend(validate_version(ver))
        try:
            if not path.isfile(path.join(m.mod_dir, vdata['file'])):
                error.append('File ' + vdata['file'] + ' has not been found in mod folder:' + m.mod_dir)
        except KeyError:
            error.append('Key \'file\' is missing in version ' + ver)
        try:
            for err in validate_minecraft(vdata['minecraft'], ver):
                error.append(err)
        except KeyError:
            error.append('Key \'minecraft\' is missing in version ' + ver)
        if 'type' in vdata:
            if vdata['type'] not in ['universal', 'client', 'server']:
                # BUG FIX: the message previously read "...verXmust be..."
                # (missing space after the version)
                error.append('Type for ver ' + ver + ' must be one of universal, client or server')
    return error
class Mod:
    """A repository mod: wraps a mod folder and its JSON metadata.

    Construction raises if the folder or its JSON file is missing, or if the
    JSON does not satisfy the schema declared in ``_elements``.
    """

    # Schema: field name -> expected type, required flag, and a validator
    # callable (value, mod) -> list of error strings (empty when valid).
    _elements = {
        'author': {
            'type': str,
            'required': True,
            'validate': lambda val, m: [] if 0 < len(val) < 255 else ['Length of author must be between 0 and 255']
        },
        'description': {
            'type': str,
            'required': False,
            'validate': lambda val, m: [],
        },
        'name': {
            'type': str,
            'required': True,
            'validate': lambda val, m: [] if 0 < len(val) < 255 else ['Length of the name must be between 0 and 255']
        },
        'url': {
            'type': str,
            'required': True,
            'validate': lambda val, m: [] if re.match(url_regex, val) else ['Must be a link']
        },
        'versions': {
            'type': dict,
            'required': True,
            'validate': lambda val, m: validate_versions(val, m)
        }
    }

    def __init__(self, mod_path):
        """Load and validate the mod located at *mod_path*."""
        if not path.isdir(mod_path):
            raise ModDoesNotExist(mod_path)
        self.mod_dir = mod_path
        self.json_path = path.join(mod_path, mod_file_name)
        if not path.isfile(self.json_path):
            raise ModJsonDoesNotExist(self.json_path)
        self.data = read_json(self.json_path)
        self.validate()
        # the folder name doubles as the mod's unique identifier in the repo
        self.slug = path.basename(mod_path)

    def validate(self):
        """Check self.data against _elements; raise JsonNotValid on errors."""
        errors = validate(self._elements, self.data, self)
        if len(errors) > 0:
            raise JsonNotValid(self.json_path, errors)

    def get_version(self, version):
        """Return the metadata dict for *version*; raise if it is unknown."""
        if version not in self.data['versions'].keys():
            raise ModVersionDoesNotExistInRepo(self.slug, version)
        return self.data['versions'][version]
|
# Problem: https://www.hackerrank.com/challenges/whats-your-name/problem
# Score: 10.0
# Read the first and last name from stdin (in that order) and greet.
first_name = input()
last_name = input()
print('Hello {0} {1}! You just delved into python.'.format(first_name, last_name))
|
from puma.runnable.message.status_message import RunInChildScopeStatusMessage, StartedStatusMessage, StatusMessage, status_message_type # noqa: F401
from puma.runnable.message.status_message_buffer import StatusMessageBuffer # noqa: F401
from puma.runnable.message.command_message import (CommandMessage, RemoteObjectGetAttributeCommandMessage, RemoteObjectMethodCommandMessage, # noqa: F401, I100
RunInChildScopeCommandMessage, StopCommandMessage) # noqa: F401
from puma.runnable.message.command_message_buffer import CommandMessageBuffer # noqa: F401
from puma.runnable.message.status_buffer import StatusBuffer, StatusBufferPublisher, StatusBufferSubscription # noqa: F401, I100
|
import unittest
import gym
import abp.openai.envs
from mock import Mock, patch
from abp.openai.wrappers import RewardWrapper
class DecomposedRewardWrapperTests(unittest.TestCase):
def setUp(self):
self.env = gym.make("Yahtzee-v0")
def test_should_call_env_with_reward_decomposition(self):
with patch.object(self.env.unwrapped, '_step', wraps=self.env.unwrapped._step) as mock_method:
wrapped_env = RewardWrapper(self.env)
wrapped_env.reset()
dummy_action = ([0] * 5, 0)
wrapped_env.step(dummy_action, True)
mock_method.assert_called_with(dummy_action, decompose_reward = True)
def tearDown(self):
del self.env
|
import numpy as np
import tensorflow as tf
from scipy.misc import imread
def calc_histogram(image):
    """Return a 256-bin intensity histogram of *image* over the range [0, 255]."""
    value_range = tf.constant([0, 255], dtype=tf.float32)
    return tf.histogram_fixed_width(tf.to_float(image), value_range, 256)
def histogram_loss(img1, img2):
    """Mean squared error between the 256-bin histograms of two images."""
    return tf.losses.mean_squared_error(calc_histogram(img1), calc_histogram(img2))
def batch_hist_loss(batch1, batch2, batch_size):
    """Average histogram loss over the first *batch_size* image pairs."""
    total = 0
    for idx in range(batch_size):
        total += histogram_loss(batch1[idx], batch2[idx])
    return total / batch_size
|
# BMI (body mass index) analyzer — prompts and messages are in Portuguese
weight = float(input('Qual é o seu peso? (Kg) '))
height = float(input('Qual é a sua altura? (m) '))
bmi = weight / height ** 2
print(f'Seu IMC é {bmi:.1f}')
# classify using WHO thresholds; earlier branches guarantee the lower bound
if bmi < 18.5:
    print('\033[1;34mVocê está abaixo do peso ideal!\033[m')
elif bmi < 25:
    print('\033[1;32mPeso ideal!\033[m')
elif bmi < 30:
    print('\033[1;33mVocê está com SOBREPESO!\033[m')
elif bmi < 40:
    print('\033[1;31mVocê está OBESO, precisa de atenção!\033[m')
else:
    print('\033[1;31mVocê está com OBESIDADE MÓRBIDA, precisa de tratamento com urgência!\033[m')
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import TemplateView
class TestView(TemplateView):
    """Render home/test.html with a placeholder ``id`` in the context."""
    template_name = "home/test.html"

    def get_context_data(self, **kwargs):
        # BUG FIX: forward **kwargs to super() so context supplied by
        # TemplateView (e.g. URL kwargs) is not silently dropped
        context = super().get_context_data(**kwargs)
        context["id"] = "lorem ipsum"
        return context
class ServiceWorkerView(TemplateView):
    """Serve the service-worker script as JavaScript.

    Serving sw.js as a template lets it embed reversed URLs; presumably it is
    routed at the site root so its scope covers the whole origin — confirm in
    the URLconf.
    """
    template_name = 'sw.js'
    content_type = 'application/javascript'
    name = 'sw.js'

    def get_context_data(self, **kwargs):
        # deliberately returns a fresh dict — the template only needs the
        # test URL, not the default TemplateView context
        return {
            'test_url': reverse('test'),
        }
|
class Solution(object):
    def orderOfLargestPlusSign(self, N, mines):
        """Return the order of the largest plus sign of 1s in an N x N grid
        where the cells listed in *mines* are 0 (LeetCode 764).

        For every cell, the plus order is the minimum of the four directional
        runs of 1s through it; a single dp grid keeps the running minimum
        while each direction is swept in turn.
        """
        blocked = {(r, c) for r, c in mines}
        # initialize to N: an upper bound on any directional run, so the
        # first min() takes the run value directly
        order = [[N] * N for _ in range(N)]

        for i in range(N):
            run = 0
            for j in range(N):  # row i, left -> right
                run = run + 1 if (i, j) not in blocked else 0
                order[i][j] = min(order[i][j], run)
            run = 0
            for j in reversed(range(N)):  # row i, right -> left
                run = run + 1 if (i, j) not in blocked else 0
                order[i][j] = min(order[i][j], run)
            run = 0
            for j in range(N):  # column i, top -> bottom
                run = run + 1 if (j, i) not in blocked else 0
                order[j][i] = min(order[j][i], run)
            run = 0
            for j in reversed(range(N)):  # column i, bottom -> top
                run = run + 1 if (j, i) not in blocked else 0
                order[j][i] = min(order[j][i], run)

        return max(max(row) for row in order) if N else 0
|
import os
from transformers import AutoConfig
from onnxruntime_extensions.onnxprocess import trace_for_onnx, pyfunc_from_model, build_customop_model
from onnxruntime_extensions.onnxprocess import torch_wrapper as torch
# Create a cache directory to store pretrained model.
cache_dir = os.path.join(".", "cache_models")
if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)

model_name_or_path = "gpt2"
device = "cpu"
beam_width = 4

# model geometry from the pretrained GPT-2 config; used later to size the
# empty past-state tensors during generation
config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)
num_attention_heads = config.n_head
hidden_size = config.n_embd
num_layer = config.n_layer

# destination for the full traced generation graph
gpt2_full_model_path = "./gpt2_full.onnx"

# this model was generated by this script
# https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/transformers/notebooks/Inference_GPT2-OneStepSearch_OnnxRuntime_CPU.ipynb
onnx_model_path = "gpt2_one_step_search.onnx"
# wrap the one-step-search ONNX graph as a callable used inside the trace
func_one_step = pyfunc_from_model(onnx_model_path)
def get_tokenizer(model_name_or_path, cache_dir):
    """Load the GPT-2 tokenizer and export it as an ONNX custom-op model.

    Returns a tuple ``(tokenizer, onnx_tokenizer_func)``: the HuggingFace
    tokenizer itself and a callable wrapping the exported ONNX graph.
    """
    from transformers import GPT2Tokenizer  # noqa
    tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)
    tokenizer.padding_side = "left"
    # GPT-2 ships without a dedicated pad token; reuse EOS for padding
    tokenizer.pad_token = tokenizer.eos_token
    gpt2_encoder_model_path = './gpt2_tok.onnx'
    build_customop_model('GPT2Tokenizer', gpt2_encoder_model_path, model=tokenizer)
    return tokenizer, pyfunc_from_model(gpt2_encoder_model_path)
def inference_and_dump_full_model(tokenizer, func_tokenizer, input_text, num_tokens_to_produce = 30):
    """Trace the tokenizer + GPT-2 beam-search loop, export it as one ONNX
    model (``gpt2_full_model_path``), and print the decoded first beam.

    Args:
        tokenizer: the Hugging Face GPT2Tokenizer (used only for decoding).
        func_tokenizer: ONNX tokenizer callable from get_tokenizer().
        input_text: list of prompt strings.
        num_tokens_to_produce: maximum number of generation steps.
    """
    # Everything under this context is traced so it can be saved as ONNX.
    with trace_for_onnx(input_text, num_tokens_to_produce, names=func_tokenizer.input_names) as tc_sess:
        inputs, num_tokens = tc_sess.get_inputs()
        input_ids, attention_mask = func_tokenizer(inputs, padding=True, padding_side='left')
        attention_mask = attention_mask.type(torch.float)
        position_ids = (attention_mask.long().cumsum(-1) - 1)
        # position_ids.masked_fill_(position_ids < 0, 0)
        # Empty Past State for generating first word
        # batch_size = input_ids.size()[0]
        batch_size = 1
        # Per-layer empty KV-cache: [2, batch, heads, 0, head_dim].
        past_shape = [2, batch_size, num_attention_heads, 0, hidden_size // num_attention_heads]
        empty_past = []
        for _ in range(num_layer):
            empty_past.append(torch.empty(*past_shape).type(torch.float32).to(device))
        # Initial beam-search state: one selected beam, zero log-probs,
        # every sentence still unfinished.
        beam_select_idx = torch.zeros([1, batch_size]).long()
        input_log_probs = torch.zeros([batch_size, 1])
        input_unfinished_sents = torch.ones([batch_size, 1], dtype=torch.bool)
        prev_step_scores = torch.zeros([batch_size, 1])
        beam_size = beam_width
        prev_step_results = input_ids.clone().detach().to(device)
        # Build an ONNX Loop; each iteration runs the one-step search model.
        cfg = torch.control_flow()
        for states in cfg.loop(num_tokens, torch.tensor(True), input_ids, position_ids,
                               attention_mask, beam_select_idx, input_log_probs,
                               input_unfinished_sents, prev_step_results, prev_step_scores, *empty_past):
            step = states[0]
            # Declare the dynamic dims of the loop-carried tensors.
            states[1].symbolic_shape = ['batch_size', 'seq_len']
            states[2].symbolic_shape = ['batch_size', 'seq_len']
            states[3].symbolic_shape = ['batch_size', 'all_seq_len']
            states[4].symbolic_shape = [1, 'batch_size']
            # prev_step_results
            states[7].symbolic_shape = ['batch_size', 'total_seq_len']
            for st_ in states[-num_layer:]:
                st_.symbolic_shape = [2, 'batch_size', num_attention_heads, 'past_seq_len', hidden_size // num_attention_heads]
            prev_attention_mask = states[3]
            outputs = func_one_step(*states[1:])
            last_state = outputs[0].clone().detach().cpu()
            input_ids = last_state.reshape([batch_size * beam_size, -1]).to(device)
            input_unfinished_sents_id = -3
            prev_step_results = outputs[-2].clone().detach().to(device)
            # position_ids = (torch.tensor([context_length + step - 1
            #                 ]).unsqueeze(0).repeat(batch_size * beam_size, 1).to(device))
            position_ids = torch.zeros([batch_size * beam_size, 1], dtype=torch.int64) + attention_mask.size()[-1]
            # factor is 1 only on the first step (step == 0): the mask must be
            # tiled from batch_size rows out to batch_size * beam_size rows.
            factor = (~step.type(torch.bool)).type(torch.int64)
            prev_attention_mask = prev_attention_mask.repeat(factor * (batch_size * beam_size - 1) + 1, 1).to(device)
            attention_mask = torch.cat(
                [
                    prev_attention_mask,
                    torch.ones([batch_size * beam_size, 1], dtype=torch.float),
                ],
                1,
            ).to(device)
            # Unpack the beam bookkeeping outputs (indexed from the end).
            beam_select_idx = outputs[input_unfinished_sents_id - 2].clone().detach().to(device)
            input_log_probs = outputs[input_unfinished_sents_id - 1].clone().detach().to(device)
            input_unfinished_sents = outputs[input_unfinished_sents_id].clone().detach().to(device)
            prev_step_scores = outputs[-1].clone().detach().to(device)
            past = []
            for i in range(num_layer):
                past_i = outputs[i + 1].clone().detach()
                past.append(past_i.to(device))
            # Loop condition: keep going while any beam is still generating.
            any_unfinished = input_unfinished_sents.any()
            input_ids.symbolic_shape = ['total_batch_size', 'seq_len']
            position_ids.symbolic_shape = ['total_batch_size', 'seq_len']
            attention_mask.symbolic_shape = ['total_batch_size', 'all_seq_len']
            prev_step_results.symbolic_shape = ['total_batch_size', 'step_seq_len']
            for st_ in past:
                st_.symbolic_shape = [2, 'total_batch_size', num_attention_heads, 'all_seq_len', hidden_size // num_attention_heads]
            cfg.flow_output(any_unfinished, input_ids,
                            position_ids, attention_mask, beam_select_idx,
                            input_log_probs, input_unfinished_sents, prev_step_results, prev_step_scores, *past)
        # Loop output 6 holds prev_step_results, i.e. the accumulated token ids.
        result_id = 6
        all_token_ids = cfg.finalize()[result_id]
        tc_sess.save_as_onnx(gpt2_full_model_path, all_token_ids)
        print(tokenizer.decode(all_token_ids.t[0], skip_special_tokens=True))
def verify_bsfull_model(input_text):
    """Reload the exported full ONNX model and print its decoded output as a
    sanity check against the traced run."""
    from onnxruntime_extensions import PyOrtFunction
    full_model = PyOrtFunction.from_model(gpt2_full_model_path)
    generated = full_model(input_text, 30)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))
if __name__ == "__main__":
    # Build the ONNX tokenizer, trace/export the full beam-search model,
    # then reload the exported model to sanity-check its output.
    tokenizer, func_tokenizer = get_tokenizer(model_name_or_path, cache_dir)
    input_text = ['best hotel in bay area.']
    inference_and_dump_full_model(tokenizer, func_tokenizer, input_text)
    verify_bsfull_model(input_text)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .functions import get_data, use_data, use_data_parametrize
|
# -*- coding: utf-8 -*-
import yaml
import json
import sys
import argparse
from Twittbot import Twittbot
if __name__ == "__main__":
    # CLI entry point: parse options, load the selected account's Twitter API
    # credentials from tokens.json, then run each requested bot action.
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--account", help="Select this account", action='store')
    parser.add_argument("-m", "--hashtag", help="Request tweets with this hashtag in it", action='store')
    parser.add_argument("-c", "--contest", help="Play to twitter contests and giveaways", action='store_true')
    parser.add_argument("-t", "--trend", help="Use twitter trends instead of a specific hashtag", action='store_true')
    parser.add_argument("-s", "--stole", help="Stole someone tweet in the top trend section", action='store_true')
    parser.add_argument("-n", "--numbers", help="Number of tweets the script will request", action='store', default=10, type=int)
    parser.add_argument("-p", "--post", help='Post a tweet from a specified file (can be used with "-i" option)', action='store')
    parser.add_argument("-i", "--image", help='Post an image from a specified file (can be used with "-p" option)', action='store')
    parser.add_argument("-f", "--followback", help="Follow back people that follow you", action='store_true')
    args = parser.parse_args()
    if not args.account:
        # BUG FIX: corrected "unsing" -> "using" in the user-facing error.
        print('Missing: You must choose an account from tokens.json using the -a option\n', file=sys.stderr)
        parser.print_help()
        sys.exit()
    # No action flag given at all: just show usage and stop.
    if not any((args.followback, args.trend, args.hashtag, args.stole,
                args.contest, args.post, args.image)):
        parser.print_help()
        sys.exit()
    with open('tokens.json', 'r') as json_file:
        tokens = json.load(json_file)
    if args.account not in tokens.keys():
        # BUG FIX: the message referred to "token.json" while the file actually
        # read above is "tokens.json".
        print(f'{args.account} is not in the tokens.json file, choose one of this account: {", ".join(tokens.keys())}', file=sys.stderr)
        sys.exit()
    consumer_key = tokens[args.account]["API_KEY"]
    consumer_secret = tokens[args.account]["API_SECRET"]
    access_token = tokens[args.account]["ACCESS_TOKEN"]
    access_secret = tokens[args.account]["ACCESS_SECRET"]
    # NOTE(review): yaml.Loader can construct arbitrary Python objects;
    # config.yaml is a local file, but yaml.safe_load would be the safer
    # default. FIX: the file handle was previously leaked (bare open()).
    with open("./config.yaml", 'r') as config_file:
        config = yaml.load(config_file, Loader=yaml.Loader)
    twittbot = Twittbot(args.account, config)
    twittbot.connect_api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token=access_token, access_secret=access_secret)
    if args.followback:
        twittbot.followback()
    if args.trend:
        twittbot.trend(args.numbers)
    if args.contest:
        twittbot.handle_contest(args.numbers)
    if args.hashtag:
        twittbot.handle_hashtag(args.hashtag, args.numbers)
    if args.stole:
        twittbot.stole()
    if args.post or args.image:
        twittbot.tweet(args.post, args.image)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window6.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI for the VideoEditor main window.

    NOTE: this file is generated from window6.ui; manual edits are lost on
    regeneration, so only comments are added here.
    """

    def setupUi(self, MainWindow):
        # Top-level window and central container with a single-column grid.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(905, 879)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Grid row 2: horizontal slider plus an (initially empty) label.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.horizontalSlider = QtWidgets.QSlider(self.centralwidget)
        self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
        self.horizontalSlider.setObjectName("horizontalSlider")
        self.horizontalLayout_3.addWidget(self.horizontalSlider)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setText("")
        self.label.setObjectName("label")
        self.horizontalLayout_3.addWidget(self.label)
        self.gridLayout.addLayout(self.horizontalLayout_3, 2, 0, 1, 1)
        # Grid row 1: fixed-size row of action buttons (captions are set in
        # retranslateUi; pushButton_2 is intentionally blank).
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_7 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_7.setObjectName("pushButton_7")
        self.horizontalLayout.addWidget(self.pushButton_7)
        self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_6.setObjectName("pushButton_6")
        self.horizontalLayout.addWidget(self.pushButton_6)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setText("")
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.horizontalLayout.addWidget(self.pushButton_4)
        self.pushButton_9 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_9.setObjectName("pushButton_9")
        self.horizontalLayout.addWidget(self.pushButton_9)
        self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_5.setObjectName("pushButton_5")
        self.horizontalLayout.addWidget(self.pushButton_5)
        self.pushButton_8 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_8.setObjectName("pushButton_8")
        self.horizontalLayout.addWidget(self.pushButton_8)
        self.pushButton_10 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_10.setObjectName("pushButton_10")
        self.horizontalLayout.addWidget(self.pushButton_10)
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
        # Grid rows 3-5: three left-to-right list widgets with internal
        # drag-and-drop reordering and hidden vertical scroll bars.
        self.listWidget_3 = QtWidgets.QListWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.listWidget_3.sizePolicy().hasHeightForWidth())
        self.listWidget_3.setSizePolicy(sizePolicy)
        self.listWidget_3.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.listWidget_3.setDragEnabled(True)
        self.listWidget_3.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        self.listWidget_3.setDefaultDropAction(QtCore.Qt.MoveAction)
        self.listWidget_3.setFlow(QtWidgets.QListView.LeftToRight)
        self.listWidget_3.setObjectName("listWidget_3")
        self.gridLayout.addWidget(self.listWidget_3, 5, 0, 1, 1)
        self.listWidget = QtWidgets.QListWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.listWidget.sizePolicy().hasHeightForWidth())
        self.listWidget.setSizePolicy(sizePolicy)
        self.listWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.listWidget.setDragEnabled(True)
        self.listWidget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        self.listWidget.setDefaultDropAction(QtCore.Qt.MoveAction)
        self.listWidget.setFlow(QtWidgets.QListView.LeftToRight)
        self.listWidget.setLayoutMode(QtWidgets.QListView.SinglePass)
        self.listWidget.setObjectName("listWidget")
        self.gridLayout.addWidget(self.listWidget, 3, 0, 1, 1)
        self.listWidget_2 = QtWidgets.QListWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.listWidget_2.sizePolicy().hasHeightForWidth())
        self.listWidget_2.setSizePolicy(sizePolicy)
        self.listWidget_2.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.listWidget_2.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.listWidget_2.setDragEnabled(True)
        self.listWidget_2.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        self.listWidget_2.setDefaultDropAction(QtCore.Qt.MoveAction)
        self.listWidget_2.setFlow(QtWidgets.QListView.LeftToRight)
        self.listWidget_2.setObjectName("listWidget_2")
        self.gridLayout.addWidget(self.listWidget_2, 4, 0, 1, 1)
        # Standard window chrome: menu bar and status bar.
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 905, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Applies the translatable window title and (Chinese) button captions.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "VideoEditor"))
        self.pushButton_6.setText(_translate("MainWindow", "打开背景"))
        self.pushButton_7.setText(_translate("MainWindow", "打开目标"))
        self.pushButton.setText(_translate("MainWindow", "打开音频"))
        self.pushButton_4.setText(_translate("MainWindow", "设置切分起始时间"))
        self.pushButton_9.setText(_translate("MainWindow", "tiktok"))
        self.pushButton_5.setText(_translate("MainWindow", "影流之主"))
        self.pushButton_8.setText(_translate("MainWindow", "原地切分"))
        self.pushButton_10.setText(_translate("MainWindow", "重复"))
        self.pushButton_3.setText(_translate("MainWindow", "生成"))
|
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer not present in nums.

        FIX: the original tested ``smallest in nums`` against a list inside
        the loop, giving O(n^2) overall; materializing a set once makes each
        membership test O(1) for O(n) total time (O(n) extra space).
        """
        seen = set(nums)  # one O(n) pass; O(1) lookups afterwards
        smallest = 1
        # The answer is at most len(nums) + 1, so this terminates quickly.
        while smallest in seen:
            smallest += 1
        return smallest
|
from copy import deepcopy
from lbrynet.schema.proto import signature_pb2 as signature_pb
from lbrynet.schema.schema import VERSION_MAP, ECDSA_CURVES
from lbrynet.schema.schema.schema import Schema
class Signature(Schema):
    """Schema wrapper that deserializes a signature dict into its protobuf."""

    @classmethod
    def load(cls, message):
        """Build a ``signature_pb.Signature`` from *message*.

        The input dict is deep-copied first, so the caller's message is not
        mutated by the pops below.
        """
        fields = deepcopy(message)
        pb = signature_pb.Signature()
        pb.version = VERSION_MAP[fields.pop("version")]
        pb.signatureType = ECDSA_CURVES[fields.pop("signatureType")]
        pb.certificateId = fields.pop("certificateId")
        pb.signature = fields.pop("signature")
        # Any remaining fields are handed to the shared Schema loader.
        return cls._load(fields, pb)
|
"""
Questo modulo contiente degli esempi per la visualizzazione 3D di poligoni tramite il pacchetto plotly.
Per approfondimenti visitare: https://plot.ly/python/
"""
import numpy as np
import plotly.graph_objs as go
import plotly.offline as poff
ottagono_3d = np.array([[-0.33095549, 2.80413031, 5.46457847, 6.63418824, 5.86620843,
3.45398123, 0.31889543, -2.34155273, -3.5111625 , -2.74318269],
[-0.24405635, 0.3613702 , 3.7662977 , 8.67015958, 13.19984728,
15.62517404, 15.01974749, 11.61481999, 6.71095811, 2.18127042],
[10.55212849, 9.10664097, 6.003798 , 2.42878012, -0.25287733,
-1.01687236, 0.42861517, 3.53145814, 7.10647601, 9.78813346]])
x = ottagono_3d[0, :].tolist()
y = ottagono_3d[1, :].tolist()
z = ottagono_3d[2, :].tolist()
# Ripetizione primo punto per "chiudere" il giro intorno al poligono
x.append(x[0])
y.append(y[0])
z.append(z[0])
perimetro = go.Scatter3d(x=x, y=y, z=z,
mode='lines',
marker=dict(
color='red'
)
)
area = go.Mesh3d(x=x, y=y, z=z, color='#FFB6C1', opacity=0.60)
fig_3d = go.Figure(data=[perimetro, area])
poff.plot(fig_3d)
# ATTENZIONE: Se il poligono risultasse parallelo al piano (y,z), cioè avesse x costante per ogni suo vertice,
# si deve aggiungere il seguente attributo: area.delaunayaxis = 'x'. Analogamente funziona per y e z costante.
# Se non aggiunto, si ottiene l'effetto seguente:
x_costante = [0] * len(x)
perimetro_cost = go.Scatter3d(x=x_costante, y=y, z=z,
mode='lines',
marker=dict(
color='red'
)
)
area_cost = go.Mesh3d(x=x_costante, y=y, z=z, color='#FFB6C1', opacity=0.60)
fig_3d_cost = go.Figure(data=[perimetro_cost, area_cost])
poff.plot(fig_3d_cost)
# Per realizzare più plot nella stessa figura, basta aggiungerli alla lista passanta in argomento come "data"
# all'oggetto go.Figure, cioè:
all_polygons = [perimetro, area, perimetro_cost, area_cost]
fig_3d_alltogether = go.Figure(data=all_polygons)
poff.plot(fig_3d_alltogether)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 10:42:43 2019
@author: benka
"""
import sys

import tensorflow as tf

import BuildNetVgg16
import Data_Reader
logs_dir= "logs/"# "path to logs directory where trained model and information will be stored"
model_path="Model_Zoo/vgg16.npy"# "Path to pretrained vgg16 model for encoder"
NUM_CLASSES = 2 # Number of classes
keep_prob = tf.placeholder(tf.float32, name="keep_probabilty") # Dropout probability
image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image") # Input image batch first dimension image number second dimension width third dimension height 4 dimension RGB
# -------------------------Build Net----------------------------------------------------------------------------------------------
Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path) # Create class instance for the net
Net.build(image, NUM_CLASSES, keep_prob) # Build net and load intial weights (weights before training)
#-------------------------Load Trained model if you dont have trained model see: Train.py-----------------------------------------------------------------------------------------------------------------------------
sess = tf.Session() #Start Tensorflow session
print("Setting up Saver...")
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(logs_dir)
if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
    saver.restore(sess, ckpt.model_checkpoint_path)
    print("Model restored...")
    # Report the FLOPs of the restored graph.
    tf.profiler.profile(
        sess.graph,
        options=tf.profiler.ProfileOptionBuilder.float_operation())
else:
    # BUG FIX: this branch runs exactly when ckpt is None (or has no
    # checkpoint path), so the original `ckpt.model_checkpoint_path` here
    # raised AttributeError instead of printing. Report the searched
    # directory instead. (`sys` is imported at the top of the file.)
    print("ERROR NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating train network ")
    sys.exit()
# Dead code: a FLOPs-measurement example kept as an unassigned string literal;
# it is never executed. Preserved verbatim for reference.
'''
g = tf.Graph()
run_meta = tf.RunMetadata()
with g.as_default():
    A = tf.Variable(tf.random_normal( [25,16] ))
    B = tf.Variable(tf.random_normal( [16,9] ))
    C = tf.matmul(A,B) # shape=[25,9]
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(g, run_meta=run_meta, cmd='op', options=opts)
if flops is not None:
    print('Flops should be ~',2*25*16*9)
    print('25 x 25 x 9 would be',2*25*25*9) # ignores internal dim, repeats first
    print('TF stats gives',flops.total_float_ops)'''
import redis
# Module-level Redis client shared by the application. Created with redis-py
# defaults (presumably localhost:6379, db 0 — confirm deployment config).
app_redis = redis.Redis()
from indy_common.authorize.auth_actions import AuthActionAdd, AuthActionEdit
from indy_common.constants import REVOC_REG_DEF
def test_rev_reg_def_adding(write_request_validation, req, is_owner):
    """Adding a REVOC_REG_DEF is allowed only for the privileged roles."""
    privileged = ("trustee_identifier", "steward_identifier", "trust_anchor_identifier")
    expected = req.identifier in privileged
    add_action = AuthActionAdd(txn_type=REVOC_REG_DEF,
                               field='some_field',
                               value='some_value',
                               is_owner=is_owner)
    assert expected == write_request_validation(req, [add_action])
def test_rev_reg_def_editing(write_request_validation, req, is_owner):
    """Editing a REVOC_REG_DEF is allowed only when the requester owns it."""
    edit_action = AuthActionEdit(txn_type=REVOC_REG_DEF,
                                 field='some_field',
                                 old_value='old_value',
                                 new_value='new_value',
                                 is_owner=is_owner)
    assert is_owner == write_request_validation(req, [edit_action])
|
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Polynomial manipulation (adding, composing, finding coefficients, etc)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
# Dependency imports
from mathematics_dataset import example
from mathematics_dataset.sample import number
from mathematics_dataset.sample import ops
from mathematics_dataset.sample import polynomials
from mathematics_dataset.util import composition
import numpy as np
from six.moves import range
import sympy
# Entropy ("difficulty") ranges: training samples from a band; interpolation
# testing uses the fixed top of that band.
_ENTROPY_TRAIN = (3, 10)
_ENTROPY_INTERPOLATE = (8, 8)
def _make_modules(entropy):
  """Returns modules given "difficulty" parameters."""
  pure = composition.PreSampleArgs(1, 1, *entropy)
  composed = composition.PreSampleArgs(2, 4, *entropy)
  mixed = composition.PreSampleArgs(1, 4, *entropy)

  def bind(fn, args):
    # Freeze a module function with value=None and its sampling args.
    return functools.partial(fn, None, args)

  return {
      'coefficient_named': bind(coefficient_named, pure),
      'evaluate': bind(evaluate, pure),
      'evaluate_composed': bind(evaluate, composed),
      # TODO(b/124038948): consider doing pure sample args for 'add'?
      'add': bind(add, mixed),
      'expand': bind(expand, pure),
      'collect': bind(collect, pure),
      'compose': bind(compose, mixed),
      # Rearranging powers:
      'simplify_power': bind(simplify_power, pure),
  }
def train(entropy_fn):
  """Returns dict of training modules."""
  train_entropy = entropy_fn(_ENTROPY_TRAIN)
  return _make_modules(train_entropy)
def test():
  """Returns dict of testing modules."""
  modules = _make_modules(_ENTROPY_INTERPOLATE)
  return modules
def test_extra():
  """Returns dict of extrapolation testing modules."""
  extra_modules = {}
  return extra_modules
def coefficient_named(value, sample_args, context=None):
  """E.g., "Express x^2 + 2x in the form h * x^2 + k * x + t and give h.".

  Args:
    value: Unused (present for the module-function interface).
    sample_args: Sampling arguments controlling the entropy budget.
    context: Optional `composition.Context`; a fresh one is created if None.

  Returns:
    An `example.Problem` asking for one named coefficient.
  """
  del value  # not used
  if context is None:
    context = composition.Context()
  variable = sympy.Symbol(context.pop())
  entropy, sample_args = sample_args.peel()
  degree = random.randint(1, 4)
  # Either expand a sampled coefficient vector, or sample a bracketed
  # expression and recover its coefficients from sympy.
  if random.choice([False, True]):
    coefficients = polynomials.sample_coefficients(
        degree, entropy/2, min_non_zero=random.randint(degree - 1, degree))
    expanded = polynomials.expand_coefficients(coefficients, entropy/2)
    expression = polynomials.coefficients_to_polynomial(expanded, variable)
  else:
    expression = polynomials.sample_with_brackets(variable, degree, entropy)
    coefficients = list(reversed(sympy.Poly(expression).all_coeffs()))
  named_coeffs = [sympy.Symbol(context.pop()) for _ in range(degree + 1)]
  canonical = polynomials.coefficients_to_polynomial(named_coeffs, variable)
  if random.random() < 0.2:  # only small probability of non-zero power
    power = random.randint(0, degree)
  else:
    non_zero_powers = [i for i in range(degree + 1) if coefficients[i] != 0]
    power = random.choice(non_zero_powers)
  value = coefficients[power]
  named_coeff = named_coeffs[power]
  # BUG FIX: the first two entries below were a single implicitly-concatenated
  # string (missing comma), which fused two templates into one double question.
  template = random.choice([
      'Ekspresikan {expression} sebagai {canonical} dan berikan {target}.',
      'Atur ulang {expression} menjadi {canonical} dan berikan {target}.',
      'Ekspresikan {expression} dalam bentuk {canonical} dan berikan {target}.',
      'Atur ulang {expression} ke bentuk {canonical} dan berikan {target}.',
  ])
  return example.Problem(
      question=example.question(
          context, template, expression=expression, canonical=canonical,
          target=named_coeff),
      answer=value)
# Shared Indonesian question templates ("What is ...?", "Calculate ...", etc.)
# used by evaluate/add/compose; {composed} is the expression to evaluate.
_TEMPLATES = [
    'Berapakah {composed}?',
    'Hitung {composed}.',
    'Beri {composed}.',
    'Tentukan {composed}.',
]
@composition.module(number.is_integer)
def evaluate(value, sample_args, context=None):
  """Entity for evaluating an integer-valued polynomial at a given point.

  Coefficients are constructed so the polynomial evaluates exactly to
  `value` at the sampled integer input. Note: the statement order below
  matters — each random draw consumes the shared RNG stream.
  """
  is_question = context is None
  if context is None:
    context = composition.Context()
  entropy, sample_args = sample_args.peel()
  if value is None:
    entropy_value = random.uniform(1, 1 + entropy/3)
    entropy = max(0, entropy - entropy_value)
    value = number.integer(entropy_value, signed=True)
  entropy_input = random.uniform(1, 1 + entropy/3)
  entropy = max(0, entropy - entropy_input)
  input_ = number.integer(entropy_input, signed=True)
  degree = random.randint(1, 3)
  # Split the remaining entropy across the coefficients (higher powers get
  # proportionally more weight via the dirichlet parameters).
  entropies = entropy * np.random.dirichlet(list(range(1, degree + 1)))
  # Calculate coefficients in reverse order.
  target = value
  coeffs_reversed = []
  for i, coeff_entropy in enumerate(entropies):
    power = degree - i
    coeff = number.integer(coeff_entropy, signed=True)
    if input_ != 0:
      # Nudge the coefficient toward hitting the remaining target.
      coeff += int(round(target / input_ ** power))
    if coeff == 0 and i == 0:
      # Don't allow zero in leading coefficient.
      coeff += random.choice([-1, 1])
    coeffs_reversed.append(coeff)
    target -= coeff * (input_ ** power)
  # The constant term absorbs whatever remains, so p(input_) == value exactly.
  coeffs_reversed.append(target)
  coefficients = list(reversed(coeffs_reversed))
  (polynomial_entity, input_) = context.sample(
      sample_args, [composition.Polynomial(coefficients), input_])
  composed = polynomial_entity.handle.apply(input_.handle)
  if is_question:
    template = random.choice(_TEMPLATES)
    return example.Problem(
        question=example.question(context, template, composed=composed),
        answer=value)
  else:
    # NOTE(review): description mixes Indonesian and English ("be") — this is
    # a runtime template string, so it is left untouched here; verify intent.
    return composition.Entity(
        context=context,
        value=value,
        expression=composed,
        description='Misalkan {self} be {composed}.',
        composed=composed)
# TODO(b/124039290): merge with compose? both add and compose do similar things.
@composition.module(composition.is_integer_polynomial)
def add(value, sample_args, context=None):
  """E.g., "Let f(x)=2x+1, g(x)=3x+2. What is 5*f(x) - 7*g(x)?".

  Splits the target polynomial's coefficients into a linear combination
  c1*fn1 + c2*fn2 whose sum reproduces `value`.
  """
  is_question = context is None
  if context is None:
    context = composition.Context()
  entropy, sample_args = sample_args.peel()
  if value is None:
    max_degree = 3
    degree = random.randint(1, max_degree)
    entropy -= math.log10(max_degree)  # account for the degree choice
    entropy_value = entropy / 2
    entropy -= entropy_value
    value = polynomials.sample_coefficients(
        degree, entropy=entropy_value, min_non_zero=random.randint(1, 3))
    value = composition.Polynomial(value)
  # Split value's coefficients as c1 * coeffs1 + c2 * coeffs2.
  c1, c2, coeffs1, coeffs2 = polynomials.coefficients_linear_split(
      value.coefficients, entropy)
  coeffs1 = polynomials.trim(coeffs1)
  coeffs2 = polynomials.trim(coeffs2)
  c1, c2, fn1, fn2 = context.sample(
      sample_args,
      [c1, c2, composition.Polynomial(coeffs1), composition.Polynomial(coeffs2)]
  )
  var = sympy.var(context.pop())
  expression = (
      c1.handle * fn1.handle.apply(var) + c2.handle * fn2.handle.apply(var))
  if is_question:
    answer = polynomials.coefficients_to_polynomial(value.coefficients, var)
    answer = answer.sympy()
    template = random.choice(_TEMPLATES)
    return example.Problem(
        question=example.question(context, template, composed=expression),
        answer=answer)
  else:
    # As a sub-entity: introduce a named intermediate function for reuse.
    intermediate_symbol = context.pop()
    intermediate = sympy.Function(intermediate_symbol)(var)
    return composition.Entity(
        context=context,
        value=value,
        description='Misalkan {intermediate} = {composed}.',
        handle=composition.FunctionHandle(intermediate_symbol),
        intermediate=intermediate,
        composed=expression)
def expand(value, sample_args, context=None):
  """E.g., "Expand (x**2 + 1)**2."."""
  del value  # not used
  context = composition.Context() if context is None else context
  symbol = sympy.Symbol(context.pop())
  entropy, sample_args = sample_args.peel()
  min_order = 1
  max_order = 5
  order = random.randint(min_order, max_order)
  # Account for the entropy consumed by choosing the order.
  entropy -= math.log10(max_order - min_order + 1)
  bracketed = polynomials.sample_with_brackets(symbol, order, entropy)
  expanded_form = sympy.expand(bracketed)
  template = random.choice([
      'Sederhanakan {expression}.'
  ])
  return example.Problem(
      question=example.question(context, template, expression=bracketed),
      answer=expanded_form)
@composition.module(composition.is_polynomial)
def collect(value, sample_args, context=None):
  """Collect terms in an unsimplified polynomial.

  Produces an "unsimplified" form whose expansion/collection yields the
  target polynomial `value`.
  """
  is_question = context is None
  if context is None:
    context = composition.Context()
  entropy, sample_args = sample_args.peel()
  if value is None:
    entropy_value, entropy = entropy * np.random.dirichlet([2, 3])
    degrees = [random.randint(1, 3)]
    value = composition.Polynomial(
        polynomials.sample_coefficients(degrees, entropy_value))
  assert isinstance(value, composition.Polynomial)
  coefficients = value.coefficients
  # Expansion into redundant terms is only valid for integer coefficients.
  all_coefficients_are_integer = True
  for coeff in coefficients.flat:
    if not number.is_integer(coeff):
      all_coefficients_are_integer = False
      break
  if all_coefficients_are_integer:
    coefficients = polynomials.expand_coefficients(coefficients, entropy)
  else:
    # put back the unused entropy
    sample_args = composition.SampleArgs(
        sample_args.num_modules, sample_args.entropy + entropy)
  num_variables = coefficients.ndim
  variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]
  unsimplified = polynomials.coefficients_to_polynomial(coefficients, variables)
  simplified = unsimplified.sympy().expand()
  # Bit of a hack: handle the very rare case where no number constants appearing
  if not ops.number_constants(unsimplified):
    unsimplified = ops.Add(unsimplified, ops.Constant(0))
  context.sample_by_replacing_constants(sample_args, unsimplified)
  if is_question:
    template = 'Sederhanakan {unsimplified}.'
    return example.Problem(
        question=example.question(context, template, unsimplified=unsimplified),
        answer=simplified)
  else:
    # As a sub-entity: expose the unsimplified form as a named function.
    function_symbol = context.pop()
    function = sympy.Function(function_symbol)(*variables)
    return composition.Entity(
        context=context,
        value=value,
        handle=composition.FunctionHandle(function_symbol),
        expression=unsimplified,
        polynomial_variables=variables,
        description='Misalkan {function} = {unsimplified}.',
        function=function,
        unsimplified=unsimplified)
def compose(value, sample_args, context=None):
  """E.g., "Let f(x)=2x+1, let g(x)=3x+10. What is f(g(x))?".

  Samples two low-degree polynomials f and g and asks for the expanded
  composition f(g(x)).
  """
  del value  # unused
  if context is None:
    context = composition.Context()
  entropy, sample_args = sample_args.peel()
  # Split the entropy budget evenly (on average) between f and g.
  entropy_f, entropy_g = entropy * np.random.dirichlet([1, 1])
  coeffs_f = polynomials.sample_coefficients([random.randint(1, 2)], entropy_f)
  coeffs_g = polynomials.sample_coefficients([random.randint(1, 2)], entropy_g)
  entity_f, entity_g = context.sample(
      sample_args,
      [composition.Polynomial(coeffs_f), composition.Polynomial(coeffs_g)])
  variable = sympy.var(context.pop())
  poly_f = polynomials.coefficients_to_polynomial(coeffs_f, variable)
  poly_g = polynomials.coefficients_to_polynomial(coeffs_g, variable)
  # The answer: substitute g into f, then expand.
  poly_f_g = poly_f.sympy().subs(variable, poly_g.sympy()).expand()
  expression = composition.FunctionHandle(entity_f, entity_g).apply(variable)
  template = random.choice(_TEMPLATES)
  return example.Problem(
      question=example.question(context, template, composed=expression),
      answer=poly_f_g)
def simplify_power(value, sample_args, context=None):
  """E.g., "Simplify ((x**2)**3/x**4)**2/x**3."."""
  del value  # unused
  if context is None:
    context = composition.Context()
  entropy, sample_args = sample_args.peel()
  # The base must be declared positive so the power laws apply cleanly.
  base = sympy.symbols(context.pop(), positive=True)
  messy = polynomials.sample_messy_power(base, entropy)
  simplified = messy.sympy()
  template = random.choice([
      'Sederhanakan {unsimplified} dengan asumsi {variable} adalah positif.',
  ])
  question = example.question(
      context, template, unsimplified=messy, variable=base)
  return example.Problem(question=question, answer=simplified)
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python2.4
#
"""Utility functions for the Speedometer service."""
__author__ = 'mdw@google.com (Matt Welsh)'
import cgi
import datetime
import logging
import random
import sys
import time
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from django.utils import simplejson as json
def StringToTime(thestr):
    """Convert an ISO8601 timestring into a datetime object.

    Accepts e.g. '2012-01-02T03:04:05.678901Z' or '2012-01-02T03:04:05Z'.
    A short fractional part is right-padded to microseconds ('5' -> 500000).

    Raises:
      ValueError: if the string does not end in 'Z' or does not parse.
    """
    try:
        strtime, extra = thestr.split('.')
    except ValueError:
        # Fix: was a bare `except:` (caught everything, including
        # KeyboardInterrupt); tuple unpacking raises ValueError when there is
        # no (or more than one) '.' in the string.
        strtime = thestr[:-1]  # Get rid of 'Z' at end
        extra = 'Z'
    dt = datetime.datetime(*time.strptime(strtime, "%Y-%m-%dT%H:%M:%S")[0:6])
    # Strip 'Z' off of end.
    # Fix: `raise ValueError, "..."` is Python-2-only statement syntax; the
    # call form below is valid on both Python 2 and 3.
    if extra[-1] != 'Z':
        raise ValueError("Timestring does not end in Z")
    usecstr = extra[:-1]
    # Append extra zeros to end of usecstr if needed.
    while len(usecstr) < 6:
        usecstr = usecstr + '0'
    return dt.replace(microsecond=int(usecstr))
def TimeToString(dt):
    """Serialize a datetime object to an ISO8601 string with a trailing 'Z'."""
    return '{}Z'.format(dt.isoformat())
def MicrosecondsSinceEpochToTime(microsec_since_epoch):
    """Convert microseconds since epoch UTC to a datetime object."""
    # Split into whole seconds and the sub-second microsecond remainder.
    seconds, microseconds = divmod(microsec_since_epoch, 1000000)
    return datetime.datetime.utcfromtimestamp(seconds).replace(
        microsecond=microseconds)
def TimeToMicrosecondsSinceEpoch(dt):
    """Convert a datetime object to microseconds since the epoch UTC."""
    delta = dt - datetime.datetime(1970, 1, 1)
    # Integer arithmetic throughout to avoid float rounding.
    whole_seconds = delta.days * 86400 + delta.seconds
    return whole_seconds * 1000000 + delta.microseconds
# Value types that ConvertToDict() can emit into the output dict unchanged.
# Note: ``long`` and ``basestring`` are Python 2 builtins.
_SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
def ConvertToDict(model, include_fields=None, exclude_fields=None,
                  timestamps_in_microseconds=False):
    """Convert an AppEngine Model object to a Python dict ready for json dump.

    For each property in the model, set a value in the returned dict
    with the property name as its key.

    Args:
      model: db.Model instance to convert.
      include_fields: if not None, only property names in this collection
        are emitted. Also passed down to nested models.
      exclude_fields: if not None, property names in this collection are
        skipped. Also passed down to nested models.
      timestamps_in_microseconds: if True, date/datetime values are emitted as
        integer microseconds since the epoch instead of ISO8601 strings.

    Returns:
      A dict mapping property names to JSON-serializable values.

    Raises:
      ValueError: for a property value of an unsupported type.
    """
    output = {}
    for key, prop in model.properties().iteritems():  # Python 2 dict API
        if include_fields is not None and key not in include_fields: continue
        if exclude_fields is not None and key in exclude_fields: continue
        value = getattr(model, key)
        if value is None or isinstance(value, _SIMPLE_TYPES):
            # Directly serializable (``long``/``basestring`` are Py2 builtins).
            output[key] = value
        elif isinstance(value, datetime.date):
            # Also matches datetime.datetime (a subclass of date).
            if timestamps_in_microseconds:
                output[key] = TimeToMicrosecondsSinceEpoch(value)
            else:
                output[key] = TimeToString(value)
        elif isinstance(value, db.GeoPt):
            output[key] = {'latitude': value.lat, 'longitude': value.lon}
        elif isinstance(value, db.Model):
            # Nested models are converted recursively with the same filters.
            output[key] = ConvertToDict(value, include_fields, exclude_fields,
                                        timestamps_in_microseconds)
        elif isinstance(value, users.User):
            output[key] = value.email()
        else:
            raise ValueError('cannot encode ' + repr(prop))
    return output
def ConvertToJson(model, include_fields=None, exclude_fields=None):
    """Convert an AppEngine Model object to a JSON-encoded string."""
    model_dict = ConvertToDict(model, include_fields, exclude_fields)
    return json.dumps(model_dict)
def ConvertFromDict(model, input_dict, include_fields=None,
                    exclude_fields=None):
    """Fill in Model fields with values from a dict.

    For each key in ``input_dict`` (optionally filtered by ``include_fields``
    and ``exclude_fields``), sets the attribute of the same name on ``model``.
    If the model defines a method ``JSON_DECODE_<key>``, that method is invoked
    with the value instead, allowing per-key decode overrides.
    """
    for field_name, field_value in input_dict.items():
        if include_fields is not None and field_name not in include_fields:
            continue
        if exclude_fields is not None and field_name in exclude_fields:
            continue
        decoder_name = 'JSON_DECODE_' + field_name
        if hasattr(model, decoder_name):
            getattr(model, decoder_name)(field_value)
        else:
            setattr(model, field_name, field_value)
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Native objects for holding DataONE Exceptions.
DataONEException and its subclasses are the primary carriers of exceptions in d1_python.
Serialize and deserialize from/to PyXB, clear text, HTTP headers.
Notes:
traceInformation:
traceInformation is an xs:anyType, meaning that is essentially the root of a new XML
document of arbitrary complexity. Since the contents of the elements are unknown at
the time when the PyXB binding are created, PyXB cannot automatically serialize and
deserialize the traceInformation field together with the rest of the
``DataONEException`` XML type.
To make it easier to use the traceInformation element, we support a special case
where it can be read and written as a single string of bytes, where the contents are
application specific. Any other content must be generated and parsed as XML by the
user.
Example of serialized DataONE Exception:
.. highlight: xml
::
<error detailCode="1020" errorCode="404" name="NotFound" identifier="testpid">
<description>Attempted to perform operation on non-existing object</description>
<traceInformation>view_handler.py(128)
views.py(102)
auth.py(392)
auth.py(315)
</traceInformation>
</error>
"""
import io
import logging
import traceback
import d1_common.type_conversions
import d1_common.types.dataoneErrors
import d1_common.xml
def xml_is_dataone_exception(xml_str):
    """Return True if the XML doc deserializes to a valid DataONE Exception."""
    try:
        deserialized_pyxb = deserialize(xml_str)
    except ServiceFailure:
        # deserialize() signals any parse failure as ServiceFailure.
        return False
    return pyxb_is_dataone_exception(deserialized_pyxb)
def pyxb_is_dataone_exception(obj_pyxb):
    """Return True if the PyXB object is a DataONE Exception binding."""
    exception_binding = d1_common.types.dataoneErrors.DataONEException
    return isinstance(obj_pyxb, exception_binding)
def deserialize(dataone_exception_xml):
    """Deserialize a DataONE Exception XML doc to a DataONEException object.

    Raises ServiceFailure (with the parse error embedded) when the doc cannot
    be deserialized.
    """
    try:
        dataone_exception_pyxb = d1_common.xml.deserialize_d1_exception(
            dataone_exception_xml
        )
    except ValueError as e:
        doc_str = (
            "<empty response>"
            if not dataone_exception_xml
            else dataone_exception_xml
        )
        raise ServiceFailure(
            detailCode="0",
            description='Deserialization failed. error="{}" doc="{}"'.format(
                str(e), doc_str
            ),
            traceInformation=traceback.format_exc(),
        )
    return create_exception_by_name(
        dataone_exception_pyxb.name,
        dataone_exception_pyxb.detailCode,
        dataone_exception_pyxb.description,
        _get_trace_information_content(dataone_exception_pyxb),
        dataone_exception_pyxb.identifier,
        dataone_exception_pyxb.nodeId,
    )
def deserialize_from_headers(headers):
    """Deserialize a DataONE Exception that is stored in a map of HTTP headers
    (used in responses to HTTP HEAD requests)."""
    # Field order matches the positional parameters of create_exception_by_name.
    header_names = (
        "DataONE-Exception-Name",
        "DataONE-Exception-DetailCode",
        "DataONE-Exception-Description",
        "DataONE-Exception-TraceInformation",
        "DataONE-Exception-Identifier",
        "DataONE-Exception-NodeId",
    )
    return create_exception_by_name(
        *(_get_header(headers, name) for name in header_names)
    )
def _get_header(headers, header):
lower_case_headers = dict(
list(zip(list(map(str.lower, list(headers.keys()))), list(headers.values())))
)
try:
header = lower_case_headers[header.lower()]
except LookupError:
return None
# As a header must be on a single line, the Python stack uses a
# convention of replacing newlines with " / ".
return header.replace(" / ", "\n")
# noinspection PyIncorrectDocstring
def create_exception_by_name(
    name,
    detailCode="0",
    description="",
    traceInformation=None,
    identifier=None,
    nodeId=None,
):
    """Create a DataONEException based object by name.

    Args:
      name: str
        The type name of a DataONE Exception. E.g. NotFound.
        If an unknown type name is used, it is automatically set to
        ServiceFailure. As the XML Schema for DataONE Exceptions does not
        restrict the type names, this may occur when deserializing an
        exception not defined by DataONE.
      detailCode: int
        Optional index into a table of predefined error conditions.

    See Also:
      For remaining args, see: ``DataONEException()``
    """
    # NOTE(review): the name is resolved against this module's globals, so any
    # module-level name (not only exception classes) would match — verify that
    # callers only pass exception type names.
    exception_class = globals().get(name, ServiceFailure)
    return exception_class(
        detailCode, description, traceInformation, identifier, nodeId
    )
# noinspection PyIncorrectDocstring
def create_exception_by_error_code(
    errorCode,
    detailCode="0",
    description="",
    traceInformation=None,
    identifier=None,
    nodeId=None,
):
    """Create a DataONE Exception object by errorCode.

    Unknown error codes map to ServiceFailure.

    See Also: For args, see: ``DataONEException()``
    """
    exception_class = ERROR_CODE_TO_EXCEPTION_DICT.get(errorCode, ServiceFailure)
    return exception_class(
        detailCode, description, traceInformation, identifier, nodeId
    )
def _get_trace_information_content(err_pyxb):
    """Extract the traceInformation element content as a str, or None.

    Simple text content is joined to a single string; complex/mixed content
    (which makes content() joining raise TypeError) is serialized back to XML.
    """
    assert d1_common.type_conversions.is_pyxb(err_pyxb)
    trace_pyxb = err_pyxb.traceInformation
    if trace_pyxb is None:
        return None
    try:
        return "\n".join(trace_pyxb.content())
    except TypeError:
        return d1_common.xml.serialize_to_xml_str(
            trace_pyxb, pretty=True, strip_prolog=True
        )
# ===============================================================================
class DataONEException(Exception):
    """Base class for exceptions raised by DataONE.

    Holds the fields of the DataONE Exception XML type and serializes itself
    for transport (XML body or HTTP headers) and for display.
    """

    def __init__(
        self,
        errorCode,
        detailCode="0",
        description="",
        traceInformation=None,
        identifier=None,
        nodeId=None,
    ):
        """Args:
        errorCode: int
          HTTP Status code for the error. E.g., NotFound is 404.
        detailCode: int
          Optional index into a table of predefined error conditions.
        description: str
          Optional additional information about the error, intended for users.
          E.g., if the error is NotFound, this may name the resource that was
          not found.
        traceInformation: str
          Optional additional information about the error, intended for
          developers. E.g., stack traces or source code references.
        identifier: str
          Optional Persistent ID (PID) or Series ID (SID).
        nodeId: str
          Optional Node Identifier URN. E.g., urn:node:MyNode
        """
        self.errorCode = errorCode
        self.detailCode = detailCode
        self.description = description
        self.traceInformation = traceInformation
        self.identifier = identifier
        self.nodeId = nodeId

    def __repr__(self):
        # Fix: this method previously also called logging.error() with the
        # formatted string, so merely repr()-ing an exception wrote to the
        # error log. __repr__ must be free of side effects.
        return "{}({})".format(
            self.__class__.__name__,
            ", ".join(
                '{}="{}"'.format(attr_str, getattr(self, attr_str))
                for attr_str in [
                    "errorCode",
                    "detailCode",
                    "description",
                    "traceInformation",
                    "identifier",
                    "nodeId",
                ]
            ),
        )

    def __str__(self):
        """Multi-line "tag: value" representation of all fields."""
        msg = io.StringIO()
        msg.write(self._fmt("name", self.name))
        msg.write(self._fmt("errorCode", self.errorCode))
        msg.write(self._fmt("detailCode", self.detailCode))
        msg.write(self._fmt("description", self.description))
        msg.write(self._fmt("traceInformation", self.traceInformation))
        msg.write(self._fmt("identifier", self.identifier))
        msg.write(self._fmt("nodeId", self.nodeId))
        return msg.getvalue()

    def _fmt(self, tag, msg):
        """Format a string for inclusion in the exception's string representation.

        None/empty values are rendered as "<unset>". Single-line values format
        as "tag: msg"; multi-line values are indented under the tag. Values
        longer than 2048 chars are truncated to 1024 chars + "...".
        """
        msg = str(msg or "<unset>").strip()
        if not msg:
            # Fix: previously returned None here, which made __str__ raise
            # TypeError (StringIO.write(None)) for whitespace-only values.
            return ""
        if len(msg) > 2048:
            msg = msg[:1024] + "..."
        if msg.count("\n") <= 1:
            return "{}: {}\n".format(tag, msg.strip())
        else:
            return "{}:\n  {}\n".format(tag, msg.replace("\n", "\n  ").strip())

    def friendly_format(self):
        """Serialize to a format more suitable for displaying to end users."""
        if self.description is not None:
            msg = self.description
        else:
            msg = "errorCode: {} / detailCode: {}".format(
                self.errorCode, self.detailCode
            )
        return self._fmt(self.name, msg).strip()

    def serialize_to_transport(self, encoding="utf-8", xslt_url=None):
        """Serialize to XML ``bytes`` with prolog.

        Args:
          encoding: str
            Encoding to use for XML doc bytes. Only UTF-8 is supported.
          xslt_url: str
            If specified, add a processing instruction to the XML doc that
            specifies the download location for an XSLT stylesheet.

        Returns:
          bytes: XML holding a DataONEError based type.
        """
        assert encoding in ("utf-8", "UTF-8")
        dataone_exception_pyxb = self.get_pyxb()
        return d1_common.xml.serialize_for_transport(
            dataone_exception_pyxb, xslt_url=xslt_url
        )

    def serialize_to_display(self, xslt_url=None):
        """Serialize to a pretty printed Unicode str, suitable for display.

        Args:
          xslt_url: url
            Optional link to an XSLT stylesheet. If provided, a processing
            instruction for the stylesheet is included in the XML prolog.
        """
        return d1_common.xml.serialize_to_xml_str(
            self.get_pyxb(), pretty=True, xslt_url=xslt_url
        )

    def encode(self, encoding="utf-8"):
        """Serialize to UTF-8 encoded XML bytes with prolog."""
        return self.serialize_to_transport(encoding)

    def serialize_to_headers(self):
        """Serialize to a dict of HTTP headers.

        Used in responses to HTTP HEAD requests. As a response to a HEAD
        request cannot include a body, the error is returned as a set of HTTP
        headers instead of an XML document. Values are truncated to 1024 chars
        and newlines are encoded as " / " (see _format_header()).
        """
        return {
            "DataONE-Exception-Name": self.__class__.__name__,
            "DataONE-Exception-ErrorCode": self._format_header(self.errorCode),
            "DataONE-Exception-DetailCode": self._format_header(self.detailCode),
            "DataONE-Exception-Description": self._format_header(self.description),
            "DataONE-Exception-TraceInformation": self._format_header(
                self.traceInformation
            ),
            "DataONE-Exception-Identifier": self._format_header(self.identifier),
            "DataONE-Exception-NodeID": self._format_header(self.nodeId),
        }

    def get_pyxb(self):
        """Generate a DataONE Exception PyXB object.

        The PyXB object supports directly reading and writing the individual
        values that may be included in a DataONE Exception.
        """
        dataone_exception_pyxb = d1_common.types.dataoneErrors.error()
        dataone_exception_pyxb.name = self.__class__.__name__
        dataone_exception_pyxb.errorCode = self.errorCode
        dataone_exception_pyxb.detailCode = self.detailCode
        # Optional elements are only set when present so the serialized doc
        # omits them cleanly.
        if self.description is not None:
            dataone_exception_pyxb.description = self.description
        dataone_exception_pyxb.traceInformation = self.traceInformation
        if self.identifier is not None:
            dataone_exception_pyxb.identifier = self.identifier
        if self.nodeId is not None:
            dataone_exception_pyxb.nodeId = self.nodeId
        return dataone_exception_pyxb

    def _format_header(self, v):
        # Header values must be single-line and bounded: truncate to 1024
        # chars and encode newlines as " / " (reversed by _get_header()).
        if v is None:
            return ""
        else:
            return str(v)[:1024].replace("\n", " / ")

    @property
    def name(self):
        """Returns:
        str: Type name of object based on DataONEException. E.g.:
        ``AuthenticationTimeout``.
        """
        return self.__class__.__name__
# ===============================================================================
class AuthenticationTimeout(DataONEException):
    """DataONE Exception of type AuthenticationTimeout (errorCode 408).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(408, detailCode, description, traceInformation,
                         identifier, nodeId)
class IdentifierNotUnique(DataONEException):
    """DataONE Exception of type IdentifierNotUnique (errorCode 409).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(409, detailCode, description, traceInformation,
                         identifier, nodeId)
class InsufficientResources(DataONEException):
    """DataONE Exception of type InsufficientResources (errorCode 413).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(413, detailCode, description, traceInformation,
                         identifier, nodeId)
class InvalidCredentials(DataONEException):
    """DataONE Exception of type InvalidCredentials (errorCode 401).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(401, detailCode, description, traceInformation,
                         identifier, nodeId)
class InvalidRequest(DataONEException):
    """DataONE Exception of type InvalidRequest (errorCode 400).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(400, detailCode, description, traceInformation,
                         identifier, nodeId)
class InvalidSystemMetadata(DataONEException):
    """DataONE Exception of type InvalidSystemMetadata (errorCode 400).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(400, detailCode, description, traceInformation,
                         identifier, nodeId)
class InvalidToken(DataONEException):
    """DataONE Exception of type InvalidToken (errorCode 401).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(401, detailCode, description, traceInformation,
                         identifier, nodeId)
class NotAuthorized(DataONEException):
    """DataONE Exception of type NotAuthorized (errorCode 401).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(401, detailCode, description, traceInformation,
                         identifier, nodeId)
class NotFound(DataONEException):
    """DataONE Exception of type NotFound (errorCode 404).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        # TODO: add link to resolve()
        super().__init__(404, detailCode, description, traceInformation,
                         identifier, nodeId)
# noinspection PyShadowingBuiltins
class NotImplemented(DataONEException):
    """DataONE Exception of type NotImplemented (errorCode 501).

    Note: intentionally shadows the ``NotImplemented`` builtin within this
    module, as the name is fixed by the DataONE Exception XML type.

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(501, detailCode, description, traceInformation,
                         identifier, nodeId)
class ServiceFailure(DataONEException):
    """DataONE Exception of type ServiceFailure (errorCode 500).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(500, detailCode, description, traceInformation,
                         identifier, nodeId)
class UnsupportedMetadataType(DataONEException):
    """DataONE Exception of type UnsupportedMetadataType (errorCode 400).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(400, detailCode, description, traceInformation,
                         identifier, nodeId)
class UnsupportedType(DataONEException):
    """DataONE Exception of type UnsupportedType (errorCode 400).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(400, detailCode, description, traceInformation,
                         identifier, nodeId)
class SynchronizationFailed(DataONEException):
    """DataONE Exception of type SynchronizationFailed (errorCode 0).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(0, detailCode, description, traceInformation,
                         identifier, nodeId)
class VersionMismatch(DataONEException):
    """DataONE Exception of type VersionMismatch (errorCode 409).

    See Also: ``DataONEException()``
    """

    def __init__(self, detailCode, description=None, traceInformation=None,
                 identifier=None, nodeId=None):
        super().__init__(409, detailCode, description, traceInformation,
                         identifier, nodeId)
# Maps an HTTP errorCode to the exception class used when creating an
# exception by error code alone (see create_exception_by_error_code()).
# Several DataONE exception types share a status code, but dict keys must be
# unique, so one representative type is chosen per code; the alternatives are
# kept below as comments for reference.
ERROR_CODE_TO_EXCEPTION_DICT = {
    400: InvalidRequest,
    # 400: InvalidSystemMetadata,
    # 400: UnsupportedMetadataType,
    # 400: UnsupportedType,
    # 401: InvalidCredentials,
    # 401: InvalidToken,
    401: NotAuthorized,
    404: NotFound,
    408: AuthenticationTimeout,
    409: IdentifierNotUnique,
    # 409: VersionMismatch,
    413: InsufficientResources,
    500: ServiceFailure,
    501: NotImplemented,
}
|
import abc
import datetime
import argparse
from collections import namedtuple
from time import time, sleep
import multiprocessing as mp
from multiprocessing import Pool, Lock
import logging
import sys, os
from typing import Tuple
import gym
# Make sibling packages importable when this script is run directly: add the
# current folder, its parent, and the Tichu-gym checkout to sys.path.
# NOTE(review): paths are derived from os.getcwd(), so this assumes the script
# is launched from its own directory — verify before running from elsewhere.
this_folder = '/'.join(os.getcwd().split('/')[:])
parent_folder = '/'.join(os.getcwd().split('/')[:-1])
Tichu_gym_folder = parent_folder+"/Tichu-gym"
for p in [this_folder, parent_folder, Tichu_gym_folder]:  # Adds the parent folder (ie. game) to the python path
    if p not in sys.path:
        sys.path.append(p)
from gamemanager import TichuGame
from gym_agents.strategies import make_random_tichu_strategy, never_announce_tichu_strategy, always_announce_tichu_strategy
from gym_agents import *
from gym_agents.mcts import *
from gym_tichu.envs.internals.utils import time_since, check_param
import logginginit
# Module-level logger for the experiment runners below.
logger = logging.getLogger(__name__)
_this_folder = os.path.dirname(os.path.realpath(__file__))  # Folder where this file is located
class Experiment(object, metaclass=abc.ABCMeta):
    """Abstract experiment: plays one Tichu game with 4 agents and logs results."""

    @property
    def name(self)->str:
        # Human-readable experiment name used in the log output.
        return self.__class__.__name__

    @property
    @abc.abstractmethod
    def agents(self)->Tuple[DefaultGymAgent, DefaultGymAgent, DefaultGymAgent, DefaultGymAgent]:
        # The 4 agents in seating order; positions 0/2 form one team, 1/3 the other.
        pass

    def run(self, target_points):
        # Plays one game to ``target_points`` and appends a summary to
        # <log_folder_name>/results.log.
        # NOTE(review): ``log_folder_name`` is a global that is not defined in
        # this module's visible scope — presumably set by the launching
        # script; verify before running this module standalone.
        logger.warning("Running {}".format(self.name))
        start_t = time()
        start_ftime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        players_vs_string = (
            """
            0: {0}
            2: {2}
            VS.
            1: {1}
            3: {3}
            """).format(*[p.info for p in self.agents])
        logger.info("Playing: " + players_vs_string)
        game_res = self._run_game(target_points=target_points)
        end_ftime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        results_string = (
            """
            ################################## {me.name} ##################################
            Log-folder: {log_folder}
            Start-time: {start_time}
            End-time: {end_time}
            Duration: {duration}
            {players_vs_string}
            Final Points: {points}
            """).format(me=self, log_folder=log_folder_name,
                        start_time=start_ftime, end_time=end_ftime, duration=time_since(start_t),
                        players_vs_string=players_vs_string, points=game_res.points)
        game_history_string = str(game_res.history)
        logger.info(results_string)
        logger.debug(game_history_string)
        with open(log_folder_name+"/results.log", "a") as f:
            f.write(results_string)
        logger.warning("Finished Running {}".format(self.name))

    @abc.abstractmethod
    def _run_game(self, target_points):
        # Must play one game and return an object exposing .points and .history.
        pass
class SimpleExperiment(Experiment, metaclass=abc.ABCMeta):
    """
    Experiment that runs a game to a given amount of points with 4 agents.
    Subclasses only need to override **_init_agents**.
    """

    def __init__(self):
        self._agents = self._init_agents()

    @property
    def agents(self)->Tuple[DefaultGymAgent, DefaultGymAgent, DefaultGymAgent, DefaultGymAgent]:
        return self._agents

    def _run_game(self, target_points=1000):
        game = TichuGame(*self.agents)
        return game.start_game(target_points=target_points)

    @abc.abstractmethod
    def _init_agents(self)->Tuple[DefaultGymAgent, DefaultGymAgent, DefaultGymAgent, DefaultGymAgent]:
        raise NotImplementedError()
# CHEAT vs NONCHEAT
class CheatVsNonCheatUCB1(SimpleExperiment):
    """Positions 0/2: ISMCTS agents with cheat=True; positions 1/3: without."""

    def _init_agents(self):
        def cheating_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100, cheat=True)

        def fair_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        return (cheating_agent(), fair_agent(), cheating_agent(), fair_agent())
#EPIC
class EpicVsIsMcts(SimpleExperiment):
    """Positions 0/2: EpicISMCTS; positions 1/3: InformationSetMCTS."""

    def _init_agents(self):
        def epic_agent():
            return BaseMonteCarloAgent(EpicISMCTS(), iterations=100)

        def ismcts_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        return (epic_agent(), ismcts_agent(), epic_agent(), ismcts_agent())
class EpicNoRolloutVsEpic(SimpleExperiment):
    """Positions 0/2: EpicNoRollout; positions 1/3: EpicISMCTS."""

    def _init_agents(self):
        def no_rollout_agent():
            return BaseMonteCarloAgent(EpicNoRollout(), iterations=100)

        def epic_agent():
            return BaseMonteCarloAgent(EpicISMCTS(), iterations=100)

        return (no_rollout_agent(), epic_agent(), no_rollout_agent(), epic_agent())
class EpicNoRolloutVsIsmcts(SimpleExperiment):
    """Positions 0/2: EpicNoRollout; positions 1/3: InformationSetMCTS."""

    def _init_agents(self):
        def no_rollout_agent():
            return BaseMonteCarloAgent(EpicNoRollout(), iterations=100)

        def ismcts_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        return (no_rollout_agent(), ismcts_agent(), no_rollout_agent(), ismcts_agent())
# BEST ACTION
class BestAction_MaxUcb_Vs_MostVisited(SimpleExperiment):
    """Positions 0/2: highest-UCB best-action ISMCTS; positions 1/3: default ISMCTS."""

    def _init_agents(self):
        def max_ucb_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTSHighestUcbBestAction(), iterations=100)

        def default_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        return (max_ucb_agent(), default_agent(), max_ucb_agent(), default_agent())
# REWARD
class Reward_Relative_Vs_Absolute(SimpleExperiment):
    """Positions 0/2: relative-evaluation ISMCTS; positions 1/3: absolute-evaluation."""

    def _init_agents(self):
        def relative_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTS_relative_evaluation(), iterations=100)

        def absolute_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTS_absolute_evaluation(), iterations=100)

        return (relative_agent(), absolute_agent(), relative_agent(), absolute_agent())
class Reward_Relative_Vs_Ranking(SimpleExperiment):
    """Positions 0/2: relative-evaluation ISMCTS; positions 1/3: default ISMCTS."""

    def _init_agents(self):
        def relative_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTS_relative_evaluation(), iterations=100)

        def ranking_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        return (relative_agent(), ranking_agent(), relative_agent(), ranking_agent())
class Reward_Ranking_Vs_Absolute(SimpleExperiment):
    """Positions 0/2: default ISMCTS; positions 1/3: absolute-evaluation ISMCTS."""

    def _init_agents(self):
        def ranking_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        def absolute_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTS_absolute_evaluation(), iterations=100)

        return (ranking_agent(), absolute_agent(), ranking_agent(), absolute_agent())
# Move Groups
class MoveGroups_With_Vs_No(SimpleExperiment):
    """Positions 0/2: move-group ISMCTS (1200 iters); positions 1/3: default (1000 iters).

    Both sides are additionally capped at max_time=10.
    """

    def _init_agents(self):
        def move_group_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTS_move_groups(), iterations=1200, max_time=10)

        def plain_agent():
            return BaseMonteCarloAgent(DefaultIsmcts(), iterations=1000, max_time=10)

        return (move_group_agent(), plain_agent(), move_group_agent(), plain_agent())
# TICHU
class Tichu_Random_Vs_Never(SimpleExperiment):
    """Position 0 announces Tichu randomly (weight 0.5); positions 1-3 never do."""

    def _init_agents(self):
        def never_agent():
            return BalancedRandomAgent(announce_tichu=never_announce_tichu_strategy)

        random_announcer = BalancedRandomAgent(
            announce_tichu=make_random_tichu_strategy(announce_weight=0.5))
        return (random_announcer, never_agent(), never_agent(), never_agent())
class Tichu_Always_Vs_Never(SimpleExperiment):
    """Position 0 always announces Tichu; positions 1-3 never do."""

    def _init_agents(self):
        def never_agent():
            return BalancedRandomAgent(announce_tichu=never_announce_tichu_strategy)

        always_announcer = BalancedRandomAgent(
            announce_tichu=always_announce_tichu_strategy)
        return (always_announcer, never_agent(), never_agent(), never_agent())
# SPLIT AGENTS
class FirstMctsThenRandomVsRandom(SimpleExperiment):
    """Positions 0/2: ISMCTS-then-random split agents (switch_length=5); 1/3: random."""

    def _init_agents(self):
        def split_agent():
            return make_first_ismcts_then_random_agent(switch_length=5)

        return (split_agent(), BalancedRandomAgent(),
                split_agent(), BalancedRandomAgent())
# DETERMINIZATION
class RandomVsPoolDeterminization(SimpleExperiment):
    """Positions 0/2: default ISMCTS; positions 1/3: pool-determinization ISMCTS."""

    def _init_agents(self):
        def default_agent():
            return BaseMonteCarloAgent(InformationSetMCTS(), iterations=100)

        def pool_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTSPoolDeterminization(), iterations=100)

        return (default_agent(), pool_agent(), default_agent(), pool_agent())
class RandomVsSingleDeterminization(SimpleExperiment):
    """Positions 0/2: random-determinization search; positions 1/3: single-determinization."""

    def _init_agents(self):
        def det_agent(search_name, policy):
            return BaseMonteCarloAgent(
                make_default_ismctsearch(name=search_name, determinizationpolicy=policy),
                iterations=1200, max_time=10, cheat=False)

        return (det_agent('Det_Random', RandomDeterminePolicy),
                det_agent('Det_Single', SingleDeterminePolicy),
                det_agent('Det_Random', RandomDeterminePolicy),
                det_agent('Det_Single', SingleDeterminePolicy))
class SingleVsPoolDeterminization(SimpleExperiment):
    """Positions 0/2: single-determinization ISMCTS; positions 1/3: pool-determinization."""

    def _init_agents(self):
        def single_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTSSingleDeterminization(), iterations=100)

        def pool_agent():
            return BaseMonteCarloAgent(
                InformationSetMCTSPoolDeterminization(), iterations=100)

        return (single_agent(), pool_agent(), single_agent(), pool_agent())
# DQN
class DQNUntrainedVsRandom(SimpleExperiment):
    """Positions 0/2: DQN agents without weights; positions 1/3: random agents."""

    def _init_agents(self):
        def untrained_dqn():
            return DQNAgent2L_56x5(weights_file=None)

        return (untrained_dqn(), BalancedRandomAgent(),
                untrained_dqn(), BalancedRandomAgent())
class DQNRandomVsDQNLearned(SimpleExperiment):
    """Positions 0/2: DQN with dqn_random weights; positions 1/3: dqn_learned weights."""

    def _init_agents(self):
        random_weights = '{}/dqn/dqn_random.h5f'.format(_this_folder)
        learned_weights = '{}/dqn/dqn_learned.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=random_weights),
                DQNAgent2L_56x5(weights_file=learned_weights),
                DQNAgent2L_56x5(weights_file=random_weights),
                DQNAgent2L_56x5(weights_file=learned_weights))
class DQNRandomVsDQNLearning(SimpleExperiment):
    """Positions 0/2: DQN with dqn_random weights; positions 1/3: dqn_learning weights."""

    def _init_agents(self):
        random_weights = '{}/dqn/dqn_random.h5f'.format(_this_folder)
        learning_weights = '{}/dqn/dqn_learning.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=random_weights),
                DQNAgent2L_56x5(weights_file=learning_weights),
                DQNAgent2L_56x5(weights_file=random_weights),
                DQNAgent2L_56x5(weights_file=learning_weights))
class DQNLearnedVsDQNLearning(SimpleExperiment):
    """Positions 0/2: DQN with dqn_learned weights; positions 1/3: dqn_learning weights."""

    def _init_agents(self):
        learned_weights = '{}/dqn/dqn_learned.h5f'.format(_this_folder)
        learning_weights = '{}/dqn/dqn_learning.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=learned_weights),
                DQNAgent2L_56x5(weights_file=learning_weights),
                DQNAgent2L_56x5(weights_file=learned_weights),
                DQNAgent2L_56x5(weights_file=learning_weights))
class DQNRandomVsDQNismcts(SimpleExperiment):
    """Positions 0/2: DQN with dqn_random weights; positions 1/3: dqn_ismcts weights."""

    def _init_agents(self):
        random_weights = '{}/dqn/dqn_random.h5f'.format(_this_folder)
        ismcts_weights = '{}/dqn/dqn_ismcts.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=random_weights),
                DQNAgent2L_56x5(weights_file=ismcts_weights),
                DQNAgent2L_56x5(weights_file=random_weights),
                DQNAgent2L_56x5(weights_file=ismcts_weights))
class DQNLearnedVsDQNismcts(SimpleExperiment):
    """Positions 0/2: DQN with dqn_learned weights; positions 1/3: dqn_ismcts weights."""

    def _init_agents(self):
        learned_weights = '{}/dqn/dqn_learned.h5f'.format(_this_folder)
        ismcts_weights = '{}/dqn/dqn_ismcts.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=learned_weights),
                DQNAgent2L_56x5(weights_file=ismcts_weights),
                DQNAgent2L_56x5(weights_file=learned_weights),
                DQNAgent2L_56x5(weights_file=ismcts_weights))
class DQNLearningVsDQNismcts(SimpleExperiment):
    """Positions 0/2: DQN with dqn_learning weights; positions 1/3: dqn_ismcts weights."""

    def _init_agents(self):
        learning_weights = '{}/dqn/dqn_learning.h5f'.format(_this_folder)
        ismcts_weights = '{}/dqn/dqn_ismcts.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=learning_weights),
                DQNAgent2L_56x5(weights_file=ismcts_weights),
                DQNAgent2L_56x5(weights_file=learning_weights),
                DQNAgent2L_56x5(weights_file=ismcts_weights))
class DQNUntrainedVsDQNismcts(SimpleExperiment):
    """Positions 0/2: DQN agents without weights; positions 1/3: dqn_ismcts weights."""

    def _init_agents(self):
        ismcts_weights = '{}/dqn/dqn_ismcts.h5f'.format(_this_folder)
        return (DQNAgent2L_56x5(weights_file=None),
                DQNAgent2L_56x5(weights_file=ismcts_weights),
                DQNAgent2L_56x5(weights_file=None),
                DQNAgent2L_56x5(weights_file=ismcts_weights))
# Multiple Experiments Together
class MultipleExperiments(Experiment):
    """Runs several Experiment classes, each ``nbr_to_run_each`` times, either
    sequentially in this process or concurrently in a multiprocessing Pool.
    """

    def __init__(self, experiment_clazzes, nbr_to_run_each: int, parallel: bool, poolsize: int=5):
        # experiment_clazzes: Experiment classes (not instances); each is
        # instantiated fresh per run.
        self.experiment_clazzes = experiment_clazzes
        self.nbr_to_run_each = nbr_to_run_each
        self.parallel = parallel
        self._current_agents = None
        self.poolsize = poolsize
        assert nbr_to_run_each > 0

    @property
    def name(self) -> str:
        # Combined name of all contained experiment classes.
        return ' and '.join(ex.__name__ for ex in self.experiment_clazzes)

    @property
    def agents(self):
        # Each contained experiment has its own agents; there is no single set.
        raise AttributeError("MultipleExperiments has no agents, this should not be used")

    def run(self, target_points):
        # Dispatch to the parallel or sequential runner.
        if self.parallel:
            return self._run_parallel(target_points=target_points)
        else:
            return self._run_sequential(target_points=target_points)

    def _run_parallel(self, target_points):
        """
        Runs all experiments in different processes. (in a pool of size 'self.poolsize')
        :param target_points:
        :return: None
        """
        logger.warning("Running the MultipleExperiments in Pool (of size {}): {} ({} times each)".format(self.poolsize, self.experiment_clazzes, self.nbr_to_run_each))
        assert self.poolsize > 0
        with Pool(processes=self.poolsize) as pool:
            # run all experiments in Pool
            jobs = list()
            for n in range(self.nbr_to_run_each):
                for exp in self.experiment_clazzes:
                    experiment = exp()
                    print("experiment: ", experiment)
                    jobs.append(pool.apply_async(experiment.run, (), {'target_points': target_points}))
            # wait for processes to complete
            # (get() blocks and re-raises any exception from the worker)
            for k, job in enumerate(jobs):
                logger.warning("waiting for job {k}".format(k=k))
                job.get()
        print("Pool exit")

    def _run_sequential(self, target_points):
        """
        Runs all experiments in this process.
        Each experiment is run 'self.nbr_to_run_each' times.
        :param target_points:
        :return: None
        """
        logger.warning("Running the MultipleExperiments sequential")
        for n in range(self.nbr_to_run_each):
            for exp in self.experiment_clazzes:
                logger.warning("Sequential MultipleExperiments starting {}".format(exp))
                # logger.warning("agents: "+str(list(a.info for a in exp().agents)))
                exp().run(target_points=target_points)

    def _run_game(self, target_points):
        # Never called: run() is overridden and delegates to the contained
        # experiments instead of playing a single game.
        raise AttributeError("MultipleExperiments has single game to be run, this should not be used")

    # To be able to do:
    # exp = MultipleExperiments([exp1, exp2], nbr_to_run_each=2, parallel=True)
    # exp().run(target_points=args.target_points)
    # ^
    def __call__(self):
        return self
class Tournament(Experiment):
    """
    Given some agents plays each agent against each other agent once.
    NOTE: With n agents there are n*(n-1)/2 games played (each unordered
    pair exactly once, no self-play).
    """
    def __init__(self, *agents):
        # Requires at least two participants to form a pairing.
        check_param(len(agents) >= 2)
        self.participating_agents = list(agents)
    @property
    def agents(self) -> Tuple[DefaultGymAgent, DefaultGymAgent, DefaultGymAgent, DefaultGymAgent]:
        # A tournament has no fixed 4-agent seating; pairings are built per game.
        raise AttributeError("Tournament has no agents, this should not be used")
    def _run_game(self, target_points):
        raise AttributeError("Tournament has no agents, this should not be used")
    def run(self, target_points):
        """Play one SimpleExperiment per unordered pair of participants."""
        for k0, agent0 in enumerate(self.participating_agents):
            for agent1 in self.participating_agents[k0+1:]:
                # Build a throwaway SimpleExperiment subclass whose _init_agents
                # seats the pair as partners (seats 0&2 vs 1&3). The lambda
                # closes over the loop variables, but the experiment is created
                # and run within this same iteration, so late binding is safe.
                expclazz = type("Tournament_{}_vs_{}".format(agent0.__class__.__name__, agent1.__class__.__name__),
                                (SimpleExperiment, object),
                                {'_init_agents': lambda self_: (agent0, agent1, agent0, agent1)})
                exp = expclazz()
                logger.warning("Tournament starting game {}".format(exp))
                exp.run(target_points=target_points)
    # To be able to do:
    # exp = Tournament(agent1, agent2)
    # exp().run(target_points=args.target_points)
    # ^
    def __call__(self):
        # Calling the instance yields itself, matching the class-style entries
        # in the 'experiments' registry below.
        return self
# Registry mapping the CLI 'experiment_name' argument to the experiment to run.
# Values are either pre-built Tournament/MultipleExperiments instances or
# Experiment classes; both support 'experiments[name]().run(...)' (see __main__).
experiments = {
    # --- ISMCTS policy tournaments ---
    'best_action_tournament': Tournament(
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='BestAction_MostVisited', bestactionpolicy=MostVisitedBestActionPolicy),
            iterations=100, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='BestAction_MaxUCB', bestactionpolicy=HighestUCBBestActionPolicy),
            iterations=100, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='BestAction_HighestAvgReward', bestactionpolicy=HighestAvgRewardBestActionPolicy),
            iterations=100, cheat=False
        )
    ),
    'determinization_tournament': Tournament(
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Det_Random', determinizationpolicy=RandomDeterminePolicy),
            iterations=1200, max_time=10, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Det_Pool', determinizationpolicy=PoolDeterminePolicy),
            iterations=1200, max_time=10, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Det_Single', determinizationpolicy=SingleDeterminePolicy),
            iterations=1200, max_time=10, cheat=False
        )
    ),
    'reward_tournament': Tournament(
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Eval_Ranking', evaluationpolicy=RankingEvaluationPolicy),
            iterations=1200, max_time=5, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Eval_Absolute', evaluationpolicy=AbsoluteEvaluationPolicy),
            iterations=1200, max_time=5, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Eval_Relative', evaluationpolicy=RelativeEvaluationPolicy),
            iterations=1200, max_time=5, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Eval_Norm_Relative', evaluationpolicy=RelativeNormalizedEvaluationPolicy),
            iterations=1200, max_time=5, cheat=False
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Eval_Norm_Absolute', evaluationpolicy=AbsoluteNormalizedEvaluationPolicy),
            iterations=1200, max_time=5, cheat=False
        )
    ),
    # --- mixed-strategy (switch after N cards) tournaments ---
    'split_tournament': Tournament(
        make_first_ismcts_then_random_agent(switch_length=9),
        make_first_ismcts_then_random_agent(switch_length=7),
        make_first_ismcts_then_random_agent(switch_length=5),
        make_first_ismcts_then_random_agent(switch_length=3),
        BalancedRandomAgent()
    ),
    'split_tournament_upper': Tournament(
        make_first_ismcts_then_random_agent(switch_length=13),
        make_first_ismcts_then_random_agent(switch_length=12),
        make_first_ismcts_then_random_agent(switch_length=11),
        make_first_ismcts_then_random_agent(switch_length=10),
        make_first_ismcts_then_random_agent(switch_length=9),
        BalancedRandomAgent()
    ),
    'epic_tournament': Tournament(
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Epic_ismcts', nodeidpolicy=EpicNodePolicy),
            iterations=1200, max_time=5, cheat=True
        ),
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='Epic_norollout', nodeidpolicy=EpicNodePolicy, treepolicy=NoRolloutPolicy),
            iterations=1200, max_time=5, cheat=True
        ),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=1200, max_time=5, cheat=True),
    ),
    'nn_rollout_tournament': Tournament(
        BaseMonteCarloAgent(
            make_default_ismctsearch(name='2L_ismcts', rolloutpolicy=DQNAgent2L_56x5_2_sepRolloutPolicy),
            iterations=1200, max_time=5, cheat=True
        ),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=1200, max_time=5, cheat=True),
        BalancedRandomAgent(),
        # TODO add more agents
    ),
    'best_tournament': Tournament(
        BaseMonteCarloAgent(
            make_best_ismctsearch(name='Best'),
            iterations=100, cheat=False
        ),
        BaseMonteCarloAgent(
            make_best_ismctsearch(name='Best_randomRollout', rolloutpolicy=RandomRolloutPolicy),
            iterations=100, cheat=False
        ),
        BaseMonteCarloAgent(
            # NOTE(review): name 'Best_randomRollout' duplicates the entry above
            # although this variant changes the determinization policy — probably
            # intended to be something like 'Best_randomDeterminization'.
            make_best_ismctsearch(name='Best_randomRollout', determinizationpolicy=RandomDeterminePolicy),
            iterations=100, cheat=False
        ),
        BaseMonteCarloAgent(
            make_best_ismctsearch(name='Best_movegroups', treepolicy=MoveGroupsTreeSelectionPolicy),
            iterations=100, cheat=False
        ),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=False),
    ),
    'minmax_tournament': Tournament(
        MinimaxAgent(depth=9),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=False),
        BalancedRandomAgent()
    ),
    # cheat may be a bool or a float probability of seeing hidden information.
    'cheat_tournament': Tournament(
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=False),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=0.2),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=0.6),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=0.8),
        BaseMonteCarloAgent(DefaultIsmcts(), iterations=100, cheat=True),
    ),
    # --- head-to-head SimpleExperiment classes and their batched wrappers ---
    'cheat_vs_noncheat': CheatVsNonCheatUCB1,
    'all_rewards': MultipleExperiments([Reward_Relative_Vs_Absolute, Reward_Relative_Vs_Ranking, Reward_Ranking_Vs_Absolute], nbr_to_run_each=10, parallel=True),
    'relative_vs_absolute_reward': Reward_Relative_Vs_Absolute,
    'relative_vs_ranking_reward': Reward_Relative_Vs_Ranking,
    'ranking_vs_absolute_reward': Reward_Ranking_Vs_Absolute,
    'all_split_experiments': MultipleExperiments([FirstMctsThenRandomVsRandom], nbr_to_run_each=10, parallel=True),
    'first_mcts_then_random_vs_random': FirstMctsThenRandomVsRandom,
    'all_determinization_vs': MultipleExperiments([RandomVsPoolDeterminization], nbr_to_run_each=10, parallel=True),
    'random_vs_pool_determinization': RandomVsPoolDeterminization,
    'random_vs_single_determinization': RandomVsSingleDeterminization,
    'single_vs_pool_determinization': SingleVsPoolDeterminization,
    'all_dqn_vs_dqn': MultipleExperiments([DQNUntrainedVsRandom, DQNRandomVsDQNLearned,
                                           DQNRandomVsDQNLearning, DQNLearnedVsDQNLearning,
                                           DQNRandomVsDQNismcts, DQNLearnedVsDQNismcts,
                                           DQNLearningVsDQNismcts, DQNUntrainedVsDQNismcts],
                                          nbr_to_run_each=1, parallel=False),
    'dqn_untrained_vs_random_agent': DQNUntrainedVsRandom,
    'dqn_random_vs_learned': DQNRandomVsDQNLearned,
    'dqn_random_vs_learning': DQNRandomVsDQNLearning,
    'dqn_learned_vs_learning': DQNLearnedVsDQNLearning,
    'dqn_random_vs_dqnismcts': DQNRandomVsDQNismcts,
    'dqn_learned_vs_dqnismcts': DQNLearnedVsDQNismcts,
    'dqn_learning_vs_dqnismcts': DQNLearningVsDQNismcts,
    'dqn_untrained_vs_dqnismcts': DQNUntrainedVsDQNismcts,
    'all_epic_vs': MultipleExperiments([EpicVsIsMcts, EpicNoRolloutVsEpic, EpicNoRolloutVsIsmcts], nbr_to_run_each=10, parallel=True),
    'epic_vs_ismcts': EpicVsIsMcts,
    'epic_norollout_vs_epic': EpicNoRolloutVsEpic,
    'epic_norollout_vs_ismcts': EpicNoRolloutVsIsmcts,
    'ismcts_best_action_maxucb_vs_most_visited': BestAction_MaxUcb_Vs_MostVisited,
    'move_groups_vs_none': MoveGroups_With_Vs_No,
}
# Maps the CLI level-name strings to the corresponding stdlib logging levels.
log_levels_map = {
    name: getattr(logging, name)
    for name in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
}
if __name__ == "__main__":
    # CLI entry point: parse arguments, configure logging, then run the chosen
    # experiment one or more times (optionally fanned out over a process pool).
    parser = argparse.ArgumentParser(description='Run Experiments', allow_abbrev=False)
    # EXPERIMENT
    parser.add_argument('experiment_name', metavar='experiment_name', type=str, choices=[k for k in experiments.keys()],
                        help='The name of the experiment to run')
    # EXPERIMENT PARAMS
    parser.add_argument('--target', dest='target_points', type=int, required=False, default=1000,
                        help='The number of points to play for')
    parser.add_argument('--min_duration', dest='min_duration', type=int, required=False, default=None,
                        help='Repeat until this amount of minutes passed.')
    parser.add_argument('--max_duration', dest='max_duration', type=int, required=False, default=None,
                        help='Does not start a new experiment when this amount of minutes passed. Must be bigger than --min_duration if specified. Overwrites --nbr_experiments')
    parser.add_argument('--nbr_experiments', dest='nbr_experiments', type=int, required=False, default=1,
                        help='The amount of experiments to run sequentially (Default is 1). If --min_duration is specified, then stops when both constraints are satisfied.')
    # LOGING
    parser.add_argument('--log_mode', dest='log_mode', type=str, required=False, default='ExperimentMode', choices=[k for k in logginginit.logging_modes.keys()],
                        help="{}".format('\n'.join("{}: {}".format(modestr, str(mode)) for modestr, mode in logginginit.logging_modes.items())))
    parser.add_argument('--ignore_debug', dest='ignore_debug', required=False, action='store_true',
                        help='Whether to log debug level (set flag to NOT log debug level). Overwrites the --log_mode setting for debug level')
    parser.add_argument('--ignore_info', dest='ignore_info', required=False, action='store_true',
                        help='Whether to log info level (set flag to NOT log info (and debug) level). Overwrites the --log_mode setting for info level')
    # POOL SIZE
    parser.add_argument('--pool_size', dest='pool_size', type=int, required=False, default=5,
                        help='The amount of workers use in the Pool [default: 5].')
    args = parser.parse_args()
    print("args:", args)
    # Times: compute absolute wall-clock deadlines from the duration flags.
    start_t = time()
    max_t = start_t + args.max_duration*60 if args.max_duration else float('inf')
    # min_t defaults to -2 (already in the past) so the while-loop's
    # 'time() < min_t' condition is immediately false when unset.
    min_t = start_t + args.min_duration*60 if args.min_duration else -2
    if max_t < min_t:
        raise ValueError("--max_duration must be bigger than --min_duration!")
    # init logging: per-run output folder named after experiment + timestamp.
    start_ftime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    log_folder_name = "./logs/" + args.experiment_name + "_" + start_ftime
    logmode = logginginit.logging_modes[args.log_mode]
    gym.undo_logger_setup()
    min_loglevel = logging.DEBUG
    if args.ignore_debug:
        min_loglevel = logging.INFO
    if args.ignore_info:
        # --ignore_info implies ignoring debug as well.
        min_loglevel = logging.WARNING
    logginginit.initialize_loggers(output_dir=log_folder_name, logging_mode=logmode, min_loglevel=min_loglevel)
    # nbr expreiments
    nbr_exp_left = args.nbr_experiments
    # the experiment (instance or class; both are callable returning something
    # with a .run(target_points=...) method)
    exp = experiments[args.experiment_name]
    # log the arguments
    logger.warning("Experiment summary: ")
    try:
        expname = exp.__name__
    except AttributeError:
        # exp is probably a MultipleExperiments instance
        expname = exp.name
    logger.warning("exp: {}; args: {}".format(expname, args))
    # run several experiments in multiple processors
    pool_size = args.pool_size
    if nbr_exp_left > 1 and pool_size > 1:
        with Pool(processes=pool_size) as pool:
            logger.warning("Running experiments in Pool (of size {})".format(pool_size))
            # run all experiments in Pool
            multiple_results = list()
            for i in range(nbr_exp_left):
                multiple_results.append(pool.apply_async(exp().run, (), {'target_points': args.target_points}))
            # wait for processes to complete
            for res in multiple_results:
                res.get()
            # NOTE(review): this decrements only once even though nbr_exp_left
            # experiments just ran in the pool, so the while-loop below will run
            # nbr_exp_left-1 additional experiments in the parent process.
            # Confirm whether 'nbr_exp_left = 0' was intended here.
            nbr_exp_left -= 1
    # run experiment in parent process: keep going until the requested count is
    # done AND min_duration has elapsed, but never start past max_duration.
    while (nbr_exp_left > 0 or time() < min_t) and time() < max_t:
        logger.warning("Running a experiment in parent process... ")
        nbr_exp_left -= 1
        exp().run(target_points=args.target_points)
    logger.info("Total Experiments runningtime: {}".format(time_since(start_t)))
|
import pytest
from blight import action, tool
@pytest.mark.parametrize(
    ("action_class", "tool_class", "should_run_on"),
    [
        (action.Action, tool.CC, True),
        (action.CCAction, tool.CC, True),
        (action.CXXAction, tool.CC, False),
        (action.CompilerAction, tool.CC, True),
        (action.CPPAction, tool.CC, False),
        (action.LDAction, tool.CC, False),
        (action.ASAction, tool.CC, False),
    ],
)
def test_should_run_on(action_class, tool_class, should_run_on):
    """Each action class runs only on the tool kinds it targets.

    Locals are suffixed with ``_instance`` so they do not shadow the imported
    ``action`` and ``tool`` modules (the previous names made the modules
    unreachable inside the test body).
    """
    action_instance = action_class({})  # actions take a config dict
    tool_instance = tool_class([])  # tools take an argv list
    assert action_instance._should_run_on(tool_instance) == should_run_on
|
import json
from collections import OrderedDict
class PyposmatReferenceDataFile(object):
    """ Used to interface with data from publications for validation purposes.

    Args:
        filename(str, optional): the filename of the data file to read (must be JSON).
    """
    def __init__(self, filename=None):
        self.filename = filename
        self._bibtex = None  # bibtex citation string, populated by read()
        self._qois = None  # OrderedDict of quantities of interest, populated by read()

    @property
    def bibtex(self):
        """str: bibtex citation of the source publication.

        Raises:
            RuntimeError: if no bibtex entry has been read or set.
        """
        if self._bibtex is None:
            raise RuntimeError("bibtex value not set or not found in json")
        else:
            return self._bibtex

    @property
    def qois(self):
        """collections.OrderedDict: quantities of interest from the publication.

        Raises:
            RuntimeError: if no qois entry has been read or set.
        """
        if self._qois is None:
            raise RuntimeError("qois value not set")
        else:
            return self._qois

    def read(self, filename=None):
        """Read a JSON reference-data file and populate bibtex/qois.

        Args:
            filename(str, optional): path of the JSON file; falls back to
                ``self.filename`` when omitted.

        Raises:
            ValueError: if no filename is available, or it does not end in '.json'.
        """
        # filename uses self.filename if not set
        if filename is None:
            if self.filename is None:
                raise ValueError("must provide a filename to read")
            else:
                filename = self.filename
        # filename must refer to a JSON file
        if not filename.endswith(".json"):
            raise ValueError("filename must be a json file")
        # read JSON into a dict (JSON is UTF-8 by specification)
        with open(filename, encoding="utf-8") as f:
            raw_json = json.load(f)
        # set the private attributes which the properties pull from
        self._bibtex = raw_json.pop("bibtex", None)
        qois = raw_json.pop("qois", None)
        # BUGFIX: OrderedDict(None) raises TypeError; keep None when the key is
        # absent so the `qois` property raises its intended RuntimeError instead.
        self._qois = None if qois is None else OrderedDict(qois)
from xbrr.base.reader.base_element_schema import BaseElementSchema
import bs4
class RoleSchema(BaseElementSchema):
    """Schema element for an XBRL link role with a lazily resolved label.

    The human-readable label is not parsed up front: ``lazy_label`` is a
    callable that, given the defining xsd URI, populates ``_label``
    (see ``read_schema``).
    """
    def __init__(self,
                 uri="", href="", lazy_label=None):
        super().__init__()
        self.uri = uri  # roleURI attribute value
        self.href = href  # absolute (or resolved) link to the defining xsd, with '#fragment'
        self.lazy_label = lazy_label  # callable(xsduri) that fills in self._label
        self._label = None

    @property
    def label(self):
        """Resolve the role's label on first access and return it."""
        if self._label is None:
            xsduri = self.href.split('#')[0]
            # read_schema writes _label back through reader._role_dic.
            self.lazy_label(xsduri)
        return self._label

    @classmethod
    def read_role_ref(cls, reader, xml, link_node, base_xsduri=None):
        """Collect the roleRef elements of *xml* actually used by *link_node*
        elements, keyed by role name.

        Relative hrefs are resolved against the directory of *base_xsduri*
        when given.  Returns dict[str, RoleSchema].
        """
        link_node_roles = [x["xlink:role"].rsplit("/")[-1] for x in xml.find_all(link_node)]
        role_dic = {}
        for element in xml.find_all('roleRef'):
            role_name = element["xlink:href"].split("#")[-1]
            link = element["xlink:href"]
            # PEP 8: singleton comparison uses 'is not None' (was '!= None').
            if not link.startswith('http') and base_xsduri is not None:
                link = base_xsduri.rsplit("/", 1)[0] + "/" + link
            if role_name in link_node_roles:
                role_dic[role_name] = RoleSchema(uri=element["roleURI"],
                                                 href=link,
                                                 lazy_label=lambda xsduri: RoleSchema.read_schema(reader, xsduri))
        return role_dic

    @classmethod
    def read_schema(cls, reader, xsduri):
        """Parse the role-type definitions in *xsduri* and write each label
        into the matching entry of ``reader._role_dic``."""
        xml = reader.read_uri(xsduri)
        for element in xml.find_all("link:roleType"):
            # accounting standard='jp': EDINET/taxonomy/2020-11-01/taxonomy/jppfs/2020-11-01/jppfs_rt_2020-11-01.xsd
            # accounting standard='ifrs': EDINET/taxonomy/2020-11-01/taxonomy/jpigp/2020-11-01/jpigp_rt_2020-11-01.xsd
            # <link:roleType roleURI="http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_BalanceSheet" id="rol_BalanceSheet">
            #   <link:definition>Balance sheet (original: 貸借対照表)</link:definition>
            # </link:roleType>
            if element["id"] not in reader._role_dic:
                continue
            reader._role_dic[element["id"]]._label = element.find("link:definition").text

    def to_dict(self):
        """Return a plain-dict view: the role's short name and resolved label."""
        return {
            "name": self.href.split('#')[-1],
            "label": self.label,
        }
|
from django.test import TestCase
from cyder.cydns.txt.models import TXT
from cyder.cydns.domain.models import Domain
class TXTTests(TestCase):
    """CRUD tests for TXT DNS records."""

    def setUp(self):
        self.o = Domain(name="org")
        self.o.save()
        self.o_e = Domain(name="oregonstate.org")
        self.o_e.save()

    def do_generic_add(self, data):
        """Create a TXT record from *data* and verify exactly one match exists."""
        record = TXT(**data)
        record.__repr__()
        record.save()
        matches = TXT.objects.filter(**data)
        self.assertTrue(len(matches) == 1)
        return record

    def do_remove(self, data):
        """Add a TXT record, delete it, and verify no match remains."""
        record = self.do_generic_add(data)
        record.delete()
        leftovers = TXT.objects.filter(**data)
        self.assertTrue(len(leftovers) == 0)

    def test_add_remove_txt(self):
        # (label, txt_data, domain) cases, added one after another.
        cases = [
            ("asdf", "asdf", self.o_e),
            ("asdf", "asdfasfd", self.o_e),
            ("df", "aasdf", self.o_e),
            ("12314", "dd", self.o),
        ]
        for label, txt_data, domain in cases:
            self.do_generic_add({'label': label, 'txt_data': txt_data, 'domain': domain})
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import List, Dict, Iterator, Tuple, Any
import torch
from torch import nn
from fairseq import utils
from fairseq.data import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
    model_name_or_path,
    checkpoint_file='model.pt',
    data_name_or_path='.',
    archive_map=None,
    **kwargs
):
    """Load a pretrained model (and its task/args) from a local path or archive.

    ``model_name_or_path`` may also be a key into ``archive_map``; extra kwargs
    become argument overrides for checkpoint loading.  Returns a dict with keys
    'args', 'task' and 'models' (list of loaded model instances).
    """
    from fairseq import checkpoint_utils, file_utils
    if archive_map is not None:
        if model_name_or_path in archive_map:
            model_name_or_path = archive_map[model_name_or_path]
        if data_name_or_path is not None and data_name_or_path in archive_map:
            data_name_or_path = archive_map[data_name_or_path]
        # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
        # for each model
        if isinstance(model_name_or_path, dict):
            for k, v in model_name_or_path.items():
                if k == 'checkpoint_file':
                    checkpoint_file = v
                elif (
                    k != 'path'
                    # only set kwargs that don't already have overrides
                    and k not in kwargs
                ):
                    kwargs[k] = v
            model_name_or_path = model_name_or_path['path']
    model_path = file_utils.load_archive_file(model_name_or_path)
    # convenience hack for loading data and BPE codes from model archive
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
    # pick up well-known BPE/sentencepiece files shipped inside the archive
    for file, arg in {
        'code': 'bpe_codes',
        'bpecodes': 'bpe_codes',
        'sentencepiece.bpe.model': 'sentencepiece_model',
    }.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path
    if 'user_dir' in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
    # checkpoint_file may contain several checkpoints joined by os.pathsep
    # (loaded as an ensemble)
    models, args, task = checkpoint_utils.load_model_ensemble_and_task(
        [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
        arg_overrides=kwargs,
    )
    return {
        'args': args,
        'task': task,
        'models': models,
    }
class GeneratorHubInterface(nn.Module):
    """
    PyTorch Hub interface for generating sequences from a pre-trained
    translation or language model.
    """
    def __init__(self, args, task, models):
        super().__init__()
        self.args = args
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary
        # optimize model for generation
        for model in self.models:
            model.prepare_for_inference_(args)
        # Load alignment dictionary for unknown word replacement
        # (None if no unknown word replacement, empty if no path to align dictionary)
        self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
        self.tokenizer = encoders.build_tokenizer(args)
        self.bpe = encoders.build_bpe(args)
        # generation is capped by the tightest of task and per-model limits
        self.max_positions = utils.resolve_max_positions(
            self.task.max_positions(), *[model.max_positions() for model in models]
        )
        # this is useful for determining the device
        self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
    @property
    def device(self):
        # Device of the registered buffer tracks wherever the module was moved.
        return self._float_tensor.device
    def translate(self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs) -> List[str]:
        """Translate *sentences*; thin wrapper around sample() with beam=5 default."""
        return self.sample(sentences, beam, verbose, **kwargs)
    def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> List[str]:
        """Encode, generate, and decode; a single str input returns a single str."""
        if isinstance(sentences, str):
            return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        # keep only the best hypothesis per sentence
        return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]
    def score(self, sentences: List[str], **kwargs):
        """Score reference sentences; returns the best hypothesis dict per input."""
        if isinstance(sentences, str):
            return self.score([sentences], **kwargs)[0]
        # NOTE: this doesn't support translation tasks currently
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]
    def generate(
        self,
        tokenized_sentences: List[torch.LongTensor],
        beam: int = 5,
        verbose: bool = False,
        skip_invalid_size_inputs=False,
        inference_step_args=None,
        **kwargs
    ) -> List[List[Dict[str, torch.Tensor]]]:
        """Run beam search over pre-tokenized inputs.

        Returns one list of hypothesis dicts per input sentence, in input order.
        A single 1-D tensor input returns that sentence's hypothesis list.
        """
        if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
            # NOTE(review): this recursion forwards beam/verbose/kwargs but not
            # skip_invalid_size_inputs or inference_step_args — confirm intended.
            return self.generate(
                tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
            )[0]
        # build generator using current args as well as any kwargs
        gen_args = copy.copy(self.args)
        gen_args.beam = beam
        for k, v in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.task.build_generator(self.models, gen_args)
        inference_step_args = inference_step_args or {}
        results = []
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
            translations = self.task.inference_step(
                generator, self.models, batch, **inference_step_args
            )
            for id, hypos in zip(batch["id"].tolist(), translations):
                results.append((id, hypos))
        # sort output to match input order
        outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
        if verbose:
            def getarg(name, default):
                # kwargs override first (gen_args), then constructor args.
                return getattr(gen_args, name, getattr(self.args, name, default))
            for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info('S\t{}'.format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo['tokens'])
                    logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
                    logger.info('P\t{}'.format(
                        ' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
                    ))
                    if hypo['alignment'] is not None and getarg('print_alignment', False):
                        logger.info('A\t{}'.format(
                            ' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo['alignment']])
                        ))
        return outputs
    def cal_attention(self, src_sentences: List[str], tgt_sentences: List[str], verbose: bool = False, **kwargs) -> List[torch.Tensor]:
        """Compute source-target attention for paired sentences via att_sample()."""
        if isinstance(src_sentences, list):
            assert isinstance(tgt_sentences, list) and len(src_sentences) == len(tgt_sentences)
        else:
            # NOTE(review): this comparison has no effect — likely a missing
            # `assert` in front of it; confirm and fix upstream.
            type(src_sentences) == type(tgt_sentences)
        return self.att_sample(src_sentences, tgt_sentences, verbose, **kwargs)
    def att_sample(self, src_sentences: List[str], tgt_sentences: List[str], verbose: bool = False, **kwargs) -> List[torch.Tensor]:
        """Encode both sides, fetch attention, and merge BPE subword rows/cols
        back to word granularity; returns a list with one (src, tgt) matrix."""
        if isinstance(src_sentences, str):
            return self.att_sample([src_sentences], [tgt_sentences], verbose=verbose, **kwargs)[0]
        tokenized_src_sentences = [self.encode(sentence) for sentence in src_sentences]
        tokenized_tgt_sentences = [self.encode(sentence) for sentence in tgt_sentences]
        from fairseq.data.encoders.byte_utils import SPACE
        batched_hypos = self.generate_attention(tokenized_src_sentences, tokenized_tgt_sentences, verbose, **kwargs)
        subword_input = [self.string(src) for src in batched_hypos[0][0]]
        subword_trans = [self.string(tgt) for tgt in batched_hypos[0][1]]
        attns = batched_hypos[0][2][0] # (n_src, n_tgt), Remove the first one, because of bos (not sure about this).
        # print('debug: input:', len(subword_input[0].split(SPACE)), subword_input[0].split(SPACE))
        # print('debug: target:', len(subword_trans[0].split(SPACE)), subword_trans[0].split(SPACE))
        # print('debug: original_attns:', batched_hypos[0][2][0])
        # print(batched_hypos[0][2][0].sum(dim=1))
        # print('bpe:', self.bpe.__class__.__name__)
        # NOTE(review): separator is hard-coded for subword-nmt BPE; other bpe
        # implementations (sentencepiece etc.) would not be merged correctly.
        bpe_separator = '@@'  # From subword_nmt_bpe.py
        # Merge source rows: accumulate rows of subwords carrying the separator
        # into the row of the word they start.
        cur_idx, prev_flag = 0, False
        for i, subword in enumerate(subword_input[0].split(SPACE)):
            # print(subword, bpe_separator in subword)
            if prev_flag:
                attns[cur_idx, :] += attns[i, :]
            else:
                attns[cur_idx, :] = attns[i, :]
            if bpe_separator in subword:
                prev_flag = True
            else:
                cur_idx += 1
                prev_flag = False
        attns = attns[:cur_idx, :]
        # Merge target columns with the same subword-accumulation scheme.
        cur_idx, prev_flag = 0, False
        for i, subword in enumerate(subword_trans[0].split(SPACE)):
            # print(subword, bpe_separator in subword)
            if prev_flag:
                attns[:, cur_idx] += attns[:, i]
            else:
                attns[:, cur_idx] = attns[:, i]
            if bpe_separator in subword:
                # print('bpe_separator in.')
                prev_flag = True
            else:
                cur_idx += 1
                prev_flag = False
        attns = attns[:, :cur_idx]
        attns /= attns.sum(dim = 1, keepdim=True) # Re-normalize
        # print(attns)
        return [attns]
    def aggregate_attention(self, attns: List[torch.Tensor]) -> torch.Tensor:
        # Not implemented yet (placeholder).
        pass
    def generate_attention(
        self,
        tokenized_src_sentences: List[torch.LongTensor],
        tokenized_tgt_sentences: List[torch.LongTensor],
        verbose: bool = False,
        skip_invalid_size_inputs=False,
        inference_step_args=None,
        **kwargs
    ) -> List[List[Dict[str, torch.Tensor]]]:
        """Forward paired (src, tgt) batches through the task's attention hook;
        returns one attention result per batch."""
        if torch.is_tensor(tokenized_src_sentences) and tokenized_src_sentences.dim() == 1:
            return self.generate_attention(
                tokenized_src_sentences.unsqueeze(0), tokenized_tgt_sentences.unsqueeze(0), verbose=verbose, **kwargs
            )[0]
        # build generator using current args as well as any kwargs
        gen_args = copy.copy(self.args)
        for k, v in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.task.build_generator(self.models, gen_args)
        inference_step_args = inference_step_args or {}
        results = []
        for batch in self._build_batches_with_targets(tokenized_src_sentences, tokenized_tgt_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
            attentions = self.task.forward_attention(
                generator, self.models, batch, **inference_step_args
            )
            results.append(attentions)
        return results
    def encode(self, sentence: str) -> torch.LongTensor:
        """tokenize -> BPE -> binarize a raw sentence into token ids."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)
    def decode(self, tokens: torch.LongTensor) -> str:
        """Inverse of encode(): ids -> string -> de-BPE -> detokenize."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)
    def tokenize(self, sentence: str) -> str:
        # No-op when no tokenizer was configured.
        if self.tokenizer is not None:
            sentence = self.tokenizer.encode(sentence)
        return sentence
    def detokenize(self, sentence: str) -> str:
        # No-op when no tokenizer was configured.
        if self.tokenizer is not None:
            sentence = self.tokenizer.decode(sentence)
        return sentence
    def apply_bpe(self, sentence: str) -> str:
        # No-op when no BPE model was configured.
        if self.bpe is not None:
            sentence = self.bpe.encode(sentence)
        return sentence
    def remove_bpe(self, sentence: str) -> str:
        # No-op when no BPE model was configured.
        if self.bpe is not None:
            sentence = self.bpe.decode(sentence)
        return sentence
    def binarize(self, sentence: str) -> torch.LongTensor:
        # Map a (tokenized, BPE'd) sentence to source-dictionary ids.
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
    def string(self, tokens: torch.LongTensor) -> str:
        # Map ids back to a string using the *target* dictionary.
        return self.tgt_dict.string(tokens)
    def _build_batches(
        self, tokens: List[List[int]], skip_invalid_size_inputs: bool
    ) -> Iterator[Dict[str, Any]]:
        """Batch tokenized inputs for inference using the task's batch iterator."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(
            dataset=self.task.build_dataset_for_inference(tokens, lengths),
            max_tokens=self.args.max_tokens,
            max_sentences=self.args.max_sentences,
            max_positions=self.max_positions,
            ignore_invalid_inputs=skip_invalid_size_inputs,
            disable_iterator_cache=True,
        ).next_epoch_itr(shuffle=False)
        return batch_iterator
    def _build_batches_with_targets(
        self, src_tokens: List[List[int]], tgt_tokens: List[List[int]], skip_invalid_size_inputs: bool
    ) -> Iterator[Dict[str, Any]]:
        """Batch paired (src, tgt) inputs for the attention forward pass."""
        src_lengths = torch.LongTensor([t.numel() for t in src_tokens])
        tgt_lengths = torch.LongTensor([t.numel() for t in tgt_tokens])
        batch_iterator = self.task.get_batch_iterator(
            dataset=self.task.build_dataset_for_attention(src_tokens, src_lengths, tgt_tokens, tgt_lengths),
            max_tokens=self.args.max_tokens,
            max_sentences=self.args.max_sentences,
            max_positions=self.max_positions,
            ignore_invalid_inputs=skip_invalid_size_inputs,
            disable_iterator_cache=True,
        ).next_epoch_itr(shuffle=False)
        return batch_iterator
class BPEHubInterface(object):
    """PyTorch Hub interface for Byte-Pair Encoding (BPE)."""

    def __init__(self, bpe, **kwargs):
        super().__init__()
        # The encoders registry expects a namespace holding the bpe choice
        # plus any implementation-specific options.
        namespace = argparse.Namespace(bpe=bpe, **kwargs)
        self.bpe = encoders.build_bpe(namespace)
        assert self.bpe is not None

    def encode(self, sentence: str) -> str:
        """Apply BPE segmentation to *sentence*."""
        return self.bpe.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Undo BPE segmentation on *sentence*."""
        return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
    """PyTorch Hub interface for tokenization."""

    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        # The encoders registry expects a namespace holding the tokenizer
        # choice plus any implementation-specific options.
        namespace = argparse.Namespace(tokenizer=tokenizer, **kwargs)
        self.tokenizer = encoders.build_tokenizer(namespace)
        assert self.tokenizer is not None

    def encode(self, sentence: str) -> str:
        """Tokenize *sentence*."""
        return self.tokenizer.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Detokenize *sentence*."""
        return self.tokenizer.decode(sentence)
|
import cv2
def drawsafelines(image_np, Orientation, Line_Perc1, Line_Perc2):
    """Overlay bed/safety guide lines on a frame and return the safety-line position.

    Line_Perc1/Line_Perc2 are percentages of the frame dimension, measured from
    the edge implied by `Orientation` ("bt", "tb", "lr" or "rl").  As in the
    original code, all four candidate line pairs are drawn unconditionally
    first, then the pair matching `Orientation` is drawn again (idempotent)
    and its safety-line coordinate is returned.  Returns None for an
    unrecognised orientation.

    NOTE(review): the legend labels and BGR colours look swapped -- (0, 0, 255)
    is red in BGR but is labelled "Blue Line"; confirm intent before changing.
    """
    height = image_np.shape[0]
    width = image_np.shape[1]

    def pos_far(side, perc):
        # Position measured from the far edge of the frame.
        return int(side - (side * (perc / 100)))

    def pos_near(side, perc):
        # Position measured from the near edge of the frame.
        return int(side * (perc / 100))

    def draw_horizontal(p1, p2):
        cv2.line(img=image_np, pt1=(0, p1), pt2=(width, p1), color=(0, 0, 255),
                 thickness=2, lineType=8, shift=0)
        cv2.line(img=image_np, pt1=(0, p2), pt2=(width, p2), color=(255, 0, 0),
                 thickness=2, lineType=8, shift=0)

    def draw_vertical(p1, p2):
        cv2.line(img=image_np, pt1=(p1, 0), pt2=(p1, height), color=(0, 0, 255),
                 thickness=2, lineType=8, shift=0)
        cv2.line(img=image_np, pt1=(p2, 0), pt2=(p2, height), color=(255, 0, 0),
                 thickness=2, lineType=8, shift=0)

    # Legend in the right-hand third of the frame.
    posii = int(width - (width / 3))
    cv2.putText(image_np, 'Blue Line : Bed Line',
                (posii, 30),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
    cv2.putText(image_np, 'Red Line : Safety Line',
                (posii, 50),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (255, 0, 0), 1, cv2.LINE_AA)

    # Draw every candidate pair (same order as before: tb, bt, lr, rl).
    draw_horizontal(pos_far(height, Line_Perc1), pos_far(height, Line_Perc2))
    draw_horizontal(pos_near(height, Line_Perc1), pos_near(height, Line_Perc2))
    draw_vertical(pos_far(width, Line_Perc1), pos_far(width, Line_Perc2))
    draw_vertical(pos_near(width, Line_Perc1), pos_near(width, Line_Perc2))

    # Redraw the orientation-specific pair and return its safety-line position.
    if Orientation == "bt":
        p2 = pos_near(height, Line_Perc2)
        draw_horizontal(pos_near(height, Line_Perc1), p2)
        return p2
    elif Orientation == "tb":
        p2 = pos_far(height, Line_Perc2)
        draw_horizontal(pos_far(height, Line_Perc1), p2)
        return p2
    elif Orientation == "lr":
        p2 = pos_far(width, Line_Perc2)
        draw_vertical(pos_far(width, Line_Perc1), p2)
        return p2
    elif Orientation == "rl":
        p2 = pos_near(width, Line_Perc2)
        draw_vertical(pos_near(width, Line_Perc1), p2)
        return p2
|
import unittest
from day6 import checksum, parse
class TestDay6(unittest.TestCase):
    """Unit tests for the day 6 orbit-map puzzle."""

    def test_example(self):
        """The example orbit map from the puzzle statement checksums to 42."""
        orbit_lines = "COM)B B)C C)D D)E E)F B)G G)H D)I E)J J)K K)L".split()
        parsed = [parse(line) for line in orbit_lines]
        self.assertEqual(42, checksum(parsed))
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path, re_path
from django_project.swagger import schema_view
# Base API routes; everything app-specific lives under provision.urls.
urlpatterns = [
    path("api/provision/", include("provision.urls")),
]
# Debug-only routes: django-debug-toolbar plus schema/documentation endpoints.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        path("__debug__/", include(debug_toolbar.urls)),
        # Raw schema download, e.g. /swagger.json or /swagger.yaml.
        re_path(
            r"^swagger(?P<format>\.json|\.yaml)$",
            schema_view.without_ui(cache_timeout=0),
            name="schema-json",
        ),
        # Interactive Swagger UI.
        re_path(
            r"^swagger/$",
            schema_view.with_ui("swagger", cache_timeout=0),
            name="schema-swagger-ui",
        ),
        # ReDoc documentation UI.
        re_path(
            r"^redoc/$",
            schema_view.with_ui("redoc", cache_timeout=0),
            name="schema-redoc",
        ),
    ]
# NOTE(review): static() is a no-op unless DEBUG is on; production should
# serve STATIC_ROOT from the web server instead.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Run some automations to test things"
__revision__ = "$Revision: 214 $"
import time
from pywinauto import application
from pywinauto import tests
from pywinauto.findbestmatch import MatchError
from pywinauto import findwindows
#application.set_timing(3, .5, 10, .5, .4, .2, .2, .1, .2, .5)
"Run a quick test on Notepad"
app = application.Application()
app.start_(ur"notepad.exe")
app['Notepad'].Wait('ready')
app['Notepad'].MenuSelect("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app['PageSetupDlg']['ComboBox1'].Select(4)
# Select the 'Letter' combobox item
app['PageSetupDlg']['ComboBox1'].Select("Letter")
# ----- Next Page Setup Dialog ----
app['PageSetupDlg']['Printer'].Click()
app['PageSetupDlg']['Network'].Click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app['ConnectToPrinter']['ExpandByDef'].Check()
# Uncheck it again - but use Click this time!
app['ConnectToPrinter']['ExpandByDef'].Click()
app['ConnectToPrinter']['OK'].CloseClick()
# ----- 2nd Page Setup Dialog again ----
app['PageSetupDlg2']['Properties'].Click()
# ----- Document Properties Dialog ----
doc_props = app.window_(title_re = ".*Document Properties")
# Two ways of selecting tabs
doc_props['TabCtrl'].Select(2)
doc_props['TabCtrl'].Select("Layout")
# click a Radio button
doc_props['RotatedLandscape'].Click()
doc_props['Portrait'].Click()
# open the Advanced options dialog in two steps
advbutton = doc_props['Advanced']
advbutton.Click()
# ----- Advanced Options Dialog ----
# close the 4 windows
app.window_(title_re = ".* Advanced Options")['Ok'].Click()
# ----- Document Properties Dialog again ----
doc_props['Cancel'].CloseClick()
# ----- 2nd Page Setup Dialog again ----
app['PageSetup2']['OK'].CloseClick()
# ----- Page Setup Dialog ----
app['PageSetup']['Ok'].CloseClick()
# type some text
app['Notepad']['Edit'].SetEditText(u"I am typing s\xe4me text to Notepad"
"\r\n\r\nAnd then I am going to quit")
# exit notepad
app['NotepadDialog'].MenuSelect("File->Exit")
app['Notepad']['No'].CloseClick()
|
# This file provides a fast way of defining new SDRAM modules.
# Modules defined in this file should, after verification that the settings
# are correct, later be moved to the LiteDRAM repository in a PR and removed from here.
from litedram.modules import _TechnologyTimings, _SpeedgradeTimings, DDR4Module
class MTA4ATF1G64HZ(DDR4Module):
    """Micron MTA4ATF1G64HZ DDR4 module: geometry and timing definition."""
    # geometry
    ngroupbanks = 4                 # banks per bank group
    ngroups = 2                     # bank groups
    nbanks = ngroups * ngroupbanks  # 8 banks total
    nrows = 128*1024
    ncols = 1024
    # timings (tREFI in ns per refresh-rate mode; tRFC as (cycles, ns))
    trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
    trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, 6.25), tRRD=(4, 7.5), tZQCS=(128, None))
    speedgrade_timings = {
        "2666": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=15, tRFC=trfc, tFAW=(28, 30), tRAS=32),
    }
    # Fall back to the -2666 speed grade when none is specified.
    speedgrade_timings["default"] = speedgrade_timings["2666"]
|
from grovepi import *
from grove_rgb_lcd import *
import time
import smbus
import RPi.GPIO as GPIO
from grove_i2c_barometic_sensor_BMP180 import BMP085
class WeatherStation(object):
    """Reads temperature/humidity from a Grove DHT sensor and mirrors it on the LCD."""

    def __init__(self, port=7):
        # Digital port the DHT sensor is attached to.
        self.dht_sensor_port = port
        setRGB(0, 255, 0)  # green backlight = station ready

    def get(self):
        """Return (temp, humidity) as strings, or None when the sensor read fails."""
        try:
            # Get the temperature and humidity from the DHT sensor.
            temp, hum = dht(self.dht_sensor_port, 0)
            t = str(temp)
            h = str(hum)
            print("Temp:" + t + "C " + "Humidity :" + h + "%")
            setText("Temp:" + t + "C " + "Humidity :" + h + "%")
            return t, h
        except (IOError, TypeError) as e:
            # Fix: the original had a bare `print` followed by an orphaned
            # "Error" string (Python 2->3 conversion artifact), which printed
            # a blank line and discarded the message.
            print("Error")
class Barometer(object):
    """Wraps the Grove BMP085/BMP180 barometric pressure sensor."""

    def __init__(self, mode=1):
        """mode: 0 ULTRALOWPOWER, 1 STANDARD (default), 2 HIRES, 3 ULTRAHIRES."""
        # Initialise the BMP085 at its fixed I2C address.
        self.bmp = BMP085(0x77, mode)
        # Pick the I2C bus matching the Pi board revision.  Kept on self:
        # previously this was a local that was silently discarded.
        rev = GPIO.RPI_REVISION
        if rev == 2 or rev == 3:
            self.bus = smbus.SMBus(1)
        else:
            self.bus = smbus.SMBus(0)

    def get(self):
        """Return (temperature C, pressure hPa, altitude m), or None on sensor error."""
        try:
            temp = self.bmp.readTemperature()
            # Read the current barometric pressure level (Pa -> hPa).
            pressure = self.bmp.readPressure() / 100.0
            # Altitude from a locally measured mean sea-level pressure of
            # 1015.60 hPa; readAltitude() with no argument assumes 1013.25 hPa
            # and would be less accurate.
            altitude = self.bmp.readAltitude(101560)
            print("Temperature: %.2f C" % temp)
            print("Pressure: %.2f hPa" % pressure)
            print("Altitude: %.2f m" % altitude)
            return temp, pressure, altitude
        except Exception as e:
            # Fix: was `pass`, which hid sensor failures entirely (the caller
            # just got None with no indication why).  Leftover debug prints
            # ("a"/"b"/"c") were also removed.
            print("Barometer read failed: %s" % e)
# Demo loop: construct the barometer once, then print a reading every 2 s.
barometer = Barometer()
# station= WeatherStation()
while True:
    time.sleep(2)
    # print(station.get())
    print(barometer.get())
|
import fiona
import geopandas as gpd
from shapely.geometry import shape
import polyline
import googlemaps
from datetime import datetime, timedelta
import json
import pickle
gmaps = googlemaps.Client(key='apikey')
originalPoints = gpd.read_file('C:\\gitrepos\Commute\\LatLonPoints.shp')
# Reading fails if the shapefile has no records, so fall back to an empty
# frame with the schema (columns) copied from the source shapefile.
try:
    alreadyQueriedPoints = gpd.read_file('C:\\gitrepos\Commute\\TrafficData.shp')
except Exception:
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    alreadyQueriedPoints = gpd.GeoDataFrame(columns=originalPoints.columns)
# Tilde inverts the boolean mask, so we keep points not contained in
# alreadyQueriedPoints.
# NOTE(review): .contains() aligns element-wise in geopandas -- verify this
# actually filters previously queried points as intended.
pointsToQuery = originalPoints[~originalPoints.contains(alreadyQueriedPoints)].geometry
print(pointsToQuery.head())
pointIndex = 0
destination = 'commute destination'
# Set departure time to next Monday at 07:30 local time.
departureTime = datetime.now() + timedelta(days=(7-datetime.now().weekday()))
departureTime = departureTime.replace(hour=7, minute=30, second=0, microsecond=0)
try:
    # Query up to 500 points; the finally block persists whatever was fetched
    # even if the API quota is exhausted or the run is interrupted.
    while pointIndex < 500:
        origin = '{0},{1}'.format(pointsToQuery.iloc[pointIndex].y,pointsToQuery.iloc[pointIndex].x)
        print(origin)
        directions_result = gmaps.directions(origin=origin, destination=destination, departure_time=departureTime)
        # Duration-in-traffic is reported in seconds; store minutes.
        travel_time = directions_result[0]['legs'][0]['duration_in_traffic']['value'] / 60
        alreadyQueriedPoints.loc[len(alreadyQueriedPoints)+1] = [len(alreadyQueriedPoints)+1,travel_time,pointsToQuery.iloc[pointIndex]]
        pointIndex += 1
finally:
    print(alreadyQueriedPoints.head())
    alreadyQueriedPoints.to_file('C:\\gitrepos\Commute\\TrafficData.shp')
|
import os
import sqlite3
from hashlib import md5
DATABASE = os.path.join(os.path.dirname(__file__), 'data.db')
'''
--------------------------------
| table users |
--------------------------------
| id | int auto primary |
--------------------------------
| username | string not null |
--------------------------------
| password | string not null |
--------------------------------
| identity | int not null |
--------------------------------
| mail | string not null |
--------------------------------
identity : 1 teacher 0 student
'''
def init_db():
    """(Re)create the database schema by executing schema.sql against DATABASE."""
    conn = sqlite3.connect(DATABASE)
    cur = conn.cursor()
    cur.execute('PRAGMA foreign_keys=ON;')
    schema_path = os.path.join(os.path.dirname(__file__), 'schema.sql')
    with open(schema_path, 'r') as schema_file:
        cur.executescript(schema_file.read())
    conn.commit()
    conn.close()
def add_user(username, password, identity, mail):
    """Insert a new user; return its rowid, or False on invalid input / DB error.

    identity: 1 = teacher, 0 = student.  The password is stored as a salted
    MD5 hash (see get_md5).
    """
    if not isinstance(password, str) or not isinstance(username, str):
        return False
    if identity not in (0, 1):
        return False
    try:
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        password = get_md5(password)
        c.execute('INSERT INTO users (username, password, identity, mail) VALUES (?, ?, ?, ?)',
                  (username, password, identity, mail))
        conn.commit()
        # Fixes: cursor.lastrowid is the documented way to get the new id
        # (the old `select last_insert_rowid() from users` produced one row
        # per user), and `user_id` no longer shadows the builtin `id`.
        user_id = c.lastrowid
        conn.close()
        return user_id
    except Exception as e:
        print(e)
        return False
def check_password(uid, password):
    """Return True iff plaintext `password` matches the stored hash for user `uid`.

    Returns False on any error (unknown uid, bad input, DB failure).
    """
    try:
        if not isinstance(uid, int):
            uid = int(uid)
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        # Fix: parameterized query -- the previous f-string interpolation was
        # an SQL injection vector.
        c.execute("SELECT * FROM users WHERE id = ?", (uid,))
        real_password = c.fetchone()[2]  # column 2 = password hash
        conn.close()
        return real_password == get_md5(password)
    except Exception as e:
        print(e)
        return False
def change_user(uid, username, password, identity, mail):
    """Overwrite all editable fields of user `uid`; return True on success."""
    if not isinstance(password, str) or not isinstance(username, str):
        return False
    if identity not in (0, 1):
        return False
    try:
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        password = get_md5(password)
        # Fix: parameterized UPDATE -- the previous f-string version was
        # injectable through every argument.
        c.execute(
            'UPDATE users SET username = ?, password = ?, identity = ?, mail = ? WHERE id = ?',
            (username, password, identity, mail, uid))
        conn.commit()
        conn.close()
        return True
    except Exception as e:
        print(e)
        return False
def get_identity(uid):
    """Return the identity flag of user `uid` (1 teacher, 0 student); -1 on error."""
    try:
        if not isinstance(uid, int):
            uid = int(uid)
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        # Fix: parameterized -- was an injectable f-string query.
        c.execute("SELECT * FROM users WHERE id = ?", (uid,))
        identity = c.fetchone()[3]  # column 3 = identity
        conn.close()
        return identity
    except Exception as e:
        print(e)
        return -1
def get_user_info(uid):
    """Return (id, username, identity, mail) for user `uid`, or None if absent/error."""
    try:
        if not isinstance(uid, int):
            uid = int(uid)
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        # Fix: parameterized -- was an injectable f-string query.
        c.execute(
            "SELECT id, username, identity, mail FROM users WHERE id = ?", (uid,))
        info = c.fetchone()
        conn.close()
        if info[0] == uid:
            return info
        else:
            return None
    except Exception as e:
        print(e)
        return None
'''
--------------------------------
| table rooms |
--------------------------------
| id | int auto primary |
--------------------------------
| tid | int not null |
--------------------------------
| name | string not null |
--------------------------------
| profile | string not null |
--------------------------------
foreign key tid references user(id)
'''
def add_room(tid, name, profile):
    """Create a room owned by teacher `tid`; return the new room id, or False."""
    # Guard clause instead of wrapping the whole body in the type check.
    if not (isinstance(tid, int) and isinstance(name, str) and isinstance(profile, str)):
        return False
    try:
        conn = sqlite3.connect(DATABASE)
        cur = conn.cursor()
        cur.execute('PRAGMA foreign_keys=ON;')
        cur.execute('INSERT INTO rooms (tid, name, profile) VALUES (?, ?, ?)',
                    (tid, name, profile))
        conn.commit()
        cur.execute('select last_insert_rowid() from rooms')
        rid = cur.fetchone()[0]
        conn.close()
        return rid
    except Exception as e:
        print(e)
        return False
def add_stu(uid, rid):
    """Register user `uid` as a participant of room `rid`; True on success."""
    try:
        conn = sqlite3.connect(DATABASE)
        cur = conn.cursor()
        cur.execute('PRAGMA foreign_keys=ON;')
        cur.execute('INSERT INTO participants (uid, rid) VALUES (?, ?)', (uid, rid))
        conn.commit()
        conn.close()
        return True
    except Exception as e:
        print(e)
        return False
def get_user_room(uid):
    """Return a list of (rid,) tuples for rooms user `uid` joined, or None on error."""
    try:
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        # Fix: parameterized -- was an injectable f-string query.
        c.execute('SELECT rid FROM participants where uid = ?', (uid,))
        res = c.fetchall()
        conn.close()
        return res
    except Exception as e:
        print(e)
        return None
def get_room_info(rid):
    """Return (room_id, teacher_id, teacher_username, room_name, profile), or None."""
    try:
        conn = sqlite3.connect(DATABASE)
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys=ON;')
        # Fix: parameterized -- rid was previously interpolated via f-string
        # (SQL injection vector).
        c.execute(
            'SELECT rooms.id, users.id, username, name, profile '
            'FROM rooms, users WHERE rooms.id = ? and rooms.tid = users.id',
            (rid,))
        res = c.fetchone()
        conn.close()
        return res
    except Exception as e:
        print(e)
        return None
def get_md5(password):
    """Return the hex MD5 digest of `password` plus the static salt '114514'.

    NOTE(review): a static salt + MD5 is weak for password storage; kept
    unchanged for compatibility with hashes already in the database.
    """
    salted = '{}114514'.format(password)
    return md5(salted.encode('utf-8')).hexdigest()
# This module is a library; running it directly only prints a notice.
if __name__ == '__main__':
    print('Please import the module instead of running it!')
    # get_info(1)
|
# Module-level constants (original values preserved).
mun, mun2, mun3 = 1, 3, 4
|
'''
A pseudo MSO neuron, with two dendrites (fake geometry).
There are synaptic inputs.
Second method.
'''
from brian2 import *
# Morphology: a soma with two 100 um dendrites ("L" and "R"), 50 compartments each.
morpho = Soma(30*um)
morpho.L = Cylinder(diameter=1*um, length=100*um, n=50)
morpho.R = Cylinder(diameter=1*um, length=100*um, n=50)
# Passive channels
gL = 1e-4*siemens/cm**2
EL = -70*mV
# Synaptic reversal potential and conductance decay time constant.
Es = 0*mV
taus = 1*ms
eqs='''
Im = gL*(EL-v) : amp/meter**2
Is = gs*(Es-v) : amp (point current)
dgs/dt = -gs/taus : siemens
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs,
                       Cm=1*uF/cm**2, Ri=100*ohm*cm)
neuron.v = EL
# Regular inputs: two 300 Hz oscillators, phase-shifted by half a cycle.
stimulation = NeuronGroup(2, 'dx/dt = 300*Hz : 1', threshold='x>1', reset='x=0')
stimulation.x = [0, 0.5] # Asynchronous
# Synapses: one input onto the tip of each dendrite.
# NOTE(review): `pre=` and positional connect() are the legacy Brian 2
# synapse API; current releases use on_pre= and connect(i=..., j=...).
w = 20*nS
S = Synapses(stimulation, neuron, pre = 'gs += w')
S.connect(0, morpho.L[99.9*um])
S.connect(1, morpho.R[99.9*um])
# Monitors: soma, whole left dendrite, and the right dendrite tip.
mon_soma = StateMonitor(neuron, 'v', record=[0])
mon_L = StateMonitor(neuron.L, 'v', record=True)
mon_R = StateMonitor(neuron, 'v', record=morpho.R[99.9*um])
run(50*ms, report='text')
# Top: soma (black) and both dendritic tips (red/blue). Bottom: voltage at
# several points along the left dendrite.
subplot(211)
plot(mon_L.t/ms, mon_soma[0].v/mV, 'k')
plot(mon_L.t/ms, mon_L[morpho.L[99.9*um]].v/mV, 'r')
plot(mon_L.t/ms, mon_R[morpho.R[99.9*um]].v/mV, 'b')
ylabel('v (mV)')
subplot(212)
for i in [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]:
    plot(mon_L.t/ms, mon_L.v[i, :]/mV)
xlabel('Time (ms)')
ylabel('v (mV)')
show()
|
"""retriever.lib contains the core Data Retriever modules."""
|
#!/usr/bin/python -u
"""
test serial transfer between two devices
This module assumes two devices (e.g. FT232R based) are connected to the
host computer, with RX and TX of the two devices wired to each other so
they can communicate. It launches threads to send and receive traffic and
check that a random stream sent from one device is correctly received at
the other:
Testing 9600 baud
Half duplex d1->d2...
Bytes TX: 10000 RX: 10000
Checksum TX: ad7e985fdddfbc04e398daa781a9fad0 RX: ad7e985fdddfbc04e398daa781a9fad0
SUCCESS
Half duplex d2->d1...
Bytes TX: 10000 RX: 10000
Checksum TX: 61338c11fe18642a07f196094646295f RX: 61338c11fe18642a07f196094646295f
SUCCESS
Full duplex d1<=>d2...
Bytes TX: 10000 RX: 10000
Checksum TX: 7dcc7ed3b89e46592c777ec42c330fd8 RX: 7dcc7ed3b89e46592c777ec42c330fd8
SUCCESS
Bytes TX: 10000 RX: 10000
Checksum TX: 1a957192b8219aa02ad374dd518e37fd RX: 1a957192b8219aa02ad374dd518e37fd
SUCCESS
Copyright (c) 2015-2020 Ben Bass <benbass@codedstructure.net>
All rights reserved.
"""
import time
import random
import hashlib
import threading
from itertools import islice
from pylibftdi import Device, FtdiError
class RandomStream(object):
    """
    Endless iterator over single random bytes; the MD5 checksum of everything
    yielded so far can be queried at any point via checksum().
    """

    def __init__(self, block_size=1024):
        # Number of random bytes generated per internal buffer refill.
        self._block_size = block_size
        self.reset()  # establishes all mutable state

    @staticmethod
    def _rand_gen(size):
        """Return a `bytes` instance of `size` random bytes."""
        return bytes(bytearray(random.getrandbits(8) for _ in range(size)))

    def reset(self):
        """Restart the stream: fresh hash, fresh buffer, zeroed counters."""
        self.stream_hash = hashlib.md5()
        self.rand_buf = self._rand_gen(self._block_size)
        self.chk_tail = self.chk_head = 0
        self.bytecount = 0

    def _update_checksum(self):
        # Fold the not-yet-hashed span of the buffer into the running digest.
        pending = self.rand_buf[self.chk_tail:self.chk_head]
        self.stream_hash.update(pending)
        self.chk_tail = self.chk_head

    def checksum(self):
        """Hex MD5 digest of all bytes yielded so far."""
        self._update_checksum()
        return self.stream_hash.hexdigest()

    def __iter__(self):
        while True:
            head = self.chk_head
            # Slice rather than index so Python 3 yields bytes, not an int.
            one_byte = self.rand_buf[head:head + 1]
            self.chk_head = head + 1
            self.bytecount += 1
            yield one_byte
            if self.chk_head == self._block_size:
                # Buffer exhausted: hash it and refill.
                self._update_checksum()
                self.rand_buf = self._rand_gen(self._block_size)
                self.chk_head = self.chk_tail = 0
def test_rs():
    """Sanity-check RandomStream.

    Fix: `prev_checksum` was never updated inside the loop, so the inequality
    assertion vacuously compared each hex digest against the initial 0.  It
    now tracks the previous digest so each iteration is checked against the
    one before it.
    """
    r = RandomStream()
    prev_checksum = None
    stream_bytes = []
    for _ in range(30):
        stream_bytes.append(b''.join(islice(r, 500)))
        # Digest must advance as data flows, and be stable between queries.
        assert r.checksum() != prev_checksum
        assert r.checksum() == r.checksum()
        prev_checksum = r.checksum()
    # Final digest must equal the md5 of everything yielded.
    assert hashlib.md5(b''.join(stream_bytes)).hexdigest() == r.checksum()
class HalfDuplexTransfer(object):
    """
    Test streaming bytes from one device to another.

    A writer thread pushes RandomStream data out of `source` while a reader
    thread accumulates whatever arrives on `dest`; results() compares byte
    counts and MD5 checksums of both ends.
    """
    def __init__(self, source, dest, baudrate=9600, block_size=500):
        """
        Prepare for half-duplex transmission from source device to dest
        """
        self.source = source
        self.dest = dest
        self.source.baudrate = baudrate
        self.dest.baudrate = baudrate
        # Chunks received on the destination device.
        self.target = []
        # Set by the reader once it is ready to receive.
        self.wait_signal = threading.Event()
        # Set by the writer thread as soon as it starts running.
        self.running = threading.Event()
        self.rs = RandomStream()
        self.block_size = block_size
        # Seconds the writer keeps sending; overridable via go().
        self.test_duration = 10
        self.t1 = None  # writer thread
        self.t2 = None  # reader thread
        # Writer sets this to tell the reader to stop.
        self.done = False
    def reader(self):
        """Reader thread body: accumulate chunks from dest until done."""
        # Tell writer we're ready for the deluge...
        self.wait_signal.set()
        # if we've just finished reading when self.done get's set by the
        # writer, we won't get the 'last' packet. But if we assume there's
        # always one more after done gets set, we'll get some ReadTimeouts....
        # Probably best to try one more time but catch & ignore ReadTimeout.
        while not self.done:
            data = self.dest.read(1024)
            self.target.append(data)
        try:
            data = self.dest.read(1024)
            self.target.append(data)
        except FtdiError:
            pass
    def writer(self):
        """Writer thread body: stream random blocks to source for test_duration."""
        self.running.set()
        self.wait_signal.wait()
        end_time = time.time() + self.test_duration
        while time.time() < end_time:
            x = b''.join(list(islice(self.rs, self.block_size)))
            self.source.write(x)
            # Wait for the reader to catch up
            time.sleep(0.01)
        self.done = True
    def go(self, test_duration=None):
        """Start the writer, then (once it is running) the reader."""
        if test_duration is not None:
            self.test_duration = test_duration
        self.t1 = threading.Thread(target=self.writer)
        self.t1.daemon = True
        self.t1.start()
        # We wait for the writer to be actually running (but not yet
        # writing anything) before we start the reader.
        self.running.wait()
        self.t2 = threading.Thread(target=self.reader)
        self.t2.daemon = True
        self.t2.start()
    def join(self):
        """Wait for both threads to finish."""
        # Use of a timeout allows Ctrl-C interruption
        self.t1.join(timeout=1e6)
        self.t2.join(timeout=1e6)
    def results(self):
        """Print TX/RX byte counts and checksums, and a SUCCESS/FAIL verdict."""
        result = b''.join(self.target)
        print(" Bytes TX: {} RX: {}".format(self.rs.bytecount, len(result)))
        rx_chksum = hashlib.md5(b''.join(self.target)).hexdigest()
        print(" Checksum TX: {} RX: {}".format(self.rs.checksum(), rx_chksum))
        if len(result) == self.rs.bytecount and self.rs.checksum() == rx_chksum:
            print(" SUCCESS")
        else:
            print(" FAIL")
def test_half_duplex_transfer(d1, d2, baudrate=9600):
    """
    Run a half-duplex stream from d1 to d2 and report on its status.
    """
    transfer = HalfDuplexTransfer(d1, d2, baudrate)
    for step in (transfer.go, transfer.join, transfer.results):
        step()
def test_full_duplex_transfer(d1, d2, baudrate=9600):
    """
    Start two half-duplex streams in opposite directions at the same time
    and check that both complete correctly.
    """
    forward = HalfDuplexTransfer(d1, d2, baudrate)
    forward.go()
    backward = HalfDuplexTransfer(d2, d1, baudrate)
    backward.go()
    forward.join()
    forward.results()
    backward.join()
    backward.results()
def main():
    """Open both FTDI devices and run the half/full duplex tests at several baud rates."""
    dev_a = Device(device_index=0)
    dev_b = Device(device_index=1)
    for baud in (9600, 38400, 115200):
        print("Testing {} baud".format(baud))
        dev_a.flush()
        dev_b.flush()
        print("Half duplex d1->d2...")
        test_half_duplex_transfer(dev_a, dev_b, baudrate=baud)
        dev_a.flush()
        dev_b.flush()
        print("Half duplex d2->d1...")
        test_half_duplex_transfer(dev_b, dev_a, baudrate=baud)
        dev_a.flush()
        dev_b.flush()
        print("Full duplex d1<=>d2...")
        test_full_duplex_transfer(dev_a, dev_b, baudrate=baud)
# Self-test the RandomStream helper first, then drive the hardware loopback test.
if __name__ == '__main__':
    test_rs()
    main()
|
#%%
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
from tensorflow.keras.layers.experimental.preprocessing import Resizing
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import os
import matplotlib.pyplot as plt
import pickle
from cifarData import CIFAR10
# Directory for persisted models, created next to this script on first run.
MODEL_DIR = os.path.join(os.path.dirname(__file__),"models")
if not os.path.exists(MODEL_DIR):
    os.mkdir(MODEL_DIR)
MODEL_FILE_PATH = os.path.join(MODEL_DIR, "feature_extraction_model.h5")
# ImageNet-style input geometry (defined but not used below).
IMAGENET_SIZE = 96
IMAGENET_DEPTH = 3
IMAGENET_SHAPE = (IMAGENET_SIZE, IMAGENET_SIZE, IMAGENET_DEPTH)
# CIFAR-10 input geometry (32x32 RGB).
CIFAR_SIZE = 32
CIFAR_DEPTH = 3
CIFAR_SHAPE = (CIFAR_SIZE, CIFAR_SIZE, CIFAR_DEPTH)
def build_base_model(img_shape, num_classes) -> Model:
    """Build a frozen VGG16 feature extractor truncated at block3_pool.

    Args:
        img_shape: currently unused -- the VGG16 input is hard-wired to
            CIFAR_SHAPE; kept for interface compatibility with the caller.
        num_classes: unused; kept for interface compatibility.

    Returns:
        A Model mapping a CIFAR-sized image to its block3_pool feature maps.
    """
    base_model = VGG16(
        include_top=False,
        weights="imagenet",
        input_shape=CIFAR_SHAPE  # NOTE(review): consider using img_shape here
    )
    print(f"Number of layers in the base model: {len(base_model.layers)}")
    # Freeze all convolutional weights and log each layer's filter shapes.
    for i, layer in enumerate(base_model.layers):
        layer.trainable = False
        for filt in layer.get_weights():
            print(i, layer.name, filt.shape)
    # Fix: removed dead locals (input_img/features were built but never used;
    # the returned model is wired straight off base_model's own input).
    model = Model(
        inputs=[base_model.input],
        outputs=[base_model.get_layer('block3_pool').output]
    )
    model.summary()
    return model
def build_my_model(feature_shape, num_classes) -> Model:
    """Build the classification head that consumes VGG16 block3_pool features.

    Fixes:
    - The second Conv2D was wired to `input_features` instead of the preceding
      pooling output, which left the first conv/pool pair dangling (dead) in
      the graph; the layers now chain correctly.
    - Removed the reliance on the module-global `base_model` (it raised
      NameError when this function was used outside the __main__ script and
      only re-froze layers already frozen in build_base_model).
    """
    input_features = Input(shape=feature_shape)
    x = Conv2D(512, 3, padding="same")(input_features)
    x = Activation("relu")(x)
    x = MaxPooling2D()(x)
    x = Conv2D(1024, 3, padding="same")(x)  # was: (input_features) -- dead-graph bug
    x = Activation("relu")(x)
    x = MaxPooling2D()(x)
    x = Flatten()(x)
    x = Dense(units=512)(x)
    x = Activation("relu")(x)
    x = Dense(units=256)(x)
    x = Activation("relu")(x)
    x = Dense(units=128)(x)
    x = Activation("relu")(x)
    x = Dense(units=num_classes)(x)
    y_pred = Activation("softmax")(x)
    model = Model(
        inputs=[input_features],
        outputs=[y_pred]
    )
    model.summary()
    return model
if __name__ == "__main__":
data = CIFAR10()
(train_x, train_y) = data.get_train_set()
(val_x, val_y) = data.get_val_set()
(test_x, test_y) = data.get_test_set()
plt.imsave(f"output/feature_extraction/test_image_0.jpg", train_x[0])
img_shape = data.img_shape
num_classes = data.num_classes
train_x = preprocess_input(train_x)
val_x = preprocess_input(val_x)
test_x = preprocess_input(test_x)
# Global params
epochs = 100
base_model = build_base_model(
img_shape,
num_classes
)
opt = Adam(learning_rate=5e-4)
base_model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"]
)
es_callback = EarlyStopping(
monitor="val_loss",
patience=30,
verbose=1,
restore_best_weights=True
)
features_train = base_model.predict(train_x, batch_size=32)
features_val = base_model.predict(val_x, batch_size=32)
#%%
square = 8
i=1
fig = plt.figure()
for _ in range(square):
for _ in range(square):
ax = fig.add_subplot(square, square, i)
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(features_train[0,:,:,i-1],cmap='gray')
i += 1
fig.savefig(f"output/feature_extraction/test_image_0_features.jpg")
my_model = build_my_model(
features_train.shape[1:],
num_classes
)
my_model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"]
)
my_model.fit(
features_train,
train_y,
verbose=1,
epochs=epochs,
callbacks=[es_callback],
validation_data=(features_val
, val_y),
batch_size=128
)
scores = my_model.evaluate(
features_val,
val_y,
verbose=0
)
print(f"Scores: {scores}")
# %%
|
from io import BytesIO, BufferedIOBase
from math import ceil
from typing import List, Optional, Union
import emoji
import nonebot
from PIL import Image, ImageFont, ImageDraw
from nonebot.adapters.cqhttp.message import MessageSegment
from nonebot.log import logger
from pydantic import BaseModel
from .config import Config
# Monotonically increasing id assigned to each Mail instance (see Mail.__init__).
mailcnt = 0
global_config = nonebot.get_driver().config
# Plugin-specific settings parsed out of the global nonebot config.
plugin_config = Config(**global_config.dict())
def circle_corner(img: Image.Image, radii: int) -> Image.Image:
    """Round the four corners of *img* with the given radius; returns an RGBA image."""
    # Full circle mask whose quadrants become the four corner masks.
    mask_circle = Image.new('L', (radii * 2, radii * 2), 0)
    ImageDraw.Draw(mask_circle).ellipse((0, 0, radii * 2, radii * 2), fill=255)
    img = img.convert("RGBA")
    w, h = img.size
    # Alpha is 255 everywhere except outside each quarter-circle corner.
    alpha = Image.new("L", img.size, 255)
    alpha.paste(mask_circle.crop((0, 0, radii, radii)), (0, 0))                                   # top-left
    alpha.paste(mask_circle.crop((radii, 0, radii * 2, radii)), (w - radii, 0))                   # top-right
    alpha.paste(mask_circle.crop((radii, radii, radii * 2, radii * 2)), (w - radii, h - radii))   # bottom-right
    alpha.paste(mask_circle.crop((0, radii, radii, radii * 2)), (0, h - radii))                   # bottom-left
    img.putalpha(alpha)
    return img
def square_n_thumb(imglist: list, sidelength: int) -> list:
    """
    Center-crop each image to a rounded-corner square (converted to RGBA)
    and thumbnail it in place.

    :param imglist: list of PIL images to process (mutated in place)
    :param sidelength: target side length of each thumbnail
    :return: the same list containing the processed images
    """
    for i in range(len(imglist)):
        # Crop symmetrically down to the shorter edge so the result is square.
        min_edge = min(imglist[i].size[0], imglist[i].size[1])
        cut_width = (imglist[i].size[0] - min_edge) / 2
        cut_height = (imglist[i].size[1] - min_edge) / 2
        imglist[i] = imglist[i].crop((cut_width, cut_height,
                                      imglist[i].size[0] - cut_width, imglist[i].size[1] - cut_height))
        imglist[i].thumbnail((sidelength, sidelength))
        imglist[i] = circle_corner(imglist[i], 10)
    return imglist
class Mail(object):
    """A translated mail/tweet awaiting forwarding, with its images and lifecycle state."""
    # Sequential id assigned from the module-level mailcnt counter.
    no: int
    # Original (untranslated) text.
    raw_text: str
    # Attached images, as raw bytes or open binary streams.
    images: Union[List[bytes], List[BufferedIOBase]]
    # Translated text to publish.
    translation: str
    # Timestamp string (format set by the caller).
    time: str
    def __init__(self):
        """Create an empty Mail and assign it the next sequential id."""
        global mailcnt
        self.no = mailcnt
        self.raw_text = ""
        self.images = []
        self.translation = ""
        self.time = ""
        # 0: initial / non-mail content   1: images loaded, awaiting translation
        # 2: translation loaded, awaiting images   3: ready to send
        # 4: sent   5: cancelled
        self.stat = 0
        self.type = ""  # "tweet" or "mail"
        mailcnt += 1
def status(self) -> str:
if self.stat == 0:
return "非mail内容,等待发送"
if self.stat == 1:
return "图片收集完成,等待翻译"
if self.stat == 2:
return "翻译收集完成,等待图片"
if self.stat == 3:
return "准备完毕,等待发送"
if self.stat == 4:
return "已发送"
if self.stat == 5:
return "已取消"
def message(self):
msg = MessageSegment.text(self.translation)
if self.images:
for image in self.images:
msg += MessageSegment.image(image)
return msg
    def info(self):
        """Build a status-report message: id, type, raw text, images, translation, state."""
        msg = MessageSegment.text("序号:" + str(self.no) + "\n")
        msg += MessageSegment.text(f"类型:{self.type}\n")
        msg += MessageSegment.text("原文:\n")
        if self.raw_text:
            msg += MessageSegment.text("*************\n")
            msg += MessageSegment.text(self.raw_text)
            msg += MessageSegment.text("\n*************\n")
        msg += MessageSegment.text("图片:")
        if self.images:
            for image in self.images:
                msg += MessageSegment.image(image)
            msg += MessageSegment.text("\n")
        msg += MessageSegment.text("翻译:\n")
        if self.translation:
            msg += MessageSegment.text("*************\n")
            msg += MessageSegment.text(self.translation)
            msg += MessageSegment.text("\n*************\n")
        msg += MessageSegment.text("状态:" + self.status())
        return msg
def imgcreate(self):
top = Image.open("./imgsrc/top.jpg")
bottom = Image.open("./imgsrc/bottom.jpg")
background = Image.open("./imgsrc/background.jpg")
fnt = ImageFont.truetype("./imgsrc/font.otf", 45)
fnt_emoji = ImageFont.truetype("./imgsrc/font_emoji.ttf", 109, layout_engine=ImageFont.LAYOUT_RAQM)
width = background.size[0]
height_top = top.size[1]
height_bottom = bottom.size[1]
d = ImageDraw.Draw(background)
imgs = []
if self.images:
imgs = [Image.open(BytesIO(img)) for img in self.images]
s = self.translation
def emoji_repl(symbol, meta):
return symbol
s = emoji.replace_emoji(s, emoji_repl)
text_edited = ""
text_tmp = ""
for char in list(s.replace("\r\n", "\n").replace(" ", "")):
text_tmp += char
if char == "\n":
text_edited += text_tmp
text_size = 0
text_tmp = ""
else:
text_size = d.textsize(text_tmp, font=fnt)[0]
if text_size > width - 30 * 4:
text_edited += text_tmp + "\n"
text_size = 0
text_tmp = ""
if text_tmp != "":
text_edited += text_tmp
font_height = d.textsize(plugin_config.dynamic_topic, font=fnt)[1] # 设置首行话题的高度
height_text = (font_height + 30) * (1 + len(text_edited.split("\n")))
logger.debug(f"预设文字高度:{height_text}")
text_ground = Image.new("RGB", size=(width, height_text), color=(255, 255, 255))
d = ImageDraw.Draw(text_ground)
d.text((30, 0), plugin_config.dynamic_topic, font=fnt, fill=(17, 136, 178)) # 行距30,左边距30
width_offset, height_offset = (30, font_height + 25)
r = emoji.get_emoji_regexp()
for line in text_edited.split("\n"):
if not line:
height_offset += font_height + 30
width_offset = 30
continue
line_split_emj = r.split(line)
for text in line_split_emj:
if not emoji.is_emoji(text):
d.text((width_offset, height_offset), text, font=fnt, fill=(0, 0, 0))
width_offset += int(fnt.getlength(text))
else:
t = Image.new("RGB", size=(150, 150), color=(255, 255, 255)) # FreeType 不可以直接设定尺寸,只能手动缩放
td = ImageDraw.Draw(t)
td.text((0, 20), text, font=fnt_emoji, fill=(0, 0, 0), embedded_color=True)
t = t.resize((60, 60))
text_ground.paste(t, (width_offset, height_offset))
width_offset += 55
height_offset += font_height + 30
width_offset = 30
logger.debug(f"最终文字高度:{height_offset}")
height_pic = background.size[1]
pic_ground = background.copy().convert("RGBA") # 没有图片的时候只粘贴一段默认空白背景
if imgs:
if len(imgs) == 1:
sidelen = width - 30 * 2
rate = sidelen / imgs[0].size[0]
imgs[0] = imgs[0].resize((int(rate * imgs[0].size[0]), int(rate * imgs[0].size[1])))
height_pic = imgs[0].size[1]
imgs[0] = circle_corner(imgs[0], 10)
pic_ground = Image.new("RGBA", size=(width, height_pic), color=(255, 255, 255))
pic_ground.paste(imgs[0], box=(30, 0))
else:
if len(imgs) == 2:
sidelen = round((width - 30 * 2 - 15) / 2)
height_pic = sidelen
imgs = square_n_thumb(imgs, sidelen)
pic_ground = Image.new("RGBA", size=(width, height_pic), color=(255, 255, 255))
pic_ground.paste(imgs[0], box=(30, 0))
pic_ground.paste(imgs[1], box=(30 + sidelen + 15, 0))
else:
sidelen = round((width - 30 * 2 - 15 * 2) / 3)
height_pic = (sidelen + 15) * ceil(len(imgs) / 3) - 15
imgs = square_n_thumb(imgs, sidelen)
pic_ground = Image.new("RGBA", size=(width, height_pic), color=(255, 255, 255))
column_cursor = 0
row_cursor = - (sidelen + 15)
text_cnt = 1
for img in imgs:
if text_cnt % 3 == 1:
column_cursor = 30
row_cursor += sidelen + 15
else:
column_cursor += sidelen + 15
pic_ground.paste(img, box=(column_cursor, row_cursor))
text_cnt = text_cnt + 1
if text_cnt > 9:
break
height_total = height_top + height_text + height_pic + height_bottom
final = Image.new("RGB", (width, height_total), (255, 255, 255)) # 前面计算需要多少高度,并准备好图片的四个部分(top/text/pic/bottom)
final.paste(top, box=(0, 0))
final.paste(text_ground, box=(0, height_top))
final.paste(pic_ground, box=(0, height_top + height_text), mask=pic_ground.split()[3])
final.paste(bottom, box=(0, height_top + height_text + height_pic))
ret = BytesIO()
final.save(ret, format="jpeg")
return ret.getvalue()
def preview(self):
notes1 = "\n—————————\n" \
f"*如需修改请重新发送翻译,无需取消,旧翻译会被覆盖\n" \
f"**发送“取消发送 {self.no}”取消"
notes2 = "\n—————————\n" \
f"*如需修改请先取消发送,再重新回复原消息\n" \
f"**发送“取消发送 {self.no}”取消"
if self.stat != 0:
# msg = "【发送预览】\n#贺喜遥香#\n" + self.message() + notes1
msg = "【发送预览】\n-检查翻译错误/图片缺失情况-\n" + MessageSegment.image(self.imgcreate()) + notes1
else:
# msg = "【发送预览】\n#贺喜遥香#\n" + self.message() + notes2
msg = "【发送预览】\n-检查翻译错误/图片缺失情况-\n" + MessageSegment.image(self.imgcreate()) + notes2
return msg
class ParsedObject(BaseModel):
    """Parsed content container (BaseModel): text, image URLs and optional timestamp."""
    text: str  # plain text content
    images_url: List[str]  # URLs of attached images
    timestamp: Optional[str]  # time string if available -- format not verified here
|
#! /usr/bin/env python
import generator_test
import sys
# HWUT self-test driver for quex's code generator.
# NOTE: Python 2 syntax (print statements) -- do not run under Python 3.
# With --hwut-info the script only advertises its title and build choices.
if "--hwut-info" in sys.argv:
    print "Pseudo Ambgiguous Post Condition: Part I"
    print "CHOICES: ANSI-C-PlainMemory, ANSI-C, ANSI-C-CG, Cpp, Cpp_StrangeStream, Cpp-Template, Cpp-Template-CG, Cpp-Path, Cpp-Path-CG, ANSI-C-PathTemplate;"
    print "SAME;"
    sys.exit(0)
# First CLI argument selects the code-generation backend to test.
choice = sys.argv[1]
pattern_list = [
    # -- pre-conditioned expressions need to preceed same (non-preoconditioned) expressions,
    #    otherwise, the un-conditional expressions gain precedence and the un-conditional
    #    pattern is never matched.
    #
    # -- post-conditioned patterns do not need to appear before the same non-postconditioned
    #    patterns, since they are always longer.
    #
    # normal repetition (one or more) of 'x'
    'x+/x',
    # other characters
    '[a-z]+',
    # whitespace
    '[ \\t\\n]+'
]
# Each pattern's "action" is simply the pattern itself (backslashes escaped
# so it survives embedding in generated code).
pattern_action_pair_list = map(lambda x: [x, x.replace("\\", "\\\\")], pattern_list)
test_str = "xxx x xx x"
# Tiny buffer size (10) deliberately forces buffer-reload paths in the lexer.
generator_test.do(pattern_action_pair_list, test_str, {}, choice, QuexBufferSize=10)
|
#Lane Carasik
#Last Modified: 12/15/2015
'''% if Pe <= 200
% Nu = 24.15*log10(-8.12 + 12.76*PtoD - 3.65*PtoD^2);
% else if Pe > 200
% Nu = 24.15*log10(-8.12 + 12.76*PtoD - 3.65*PtoD^2) ...
% + 0.0174*(1-exp(-6*(PtoD-1)))*(Pe-200)^0.9;
% end
% end'''
import numpy as np
def Nu(PtoD, Pe):
    """Nusselt number for a given pitch-to-diameter ratio and Peclet number.

        Nu = 4.496 * (-16.15 + 24.96*PtoD - 8.55*PtoD**2)            for Pe <= 150
        Nu = 4.496 * (-16.15 + 24.96*PtoD - 8.55*PtoD**2) * (Pe/150)**0.3   otherwise

    Parameters
    ----------
    PtoD : float
        Pitch-to-diameter ratio of the rod bundle.
    Pe : float
        Peclet number.

    Returns
    -------
    float
        Nusselt number.

    NOTE(review): the coefficients and the Pe threshold (150) differ from the
    MATLAB correlation quoted in the module docstring (threshold 200) --
    presumably a different correlation was intended here; confirm against the
    source reference.
    """
    # Fix: the original bound the result to a local named `Nu`, shadowing the
    # function itself; the polynomial factor was also duplicated in both branches.
    base = 4.496 * (-16.15 + 24.96 * PtoD - 8.55 * (PtoD ** 2))
    if Pe <= 150:
        return base
    return base * (Pe / 150) ** 0.3
from utils import CSVScraper
class GrandePrairieCountyNo1PersonScraper(CSVScraper):
    # Scraper for elected officials of the County of Grande Prairie No. 1,
    # fed from the county's published CSV; all parsing logic lives in CSVScraper.
    csv_url = 'http://data.countygp.ab.ca/data/ElectedOfficials/elected-officials.csv'
|
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
def color_dict(mpc_database):
    """Assign a fixed plot color to each distinct orbit type.

    The distinct values of ``mpc_database["Orbit_type"]`` (sorted by
    ``np.unique``) are paired with a fixed palette; types beyond the
    palette length are left unmapped.
    """
    palette = [
        "gold",
        "red",
        "dodgerblue",
        "limegreen",
        "grey",
        "magenta",
        "chocolate",
        "blue",
        "orange",
        "mediumspringgreen",
        "deeppink",
    ]
    orbit_types = np.unique(mpc_database["Orbit_type"])
    return dict(zip(orbit_types, palette))
def compute_residue(df):
    """Append residual columns (computed minus known orbital elements).

    Adds da/de/di/dNode/dPeri/dM as the element-wise difference between the
    computed elements (``*_x``, "long. node", "arg. peric", "mean anomaly")
    and the reference elements (``*_y``, Node, Peri, M). Returns the frame
    with a reset integer index.
    """
    df = df.reset_index(drop=True)
    computed_cols = ["a_x", "e_x", "i_x", "long. node", "arg. peric", "mean anomaly"]
    known_cols = ["a_y", "e_y", "i_y", "Node", "Peri", "M"]
    residual_cols = ["da", "de", "di", "dNode", "dPeri", "dM"]
    df[residual_cols] = df[computed_cols].values - df[known_cols].values
    return df
def plot_residue(df, orbit_color, n_trajectories, n_points):
    """Plot residuals of the six orbital elements, grouped by orbit type.

    One subplot per element; successful fits are round markers, failed fits
    (rows where ``a_x == -1``) are drawn as crosses. ``orbit_color`` maps
    orbit type -> color (see color_dict). Blocks on plt.show().
    """
    df = compute_residue(df)
    orbit_type = np.unique(df["Orbit_type"])
    fig, axes = plt.subplots(3, 2, sharex=True)
    fig.suptitle(
        "Orbital elements residuals, {} trajectories, {} points".format(
            n_trajectories, n_points
        )
    )
    subplot_title = [
        "semi-major axis",
        "eccentricity",
        "inclination",
        "Longitude of the ascending node",
        "Argument of perihelion",
        "Mean anomaly",
    ]
    for ax, orb_elem, title in zip(
        axes.flatten(), ["da", "de", "di", "dNode", "dPeri", "dM"], subplot_title
    ):
        ax.set_title(title)
        ax.axhline(0, ls="--", color="grey")  # zero-residual reference line
        for otype in orbit_type:
            v = df[df["Orbit_type"] == otype]
            omean = np.mean(v[orb_elem].values)
            # a_x == -1 is the sentinel for a failed orbit determination.
            failed_orb = np.where(v["a_x"].values == -1)
            success_orb = np.where(v["a_x"].values != -1)
            ax.scatter(
                np.array(v.index)[success_orb],
                v[orb_elem].values[success_orb],
                label="{}: {}, mean : {}, fail: {}".format(
                    otype, len(v), np.around(omean, decimals=4), len(failed_orb[0])
                ),
                color=orbit_color[otype],
            )
            ax.scatter(
                np.array(v.index)[failed_orb],
                v[orb_elem].values[failed_orb],
                marker="x",
                color=orbit_color[otype],
            )
            # Dotted line marks the per-type mean residual.
            ax.axhline(omean, ls=":", color=orbit_color[otype])
        ax.set_ylabel("$\delta$ {}".format(orb_elem[1:]))  # noqa: W605
        ax.legend(prop={"size": 7})
    plt.show()
def plot_cpu_time(all_time, n_trajectories, n_points):
    """Plot computation time versus number of CPUs used.

    ``all_time`` is expected to hold one timing per CPU count, for
    1..mp.cpu_count() workers. Blocks on plt.show().
    """
    plt.plot(np.arange(1, mp.cpu_count() + 1), all_time)
    plt.xlabel("number of cpu")
    plt.ylabel("computation time")
    plt.title(
        "CPU Time analysis\nwith file write on ram\n {} trajectories with {} points".format(
            n_trajectories, n_points
        )
    )
    plt.show()
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 CEA
# Pierre Raybaut
# Licensed under the terms of the CECILL License
# (see guiqwt/__init__.py for details)
# pylint: disable=C0103
"""
guiqwt.histogram
----------------
The `histogram` module provides histogram related objects:
* :py:class:`guiqwt.histogram.HistogramItem`: an histogram plot item
* :py:class:`guiqwt.histogram.ContrastAdjustment`: the `contrast
adjustment panel`
* :py:class:`guiqwt.histogram.LevelsHistogram`: a curve plotting widget
used by the `contrast adjustment panel` to compute, manipulate and
display the image levels histogram
``HistogramItem`` objects are plot items (derived from QwtPlotItem) that may
be displayed on a 2D plotting widget like :py:class:`guiqwt.curve.CurvePlot`
or :py:class:`guiqwt.image.ImagePlot`.
Example
~~~~~~~
Simple histogram plotting example:
.. literalinclude:: /../guiqwt/tests/histogram.py
Reference
~~~~~~~~~
.. autoclass:: HistogramItem
:members:
:inherited-members:
.. autoclass:: ContrastAdjustment
:members:
:inherited-members:
.. autoclass:: LevelsHistogram
:members:
:inherited-members:
"""
import weakref
import numpy as np
from qtpy.QtCore import Qt, Signal
from qtpy.QtWidgets import QHBoxLayout, QVBoxLayout, QToolBar
from guidata.dataset.datatypes import DataSet
from guidata.dataset.dataitems import FloatItem
from guidata.utils import assert_interfaces_valid, update_dataset
from guidata.configtools import get_icon, get_image_layout
from guidata.qthelpers import add_actions, create_action
# Local imports
from guiqwt.transitional import QwtPlotCurve
from guiqwt.config import CONF, _
from guiqwt.interfaces import IBasePlotItem, IHistDataSource, IVoiImageItemType, IPanel
from guiqwt.panels import PanelWidget, ID_CONTRAST
from guiqwt.curve import CurveItem, CurvePlot
from guiqwt.image import ImagePlot
from guiqwt.styles import HistogramParam, CurveParam
from guiqwt.shapes import XRangeSelection
from guiqwt.tools import SelectTool, BasePlotMenuTool, SelectPointTool, AntiAliasingTool
from guiqwt.plot import PlotManager
class HistDataSource(object):
    """
    An object that provides the histogram data source interface
    (IHistDataSource) on top of a plain numpy array of data
    """

    __implements__ = (IHistDataSource,)

    def __init__(self, data):
        self.data = data  # array of samples to histogram

    def get_histogram(self, nbins):
        """Returns the histogram computed for nbins bins"""
        return np.histogram(self.data, nbins)


assert_interfaces_valid(HistDataSource)
def hist_range_threshold(hist, bin_edges, percent):
    """Return the (low, high) bin-edge values that clip ``percent`` % of the
    total counts, half on each side of the histogram.

    The count array is padded with a trailing zero so it can be indexed
    symmetrically against ``bin_edges`` from both ends.
    """
    padded = np.concatenate((hist, [0]))
    threshold = 0.5 * percent / 100 * padded.sum()
    lower_idx = np.cumsum(padded).searchsorted(threshold)
    upper_idx = -1 - np.cumsum(np.flipud(padded)).searchsorted(threshold)
    return bin_edges[lower_idx], bin_edges[upper_idx]
def lut_range_threshold(item, bins, percent):
    """Compute an outlier-clipped LUT range for *item*.

    Builds the item's histogram over ``bins`` bins and returns the
    (low, high) levels that exclude ``percent`` % of counts overall.
    """
    counts, edges = item.get_histogram(bins)
    return hist_range_threshold(counts, edges, percent)
class HistogramItem(CurveItem):
    """A Qwt item representing histogram data"""

    __implements__ = (IBasePlotItem,)

    def __init__(self, curveparam=None, histparam=None):
        # Cached histogram state; old_* values let update_histogram() detect
        # when an autoscale is needed.
        self.hist_count = None
        self.hist_bins = None
        self.bins = None
        self.old_bins = None
        self.source = None  # weakref to the histogram data source (see set_hist_source)
        self.logscale = None
        self.old_logscale = None
        if curveparam is None:
            curveparam = CurveParam(_("Curve"), icon="curve.png")
            curveparam.curvestyle = "Steps"
        if histparam is None:
            self.histparam = HistogramParam(title=_("Histogram"), icon="histogram.png")
        else:
            self.histparam = histparam
        CurveItem.__init__(self, curveparam)
        self.setCurveAttribute(QwtPlotCurve.Inverted)

    def set_hist_source(self, src):
        """
        Set histogram source

        *source*:

            Object with method `get_histogram`, e.g. objects derived from
            :py:data:`guiqwt.image.ImageItem`
        """
        # Stored as a weak reference so the histogram does not keep the
        # source item alive.
        self.source = weakref.ref(src)
        self.update_histogram()

    def get_hist_source(self):
        """
        Return histogram source

        *source*:

            Object with method `get_histogram`, e.g. objects derived from
            :py:data:`guiqwt.image.ImageItem`
        """
        if self.source is not None:
            return self.source()

    def set_hist_data(self, data):
        """Set histogram data"""
        self.set_hist_source(HistDataSource(data))

    def set_logscale(self, state):
        """Sets whether we use a logarithm or linear scale
        for the histogram counts"""
        self.logscale = state
        self.update_histogram()

    def get_logscale(self):
        """Returns the status of the scale"""
        return self.logscale

    def set_bins(self, n_bins):
        """Set the number of histogram bins and recompute."""
        self.bins = n_bins
        self.update_histogram()

    def get_bins(self):
        """Return the current number of histogram bins."""
        return self.bins

    def compute_histogram(self):
        """Compute (counts, bin_edges) from the current source and bin count."""
        return self.get_hist_source().get_histogram(self.bins)

    def update_histogram(self):
        """Recompute the histogram and refresh the curve data.

        No-op when no source is set. Applies log(1+count) when logscale is on,
        and autoscales the plot when bins or scale have changed.
        """
        if self.get_hist_source() is None:
            return
        hist, bin_edges = self.compute_histogram()
        hist = np.concatenate((hist, [0]))  # pad so len(hist) == len(bin_edges)
        if self.logscale:
            hist = np.log(hist + 1)
        self.set_data(bin_edges, hist)
        # Autoscale only if logscale/bins have changed
        if self.bins != self.old_bins or self.logscale != self.old_logscale:
            if self.plot():
                self.plot().do_autoscale()
        self.old_bins = self.bins
        self.old_logscale = self.logscale

        plot = self.plot()
        if plot is not None:
            plot.do_autoscale(replot=True)

    def update_params(self):
        """Push histogram parameters into this item, then curve parameters."""
        self.histparam.update_hist(self)
        CurveItem.update_params(self)

    def get_item_parameters(self, itemparams):
        """Collect this item's parameter datasets (curve + histogram)."""
        CurveItem.get_item_parameters(self, itemparams)
        itemparams.add("HistogramParam", self, self.histparam)

    def set_item_parameters(self, itemparams):
        """Apply parameter datasets back onto this item (histogram first)."""
        update_dataset(
            self.histparam, itemparams.get("HistogramParam"), visible_only=True
        )
        self.histparam.update_hist(self)
        CurveItem.set_item_parameters(self, itemparams)


assert_interfaces_valid(HistogramItem)
class LevelsHistogram(CurvePlot):
    """Image levels histogram widget"""

    #: Signal emitted by LevelsHistogram when LUT range was changed
    SIG_VOI_CHANGED = Signal()

    def __init__(self, parent=None):
        super(LevelsHistogram, self).__init__(
            parent=parent, title="", section="histogram"
        )
        self.antialiased = False

        # a dict of dict : plot -> selected items -> HistogramItem
        self._tracked_items = {}
        self.curveparam = CurveParam(_("Curve"), icon="curve.png")
        self.curveparam.read_config(CONF, "histogram", "curve")

        self.histparam = HistogramParam(_("Histogram"), icon="histogram.png")
        self.histparam.logscale = False
        self.histparam.n_bins = 256

        # Range selection shape used to pick the LUT min/max levels; its color
        # signals whether several items with different ranges are selected.
        self.range = XRangeSelection(0, 1)
        self.range_mono_color = self.range.shapeparam.sel_line.color
        self.range_multi_color = CONF.get("histogram", "range/multi/color", "red")

        self.add_item(self.range, z=5)
        self.SIG_RANGE_CHANGED.connect(self.range_changed)
        self.set_active_item(self.range)

        self.setMinimumHeight(80)
        self.setAxisMaxMajor(self.Y_LEFT, 5)
        self.setAxisMaxMinor(self.Y_LEFT, 0)

        if parent is None:
            self.set_axis_title("bottom", "Levels")

    def connect_plot(self, plot):
        """Track selection/removal/activation signals of an image plot."""
        if not isinstance(plot, ImagePlot):
            # Connecting only to image plot widgets (allow mixing image and
            # curve widgets for the same plot manager -- e.g. in pyplot)
            return
        self.SIG_VOI_CHANGED.connect(plot.notify_colormap_changed)
        plot.SIG_ITEM_SELECTION_CHANGED.connect(self.selection_changed)
        plot.SIG_ITEM_REMOVED.connect(self.item_removed)
        plot.SIG_ACTIVE_ITEM_CHANGED.connect(self.active_item_changed)

    def tracked_items_gen(self):
        """Yield (item, histogram curve) pairs across all tracked plots."""
        for plot, items in list(self._tracked_items.items()):
            for item in list(items.items()):
                yield item  # tuple item,curve

    def __del_known_items(self, known_items, items):
        """Drop cached histogram curves whose source item is no longer in *items*."""
        del_curves = []
        for item in list(known_items.keys()):
            if item not in items:
                curve = known_items.pop(item)
                del_curves.append(curve)
        self.del_items(del_curves)

    def selection_changed(self, plot):
        """Rebuild the set of histogram curves for *plot*'s current selection.

        Keeps one HistogramItem per selected image item, shares the curve
        shading across selections, and rescales the Y axis to fit the data.
        """
        items = plot.get_selected_items(item_type=IVoiImageItemType)
        known_items = self._tracked_items.setdefault(plot, {})

        if items:
            self.__del_known_items(known_items, items)
            if len(items) == 1:
                # Removing any cached item for other plots
                for other_plot, _items in list(self._tracked_items.items()):
                    if other_plot is not plot:
                        if not other_plot.get_selected_items(
                            item_type=IVoiImageItemType
                        ):
                            other_known_items = self._tracked_items[other_plot]
                            self.__del_known_items(other_known_items, [])
        else:
            # if all items are deselected we keep the last known
            # selection (for one plot only)
            for other_plot, _items in list(self._tracked_items.items()):
                if other_plot.get_selected_items(item_type=IVoiImageItemType):
                    self.__del_known_items(known_items, [])
                    break

        for item in items:
            if item not in known_items:
                curve = HistogramItem(self.curveparam, self.histparam)
                curve.set_hist_source(item)
                self.add_item(curve, z=0)
                known_items[item] = curve

        nb_selected = len(list(self.tracked_items_gen()))
        if not nb_selected:
            self.replot()
            return

        # Share the shading budget equally between overlaid histograms.
        self.curveparam.shade = 1.0 / nb_selected
        for item, curve in self.tracked_items_gen():
            self.curveparam.update_curve(curve)
            self.histparam.update_hist(curve)

        self.active_item_changed(plot)

        # Rescaling histogram plot axes for better visibility
        ymax = None
        for item in known_items:
            curve = known_items[item]
            _x, y = curve.get_data()
            ymax0 = y.mean() + 3 * y.std()
            if ymax is None or ymax0 > ymax:
                ymax = ymax0
        ymin, _ymax = self.get_axis_limits("left")
        if ymax is not None:
            self.set_axis_limits("left", ymin, ymax)
            self.replot()

    def item_removed(self, item):
        """Forget the histogram curve of an image item removed from its plot."""
        for plot, items in list(self._tracked_items.items()):
            if item in items:
                curve = items.pop(item)
                self.del_items([curve])
                self.replot()
                break

    def active_item_changed(self, plot):
        """Sync the range selection shape with the active item's LUT range."""
        items = plot.get_selected_items(item_type=IVoiImageItemType)
        if not items:
            # XXX: workaround
            return

        active = plot.get_last_active_item(IVoiImageItemType)
        if active:
            active_range = active.get_lut_range()
        else:
            active_range = None

        multiple_ranges = False
        for item, curve in self.tracked_items_gen():
            if active_range != item.get_lut_range():
                multiple_ranges = True
        if active_range is not None:
            _m, _M = active_range
            self.set_range_style(multiple_ranges)
            self.range.set_range(_m, _M, dosignal=False)
        self.replot()

    def set_range_style(self, multiple_ranges):
        """Color the range shape to indicate single vs. divergent LUT ranges."""
        if multiple_ranges:
            self.range.shapeparam.sel_line.color = self.range_multi_color
        else:
            self.range.shapeparam.sel_line.color = self.range_mono_color
        self.range.shapeparam.update_range(self.range)

    def set_range(self, _min, _max):
        """Move the range selection to (_min, _max); returns False if invalid."""
        if _min < _max:
            self.set_range_style(False)
            self.range.set_range(_min, _max)
            self.replot()
            return True
        else:
            # Range was not changed
            return False

    def range_changed(self, _rangesel, _min, _max):
        """Apply the user-dragged range to every tracked item's LUT."""
        for item, curve in self.tracked_items_gen():
            item.set_lut_range([_min, _max])
        self.SIG_VOI_CHANGED.emit()

    def set_full_range(self):
        """Set range bounds to image min/max levels"""
        _min = _max = None
        for item, curve in self.tracked_items_gen():
            imin, imax = item.get_lut_range_full()
            if _min is None or _min > imin:
                _min = imin
            if _max is None or _max < imax:
                _max = imax
        if _min is not None:
            self.set_range(_min, _max)

    def apply_min_func(self, item, curve, min):
        # Replace only the lower LUT bound (used via apply_range_function).
        _min, _max = item.get_lut_range()
        return min, _max

    def apply_max_func(self, item, curve, max):
        # Replace only the upper LUT bound (used via apply_range_function).
        _min, _max = item.get_lut_range()
        return _min, max

    def reduce_range_func(self, item, curve, percent):
        # Outlier-clipped range based on the curve's histogram.
        return lut_range_threshold(item, curve.bins, percent)

    def apply_range_function(self, func, *args, **kwargs):
        """Apply func(item, curve, ...) -> (min, max) to every tracked item."""
        item = None
        for item, curve in self.tracked_items_gen():
            _min, _max = func(item, curve, *args, **kwargs)
            item.set_lut_range([_min, _max])
        self.SIG_VOI_CHANGED.emit()
        if item is not None:
            self.active_item_changed(item.plot())

    def eliminate_outliers(self, percent):
        """
        Eliminate outliers:
        eliminate percent/2*N counts on each side of the histogram
        (where N is the total count number)
        """
        self.apply_range_function(self.reduce_range_func, percent)

    def set_min(self, _min):
        """Set the lower LUT bound of all tracked items."""
        self.apply_range_function(self.apply_min_func, _min)

    def set_max(self, _max):
        """Set the upper LUT bound of all tracked items."""
        self.apply_range_function(self.apply_max_func, _max)
class EliminateOutliersParam(DataSet):
    # Settings dataset for the "eliminate outliers" action: percentage of
    # histogram counts to clip (upper bound kept strictly below 100%).
    percent = FloatItem(
        _("Eliminate outliers") + " (%)", default=2.0, min=0.0, max=100.0 - 1e-6
    )
class ContrastAdjustment(PanelWidget):
    """Contrast adjustment tool"""

    __implements__ = (IPanel,)
    PANEL_ID = ID_CONTRAST
    PANEL_TITLE = _("Contrast adjustment tool")
    PANEL_ICON = "contrast.png"

    def __init__(self, parent=None):
        """Build the panel: title row, LevelsHistogram plot, vertical toolbar,
        and a local plot manager with the standard plot-interaction tools."""
        super(ContrastAdjustment, self).__init__(parent)

        self.local_manager = None  # local manager for the histogram plot
        self.manager = None  # manager for the associated image plot

        # Storing min/max markers for each active image
        self.min_markers = {}
        self.max_markers = {}

        # Select point tools
        self.min_select_tool = None
        self.max_select_tool = None

        style = "<span style='color: #444444'><b>%s</b></span>"
        layout, _label = get_image_layout(
            self.PANEL_ICON, style % self.PANEL_TITLE, alignment=Qt.AlignCenter
        )
        layout.setAlignment(Qt.AlignCenter)
        vlayout = QVBoxLayout()
        vlayout.addLayout(layout)
        self.local_manager = PlotManager(self)
        self.histogram = LevelsHistogram(parent)
        vlayout.addWidget(self.histogram)
        self.local_manager.add_plot(self.histogram)
        hlayout = QHBoxLayout()
        self.setLayout(hlayout)
        hlayout.addLayout(vlayout)

        self.toolbar = toolbar = QToolBar(self)
        toolbar.setOrientation(Qt.Vertical)
        #        toolbar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        hlayout.addWidget(toolbar)

        # Add standard plot-related tools to the local manager
        lman = self.local_manager
        lman.add_tool(SelectTool)
        lman.add_tool(BasePlotMenuTool, "item")
        lman.add_tool(BasePlotMenuTool, "axes")
        lman.add_tool(BasePlotMenuTool, "grid")
        lman.add_tool(AntiAliasingTool)
        lman.get_default_tool().activate()

        self.outliers_param = EliminateOutliersParam(self.PANEL_TITLE)

    def register_panel(self, manager):
        """Register panel to plot manager"""
        self.manager = manager
        default_toolbar = self.manager.get_default_toolbar()
        self.manager.add_toolbar(self.toolbar, "contrast")
        self.manager.set_default_toolbar(default_toolbar)
        self.setup_actions()
        for plot in manager.get_plots():
            self.histogram.connect_plot(plot)

    def configure_panel(self):
        """Configure panel"""
        # Point-picking tools that read a level directly off the image.
        self.min_select_tool = self.manager.add_tool(
            SelectPointTool,
            title=_("Minimum level"),
            on_active_item=True,
            mode="create",
            tip=_("Select minimum level on image"),
            toolbar_id="contrast",
            end_callback=self.apply_min_selection,
        )
        self.max_select_tool = self.manager.add_tool(
            SelectPointTool,
            title=_("Maximum level"),
            on_active_item=True,
            mode="create",
            tip=_("Select maximum level on image"),
            toolbar_id="contrast",
            end_callback=self.apply_max_selection,
        )

    def get_plot(self):
        """Return the currently active image plot of the associated manager."""
        return self.manager.get_active_plot()

    def closeEvent(self, event):
        # Hide the panel instead of destroying it so it can be reopened.
        self.hide()
        event.ignore()

    def setup_actions(self):
        """Create the toolbar actions: full range and eliminate-outliers."""
        fullrange_ac = create_action(
            self,
            _("Full range"),
            icon=get_icon("full_range.png"),
            triggered=self.histogram.set_full_range,
            tip=_("Scale the image's display range " "according to data range"),
        )
        autorange_ac = create_action(
            self,
            _("Eliminate outliers"),
            icon=get_icon("eliminate_outliers.png"),
            triggered=self.eliminate_outliers,
            tip=_(
                "Eliminate levels histogram "
                "outliers and scale the image's "
                "display range accordingly"
            ),
        )
        add_actions(self.toolbar, [fullrange_ac, autorange_ac])

    def eliminate_outliers(self):
        """Ask for a clip percentage (with live preview) and apply it."""

        def apply(param):
            self.histogram.eliminate_outliers(param.percent)

        if self.outliers_param.edit(self, apply=apply):
            apply(self.outliers_param)

    def apply_min_selection(self, tool):
        """Use the picked image point's value as the new minimum level."""
        item = self.get_plot().get_last_active_item(IVoiImageItemType)
        point = self.min_select_tool.get_coordinates()
        z = item.get_data(*point)
        self.histogram.set_min(z)

    def apply_max_selection(self, tool):
        """Use the picked image point's value as the new maximum level."""
        item = self.get_plot().get_last_active_item(IVoiImageItemType)
        point = self.max_select_tool.get_coordinates()
        z = item.get_data(*point)
        self.histogram.set_max(z)

    def set_range(self, _min, _max):
        """Set contrast panel's histogram range"""
        self.histogram.set_range(_min, _max)
        # Update the levels histogram in case active item data has changed:
        self.histogram.selection_changed(self.get_plot())


assert_interfaces_valid(ContrastAdjustment)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logforce_settings as settings
from logforce_db import db_fetch_many
def main():
    """Fetch and print the rail cars of train 701, ordered by sort order.

    Output format is a fixed-width table row per car:
    |rail_car_id|car_number|sort_order|.
    """
    # Fix: the original assigned an alternative query (cars of the train
    # belonging to delivery order 2964952) to `sql` and immediately
    # overwrote it — dead code. Kept here as reference:
    #   SELECT rail_car_id, car_number, sort_order FROM rail_car WHERE train_id = (
    #       SELECT train_id FROM delivery_order WHERE delivery_order_id = 2964952
    #   ) ORDER BY sort_order ASC;
    sql = """
        SELECT rail_car_id, car_number, sort_order FROM rail_car
        WHERE train_id = 701
        ORDER BY sort_order ASC;
    """
    results = db_fetch_many(sql)
    for result in results:
        print("|%3s|%10s|%3s|" % result)


if __name__ == '__main__':
    main()
|
import sys

# Fail fast on interpreters older than 3.6.
# Fix: the original check `version_info[0] >= 3 and version_info[1] >= 6`
# compared major and minor independently, so a hypothetical Python 4.5
# (minor 5 < 6) would be wrongly rejected. Tuple comparison handles the
# version ordering correctly.
if sys.version_info < (3, 6):
    raise Exception("dynamic_plot: Python 3.6 or a more recent version is required.")

from .dynamic_plot import *
|
# -*- coding: utf-8 -*-
__all__ = ('Analysis',)
import attr
from .core import FileLocation
from .functions import ProgramFunctions
from .insertions import ProgramInsertionPoints
from .loops import ProgramLoops
from .project import Project
from .statements import ProgramStatements
@attr.s(slots=True, auto_attribs=True, frozen=True)
class Analysis:
    """Stores the results of an analysis of a given project.

    Attributes
    ----------
    project: Project
        A description of the project that was analysed.
    loops: ProgramLoops
        The set of loop control-flow statements within the program.
    functions: ProgramFunctions
        The set of functions within the program.
    statements: ProgramStatements
        The set of statements within the program.
    insertions: ProgramInsertionPoints
        The insertion points identified within the program.
    """
    project: Project
    loops: ProgramLoops
    functions: ProgramFunctions
    statements: ProgramStatements
    insertions: ProgramInsertionPoints

    def is_inside_loop(self, location: FileLocation) -> bool:
        """Return True if the given location falls within a loop."""
        return self.loops.is_within_loop(location)

    def is_inside_function(self, location: FileLocation) -> bool:
        """Return True if the given location falls within a known function."""
        return self.functions.encloses(location) is not None

    def is_inside_void_function(self, location: FileLocation) -> bool:
        """Return True if the location is inside a function whose return type is 'void'."""
        f = self.functions.encloses(location)
        return f is not None and f.return_type == 'void'
|
# Takes 2 rasters: the first contains integer values defining groups of data (e.g., a rasterized shapefile)
# the second one contains values to group (e.g., slopes or elevation)
from lsdtt_xtensor_python import comparison_stats_from_2darrays as gpc
import numpy as np
# from matplotlib import pyplot as plt
#TODO add real data here
# First array is the index one
# Build a synthetic index raster: mostly 1s, with horizontal bands of
# group ids 5, 4 and 1 in the top rows.
index_array = np.ones((5000,5000), dtype = np.int32)
index_array[0:200,:] = 5
index_array[200:300,:] = 4
index_array[300:400,:] = 1
# Second array contains values
val_array = np.random.rand(5000,5000).astype(np.float32)
print("Getting values in CPP")
# The code takes the following arguments: the 2 arrays ofc, an optional value to ignore (e.g., NoDataValue), and the number of rows and cols
# (10000000 here is a NoData sentinel that never occurs in the random values).
test = gpc(index_array,val_array,10000000,index_array.shape[0],index_array.shape[1])
print("Done")
|
import torch
from torch import nn
from ..downstream_module import DownStreamModule
import torch.nn.functional as F
from .resnet_2d import ResNet2d
# Profile Net
class StretchNet(nn.Module):
def __init__(self, dropout):
super(StretchNet, self).__init__()
self.bilstm = nn.LSTM(20, 128, num_layers=1, dropout=dropout,
bidirectional=True, bias=True)
self.fc = nn.Linear(256, 1)
self.bn1 = nn.BatchNorm1d(256)
self.bn2 = nn.BatchNorm1d(1)
def forward(self, profile, MSA_C=None):
p, _ = self.bilstm(profile.transpose(0, 1))
p = F.relu(p.transpose(0, 1))
p = self.bn1(p.transpose(1, 2)).transpose(1, 2)
p = self.fc(p)
p = self.bn2(p.transpose(1, 2)).transpose(1, 2)
N = F.sigmoid(p)
profile = (1 - profile ** N) / N
return profile, N
class ResnetWithProfileNet(ResNet2d):
    """
    contact predictor with resnet with profile net
    """

    def __init__(self, backbone_args, backbone_alphabet, num_classes, depth_reduction="mean", with_profilenet=True):
        """
        :param depth_reduction: mean, first
        """
        super().__init__(backbone_args, backbone_alphabet, depth_reduction)
        self.num_classes = num_classes
        self.with_profilenet = with_profilenet
        if self.with_profilenet == True:
            self.profileNet = StretchNet(dropout=0.3)
        # Input channels: pairwise concat of (128 reduced embedding + 20 profile)
        # per position, i.e. 2 * 148 = 256 + 40.
        self.first_layer = nn.Sequential(
            nn.Conv2d(256 + 40, 64, kernel_size=1),
        )

    def forward(self, tokens, embeddings, profile):
        """Predict a contact map from MSA embeddings and a sequence profile.

        Returns a tensor permuted to (batch, seq, seq, channels).
        """
        # 1. profile embedding
        if self.with_profilenet == True:
            profile_embedding, _ = self.profileNet(profile)
        else:
            profile_embedding = profile

        # 2. transformer embedding
        # remove auxiliary tokens
        embeddings, padding_masks = self.remove_aux_tokens_in_embeddings(tokens, embeddings)
        # embeddings: (batch, depth, seqlen, hiddendim)
        batch_size, depth, seqlen, hiddendim = embeddings.size()
        # reduction
        embeddings = self.msa_depth_reduction(embeddings, padding_masks)
        # pre reduction 768 -> 128
        embeddings = self.pre_layer(embeddings)

        # 3. concat
        embeddings = torch.cat([embeddings, profile_embedding], dim=-1)
        hiddendim = 128 + 20

        # 4. pairwise concat embedding: every (i, j) position pair gets the
        # concatenation of embedding_i and embedding_j as channels.
        embeddings = embeddings.unsqueeze(2).expand(batch_size, seqlen, seqlen, hiddendim)
        embedding_T = embeddings.permute(0, 2, 1, 3)
        pairwise_concat_embedding = torch.cat([embeddings, embedding_T], dim=3)
        pairwise_concat_embedding = pairwise_concat_embedding.permute(0, 3, 1, 2)

        # 5.
        out = self.first_layer(pairwise_concat_embedding)
        out = self.res_layers(out)
        out = self.final_layer(out)
        # contact_map = torch.sigmoid(out.squeeze(1))
        contact_map = out.permute(0, 2, 3, 1)
        return contact_map
|
import sys
class Graph():
    """Dense adjacency-matrix graph with Dijkstra shortest paths.

    ``graph[u][v] > 0`` means an edge u -> v with that weight.
    """

    def __init__(self, ver):
        self.V = ver  # number of vertices
        self.graph = [[0 for column in range(ver)] for row in range(ver)]

    def minDistance(self, dist, sptSet):
        """Return the unvisited vertex with the smallest tentative distance,
        or -1 if every unvisited vertex is unreachable (dist == sys.maxsize).

        Fix: the original shadowed the builtin `min` and left `min_index`
        unbound when nothing was reachable, relying on the resulting
        UnboundLocalError for control flow.
        """
        best = sys.maxsize
        best_index = -1
        for u in range(self.V):
            if dist[u] < best and sptSet[u] == False:
                best = dist[u]
                best_index = u
        return best_index

    def dijkstra(self, src):
        """Return shortest distances from ``src`` to every vertex.

        Preserves the original contract: if any vertex is unreachable (or the
        input is malformed), the whole result collapses to ``[-1] * V`` —
        previously triggered implicitly via a bare ``except``.
        """
        try:
            dist = [sys.maxsize] * self.V
            dist[src] = 0
            sptSet = [False] * self.V

            for _ in range(self.V):
                x = self.minDistance(dist, sptSet)
                if x == -1:
                    # Some vertex is unreachable: legacy sentinel result.
                    return [-1] * self.V
                sptSet[x] = True
                # Relax all edges leaving x.
                for y in range(self.V):
                    if self.graph[x][y] > 0 and sptSet[y] == False and dist[y] > dist[x] + self.graph[x][y]:
                        dist[y] = dist[x] + self.graph[x][y]
            return dist
        except Exception:
            # Narrowed from a bare `except:`; kept so malformed input (bad
            # src index, ragged matrix) still yields the legacy sentinel.
            return [-1] * self.V
if __name__ == "__main__":
    # Input: n, then n lines of 1-indexed neighbour lists (one line per
    # vertex), then a final line "src dst" (also 1-indexed). All edges get
    # weight 1, so the Dijkstra run effectively computes hop counts.
    n = int(input())
    g=Graph(n)
    k=[[0 for _ in range(n)] for _ in range(n)]
    for i in range(n):
        temp = list(map(int,input().split(" ")))
        for j in temp:
            k[i][j-1]=1
    result = list(map(int,input().split(" ")))
    g.graph = k
    d = g.dijkstra(result[0]-1)
    # Distance src -> dst; -1 if the graph had an unreachable vertex
    # (dijkstra collapses to all -1 in that case).
    print(d[result[1]-1])
|
import numpy as np
from typing import Callable, Sequence
from numbers import Number, Integral
from fractions import Fraction
def a_mu(mu: float):
    """Return (1 - 2*mu + sqrt(1 + 4*mu**2)) / 2 for the given mu."""
    root = np.sqrt(1 + 4 * mu ** 2)
    return (1 - 2 * mu + root) / 2
if __name__ == '__main__':
    # Small 2x2 numerical check parameterised by mu: evaluates A x + w,
    # A^T y - z and the elementwise products x*z, y*w for hand-built vectors.
    # NOTE(review): the structure (primal/dual vectors and complementarity
    # products) suggests an LCP/interior-point sanity check — confirm intent.
    mu = 0.0001
    a = a_mu(mu)
    A = np.array([
        [1,0],
        [0,1]
    ])
    b = np.array([
        [1],
        [1]
    ])
    x_ = np.array([
        [  a],
        [1/2]
    ])
    w_ = np.array([
        [1-a],
        [1/2]
    ])
    y_ = np.array([
        [mu/(1-a)],
        [2*mu]
    ])
    z_ = np.array([
        [mu/a],
        [2*mu]
    ])
    print(A@x_+w_)
    print(A.T@y_-z_)
    print(x_*z_)
    print(y_*w_)
|
"""Module for Concept SPARQL-queries."""
from string import Template
def build_concepts_by_publisher_query() -> str:
    """Build query to count concepts grouped by publisher.

    A concept's publisher falls back to its collection's publisher when the
    concept itself has none.
    NOTE(review): the doubled braces are emitted literally; ``{{ ... }}``
    appears to parse as a nested group graph pattern in SPARQL, so the query
    still works — likely a leftover from an f-string conversion. Confirm
    before "fixing".
    """
    return """
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>

SELECT ?organizationNumber (COUNT(DISTINCT ?concept) AS ?count)
FROM <https://concepts.fellesdatakatalog.digdir.no>
WHERE {{
    ?concept a skos:Concept .
    ?collection skos:member ?concept .
    OPTIONAL {{ ?concept dct:publisher ?conceptPublisher . }}
    OPTIONAL {{ ?collection dct:publisher ?collectionPublisher . }}
    BIND ( IF( EXISTS {{ ?concept dct:publisher ?conceptPublisher . }},
        ?conceptPublisher, ?collectionPublisher ) AS ?publisher ) .
    ?publisher dct:identifier ?organizationNumber .
}}
GROUP BY ?organizationNumber
"""
def build_org_concepts_query(organization_id: str) -> str:
    """Build query for an organizations concepts.

    The organization id is substituted for the ``$org_id`` placeholder;
    a concept's publisher falls back to its collection's publisher.
    """
    query_template = Template(
        """
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>

SELECT DISTINCT ?concept ?issued
FROM <https://concepts.fellesdatakatalog.digdir.no>
WHERE {{
    ?concept a skos:Concept .
    ?record foaf:primaryTopic ?concept .
    ?record dct:issued ?issued .
    ?collection skos:member ?concept .
    OPTIONAL {{ ?concept dct:publisher ?conceptPublisher . }}
    OPTIONAL {{ ?collection dct:publisher ?collectionPublisher . }}
    BIND ( IF( EXISTS {{ ?concept dct:publisher ?conceptPublisher . }},
        ?conceptPublisher, ?collectionPublisher ) AS ?publisher ) .
    ?publisher dct:identifier "$org_id" .
}}
"""
    )
    return query_template.substitute(org_id=organization_id)
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.simulation.datasets import emnist
class LoadDataTest(tf.test.TestCase, absltest.TestCase):
  """Tests for the synthetic EMNIST client data helpers."""

  def test_synthetic(self):
    client_data = emnist.get_synthetic(num_clients=4)
    self.assertLen(client_data.client_ids, 4)
    expected_structure = collections.OrderedDict([
        ('pixels', tf.TensorSpec(shape=(28, 28), dtype=tf.float32)),
        ('label', tf.TensorSpec(shape=(), dtype=tf.int32)),
    ])
    self.assertEqual(client_data.element_type_structure, expected_structure)
    for cid in client_data.client_ids:
      examples = self.evaluate(
          list(client_data.create_tf_dataset_for_client(cid)))
      images = [example['pixels'] for example in examples]
      labels = [example['label'] for example in examples]
      self.assertLen(labels, 10)
      self.assertCountEqual(labels, list(range(10)))
      self.assertLen(images, 10)
      for img in (images[0], images[-1]):
        self.assertEqual(img.shape, (28, 28))

  def test_infinite_transformed_data_doesnt_change(self):
    # Validates that the images generated by emnist.get_infinite() are always
    # the same: they are pseudorandom, but the seed is fixed, so outputs must
    # be consistent between runs.
    base_data = emnist.get_synthetic(num_clients=1)
    infinite_data = emnist.get_infinite(base_data, num_pseudo_clients=2)
    # Build the dataset for one of the 'infinite' clients (i.e., a client
    # formed by random translations, shearing, etc.).
    dataset = infinite_data.create_tf_dataset_for_client(
        infinite_data.client_ids[-1])
    iterator = iter(dataset)
    first_images = [next(iterator)['pixels'] for _ in range(3)]
    # The average pixel value of each of the first few images is pinned.
    expected_averages = (0.8107493, 0.8532163, 0.8392606)
    for img, expected in zip(first_images, expected_averages):
      self.assertAlmostEqual(np.average(img), expected)
# Run all TestCase classes in this module under the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
|
# -*- coding: utf-8 -*-
from keras.models import load_model
from keras.optimizers import Adam, SGD
from keras import metrics
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from helpers import *
from keras_helpers import *
from base_cnn import AbstractCNN
class FullCNN(AbstractCNN):
    """
    CNN trained on full-size images with heatmap ground truth (see
    ImageSequenceHeatmaps below), as opposed to a per-patch classifier.

    NOTE(review): the previous docstring ("Abstract base class for any CNN
    model") was copied from AbstractCNN; build_model() must still be
    provided by a subclass.
    """
    def __init__(self,
                 image_size=608,
                 nb_classes=2,
                 batch_size=8,
                 model_name='FullCNN',
                 data_format='channels_first',
                 relu_version=None,
                 leaky_relu_alpha=0.01):
        """ Construct a CNN classifier. All parameters are forwarded to AbstractCNN. """
        super().__init__(image_size, nb_classes, batch_size, model_name, data_format, relu_version, leaky_relu_alpha)

    def build_model(self):
        """
        This method is required to be overwritten by a child-class and supposed to generate the network.
        :return: Nothing
        """
        raise NotImplementedError('FullCNN::build_model is not yet implemented.')

    def preprocessing_train(self):
        """
        General preprocessing steps for correct training.
        :return: Number of steps for training and validation
        """
        # Read images from disk and generate training and validation set
        self.images, self.groundtruth = read_images_plus_labels()
        self.images_train, self.groundtruth_train, self.images_validate, self.groundtruth_validate = split_dataset(self.images, self.groundtruth)
        # Print some extraneous metrics helpful to users of this template.
        # (608 // IMAGE_SIZE)**2 is the number of patches cut out of one
        # full 608x608 image.
        batches_train = (self.images_train.shape[0] * (608 // self.IMAGE_SIZE)**2) // self.BATCH_SIZE
        batches_validate = (self.images_validate.shape[0] * (608 // self.IMAGE_SIZE)**2) // self.BATCH_SIZE
        print('Dataset shape:', self.images.shape, '( Train:', self.images_train.shape[0], '| Validate:', self.images_validate.shape[0], ')')
        print('Trainsteps per epoch:', batches_train, '| Validatesteps per epoch:', batches_validate)
        return batches_train, batches_validate

    def train(self, epochs, checkpoint=None, init_epoch=0):
        """
        Train this model.

        :param epochs: number of epochs to train
        :param checkpoint: optional path to a saved model to resume from
        :param init_epoch: epoch number to resume counting from
        """
        np.random.seed(42)
        batches_train, batches_validate = self.preprocessing_train()
        # Generators for image sequences which apply Monte Carlo sampling on them
        training_data = ImageSequenceHeatmaps(self.images_train, self.groundtruth_train, self.BATCH_SIZE,
                                              self.IMAGE_SIZE, batches_train)
        validation_data = ImageSequenceHeatmaps(self.images_validate, self.groundtruth_validate, self.BATCH_SIZE,
                                                self.IMAGE_SIZE, batches_validate)
        # NOTE(review): the comments below mention "validation average f1
        # score" but every callback actually monitors val_loss — confirm.
        # Reduce learning rate iff validation average f1 score not improving for AdamOptimizer
        reduce_lr_on_plateau_adam = ReduceLROnPlateau(monitor='val_loss',
                                                      factor=0.1,
                                                      patience=5,
                                                      verbose=1,
                                                      mode='min',
                                                      min_delta=5e-3,
                                                      cooldown=0,
                                                      min_lr=1e-7)
        # Stop training early iff validation average f1 score not improving for AdamOptimizer
        early_stopping_adam = EarlyStopping(monitor='val_loss',
                                            min_delta=5e-4,
                                            patience=11,
                                            verbose=1,
                                            mode='min')
        # Reduce learning rate iff validation average f1 score not improving for SGD
        reduce_lr_on_plateau_sgd = ReduceLROnPlateau(monitor='val_loss',
                                                     factor=0.25,
                                                     patience=5,
                                                     verbose=1,
                                                     mode='min',
                                                     min_delta=1e-4,
                                                     cooldown=0,
                                                     min_lr=1e-8)
        # Stop training early iff validation average f1 score not improving for AdamOptimizer
        early_stopping_sgd = EarlyStopping(monitor='val_loss',
                                           min_delta=1e-4,
                                           patience=11,
                                           verbose=1,
                                           mode='min')
        # Enable Tensorboard logging and show the graph -- Other options not sensible when using Monte Carlo sampling
        tensorboard = TensorBoard(log_dir='./logs',
                                  histogram_freq=0,
                                  batch_size=self.BATCH_SIZE,
                                  write_graph=True,
                                  write_grads=False,
                                  write_images=False,
                                  embeddings_freq=0,
                                  embeddings_layer_names=None,
                                  embeddings_metadata=None)
        # Hacky Tensorboard wrapper if a fixed validation set is given to model.generator_fit and this wrapper
        # tensorboard_hack = TensorBoardWrapper(validate_data,
        #                                       nb_steps=batches_validate,
        #                                       log_dir='./logs',
        #                                       histogram_freq=1,
        #                                       batch_size=batch_size,
        #                                       write_graph=True,
        #                                       write_grads=False,
        #                                       write_images=False)
        # Save the model's state on each epoch, given the epoch has better fitness
        # NOTE(review): save_best_only=False with period=500 actually saves a
        # checkpoint every 500 epochs regardless of fitness — confirm intent.
        filepath = "weights-" + self.MODEL_NAME + "-e{epoch:03d}-ce-{val_loss:.4f}.hdf5"
        checkpointer = ModelCheckpoint(filepath=filepath,
                                       monitor='val_loss',
                                       mode='min',
                                       verbose=1,
                                       save_best_only=False,
                                       period=500)
        # Shuffle/augment images at the start of each epoch
        image_shuffler = ImageShuffler(training_data, validation_data)

        def softmax_crossentropy_with_logits(y_true, y_pred):
            """
            Applies the loss function as found in the template not using logits (so use softmax layer at the end)
            :param y_true: Ground-truth values
            :param y_pred: Network predictions
            :return: Application of K.categorical_crossentropy()
            """
            return K.categorical_crossentropy(y_true, y_pred, from_logits=False, axis=1)
        # Define in a list what callbacks and metrics we want included.
        # NOTE(review): reduce_lr_on_plateau_adam and early_stopping_adam are
        # created above but not included in the Adam callback list — confirm
        # whether that is intentional.
        model_callbacks_adam = [tensorboard, checkpointer, image_shuffler]
        model_callbacks_sgd = [tensorboard, checkpointer, reduce_lr_on_plateau_sgd, early_stopping_sgd, image_shuffler]
        model_metrics = [metrics.categorical_accuracy]
        if checkpoint is not None:
            # Resume: the custom loss must be supplied to deserialize the model
            self.model = load_model(checkpoint, custom_objects={'softmax_crossentropy_with_logits': softmax_crossentropy_with_logits})
            print('Loaded checkpoint for model to continue training')
        else:
            self.model.compile(loss=softmax_crossentropy_with_logits,
                               optimizer=Adam(lr=1e-5))
            #metrics=model_metrics))
        try:
            self.model.fit_generator(
                generator=training_data,
                steps_per_epoch=batches_train,
                epochs=epochs,
                initial_epoch=init_epoch,
                verbose=1,
                callbacks=model_callbacks_adam,
                validation_data=validation_data,
                validation_steps=batches_validate,
                shuffle=False,  # Not needed, our generator shuffles everything already
                use_multiprocessing=False)  # This requires a thread-safe generator which we don't have
            # # TODO: Generate callback which makes this double-call to the network not required.
            # self.model.compile(loss=softmax_crossentropy_with_logits,
            #                    optimizer=SGD(lr=1e-4, momentum=0.9, nesterov=False),
            #                    metrics=model_metrics)
            #
            # self.model.fit_generator(
            #     generator=training_data,
            #     steps_per_epoch=batches_train,
            #     epochs=epochs,
            #     verbose=1,
            #     callbacks=model_callbacks_sgd,
            #     validation_data=validation_data,
            #     validation_steps=batches_validate,
            #     shuffle=False,  # Not needed, our generator shuffles everything already
            #     use_multiprocessing=False)  # This requires a thread-safe generator which we don't have
        except KeyboardInterrupt:
            # Do not throw away the model in case the user stops the training process
            filepath = "weights-" + self.MODEL_NAME + "-SIG2.hdf5"
            self.model.save(filepath, overwrite=True, include_optimizer=True)
            pass
        except:
            # Generic case for SIGUSR2. Stop model training and save current state.
            # NOTE(review): bare except deliberately catches everything
            # (including SystemExit) so the model is saved on any abort.
            filepath = "weights-" + self.MODEL_NAME + "-SIGUSR2.hdf5"
            self.model.save(filepath, overwrite=True, include_optimizer=True)
        print('Training completed')

    def classify(self, X):
        """
        Classify a set of samples. This method should be called after successful training and loading of the model.
        :param X: Full-size image which to classify.
        :return: List of predictions.
        """
        # Subdivide the images into blocks
        img_patches = generate_blocks(X, self.IMAGE_SIZE)
        # NOTE(review): this condition is true for both 'th' and 'tf'
        # orderings, so the channel axis is always moved — confirm intent.
        if K.image_dim_ordering() == 'th' or K.image_dim_ordering() == 'tf':
            img_patches = np.rollaxis(img_patches, 3, 1)
        # Run prediction
        Z = self.model.predict(img_patches)
        # channel 0 >= channel 1 -> 255 (uint8 mask), else 0
        Z = (np.greater_equal(Z[:, 0], Z[:, 1]) * 255.0).astype('uint8')
        # Regroup patches into images
        return group_blocks(Z, X.shape[0])
|
"""HTTP connection helpers"""
import os
import json
import socket
from urllib3.connection import HTTPConnection
import requests
from requests.auth import HTTPBasicAuth
from sap import get_logger, config_get
from sap.rest.errors import HTTPRequestError, UnexpectedResponseContent, UnauthorizedError, TimedOutRequestError
# Module-level guard so setup_keepalive() appends the socket options to
# urllib3's defaults only once per process.
KEEPALIVE_CONFIGURED = False


def mod_log():
    """ADT Module logger"""
    return get_logger()
def setup_keepalive():
    """Make sure we send keepalive TCP packets.

    Appends SO_KEEPALIVE (and, where the platform supports it, TCP_KEEPIDLE)
    to urllib3's default socket options. Idempotent: runs only once.
    """
    # pylint: disable=global-statement
    global KEEPALIVE_CONFIGURED

    if KEEPALIVE_CONFIGURED:
        mod_log().debug("KeepAlive already configured")
        return

    KEEPALIVE_CONFIGURED = True
    mod_log().debug("Updating urllib3.connection.HTTPConnection.default_socket_options with KeepAlive packets")

    # Special thanks to: https://www.finbourne.com/blog/the-mysterious-hanging-client-tcp-keep-alives
    options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
    # BUG FIX: socket.TCP_KEEPIDLE is not defined on every platform
    # (notably Windows and macOS); referencing it unconditionally raised
    # AttributeError there. Guard the lookup instead.
    if hasattr(socket, 'TCP_KEEPIDLE'):
        options.append((socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60))

    HTTPConnection.default_socket_options = HTTPConnection.default_socket_options + options
# pylint: disable=too-many-instance-attributes
class Connection:
    """HTTP communication built on top Python requests.

    Lazily creates a shared requests.Session whose cookies and X-CSRF-Token
    header are populated by an initial GET to the login path.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, icf_path, login_path, host, client, user, password, port=None, ssl=True, verify=True):
        """Parameters:
            - icf_path: string ICF service path used as the URL prefix
            - login_path: string path of the login resource (CSRF fetch)
            - host: string host name
            - client: string SAP client
            - user: string user name
            - password: string user password
            - port: string TCP/IP port for ADT
                (default 80 or 443 - it depends on the parameter ssl)
            - ssl: boolean to switch between http and https
            - verify: boolean to switch SSL validation on/off
        """
        setup_keepalive()

        if ssl:
            protocol = 'https'
            if port is None:
                port = '443'
        else:
            protocol = 'http'
            if port is None:
                port = '80'

        self._ssl_verify = verify
        self._base_url = '{protocol}://{host}:{port}/{icf_path}'.format(
            protocol=protocol, host=host, port=port, icf_path=icf_path)
        self._query_args = 'sap-client={client}&saml2=disabled'.format(
            client=client)

        self._user = user
        self._auth = HTTPBasicAuth(user, password)
        self._session = None
        self._login_path = login_path
        self._timeout = config_get('http_timeout')

    @property
    def user(self):
        """Connected user"""
        return self._user

    def _build_url(self, uri_path):
        """Creates complete URL from the path part
        """
        return '{base_url}/{uri_path}?{query_args}'.format(
            base_url=self._base_url, uri_path=uri_path,
            query_args=self._query_args)

    def _handle_http_error(self, req, res):
        """Raise the correct exception based on response content."""
        if res.status_code == 401:
            raise UnauthorizedError(req, res, self._user)

        raise HTTPRequestError(req, res)

    # pylint: disable=no-self-use
    def _retrieve(self, session, method, url, params=None, headers=None, body=None):
        """A helper method for easier testing."""
        req = requests.Request(method.upper(), url, params=params, data=body, headers=headers)
        req = session.prepare_request(req)

        mod_log().info('Executing %s %s', method, url)

        try:
            res = session.send(req, timeout=self._timeout)
        except requests.exceptions.ConnectTimeout as ex:
            raise TimedOutRequestError(req, self._timeout) from ex

        mod_log().debug('Response %s %s:\n++++\n%s\n++++', method, url, res.text)

        return (req, res)

    def _execute_with_session(self, session, method, url, params=None, headers=None, body=None):
        """Executes the given URL using the given method in
        the common HTTP session. Raises on HTTP status >= 400.
        """
        req, res = self._retrieve(session, method, url, params=params, headers=headers, body=body)

        if res.status_code >= 400:
            self._handle_http_error(req, res)

        return res

    def _get_session(self):
        """Returns the working HTTP session.
        The session's cookies are populated by executing a dummy GET which
        also retrieves X-CSRF-Token.
        """
        if self._session is None:
            self._session = requests.Session()
            self._session.auth = self._auth
            # requests.session.verify is either boolean or path to CA to use!
            self._session.verify = os.environ.get('SAP_SSL_SERVER_CERT', self._session.verify)

            if self._session.verify is not True:
                mod_log().info('Using custom SSL Server cert path: SAP_SSL_SERVER_CERT = %s', self._session.verify)
            elif self._ssl_verify is False:
                import urllib3
                urllib3.disable_warnings()
                mod_log().info('SSL Server cert will not be verified: SAP_SSL_VERIFY = no')
                self._session.verify = False

            login_headers = {'x-csrf-token': 'Fetch'}
            csrf_token = None

            url = self._build_url(self._login_path)

            # BUG FIX: previously a swallowed 404 left 'response' unassigned
            # and the header check below raised UnboundLocalError. Initialize
            # it and only read the token from a successful response.
            response = None
            try:
                response = self._execute_with_session(self._session, 'GET', url, headers=login_headers)
            except HTTPRequestError as ex:
                # A missing login resource (404) is tolerated; anything else is fatal.
                if ex.response.status_code != 404:
                    raise ex

            if response is not None and 'x-csrf-token' in response.headers:
                csrf_token = response.headers['x-csrf-token']

            self._session.headers.update({'x-csrf-token': csrf_token})

        return self._session

    def execute(self, method, uri_path, params=None, headers=None, body=None, accept=None, content_type=None):
        """Executes the given URI as an HTTP request and returns
        the requests response object. Raises UnexpectedResponseContent when
        an 'accept' value is given and the response Content-Type mismatches.
        """
        session = self._get_session()

        url = self._build_url(uri_path)

        if headers is None:
            headers = {}

        if accept is not None:
            if isinstance(accept, list):
                headers['Accept'] = ', '.join(accept)
            else:
                headers['Accept'] = accept

        if content_type is not None:
            headers['Content-Type'] = content_type

        if not headers:
            headers = None

        resp = self._execute_with_session(session, method, url, params=params, headers=headers, body=body)

        if accept:
            resp_content_type = resp.headers['Content-Type']

            if isinstance(accept, str):
                accept = [accept]

            if not any((resp_content_type.startswith(accepted) for accepted in accept)):
                raise UnexpectedResponseContent(accept, resp_content_type, resp.text)

        return resp

    def get_json(self, uri_path, params=None):
        """Executes a GET HTTP request with the headers Accept = application/json.
        """
        response = self.execute('GET', uri_path, accept='application/json', params=params)
        return response.json()

    def post_obj_as_json(self, uri_path, obj, accept=None):
        """Executes a POST HTTP request with the headers Content-Type = application/json.
        """
        body = json.dumps(obj)
        return self.execute('POST', uri_path, content_type='application/json', body=body, accept=accept)
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.substr
~~~~~~~~~~~~~~~~~~~
Provides functions for obtaining a portion of a string.
You enter two numbers to tell the module the starting character position and
the length of the resulting substring. If your input string is "ABCDEFG", then
a From value of 2 and length of 4 gives you a resulting string of "CDEF".
Notice that the first character in the original string is 0, not 1.
If you enter too long a length, the module just returns a substring to the end
of the input string, so if you enter a From of 3 and a length of 100, you'll
get a result of "DEFG".
Examples:
basic usage::
>>> from riko.modules.substr import pipe
>>>
>>> conf = {'start': '3', 'length': '4'}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['substr'] == 'lo w'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
from . import processor
import pygogo as gogo
# Default pipe options: operate on the 'content' field as text and coerce
# conf values ('start', 'length') to int.
OPTS = {'ftype': 'text', 'ptype': 'int', 'field': 'content'}
# length == 0 means "slice to the end of the string".
DEFAULTS = {'start': 0, 'length': 0}
logger = gogo.Gogo(__name__, monolog=True).logger
def parser(word, objconf, skip=False, **kwargs):
    """Extract the configured substring from the pipe content.

    Args:
        word (str): The string to parse
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: substr)
        stream (dict): The original item

    Returns:
        The unchanged stream when *skip* is set; otherwise the slice
        word[start:start + length] (to the end of the string when length
        is falsy).
    """
    if skip:
        return kwargs['stream']

    stop = objconf.start + objconf.length if objconf.length else None
    return word[objconf.start:stop]
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """A processor module that asynchronously returns a substring of a field
    of an item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain the keys 'start' or
            'length'.
            start (int): starting position (default: 0)
            length (int): count of characters to return (default: 0, i.e., all)
        assign (str): Attribute to assign parsed content (default: substr)
        field (str): Item attribute to operate on (default: 'content')

    Returns:
        Deferred: twisted.internet.defer.Deferred item with transformed content

    Examples:
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(next(x)['substr'])
        ...     conf = {'start': '3', 'length': '4'}
        ...     d = async_pipe({'content': 'hello world'}, conf=conf)
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        lo w
    """
    # All real work happens in parser(); the @processor decorator supplies
    # conf handling, field extraction and the async (Deferred) wrapping.
    return parser(*args, **kwargs)
@processor(**OPTS)
def pipe(*args, **kwargs):
    """A processor that returns a substring of a field of an item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain the keys 'start' or
            'length'.
            start (int): starting position (default: 0)
            length (int): count of characters to return (default: 0, i.e., all)
        assign (str): Attribute to assign parsed content (default: substr)
        field (str): Item attribute to operate on (default: 'content')

    Yields:
        dict: an item with the substring

    Examples:
        >>> conf = {'start': '3', 'length': '4'}
        >>> item = {'content': 'hello world'}
        >>> next(pipe(item, conf=conf))['substr'] == 'lo w'
        True
        >>> conf = {'start': '3'}
        >>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
        >>> next(pipe({'title': 'Greetings'}, **kwargs))['result'] == 'etings'
        True
    """
    # Synchronous twin of async_pipe; delegates all work to parser().
    return parser(*args, **kwargs)
|
# Input: first line is the query count Q; each of the next Q lines holds
# three integers x, d, n. zip(*) transposes the rows into three tuples.
Q = int(input())
x, d, n = (
    zip(*(map(int, input().split()) for _ in range(Q))) if Q else
    ((), (), ())
)
MOD = 1000003


class ModInt:
    """Integer arithmetic modulo the prime MOD with operator overloading.

    The reduced value is exposed as the public attribute ``x``. Division
    relies on Fermat's little theorem (MOD must be prime).
    """

    def __init__(self, x):
        self.x = x % MOD

    def __str__(self):
        return str(self.x)

    __repr__ = __str__

    def __add__(self, other):
        rhs = other.x if isinstance(other, ModInt) else other
        return ModInt(self.x + rhs)

    def __sub__(self, other):
        rhs = other.x if isinstance(other, ModInt) else other
        return ModInt(self.x - rhs)

    def __mul__(self, other):
        rhs = other.x if isinstance(other, ModInt) else other
        return ModInt(self.x * rhs)

    def __truediv__(self, other):
        # multiply by the modular inverse (Fermat: a^(MOD-2) mod MOD)
        rhs = other.x if isinstance(other, ModInt) else other
        return ModInt(self.x * pow(rhs, MOD - 2, MOD))

    def __pow__(self, other):
        exponent = other.x if isinstance(other, ModInt) else other
        return ModInt(pow(self.x, exponent, MOD))

    def __radd__(self, other):
        return ModInt(other + self.x)

    def __rsub__(self, other):
        return ModInt(other - self.x)

    def __rmul__(self, other):
        return ModInt(other * self.x)

    def __rtruediv__(self, other):
        return ModInt(other * pow(self.x, MOD - 2, MOD))

    def __rpow__(self, other):
        return ModInt(pow(other, self.x, MOD))
# Precompute the factorial of every integer below MOD.
# Each query is answered as d^n * (x/d + (n - 1))! / ((x/d) - 1)!
f = [0 for _ in range(MOD)]
f[0] = ModInt(1)
for i in range(1, MOD):
    f[i] = i * f[i - 1]


def q(X, D, N):
    # Y = x/d in the modular field; unused (set to 0) when d == 0.
    Y = (
        0 if D.x == 0 else
        X / D
    )
    # d == 0: the product collapses to x^n.
    # When the factorial index would reach MOD, the product presumably
    # contains a factor divisible by MOD, so the answer is 0.
    return (
        X**N if D.x == 0 else
        D**N * f[Y.x + N - 1] / f[Y.x - 1] if Y.x + N - 1 < MOD else
        0
    )


ans = '\n'.join(
    str(q(X, D, N))
    for X, D, N in zip(map(ModInt, x), map(ModInt, d), n)
)

# Output
print(ans)
|
from kaikobittrex import Index
|
from math import *
au = 149600 # one astronomical unit, in million meters
''' Polar equation of an elliptical orbit with star at the origin
and the start being the smaller of the two foci
(i.e. start is the "left" focus):
r = p / (1 - e * cos(theta))
p is the semi latus-rectum
e is the eccentricity
'''
class Orbit:
    """Elliptical orbit in polar form r = p / (1 - e*cos(theta)) with the
    star at the origin (the 'left' focus)."""

    def __init__(self, period, eccentricity, alpha):
        self.period = period       # orbital period
        self.eccen = eccentricity  # eccentricity e
        self.alpha = alpha         # semi-major axis

    def lr(self):
        """Semi latus-rectum as a function of semi-major axis and eccentricity."""
        return self.alpha * (1.0 - self.eccen**2)

    def beta_from_e(self):
        """Semi-minor axis derived from the eccentricity."""
        return self.alpha * sqrt(1.0 - self.eccen**2)

    def focus(self):
        """Distance from the ellipse centre to either focus."""
        semi_minor = self.beta_from_e()
        # Focus if ellipse was centered at origin
        return sqrt(self.alpha**2 - semi_minor**2)

    def eccentricity(self, beta):
        """Eccentricity for a given semi-minor axis beta."""
        return 1.0 * sqrt(self.alpha**2 - beta**2) / self.alpha

    def omega(self, theta):
        """Angular velocity from Kepler's second law (equal areas in equal
        times): Period * r^2 * omega = 2*pi*alpha*beta, with r substituted
        from the polar form of the ellipse."""
        semi_latus = self.lr()
        semi_minor = self.beta_from_e()
        return 2*pi * self.alpha * semi_minor * (1 - self.eccen * cos(theta))**2 / (self.period * semi_latus**2)

    def solve_orbit(self, num_points):
        """Integrate theta(t) over one period with classic RK4 and return
        the polar coordinates (time, radius, theta) of the orbit."""
        step = 1.0*self.period / num_points
        times = [0]
        angles = [0]
        for k in range(1, num_points):
            y = angles[-1]
            k1 = step * self.omega(y)
            k2 = step * self.omega(y + k1/2.0)
            k3 = step * self.omega(y + k2/2.0)
            k4 = step * self.omega(y + k3)
            y += 1.0 / 6.0 * (k1 + 2*k2 + 2*k3 + k4)
            print("%f %f" % (k * step, y))
            angles.append(y)
            times.append(k * step)
        # Solve for radius from the polar equation
        semi_latus = self.lr()
        radii = [semi_latus / (1.0 - self.eccen * cos(a)) for a in angles]
        return times, radii, angles

    def rotate_2d(self, phi, x, y):
        """Rotate the coordinate lists (x, y) by phi degrees about the origin."""
        phi = phi * pi / 180.0
        x_rot = [xi * cos(phi) - yi * sin(phi) for xi, yi in zip(x, y)]
        y_rot = [xi * sin(phi) + yi * cos(phi) for xi, yi in zip(x, y)]
        return x_rot, y_rot

    def tilt_3d(self, inclination, radius, theta):
        """Convert the polar orbit to Cartesian and incline it wrt the x-y
        (earth's) plane; returns (x, y, z) lists."""
        x = [r * cos(t) for r, t in zip(radius, theta)]
        y = [r * sin(t) for r, t in zip(radius, theta)]
        phi = inclination * pi / 180.0
        x_tilt = [xi * cos(phi) for xi in x]
        y_tilt = list(y)
        z_tilt = [-sin(phi) * xi for xi in x]
        return x_tilt, y_tilt, z_tilt
# Abnoba comet orbit parameters
eccentricity = 0.17883
alpha = 2.78907 * au  # semi-major axis, in million meters
inclination = 14.43869223703236  # degrees, wrt the x-y (earth's) plane
rotation = 229.2073001218772  # degrees, in-plane rotation applied to Earth's orbit below
period = 1701.3 # in days
Abnoba = Orbit(period, eccentricity, alpha)
# One sample per day over a full period
time, radius, theta = Abnoba.solve_orbit(int(period))
Ax, Ay, Az = Abnoba.tilt_3d(inclination, radius, theta)
# Earths' orbit parameters:
eccentricity = 0.0167
alpha = 1.0 * au
period = 365.256
Earth = Orbit(period, eccentricity, alpha)
e_time, e_radius, e_theta = Earth.solve_orbit(int(period))
Ex, Ey, Ez = Earth.tilt_3d(0, e_radius, e_theta)
# Rotate Earth's orbit in-plane by -rotation so both orbits share a common
# reference direction — NOTE(review): presumably aligns the argument of
# periapsis of the two orbits; confirm against the data source.
Ex, Ey = Earth.rotate_2d(-rotation, Ex, Ey)
# Store in Cartesian co-ordinates (one "t x y z" line per sample)
with open('earth.txt', 'w') as f:
    for i in range(len(e_time)):
        line = str(e_time[i])+" "+str(Ex[i])+" "+str(Ey[i])+" "+str(Ez[i])+'\n'
        f.write(line)
with open('abnoba.txt', 'w') as f:
    for i in range(len(time)):
        line = str(time[i])+" "+str(Ax[i])+" "+str(Ay[i])+" "+str(Az[i])+'\n'
        f.write(line)
|
from random import *
# Random test-case generator (Python 2 — note the print statements).
# Output format: "n x0 y0 x1 y1" header line, then n random segments.
n = 100
x0 = randint(-100, 100)
y0 = randint(-100, 100)
x1 = randint(-100, 100)
y1 = randint(-100, 100)
# The first random segment is printed on the same line as the count n.
print n, x0, y0, x1, y1
for i in range(n):
    x0 = randint(-100, 100)
    y0 = randint(-100, 100)
    x1 = randint(-100, 100)
    y1 = randint(-100, 100)
    print x0, y0, x1, y1
|
import os
from flask import Flask
import urllib.request
app = Flask(__name__)
# Target site to proxy; override with the URL environment variable.
url = os.getenv('URL', "it20.info")
@app.route("/")
def get_url():
    """Fetch http://<url> and return the raw response body (bytes)."""
    with urllib.request.urlopen('http://' + url) as response:
        html = response.read()
    return html
if __name__ == '__main__':
    # BUG FIX: app.debug was assigned AFTER app.run(); since run() blocks
    # until the server exits, that line was unreachable. Debug mode must be
    # enabled before starting the server.
    app.debug = True
    app.run(host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv('PORT', 8080)))
# opcode 1: 1 VARa VARb PTRc => *PTRc = VARa + VARb
# opcode 2: 2 VARa VARb PTRc => *PTRc = VARa * VARb
# opcode 3: 3 PTRa => *PTRa = input()
# opcode 4: 4 VARa => output(VARa)
# opcode 5: 5 VARa VARb => *PC = VARb if VARa != 0
# opcode 6: 6 VARa VARb => *PC = VARb if VARa == 0
# opcode 7: 7 VARa VARb PTRc => *PTRc = VARa < VARb
# opcode 8: 8 VARa VARb PTRc => *PTRc = VARa == VARb
# opcode 99: 99 => halt
# step: pos += 4
import sys
# Registry mapping opcode -> (handler function, argument count);
# populated by the @operation decorator below.
OPERATIONS = {}
class Value:
    """A mutable integer cell of the program memory (attribute ``n``)."""

    def __init__(self, n):
        self.n = int(n)

    def __repr__(self):
        return str(self.n)
class PC:
    """Program counter that suppresses the next auto-step after a jump.

    Assigning to ``n`` (a jump) clears ``allow_step`` so the following
    call to step() is a no-op instead of skipping past the jump target.
    """

    def __init__(self):
        self._n = 0
        self.allow_step = True

    @property
    def n(self):
        return self._n

    @n.setter
    def n(self, value):
        # a jump: cancel the upcoming auto-increment
        self.allow_step = False
        self._n = value

    def step(self, count):
        if not self.allow_step:
            # swallow exactly one step after a jump
            self.allow_step = True
            return
        self._n += count
class Halt(Exception):
    """Raised by opcode 99 to stop the interpreter loop in run()."""
    pass
# operations
def operation(opcode, args):
    """Decorator factory registering a handler in OPERATIONS.

    The decorated function is stored under *opcode* together with its
    argument count *args* and returned unchanged.
    """
    def register(func):
        OPERATIONS[opcode] = (func, args)
        return func
    return register
@operation(1, args=3)
def op1(a, b, c):
    # add: *c = a + b
    c.n = a.n + b.n
@operation(2, args=3)
def op2(a, b, c):
    # multiply: *c = a * b
    c.n = a.n * b.n
@operation(3, args=1)
def op3(a):
    # input: *a = int read from stdin
    a.n = int(input())
@operation(4, args=1)
def op4(a):
    # output: print a
    print(a.n)
@operation(5, args=2)
def op5(a, b):
    # jump-if-true: set the program counter when a != 0
    # ('pos' is the module-level PC created in __main__)
    if a.n != 0:
        pos.n = b.n
@operation(6, args=2)
def op6(a, b):
    # jump-if-false: set the program counter when a == 0
    if a.n == 0:
        pos.n = b.n
@operation(7, args=3)
def op7(a, b, c):
    # less-than: *c = 1 if a < b else 0
    c.n = int(a.n < b.n)
@operation(8, args=3)
def op8(a, b, c):
    # equals: *c = 1 if a == b else 0
    c.n = int(a.n == b.n)
@operation(99, args=0)
def op99():
    # halt the interpreter
    raise Halt
# runner
def run(p):
    """Execute intcode program *p* (a list of Value cells) until opcode 99.

    Decodes each instruction as opcode = n % 100 with per-argument parameter
    modes in the higher digits. Relies on the module-level PC instance
    ``pos`` created in __main__ (the jump opcodes assign to it directly).
    """
    while True:
        # print(f"{pos}: {p}")
        instruction = p[pos.n]
        opcode = instruction.n % 100
        if opcode not in OPERATIONS:
            raise RuntimeError(f"Unknown operation: {opcode} (pc={pos.n})")
        op, nargs = OPERATIONS[opcode]
        args = []
        for i in range(nargs):
            # 0 = position mode, 1 = immediate mode
            # (digit i+2 of the instruction, counting from the right)
            mode = (instruction.n // (10 ** (i + 2)) % 10)
            if mode == 0:
                args.append(p[p[pos.n + i + 1].n]) # value at address=argument i
            else:
                args.append(Value(p[pos.n + i + 1].n)) # value=argument i
        try:
            op(*args)
        except Halt:
            break
        # advance past the instruction unless a jump already moved the PC
        pos.step(nargs + 1)
if __name__ == '__main__':
    # Program is a single comma-separated line of integers on stdin.
    program = list(map(Value, input().split(',')))
    pos = PC()  # shared program counter used by run() and the jump opcodes
    run(program)
|
from typing import Union
from loguru import logger
from sqlalchemy.dialects import postgresql
from .database import database
from .models import todos
from .schemas import TodoCreate, TodoUpdate
@database.transaction()
async def create(payload: TodoCreate) -> Union[int, None]:
    """Insert a new todo row; returns the generated primary key, or None
    when the insert produced no id."""
    logger.debug("payload: {}".format(payload))
    stmt = todos.insert().values(title=payload.title, completed=payload.completed)
    logger.debug(
        "stmt: {}".format(
            stmt.compile(
                dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}
            )
        )
    )
    # renamed from 'id' to avoid shadowing the builtin id()
    todo_id = await database.execute(stmt)
    logger.debug("id: {}".format(todo_id))
    if not todo_id:
        return None
    return todo_id
async def read() -> Union[list, None]:
    """Fetch every todo row; returns None when the table is empty.

    Annotation corrected: fetch_all yields a list of records, not a dict.
    """
    stmt = todos.select()
    logger.debug(
        "stmt: {}".format(
            stmt.compile(
                dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}
            )
        )
    )
    rows = await database.fetch_all(stmt)
    if not rows:
        return None
    return rows
async def read_by_id(todo_id: int) -> Union[dict, None]:
    """Fetch a single todo row by primary key; None when it does not exist."""
    stmt = todos.select(todos.columns.id == todo_id)
    compiled = stmt.compile(
        dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}
    )
    logger.debug("stmt: {}".format(compiled))
    row = await database.fetch_one(stmt)
    return row if row else None
@database.transaction()
async def update(payload: TodoUpdate) -> Union[TodoUpdate, None]:
    """Update an existing todo; returns the payload back (not the refreshed
    DB row), or None when the id is unknown.

    Annotation corrected: the function returns the TodoUpdate payload.
    """
    row = await read_by_id(payload.id)
    if not row:
        return None
    stmt = (
        todos.update()
        .where(todos.columns.id == payload.id)
        .values(title=payload.title, completed=payload.completed)
    )
    logger.debug(
        "stmt: {}".format(
            stmt.compile(
                dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}
            )
        )
    )
    await database.execute(stmt)
    return payload
@database.transaction()
async def delete_by_id(todo_id: int) -> Union[int, None]:
    """Delete the todo with the given id; returns the id, or None when absent."""
    existing = await read_by_id(todo_id)
    logger.debug("row: {}".format(existing))
    if not existing:
        return None
    stmt = todos.delete().where(todos.columns.id == todo_id)
    compiled = stmt.compile(
        dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}
    )
    logger.debug("stmt: {}".format(compiled))
    await database.execute(stmt)
    return todo_id
|
import numpy as np
import numpy.random as npr
import scipy as sc
from scipy import linalg
from operator import add
from functools import reduce, partial
from sds.utils.linalg import symmetrize
from sds.utils.general import Statistics as Stats
class _GaussianBase:
def __init__(self, dim, mu=None):
self.dim = dim
self.mu = mu
@property
def params(self):
raise NotImplementedError
@params.setter
def params(self, values):
raise NotImplementedError
@property
def sigma(self):
raise NotImplementedError
@sigma.setter
def sigma(self, value):
raise NotImplementedError
@property
def lmbda(self):
raise NotImplementedError
@lmbda.setter
def lmbda(self, value):
raise NotImplementedError
@property
def nat_param(self):
return self.std_to_nat(self.params)
@nat_param.setter
def nat_param(self, natparam):
self.params = self.nat_to_std(natparam)
@staticmethod
def std_to_nat(params):
raise NotImplementedError
@staticmethod
def nat_to_std(natparam):
raise NotImplementedError
def mean(self):
return self.mu
def mode(self):
return self.mu
@property
def base(self):
return np.power(2. * np.pi, - self.dim / 2.)
def log_base(self):
return np.log(self.base)
def log_partition(self):
raise NotImplementedError
def log_likelihood(self, x):
if isinstance(x, np.ndarray):
bads = np.isnan(np.atleast_2d(x)).any(axis=1)
x = np.nan_to_num(x, copy=False).reshape((-1, self.dim))
log_lik = np.einsum('d,dl,nl->n', self.mu, self.lmbda, x, optimize=True)\
- 0.5 * np.einsum('nd,dl,nl->n', x, self.lmbda, x, optimize=True)
log_lik[bads] = 0.
log_lik += - self.log_partition() + self.log_base()
return log_lik
else:
return list(map(self.log_likelihood, x))
class GaussianWithPrecision(_GaussianBase):
    """Multivariate Gaussian parameterised by mean ``mu`` and precision
    matrix ``lmbda`` (the inverse covariance).

    The Cholesky factor of the precision and its inverse are cached and
    invalidated whenever ``lmbda`` is reassigned.
    """
    def __init__(self, dim, mu=None, lmbda=None):
        self._lmbda = lmbda
        # lazy caches, reset by the lmbda setter
        self._lmbda_chol = None
        self._lmbda_chol_inv = None
        super(GaussianWithPrecision, self).__init__(dim, mu)

    @property
    def params(self):
        return self.mu, self.lmbda

    @params.setter
    def params(self, values):
        self.mu, self.lmbda = values

    @property
    def nb_params(self):
        # dim mean entries plus the upper triangle of the symmetric precision
        return self.dim + self.dim * (self.dim + 1) / 2

    @staticmethod
    def std_to_nat(params):
        # natural parameters: (lmbda @ mu, -0.5 * lmbda)
        a = params[1] @ params[0]
        b = - 0.5 * params[1]
        return Stats([a, b])

    @staticmethod
    def nat_to_std(natparam):
        # inverse of std_to_nat
        mu = - 0.5 * np.linalg.inv(natparam[1]) @ natparam[0]
        lmbda = - 2. * natparam[1]
        return mu, lmbda

    @property
    def lmbda(self):
        return self._lmbda

    @lmbda.setter
    def lmbda(self, value):
        self._lmbda = value
        # invalidate cached Cholesky factors
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def lmbda_chol(self):
        # upper-triangular Cholesky factor of the precision, cached
        if self._lmbda_chol is None:
            self._lmbda_chol = sc.linalg.cholesky(self.lmbda, lower=False)
        return self._lmbda_chol

    @property
    def lmbda_chol_inv(self):
        if self._lmbda_chol_inv is None:
            self._lmbda_chol_inv = sc.linalg.inv(self.lmbda_chol)
        return self._lmbda_chol_inv

    @property
    def sigma(self):
        # covariance recovered from the inverse Cholesky factor
        return self.lmbda_chol_inv @ self.lmbda_chol_inv.T

    def rvs(self):
        """Draw one sample via the reparameterisation mu + L^{-1}^T eps."""
        return self.mu + npr.normal(size=self.dim).dot(self.lmbda_chol_inv.T)

    def statistics(self, data):
        """Sufficient statistics (sum x, n, sum x x^T, n); NaN rows dropped."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data = data[idx]

            c0, c1 = 'nd->d', 'nd,nl->dl'
            x = np.einsum(c0, data, optimize=True)
            xxT = np.einsum(c1, data, data, optimize=True)
            n = data.shape[0]
            return Stats([x, n, xxT, n])
        else:
            # list of arrays: accumulate statistics over all of them
            stats = list(map(self.statistics, data))
            return reduce(add, stats)

    def weighted_statistics(self, data, weights):
        """Weighted sufficient statistics; NaN rows (and their weights) dropped."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data, weights = data[idx], weights[idx]

            c0, c1 = 'n,nd->d', 'nd,n,nl->dl'
            x = np.einsum(c0, weights, data, optimize=True)
            xxT = np.einsum(c1, data, weights, data, optimize=True)
            n = np.sum(weights, axis=0)
            return Stats([x, n, xxT, n])
        else:
            stats = list(map(self.weighted_statistics, data, weights))
            return reduce(add, stats)

    def log_partition(self):
        # 0.5 * mu^T lmbda mu - log|L| (L = Cholesky factor of the precision)
        return 0.5 * np.einsum('d,dl,l->', self.mu, self.lmbda, self.mu)\
               - np.sum(np.log(np.diag(self.lmbda_chol)))

    def max_likelihood(self, data, weights=None):
        """Set (mu, lmbda) to the (weighted) maximum-likelihood estimate."""
        x, n, xxT, n = self.statistics(data) if weights is None \
            else self.weighted_statistics(data, weights)

        self.mu = x / n
        sigma = xxT / n - np.outer(self.mu, self.mu)

        # numerical stabilization
        sigma = symmetrize(sigma) + 1e-16 * np.eye(self.dim)
        assert np.allclose(sigma, sigma.T)
        assert np.all(np.linalg.eigvalsh(sigma) > 0.)

        self.lmbda = np.linalg.inv(sigma)
class StackedGaussiansWithPrecision:
    """A stack of `size` independent Gaussians over the same `dim`,
    each with its own mean and full precision matrix.

    Thin vectorized facade over a list of GaussianWithPrecision
    objects; stacked properties have a leading component axis `k`.
    """

    def __init__(self, size, dim, mus=None, lmbdas=None):
        self.size = size
        self.dim = dim

        mus = [None] * self.size if mus is None else mus
        lmbdas = [None] * self.size if lmbdas is None else lmbdas
        self.dists = [GaussianWithPrecision(dim, mus[k], lmbdas[k])
                      for k in range(self.size)]

    @property
    def params(self):
        return self.mus, self.lmbdas

    @params.setter
    def params(self, values):
        self.mus, self.lmbdas = values

    @property
    def nb_params(self):
        # per component: dim (mean) + dim*(dim+1)/2 (symmetric precision)
        return self.size * (self.dim + self.dim * (self.dim + 1) / 2)

    @property
    def nat_param(self):
        return self.std_to_nat(self.params)

    @nat_param.setter
    def nat_param(self, natparam):
        self.params = self.nat_to_std(natparam)

    def std_to_nat(self, params):
        """Convert stacked standard params to stacked natural params."""
        # split the stacks into per-component parameter tuples
        params_list = list(zip(*params))
        natparams_list = [dist.std_to_nat(par) for dist, par in zip(self.dists, params_list)]
        # re-stack each natural parameter along a leading component axis
        natparams_stack = Stats(map(partial(np.stack, axis=0), zip(*natparams_list)))
        return natparams_stack

    def nat_to_std(self, natparam):
        """Convert stacked natural params back to stacked standard params."""
        natparams_list = list(zip(*natparam))
        params_list = [dist.nat_to_std(par) for dist, par in zip(self.dists, natparams_list)]
        params_stack = tuple(map(partial(np.stack, axis=0), zip(*params_list)))
        return params_stack

    @property
    def mus(self):
        return np.array([dist.mu for dist in self.dists])

    @mus.setter
    def mus(self, value):
        for k, dist in enumerate(self.dists):
            dist.mu = value[k, ...]

    @property
    def lmbdas(self):
        return np.array([dist.lmbda for dist in self.dists])

    @lmbdas.setter
    def lmbdas(self, value):
        for k, dist in enumerate(self.dists):
            dist.lmbda = value[k, ...]

    @property
    def lmbdas_chol(self):
        return np.array([dist.lmbda_chol for dist in self.dists])

    @property
    def lmbdas_chol_inv(self):
        return np.array([dist.lmbda_chol_inv for dist in self.dists])

    @property
    def sigmas(self):
        return np.array([dist.sigma for dist in self.dists])

    def mean(self):
        return np.array([dist.mean() for dist in self.dists])

    def mode(self):
        return np.array([dist.mode() for dist in self.dists])

    def rvs(self):
        # one draw per component, stacked along axis 0
        return np.array([dist.rvs() for dist in self.dists])

    @property
    def base(self):
        return np.array([dist.base for dist in self.dists])

    def log_base(self):
        return np.log(self.base)

    def statistics(self, data):
        """Unweighted statistics, replicated identically for every component."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data = data[idx]

            c0, c1 = 'nd->d', 'nd,nl->dl'
            x = np.einsum(c0, data, optimize=True)
            xxT = np.einsum(c1, data, data, optimize=True)
            n = data.shape[0]

            # every component sees the same (unweighted) statistics
            xk = np.array([x for _ in range(self.size)])
            xxTk = np.array([xxT for _ in range(self.size)])
            nk = np.array([n for _ in range(self.size)])
            return Stats([xk, nk, xxTk, nk])
        else:
            stats = list(map(self.statistics, data))
            return reduce(add, stats)

    def weighted_statistics(self, data, weights):
        """Per-component statistics with responsibilities `weights` (n, size)."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data, weights = data[idx], weights[idx]

            c0, c1 = 'nk,nd->kd', 'nd,nk,nl->kdl'
            xk = np.einsum(c0, weights, data, optimize=True)
            xxTk = np.einsum(c1, data, weights, data, optimize=True)
            nk = np.sum(weights, axis=0)
            return Stats([xk, nk, xxTk, nk])
        else:
            stats = list(map(self.weighted_statistics, data, weights))
            return reduce(add, stats)

    def log_partition(self):
        return np.array([dist.log_partition() for dist in self.dists])

    def log_likelihood(self, x):
        """Log-density of each sample under each component, shape (n, size).

        NaN rows have data-dependent terms zeroed (see the per-component
        log_likelihood for the same convention).
        """
        if isinstance(x, np.ndarray):
            bads = np.isnan(np.atleast_2d(x)).any(axis=1)
            x = np.nan_to_num(x, copy=False).reshape((-1, self.dim))
            log_lik = np.einsum('kd,kdl,nl->nk', self.mus, self.lmbdas, x, optimize=True)\
                - 0.5 * np.einsum('nd,kdl,nl->nk', x, self.lmbdas, x, optimize=True)
            log_lik[bads] = 0.
            log_lik += - self.log_partition() + self.log_base()
            return log_lik
        else:
            return list(map(self.log_likelihood, x))

    def max_likelihood(self, data, weights):
        """Per-component weighted ML fit of all means and precisions."""
        xk, nk, xxTk, nk = self.weighted_statistics(data, weights)

        mus = np.zeros((self.size, self.dim))
        lmbdas = np.zeros((self.size, self.dim, self.dim))
        for k in range(self.size):
            mus[k] = xk[k] / nk[k]
            sigma = xxTk[k] / nk[k] - np.outer(mus[k], mus[k])

            # numerical stabilization
            sigma = symmetrize(sigma) + 1e-16 * np.eye(self.dim)
            assert np.allclose(sigma, sigma.T)
            assert np.all(np.linalg.eigvalsh(sigma) > 0.)

            lmbdas[k] = np.linalg.inv(sigma)

        self.mus = mus
        self.lmbdas = lmbdas
class TiedGaussiansWithPrecision(StackedGaussiansWithPrecision):
    """Stack of Gaussians with per-component means but one precision
    matrix shared (tied) across all components, as in tied-covariance
    mixture models."""

    def __init__(self, size, dim, mus=None, lmbdas=None):
        super(TiedGaussiansWithPrecision, self).__init__(size, dim, mus, lmbdas)

    def max_likelihood(self, data, weights):
        """Weighted ML fit with a single pooled covariance."""
        xk, nk, xxTk, nk = self.weighted_statistics(data, weights)

        # pool second moments and counts over components
        xxT = np.sum(xxTk, axis=0)
        n = np.sum(nk, axis=0)

        mus = np.zeros((self.size, self.dim))
        sigma = np.zeros((self.dim, self.dim))
        sigma += xxT
        for k in range(self.size):
            mus[k] = xk[k] / nk[k]
            # remove each component's weighted mean outer product
            sigma -= nk[k] * np.outer(mus[k], mus[k])
        sigma /= n

        # numerical stabilization
        sigma = symmetrize(sigma) + 1e-16 * np.eye(self.dim)
        assert np.allclose(sigma, sigma.T)
        assert np.all(np.linalg.eigvalsh(sigma) > 0.)

        self.mus = mus
        # every component shares the same precision
        lmbda = np.linalg.inv(sigma)
        self.lmbdas = np.array(self.size * [lmbda])
class GaussianWithDiagonalPrecision(_GaussianBase):
    """Multivariate Gaussian with mean `mu` and a diagonal precision,
    stored as the vector `lmbda_diag` of diagonal entries."""

    def __init__(self, dim, mu=None, lmbda_diag=None):
        self._lmbda_diag = lmbda_diag
        # cached diagonal Cholesky factor and its inverse
        self._lmbda_chol = None
        self._lmbda_chol_inv = None
        super(GaussianWithDiagonalPrecision, self).__init__(dim, mu)

    @property
    def params(self):
        return self.mu, self.lmbda_diag

    @params.setter
    def params(self, values):
        self.mu, self.lmbda_diag = values

    @property
    def nb_params(self):
        # NOTE(review): counts a full symmetric precision although only
        # the dim diagonal entries are free; likely copied from the
        # full-precision class -- confirm before using for model selection.
        return self.dim + self.dim * (self.dim + 1) / 2

    @staticmethod
    def std_to_nat(params):
        # elementwise natural parameters: a = lmbda_diag * mu, b = -0.5 * lmbda_diag
        a = params[1] * params[0]
        b = - 0.5 * params[1]
        return Stats([a, b])

    @staticmethod
    def nat_to_std(natparam):
        # inverse of std_to_nat, elementwise
        mu = - 0.5 * (1. / natparam[1]) * natparam[0]
        lmbda_diag = - 2. * natparam[1]
        return mu, lmbda_diag

    @property
    def lmbda_diag(self):
        return self._lmbda_diag

    @lmbda_diag.setter
    def lmbda_diag(self, value):
        self._lmbda_diag = value
        # invalidate cached factors
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def lmbda(self):
        # dense diagonal precision matrix
        assert self.lmbda_diag is not None
        return np.diag(self.lmbda_diag)

    @property
    def lmbda_chol(self):
        # Cholesky of a diagonal matrix is the elementwise sqrt
        if self._lmbda_chol is None:
            self._lmbda_chol = np.diag(np.sqrt(self.lmbda_diag))
        return self._lmbda_chol

    @property
    def lmbda_chol_inv(self):
        if self._lmbda_chol_inv is None:
            self._lmbda_chol_inv = np.diag(1. / np.sqrt(self.lmbda_diag))
        return self._lmbda_chol_inv

    @property
    def sigma_diag(self):
        # diagonal covariance = elementwise inverse of diagonal precision
        return 1. / self.lmbda_diag

    @property
    def sigma(self):
        return np.diag(self.sigma_diag)

    def rvs(self):
        """Draw one sample: mu + eps scaled by the per-dimension std devs."""
        return self.mu + npr.normal(size=self.dim).dot(self.lmbda_chol_inv.T)

    def statistics(self, data):
        """Per-dimension sufficient statistics [sum_x, n, n, sum_x^2]; NaN rows dropped."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data = data[idx]

            x = np.sum(data, axis=0)
            n = data.shape[0]
            xx = np.einsum('nd,nd->d', data, data)
            # count broadcast to one entry per dimension
            nd = np.broadcast_to(data.shape[0], (self.dim, ))
            return Stats([x, nd, nd, xx])
        else:
            stats = list(map(self.statistics, data))
            return reduce(add, stats)

    def weighted_statistics(self, data, weights):
        """Weighted per-dimension statistics; NaN rows (and weights) dropped."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data, weights = data[idx], weights[idx]

            x = np.einsum('n,nd->d', weights, data)
            n = np.sum(weights)
            xx = np.einsum('nd,n,nd->d', data, weights, data)
            nd = np.broadcast_to(np.sum(weights), (self.dim, ))
            return Stats([x, nd, nd, xx])
        else:
            stats = list(map(self.weighted_statistics, data, weights))
            return reduce(add, stats)

    def log_partition(self):
        # 0.5 * mu' L mu - sum(log(sqrt(lmbda_diag)))
        return 0.5 * np.einsum('d,dl,l->', self.mu, self.lmbda, self.mu)\
            - np.sum(np.log(np.diag(self.lmbda_chol)))

    def log_likelihood(self, x):
        """Per-sample log-density; NaN rows get data terms zeroed (see base class)."""
        if isinstance(x, np.ndarray):
            bads = np.isnan(np.atleast_2d(x)).any(axis=1)
            x = np.nan_to_num(x, copy=False).reshape((-1, self.dim))
            log_lik = np.einsum('d,dl,nl->n', self.mu, self.lmbda, x, optimize=True)\
                - 0.5 * np.einsum('nd,dl,nl->n', x, self.lmbda, x, optimize=True)
            log_lik[bads] = 0.
            log_lik += - self.log_partition() + self.log_base()
            return log_lik
        else:
            return list(map(self.log_likelihood, x))

    def max_likelihood(self, data, weights=None):
        """Maximum-likelihood fit of mean and diagonal precision."""
        x, nd, nd, xx = self.statistics(data) if weights is None\
            else self.weighted_statistics(data, weights)
        self.mu = x / nd
        # precision = inverse of per-dimension variance
        self.lmbda_diag = 1. / (xx / nd - self.mu**2)
class StackedGaussiansWithDiagonalPrecision:
    """A stack of `size` independent diagonal-precision Gaussians over
    the same `dim`; vectorized facade over GaussianWithDiagonalPrecision
    objects with a leading component axis `k`."""

    def __init__(self, size, dim, mus=None, lmbdas_diags=None):
        self.size = size
        self.dim = dim

        mus = [None] * self.size if mus is None else mus
        lmbdas_diags = [None] * self.size if lmbdas_diags is None else lmbdas_diags
        self.dists = [GaussianWithDiagonalPrecision(dim, mus[k], lmbdas_diags[k])
                      for k in range(self.size)]

    @property
    def params(self):
        return self.mus, self.lmbdas_diags

    @params.setter
    def params(self, values):
        self.mus, self.lmbdas_diags = values

    @property
    def nb_params(self):
        # NOTE(review): mirrors the full-precision count although each
        # component only has dim free precision entries -- confirm.
        return self.size * (self.dim + self.dim * (self.dim + 1) / 2)

    @property
    def nat_param(self):
        return self.std_to_nat(self.params)

    @nat_param.setter
    def nat_param(self, natparam):
        self.params = self.nat_to_std(natparam)

    def std_to_nat(self, params):
        """Convert stacked standard params to stacked natural params."""
        params_list = list(zip(*params))
        natparams_list = [dist.std_to_nat(par) for dist, par in zip(self.dists, params_list)]
        # re-stack each natural parameter along a leading component axis
        natparams_stack = Stats(map(partial(np.stack, axis=0), zip(*natparams_list)))
        return natparams_stack

    def nat_to_std(self, natparam):
        """Convert stacked natural params back to stacked standard params."""
        natparams_list = list(zip(*natparam))
        params_list = [dist.nat_to_std(par) for dist, par in zip(self.dists, natparams_list)]
        params_stack = tuple(map(partial(np.stack, axis=0), zip(*params_list)))
        return params_stack

    @property
    def mus(self):
        return np.array([dist.mu for dist in self.dists])

    @mus.setter
    def mus(self, value):
        for k, dist in enumerate(self.dists):
            dist.mu = value[k, ...]

    @property
    def lmbdas_diags(self):
        return np.array([dist.lmbda_diag for dist in self.dists])

    @lmbdas_diags.setter
    def lmbdas_diags(self, value):
        for k, dist in enumerate(self.dists):
            dist.lmbda_diag = value[k, ...]

    @property
    def lmbdas(self):
        return np.array([dist.lmbda for dist in self.dists])

    @property
    def lmbdas_chol(self):
        return np.array([dist.lmbda_chol for dist in self.dists])

    @property
    def lmbdas_chol_inv(self):
        return np.array([dist.lmbda_chol_inv for dist in self.dists])

    @property
    def sigmas_diags(self):
        return np.array([dist.sigma_diag for dist in self.dists])

    @property
    def sigmas(self):
        return np.array([dist.sigma for dist in self.dists])

    def mean(self):
        return np.array([dist.mean() for dist in self.dists])

    def mode(self):
        return np.array([dist.mode() for dist in self.dists])

    def rvs(self):
        # one draw per component, stacked along axis 0
        return np.array([dist.rvs() for dist in self.dists])

    @property
    def base(self):
        return np.array([dist.base for dist in self.dists])

    def log_base(self):
        return np.log(self.base)

    def statistics(self, data):
        """Unweighted statistics, replicated identically for every component."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data = data[idx]

            c0, c1 = 'nd->d', 'nd,nd->d'
            x = np.einsum(c0, data, optimize=True)
            xx = np.einsum(c1, data, data, optimize=True)
            nd = np.broadcast_to(data.shape[0], (self.dim, ))

            # every component sees the same (unweighted) statistics
            xk = np.array([x for _ in range(self.size)])
            xxk = np.array([xx for _ in range(self.size)])
            ndk = np.array([nd for _ in range(self.size)])
            return Stats([xk, ndk, ndk, xxk])
        else:
            stats = list(map(self.statistics, data))
            return reduce(add, stats)

    def weighted_statistics(self, data, weights):
        """Per-component statistics with responsibilities `weights` (n, size)."""
        if isinstance(data, np.ndarray):
            idx = ~np.isnan(data).any(axis=1)
            data, weights = data[idx], weights[idx]

            xk = np.einsum('nk,nd->kd', weights, data)
            xxk = np.einsum('nd,nk,nd->kd', data, weights, data)
            ndk = np.broadcast_to(np.sum(weights, axis=0, keepdims=True), (self.size, self.dim))
            return Stats([xk, ndk, ndk, xxk])
        else:
            stats = list(map(self.weighted_statistics, data, weights))
            return reduce(add, stats)

    def log_partition(self):
        return np.array([dist.log_partition() for dist in self.dists])

    def log_likelihood(self, x):
        """Log-density of each sample under each component, shape (n, size)."""
        if isinstance(x, np.ndarray):
            bads = np.isnan(np.atleast_2d(x)).any(axis=1)
            x = np.nan_to_num(x, copy=False).reshape((-1, self.dim))
            log_lik = np.einsum('kd,kdl,nl->nk', self.mus, self.lmbdas, x, optimize=True)\
                - 0.5 * np.einsum('nd,kdl,nl->nk', x, self.lmbdas, x, optimize=True)
            log_lik[bads] = 0.
            log_lik += - self.log_partition() + self.log_base()
            return log_lik
        else:
            return list(map(self.log_likelihood, x))

    def max_likelihood(self, data, weights):
        """Per-component weighted ML fit of means and diagonal precisions."""
        xk, ndk, ndk, xxk = self.weighted_statistics(data, weights)

        mus = np.zeros((self.size, self.dim))
        lmbdas_diags = np.zeros((self.size, self.dim))
        for k in range(self.size):
            mus[k] = xk[k] / ndk[k]
            # 1e-16 guards against zero variance
            lmbdas_diags[k] = 1. / (xxk[k] / ndk[k] - mus[k]**2 + 1e-16)

        self.mus = mus
        self.lmbdas_diags = lmbdas_diags
class TiedGaussiansWithDiagonalPrecision(StackedGaussiansWithDiagonalPrecision):
    """Stack of diagonal-precision Gaussians with per-component means
    but one diagonal precision shared (tied) across all components."""

    def __init__(self, size, dim, mus=None, lmbdas_diags=None):
        super(TiedGaussiansWithDiagonalPrecision, self).__init__(size, dim, mus, lmbdas_diags)

    def max_likelihood(self, data, weights):
        """Weighted ML fit with a single pooled diagonal covariance."""
        xk, ndk, ndk, xxk = self.weighted_statistics(data, weights)

        # pool second moments and counts over components
        xx = np.sum(xxk, axis=0)
        nd = np.sum(ndk, axis=0)

        mus = np.zeros((self.size, self.dim))
        sigma_diag = np.zeros((self.dim, ))
        sigma_diag += xx
        for k in range(self.size):
            mus[k] = xk[k] / ndk[k]
            # remove each component's weighted squared mean
            sigma_diag -= ndk[k] * mus[k]**2
        sigma_diag /= nd

        self.mus = mus
        # every component shares the same diagonal precision
        lmbda_diag = 1. / (sigma_diag + 1e-16)
        self.lmbdas_diags = np.array(self.size * [lmbda_diag])
class GaussianWithKnownMeanAndDiagonalPrecision(GaussianWithDiagonalPrecision):
    """Gaussian with a fixed (known) mean whose only free parameter is
    the diagonal precision.

    The sufficient statistics therefore reduce to [n/2, -0.5 * sum x^2]
    (per dimension) and `params` exposes only `lmbda_diag`.
    """

    def __init__(self, dim, mu=None, lmbda_diag=None):
        super(GaussianWithKnownMeanAndDiagonalPrecision, self).__init__(dim, mu,
                                                                        lmbda_diag)

    @property
    def params(self):
        return self.lmbda_diag

    @params.setter
    def params(self, values):
        self.lmbda_diag = values

    @property
    def nb_params(self):
        # only the dim diagonal precision entries are free
        return self.dim

    def statistics(self, data):
        """Sufficient statistics of `data` for the diagonal precision.

        Accepts a 2d array of shape (n, dim) or a list of such arrays.
        Rows (samples) containing NaNs are dropped.
        """
        if isinstance(data, np.ndarray):
            # Mask samples, not dimensions: every sibling class masks with
            # axis=1; the original axis=0 built a dim-length mask and
            # applied it to the sample axis, which is wrong for n != dim.
            idx = ~np.isnan(data).any(axis=1)
            data = data[idx]

            n = 0.5 * data.shape[0]
            xx = - 0.5 * np.einsum('nd,nd->d', data, data)
            return Stats([n, xx])
        else:
            stats = list(map(self.statistics, data))
            return reduce(add, stats)

    def weighted_statistics(self, data, weights):
        """Weighted sufficient statistics; NaN rows (and weights) dropped."""
        if isinstance(data, np.ndarray):
            # same fix as statistics(): mask rows with axis=1
            idx = ~np.isnan(data).any(axis=1)
            data, weights = data[idx], weights[idx]

            n = 0.5 * np.sum(weights)
            xx = - 0.5 * np.einsum('nd,n,nd->d', data, weights, data)
            return Stats([n, xx])
        else:
            stats = list(map(self.weighted_statistics, data, weights))
            return reduce(add, stats)
|
import sys
from cartomancy.brains.spaces.go_fish.actions import Actions
from cartomancy.engine.io.getch import getch
class GoFishHumanPolicy:
    """Human input policy for Go Fish.

    When this policy is used, the user is asked for input to choose
    actions.  The rank_to_seek and player_to_ask methods list numbered
    choices and the user inputs one character to choose one of the
    available options.
    """

    NAME = 'human'

    def __init__(self):
        self._actions = None
        self._observations = None

    @property
    def actions(self):
        """An instance of the Actions class"""
        return self._actions

    @actions.setter
    def actions(self, new_actions: 'Actions'):
        """Overwrite the actions stored in Actions."""
        self._actions = new_actions

    @property
    def observations(self):
        """An instance of the observations class"""
        return self._observations

    @observations.setter
    def observations(self, new_observations):
        """Overwrite the observations."""
        self._observations = new_observations

    def rank_to_seek(self):
        """Ask user to choose one of the available ranks.

        Re-prompts (recursively) until a valid choice is made.
        """
        # get user input
        user_input = self.ask_user_for_rank()

        # map '1'..'<k>' onto indices 0..k-1 of the valid ranks
        user_input_to_idx = {str(i+1): i for i in range(len(self.actions.valid_ranks))}

        # process user input
        try:
            ask_idx = user_input_to_idx[user_input]
            ask_rank = self.actions.valid_ranks[ask_idx]
        except KeyError:
            # ask for another rank
            print(f'The choice {user_input} is invalid. Please choose another.')
            ask_rank = self.rank_to_seek()

        return ask_rank

    def player_to_ask(self):
        """Ask user to choose one of the available opponents.

        Re-prompts (recursively) until a valid choice is made.
        """
        # get user input
        user_input = self.ask_user_for_opp()

        # map '1'..'<k>' onto indices 0..k-1 of the valid opponents
        user_input_to_idx = {str(i+1): i for i in range(len(self.actions.valid_opponents))}

        # process user input
        try:
            ask_idx = user_input_to_idx[user_input]
            ask_opp = self.actions.valid_opponents[ask_idx]
        except KeyError:
            # ask for another opponent
            print(f'The choice {user_input} is invalid. Please choose another.')
            ask_opp = self.player_to_ask()

        return ask_opp

    def sample(self):
        """Return the required choices: (opponent, rank)."""
        return self.player_to_ask(), self.rank_to_seek()

    def ask_user_for_rank(self):
        """Ask user to choose a rank; 'q' quits the game."""
        ranks = self.actions.valid_ranks

        # prompt user to choose a rank.
        rank_options = ["".join(['option ', str(i+1), ' : ', rank, '\n'])
                        for i, rank in enumerate(ranks)]
        print("".join(['Your ranks:\n']+rank_options))
        ask_idx_plus_one = input("Choose a rank: ")

        if ask_idx_plus_one != 'q':
            try:
                sys.stdout.write(ranks[int(ask_idx_plus_one)-1] + '\n\n')
                sys.stdout.flush()
            except (ValueError, IndexError):
                # non-numeric or out-of-range echo; validation happens
                # in rank_to_seek, so only report here
                print('Invalid choice.')
        else:
            # quit the game
            sys.exit('quitting the game!')

        return ask_idx_plus_one

    def ask_user_for_opp(self):
        """Ask user to choose an opponent; 'q' quits the game."""
        opponents = self.actions.valid_opponents

        # prompt user to choose an opponent.
        opponent_options = ["".join(['option ', str(i+1), ' : ', opp.name, '\n'])
                            for i, opp in enumerate(opponents)]
        print("".join(['Your opponents:\n']+opponent_options))
        ask_idx_plus_one = input("Choose an opponent: ")

        if ask_idx_plus_one != 'q':
            try:
                sys.stdout.write(opponents[int(ask_idx_plus_one)-1].name + '\n\n')
                sys.stdout.flush()
            except (ValueError, IndexError):
                print('Invalid choice.')
        else:
            # quit the game
            sys.exit('quitting the game!')

        return ask_idx_plus_one

    @staticmethod
    def process_user_input(char):
        """Normalize a raw input character into a choice index.

        'q' is passed through unchanged so callers can detect a quit
        request.  '0' maps to 14, which is never a valid index and so
        triggers the invalid-choice handling in rank_to_seek /
        player_to_ask.  Any other character becomes a zero-based index.
        """
        if char == 'q':
            # pass the quit sentinel through unchanged
            idx = char
        elif char == '0':
            # Must be checked before the generic int conversion: the
            # original tested `char != 'q'` first, which swallowed '0'
            # and returned -1 instead of the documented 14.
            idx = 14
        else:
            idx = int(char) - 1
        return idx
|
import operator
from django.db.models import signals
from django.db.models.expressions import F, ExpressionNode
# Map Django ExpressionNode connectors to the Python operator that
# evaluates them.  NOTE(review): `operator.div` exists only on Python 2;
# this module targets Python 2 (see `iteritems` usage below).
EXPRESSION_NODE_CALLBACKS = {
    ExpressionNode.ADD: operator.add,
    ExpressionNode.SUB: operator.sub,
    ExpressionNode.MUL: operator.mul,
    ExpressionNode.DIV: operator.div,
    ExpressionNode.MOD: operator.mod,
    ExpressionNode.AND: operator.and_,
    ExpressionNode.OR: operator.or_,
    }
class CannotResolve(Exception):
    """Raised when an expression node's connector has no registered operator."""
def _resolve(instance, node):
    """Resolve `node` to a concrete value against `instance`.

    F() references are read off the instance, nested expression trees
    are evaluated recursively, and anything else is returned unchanged
    as a literal.
    """
    if isinstance(node, F):
        return getattr(instance, node.name)
    elif isinstance(node, ExpressionNode):
        # Evaluate the sub-expression.  The original called
        # `_resolve(instance, node)` here, recursing on the very same
        # node forever (infinite recursion); nested nodes must be
        # evaluated through resolve_expression_node.
        return resolve_expression_node(instance, node)
    return node
def resolve_expression_node(instance, node):
    """Evaluate a Django ExpressionNode tree against `instance`.

    Looks up the Python operator for the node's connector and left-folds
    it over the resolved children.  Raises CannotResolve when the
    connector has no registered operator.
    """
    op = EXPRESSION_NODE_CALLBACKS.get(node.connector, None)
    if not op:
        raise CannotResolve
    # left-fold the operator over the resolved child values
    runner = _resolve(instance, node.children[0])
    for n in node.children[1:]:
        runner = op(runner, _resolve(instance, n))
    return runner
def update(instance, full_clean=True, post_save=False, **kwargs):
    "Atomically update instance, setting field/value pairs from kwargs"
    # apply the updated args to the instance to mimic the change
    # note that these might slightly differ from the true database values
    # as the DB could have been updated by another thread. callers should
    # retrieve a new copy of the object if up-to-date values are required
    # NOTE(review): `iteritems` is Python 2 only.
    for k, v in kwargs.iteritems():
        if isinstance(v, ExpressionNode):
            # evaluate F()/expression trees against the in-memory instance
            v = resolve_expression_node(instance, v)
        setattr(instance, k, v)

    # clean instance before update
    if full_clean:
        instance.full_clean()

    # fields that use auto_now=True should be updated corrected, too!
    for field in instance._meta.fields:
        if hasattr(field, 'auto_now') and field.auto_now and field.name not in kwargs:
            kwargs[field.name] = field.pre_save(instance, False)

    # single UPDATE ... WHERE pk=... statement; bypasses Model.save()
    rows_affected = instance.__class__._default_manager.filter(
        pk=instance.pk).update(**kwargs)

    if post_save:
        # fire the post_save signal manually since save() was bypassed
        signals.post_save.send(sender=instance.__class__, instance=instance)

    return rows_affected
class Choices(object):
    """Declarative container for Django-style field choices.

    Each input choice is either a bare value (used as value, attribute
    name and label alike), a 2-tuple ``(value, label)`` (value doubles
    as the attribute name), or a 3-tuple ``(value, attribute_name,
    label)``.  Iterating yields ``(value, label)`` pairs -- the shape
    Django's ``choices`` option expects -- and each choice's value is
    also reachable as an attribute named after it.
    """

    def __init__(self, *choices):
        # list of (value, label) pairs, in declaration order
        self._choices = []
        # attribute name -> value
        self._choice_dict = {}
        # NOTE(review): never populated anywhere; __repr__ consumes it
        # (see below) -- confirm whether labels were meant to be stored.
        self._labels = {}

        for choice in choices:
            if isinstance(choice, (list, tuple)):
                if len(choice) == 2:
                    # (value, label): value doubles as the attribute name
                    choice = (choice[0], choice[0], choice[1])
                elif len(choice) != 3:
                    raise ValueError("Choices can't handle a list/tuple of length %s, only 2 or 3" % len(choice))
            else:
                # bare value serves as value, attribute name and label
                choice = (choice, choice, choice)
            self._choices.append((choice[0], choice[2]))
            self._choice_dict[choice[1]] = choice[0]

    def __getattr__(self, attname):
        try:
            return self._choice_dict[attname]
        except KeyError:
            raise AttributeError(attname)

    def __iter__(self):
        return iter(self._choices)

    def __getitem__(self, index):
        return self._choices[index]

    def __repr__(self):
        # NOTE(review): Python 2 only (`itervalues`); since self._labels
        # is never filled, the zip below is always empty, so the repr
        # shows no choices -- confirm the intended output.
        values, names = zip(*self._choices)
        labels = self._labels.itervalues()
        return '%s(%s)' % (self.__class__.__name__,
                           repr(zip(values, labels, names)))
|
'''
Gets values from special comments in the first 1024 bytes of the main script.
'''
import os
import re
import sys
import shlex
from argcomplete import USING_PYTHON2
from itertools import chain
if USING_PYTHON2:
from pipes import quote
else:
quote = shlex.quote
# How many leading bytes of the main script are scanned for marker comments.
SEARCH_RANGE = 1024
# Marker comments look like: CACHEDCOMPLETE_HASH: <file> <file> ...
INFO_PREFIX = 'CACHEDCOMPLETE_'
# Regex capturing the whitespace-separated file list after the marker.
FILES_TO_HASH_INFO = INFO_PREFIX + r'HASH:\s*(?P<files>.*)$'
def _skip_easy_install(filename):
    '''
    Handle easyinstall files by returning their wrapped file instead.
    '''
    import inspect
    # Python3 changed the tuple to an object with named fields.
    if USING_PYTHON2:
        # take the outermost frame whose source file is `filename`
        main_file_frame = [stack_frame[0] for stack_frame in inspect.stack() if stack_frame[1] == filename][-1]
    else:
        main_file_frame = [stack_frame.frame for stack_frame in inspect.stack() if stack_frame.filename == filename][-1]
    # easy_install wrappers set __file__ to the wrapped script;
    # fall back to the original name when it is absent
    return main_file_frame.f_globals.get('__file__', filename)
# Path to the main script (unwrapped from easy_install shims),
# or None when there is no script file at all.
if sys.argv[0] != '-c':
    MAIN_FILE_PATH = os.path.abspath(_skip_easy_install(sys.argv[0]))
else:
    # Running as `python -c "commands"`
    MAIN_FILE_PATH = None
def _get_info_list(expr):
    '''
    :return: an iterable of the items captured by ``expr`` in the first
        SEARCH_RANGE bytes of the main script (not an actual list).
    '''
    with open(MAIN_FILE_PATH) as main_file:
        header = main_file.read(SEARCH_RANGE)
    # one shlex-split list per marker line, flattened lazily
    matches = re.finditer(expr, header, re.M)
    return chain.from_iterable(shlex.split(found.group('files')) for found in matches)
def _expand(filename):
return quote(os.path.expanduser(os.path.expandvars(filename)))
def get_files_to_hash():
    '''
    :return: an iterable of all the files and directories that should be hashed.
    '''
    # make `$pwd` usable inside marker entries (only set when absent)
    os.environ.setdefault('pwd', os.path.abspath(os.curdir))
    # always hash the main script and this package's own directory,
    # plus every expanded entry from the CACHEDCOMPLETE_HASH markers
    return chain([MAIN_FILE_PATH, os.path.dirname(__file__)], (_expand(filename) for filename in _get_info_list(FILES_TO_HASH_INFO)))
def exists():
    '''
    :return: ``True`` if the main script exists, otherwise ``False`` (as when ran with `python -c`).
    '''
    if MAIN_FILE_PATH is None:
        return False
    return os.path.exists(MAIN_FILE_PATH)
|
from cv_datalib.image import DataImage
import pandas as pd
from typing import List
class Split:
    """A named collection of images, e.g. a 'train'/'val'/'test' split."""

    def __init__(self, name: str, images: List[DataImage]):
        # split identifier, e.g. 'train'
        self.name = name
        # NOTE: stored by reference, not copied
        self.images = images

    def __getitem__(self, index: int) -> DataImage:
        """Return the image at `index`."""
        return self.images[index]

    def add_image(self, image: DataImage) -> None:
        """Append `image` to the split."""
        self.images.append(image)

    def __len__(self) -> int:
        """Number of images in the split."""
        return len(self.images)
class Dataset:
    """A dataset as an ordered collection of named splits."""

    def __init__(self, splits: List[Split]):
        self.splits = splits
class Splitter:
    """Placeholder for a dataset re-splitting strategy."""

    def __init__(self):
        pass

    def split_dataset(self, dataset: Dataset) -> Dataset:
        # Not implemented yet: currently returns the dataset unchanged.
        return dataset
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class DistributedLobbyManagerUD(DistributedObjectUD):
    """UberDOG-side lobby manager distributed object.

    Announces itself to interested AI servers on generation; every other
    handler below is an unimplemented stub matching the distributed-class
    interface (the dc file), kept so incoming field updates are accepted.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedLobbyManagerUD")

    def announceGenerate(self):
        DistributedObjectUD.announceGenerate(self)
        # broadcast that the UD lobby manager is up
        self.sendUpdate('lobbyManagerUdStartingUp')

    def addLobby(self, todo0, todo1, todo2, todo3):
        pass

    def addLobbyRequest(self, hostId):
        pass

    def addLobbyResponse(self, hostId, errorCode):
        pass

    def getLobbyZone(self, avId, zoneId, isAvAboutToCreateLobby):
        pass

    def receiveLobbyZone(self, todo0, todo1, todo2):
        pass

    def freeZoneIdFromCreatedLobby(self, avId, zoneId):
        pass

    def sendAvToPlayground(self, todo0, todo1):
        pass

    def exitParty(self, zoneIdOfAv):
        pass

    def lobbyManagerAIStartingUp(self, todo0, todo1):
        pass

    def lobbyManagerAIGoingDown(self, todo0, todo1):
        pass

    def lobbyHasStartedAiToUd(self, todo0, todo1, todo2, todo3, todo4):
        pass

    def requestShardIdZoneIdForHostId(self, hostId):
        pass

    def sendShardIdZoneIdToAvatar(self, shardId, zoneId):
        pass

    def toonHasEnteredPartyAiToUd(self, todo0):
        pass

    def toonHasExitedPartyAiToUd(self, todo0):
        pass

    def lobbyHasFinishedUdToAllAi(self, todo0):
        pass

    def lobbyManagerUdStartingUp(self):
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.