import os
import sys
import argparse
import numpy as np
from PIL import Image
import signal
import time
signal.signal(signal.SIGINT, signal.SIG_DFL)
import OpenGL.GL as gl
from glfwBackend import glfwApp
import imgui
from imgui.integrations.glfw import GlfwRenderer
# Local imports
from fieldanimation import FieldAnimation, glInfo
CHOICES = (
'epole',
"Duffing's equation",
'Structurally stable system',
'Transcritical bifurcation',
'Reaction diffusion',
'Positive invariant set',
'Spiral ccw',
'Spiral cw',
'wind',
'gmod',
)
#------------------------------------------------------------------------------
def ElectricField(q, r0, x, y):
""" Return the electric field vector E=(Ex, Ey) due to charge q at r0.
"""
den = np.hypot(x - r0[0], y - r0[1]) ** 1.5
return q * (x - r0[0]) / den, q * (y - r0[1]) / den
#------------------------------------------------------------------------------
def createField(eq='Spiral ccw', m=64, n=64):
# def createField(eq='Spiral ccw', m=81, n=201):
""" Equations are taken from
http://reference.wolfram.com/language/ref/StreamPlot.html
Args:
eq (string): equation name
m (integer): number of rows
n (integer): number of columns
"""
mj = m * 1j
nj = n * 1j
Y, X = np.mgrid[-3:3:mj, -3:3:nj]
if eq == 'epole':
        # Return an (m, n, 2) array with the electric field generated by a
        # configuration of 2**charges alternating point charges on the unit circle
charges = 1
nq = 2 ** int(charges)
charges = []
for i in range(nq):
q = i % 2 * 2 - 1
charges.append((q,
(np.cos(2 * np.pi * i / nq), np.sin(2 * np.pi * i / nq))))
U, V = np.zeros((m, n)), np.zeros((m, n))
for charge in charges:
ex, ey = ElectricField(*charge, x=X, y=Y)
U += ex
V += ey
elif eq == "Duffing's equation":
# Duffing's equation
U = Y.copy()
V = X - X**3
elif eq == 'Structurally stable system':
# Structurally stable system
U = 2 * X + Y - X * (X**2 + Y**2)
V = -Y - Y * (X**2 + Y**2)
elif eq == 'Transcritical bifurcation':
# Transcritical bifurcation
U = X**2
V = -Y
elif eq == 'Reaction diffusion':
U = 2 * (Y - X) + X * (1 - X ** 2)
V = -2 * (Y - X) + Y * (1 - Y ** 2)
elif eq == 'Positive invariant set':
U = Y.copy()
V = - X + Y * (1 - X ** 2 - 2 * Y ** 2)
elif eq == 'Spiral ccw':
origin = (0.8, 0.8)
X, Y = X - origin[0], Y - origin[1]
U = X - Y
V = X + Y
elif eq == 'Spiral cw':
origin = (0.8, 0.8)
X, Y = X - origin[0], Y - origin[1]
U = Y - X
V = -X - Y
elif eq == 'wind':
field = np.load("wind_2016-11-20T00-00Z.npy")
r, c, bands = field.shape
Y, X = np.mgrid[0:r, 0:c]
U = field[:, :, 0][::-1]
V = - field[:, :, 1][::-1]
elif eq == 'gmod':
U = np.load('vx.npy')[::-1]
V = np.load('vy.npy')[::-1]
r,c = U.shape
Y, X = np.mgrid[0:r, 0:c]
else:
raise SystemExit("Unknown field. Giving up...")
return np.flipud(np.dstack((U, -V)))
#------------------------------------------------------------------------------
def userInterface(renderer, graphicItem, app):
""" Control graphicItem parameters interactively
"""
if not renderer:
return
renderer.process_inputs()
imgui.set_next_window_position(0, 0)
imgui.new_frame()
toOpen = True
dummy, toOpen = imgui.begin('Controls', closable=True,
flags=imgui.WINDOW_ALWAYS_AUTO_RESIZE)
if not toOpen:
app.restoreKeyCallback()
return toOpen
# field to show
current = app.ifield
changed, current = imgui.combo('Field', current, list(CHOICES))
if changed:
app.setField(current)
# Speed Rate
changed, speed = imgui.drag_float('Speed',
graphicItem.speedFactor, 0.01, 0.0, 10.0)
if changed:
graphicItem.speedFactor = speed
# Decay
changed, decay = imgui.drag_float('Decay',
graphicItem.decay, 0.001, 0.001, 1.0)
if changed:
graphicItem.decay = decay
# Drop Rate Bump
changed, decayBoost = imgui.drag_float('Decay boost',
graphicItem.decayBoost, 0.01, 0.001, 1.0)
if changed:
graphicItem.decayBoost = decayBoost
    # Fade opacity
changed, opacity = imgui.drag_float('Opacity',
graphicItem.fadeOpacity, 0.001, 0.900, 0.999, '%.4f')
if changed:
graphicItem.fadeOpacity = opacity
# Palette
changed, color = imgui.color_edit3('Color', *graphicItem.color)
if changed:
graphicItem.color = color
imgui.same_line()
changed, palette = imgui.checkbox("Palette", graphicItem.palette)
if changed:
graphicItem.palette = palette
changed, bg_color = imgui.color_edit4('Background color', *app.bg_color)
if changed:
app.bg_color = bg_color
# Point size
changed, pointSize = imgui.input_int("Point size",
graphicItem.pointSize, 1, 1, 1)
if changed:
if pointSize > 5:
pointSize = 5
elif pointSize < 1:
pointSize = 1
graphicItem.pointSize = pointSize
# Number of Points
changed, tracersCount = imgui.drag_int("Number of "
"Tracers", graphicItem.tracersCount, 1000.0, 4000, 10000000)
if changed:
graphicItem.tracersCount = tracersCount
# Periodic border
changed, periodic = imgui.checkbox("Periodic", graphicItem.periodic)
if changed:
graphicItem.periodic = periodic
# Draw field
changed, drawfield = imgui.checkbox("Draw Field", graphicItem.drawField)
if changed:
graphicItem.drawField = drawfield
imgui.end()
imgui.render()
return True
#==============================================================================
class GLApp(glfwApp):
def __init__(self, title, width, height, options):
super(GLApp, self).__init__(title, width, height)
if options.gui:
self._renderer = GlfwRenderer(self.window(), True)
else:
self._renderer = None
if options.image is not None:
options.image = np.flipud(
np.asarray(Image.open(options.image), np.uint8))
self.ifield = CHOICES.index(options.choose)
field = createField(CHOICES[self.ifield])
glversion = glInfo()['glversion']
if not options.use_fragment and glversion < 4.3:
print("WARNING..... Compute shaders not available with OpenGL"
" ver. %.1f." % glversion)
useCompute = False
else:
useCompute = True
# Add Field Animation overlay
self._fa = FieldAnimation(width, height, field, useCompute,
options.image)
if options.draw_field:
self._fa.drawField = True
self._t0 = time.time()
self._fps = 0
self.options = options
def renderScene(self):
super(GLApp, self).renderScene()
self._fa.draw()
self._fps += 1
status = userInterface(self._renderer, self._fa, self)
if not status:
self._renderer = None
now = time.time()
if now - self._t0 >= 1:
if self.options.fps:
self.setTitle("%s - %s FPS" % (self.title(), self._fps))
self._fps = 0
self._t0 = time.time()
def onKeyboard(self, window, key, scancode, action, mode):
if key == GLApp.KEY_G and action == GLApp.PRESS:
# Draw the GUI
if self._renderer is None:
self._renderer = GlfwRenderer(self.window(), True)
super(GLApp, self).onKeyboard(window, key, scancode, action, mode)
def onResize(self, window, width, height):
gl.glViewport(0, 0, width, height)
self._fa.setSize(width, height)
def setField(self, ifield):
field = createField(CHOICES[ifield])
self._fa.setField(field)
self.setTitle('%s' % CHOICES[ifield])
self.ifield = ifield
#------------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="\nField Animation example",
add_help=True,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog=os.path.basename(sys.argv[0]))
parser.add_argument('-f', '--draw_field', action='store_true',
default=False,
help=("Draw vector field as background image ")
)
parser.add_argument('-c', '--choose',
choices=CHOICES,
default="epole",
help=("Choose field to animate ")
)
parser.add_argument('-p', '--fps', action='store_true', default=False,
help=("Count Frames Per Second ")
)
parser.add_argument('-u', '--use-fragment', action='store_true',
default=False, help=("Use fragment instead of compute shader ")
)
parser.add_argument('-g', '--gui', action='store_true', default=False,
help=("Add gui control window ")
)
parser.add_argument('-i', '--image', action='store', default=None,
help=("Load image as background texture")
)
options = parser.parse_args(sys.argv[1:])
app = GLApp('Field Animation', 800, 800, options)
app.run()
|
from bs4 import BeautifulSoup
html = '<div><p>Testing</p></div><div class="feedflare"><p>Feedflare</p></div>'
soup = BeautifulSoup(html, 'html.parser')
print(type(soup))
print(isinstance(soup, BeautifulSoup))
# remove every <div class="feedflare"> element from the parse tree
for div in soup.find_all('div', 'feedflare'):
    div.decompose()
print(str(soup))
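# decompose() destroys the matched tags in place; BeautifulSoup's extract() would
# instead detach and return the element, which is useful when the removed markup
# still needs to be inspected.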
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.go.target_types import GoModule, GoPackage
from pants.backend.go.util_rules import external_module, sdk
from pants.backend.go.util_rules.external_module import (
DownloadedExternalModule,
DownloadExternalModuleRequest,
ResolveExternalGoModuleToPackagesRequest,
ResolveExternalGoModuleToPackagesResult,
)
from pants.core.util_rules import external_tool, source_files
from pants.engine import fs
from pants.engine.fs import EMPTY_DIGEST, Digest, DigestContents
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*external_tool.rules(),
*source_files.rules(),
*fs.rules(),
*sdk.rules(),
*external_module.rules(),
QueryRule(DownloadedExternalModule, [DownloadExternalModuleRequest]),
QueryRule(
ResolveExternalGoModuleToPackagesResult, [ResolveExternalGoModuleToPackagesRequest]
),
QueryRule(DigestContents, [Digest]),
],
target_types=[GoPackage, GoModule],
)
rule_runner.set_options(["--backend-packages=pants.backend.experimental.go"])
return rule_runner
def test_download_external_module(rule_runner: RuleRunner) -> None:
downloaded_module = rule_runner.request(
DownloadedExternalModule,
[DownloadExternalModuleRequest(path="github.com/google/uuid", version="v1.3.0")],
)
assert downloaded_module.path == "github.com/google/uuid"
assert downloaded_module.version == "v1.3.0"
digest_contents = rule_runner.request(DigestContents, [downloaded_module.digest])
found_uuid_go_file = False
for file_content in digest_contents:
if file_content.path == "uuid.go":
found_uuid_go_file = True
break
assert found_uuid_go_file
def test_download_external_module_with_no_gomod(rule_runner: RuleRunner) -> None:
downloaded_module = rule_runner.request(
DownloadedExternalModule,
[DownloadExternalModuleRequest(path="cloud.google.com/go", version="v0.26.0")],
)
assert downloaded_module.path == "cloud.google.com/go"
assert downloaded_module.version == "v0.26.0"
digest_contents = rule_runner.request(DigestContents, [downloaded_module.digest])
found_go_mod = False
for file_content in digest_contents:
if file_content.path == "go.mod":
found_go_mod = True
break
assert found_go_mod
def test_resolve_packages_of_go_external_module(rule_runner: RuleRunner) -> None:
result = rule_runner.request(
ResolveExternalGoModuleToPackagesResult,
[
ResolveExternalGoModuleToPackagesRequest(
path="github.com/google/go-cmp",
version="v0.5.6",
go_sum_digest=EMPTY_DIGEST,
)
],
)
import_path_to_package = {pkg.import_path: pkg for pkg in result.packages}
assert len(import_path_to_package) > 1
pkg = import_path_to_package["github.com/google/go-cmp/cmp"]
assert pkg is not None
assert pkg.address is None
assert pkg.package_name == "cmp"
assert len(pkg.go_files) > 0
|
import torch
import torchvision.transforms as transforms
import random
import numpy as np
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
__imagenet_pca = {
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec': torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
t_list = [
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize(**normalize),
]
if scale_size != input_size:
t_list = [transforms.Resize(scale_size)] + t_list
return transforms.Compose(t_list)
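# For example, scale_crop(input_size=224, scale_size=256) resizes the shorter image
# side to 256, center-crops to 224x224, converts to a tensor and normalizes with the
# ImageNet statistics defined above.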
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
t_list = [
transforms.RandomCrop(input_size),
transforms.ToTensor(),
transforms.Normalize(**normalize),
]
if scale_size != input_size:
t_list = [transforms.Resize(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
if type(input_size) is tuple:
padding = (int((scale_size - input_size[0]) / 2), int((scale_size - input_size[1]) / 2))
else:
padding = int((scale_size - input_size) / 2)
return transforms.Compose([
transforms.RandomCrop(input_size, padding=padding),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**normalize),
])
def inception_preproccess(input_size, normalize=__imagenet_stats):
return transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**normalize)
])
def inception_color_preproccess(input_size, normalize=__imagenet_stats):
return transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
),
Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
transforms.Normalize(**normalize)
])
from data import _DATASET_META_DATA
def get_transform(name='imagenet', input_size=None,
scale_size=None, normalize=None, augment=True):
if 'imagenet' in name or name in ['imaginet','randomnet','cats_vs_dogs']:
normalize = normalize or __imagenet_stats
scale_size = scale_size or 256
input_size = input_size or 224
if augment:
return inception_preproccess(input_size, normalize=normalize)
else:
return scale_crop(input_size=input_size,
scale_size=scale_size, normalize=normalize)
elif name == 'svhn_oe':
input_size = input_size or (32,32)
return transforms.Compose([transforms.Resize(input_size),transforms.ToTensor()])
elif any([i in name for i in ['cifar100', 'cifar10', 'stl10', 'SVHN']]):
input_size = input_size or 32
normalize = normalize or _DATASET_META_DATA.get(name,_DATASET_META_DATA[name]).get_normalization()
if augment:
scale_size = scale_size or 40
return pad_random_crop(input_size, scale_size=scale_size,
normalize=normalize)
else:
scale_size = scale_size or 32
return scale_crop(input_size=input_size,
scale_size=scale_size, normalize=normalize)
elif 'mnist' in name:
normalize = normalize or _DATASET_META_DATA.get(name, _DATASET_META_DATA['mnist']).get_normalization()
input_size = input_size or 28
if name.endswith('_3c'):
pre_transform = lambda org_trans: transforms.Compose([transforms.Resize(input_size),lambda x:x.convert('RGB'),org_trans])
else:
pre_transform = lambda org_trans : org_trans
if augment:
scale_size = scale_size or 32
return pre_transform(pad_random_crop(input_size, scale_size=scale_size,
normalize=normalize))
else:
scale_size = scale_size or 32
return pre_transform(scale_crop(input_size=input_size,
scale_size=scale_size, normalize=normalize))
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone()\
.mul(alpha.view(1, 3).expand(3, 3))\
.mul(self.eigval.view(1, 3).expand(3, 3))\
.sum(1).squeeze()
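        # rgb = eigvec @ (alpha * eigval): a random combination of the RGB principal
        # components with alpha ~ N(0, alphastd^2), i.e. the PCA colour ("lighting")
        # augmentation described in the AlexNet paper.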
return img.add(rgb.view(3, 1, 1).expand_as(img))
class Grayscale(object):
def __call__(self, img):
gs = img.clone()
gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2])
gs[1].copy_(gs[0])
gs[2].copy_(gs[0])
return gs
class Saturation(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
alpha = random.uniform(0, self.var)
return img.lerp(gs, alpha)
class Brightness(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = img.new().resize_as_(img).zero_()
alpha = random.uniform(0, self.var)
return img.lerp(gs, alpha)
class Contrast(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
gs.fill_(gs.mean())
alpha = random.uniform(0, self.var)
return img.lerp(gs, alpha)
class RandomOrder(object):
""" Composes several transforms together in random order.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
if self.transforms is None:
return img
order = torch.randperm(len(self.transforms))
for i in order:
img = self.transforms[i](img)
return img
class ColorJitter(RandomOrder):
def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):
self.transforms = []
if brightness != 0:
self.transforms.append(Brightness(brightness))
if contrast != 0:
self.transforms.append(Contrast(contrast))
if saturation != 0:
self.transforms.append(Saturation(saturation))
class RandomNoise(object):
_SUPPORTED_NOISE = ['uniform','normal']
def __init__(self,type,ratio=0.05):
assert type in RandomNoise._SUPPORTED_NOISE
assert 0 < ratio < 1
self.type = type
self.ratio = ratio
self.img = None
def __call__(self, img):
norm_signal = torch.norm(img)
        # draw zero-mean noise with unit variance
        if self.type == 'uniform':
            # Uniform(-a, a) has variance a^2 / 3, so a = sqrt(3) ~ 1.7321
            alpha = 1.7321
noise = torch.distributions.Uniform(-alpha,alpha).sample(img.shape)
elif self.type == 'normal':
noise = torch.distributions.Normal(0,1).sample(img.shape)
norm_noise = torch.norm(noise)
factor = self.ratio * norm_signal / norm_noise
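        # noise * factor has norm equal to `ratio` times the signal norm; the signal
        # is shrunk by (1 - ratio) so the combined magnitude stays comparable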
return img * (1-self.ratio) + noise * factor
class ImgGhosting():
def __init__(self, ratio=0.3, ghost_moment = 0.2, residual_init_rate = 0.25,fuse_distribution = torch.distributions.Beta(0.4,0.4)):
self.ratio = ratio
self.init_rate = residual_init_rate
self.ghost_moment=ghost_moment
assert 0 <= self.ratio / (1 - self.init_rate) <= 1
#todo check exponential dist
self.fuse_distribution = fuse_distribution
self.residual = None
def __call__(self, img):
        if self.residual is not None and torch.rand(1) > self.init_rate:
            residual = self.residual.clone()
#update residual
self.residual = self.residual * self.ghost_moment + (1-self.ghost_moment) * img
# ratio of ghosted images per sample
if torch.rand(1) < self.ratio /(1- self.init_rate):
gamma = self.fuse_distribution.sample()
img = img * (1 - gamma) + gamma * residual
else:
self.residual = img
return img
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, max_num_holes=10,ratio=1/4):#,area_threshold=0.65):
super(Cutout,self).__init__()
self.max_num_holes = max_num_holes
self.ratio = ratio
#self.area_threshold=area_threshold
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
#area = h*w
mask = torch.ones((h,w),device=img.device)
for n in range(torch.randint(self.max_num_holes,(1,))):
            height = torch.randint(1, int(h * self.ratio), (1,))
width = torch.randint(1,int(w * self.ratio),(1,))
y = torch.randint(h,(1,))
x = torch.randint(w,(1,))
            y1 = torch.clamp(y - height // 2, 0, h)
            y2 = torch.clamp(y + height // 2, 0, h)
x1 = torch.clamp(x - width // 2, 0, w)
x2 = torch.clamp(x + width // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
# if mask.sum()/area > self.area_threshold:
# mask[y1: y2, x1: x2] = 1
mask = mask.expand_as(img)
img = img * mask
return img
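# Illustrative usage sketch (assumed, not part of the original pipeline): Cutout
# operates on (C, H, W) tensors, so it belongs after ToTensor(), e.g.
#   train_transform = transforms.Compose([transforms.ToTensor(),
#                                         Cutout(max_num_holes=4, ratio=0.25)])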
|
import numpy as np
import pandas as pd
from process_tweets import *
from matplotlib import pyplot as plt
import nltk
from collections import defaultdict
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import time
def process_text(df):
"""
:rtype: object
"""
features = df['text']
labels = df['party']
processed_features = []
for sentence in range(0, len(features)):
# Remove all the special characters
processed_feature = re.sub(r'\W', ' ', str(features[sentence]))
# remove all single characters
processed_feature = re.sub(r'\s+[a-zA-Z]\s+', ' ', processed_feature)
# Remove single characters from the start
        processed_feature = re.sub(r'^[a-zA-Z]\s+', ' ', processed_feature)
# Substituting multiple spaces with single space
processed_feature = re.sub(r'\s+', ' ', processed_feature, flags=re.I)
# Converting to Lowercase
processed_feature = processed_feature.lower()
processed_features.append(processed_feature)
print(processed_feature)
return processed_features, labels
def evaluate_metrics(test_set, predictions):
print(confusion_matrix(test_set, predictions))
print(classification_report(test_set, predictions))
print(accuracy_score(test_set, predictions))
def CorpusIterator(corpus):
for tweet in corpus:
yield tweet
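# TfidfVectorizer only needs an iterable of raw documents, so the generator above
# can be passed to fit_transform directly without building another copy of the corpus.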
if __name__ == '__main__':
df_tweets = csv_to_df('tweet_data/tweets_users_with_party.csv')
processed_features, labels = process_text(df_tweets)
corpus = CorpusIterator(processed_features)
vectorizer = TfidfVectorizer(max_features=2500, min_df=7, max_df=0.8, stop_words=stopwords.words('english'))
processed_features = vectorizer.fit_transform(corpus)
processed_features = processed_features.toarray()
X_train, X_test, y_train, y_test = train_test_split(processed_features, labels, test_size=0.2, random_state=0)
text_classifier = RandomForestClassifier(n_estimators=100, random_state=0, verbose=2)
text_classifier.fit(X_train, y_train)
predictions = text_classifier.predict(X_test)
evaluate_metrics(y_test, predictions)
|
"""
Compiled SQL function objects.
"""
from contextlib import contextmanager
import sqlalchemy
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import BindParameter
import threading
_locals = threading.local()
@contextmanager
def _compile_context(multiparams, params):
_locals.compile_context = {
'multiparams': multiparams,
'params': params,
}
try:
yield
finally:
_locals.compile_context = None
@compiles(BindParameter)
def _visit_bindparam(element, compiler, **kw):
cc = getattr(_locals, 'compile_context', None)
if cc:
if _is_expanding_param(element, cc):
element.expanding = True
return compiler.visit_bindparam(element)
def _is_expanding_param(element, cc):
if element.key not in cc['params']:
return False
return isinstance(cc['params'][element.key], (tuple, list))
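# Marking a bind parameter as "expanding" makes SQLAlchemy render it as a
# comma-separated list of bound values at execution time, so a clause like
# "WHERE id IN :ids" works when :ids is passed a tuple or list.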
class Result(object):
def transform(self, r):
raise NotImplementedError()
@property
def display_type(self):
raise NotImplementedError()
class One(Result):
def transform(self, r):
row = r.first()
if row:
return { k: v for k, v in zip(r.keys(), row) }
return None
@property
def display_type(self):
return 'row'
class Many(Result):
def transform(self, r):
ks = r.keys()
return ({ k: v for k, v in zip(ks, row)} for row in r.fetchall())
@property
def display_type(self):
return 'rows'
class Affected(Result):
def transform(self, r):
return r.rowcount
@property
def display_type(self):
return 'rowcount'
class Scalar(Result):
def transform(self, r):
row = r.first()
if not row:
return None
return row[0]
@property
def display_type(self):
return 'scalar'
class Insert(Scalar):
def transform(self, r):
if hasattr(r, 'lastrowid'):
return r.lastrowid
return super(Insert, self).transform(r)
@property
def display_type(self):
return 'insert'
class Raw(Result):
def transform(self, r):
return r
@property
def display_type(self):
return 'raw'
class Statement(object):
def __init__(self, name, sql, doc, result, filename=None):
self.filename = filename
if not name:
self._value_err('Statement must have a name.')
if sql is None:
self._value_err('Statement must have a SQL string.')
sql = sql.strip()
if not len(sql):
self._value_err('SQL string cannot be empty.')
if not result:
self._value_err('Statement must have a result type.')
self.name = name
self.sql = sql
self.doc = doc
self.result = result
self.filename = filename
self._module = None
self._text = sqlalchemy.sql.text(self.sql)
def _value_err(self, msg):
if self.filename:
raise ValueError('%s In: %s' % (msg, self.filename))
raise ValueError(msg)
def set_module(self, module):
self._module = module
def _assert_module(self):
if self._module is None:
raise RuntimeError(
'This statement is not associated with a module')
def __call__(self, *multiparams, **params):
self._assert_module()
multiparams, params = self._convert_params(multiparams, params)
with _compile_context(multiparams, params):
r = self._module._execute(self._text, *multiparams, **params)
return self.result.transform(r)
def _convert_params(self, multiparams, params):
def conv(x):
if isinstance(x, set):
return tuple(x)
return x
return (
[conv(p) for p in multiparams],
{ k: conv(v) for k, v in params.items() })
def _param_names(self):
def kfn(p):
return self.sql.index(':' + p)
return sorted(self._text._bindparams.keys(), key=kfn)
def __str__(self):
paramstr = ', '.join(['%s=None' % k for k in self._param_names()])
return 'pugsql.statement.Statement: %s(%s) :: %s' % (
self.name, paramstr, self.result.display_type)
def __repr__(self):
return str(self)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from novaclient.openstack.common import jsonutils
from novaclient.tests.fixture_data import base
class Fixture(base.Fixture):
base_url = 'os-cloudpipe'
def setUp(self):
super(Fixture, self).setUp()
get_os_cloudpipe = {'cloudpipes': [{'project_id': 1}]}
httpretty.register_uri(httpretty.GET, self.url(),
body=jsonutils.dumps(get_os_cloudpipe),
content_type='application/json')
instance_id = '9d5824aa-20e6-4b9f-b967-76a699fc51fd'
post_os_cloudpipe = {'instance_id': instance_id}
httpretty.register_uri(httpretty.POST, self.url(),
body=jsonutils.dumps(post_os_cloudpipe),
content_type='application/json',
status=202)
httpretty.register_uri(httpretty.PUT, self.url('configure-project'),
content_type='application/json',
status=202)
|
import asyncio
import json
import os
import re
from inspect import isawaitable
from sanic import Blueprint
from config import Config
from framework.ws import RedisChannelWebsocket
from utils import to_str, to_bytes
ws_bp = Blueprint('ws', url_prefix='/ws')
class WebsocketHandler(RedisChannelWebsocket):
async def pre_send(self, msg):
return to_str(msg)
class WsSendMixin:
def send(self, data):
if isinstance(data, (dict, list)):
data = json.dumps(data)
else:
data = to_str(data)
asyncio.ensure_future(self.ws.send(data))
class CommandProtocol(asyncio.Protocol, WsSendMixin):
def __init__(self, ws):
self.ws = ws
self.transport = None
def connection_made(self, transport):
self.transport = transport
self.send({
'type': 'cmd_log',
            'data': ['openvpn management interface connected.']
})
def data_received(self, data):
data = to_str(data)
self.send({
'type': 'cmd_log',
'data': [data]
})
def connection_lost(self, exc):
self.send({
'type': 'cmd_log',
            'data': ['openvpn management interface disconnected.']
})
def get_management_addr():
conf_file = os.path.join(Config.BASEDIR, 'conf.d', 'server.conf')
with open(conf_file) as fd:
content = fd.read()
# content = ' '.join(content.splitlines())
m = re.search(r'management\s+(?P<ip>\S+)\s+(?P<port>\S+)', content, re.M)
ip = m.group('ip')
if ip == '0.0.0.0':
ip = '127.0.0.1'
port = int(m.group('port'))
return ip, port
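# get_management_addr() relies on the OpenVPN server config containing a management
# directive such as "management 0.0.0.0 7505"; 0.0.0.0 is rewritten to 127.0.0.1 so
# the local connection opened below can reach the management interface.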
class CommandHandler(WsSendMixin):
def __init__(self, ws):
self.ws = ws
self.manage_transport = None
async def __call__(self, msg):
msg = json.loads(to_str(msg))
cmd = msg.get('cmd')
attr = getattr(self, cmd, lambda *x:x)
ret = attr(msg)
if isawaitable(ret):
await ret
async def connect(self, msg):
if self.manage_transport and not self.manage_transport.is_closing():
self.send({
'type': 'cmd_log',
                'data': ['openvpn management service is already connected']
})
return
try:
            ip, port = get_management_addr()
except:
self.send({
'type': 'cmd_log',
                'data': ['openvpn management port is not enabled in the server config.']
})
return
loop = asyncio.get_event_loop()
try:
            self.manage_transport, _ = await loop.create_connection(lambda: CommandProtocol(self.ws), ip, port)
except asyncio.TimeoutError:
self.send({
'type': 'cmd_log',
                'data': ['openvpn management connection timed out.']
})
except ConnectionRefusedError:
self.send({
'type': 'cmd_log',
                'data': ['openvpn management connection refused.']
})
except ConnectionAbortedError:
self.send({
'type': 'cmd_log',
                'data': ['openvpn management connection aborted.']
})
except:
self.send({
'type': 'cmd_log',
                'data': ['openvpn management connection failed: unknown error.']
})
async def disconnect(self, msg=None):
if self.manage_transport and not self.manage_transport.is_closing():
self.manage_transport.close()
def write_command(self, command):
if self.manage_transport and not self.manage_transport.is_closing():
self.manage_transport.write(to_bytes(command))
else:
self.send({
'type': 'cmd_log',
                'data': ['openvpn management service is not connected.']
})
async def command(self, msg):
command = msg.get('data')
command = command.strip() + '\r\n'
self.write_command(command)
@ws_bp.websocket('/notify')
async def notify(request, ws):
try:
handler = CommandHandler(ws)
coro = WebsocketHandler(request, ws, handler, channel_names='openvpn-admin:notify')()
coro = asyncio.shield(coro)
await coro
except Exception as e:
print(e)
finally:
await handler.disconnect()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-03 07:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('drivers', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='driver',
name='transporter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='driver',
name='vehicle_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='drivers.VehicleType'),
),
]
|
import json
from pathlib import Path
from unittest.mock import ANY, Mock, call, patch
import pytest
import tornado
from jupyterlab_git.git import Git
from jupyterlab_git.handlers import (
GitAllHistoryHandler,
GitBranchHandler,
GitLogHandler,
GitPushHandler,
GitUpstreamHandler,
setup_handlers,
)
from .testutils import NS, assert_http_error, maybe_future
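# maybe_future (from the local testutils module) wraps a plain value in an awaitable,
# so the mocked Git methods below can return ordinary dicts/strings that the handlers
# then await as if they were real coroutine results.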
def test_mapping_added():
mock_web_app = Mock()
mock_web_app.settings = {"base_url": "nb_base_url"}
setup_handlers(mock_web_app)
mock_web_app.add_handlers.assert_called_once_with(".*", ANY)
@patch("jupyterlab_git.handlers.GitAllHistoryHandler.git", spec=Git)
async def test_all_history_handler_localbranch(mock_git, jp_fetch):
# Given
show_top_level = {"code": 0, "foo": "top_level"}
branch = "branch_foo"
log = "log_foo"
status = "status_foo"
mock_git.show_top_level.return_value = maybe_future(show_top_level)
mock_git.branch.return_value = maybe_future(branch)
mock_git.log.return_value = maybe_future(log)
mock_git.status.return_value = maybe_future(status)
# When
body = {"current_path": "test_path", "history_count": 25}
response = await jp_fetch(NS, "all_history", body=json.dumps(body), method="POST")
# Then
mock_git.show_top_level.assert_called_with("test_path")
mock_git.branch.assert_called_with("test_path")
mock_git.log.assert_called_with("test_path", 25)
mock_git.status.assert_called_with("test_path")
assert response.code == 200
payload = json.loads(response.body)
assert payload == {
"code": show_top_level["code"],
"data": {
"show_top_level": show_top_level,
"branch": branch,
"log": log,
"status": status,
},
}
@patch("jupyterlab_git.handlers.GitBranchHandler.git", spec=Git)
async def test_branch_handler_localbranch(mock_git, jp_fetch):
# Given
branch = {
"code": 0,
"branches": [
{
"is_current_branch": True,
"is_remote_branch": False,
"name": "feature-foo",
"upstream": "origin/feature-foo",
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": False,
"name": "master",
"upstream": "origin/master",
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": False,
"name": "feature-bar",
"upstream": None,
"top_commit": "01234567899999abcdefghijklmnopqrstuvwxyz",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": True,
"name": "origin/feature-foo",
"upstream": None,
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": True,
"name": "origin/master",
"upstream": None,
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
],
}
mock_git.branch.return_value = maybe_future(branch)
# When
body = {"current_path": "test_path"}
response = await jp_fetch(NS, "branch", body=json.dumps(body), method="POST")
# Then
mock_git.branch.assert_called_with("test_path")
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0, "branches": branch["branches"]}
@patch("jupyterlab_git.handlers.GitLogHandler.git", spec=Git)
async def test_log_handler(mock_git, jp_fetch):
# Given
log = {"code": 0, "commits": []}
mock_git.log.return_value = maybe_future(log)
# When
body = {"current_path": "test_path", "history_count": 20}
response = await jp_fetch(NS, "log", body=json.dumps(body), method="POST")
# Then
mock_git.log.assert_called_with("test_path", 20)
assert response.code == 200
payload = json.loads(response.body)
assert payload == log
@patch("jupyterlab_git.handlers.GitLogHandler.git", spec=Git)
async def test_log_handler_no_history_count(mock_git, jp_fetch):
# Given
log = {"code": 0, "commits": []}
mock_git.log.return_value = maybe_future(log)
# When
body = {"current_path": "test_path"}
response = await jp_fetch(NS, "log", body=json.dumps(body), method="POST")
# Then
mock_git.log.assert_called_with("test_path", 25)
assert response.code == 200
payload = json.loads(response.body)
assert payload == log
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_localbranch(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("localbranch")
mock_git.get_upstream_branch.return_value = maybe_future(
{"code": 0, "remote_short_name": ".", "remote_branch": "localbranch"}
)
mock_git.push.return_value = maybe_future({"code": 0})
# When
body = {"current_path": "test_path"}
response = await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with("test_path")
mock_git.get_upstream_branch.assert_called_with("test_path", "localbranch")
mock_git.push.assert_called_with(".", "HEAD:localbranch", "test_path", None, False)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_remotebranch(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {
"code": 0,
"remote_short_name": "origin/something",
"remote_branch": "remote-branch-name",
}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.push.return_value = maybe_future({"code": 0})
# When
body = {"current_path": "test_path"}
response = await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with("test_path")
mock_git.get_upstream_branch.assert_called_with("test_path", "foo/bar")
mock_git.push.assert_called_with(
"origin/something", "HEAD:remote-branch-name", "test_path", None, False
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {
"code": 128,
"command": "",
"message": "fatal: no upstream configured for branch 'foo'",
}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({})
mock_git.push.return_value = maybe_future({"code": 0})
path = "test_path"
# When
body = {"current_path": path}
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
response = e.value.response
# Then
mock_git.get_current_branch.assert_called_with(path)
mock_git.get_upstream_branch.assert_called_with(path, "foo")
mock_git.config.assert_called_with(path)
mock_git.remote_show.assert_called_with(path)
mock_git.push.assert_not_called()
assert response.code == 500
payload = json.loads(response.body)
assert payload == {
"code": 128,
"message": "fatal: The current branch foo has no upstream branch.",
"remotes": list(),
}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_multipleupstream(mock_git, jp_fetch):
# Given
remotes = ["origin", "upstream"]
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({"remotes": remotes})
mock_git.push.return_value = maybe_future({"code": 0})
path = "test_path"
# When
body = {"current_path": path}
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
response = e.value.response
# Then
mock_git.get_current_branch.assert_called_with(path)
mock_git.get_upstream_branch.assert_called_with(path, "foo")
mock_git.config.assert_called_with(path)
mock_git.remote_show.assert_called_with(path)
mock_git.push.assert_not_called()
assert response.code == 500
payload = json.loads(response.body)
assert payload == {
"code": 128,
"message": "fatal: The current branch foo has no upstream branch.",
"remotes": remotes,
}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_unique_remote(mock_git, jp_fetch):
# Given
remote = "origin"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({"remotes": [remote]})
mock_git.push.return_value = maybe_future({"code": 0})
path = "test_path"
# When
body = {"current_path": path}
response = await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with(path)
mock_git.get_upstream_branch.assert_called_with(path, "foo")
mock_git.config.assert_called_with(path)
mock_git.remote_show.assert_called_with(path)
mock_git.push.assert_called_with(
remote, "foo", "test_path", None, set_upstream=True
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pushdefault(mock_git, jp_fetch):
# Given
remote = "rorigin"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future(
{"options": {"remote.pushdefault": remote}}
)
mock_git.remote_show.return_value = maybe_future({"remotes": [remote, "upstream"]})
mock_git.push.return_value = maybe_future({"code": 0})
path = "test_path"
# When
body = {"current_path": path}
response = await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with(path)
mock_git.get_upstream_branch.assert_called_with(path, "foo")
mock_git.config.assert_called_with(path)
mock_git.remote_show.assert_called_with(path)
mock_git.push.assert_called_with(
remote, "foo", "test_path", None, set_upstream=True
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pass_remote_nobranch(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({})
mock_git.push.return_value = maybe_future({"code": 0})
path = "test_path"
remote = "online"
# When
body = {"current_path": path, "remote": remote}
response = await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with(path)
mock_git.get_upstream_branch.assert_called_with(path, "foo")
mock_git.config.assert_not_called()
mock_git.remote_show.assert_not_called()
mock_git.push.assert_called_with(remote, "HEAD:foo", "test_path", None, True)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pass_remote_branch(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({})
mock_git.push.return_value = maybe_future({"code": 0})
path = "test_path"
remote = "online"
remote_branch = "onfoo"
# When
body = {"current_path": path, "remote": "/".join((remote, remote_branch))}
response = await jp_fetch(NS, "push", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with(path)
mock_git.get_upstream_branch.assert_called_with(path, "foo")
mock_git.config.assert_not_called()
mock_git.remote_show.assert_not_called()
mock_git.push.assert_called_with(
remote, "HEAD:" + remote_branch, "test_path", None, True
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitUpstreamHandler.git", spec=Git)
async def test_upstream_handler_forward_slashes(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {
"code": 0,
"remote_short_name": "origin/something",
"remote_branch": "foo/bar",
}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
# When
body = {"current_path": "test_path"}
response = await jp_fetch(NS, "upstream", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with("test_path")
mock_git.get_upstream_branch.assert_called_with("test_path", "foo/bar")
assert response.code == 200
payload = json.loads(response.body)
assert payload == upstream
@patch("jupyterlab_git.handlers.GitUpstreamHandler.git", spec=Git)
async def test_upstream_handler_localbranch(mock_git, jp_fetch):
# Given
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {"code": 0, "remote_short_name": ".", "remote_branch": "foo/bar"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
# When
body = {"current_path": "test_path"}
response = await jp_fetch(NS, "upstream", body=json.dumps(body), method="POST")
# Then
mock_git.get_current_branch.assert_called_with("test_path")
mock_git.get_upstream_branch.assert_called_with("test_path", "foo/bar")
assert response.code == 200
payload = json.loads(response.body)
assert payload == upstream
@patch("jupyterlab_git.git.execute")
async def test_diffcontent(mock_execute, jp_fetch, jp_root_dir):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"git": "current"},
"top_repo_path": top_repo_path,
}
response = await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["prev_content"] == content
assert payload["curr_content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("previous", filename)],
cwd=str(jp_root_dir / top_repo_path),
),
call(
["git", "show", "{}:{}".format("current", filename)],
cwd=str(jp_root_dir / top_repo_path),
),
],
any_order=True,
)
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_working(mock_execute, jp_fetch, jp_root_dir):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
maybe_future((0, content, "")),
]
dummy_file = jp_root_dir / top_repo_path / Path(filename)
dummy_file.parent.mkdir(parents=True)
dummy_file.write_text(content)
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"special": "WORKING"},
"top_repo_path": top_repo_path,
}
response = await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["prev_content"] == content
assert payload["curr_content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("previous", filename)],
cwd=str(jp_root_dir / top_repo_path),
)
]
)
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_index(mock_execute, jp_fetch, jp_root_dir):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"special": "INDEX"},
"top_repo_path": top_repo_path,
}
response = await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["prev_content"] == content
assert payload["curr_content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("previous", filename)],
cwd=str(jp_root_dir / top_repo_path),
),
call(
["git", "show", "{}:{}".format("", filename)],
cwd=str(jp_root_dir / top_repo_path),
),
],
any_order=True,
)
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_unknown_special(mock_execute, jp_fetch):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"special": "unknown"},
"top_repo_path": top_repo_path,
}
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
assert_http_error(e, 500, expected_message="unknown special ref")
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_show_handled_error(mock_execute, jp_fetch):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
mock_execute.return_value = maybe_future(
(
-1,
"",
"fatal: Path '{}' does not exist (neither on disk nor in the index)".format(
filename
),
)
)
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"git": "current"},
"top_repo_path": top_repo_path,
}
response = await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["prev_content"] == ""
assert payload["curr_content"] == ""
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_binary(mock_execute, jp_fetch):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
mock_execute.return_value = maybe_future((0, "-\t-\t{}".format(filename), ""))
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"git": "current"},
"top_repo_path": top_repo_path,
}
# Then
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
assert_http_error(e, 500, expected_message="file is not UTF-8")
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_show_unhandled_error(mock_execute, jp_fetch):
# Given
top_repo_path = "path/to/repo"
filename = "my/file"
mock_execute.return_value = maybe_future((-1, "", "Dummy error"))
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"git": "current"},
"top_repo_path": top_repo_path,
}
# Then
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
assert_http_error(e, 500, expected_message="Dummy error")
@patch("jupyterlab_git.git.execute")
async def test_diffcontent_getcontent_deleted_file(mock_execute, jp_fetch, jp_root_dir):
# Given
top_repo_path = "path/to/repo"
filename = "my/absent_file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"prev_ref": {"git": "previous"},
"curr_ref": {"special": "WORKING"},
"top_repo_path": top_repo_path,
}
# Then
response = await jp_fetch(NS, "diffcontent", body=json.dumps(body), method="POST")
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["prev_content"] == content
assert payload["curr_content"] == ""
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("previous", filename)],
cwd=str(jp_root_dir / top_repo_path),
)
]
)
|
import os
datasets = [
'iris',
'heart',
'arrhythmia',
#'abalone',
'wine',
'segment',
#'sensorless_drive',
]
#model = ['jehmo', ]
mix = ['none', 'random']
for i_mix in mix:
for i_d in datasets:
print('Current Method: ' + str('jem') + ', Current dataset: ' + i_d + '.\n')
os.system('python main_merged.py --dataset ' + str(i_d) +
' --model jem --mixup_scheme ' + i_mix +
' --batch_size 64 --epochs 500 --n_warmup 50'
' --od_l 4.9 --od_lr 1.7 --od_n 20 --od_std .2 --n_ood 1')
|
from pomdpy.pomdp import HistoricalData
from .util import INDEX_TO_ACTION
class Belief():
def __init__(self, p_green=float(1/3), p_yellow=float(1/3), p_red=float(1/3), belief_d=None, confidence_d = None):
self.green = p_green
self.yellow = p_yellow
self.red = p_red
self.dist = belief_d
self.dist_confidence = confidence_d
def __eq__(self, other):
if self.green == other.green and self.yellow == other.yellow and self.red == other.red and self.dist == other.dist and self.dist_confidence == other.dist_confidence:
return True
return False
def update(self, other):
self.green = other.green
self.yellow = other.yellow
self.red = other.red
self.dist = other.dist
self.dist_confidence = other.dist_confidence
def normalize(self):
total = self.green + self.yellow + self.red
self.green /= total
self.yellow /= total
self.red /= total
class TrafficLightData(HistoricalData):
    def __init__(self, model, speed, belief=None):
        self.model = model
        self.observations_passed = 0
        # avoid sharing one mutable default Belief across instances
        self.belief = belief if belief is not None else Belief()
self.speed = speed
self.legal_actions = self.generate_legal_actions
def copy(self):
dat = TrafficLightData(self.model, self.speed, self.belief)
dat.observations_passed = self.observations_passed
return dat
def update(self, other_belief):
self.belief.update(other_belief.belief)
def create_child(self, action, observation):
next_data = self.copy()
self.observations_passed += 1
''' ------- Bayes update of belief state -------- '''
next_data.belief = self.model.belief_update(self.belief, action, observation)
next_data.speed = observation.speed
return next_data
def generate_legal_actions(self):
"""
At each non-terminal state, the agent can listen or choose to open the
door based on the current door probabilities
"""
legal_actions = []
for index in INDEX_TO_ACTION:
if self.speed + INDEX_TO_ACTION[index] >= 0 and self.speed + INDEX_TO_ACTION[index] <= self.model.config["max_speed"]:
legal_actions.append(index)
return legal_actions
|
"""
(c) 2020 Spencer Rose, MIT Licence
Python Landscape Classification Tool (PyLC)
Reference: An evaluation of deep learning semantic segmentation
for land cover classification of oblique ground-based photography,
MSc. Thesis 2020.
<http://hdl.handle.net/1828/12156>
Spencer Rose <spencerrose@uvic.ca>, June 2020
University of Victoria
Module: Profiler
File: profile.py
"""
import torch
import torch.nn.functional
from tqdm import tqdm
from utils.metrics import m2, jsd
import numpy as np
def get_profile(dset):
"""
Computes dataset statistical profile
- probability class distribution for database at db_path
- sample metrics and statistics
- image mean / standard deviation
Parameters
------
dset: MLPDataset
Image/mask dataset.
    Returns
    ------
    meta: Metadata
        Updated dataset metadata with the attributes listed below.
    Computed metadata attributes
    ----------------------------
args.id: int
Identifier.
args.ch: int
Number of channels
args.schema: str
Path to schema JSON file.
args.output: str
Output path
args.n_samples
Number of samples.
args.tile_size: int
Tile size.
args.scales: list
Image scaling factors.
args.stride: int
Stride.
args.m2: float
M2 variance metric.
args.jsd: float
JSD coefficient.
args.px_mean: np.array
Pixel mean value.
args.px_std: np.array
Pixel standard deviation value.
args.px_dist: np.array
Tile pixel frequency distribution.
args.tile_px_count: int
Tile pixel count.
args.dset_px_dist: np.array
Dataset pixel frequency distribution.
args.dset_px_count: int
Dataset pixel count.
args.probs: np.array
Dataset probability distribution.
args.weights:
Dataset inverse weights.
"""
# update local metadata with dataset metadata
meta = dset.get_meta()
# get data loader
loader, n_batches = dset.loader(
batch_size=1,
n_workers=0,
drop_last=False
)
meta.n_samples = dset.size
# initialize global stats
px_dist = []
px_mean = torch.zeros(meta.ch)
px_std = torch.zeros(meta.ch)
# load images and masks
for i, (img, mask) in tqdm(enumerate(loader), total=n_batches, desc="Profiling: ", unit=' batches'):
# Compute dataset pixel global mean / standard deviation
if meta.ch == 3:
px_mean += torch.mean(img, (0, 2, 3))
px_std += torch.std(img, (0, 2, 3))
else:
px_mean += torch.mean(img)
px_std += torch.std(img)
# convert mask to one-hot encoding
mask_1hot = torch.nn.functional.one_hot(mask, num_classes=meta.n_classes).permute(0, 3, 1, 2)
px_dist_sample = [np.sum(mask_1hot.numpy(), axis=(2, 3))]
px_dist += px_dist_sample
# Divide by dataset size
px_mean /= meta.n_samples
px_std /= meta.n_samples
# Calculate sample pixel distribution / sample pixel count
px_dist = np.concatenate(px_dist)
# Calculate dataset pixel distribution / dataset total pixel count
dset_px_dist = np.sum(px_dist, axis=0)
dset_px_count = np.sum(dset_px_dist)
probs = dset_px_dist / dset_px_count
assert dset_px_count / meta.tile_px_count == meta.n_samples, \
"Pixel distribution does not match tile count."
# Calculate class weight balancing
weights = 1 / (np.log(1.02 + probs))
weights = weights / np.max(weights)
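    # the weights above follow the inverse log-frequency scheme w_c = 1 / ln(1.02 + p_c)
    # from the ENet paper, rescaled so the largest class weight equals 1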
# initialize balanced distributions [n]
balanced_px_prob = np.empty(meta.n_classes)
balanced_px_prob.fill(1 / meta.n_classes)
# Calculate JSD and M2 metrics
meta.m2 = m2(probs, meta.n_classes)
meta.jsd = jsd(probs, balanced_px_prob)
# store metadata values
meta.px_mean = px_mean.tolist()
meta.px_std = px_std.tolist()
meta.px_dist = px_dist.tolist()
meta.tile_px_count = meta.tile_size * meta.tile_size
meta.probs = probs.tolist()
meta.weights = weights.tolist()
meta.dset_px_count = int(dset_px_count)
meta.dset_px_dist = dset_px_dist.tolist()
return meta
def print_meta(meta):
"""
Prints profile metadata to console
"""
hline = '\n' + '_' * 70
readout = '\n{}'.format('Profile Metadata')
readout += hline
readout += '\n {:30s}{}'.format('ID', meta.id)
readout += '\n {:30s}{} ({})'.format('Channels', meta.ch, 'Grayscale' if meta.ch == 1 else 'Colour')
readout += '\n {:30s}{}'.format('Classes', meta.n_classes)
readout += '\n {:30s}{}'.format('Samples', meta.n_samples)
readout += '\n {:30s}{}px x {}px'.format('Tile size (WxH)', meta.tile_size, meta.tile_size)
# RGB/Grayscale mean
    px_mean = 'R{:3s} G{:3s} B{:3s}'.format(
        str(round(meta.px_mean[0], 3)), str(round(meta.px_mean[1], 3)), str(round(meta.px_mean[2], 3))) \
        if meta.ch == 3 else str(round(meta.px_mean[0], 3))
readout += '\n {:30s}{}'.format('Pixel mean', px_mean)
# RGB/Grayscale std-dev
px_std = 'R{:3s} G{:3s} B{:3s}'.format(
str(round(meta.px_std[0], 3)), str(round(meta.px_std[1], 3)), str(round(meta.px_std[2], 3))) \
if meta.ch == 3 else str(round(meta.px_std[0], 3))
readout += '\n {:30s}{}'.format('Pixel std-dev', px_std)
readout += '\n {:30s}{}'.format('M2', str(round(meta.m2, 3)))
readout += '\n {:30s}{}'.format('JSD', str(round(meta.jsd, 3)))
# palette
readout += '\n\n{} ({})'.format('Palette', meta.schema)
readout += hline
readout += '\n {:8s}{:25s}{:20s}{:15s}'.format('Code', 'Name', 'RGB', 'Hex')
readout += hline
for i, rgb_colour in enumerate(meta.palette_rgb):
rgb = 'R{:3s} G{:3s} B{:3s}'.format(
str(rgb_colour[0]), str(rgb_colour[1]), str(rgb_colour[2]))
readout += '\n {:8s}{:25s}{:20s}{:15s}'.format(
meta.class_codes[i], meta.class_labels[i], rgb, meta.palette_hex[i])
readout += hline
# class weights
readout += '\n\n{:30s}'.format('Distribution')
readout += hline
readout += '\n {:30s}{:10s}{:10s}'.format('Class', 'Probs', 'Weights')
readout += hline
for i, w in enumerate(meta.weights):
readout += '\n {:25s}{:10f} {:10f}'.format(
meta.class_labels[i], round(meta.probs[i], 4), round(w, 4))
readout += hline
readout += '\n{:25s}{:,}'.format('Tile pixel count', int(meta.tile_px_count))
readout += '\n{:25s}{:,}'.format('Dataset pixel count', int(meta.dset_px_count))
readout += hline + '\n'
print(readout)
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from saspy.version import __version__
from saspy.sasbase import SASsession, SASconfig
from saspy.sasdata import SASdata
from saspy.sasexceptions import SASIONotSupportedError, SASConfigNotValidError
from saspy.sasproccommons import SASProcCommons
from saspy.sastabulate import Tabulate
from saspy.sasresults import SASresults
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
if isnotebook():
from saspy.sas_magic import SASMagic
get_ipython().register_magics(SASMagic)
|
'''
simple RPC over HTTP
'''
import threading
import json
import socket
try:
    from BaseHTTPServer import BaseHTTPRequestHandler
    from SocketServer import TCPServer
except ImportError:
    from http.server import BaseHTTPRequestHandler
    from socketserver import TCPServer
class Client:
def __init__(self, host, port):
self.timeout = 300
self.endpoint = "http://{0}:{1}".format(host, port)
def send(self, jsonDictionary):
'''
sends JSON over the HTTP POST and returns parsed JSON as result
no particular error checking is done as we trust our server in a way.
'''
data = json.dumps(jsonDictionary).encode("utf-8")
        try:
            from urllib2 import urlopen
        except ImportError:
            from urllib.request import urlopen
response = urlopen(self.endpoint, data=data, timeout=self.timeout)
return json.loads(response.read())
class Server:
def __init__(self, port):
self.port = port
self.instance = None
self.instance_thread = None
def run(self, requestHandlerMethod):
class RequestHandler(BaseHTTPRequestHandler):
            def do_GET(self):
                self.send_response(200)
                self.end_headers()
                self.wfile.write(b"maya tdd server\n")
def log_message(self, format, *args):
return
def do_POST(self):
request = self.rfile.read(int(self.headers['Content-Length']))
request = json.loads(request)
result = requestHandlerMethod(request)
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(result).encode("utf-8"))
self.instance = TCPServer(("", self.port), RequestHandler, bind_and_activate=False)
self.instance.allow_reuse_address = True
self.instance.server_bind()
self.instance.server_activate()
self.instance_thread = threading.Thread(target=self.instance.serve_forever)
self.instance_thread.start()
def stop(self):
if self.instance is not None:
self.instance.shutdown()
self.instance_thread.join()
self.instance.server_close()
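# Hedged usage sketch (not part of the original module): runs the Server above on a
# hypothetical local port and round-trips a JSON payload through the Client. The port
# number and the echo handler are illustrative assumptions only.
if __name__ == "__main__":
    def echo_handler(request):
        # server-side callback: receives the parsed JSON dict, returns a JSON-serializable dict
        return {"echo": request}
    server = Server(4794)
    server.run(echo_handler)
    try:
        client = Client("localhost", 4794)
        print(client.send({"hello": "maya"}))   # expected: {'echo': {'hello': 'maya'}}
    finally:
        server.stop()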
|
# -*- coding: utf-8 -*-
import os
import logging
import httplib2
from gtd_tasks import tasks_datamodel
from gtd_tasks import visitors
from gtd_tasks.gtd_model import Recipient
from apiclient.discovery import build
from django import forms
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from gtasks.models import CredentialsModel
from gtd_django import settings
from oauth2client import xsrfutil
from oauth2client.client import flow_from_clientsecrets
from oauth2client.django_orm import Storage
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), '..', 'client_secrets.json')
FLOW = flow_from_clientsecrets(
CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/tasks.readonly',
redirect_uri='http://localhost:8000/gtasks/oauth2callback')
def get_tasks_service(request):
storage = Storage(CredentialsModel, 'id', request.user, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY,
request.user)
authorize_url = FLOW.step1_get_authorize_url()
return HttpResponseRedirect(authorize_url)
else:
http = httplib2.Http()
http = credential.authorize(http)
tasks_service = build("tasks", "v1", http=http)
return tasks_service
@login_required
def index(request):
tasks_service = get_tasks_service(request)
tasklists_collection = \
tasks_datamodel.TasklistsCollection(tasks_datamodel.FileBackedService(tasks_service, 'mycache.txt'))
gtd_item_list = tasks_datamodel.get_model_from_gtasks(tasklists_collection)
next_lister = visitors.ListNextActions()
next_lister.visit(gtd_item_list)
result = visitors.FilterSet(gtd_item_list).filter(visitors.ListNextActions()).filter(visitors.Addresse(Recipient(u"xn")))
return render_to_response('gtasks/welcome.html', {
#'tasklists_col': next_lister.next_actions,
'tasklists_col': result.collection,
})
class InterlocutorForm(forms.Form):
interlocutor_filter = forms.BooleanField(required=False)
interlocutor = forms.ChoiceField()
class PlaneForm(forms.Form):
plane = forms.ChoiceField()
def pair_fix_empty(pair):
if not pair[1]:
return (pair[0], pair[0])
return pair
def generate_forms(documentation, post_data=None):
if post_data:
interlocutor_form = InterlocutorForm(post_data)
plane_form = PlaneForm(post_data)
else:
interlocutor_form = InterlocutorForm()
plane_form = PlaneForm()
interlocutor_form.fields['interlocutor'].choices = \
[pair_fix_empty(pair) for pair in [("None", ""), ("Any", "")] + documentation['Interlocutors']]
plane_form.fields['plane'].choices = \
[pair_fix_empty(pair) for pair in [("Any", "")] + documentation['Planes']]
return {'interlocutor_form': interlocutor_form, 'plane_form': plane_form}
@login_required
def next_actions(request):
tasks_service = get_tasks_service(request)
tasklists_collection = \
tasks_datamodel.TasklistsCollection(tasks_datamodel.FileBackedService(tasks_service, request.user.username+"_cache.txt"))
documentation = tasks_datamodel.get_documentation_from_gtasks(tasklists_collection)
listing = []
if request.method == 'POST':
form_dict = generate_forms(documentation, request.POST)
        if all(form.is_valid() for form in form_dict.values()):
root_item = tasks_datamodel.get_model_from_gtasks(tasklists_collection)
next_lister = visitors.ListNextActions()
next_lister.visit(root_item)
listing = next_lister.result()
else:
form_dict = generate_forms(documentation)
# interlocutor_form = InterlocutorForm()
# interlocutor_form.fields['interlocutor'].choices = \
# [pair_fix_empty(pair) for pair in [("None", ""), ("Any", "")] + documentation['Interlocutors']]
#
# plane_form = PlaneForm()
# plane_form.fields['plane'].choices = \
# [pair_fix_empty(pair) for pair in [("Any", "")] + documentation['Planes']]
return render_to_response('gtasks/list.html',
form_dict,
# form_dict.update(
# {
## 'interlocutor_form': interlocutor_form,
## 'plane_form': plane_form,
# 'tasklists_col': listing,
# }),
context_instance=RequestContext(request),
)
@login_required
def auth_return(request):
if not xsrfutil.validate_token(settings.SECRET_KEY, request.REQUEST['state'],
request.user):
return HttpResponseBadRequest()
credential = FLOW.step2_exchange(request.REQUEST)
storage = Storage(CredentialsModel, 'id', request.user, 'credential')
storage.put(credential)
return HttpResponseRedirect("gtasks/")
def logout(request):
    # Clear the Django auth session and return to the gtasks index.
    auth_logout(request)
    return HttpResponseRedirect("gtasks/")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2020-07-15
# @Filename: calibration.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import asyncio
import struct
from typing import Dict, List, Tuple
import numpy
from jaeger import config, log
from jaeger.commands import Command, CommandID
from jaeger.exceptions import JaegerError
from jaeger.maskbits import PositionerStatus as PS
from jaeger.utils import bytes_to_int, int_to_bytes, motor_steps_to_angle
__all__ = [
"calibrate_positioner",
"StartDatumCalibration",
"StartMotorCalibration",
"StartCoggingCalibration",
"SaveInternalCalibration",
"HallOn",
"HallOff",
]
MOTOR_STEPS = config["positioner"]["motor_steps"]
async def calibrate_positioner(
fps, positioner_id, motors=True, datums=True, cogging=True
):
"""Runs the calibration process and saves it to the internal memory.
Parameters
----------
fps : .FPS
The instance of `.FPS` that will receive the trajectory.
positioner_id : int
The ID of the positioner to calibrate.
motors : bool
Whether to perform the motor calibration.
datums : bool
Whether to perform the datums calibration.
cogging : bool
Whether to perform the cogging calibration (may take more
than one hour).
Raises
------
JaegerError
If encounters a problem during the process.
Examples
--------
::
>>> fps = FPS()
>>> await fps.initialise()
# Calibrate positioner 31.
>>> await calibrate_positioner(fps, 31)
"""
log.info(f"Calibrating positioner {positioner_id}.")
if positioner_id not in fps.positioners:
raise JaegerError(f"Positioner {positioner_id} not found.")
positioner = fps[positioner_id]
if fps.pollers.running:
log.debug("Stopping pollers")
await fps.pollers.stop()
if motors:
log.info("Starting motor calibration.")
cmd = await fps.send_command(
CommandID.START_MOTOR_CALIBRATION,
positioner_ids=positioner_id,
)
if cmd.status.failed:
raise JaegerError("Motor calibration failed.")
await asyncio.sleep(1)
await positioner.wait_for_status(
[
PS.DISPLACEMENT_COMPLETED,
PS.MOTOR_ALPHA_CALIBRATED,
PS.MOTOR_BETA_CALIBRATED,
]
)
else:
log.warning("Skipping motor calibration.")
if datums:
log.info("Starting datum calibration.")
cmd = await fps.send_command(
CommandID.START_DATUM_CALIBRATION,
positioner_ids=positioner_id,
)
if cmd.status.failed:
raise JaegerError("Datum calibration failed.")
await asyncio.sleep(1)
await positioner.wait_for_status(
[
PS.DISPLACEMENT_COMPLETED,
PS.DATUM_ALPHA_CALIBRATED,
PS.DATUM_BETA_CALIBRATED,
]
)
else:
log.warning("Skipping datum calibration.")
if cogging:
log.info("Starting cogging calibration.")
cmd = await fps.send_command(
CommandID.START_COGGING_CALIBRATION,
positioner_ids=positioner_id,
)
if cmd.status.failed:
raise JaegerError("Cogging calibration failed.")
await asyncio.sleep(1)
await positioner.wait_for_status(
[PS.COGGING_ALPHA_CALIBRATED, PS.COGGING_BETA_CALIBRATED]
)
else:
log.warning("Skipping cogging calibration.")
if motors or datums or cogging:
log.info("Saving calibration.")
cmd = await fps.send_command(
CommandID.SAVE_INTERNAL_CALIBRATION,
positioner_ids=positioner_id,
)
if cmd.status.failed:
raise JaegerError("Saving calibration failed.")
log.info(f"Positioner {positioner_id} has been calibrated.")
return
class StartDatumCalibration(Command):
"""Indicates that the transmission for the trajectory has ended."""
command_id = CommandID.START_DATUM_CALIBRATION
broadcastable = False
move_command = True
class StartMotorCalibration(Command):
"""Aborts sending a trajectory."""
command_id = CommandID.START_MOTOR_CALIBRATION
broadcastable = False
move_command = True
class StartCoggingCalibration(Command):
"""Starts the trajectories."""
command_id = CommandID.START_COGGING_CALIBRATION
broadcastable = False
move_command = True
class SaveInternalCalibration(Command):
"""Stop the trajectories."""
command_id = CommandID.SAVE_INTERNAL_CALIBRATION
broadcastable = False
move_command = False
class GetOffset(Command):
"""Gets the motor offsets."""
command_id = CommandID.GET_OFFSETS
broadcastable = False
safe = True
def get_replies(self) -> Dict[int, numpy.ndarray]:
return self.get_offsets()
def get_offsets(self) -> Dict[int, numpy.ndarray]:
"""Returns the alpha and beta offsets, in degrees.
Raises
------
ValueError
If no reply has been received or the data cannot be parsed.
"""
offsets = {}
for reply in self.replies:
pid = reply.positioner_id
data = reply.data
alpha = bytes_to_int(data[0:4], dtype="i4")
beta = bytes_to_int(data[4:], dtype="i4")
offsets[pid] = numpy.array(motor_steps_to_angle(alpha, beta))
return offsets
class SetOffsets(Command):
"""Sets the motor offsets."""
command_id = CommandID.SET_OFFSETS
broadcastable = False
safe = True
move_command = False
def __init__(
self,
positioner_ids: int | List[int],
alpha=None,
beta=None,
**kwargs,
):
if alpha is not None and beta is not None:
alpha_steps, beta_steps = motor_steps_to_angle(alpha, beta, inverse=True)
data = int_to_bytes(int(alpha_steps)) + int_to_bytes(int(beta_steps))
kwargs["data"] = data
super().__init__(positioner_ids, **kwargs)
class HallOn(Command):
"""Turns hall sensors ON."""
command_id = CommandID.HALL_ON
broadcastable = False
move_command = False
safe = True
class HallOff(Command):
"""Turns hall sensors ON."""
command_id = CommandID.HALL_OFF
broadcastable = False
move_command = False
safe = True
class SetHoldingCurrents(Command):
"""Sets the motors holding currents."""
command_id = CommandID.SET_HOLDING_CURRENT
broadcastable = False
safe = True
move_command = False
def __init__(self, positioner_ids, alpha=None, beta=None, **kwargs):
if alpha is not None and beta is not None:
data = int_to_bytes(int(alpha)) + int_to_bytes(int(beta))
kwargs["data"] = data
super().__init__(positioner_ids, **kwargs)
class GetHoldingCurrents(Command):
"""Gets the motor offsets."""
command_id = CommandID.GET_HOLDING_CURRENT
broadcastable = False
safe = True
def get_replies(self) -> Dict[int, numpy.ndarray]:
return self.get_holding_currents()
def get_holding_currents(self) -> Dict[int, numpy.ndarray]:
"""Returns the alpha and beta holding currents, in percent.
Raises
------
ValueError
If no reply has been received or the data cannot be parsed.
"""
currents = {}
for reply in self.replies:
data = reply.data
alpha = bytes_to_int(data[0:4], dtype="i4")
beta = bytes_to_int(data[4:], dtype="i4")
currents[reply.positioner_id] = numpy.array([alpha, beta])
return currents
class PreciseMoveAlphaOn(Command):
"""Turns precise move on alpha ON."""
command_id = CommandID.SWITCH_ON_PRECISE_MOVE_ALPHA
broadcastable = False
move_command = False
safe = True
class PreciseMoveAlphaOff(Command):
"""Turns precise move on alpha OFF."""
command_id = CommandID.SWITCH_OFF_PRECISE_MOVE_ALPHA
broadcastable = False
move_command = False
safe = True
class PreciseMoveBetaOn(Command):
"""Turns precise move on beta ON."""
command_id = CommandID.SWITCH_ON_PRECISE_MOVE_BETA
broadcastable = False
move_command = False
safe = True
class PreciseMoveBetaOff(Command):
"""Turns precise move on beta OFF."""
command_id = CommandID.SWITCH_OFF_PRECISE_MOVE_BETA
broadcastable = False
move_command = False
safe = True
class SetIncreaseCollisionMargin(Command):
"""Sets the buffer for collision margin."""
command_id = CommandID.SET_INCREASE_COLLISION_MARGIN
broadcastable = False
move_command = False
safe = False
def __init__(self, positioner_ids, buffer: int, **kwargs):
data = int_to_bytes(int(buffer))
kwargs["data"] = data
super().__init__(positioner_ids, **kwargs)
class GetAlphaHallCalibration(Command):
command_id = CommandID.GET_ALPHA_HALL_CALIB
broadcastable = False
move_command = False
safe = True
def get_replies(self) -> Dict[int, Tuple[int, int, int, int]]:
return self.get_values()
def get_values(self) -> dict[int, Tuple[int, int, int, int]]:
"""Returns the ``maxA, maxB, minA, minB`` values."""
values = {}
for reply in self.replies:
values[reply.positioner_id] = struct.unpack("HHHH", reply.data)
return values
class GetBetaHallCalibration(Command):
command_id = CommandID.GET_BETA_HALL_CALIB
broadcastable = False
move_command = False
safe = True
def get_replies(self) -> Dict[int, Tuple[int, int, int, int]]:
return self.get_values()
def get_values(self) -> dict[int, Tuple[int, int, int, int]]:
"""Returns the ``maxA, maxB, minA, minB`` values."""
values = {}
for reply in self.replies:
values[reply.positioner_id] = struct.unpack("HHHH", reply.data)
return values
class GetHallCalibrationError(Command):
command_id = CommandID.GET_HALL_CALIB_ERROR
broadcastable = False
move_command = False
safe = True
def get_replies(self) -> Dict[int, Tuple[int, int]]:
return self.get_values()
def get_values(self) -> dict[int, Tuple[int, int]]:
"""Returns the alpha and beta error values."""
values = {}
for reply in self.replies:
values[reply.positioner_id] = struct.unpack("ii", reply.data)
return values
|
# coding: utf-8
"""
@brief test log(time=1s)
"""
import unittest
import pandas
import numpy
from scipy.sparse.linalg import lsqr as sparse_lsqr
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from pandas_streaming.df import pandas_groupby_nan, numpy_types
class TestPandasHelper(ExtTestCase):
def test_pandas_groupbynan(self):
self.assertTrue(sparse_lsqr is not None)
types = [(int, -10), (float, -20.2), (str, "e"),
(bytes, bytes("a", "ascii"))]
skip = (numpy.bool_, numpy.complex64, numpy.complex128)
types += [(_, _(5)) for _ in numpy_types() if _ not in skip]
for ty in types:
data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "tt2=" +
str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "row_for_nan"}]
df = pandas.DataFrame(data)
gr = pandas_groupby_nan(df, "value")
co = gr.sum()
li = list(co["value"])
try:
self.assertIsInstance(li[-1], float)
except AssertionError as e:
raise AssertionError("Issue with {0}".format(ty)) from e
try:
self.assertTrue(numpy.isnan(li[-1]))
except AssertionError as e:
raise AssertionError(
"Issue with value {}\n--df--\n{}\n--gr--\n{}\n--co--\n{}".format(
li, df, gr.count(), co)) from e
for ty in types:
data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "tt2=" +
str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "row_for_nan"}]
df = pandas.DataFrame(data)
            try:
                gr = pandas_groupby_nan(df, ("value", "this"))
                t = True
                # grouping by a tuple is expected to raise TypeError; if it did not,
                # this generic Exception propagates and fails the test loudly
                raise Exception("---")
            except TypeError:
                t = False
if t:
co = gr.sum()
li = list(co["value"])
self.assertIsInstance(li[-1], float)
self.assertTrue(numpy.isnan(li[-1]))
try:
gr = pandas_groupby_nan(df, ["value", "this"])
t = True
except (TypeError, NotImplementedError):
t = False
if t:
co = gr.sum()
li = list(co["value"])
self.assertEqual(len(li), 2)
def test_pandas_groupbynan_tuple(self):
data = [dict(a="a", b="b", c="c", n=1), dict(
b="b", n=2), dict(a="a", n=3), dict(c="c", n=4)]
df = pandas.DataFrame(data)
gr = df.groupby(["a", "b", "c"]).sum()
self.assertEqual(gr.shape, (1, 1))
for nanback in [True, False]:
try:
gr2_ = pandas_groupby_nan(
df, ["a", "b", "c"], nanback=nanback, suffix="NAN")
except NotImplementedError:
continue
gr2 = gr2_.sum().sort_values("n")
self.assertEqual(gr2.shape, (4, 4))
d = gr2.to_dict("records")
self.assertEqual(d[0]["a"], "a")
self.assertEqual(d[0]["b"], "b")
self.assertEqual(d[0]["c"], "c")
self.assertEqual(d[0]["n"], 1)
self.assertEqual(d[1]["a"], "NAN")
def test_pandas_groupbynan_regular(self):
df = pandas.DataFrame([dict(a="a", b=1), dict(a="a", b=2)])
gr = df.groupby(["a"]).sum()
gr2_ = pandas_groupby_nan(df, ["a"]).sum()
self.assertEqualDataFrame(gr, gr2_)
def test_pandas_groupbynan_regular_nanback(self):
df = pandas.DataFrame([dict(a="a", b=1, cc=0), dict(a="a", b=2)])
gr = df.groupby(["a", "cc"]).sum()
self.assertEqual(len(gr), 1)
self.assertRaise(
lambda: pandas_groupby_nan(df, ["a", "cc"], nanback=True).sum(),
NotImplementedError)
def test_pandas_groupbynan_doc(self):
data = [dict(a=2, ind="a", n=1),
dict(a=2, ind="a"),
dict(a=3, ind="b"),
dict(a=30)]
df = pandas.DataFrame(data)
gr2 = pandas_groupby_nan(df, ["ind"]).sum()
ind = list(gr2['ind'])
self.assertTrue(numpy.isnan(ind[-1]))
val = list(gr2['a'])
self.assertEqual(val[-1], 30)
@ignore_warnings(UserWarning)
def test_pandas_groupbynan_doc2(self):
data = [dict(a=2, ind="a", n=1),
dict(a=2, ind="a"),
dict(a=3, ind="b"),
dict(a=30)]
df = pandas.DataFrame(data)
gr2 = pandas_groupby_nan(df, ["ind", "a"], nanback=False).sum()
ind = list(gr2['ind'])
self.assertEqual(ind[-1], "²nan")
def test_pandas_groupbynan_doc3(self):
data = [dict(a=2, ind="a", n=1),
dict(a=2, ind="a"),
dict(a=3, ind="b"),
dict(a=30)]
df = pandas.DataFrame(data)
self.assertRaise(lambda: pandas_groupby_nan(df, ["ind", "n"]).sum(),
NotImplementedError)
# ind = list(gr2['ind'])
# self.assertTrue(numpy.isnan(ind[-1]))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
"""
@package mi.dataset.driver.adcpa_n
@file mi/dataset/driver/adcpa_n/auv/adcpa_n_recovered_driver.py
@author Jeff Roy
@brief Driver for the adcpa_n instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.adcp_pd0 import AdcpPd0Parser
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
@version("15.8.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rb') as stream_handle:
        # create an instance of the concrete driver class defined below
driver = AdcpaNRecoveredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
class AdcpaNRecoveredDriver(SimpleDatasetDriver):
"""
Derived adcpa_n driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
config = {
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
'velocity': 'VelocityInst',
'engineering': 'AuvEngineering',
'config': 'AuvConfig',
'bottom_track': 'InstBottom',
'bottom_track_config': 'BottomConfig',
}
}
return AdcpPd0Parser(config, stream_handle, self._exception_callback)
|
# -*- coding: utf-8 -*-
# File: layer_norm.py
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..utils.argtools import get_data_format
from ..utils.develop import log_deprecated
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args
__all__ = ['LayerNorm', 'InstanceNorm']
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
})
def LayerNorm(
x, epsilon=1e-5, *,
center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last'):
"""
Layer Normalization layer, as described in the paper:
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
Args:
x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format.
epsilon (float): epsilon to avoid divide-by-zero.
center, scale (bool): whether to use the extra affine transformation or not.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims in {2, 4}
mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)
if data_format == 'NCHW':
chan = shape[1]
new_shape = [1, chan, 1, 1]
else:
chan = shape[-1]
new_shape = [1, 1, 1, chan]
if ndims == 2:
new_shape = [1, chan]
if center:
beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1] * ndims, name='beta')
if scale:
gamma = tf.get_variable('gamma', [chan], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1] * ndims, name='gamma')
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'gamma_init': 'gamma_initializer',
})
def InstanceNorm(x, epsilon=1e-5, *, center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last', use_affine=None):
"""
Instance Normalization, as in the paper:
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_.
Args:
x (tf.Tensor): a 4D tensor.
epsilon (float): avoid divide-by-zero
center, scale (bool): whether to use the extra affine transformation or not.
use_affine: deprecated. Don't use.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = x.get_shape().as_list()
assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"
if use_affine is not None:
log_deprecated("InstanceNorm(use_affine=)", "Use center= or scale= instead!", "2020-06-01")
center = scale = use_affine
if data_format == 'NHWC':
axis = [1, 2]
ch = shape[3]
new_shape = [1, 1, 1, ch]
else:
axis = [2, 3]
ch = shape[1]
new_shape = [1, ch, 1, 1]
assert ch is not None, "Input of InstanceNorm require known channel!"
mean, var = tf.nn.moments(x, axis, keep_dims=True)
if center:
beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1, 1, 1, 1], name='beta', dtype=x.dtype)
if scale:
gamma = tf.get_variable('gamma', [ch], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1, 1, 1, 1], name='gamma', dtype=x.dtype)
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
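# Illustrative sketch only (not part of tensorpack): shows with plain NumPy which axes
# the two layers above reduce over for a 4D NHWC tensor, mirroring the tf.nn.moments
# calls. The array contents are arbitrary.
def _norm_axes_example():
    import numpy as np
    x = np.random.rand(2, 8, 8, 3)                   # [batch, H, W, C]
    # LayerNorm: one mean/var per sample, over all non-batch axes (H, W, C)
    ln_mean = x.mean(axis=(1, 2, 3), keepdims=True)  # shape (2, 1, 1, 1)
    # InstanceNorm (NHWC): one mean/var per sample and channel, over the spatial axes
    in_mean = x.mean(axis=(1, 2), keepdims=True)     # shape (2, 1, 1, 3)
    return ln_mean.shape, in_mean.shape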
|
from micropython import const
import ntptime
import ubinascii
import uhashlib
import uos
import ure
import utime
import uuurequests
import config
IRQ_SCAN_RESULT = const(5)
IRQ_SCAN_DONE = const(6)
EPOCH_OFFSET = const(946681200) # seconds between 1970 and 2000
ONBOARD_LED = const(2)
__DEPOT_SSIDS = set()
__DEPOT_MACS = set()
def removeIgnoredSSIDs(nets):
new_nets = []
compiled_regex = []
for regex in config.SSID_EXCLUDE_REGEX:
compiled_regex.append(ure.compile(regex))
for net in nets:
ssid, mac, channel, rssi, authmode, hidden = net
isIgnored = False
if hidden:
continue
for prefix in config.SSID_EXCLUDE_PREFIX:
if ssid.startswith(prefix):
isIgnored = True
break
if isIgnored:
continue
for suffix in config.SSID_EXCLUDE_SUFFIX:
if ssid.endswith(suffix):
isIgnored = True
break
if isIgnored:
continue
for r in compiled_regex:
if r.match(ssid):
isIgnored = True
break
if isIgnored:
continue
new_nets.append(net)
return new_nets
def second_to_millisecond(i: int) -> int:
return i * const(1000)
def second_to_microsecond(i: int) -> int:
return i * const(1000 * 1000)
def now() -> int:
return utime.time() + EPOCH_OFFSET
def syslog(categorie: str, message: str):
print("-- [{}] -- {}".format(categorie, message))
def openFile(filename: str):
try:
return open(filename, "r+b")
except OSError:
return open(filename, "w+b")
def syncTime():
try:
ntptime.settime()
syslog("Time", "Synced via NTP.")
except Exception as e:
syslog("Time", "Error getting NTP: {}".format(e))
def otaUpdateConfig():
try:
r = uuurequests.get(
"{}/config?client_id={}".format(config.OTA_URL, config.CLIENT_ID)
)
if (r.status_code == 200) and (
ubinascii.unhexlify(r.headers["Hash"])
== uhashlib.sha256(r.content).digest()
):
with openFile("new_config.py") as f:
f.write(r.content)
uos.rename("new_config.py", "config.py")
syslog("OTA", "Updated config.py")
else:
syslog("OTA", "Hash mismatch, cowardly refusing to install update!")
except Exception as e:
syslog("OTA", "Error getting updates: {}".format(e))
def prepareDepotWifiSets():
for ssid in config.DEPOT_SSIDS:
__DEPOT_SSIDS.add(ssid)
for mac in config.DEPOT_MACS:
mac = mac.replace(":", "")
mac = mac.replace("-", "")
mac = mac.replace(" ", "")
__DEPOT_MACS.add(ubinascii.unhexlify(mac))
def isDepotWifi(ssid: str, mac: bytes) -> bool:
if ssid in __DEPOT_SSIDS:
return True
if mac in __DEPOT_MACS:
return True
return False
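# Hedged example (not part of the original module): intended use of the depot-WiFi
# helpers above once config.DEPOT_SSIDS / config.DEPOT_MACS are populated. The SSID
# and MAC below are made-up illustration values.
def _depot_wifi_example():
    prepareDepotWifiSets()
    ssid = "depot-ap"                          # hypothetical depot SSID
    mac = ubinascii.unhexlify("aabbccddeeff")  # hypothetical BSSID as raw bytes
    return isDepotWifi(ssid, mac)              # True only if listed in the config sets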
|
import ctypes
import logging
logger = logging.getLogger("elasticapm.utils")
class TraceParent(object):
__slots__ = ("version", "trace_id", "span_id", "trace_options")
def __init__(self, version, trace_id, span_id, trace_options):
self.version = version
self.trace_id = trace_id
self.span_id = span_id
self.trace_options = trace_options
def copy_from(self, version=None, trace_id=None, span_id=None, trace_options=None):
return TraceParent(
version or self.version,
trace_id or self.trace_id,
span_id or self.span_id,
trace_options or self.trace_options,
)
def to_ascii(self):
return u"{:02x}-{}-{}-{:02x}".format(
self.version, self.trace_id, self.span_id, self.trace_options.asByte
).encode("ascii")
@classmethod
def from_string(cls, traceparent_string):
try:
parts = traceparent_string.split("-")
version, trace_id, span_id, trace_flags = parts[:4]
except ValueError:
logger.debug("Invalid traceparent header format, value %s", traceparent_string)
return
try:
version = int(version, 16)
if version == 255:
raise ValueError()
except ValueError:
logger.debug("Invalid version field, value %s", version)
return
try:
tracing_options = TracingOptions()
tracing_options.asByte = int(trace_flags, 16)
except ValueError:
logger.debug("Invalid trace-options field, value %s", trace_flags)
return
return TraceParent(version, trace_id, span_id, tracing_options)
class TracingOptions_bits(ctypes.LittleEndianStructure):
_fields_ = [("recorded", ctypes.c_uint8, 1)]
class TracingOptions(ctypes.Union):
_anonymous_ = ("bit",)
_fields_ = [("bit", TracingOptions_bits), ("asByte", ctypes.c_uint8)]
def __init__(self, **kwargs):
super(TracingOptions, self).__init__()
for k, v in kwargs.items():
setattr(self, k, v)
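# Minimal usage sketch (not part of the original module): parsing and re-serializing a
# W3C-style traceparent header with the classes above. The header value is a made-up
# example; real IDs are produced by the tracing agent.
if __name__ == "__main__":
    header = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"
    parent = TraceParent.from_string(header)
    # trace_options is a ctypes union, so the sampled flag is exposed as a named bit
    print(parent.trace_id, bool(parent.trace_options.recorded))
    print(parent.to_ascii())   # round-trips back to the ASCII header form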
|
from lpdm_missing_power_source_manager import LpdmMissingPowerSourceManager
from lpdm_battery_discharge_while_charging import LpdmBatteryDischargeWhileCharging
from lpdm_battery_not_discharging import LpdmBatteryNotDischarging
from lpdm_battery_already_discharging import LpdmBatteryAlreadyDischarging
from lpdm_battery_cannot_discharge import LpdmBatteryCannotDischarge
from lpdm_battery_cannot_charge import LpdmBatteryCannotCharge
from lpdm_battery_charge_while_discharging import LpdmBatteryChargeWhileDischarging
from lpdm_battery_already_charging import LpdmBatteryAlreadyCharging
|
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from Platforms.Web.main_web import PhaazebotWeb
import json
from aiohttp.web import Response, Request
from Utils.Classes.storagetransformer import StorageTransformer
from Utils.Classes.webrequestcontent import WebRequestContent
from Utils.Classes.webrole import WebRole
from Utils.Classes.undefined import UNDEFINED
from Platforms.Web.db import getWebRoles
DEFAULT_LIMIT:int = 50
async def apiAdminRolesGet(cls:"PhaazebotWeb", WebRequest:Request) -> Response:
"""
Default url: /api/admin/roles/get
"""
Data:WebRequestContent = WebRequestContent(WebRequest)
await Data.load()
Search:StorageTransformer = StorageTransformer()
# get required stuff
Search["role_id"] = Data.getInt("role_id", UNDEFINED, min_x=1)
Search["name"] = Data.getStr("name", UNDEFINED)
Search["name_contains"] = Data.getStr("name_contains", UNDEFINED)
Search["can_be_removed"] = Data.getInt("can_be_removed", UNDEFINED)
Search["limit"] = Data.getInt("limit", DEFAULT_LIMIT, min_x=1)
Search["offset"] = Data.getInt("offset", 0, min_x=1)
# custom
Search["for_user_id"] = Data.getInt("for_user_id", UNDEFINED, min_x=1)
if Search["for_user_id"] != UNDEFINED:
res:List[dict] = cls.BASE.PhaazeDB.selectQuery("""
SELECT `web_user+web_role`.`role_id` AS `rid`
FROM `web_user+web_role`
WHERE `web_user+web_role`.`user_id` = %s""",
(int(Search["for_user_id"]),)
)
rid_list:str = ','.join(str(x["rid"]) for x in res)
if not rid_list: rid_list = "0"
Search["overwrite_where"] = f" AND `web_role`.`id` IN ({rid_list})"
res_roles:List[WebRole] = await getWebRoles(cls, **Search.getAllTransform())
result:dict = dict(
result=[Role.toJSON() for Role in res_roles],
limit=Search["limit"],
offset=Search["offset"],
total=await getWebRoles(cls, count_mode=True, **Search.getAllTransform()),
status=200
)
return cls.response(
text=json.dumps(result),
content_type="application/json",
status=200
)
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from .. import cntk_py
from ..tensor import ArrayMixin
from ..utils import typemap, value_to_seq
from cntk.device import use_default_device
import numpy as np
INFINITELY_REPEAT = cntk_py.MinibatchSource.infinitely_repeat
FULL_DATA_SWEEP = cntk_py.MinibatchSource.full_data_sweep
INFINITE_SAMPLES = cntk_py.MinibatchSource.infinite_samples
DEFAULT_RANDOMIZATION_WINDOW = cntk_py.MinibatchSource.default_randomization_window
class MinibatchData(cntk_py.MinibatchData, ArrayMixin):
'''
Holds a minibatch of input data. This is never directly created, but
only returned by :class:`MinibatchSource` instances.
'''
@property
def num_sequences(self):
'''
The number of sequences in this minibatch
'''
return self.m_num_sequences
@property
def num_samples(self):
'''
The number of samples in this minibatch
'''
return self.m_num_samples
@property
def value(self):
'''
The value of the minibatch as a NumPy array.
'''
return value_to_seq(self.m_data)
@property
def shape(self):
'''
The shape of the data in this minibatch as tuple.
'''
return self.m_data.shape().dimensions()
@property
def mask(self):
'''
The mask object of the minibatch. In it, `2` marks the beginning of a
sequence, `1` marks a sequence element as valid, and `0` marks it as
invalid.
'''
return self.m_data.mask().to_ndarray()
@property
def is_sparse(self):
'''
Whether the data in this minibatch is sparse.
'''
return self.m_data.is_sparse()
def __len__(self):
return self.num_sequences
class MinibatchSource(cntk_py.MinibatchSource):
'''
Parent class of all minibatch sources. For most cases you will need the
helper functions :func:`text_format_minibatch_source` or
:func:`minibatch_source`.
A `MinibatchSource` can be indexed by the stream name, which will return a
:class:`MinibatchData` object that can be passed e.g. to the
:func:`~cntk.trainer.Trainer.train_minibatch` function.
Args:
deserializers ('list', default is empty): list of deserializers
randomize (bool, default True): randomize before every epoch
randomization_window (int) : size of window that reader will shuffle, ignored if `randomize` is False
epoch_size (int): epoch size
distributed_after (int): sample count after which minibatch source becomes distributed
multithreaded_deserializer (bool): using multi threaded deserializer
'''
def __init__(self, deserializers=None, randomize=True, randomization_window=DEFAULT_RANDOMIZATION_WINDOW, epoch_size=INFINITELY_REPEAT, distributed_after=INFINITE_SAMPLES, multithreaded_deserializer=None):
if not isinstance(deserializers, (list,tuple)):
deserializers = [deserializers] # allow passing a single item or a list
reader_config = ReaderConfig(
deserializers=deserializers,
randomize=randomize,
randomization_window=randomization_window,
epoch_size=epoch_size,
distributed_after=distributed_after,
multithreaded_deserializer=multithreaded_deserializer)
source = minibatch_source(reader_config)
# transplant into this class instance
self.__dict__ = source.__dict__
# transplant all members of deserializers into a record called streams
streams = {}
for si in self.stream_infos():
streams[si.m_name] = si
from ..utils import Record
self.streams = Record(**streams)
def stream_infos(self):
'''
        Describes the streams that this source produces.
Returns:
dict mapping input names to the stream information
'''
return super(MinibatchSource, self).stream_infos()
def stream_info(self, name):
'''
Gets the description of the stream with given name.
Throws an exception if there are none or multiple streams with this
same name.
'''
return super(MinibatchSource, self).stream_info(name)
def __getitem__(self, name):
'''
Return the :class:`StreamInfo` for the given stream name
Args:
name (str): stream name to fetch :class:`StreamInfo` for
'''
return self.stream_info(name)
@typemap
def next_minibatch(self, minibatch_size_in_samples,
input_map=None, device=None):
'''
Reads a minibatch that contains data for all input streams. The
minibatch size is specified in terms of #samples and/or #sequences for the
primary input stream; value of 0 for #samples/#sequences means
unspecified. In case the size is specified in terms of both #sequences
and #samples, the smaller of the 2 is taken. An empty map is returned
when the MinibatchSource has no more data to return.
Args:
minibatch_size_in_samples (int): number of samples to retrieve for
the next minibatch. Must be > 0.
            input_map (dict): mapping of :class:`~cntk.ops.variables.Variable`
to :class:`StreamInformation` which will be used to convert the
returned data.
device (`DeviceDescriptor`, defaults to `None`): CNTK DeviceDescriptor
Returns:
            A mapping of :class:`StreamInformation` to :class:`MinibatchData` if
            ``input_map`` was not specified. Otherwise, the returned value will
            be a mapping of :class:`~cntk.ops.variables.Variable` to :class:`MinibatchData`.
'''
if device is None:
device = use_default_device()
mb = super(MinibatchSource, self).get_next_minibatch(
minibatch_size_in_samples, device)
if input_map:
if not mb:
return {}
else:
return { key : mb[value] for (key, value) in input_map.items() }
else:
return mb
def get_checkpoint_state(self):
'''
Gets the checkpoint state of the MinibatchSource.
Returns:
:class:`~cntk_py.Dictionary`
'''
return super(MinibatchSource, self).get_checkpoint_state()
def restore_from_checkpoint(self, checkpoint):
'''
Restores the MinibatchSource state from the specified checkpoint.
Args:
checkpoint (:class:`~cntk_py.Dictionary`): checkpoint to restore from
'''
super(MinibatchSource, self).restore_from_checkpoint(checkpoint)
@property
def is_distributed(self):
'''
Whether the minibatch source is running distributed
'''
return super(MinibatchSource, self).is_distributed()
def _py_dict_to_cntk_dict(py_dict):
'''
Converts a Python dictionary into a CNTK Dictionary whose values are CNTK DictionaryValue instances.
Args:
py_dict (dict): a dictionary to be converted.
Returns:
:class:`~cntk_py.Dictionary`
'''
res = cntk_py.Dictionary()
for k, v in py_dict.items():
if isinstance(v, dict):
res[k] = cntk_py.DictionaryValueFromDict(_py_dict_to_cntk_dict(v))
# TODO: add support to list of lists ?
elif isinstance(v, list):
l = []
for e in v:
if isinstance(e, dict):
l.append(cntk_py.DictionaryValueFromDict(
_py_dict_to_cntk_dict(e)))
else:
l.append(cntk_py.DictionaryValue(e))
res[k] = cntk_py.DictionaryValue(l)
else:
res[k] = cntk_py.DictionaryValue(v)
return res
# TODO: This should be a private function; use MinibatchSource(deserializer, ...).
@typemap
def minibatch_source(config):
'''
Instantiate the CNTK built-in composite minibatch source which is used to stream data into the network.
Args:
config (dict): a dictionary containing all the key-value configuration entries.
Returns:
:class:`MinibatchSource`
'''
cntk_dict = _py_dict_to_cntk_dict(config)
return cntk_py.create_composite_minibatch_source(cntk_dict)
# TODO: This should be a private class.
class ReaderConfig(dict):
'''
Reader configuration.
Args:
deserializers ('list', default is empty): list of deserializers
(:class:`ImageDeserializer` for now).
randomize (bool, default True): randomize images before every epoch
randomization_window (int) : size of window that reader will shuffle, ignored if `randomize` is False
epoch_size (int): epoch size
distributed_after (int): sample count after which reader becomes distributed
multithreaded_deserializer (bool): using multi threaded deserializer
'''
def __init__(self, deserializers=None, randomize=True, randomization_window=DEFAULT_RANDOMIZATION_WINDOW, epoch_size=INFINITELY_REPEAT, distributed_after=INFINITE_SAMPLES, multithreaded_deserializer=None):
self['epochSize'] = cntk_py.SizeTWrapper(epoch_size) # force to store in size_t
if not isinstance(deserializers, (list, tuple)):
deserializers = [deserializers]
self['deserializers'] = self.deserializers = deserializers or []
self['randomize'] = randomize
self['randomizationWindow'] = cntk_py.SizeTWrapper(randomization_window)
self['distributedAfterSampleCount'] = cntk_py.SizeTWrapper(distributed_after)
        if multithreaded_deserializer is not None:
self['multiThreadedDeserialization'] = multithreaded_deserializer
@typemap
def minibatch_source(self):
'''
Creates an instance of :class:`MinibatchSource` from this
instance, which can be used to feed data into the `eval()` methods of
the graph nodes or the `train_minibatch()` of :class:`~cntk.trainer.Trainer`.
Returns:
instance of :class:`MinibatchSource`
'''
return minibatch_source(self)
class Deserializer(dict):
'''
Base deserializer class that can be used in the :class:`ReaderConfig`. A
deserializer is responsible for deserialization of input from external
storage into in-memory sequences.
Currently CNTK supports the below deserializers:
========================== ============
Deserializer type Description
========================== ============
:class:`ImageDeserializer` Deserializer for images that uses OpenCV
:class:`CTFDeserializer` Deserializer for text of the `CNTKTextReader format <https://github.com/microsoft/cntk/wiki/CNTKTextFormat-Reader>`_
========================== ============
Args:
type (str): type of the deserializer
See also:
https://github.com/microsoft/cntk/wiki/Understanding-and-Extending-Readers
'''
def __init__(self, type):
self['type'] = type
class ImageDeserializer(Deserializer):
'''
This class configures the image reader that reads images and corresponding
labels from a file of the form::
<full path to image> <tab> <numerical label (0-based class id)>
or::
sequenceId <tab> path <tab> label
Args:
filename (str): file name of the map file that associates images to
classes
See also:
`Image reader definition <https://github.com/microsoft/cntk/wiki/Image-reader>`_
'''
def __init__(self, filename, streams=None):
super(ImageDeserializer, self).__init__('ImageDeserializer')
self['file'] = filename
self['input'] = self.input = {}
# In ImageDeserializer, stream field names are hard-coded as "image" and "label".
# These are configured in a somewhat inconsistent way.
if streams is not None:
for key in streams:
s = streams[key]
node = s.stream_alias
if node == "image":
# BUGBUG: Can dim not be specified as well?
# TODO: clean this up and use a unified internal representation
self.map_features(key, s.transforms)
elif node == "label":
self.map_labels(key, s.dim)
else:
raise ValueError("ImageDeserializer: invalid field name '{}', allowed are 'image' and 'label'".format(node))
# TODO: should be a private method; use constructor only
def map_features(self, node, transforms):
'''
Maps feature node (either node instance or node name) to the transforms
that will be applied to the images. It is usually applied to the input
of the network with data augmentation.
Args:
node (str or input node): node or its name
transforms (`list` of transforms): the transforms can be created by
the static methods `crop`, `scale`, or `mean`.
'''
if not isinstance(node, str):
node = node.name()
if not isinstance(transforms, list):
transforms = [transforms] if transforms else []
self.input[node] = dict(transforms=transforms)
# TODO: should be a private method; use constructor only
def map_labels(self, node, num_classes):
'''
Maps label node (either node instance or node name)
that will be applied to the images. It is usually used to define the
ground truth of train or test.
Args:
node (str or input node): node or its name
num_classes (int): number of classes
'''
if not isinstance(node, str):
node = node.name()
self.input[node] = dict(labelDim=num_classes) # reader distinguishes labels from features by calling this 'labelDim'
@staticmethod
def crop(crop_type='center', ratio=1.0, jitter_type='uniRatio'):
'''
Crop transform that can be used to pass to `map_features`
Args:
crop_type (str, default 'center'): 'center' or 'random'. 'random'
is usually used during training while 'center' is usually for testing.
Random cropping is a popular data augmentation technique used to improve
generalization of the DNN.
ratio (`float`, default 1.0): crop ratio. It specifies the ratio of
final image dimension, e.g. width , to the size of the random crop
taken from the image. For example, the ratio 224 / 256 = 0.875 means
crop of size 224 will be taken from the image rescaled to 256 (implementation
detail: ImageReader takes the crop and then rescales instead of doing
the other way around). To enable scale jitter (another popular data
augmentation technique), use colon-delimited values like cropRatio=0.875:0.466
which means 224 crop will be taken from images randomly scaled to have
size in [256, 480] range.
jitter_type (str, default 'uniRatio'): crop scale jitter type, possible
values are 'None', 'UniRatio'. 'uniRatio' means uniform distributed jitter
scale between the minimum and maximum cropRatio values.
Returns:
dict describing the crop transform
'''
return dict(type='Crop', cropType=crop_type, cropRatio=ratio,
jitterType=jitter_type)
@staticmethod
def scale(width, height, channels, interpolations='linear', scale_mode="fill", pad_value=-1):
'''
Scale transform that can be used to pass to `map_features` for data augmentation.
Args:
width (int): width of the image in pixels
height (int): height of the image in pixels
channels (int): channels of the image
interpolations (str, default 'linear'): possible values are
'nearest', 'linear', 'cubic', and 'lanczos'
scale_mode (str, default 'fill'): 'fill', 'crop' or 'pad'.
'fill' - warp the image to the given target size.
'crop' - resize the image's shorter side to the given target size and crop the overlap.
'pad' - resize the image's larger side to the given target size, center it and pad the rest
pad_value (int, default -1): -1 or int value. The pad value used for the 'pad' mode.
If set to -1 then the border will be replicated.
Returns:
dict describing the scale transform
'''
return dict(type='Scale', width=width, height=height, channels=channels,
interpolations=interpolations, scaleMode=scale_mode, padValue=pad_value)
@staticmethod
def mean(filename):
'''
Mean transform that can be used to pass to `map_features` for data augmentation.
Args:
filename (str): file that stores the mean values for each pixel
in OpenCV matrix XML format
Returns:
dict describing the mean transform
'''
return dict(type='Mean', meanFile=filename)
# TODO color transpose
class CTFDeserializer(Deserializer):
'''
This class configures the text reader that reads text-encoded files from a
file with lines of the form::
[Sequence_Id](Sample)+
where::
Sample=|Input_Name (Value )*
Args:
filename (str): file name containing the text input
See also:
`CNTKTextReader format <https://github.com/microsoft/cntk/wiki/CNTKTextFormat-Reader>`_
'''
def __init__(self, filename, streams=None):
super(CTFDeserializer, self).__init__('CNTKTextFormatDeserializer')
self['file'] = filename
self['input'] = self.input = {}
# connect all streams (: StreamDef) if given
if streams is not None:
for key in streams:
s = streams[key]
# TODO: guard against any other fields, such as transformers, which is not valid here
self.map_input(key, s.dim, "sparse" if s.is_sparse else "dense", alias=s.stream_alias)
# TODO: should be a private method; use constructor only
def map_input(self, node, dim, format="dense", alias=None):
'''
Maps node (either node instance or node name) to a part of the text input,
either specified by the node name or the alias in the text file.
Example: for node name 'input0' an input line could look like this::
|input0 3 7 1 0 2
Args:
node (str or input node): node or its name
dim (int): specifies the dimension of the input value vector
(for dense input this directly corresponds to the number of values in each sample,
for sparse this represents the upper bound on the range of possible index values).
format (str, default 'dense'): 'dense' or 'sparse'. Specifies the input type.
alias (str, default None): None or alias name. Optional abbreviated name that
is used in the text file to avoid repeating long input names. For details please
see `CNTKTextReader format <https://github.com/microsoft/cntk/wiki/CNTKTextFormat-Reader>`_
'''
if not isinstance(node, str):
node = node.name()
if alias is None:
alias=node
self.input[node] = dict(dim=dim, format=format, alias=alias)
# TODO: this should be a private class; use StreamDef instead
class StreamConfiguration(cntk_py.StreamConfiguration):
'''
Configuration of a stream in a text format reader. This can be used in
:func:`text_format_minibatch_source`.
Args:
name (str): name of this stream
dim (int): dimensions of this stream. A text format reader reads data
as flat arrays. If you need different shapes you can
:func:`~cntk.ops.reshape` it later.
is_sparse (bool, default `False`): whether the provided data is sparse
(`False` by default)
stream_alias (str, default ''): name of the stream in the file that is fed to the
:func:`text_format_minibatch_source`
'''
def __init__(self, name, dim, is_sparse=False, stream_alias=''):
return super(StreamConfiguration, self).__init__(name, dim, is_sparse, stream_alias)
# wrapper around text_format_minibatch_source() that attaches a record of streams
# TODO: This should not exist; use MinibatchSource(CTFDeserializer(...))
def _unused_CNTKTextFormatMinibatchSource(path, streams, epoch_size=None): # TODO: delete this
from cntk.utils import _ClassFromDict
# convert streams into StreamConfiguration format
# TODO: stream_alias should default to 'key'
stream_configs = [ StreamConfiguration(key, dim=value.dim, is_sparse=value.is_sparse, stream_alias=value.stream_alias) for (key, value) in streams.items() ]
if epoch_size is not None: # TODO: use MAX_UI64, now that we have access
source = text_format_minibatch_source(path, stream_configs, epoch_size)
else:
source = text_format_minibatch_source(path, stream_configs)
# attach a dictionary of the streams
source.streams = _ClassFromDict({ name : source.stream_info(name) for name in streams.keys() })
return source
# stream definition for use in StreamDefs
# returns a record { stream_alias, is_sparse, optional dim, optional transforms }
from cntk.utils import Record
def StreamDef(field, shape=None, is_sparse=False, transforms=None):
# note: the names used inside here are required by the C++ code which looks them up in a dictionary
config = dict(stream_alias=field, is_sparse=is_sparse)
if shape is not None:
config['dim'] = shape
if transforms is not None:
config['transforms'] = transforms
return Record(**config)
# TODO: we should always use 'shape' unless it is always rank-1 or a single rank's dimension
# TODO: dim should be inferred from the file, at least for dense
# StreamDefs for use in constructing deserializers
# StreamDefs(query = StreamDef(...), labels = StreamDef(...), ...)
StreamDefs = Record
def _dense_to_str(data):
return ' '.join(data.ravel(order='C').astype(np.str))
def _sparse_to_str(data):
return ' '.join('%s:%s' % (k, v) for k, v in sorted(data.items()))
def _is_tensor(data):
'''
Checks whether the data is a tensor, i.e. whether it is a NumPy array or a
list of NumPy arrays.
Args:
data: data to check
Returns: True, if it is a tensor.
'''
if isinstance(data, np.ndarray):
return True
if not isinstance(data, list):
return False
while len(data) > 0:
# All but the innermost dimension's values have to be lists
try:
data[0][0]
except:
# We reached the innermost dimension
try:
data[0] + 0
return True
except:
# Innermost type is not a number
return False
if isinstance(data, np.ndarray):
return True
if not isinstance(data[0], list):
return False
data = data[0]
return True
def sequence_to_cntk_text_format(seq_idx, alias_tensor_map):
'''
Converts a list of NumPy arrays representing tensors of inputs into a
format that is readable by :class:`~cntk.io.CTFDeserializer`.
Args:
seq_idx (int): number of current sequence
alias_tensor_map (dict): maps alias (str) to tensor (ndarray). Tensors
are assumed to have dynamic axis.
Returns:
String representation in `CNTKTextReader format <https://github.com/microsoft/cntk/wiki/CNTKTextFormat-Reader>`_
'''
max_seq_length = max(len(t) for t in alias_tensor_map.values())
if max_seq_length == 0:
return ''
lines = []
for elem_idx in range(0, max_seq_length):
line = []
for alias, tensor in sorted(alias_tensor_map.items()):
if elem_idx >= len(tensor):
                    # for this alias there are no more sequence elements
continue
if _is_tensor(tensor):
if not isinstance(tensor, np.ndarray):
tensor = np.asarray(tensor)
to_str = _dense_to_str
elif isinstance(tensor, list) and isinstance(tensor[0], dict):
to_str = _sparse_to_str
else:
raise ValueError(
'expected a tensor (dense) or list of dicts (sparse), but got "%s"' % type(tensor))
line.append('%s %s' % (alias, to_str(tensor[elem_idx])))
lines.append('%i\t|' % seq_idx + ' |'.join(line))
return '\n'.join(lines)
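# Hedged example (not part of the CNTK module): converts one short sequence with a dense
# feature and a sparse label into CTF text via sequence_to_cntk_text_format above. The
# alias names and values are arbitrary illustrations.
def _ctf_example():
    features = np.asarray([[1.0, 2.0], [3.0, 4.0]])   # 2 sequence elements, dense dim 2
    labels = [{0: 1}, {1: 1}]                         # sparse one-hot entry per element
    return sequence_to_cntk_text_format(0, {'x': features, 'y': labels})
    # -> "0\t|x 1.0 2.0 |y 0:1\n0\t|x 3.0 4.0 |y 1:1"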
|
#!/usr/local/bin/python2.7
import os, ConfigParser, inspect, hashlib, json
class TweetBotConfig(object):
''' TweetBotConfig cls '''
def __init__(self,cfgFile='config'):
# Bool initializer
str_to_bool = lambda x : x.strip()=='True'
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# read config file
self.config = ConfigParser.SafeConfigParser()
        self.config.read(os.path.join(path, cfgFile))
self.consumer_key = self.config.get("twitter","consumer_key")
self.consumer_key_secret = self.config.get("twitter","consumer_secret")
self.access_token = self.config.get("twitter","access_token")
self.access_token_secret = self.config.get("twitter","access_token_secret")
# Bot strategy on incoming data (tweets)
self.strategy = {'retweet':None,'fav':None,'follow':None}
self.strategy['retweet'] = str_to_bool(self.config.get("strategy","retweet"))
self.strategy['fav'] = str_to_bool(self.config.get("strategy","fav"))
self.strategy['follow'] = str_to_bool(self.config.get("strategy","follow"))
# Banned
self.banned_accounts = json.loads(self.config.get("banned","accounts"))
self.banned_words = json.loads(self.config.get("banned","words"))
# Tracked
self.track_words = json.loads(self.config.get("track","words"))
# Follow accounts
self.follow_accounts = json.loads(self.config.get("follow","accounts"))
# Whitelist
self.whitelist_accounts = json.loads(self.config.get("whitelist","accounts"))
self.whitelist_words = json.loads(self.config.get("whitelist","words"))
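# Hedged illustration (not part of the original module): a minimal `config` file layout
# matching the sections and option names read above. All values are placeholders; the
# list-valued options are parsed with json.loads, so they must be JSON arrays.
EXAMPLE_CONFIG = """
[twitter]
consumer_key = YOUR_CONSUMER_KEY
consumer_secret = YOUR_CONSUMER_SECRET
access_token = YOUR_ACCESS_TOKEN
access_token_secret = YOUR_ACCESS_TOKEN_SECRET
[strategy]
retweet = True
fav = False
follow = False
[banned]
accounts = ["spam_account"]
words = ["spoiler"]
[track]
words = ["python"]
[follow]
accounts = ["12345"]
[whitelist]
accounts = ["friend_account"]
words = ["release"]
"""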
|
import torch
from irislandmarks import IrisLandmarks
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = IrisLandmarks().to(gpu)
net.load_weights("irislandmarks.pth")
##############################################################################
batch_size = 1
height = 64
width = 64
x = torch.randn((batch_size, height, width, 3), requires_grad=True).byte().to(gpu)
opset = 12
##############################################################################
input_names = ["input"] #[B,64,64,3],
output_names = ['eye', 'iris'] #[B,71,3], [B,5,3]
onnx_file_name = "BlazeIris_{}x{}x{}xBGRxByte_opset{}.onnx".format(batch_size, height, width, opset)
dynamic_axes = {
"input": {0: "batch_size"},
"eye": {0: "batch_size"},
"iris": {0: "batch_size"}
}
torch.onnx.export(net,
x,
onnx_file_name,
export_params=True,
opset_version=opset,
do_constant_folding=True,
input_names=input_names,
output_names=output_names
#,dynamic_axes=dynamic_axes
)
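##############################################################################
# Optional sanity check (added sketch; assumes the onnxruntime package is
# installed and that the export above succeeded). It loads the exported graph
# and pushes one dummy uint8 frame through it to confirm the two declared
# outputs come back with the expected leading batch dimension.
try:
    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession(onnx_file_name)
    dummy = np.random.randint(0, 256, size=(batch_size, height, width, 3), dtype=np.uint8)
    eye_out, iris_out = session.run(output_names, {"input": dummy})
    print("eye output:", eye_out.shape, "iris output:", iris_out.shape)
except ImportError:
    pass  # onnxruntime not installed; skip the check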
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lcurves', '0035_auto_20150430_1553'),
]
operations = [
migrations.AlterField(
model_name='lightcurve',
name='gls_freq',
field=models.FloatField(default=None, null=True, verbose_name='GLS Frequency'),
preserve_default=True,
),
migrations.AlterField(
model_name='lightcurve',
name='pdm_freq',
field=models.FloatField(default=None, null=True, verbose_name='PDM Frequency'),
preserve_default=True,
),
migrations.AlterField(
model_name='lightcurve',
name='pdm_period',
field=models.FloatField(default=None, null=True, verbose_name='PDM Period'),
preserve_default=True,
),
]
|
from questlog import QuestLog
class Category():
    '''A category is a subfolder of a QuestLog and itself contains another QuestLog'''
def __init__(self, title, ql_title, ql_parent, ql_desc=''):
self.title = title
self.questlog = QuestLog(ql_title, desc=ql_desc)
self.ql_parent = ql_parent
self._pos_parent = self.ql_parent.search_log(self.title)
def __str__(self):
return f"{self.questlog}"
def __len__(self):
return len(self.questlog)
# Reader Functions
def read_title(self):
return self.title
def read_ql_title(self):
return self.questlog.read_name()
def read_desc(self):
return self.questlog.read_desc()
# Access Functions
def change_title(self, title):
self.title = title
def change_ql(self, ql_title, ql_desc):
self.questlog = QuestLog(ql_title, desc=ql_desc)
def change_parent(self, new_parent):
self.ql_parent.remove_category(self)
self.ql_parent = new_parent
self.ql_parent.add_category(self)
self._pos_parent = self.ql_parent.search_log(self.title)
def correct_pos(self):
self._pos_parent = self.ql_parent.search_log(self.title)
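# --- Illustrative usage (added sketch; QuestLog's constructor and its
# --- add_category/search_log methods are assumed from their use above) ---
# main_log = QuestLog('Main log')
# side = Category('Side quests', 'Side quest log', main_log, ql_desc='Optional tasks')
# main_log.add_category(side)
# side.correct_pos()           # refresh the cached position inside the parent log
# print(side.read_ql_title())  # -> 'Side quest log' (via QuestLog.read_name)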
|
import urllib.parse
import json
import datetime
import requests
from pgsheets.exceptions import _check_status
class Client():
"""Represent an application's Google's client data, along with methods for
getting a refresh token.
    A refresh token is required to initialize a Token object.
"""
def __init__(self, client_id, client_secret, **kwargs):
super().__init__(**kwargs)
self._client_id = client_id
self._client_secret = client_secret
self._redirect_uri = "urn:ietf:wg:oauth:2.0:oob"
def getOauthUrl(self):
"""Returns the oauth url a user can put in their browser.
        This is required to get Google's authorization code.
Provide the returned code to the getRefreshToken() method to get a
token that can be used repeatedly in the future.
"""
scope = urllib.parse.quote('https://spreadsheets.google.com/feeds')
return (
"https://accounts.google.com/o/oauth2/auth?"
"scope={scope}&"
"redirect_uri={redirect_uri}&"
"response_type=code&"
"client_id={client_id}".format(
client_id=self._client_id,
redirect_uri=self._redirect_uri,
scope=scope)
)
def getRefreshToken(self, user_code):
"""Using the user token provided by visiting the url from getOauthUrl()
returns a refresh token (a string).
You should persist the token and use it on future Token initializations
This method calls the Google API
"""
r = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': user_code,
'client_id': self._client_id,
'client_secret': self._client_secret,
'redirect_uri': self._redirect_uri,
'grant_type': 'authorization_code',
})
_check_status(r)
# Returns a dictionary with the keys:
# access_token
# expires_in
# refresh_token
# 'token_type': 'Bearer'
data = json.loads(r.content.decode())
return data['refresh_token']
class Token():
    _REFRESH_TOKEN_SLACK = 100
def __init__(self, client, refresh_token, **kwargs):
"""Initializes a SheetsRequest object.
The refresh_token should be stored and provided on all
initializations of any particular client and Google user.
"""
super().__init__(**kwargs)
self._client = client
self._refresh_token = refresh_token
self._expires = None
def _setExpiresTime(self, request_time, expires):
expires = int(expires)
        # underestimate the expiry time slightly to leave some slack
        assert expires > self._REFRESH_TOKEN_SLACK, (expires,
                                                     self._REFRESH_TOKEN_SLACK)
        expires -= self._REFRESH_TOKEN_SLACK
self._expires = request_time + datetime.timedelta(seconds=expires)
def _refreshToken(self):
"""Gets a new access token.
"""
request_time = datetime.datetime.utcnow()
r = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'refresh_token': self._refresh_token,
'client_id': self._client._client_id,
'client_secret': self._client._client_secret,
'grant_type': 'refresh_token',
})
_check_status(r)
# We have a dictionary with the keys
# access_token
# expires_in
# 'token_type': 'Bearer'
data = json.loads(r.content.decode())
self._access_token = data['access_token']
self._setExpiresTime(request_time, data['expires_in'])
def _getValidToken(self):
"""Gets a access token, refreshing as necessary.
"""
if ((self._expires is None) or (datetime.datetime.utcnow()
>= self._expires)):
self._refreshToken()
return self._access_token
def getAuthorizationHeader(self, headers=None):
"""Returns a dictionary containing a Authorization header.
If a dictionary is supplied the Authorization header is added.
"""
if headers is None:
headers = {}
headers['Authorization'] = "Bearer " + self._getValidToken()
return headers
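# --- Illustrative OAuth flow (added sketch; real Google client credentials and
# --- user interaction are required, so this is left as comments) ---
# client = Client('my-client-id', 'my-client-secret')
# print(client.getOauthUrl())               # open in a browser and grant access
# refresh = client.getRefreshToken('code-copied-from-the-browser')
# token = Token(client, refresh)
# # Any spreadsheets-feed request can then be authorized like this
# # (feed_url is a placeholder for a real feed endpoint):
# requests.get(feed_url, headers=token.getAuthorizationHeader())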
|
from django.contrib import admin
from .models import *
models = [
Post,
Comment,
Heart,
]
admin.site.register(models)
|
# coding: utf-8
from django.core.management.base import NoArgsCommand
from django.conf import settings
from wagtail.wagtailcore.models import Page
class Command(NoArgsCommand):
def set_subtree(self, root, root_path, lang=None):
update_fields = ['url_path_'+lang] if hasattr(root.specific, 'url_path_'+lang) else ['url_path']
if hasattr(root.specific, 'url_path_'+lang):
setattr(root.specific, 'url_path_'+lang, root_path)
else:
setattr(root, 'url_path', root_path)
if lang == settings.LANGUAGE_CODE:
setattr(root, 'url_path', root_path)
update_fields.append('url_path')
root.specific.save(update_fields=update_fields)
for child in root.get_children():
slug = getattr(
child.specific, 'slug_'+lang) if hasattr(
child.specific, 'slug_'+lang) else getattr(child, 'slug')
if not slug or slug == '':
slug = getattr(
child.specific, 'slug_'+settings.LANGUAGE_CODE) if hasattr(child.specific, 'slug_'+settings.LANGUAGE_CODE) and getattr(child.specific, 'slug_'+settings.LANGUAGE_CODE) else getattr(child, 'slug')
self.set_subtree(child, root_path + slug + '/', lang)
def handle_noargs(self, **options):
for node in Page.get_root_nodes():
for lang in settings.LANGUAGES:
self.set_subtree(node, '/', lang=lang[0])
|
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.shortcuts import redirect
from django.views import View
from church_site.views import AdminListView, BaseDetailView
from contactus.models import ContactMessage
class AdminContactMessageListView(PermissionRequiredMixin, AdminListView):
permission_required = 'contactus.view_contactmessage'
template_name = 'contactus/admin-contact-messages.html'
model = ContactMessage
context_object_name = 'contact_messages'
queryset = ContactMessage.objects.all_existing()
ordering = ['-message_date']
page_title = 'Contact Messages - Admin'
current_page = 'manage'
class AdminContactMessageDetailView(PermissionRequiredMixin, BaseDetailView):
permission_required = 'contactus.view_contactmessage'
template_name = 'contactus/admin-message-detail.html'
model = ContactMessage
context_object_name = 'contact_msg'
# queryset = ContactMessage.objects.all_existing
page_title = 'Contact Message - Admin'
current_page = 'manage'
class AdminContactMessageDeleteView(PermissionRequiredMixin, View):
permission_required = 'contactus.delete_contactmessage'
def post(self, request, pk=None):
msg = ContactMessage.objects.filter(id=pk).first()
if msg:
msg.deleted = True
msg.deleted_by = request.user
msg.save()
return redirect('contactus:admin-messages')
class AdminReadMessage(PermissionRequiredMixin, View):
permission_required = 'contactus.change_contactmessage'
def get(self, request, pk=None):
msg = ContactMessage.objects.filter(id=pk).first()
if msg:
msg.read = not msg.read
msg.save()
return redirect('contactus:admin-message-detail', pk=pk)
|
# https://gist.github.com/omz/6762c1e55e8c3a596637
# coding: utf-8
#!python2
'''
NOTE: This requires the latest beta of Pythonista 1.6 (build 160022)
Demo of using Pythonista's own internals to implement an editor view with
syntax highlighting (basically the exact same view Pythonista uses itself)
IMPORTANT: This is just for fun -- I was curious if it would work at all,
but I don't recommend that you rely on this for anything important.
The way Pythonista's internals work can change at any time,
and this code is *very* likely to break in the future.
'''
import ui
from objc_util import *
class CodeEditorView(ui.View):
@on_main_thread
def __init__(self, mode='python', ext_kb=True, *args, **kwargs):
ui.View.__init__(self, *args, **kwargs)
valid_modes = {
'python': 'OMPythonSyntaxHighlighter',
'html': 'OMHTMLSyntaxHighlighter',
'javascript': 'OMJavaScriptSyntaxHighlighter',
'markdown': 'OMMarkdownSyntaxHighlighter',
'text': 'OMBaseSyntaxHighlighter'
}
if mode not in valid_modes:
raise ValueError('invalid syntax mode')
objc_view = ObjCInstance(self._objc_ptr)
OMTextEditorView = ObjCClass('OMTextEditorView')
OMSyntaxHighlighterTheme = ObjCClass('OMSyntaxHighlighterTheme')
SyntaxHighlighter = ObjCClass(valid_modes[mode])
PA2UITheme = ObjCClass('PA2UITheme')
theme_dict = PA2UITheme.sharedTheme().themeDict().mutableCopy()
theme_dict.autorelease()
theme_dict['font-family'] = 'Menlo-Regular'
theme_dict['font-size'] = 14
theme = OMSyntaxHighlighterTheme.alloc().initWithDictionary_(theme_dict)
theme.autorelease()
f = CGRect(CGPoint(0, 0), CGSize(self.width, self.height))
editor_view = OMTextEditorView.alloc(
).initWithFrame_syntaxHighlighterClass_theme_(f, SyntaxHighlighter, theme)
editor_view.textView().setAutocapitalizationType_(0)
editor_view.textView().setAutocorrectionType_(1)
flex_width, flex_height = (1 << 1), (1 << 4)
editor_view.setAutoresizingMask_(flex_width | flex_height)
margins = UIEdgeInsets(16, 10, 16, 10)
editor_view.setMarginsForPortrait_landscape_(margins, margins)
if ext_kb:
kb_types = {
'python': 'KeyboardAccessoryTypePythonCompact',
'markdown': 'KeyboardAccessoryTypeMarkdownWithoutSnippets',
'html': 'KeyboardAccessoryTypeHTML',
'javascript': 'KeyboardAccessoryTypeHTML'
}
kb_type = kb_types.get(mode)
if kb_type:
OMKeyboardAccessoryView = ObjCClass('OMKeyboardAccessoryView')
accessory_view = OMKeyboardAccessoryView.alloc().initWithType_dark_(
kb_type, False).autorelease()
editor_view.setKeyboardAccessoryView_(accessory_view)
editor_view.autorelease()
objc_view.addSubview_(editor_view)
self.editor_view = editor_view
@property
@on_main_thread
def text(self):
text_view = self.editor_view.textView()
text = text_view.text()
return unicode(text)
@text.setter
@on_main_thread
def text(self, new_text):
if not isinstance(new_text, basestring):
raise TypeError('expected string/unicode')
text_view = self.editor_view.textView()
text_view.setText_(new_text)
@on_main_thread
def insert_text(self, text):
if not isinstance(text, basestring):
raise TypeError('expected string/unicode')
text_view = self.editor_view.textView()
text_view.insertText_(text)
@on_main_thread
def replace_range(self, range, text):
text_view = self.editor_view.textView()
ns_range = NSRange(range[0], range[1] - range[0])
text_range = ObjCClass('OMTextRange').rangeWithNSRange_(ns_range)
text_view.replaceRange_withText_(text_range, text)
@property
@on_main_thread
def selected_range(self):
text_view = self.editor_view.textView()
range = text_view.selectedRange()
return (range.location, range.location + range.length)
@selected_range.setter
@on_main_thread
def selected_range(self, new_value):
text_view = self.editor_view.textView()
range = NSRange(new_value[0], new_value[1] - new_value[0])
text_view.setSelectedRange_(range)
@on_main_thread
def begin_editing(self):
text_view = self.editor_view.textView()
text_view.becomeFirstResponder()
@on_main_thread
def end_editing(self):
text_view = self.editor_view.textView()
text_view.resignFirstResponder()
# --- DEMO
editor_view = None
def copy_action(sender):
import clipboard
clipboard.set(editor_view.text)
import console
console.hud_alert('Copied')
def main():
global editor_view
editor_view = CodeEditorView('markdown', ext_kb=True, frame=(0, 0, 500, 500))
editor_view.name = 'Code Editor Demo'
copy_btn = ui.ButtonItem('Copy', action=copy_action)
editor_view.right_button_items = [copy_btn]
editor_view.text = '#coding: utf-8\nprint "Hello World"'
editor_view.present('sheet')
if __name__ == '__main__':
main()
|
import os
import random
from typing import Any, Dict, List, Text
import structlog
from aiohttp import ClientSession
from rasa_sdk import Action, Tracker
from rasa_sdk.events import ActionExecuted, BotUttered, EventType, SlotSet, UserUttered
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import REQUESTED_SLOT
from covidflow.constants import (
ACTION_LISTEN_NAME,
LANGUAGE_SLOT,
QA_TEST_PROFILE_ATTRIBUTE,
SKIP_SLOT_PLACEHOLDER,
)
from .answers import (
QuestionAnsweringProtocol,
QuestionAnsweringResponse,
QuestionAnsweringStatus,
)
from .lib.form_helper import _form_slots_to_validate
from .lib.log_util import bind_logger
logger = structlog.get_logger()
FAQ_URL_ENV_KEY = "COVID_FAQ_SERVICE_URL"
DEFAULT_FAQ_URL = "https://covidfaq.dialoguecorp.com"
QUESTION_SLOT = "question_answering_form_active_question"
FEEDBACK_SLOT = "question_answering_form_feedback"
STATUS_SLOT = "question_answering_status"
ANSWERS_SLOT = "question_answering_form_answers"
ASKED_QUESTION_SLOT = "question_answering_form_asked_question"
SKIP_QA_INTRO_SLOT = "question_answering_form_skip_qa_intro"
ANSWERS_KEY = "answers"
STATUS_KEY = "status"
FEEDBACK_KEY = "feedback"
QUESTION_KEY = "question"
FEEDBACK_NOT_GIVEN = "not_given"
FORM_NAME = "question_answering_form"
ASK_QUESTION_ACTION_NAME = f"action_ask_{QUESTION_SLOT}"
VALIDATE_ACTION_NAME = f"validate_{FORM_NAME}"
SUBMIT_ACTION_NAME = f"action_submit_{FORM_NAME}"
FALLBACK_ACTIVATE_ACTION_NAME = "action_activate_fallback_question_answering_form"
TEST_PROFILES_RESPONSE = {
"success": QuestionAnsweringResponse(
answers=["this is my answer"], status=QuestionAnsweringStatus.SUCCESS
),
"failure": QuestionAnsweringResponse(status=QuestionAnsweringStatus.FAILURE),
"need_assessment": QuestionAnsweringResponse(
status=QuestionAnsweringStatus.NEED_ASSESSMENT
),
"out_of_distribution": QuestionAnsweringResponse(
status=QuestionAnsweringStatus.OUT_OF_DISTRIBUTION
),
}
class ActionActivateFallbackQuestionAnswering(Action):
def name(self) -> Text:
return FALLBACK_ACTIVATE_ACTION_NAME
async def run(
self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
) -> List[EventType]:
bind_logger(tracker)
question = tracker.latest_message.get("text", "")
# Slot will be validated on form activation
return [SlotSet(QUESTION_SLOT, question)]
class ActionAskActiveQuestion(Action):
def name(self) -> Text:
return ASK_QUESTION_ACTION_NAME
async def run(
self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
) -> List[EventType]:
bind_logger(tracker)
events = []
if not (tracker.get_slot(SKIP_QA_INTRO_SLOT) is True):
dispatcher.utter_message(template="utter_can_help_with_questions")
dispatcher.utter_message(template="utter_qa_disclaimer")
random_qa_samples = (
_get_fixed_questions_samples()
if _must_stub_result(tracker)
else _get_random_question_samples(domain)
)
if len(random_qa_samples) > 0:
dispatcher.utter_message(
template="utter_qa_sample",
sample_questions="\n".join(random_qa_samples),
)
events = [SlotSet(SKIP_QA_INTRO_SLOT, True)]
dispatcher.utter_message(
template="utter_ask_question_answering_form_active_question"
)
return events
class ValidateQuestionAnsweringForm(Action):
def name(self) -> Text:
return VALIDATE_ACTION_NAME
async def run(
self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
) -> List[EventType]:
bind_logger(tracker)
extracted_slots: Dict[Text, Any] = _form_slots_to_validate(tracker)
validation_events: List[EventType] = []
for slot_name, slot_value in extracted_slots.items():
slot_events = [SlotSet(slot_name, slot_value)]
if slot_name == FEEDBACK_SLOT:
if slot_value is False:
dispatcher.utter_message(
template="utter_question_answering_form_feedback_false"
)
elif not isinstance(slot_value, bool):
slot_events = [SlotSet(FEEDBACK_SLOT, FEEDBACK_NOT_GIVEN)]
elif slot_name == QUESTION_SLOT:
result = (
_get_stub_qa_result(tracker)
if _must_stub_result(tracker)
else await _fetch_qa(slot_value, tracker)
)
slot_events += [SlotSet(STATUS_SLOT, result.status)]
if result.status == QuestionAnsweringStatus.SUCCESS:
dispatcher.utter_message(result.answers[0])
slot_events += [SlotSet(ANSWERS_SLOT, result.answers)]
else:
slot_events += [
SlotSet(REQUESTED_SLOT, None),
SlotSet(FEEDBACK_SLOT, SKIP_SLOT_PLACEHOLDER),
]
validation_events.extend(slot_events)
return validation_events
class ActionSubmitQuestionAnsweringForm(Action):
def name(self) -> Text:
return SUBMIT_ACTION_NAME
async def run(
self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
) -> List[EventType]:
bind_logger(tracker)
feedback = tracker.get_slot(FEEDBACK_SLOT)
full_question_result = {
QUESTION_KEY: tracker.get_slot(QUESTION_SLOT),
ANSWERS_KEY: tracker.get_slot(ANSWERS_SLOT),
STATUS_KEY: tracker.get_slot(STATUS_SLOT),
FEEDBACK_KEY: feedback,
}
# Clearing and saving in case of re-rentry in the form.
slot_sets = [
SlotSet(QUESTION_SLOT),
SlotSet(FEEDBACK_SLOT),
SlotSet(ASKED_QUESTION_SLOT, full_question_result),
]
if feedback == FEEDBACK_NOT_GIVEN:
return slot_sets + _carry_user_utterance(tracker)
return slot_sets
def _must_stub_result(tracker: Tracker):
metadata = tracker.get_slot("metadata") or {}
return QA_TEST_PROFILE_ATTRIBUTE in metadata
def _get_random_question_samples(domain: Dict[Text, Any],) -> List[str]:
responses = domain.get("responses", {})
qa_samples_categories = [
key for key in responses.keys() if key.startswith("utter_qa_sample_")
]
random_qa_samples_categories = random.sample(
qa_samples_categories, k=min(len(qa_samples_categories), 3)
)
return [
f"- {random.choice(value).get('text')}"
for key, value in responses.items()
if key in random_qa_samples_categories
]
def _get_fixed_questions_samples() -> List[str]:
return ["- sample question 1", "- sample question 2"]
async def _fetch_qa(text: Text, tracker: Tracker) -> QuestionAnsweringResponse:
protocol = QuestionAnsweringProtocol(
os.environ.get(FAQ_URL_ENV_KEY, DEFAULT_FAQ_URL)
)
language = tracker.get_slot(LANGUAGE_SLOT)
async with ClientSession() as session:
return await protocol.get_response(session, text, language)
def _get_stub_qa_result(tracker: Tracker):
profile = tracker.get_slot("metadata")[QA_TEST_PROFILE_ATTRIBUTE]
return TEST_PROFILES_RESPONSE[profile]
def _carry_user_utterance(tracker: Tracker) -> List[EventType]:
return [
ActionExecuted("utter_ask_another_question"),
BotUttered(metadata={"template_name": "utter_ask_another_question"}),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered(
tracker.latest_message.get("text", ""),
parse_data={
"text": tracker.latest_message.get("text", ""),
"intent": tracker.latest_message.get("intent", {}),
"intent_ranking": tracker.latest_message.get("intent_ranking", []),
"entities": tracker.latest_message.get("entities", []),
},
),
]
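# --- Illustrative domain wiring (added sketch; the real domain.yml lives elsewhere
# --- in the project, so the exact entries may differ) ---
# forms:
#   question_answering_form:
#     required_slots:
#       - question_answering_form_active_question
#       - question_answering_form_feedback
# actions:
#   - action_activate_fallback_question_answering_form
#   - action_ask_question_answering_form_active_question
#   - validate_question_answering_form
#   - action_submit_question_answering_form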
|
Profile
{'000001.SZ': {'address1': 'No. 5047, Shennan East Road', 'address2': 'Luohu District', 'city': 'Shenzhen', 'zip': '518001', 'country': 'China', 'phone': '86 21 3882 4910', 'website': 'http://www.bank.pingan.com', 'industry': 'Banks—Regional', 'sector': 'Financial Services', 'longBusinessSummary': "Ping An Bank Co., Ltd. provides commercial banking products and services in the People's Republic of China. The company offers investment products; loans; financial trading services; reverse factoring services; fund management products; settlement and trade financing products; offshore business services; custody services; insurance products; bond agency settlement services; foreign exchange services; wealth management and investment products; and precious metal investment products. It also issues and underwrites the inter-bank bond market, including short-term financing bonds and medium-term notes. In addition, the company provides syndicated loans, M&A loans, structured financing services, financial management and asset securitization services, credit assets transfer and asset support services, private equity funds, trust financing services, etc.; perennial, debt financing, M&A and restructuring, private equity, corporate listing and refinancing, structured, MBO and ESOP, and other financial advisory services; and debt financing tools. As of December 31, 2018, it operated through a network of 80 branches and 1,057 business offices. The
company was founded in 1987 and is headquartered in Shenzhen, the People's Republic of China. Ping An Bank Co., Ltd. operates as a subsidiary of Ping An Insurance Group Company of China, Ltd.",
'fullTimeEmployees': 33440, 'companyOfficers': [], 'maxAge': 86400}}
Details
{'000001.SZ': {'maxAge': 1, 'priceHint': 2, 'previousClose': 12.8, 'open': 12.83, 'dayLow': 12.72, 'dayHigh': 12.88, 'regularMarketPreviousClose': 12.8, 'regularMarketOpen': 12.83, 'regularMarketDayLow': 12.72, 'regularMarketDayHigh': 12.88, 'dividendRate': 0.14, 'dividendYield': 0.0113, 'exDividendDate': '2019-06-25 21:00:00', 'payoutRatio': 0.09729999, 'beta': 1.052282, 'trailingPE': 8.573342, 'forwardPE': 8.258065, 'volume': 93794022, 'regularMarketVolume': 93794022, 'averageVolume': 70874712, 'averageVolume10days': 75876740, 'averageDailyVolume10Day': 75876740, 'bid': 12.8, 'ask': 12.81, 'bidSize': 0, 'askSize': 0, 'marketCap': 248395513856, 'fiftyTwoWeekLow': 11.91, 'fiftyTwoWeekHigh': 17.6, 'priceToSalesTrailing12Months': 3.026335, 'fiftyDayAverage': 13.1828575, 'twoHundredDayAverage': 14.330497, 'trailingAnnualDividendRate': 0.218, 'trailingAnnualDividendYield': 0.017031249, 'currency': 'CNY', 'fromCurrency': None, 'toCurrency': None, 'lastMarket':
None, 'algorithm': None, 'tradeable': False}}
|
from django.db import models
import datetime as dt
from django.db.models import Q
class Editor(models.Model):
first_name = models.CharField(max_length =30)
last_name = models.CharField(max_length =30)
email = models.EmailField()
phone_number = models.CharField(max_length = 10,blank =True)
def __str__(self):
return self.first_name
def save_editor(self):
self.save()
# def del_editor(self):
# self.delete()
    @classmethod
    def display_editors(cls):
        return cls.objects.all()
    def update_editor(self, **kwargs):
        Editor.objects.filter(pk=self.pk).update(**kwargs)
class Meta:
ordering = ['first_name']
class Category(models.Model):
name = models.CharField(max_length =30)
def __str__(self):
return self.name
def save_cat(self):
self.save()
def del_cat(self):
self.delete()
# def display_tags():
# tags.objects.all()
    def update_cat(self, **kwargs):
        Category.objects.filter(pk=self.pk).update(**kwargs)
class Location(models.Model):
name = models.CharField(max_length =30)
def __str__(self):
return self.name
def save_loc(self):
self.save()
def del_loc(self):
self.delete()
# def display_tags():
# tags.objects.all()
    def update_loc(self, **kwargs):
        Location.objects.filter(pk=self.pk).update(**kwargs)
class Image(models.Model):
# id = models.IntegerField(primary_key=True)
name = models.CharField(max_length =60)
description = models.TextField()
editor = models.ForeignKey(Editor)
location = models.ForeignKey(Location)
category = models.ForeignKey(Category)
pub_date = models.DateTimeField(auto_now_add=True, null=True, blank=True)
image = models.ImageField(upload_to = 'image/',default='SOME STRING')
def save_image(self):
self.save()
def delete_image(self):
self.delete()
# def display_artis():
# image.objects.all()
# def update_arti(self):
# image.objects.filter(self).update(self)
@classmethod
def todays_image(cls):
today = dt.date.today()
image = cls.objects.filter(pub_date__date = today)
return image
@classmethod
def days_image(cls,date):
image = cls.objects.filter(pub_date__date = date)
return image
@classmethod
def image_details(cls,id):
image = cls.objects.filter(id__icontains = id)
return image
# @classmethod
# def get_image_by_id(cls,search_id):
# image = cls.objects.filter(id__icontains=search_id)
# return image
@classmethod
def search_image(cls,search_cat,search_loc):
image = cls.objects.filter(
Q(category__name__icontains=search_cat) |
Q(location__name__icontains=search_loc)
)
return image
# @classmethod
# def filter_by_location(cls,search_loc):
# image = cls.objects.filter(location__name__icontains=search_loc)
# return image
|
import time
from vibora import Vibora
from vibora.responses import Response
from vibora.utils import Timeouts
app = Vibora()
@app.route('/')
def home():
time.sleep(10)
return Response(b'123')
if __name__ == '__main__':
app.run(debug=False, port=8000, host='0.0.0.0', workers=1, timeouts=Timeouts(
worker=5,
keep_alive=10
))
|
default_app_config = "mytemplatetags.apps.myTemplateTagsConfig"
|
from colossalai.context import ParallelMode
from colossalai.nn.layer import WrappedDropout as Dropout
def moe_sa_args(d_model: int,
n_heads: int,
d_kv: int,
attention_drop: float = 0,
drop_rate: float = 0,
bias: bool = True):
"""This is an example for args in moe self attention, since lots of modules should be
adapted before putting them in experts.
"""
dropout1 = Dropout(attention_drop, mode=ParallelMode.TENSOR)
dropout2 = Dropout(drop_rate, mode=ParallelMode.TENSOR)
return dict(
d_model=d_model,
n_heads=n_heads,
d_kv=d_kv,
bias=bias,
dropout1=dropout1,
dropout2=dropout2
)
def moe_mlp_args(d_model: int,
d_ff: int,
drop_rate: float,
bias: bool = True):
"""This is an example for args of MLP in Experts, since lots of modules should be adapted
before putting them in experts.
"""
dropout1 = Dropout(drop_rate, mode=ParallelMode.TENSOR)
dropout2 = Dropout(drop_rate, mode=ParallelMode.TENSOR)
return dict(
d_model=d_model,
d_ff=d_ff,
bias=bias,
dropout1=dropout1,
dropout2=dropout2
)
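# --- Illustrative call (added sketch; the dimensions below are made up) ---
# sa_kwargs = moe_sa_args(d_model=512, n_heads=8, d_kv=64, attention_drop=0.1, drop_rate=0.1)
# mlp_kwargs = moe_mlp_args(d_model=512, d_ff=2048, drop_rate=0.1)
# The returned dicts are meant to be unpacked (**kwargs) into the corresponding
# expert modules; note that the wrapped Dropout layers are tied to
# ParallelMode.TENSOR, so these helpers are normally called after Colossal-AI's
# parallel context has been initialized.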
|
# -*- coding: utf-8 -*-
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to, require_that, is_true, is_false
from common.base_test import BaseTest
SUITE = {
"description": "Method 'call_contract_no_changing_state'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_contracts", "call_contract_no_changing_state")
@lcc.suite("Check work of method 'call_contract_no_changing_state'", rank=1)
class CallContractNoChangingState(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.contract = self.get_byte_code("piggy", "code")
self.greet = self.get_byte_code("piggy", "greet()")
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(self.__database_api_identifier,
self.__registration_api_identifier))
self.echo_acc0 = self.get_account_id(self.accounts[0], self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("Echo account is '{}'".format(self.echo_acc0))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of method 'call_contract_no_changing_state'")
def method_main_check(self):
lcc.set_step("Create contract in the Echo network and get its contract id")
contract_id = self.utils.get_contract_id(self, self.echo_acc0, self.contract, self.__database_api_identifier)
lcc.set_step("Get call contract operation no changing state")
params = [contract_id, self.echo_acc0, self.echo_asset, self.greet]
response_id = self.send_request(self.get_request("call_contract_no_changing_state", params),
self.__database_api_identifier)
result = self.get_response(response_id)["result"]
lcc.log_info("Call method 'call_contract_no_changing_state' with params: '{}'".format(params))
lcc.set_step("Check simple work of method 'call_contract_no_changing_state'")
if not self.validator.is_hex(result):
lcc.log_error("Wrong format of response from 'call_contract_no_changing_state', got: {}".format(result))
else:
lcc.log_info("response from 'call_contract_no_changing_state' has correct format: hex")
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_contract", "call_contract_no_changing_state")
@lcc.suite("Positive testing of method 'call_contract_no_changing_state'", rank=2)
class PositiveTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.dynamic_fields_contract = self.get_byte_code("dynamic_fields", "code")
self.set_uint = self.get_byte_code("dynamic_fields", "onUint256Changed(uint256)")
self.get_uint = self.get_byte_code("dynamic_fields", "getUint256()")
self.set_string = self.get_byte_code("dynamic_fields", "onStringChanged(string)")
self.get_string = self.get_byte_code("dynamic_fields", "getString()")
self.set_all_values = self.get_byte_code("dynamic_fields", "setAllValues(uint256,string)")
self.piggy_contract = self.get_byte_code("piggy", "code")
self.getPennie = self.get_byte_code("piggy", "pennieReturned()")
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(self.__database_api_identifier,
self.__registration_api_identifier))
self.echo_acc0 = self.get_account_id(self.accounts[0], self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("Echo account is '{}'".format(self.echo_acc0))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Check work of 'call_contract_no_changing_state' method, not empty output call contract result "
"(int and string type)")
@lcc.depends_on("DatabaseApi.Contracts.CallContractNoChangingState.CallContractNoChangingState.method_main_check")
def check_call_contract_no_changing_state_with_not_empty_call_contract_result(self, get_random_integer,
get_random_string):
int_param, string_param = get_random_integer, get_random_string
lcc.set_step("Create 'dynamic_fields' contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(self, self.echo_acc0, self.dynamic_fields_contract,
self.__database_api_identifier)
lcc.set_step("Call method 'set_string' to add uint field in contract")
method_bytecode = self.set_string + self.get_byte_code_param(string_param, param_type=str)
operation = self.echo_ops.get_contract_call_operation(echo=self.echo, registrar=self.echo_acc0,
bytecode=method_bytecode, callee=contract_id)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, log_broadcast=False)
lcc.log_info("Method 'set_string' performed successfully")
lcc.set_step("Call method 'get_string'")
operation = self.echo_ops.get_contract_call_operation(echo=self.echo, registrar=self.echo_acc0,
bytecode=self.get_string, callee=contract_id)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation,
log_broadcast=False)
lcc.log_info("Method 'get_string' performed successfully")
lcc.set_step("Get contract result output")
contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
contract_output_in_hex = self.get_contract_output(contract_result, in_hex=True)
lcc.tags("Contract output in hex: '{}'".format(contract_output_in_hex))
lcc.set_step("Get call contract operation no changing state")
params = [contract_id, self.echo_acc0, self.echo_asset, self.get_string]
response_id = self.send_request(self.get_request("call_contract_no_changing_state", params),
self.__database_api_identifier)
result = self.get_response(response_id)["result"]
require_that("'method result has value'", bool(result), is_true(), quiet=True)
lcc.log_info("Call method 'call_contract_no_changing_state' with params: '{}'".format(params))
lcc.set_step("Check call contract operation no changing state equal to call contract result")
check_that("'call contract operation no changing state, output_type=string'", result,
equal_to(contract_output_in_hex), quiet=True)
lcc.set_step("Call method 'set_uint' to add uint field in contract")
method_bytecode = self.set_uint + self.get_byte_code_param(int_param, param_type=int)
operation = self.echo_ops.get_contract_call_operation(echo=self.echo, registrar=self.echo_acc0,
bytecode=method_bytecode, callee=contract_id)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, log_broadcast=False)
lcc.log_info("Method 'set_uint' performed successfully")
lcc.set_step("Call method 'get_uint'")
operation = self.echo_ops.get_contract_call_operation(echo=self.echo, registrar=self.echo_acc0,
bytecode=self.get_uint, callee=contract_id)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation,
log_broadcast=False)
lcc.log_info("Method 'get_uint' performed successfully")
lcc.set_step("Get contract result output")
contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
contract_output_in_hex = self.get_contract_output(contract_result, in_hex=True)
lcc.tags("Contract output in hex: '{}'".format(contract_output_in_hex))
lcc.set_step("Get call contract operation no changing state")
params = [contract_id, self.echo_acc0, self.echo_asset, self.get_uint]
response_id = self.send_request(self.get_request("call_contract_no_changing_state", params),
self.__database_api_identifier)
result = self.get_response(response_id)["result"]
require_that("'method result has value'", bool(result), is_true(), quiet=True)
lcc.log_info("Call method 'call_contract_no_changing_state' with params: '{}'".format(params))
lcc.set_step("Check call contract operation no changing state equal to call contract result")
check_that("'call contract operation no changing state, output_type=int'", result,
equal_to(contract_output_in_hex), quiet=True)
@lcc.test("Check work of 'call_contract_no_changing_state' method, empty output call contract result but with "
"new asset_id")
@lcc.depends_on("DatabaseApi.Contracts.CallContractNoChangingState.CallContractNoChangingState.method_main_check")
def check_call_contract_no_changing_state_with_empty_call_contract_result(self, get_random_valid_asset_name,
get_random_integer):
value_amount = get_random_integer
asset_name = get_random_valid_asset_name
lcc.set_step("Create asset and get id new asset")
new_asset_id = self.utils.get_asset_id(self, asset_name, self.__database_api_identifier)
lcc.log_info("New asset created, asset_id is '{}'".format(new_asset_id))
lcc.set_step("Add created asset to account")
self.utils.add_assets_to_account(self, value_amount, new_asset_id, self.echo_acc0,
self.__database_api_identifier)
lcc.log_info("'{}' account became new asset holder of '{}' asset_id".format(self.echo_acc0, new_asset_id))
lcc.set_step("Create 'piggy' contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(self, self.echo_acc0, self.piggy_contract,
self.__database_api_identifier, value_asset_id=new_asset_id,
value_amount=value_amount, supported_asset_id=new_asset_id)
lcc.set_step("Call method of piggy contract: 'getPennie'")
operation = self.echo_ops.get_contract_call_operation(echo=self.echo, registrar=self.echo_acc0,
bytecode=self.getPennie, callee=contract_id,
value_asset_id=new_asset_id)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation,
log_broadcast=False)
lcc.log_info("Method 'getPennie' performed successfully")
lcc.set_step("Get contract result output")
contract_result = self.get_contract_result(broadcast_result, self.__database_api_identifier)
contract_output_in_hex = self.get_contract_output(contract_result, in_hex=True)
lcc.tags("Contract output in hex: '{}'".format(contract_output_in_hex))
lcc.set_step("Get call contract operation no changing state")
params = [contract_id, self.echo_acc0, new_asset_id, self.getPennie]
response_id = self.send_request(self.get_request("call_contract_no_changing_state", params),
self.__database_api_identifier)
result = self.get_response(response_id)["result"]
require_that("'method result has no value'", bool(result), is_false(), quiet=True)
lcc.log_info("Call method 'call_contract_no_changing_state' with params: '{}'".format(params))
lcc.set_step("Check call contract operation no changing state equal to call contract result")
check_that("'call contract operation no changing state'", result, equal_to(contract_output_in_hex), quiet=True)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# invenio-oarepo-ui is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""Version information for invenio-oarepo-ui.
This file is imported by ``invenio_oarepo_ui.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '1.1.3'
|
import time
import json
import re
from django.db import models
from django_extensions.db.models import TimeStampedModel
from url_store.models import URL
from .api_base import api
class TwitterUser(TimeStampedModel):
username = models.CharField(blank=True, max_length=100, primary_key=True)
source = models.CharField(blank=True, max_length=100)
@property
def latest_tweet(self):
try:
return self.tweet_set.latest('tweet_id')
except Tweet.DoesNotExist:
return None
@property
def latest_tweet_timestamp(self):
if self.latest_tweet:
return time.mktime(self.latest_tweet.created.timetuple())
def get_tweets(self):
latest_tweet = self.latest_tweet
kwargs = {
'screen_name': self.username
}
if latest_tweet:
kwargs['since_id'] = latest_tweet.pk
statuses = api.GetUserTimeline(**kwargs)
for tweet in statuses:
tweet_obj, created = Tweet.objects.update_or_create(
tweet_id=tweet.id,
raw_data=json.dumps(tweet.AsDict()),
twitter_user=self,
text=tweet.text,
)
class Tweet(TimeStampedModel):
tweet_id = models.CharField(blank=True, max_length=100, primary_key=True)
raw_data = models.TextField()
twitter_user = models.ForeignKey(TwitterUser)
text = models.CharField(blank=True, max_length=200)
@property
def json_from_raw(self):
return json.loads(self.raw_data)
def parse_urls(self):
json = self.json_from_raw
ALL_URLS = set()
if 'urls' in json:
ALL_URLS = ALL_URLS.union(json['urls'].values())
url_matcher = re.compile(r"(http://[^ ]+)")
extra_urls = url_matcher.findall(unicode(self.text))
if extra_urls:
ALL_URLS = ALL_URLS.union(set(extra_urls))
return ALL_URLS
def create_urls(self):
for url in self.parse_urls():
if not url.startswith('http://t.co/'):
url, created = URL.objects.update_or_create(
url=url
)
def save(self, *args, **kwargs):
ret = super(Tweet, self).save(*args, **kwargs)
self.create_urls()
return ret
|
# This code is based on and adapted from https://github.com/Qiskit/qiskit-qcgpu-provider/blob/master/qiskit_qcgpu_provider/qasm_simulator.py
# and https://github.com/qulacs/cirq-qulacs/blob/master/cirqqulacs/qulacs_simulator.py
#
# Adapted by Daniel Strano.
# Many thanks to the qulacs team for providing an open source example of a Cirq provider.
# Many thanks to Adam Kelley for an example of a third-party Qiskit provider.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
import numpy as np
import scipy as sp
import collections
from typing import Dict
from .qrack_controller_wrapper import qrack_controller_factory
from cirq import circuits, ops, protocols, study
from cirq.sim import SimulatesSamples
from cirq.sim.simulator import check_all_resolved, split_into_matching_protocol_then_general
class QasmSimulator(SimulatesSamples):
"""Contains an OpenCL based backend
**Backend options**
    The following backend options may be used within the
``backend_options`` kwarg for :meth:`QasmSimulator.run` or
``qiskit.execute``:
* ``"normalize"`` (bool): Keep track of the total global probability
normalization, and correct toward exactly 1. (Also turns on
"zero_threshold". With "zero_threshold">0 "schmidt_decompose"=True,
this can actually improve execution time, for opportune circuits.)
* ``"zero_threshold"`` (double): Sets the threshold for truncating
small values to zero in the simulation, gate-to-gate. (Only used
if "normalize" is enabled. Default value: Qrack default)
* ``"schmidt_decompose"`` (bool): If true, enable "QUnit" layer of
Qrack, including Schmidt decomposition optimizations.
* ``"paging"`` (bool): If true, enable "QPager" layer of Qrack.
* ``"stabilizer"`` (bool): If true, enable Qrack "QStabilizerHybrid"
layer of Qrack. (This can be enabled with universal gate simulations.)
* ``"opencl"`` (bool): If true, use the OpenCL engine of Qrack
("QEngineOCL") as the base "Schroedinger method" simulator.
If OpenCL is not available, simulation will fall back to CPU.
* ``"opencl_device_id"`` (int): (If OpenCL is enabled,) choose
the OpenCl device to simulate on, (indexed by order of device
discovery on OpenCL load/compilation). "-1" indicates to use
the Qrack default device, (the last discovered, which tends to
be a non-CPU accelerator, on common personal hardware systems.)
If "opencl-multi" is active, set the default device index.
* ``"opencl-multi"`` (bool): (If OpenCL and Schmidt decomposition
are enabled,) distribute Schmidt-decomposed sub-engines among
all available OpenCL devices.
"""
DEFAULT_CONFIGURATION = {
'backend_name': 'qasm_simulator',
'backend_version': '5.4.0',
'n_qubits': 64,
'url': 'https://github.com/vm6502q/qiskit-qrack-provider',
'simulator': True,
'local': True,
'conditional': False,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'description': 'An OpenCL based qasm simulator',
'coupling_map': None,
'normalize': True,
'zero_threshold': -999.0,
'schmidt_decompose': True,
'paging': True,
'stabilizer': True,
'opencl': True,
'opencl_device_id': -1,
'opencl_multi': False
}
# TODO: Implement these __init__ options. (We only match the signature for any compatibility at all, for now.)
def __init__(self,
configuration=None):
self._configuration = configuration or self.DEFAULT_CONFIGURATION
self._number_of_qubits = None
self._memory = collections.defaultdict(list)
self._results = {}
self._shots = {}
self._local_random = np.random.RandomState()
def _run(
self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int
) -> Dict[str, np.ndarray]:
"""Run a simulation, mimicking quantum hardware."""
param_resolver = param_resolver or study.ParamResolver({})
resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
check_all_resolved(resolved_circuit)
qubit_order = sorted(resolved_circuit.all_qubits())
self._number_of_qubits = len(qubit_order)
# Simulate as many unitary operations as possible before having to
# repeat work for each sample.
unitary_prefix, general_suffix = (
split_into_matching_protocol_then_general(resolved_circuit, protocols.has_unitary)
)
qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(unitary_prefix.all_qubits())
num_qubits = len(qubits)
qid_shape = protocols.qid_shape(qubits)
qubit_map = {q: i for i, q in enumerate(qubits)}
self._sample_measure = True
self._sim = qrack_controller_factory()
self._sim.initialize_qreg(self._configuration['opencl'],
self._configuration['schmidt_decompose'],
self._configuration['paging'],
self._configuration['stabilizer'],
self._number_of_qubits,
self._configuration['opencl_device_id'],
self._configuration['opencl_multi'],
self._configuration['normalize'],
self._configuration['zero_threshold'])
for moment in unitary_prefix:
operations = moment.operations
for op in operations:
indices = [num_qubits - 1 - qubit_map[qubit] for qubit in op.qubits]
self._try_gate(op, indices)
general_ops = list(general_suffix.all_operations())
if all(isinstance(op.gate, ops.MeasurementGate) for op in general_ops):
indices = []
for op in general_ops:
indices = indices + [num_qubits - 1 - qubit_map[qubit] for qubit in op.qubits]
sample_measure = self._add_sample_measure(indices, repetitions)
for sample in sample_measure:
qb_index = 0
for op in general_ops:
key = protocols.measurement_key(op.gate)
value = []
for _ in op.qubits:
value.append(sample[qb_index])
qb_index = qb_index + 1
self._memory[key].append(value)
return self._memory
self._sample_measure = False
preamble_sim = self._sim
for shot in range(repetitions):
self._sim = preamble_sim.clone()
for moment in general_suffix:
operations = moment.operations
for op in operations:
indices = [num_qubits - 1 - qubit_map[qubit] for qubit in op.qubits]
key = protocols.measurement_key(op.gate)
self._memory[key].append(self._add_qasm_measure(indices))
return self._memory
def _try_gate(self, op: ops.GateOperation, indices: np.array):
# One qubit gate
if isinstance(op.gate, ops.pauli_gates._PauliX):
self._sim.x([indices[0]])
elif isinstance(op.gate, ops.pauli_gates._PauliY):
self._sim.y([indices[0]])
elif isinstance(op.gate, ops.pauli_gates._PauliZ):
self._sim.z([indices[0]])
elif isinstance(op.gate, ops.common_gates.HPowGate):
if op.gate._exponent == 1.0:
self._sim.h([indices[0]])
            else:
                # general H**t: build the fractional-power matrix explicitly
                t = op.gate._exponent
                c = np.cos(np.pi * t / 2.0)
                s = np.sin(np.pi * t / 2.0)
                g = np.exp((np.pi * t / 2.0) * (1.0j))
                mat = [[g * (c - (1.0j) * s / np.sqrt(2.0)), -(1.0j) * g * s / np.sqrt(2.0)],
                       [-(1.0j) * g * s / np.sqrt(2.0), g * (c + (1.0j) * s / np.sqrt(2.0))]]
                self._sim.matrix_gate([indices[0]], mat)
elif isinstance(op.gate, ops.common_gates.XPowGate):
self._sim.rx([indices[0]], [-np.pi * op.gate._exponent])
elif isinstance(op.gate, ops.common_gates.YPowGate):
self._sim.ry([indices[0]], [-np.pi * op.gate._exponent])
elif isinstance(op.gate, ops.common_gates.ZPowGate):
self._sim.rz([indices[0]], [-np.pi * op.gate._exponent])
elif (len(indices) == 1 and isinstance(op.gate, ops.matrix_gates.MatrixGate)):
mat = op.gate._matrix
self._sim.matrix_gate([indices[0]], mat)
elif isinstance(op.gate, circuits.qasm_output.QasmUGate):
lmda = op.gate.lmda
theta = op.gate.theta
phi = op.gate.phi
self._sim.u([indices[0]], [theta * np.pi, phi * np.pi, lmda * np.pi])
# Two qubit gate
elif isinstance(op.gate, ops.common_gates.CNotPowGate):
if op.gate._exponent == 1.0:
self._sim.cx([indices[0], indices[1]])
else:
mat = sp.linalg.fractional_matrix_power([[0.0 + 0.0j, 1.0 + 0.0j], [1.0 + 0.0j, 0.0 + 0.0j]], -np.pi * op.gate._exponent)
self._sim.ctrld_matrix_gate(indices, mat)
elif isinstance(op.gate, ops.common_gates.CZPowGate):
if op.gate._exponent == 1.0:
self._sim.cz([indices[0], indices[1]])
else:
mat = sp.linalg.fractional_matrix_power([[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, -1.0 + 0.0j]], -np.pi * op.gate._exponent)
self._sim.ctrld_matrix_gate(indices, mat)
elif isinstance(op.gate, ops.common_gates.SwapPowGate):
if op.gate._exponent == 1.0:
self._sim.swap(indices[0], indices[1])
elif op.gate._exponent == 0.5:
self._sim.sqrtswap(indices[0], indices[1])
else:
return False
#TODO:
#elif isinstance(op.gate, ops.parity_gates.XXPowGate):
# qulacs_circuit.add_multi_Pauli_rotation_gate(indices, [1, 1], -np.pi * op.gate._exponent)
#elif isinstance(op.gate, ops.parity_gates.YYPowGate):
# qulacs_circuit.add_multi_Pauli_rotation_gate(indices, [2, 2], -np.pi * op.gate._exponent)
#elif isinstance(op.gate, ops.parity_gates.ZZPowGate):
# qulacs_circuit.add_multi_Pauli_rotation_gate(indices, [3, 3], -np.pi * op.gate._exponent)
#elif (len(indices) == 2 and isinstance(op.gate, ops.matrix_gates.MatrixGate)):
# indices.reverse()
# mat = op.gate._matrix
# qulacs_circuit.add_dense_matrix_gate(indices, mat)
# Three qubit gate
elif isinstance(op.gate, ops.three_qubit_gates.CCXPowGate):
if op.gate._exponent == 1.0:
self._sim.cx([indices[0], indices[1], indices[2]])
else:
mat = sp.linalg.fractional_matrix_power([[0.0 + 0.0j, 1.0 + 0.0j],[1.0 + 0.0j, 0.0 + 0.0j]], -np.pi * op.gate._exponent)
self._sim.ctrld_matrix_gate([indices[0], indices[1], indices[2]], mat)
elif isinstance(op.gate, ops.three_qubit_gates.CCZPowGate):
if op.gate._exponent == 1.0:
self._sim.cz([indices[0], indices[1], indices[2]])
else:
                mat = sp.linalg.fractional_matrix_power([[1.0 + 0.0j, 0.0 + 0.0j],[0.0 + 0.0j, -1.0 + 0.0j]], -np.pi * op.gate._exponent)
self._sim.ctrld_matrix_gate([indices[0], indices[1], indices[2]], mat)
elif isinstance(op.gate, ops.three_qubit_gates.CSwapGate):
self._sim.cswap(indices)
# Misc
#elif protocols.has_unitary(op):
# indices.reverse()
# mat = op._unitary_()
# qulacs_circuit.add_dense_matrix_gate(indices, mat)
# Not unitary
else:
return False
return True
def _add_sample_measure(self, measure_qubit, num_samples):
"""Generate memory samples from current statevector.
Taken almost straight from the terra source code.
Args:
measure_qubit (int[]): qubits to be measured.
num_samples (int): The number of memory samples to generate.
Returns:
list: A list of memory values.
"""
# If we only want one sample, it's faster for the backend to do it,
# without passing back the probabilities.
if num_samples == 1:
key = self._sim.measure(measure_qubit)
return [self._int_to_bits(key, len(measure_qubit))]
# Sample and convert to bit-strings
memory = []
measure_results = self._sim.measure_shots(measure_qubit, num_samples)
for key, value in measure_results.items():
memory += value * [self._int_to_bits(int(key), len(measure_qubit))]
return memory
def _add_qasm_measure(self, measure_qubit):
"""Apply a measure instruction to a qubit.
Args:
measure_qubit (int[]): qubits to be measured.
Returns:
int: Memory values.
"""
key = self._sim.measure(measure_qubit)
return self._int_to_bits(int(key), len(measure_qubit))
def _int_to_bits(self, i, len):
bits = []
for _ in range(len):
bits.append(i & 1)
i = i >> 1
return bits
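# --- Illustrative usage (added sketch; assumes cirq is installed and the Qrack
# --- controller wrapper imported above builds on this machine) ---
# import cirq
# q0, q1 = cirq.LineQubit.range(2)
# bell = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1), cirq.measure(q0, q1, key='m')])
# sim = QasmSimulator()
# result = sim.run(bell, repetitions=100)   # run() is provided by SimulatesSamples
# print(result.histogram(key='m'))          # expect roughly equal counts of 0 and 3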
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_mysql_query_tool contains the tests associated with mysql_query_tool
copyright: 2015, (c) sproutsocial.com
author: Nicholas Flink <nicholas@sproutsocial.com>
"""
import logging
import mock
import mysql_query_tool
import unittest
logging.basicConfig(level=logging.CRITICAL)
logger = logging.getLogger(__name__)
ALL_PRIVS_USER = "allprivs"
class FakeMysqlCursor(object):
def __init__(self):
self._lastQuery = None
self._lastQArgs = None
self._closed = False
def close(self):
self._closed = True
def execute(self, query, qArgs):
self._lastQuery = query
self._lastQArgs = qArgs
def fetchall(self):
callDict = {self._lastQuery: self._lastQArgs}
results = tuple([callDict])
if self._lastQuery == "SELECT User, Host FROM mysql.user":
results = tuple([{'Host': '%', 'User': 'grant_bot'},
{'Host': '%', 'User': 'revert_bot'}])
elif self._lastQuery.startswith("SHOW GRANTS FOR"):
user, host = self._lastQArgs
userAtHost = user+"@"+host
if user == ALL_PRIVS_USER:
results = tuple([{"Grants for "+userAtHost: "GRANT ALL PRIVILEGES ON *.* TO '"+user+"'@'"+host+"' IDENTIFIED BY PASSWORD '*DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF' WITH GRANT OPTION"}])
return results
class FakeMysqlConnection(object):
def __init__(self):
self._autocommit = True
self._commits = 0
self._rollbacks = 0
def close(self):
pass
def autocommit(self, val):
self._autocommit = val
def commit(self):
self._commits += 1
def rollback(self):
self._rollbacks += 1
def cursor(self, curType):
return FakeMysqlCursor()
class TestMysqlQueryTool(unittest.TestCase):
def setUp(self):
self._cluster = "cluster"
self._username = "username"
self._password = "password"
self._fakeConnection = FakeMysqlConnection()
self._fakeCursor = FakeMysqlCursor()
self._logPasswords = True
echoAccessLevel = mysql_query_tool.QAL_ALL
queryAccessLevel = mysql_query_tool.QAL_ALL
mysql_query_tool.MySQLdb.connect = mock.MagicMock(
return_value=self._fakeConnection)
self._mysqlQueryTool = mysql_query_tool.MysqlQueryTool(self._cluster, self._username, self._password, echoAccessLevel, queryAccessLevel, self._logPasswords)
self._mysqlQueryTool.getCursor = mock.MagicMock(
return_value=self._fakeCursor)
def test_closeConnection(self):
self._mysqlQueryTool.closeConnection()
self.assertEquals(None, self._mysqlQueryTool._connection)
def test_getCmdLineQuery(self):
user = "test"
host = "%"
cmdLineQuery = self._mysqlQueryTool.getCmdLineQuery("SHOW GRANTS FOR %s@%s", (user, host))
expectedQuery = "mysql -h "+self._cluster+" -u "+self._username+" -p"+self._password+' -e "SHOW GRANTS FOR \'test\'@\'%\'"'
self.assertEquals(expectedQuery, cmdLineQuery)
def test_transactions(self):
# test successful transaction
self.assertEquals(0, self._fakeConnection._commits)
self.assertEquals(True, self._fakeConnection._autocommit)
self._mysqlQueryTool.beginTransaction()
self.assertEquals(False, self._fakeConnection._autocommit)
self._mysqlQueryTool.commitTransaction()
self.assertEquals(1, self._fakeConnection._commits)
self.assertEquals(True, self._fakeConnection._autocommit)
# test failed transaction
self.assertEquals(0, self._fakeConnection._rollbacks)
self.assertEquals(True, self._fakeConnection._autocommit)
self._mysqlQueryTool.beginTransaction()
self.assertEquals(False, self._fakeConnection._autocommit)
self._mysqlQueryTool.rollbackTransaction()
self.assertEquals(True, self._fakeConnection._autocommit)
self.assertEquals(1, self._fakeConnection._rollbacks)
def test_queryVersion(self):
result = self._mysqlQueryTool.queryVersion()
self.assertEquals(1, len(result))
self.assertDictEqual({'SELECT VERSION()': None}, result[0])
def test_queryFlushPrivileges(self):
result = self._mysqlQueryTool.queryFlushPrivileges()
self.assertEquals(1, len(result))
self.assertDictEqual({'FLUSH PRIVILEGES': None}, result[0])
def test_queryGrant(self):
allPrivsUserAtHost = ALL_PRIVS_USER+"@localhost"
result = self._mysqlQueryTool.queryGrant(allPrivsUserAtHost, ["SELECT", "insert", "blah"], "db.table")
self.assertEquals(1, len(result))
self.assertDictEqual({'GRANT INSERT, SELECT ON db.table TO %s@%s': (ALL_PRIVS_USER, 'localhost')}, result[0])
def test_queryUserGrants(self):
result = self._mysqlQueryTool.queryUserGrants(ALL_PRIVS_USER+"@localhost")
self.assertDictEqual({'*.*': set(['ALL PRIVILEGES'])}, result)
def test_queryRevoke(self):
allPrivsUserAtHost = ALL_PRIVS_USER+"@localhost"
result = self._mysqlQueryTool.queryRevoke(allPrivsUserAtHost, ["SELECT"], "db.table")
self.assertEquals(1, len(result))
self.assertDictEqual({'REVOKE SELECT ON db.table FROM %s@%s': (ALL_PRIVS_USER, 'localhost')}, result[0])
def test_getGrantDeltaDict(self):
userAtHost = ALL_PRIVS_USER+'@localhost'
dbTable = "*.*"
privileges = ["SELECT"]
grantDeltaDict = self._mysqlQueryTool.getGrantDeltaDict(userAtHost, dbTable, privileges)
expectedGrantDeltaDict = {'grants': set([]),
'revokes': self._mysqlQueryTool.getAllPrivileges() - set(privileges)}
self.assertDictEqual(expectedGrantDeltaDict, grantDeltaDict)
def test_findAllUsers(self):
allUserDict = self._mysqlQueryTool.findAllUsers()
expectedDict = set(['revert_bot@%', 'grant_bot@%'])
self.assertEquals(expectedDict, allUserDict)
def test_userExists(self):
exists = self._mysqlQueryTool.userExists("grant_bot", "%")
self.assertTrue(exists)
def test_createUser(self):
newUserAtHost = "user@host"
newPassword = "password"
self._mysqlQueryTool.createUser(newUserAtHost, newPassword)
self.assertEquals("CREATE USER %s@%s IDENTIFIED BY %s", self._fakeCursor._lastQuery)
self.assertItemsEqual(("user", "host", "password"), self._fakeCursor._lastQArgs)
def test_dropUser(self):
newUserAtHost = "user@host"
self._mysqlQueryTool.dropUser(newUserAtHost)
self.assertEquals("DROP USER %s@%s", self._fakeCursor._lastQuery)
self.assertItemsEqual(("user", "host"), self._fakeCursor._lastQArgs)
class TestMysqlQueryToolMain(unittest.TestCase):
def setUp(self):
self._backupPath = "./testMysqlBackup"
self._echoOnly = True
self._unmockedGetVersion = mysql_query_tool.MysqlQueryTool.getVersion
self._unmockedConnect = mysql_query_tool.MysqlQueryTool.connect
self._unmockedQueryMySQL = mysql_query_tool.MysqlQueryTool.queryMySQL
mysql_query_tool.MysqlQueryTool.getVersion = mock.MagicMock(
return_value=5.5)
mysql_query_tool.MysqlQueryTool.connect = mock.MagicMock(
return_value=None)
mysql_query_tool.MysqlQueryTool.queryMySQL = mock.MagicMock(
return_value=None)
def tearDown(self):
mysql_query_tool.MysqlQueryTool.getVersion = self._unmockedGetVersion
        mysql_query_tool.MysqlQueryTool.connect = self._unmockedConnect
mysql_query_tool.MysqlQueryTool.queryMySQL = self._unmockedQueryMySQL
def test_main(self):
cluster = "cluster"
username = "username"
password = "password"
dbName = "db"
query = "SELECT VERSION()"
qArgs = None
mysql_query_tool.main(["-c", cluster, "-u", username, "-p", password, "-d", dbName, "-q", query])
expectedCalls = [mock.call(mysql_query_tool.QAL_ALL, query, qArgs)]
mysql_query_tool.MysqlQueryTool.queryMySQL.assert_has_calls(
expectedCalls)
if __name__ == '__main__':
logging.basicConfig(level=logging.CRITICAL)
unittest.main()
|
#!/usr/bin/env python
"""Module for calculating sine and cosine function solely using parametrization
of unit circle by arc-length; i.e. no power series, no circular definitions in
terms of complex exponentials, and no right triangles.
Caleb Levy, March 2015.
"""
import numpy as np
import matplotlib.pyplot as plt
def circle_points(n):
"""Return n point approximation to the unit circle using the definition
that (x, y) is on the unit circle if x**2 + y**2 = 1."""
top = np.linspace(1, -1, n)[:-1]
bottom = np.linspace(-1, 1, n)
x = np.concatenate((top, bottom))
circle_top = np.sqrt(1 - top**2)
circle_bot = -np.sqrt(1 - bottom**2)
y = np.concatenate((circle_top, circle_bot))
return x, y
def arc_length(x, y):
"""Parametrize angle by arc length along the unit circle"""
length = np.array([0.]*len(x))
for i in range(1, len(x)):
length[i] = length[i-1] + np.sqrt((x[i]-x[i-1])**2 + (y[i]-y[i-1])**2)
return length
def fundamental_sin(n=100):
"""Define sine by height with respect to arc length"""
x, y = circle_points(n)
l = arc_length(x, y)
return l, y
def fundamental_cos(n=100):
"""Define cosine by base leg length with respect to arc length"""
x, y = circle_points(n)
l = arc_length(x, y)
return l, x
if __name__ == '__main__':
    x, y = fundamental_sin(1000)
    plt.plot(x, y, label='arc-length sine')
    x2, y2 = fundamental_cos(1000)
    plt.plot(x2, y2, label='arc-length cosine')
    plt.plot(x, np.sin(x), '--', label='np.sin for comparison')
    plt.legend()
    plt.show()
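    # Illustrative addition (not in the original script): half of the total
    # discretized arc length of the unit circle is a direct numerical estimate
    # of pi, which is a handy check of the parametrization defined above.
    px, py = circle_points(10000)
    print('pi estimate from arc length:', arc_length(px, py)[-1] / 2)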
|
# Generated by Django 2.2 on 2021-05-27 06:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_pdf_bundels'),
]
operations = [
migrations.RemoveField(
model_name='file',
name='pdf_path',
),
]
|
# Section 12.3.1 snippets
# Loading the Data
from pathlib import Path
from textblob import TextBlob
blob = TextBlob(Path('RomeoAndJuliet.txt').read_text())
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# Getting the Word Frequencies
items = blob.word_counts.items()
# Eliminating the Stop Words
items = [item for item in items if item[0] not in stop_words]
# Sorting the Words by Frequency
from operator import itemgetter
sorted_items = sorted(items, key=itemgetter(1), reverse=True)
# Getting the Top 20 Words
top20 = sorted_items[1:21]
# Convert top20 to a DataFrame
import pandas as pd
df = pd.DataFrame(top20, columns=['word', 'count'])
df
# Visualizing the DataFrame
axes = df.plot.bar(x='word', y='count', legend=False)
import matplotlib.pyplot as plt
plt.gcf().tight_layout()
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
|
# ########################################################################################
# Subdomain Frequency Data Processing
# We copied the table of data available in
# bitquark_20160227_subdomains_popular_1000_with_count.txt,
# downloaded from
# https://github.com/bitquark/dnspop/blob/master/results/bitquark_20160227_subdomains_popular_1000_with_count,
# and then processed it into a dictionary of relative frequencies.
#
# ###################################
import pandas as pd
import json
file = "data/bitquark_20160227_subdomains_popular_1000_with_count.txt"
df = pd.read_csv(file, sep=" ", header=None, names=['Count','Subdomain'])
df['freq'] = df['Count']/(df['Count'].sum())
lookup = pd.Series(df.freq.values, index=df.Subdomain).to_dict()
with open('url2features/data/subdomain_freqs.dat', 'w') as out_file:
    out_file.write(json.dumps(lookup))
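# Illustrative sanity check (not part of the original processing script): the
# resulting dictionary maps each subdomain label to its relative frequency, so
# the values should sum to ~1.0 and common labels such as 'www' should rank
# near the top of the source list.
print('total probability mass:', sum(lookup.values()))
print("frequency of 'www':", lookup.get('www', 0.0))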
|
"""Matcher object."""
# This file is part of the 'xarray-regex' project
# (http://github.com/Descanonge/xarray-regex) and subject
# to the MIT License as defined in the file 'LICENSE',
# at the root of this project. © 2021 Clément Haëck
import re
class Matcher():
"""Manage a matcher inside the pre-regex.
Parameters
----------
m: re.match
Match object obtained to find matchers in the pre-regex.
idx: int
Index inside the pre-regex.
Attributes
----------
idx: int
Index inside the pre-regex.
group: str
Group name.
name: str
Matcher name.
custom: bool
If there is a custom regex to use preferentially.
rgx: str
Regex.
discard: bool
If the matcher should not be used when retrieving values from matches.
match: str
The string that created the matcher `%(match)`.
"""
NAME_RGX = {"idx": r"\d+",
"Y": r"\d\d\d\d",
"m": r"\d\d",
"d": r"\d\d",
"j": r"\d\d\d",
"H": r"\d\d",
"M": r"\d\d",
"S": r"\d\d",
"x": r"%Y%m%d",
"X": r"%H%M%S",
"F": r"%Y-%m-%d",
"B": r"[a-zA-Z]*",
"text": r"[a-zA-Z]*",
"char": r"\S*"}
"""Regex str for each type of element."""
def __init__(self, m: re.match, idx: int = 0):
self.idx = idx
self.group = None
self.name = None
self.custom = False
self.rgx = None
self.discard = False
self.match = m.group()[2:-1] # slicing removes %()
self.set_matcher(m)
def __repr__(self):
return '\n'.join([super().__repr__(), self.__str__()])
def __str__(self):
return '{}: {}'.format(self.idx, self.match)
def set_matcher(self, m: re.match):
"""Find attributes from match.
Raises
------
NameError
No name.
ValueError
Empty custom regex.
"""
group = m.group('group')
name = m.group('name')
custom = m.group('cus') is not None
rgx = m.group('cus_rgx')
if name is None:
raise NameError("Matcher name cannot be empty.")
if custom and not rgx:
raise ValueError("Matcher custom regex cannot be empty.")
self.group = group
self.name = name
self.custom = custom
if custom:
self.rgx = rgx
else:
self.rgx = self.NAME_RGX[name]
def get_regex(self) -> str:
"""Get matcher regex.
Replace the matchers name by regex from `Matcher.NAME_RGX`.
If there is a custom regex, recursively replace '%' followed by a single
letter by the corresponding regex from `NAME_RGX`. '%%' is replaced by a
single percentage character.
Raises
------
KeyError
Unknown replacement.
"""
def replace(match):
group = match.group(1)
if group == '%':
return '%'
if group in self.NAME_RGX:
replacement = self.NAME_RGX[group]
                if '%' in replacement:
                    return re.sub("%([a-zA-Z%])", replace, replacement)
return replacement
raise KeyError("Unknown replacement '{}'.".format(match.group(0)))
return re.sub("%([a-zA-Z%])", replace, self.rgx)
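# Illustrative sketch (not part of the original module): the recursive expansion
# performed inside Matcher.get_regex can be reproduced standalone, which makes
# the NAME_RGX table easier to reason about. The helper name `_expand` is an
# assumption added purely for illustration.
def _expand(pattern, table=Matcher.NAME_RGX):
    """Recursively replace '%<letter>' tokens with their NAME_RGX regexes."""
    def repl(m):
        key = m.group(1)
        if key == '%':
            return '%'
        return _expand(table[key], table)
    return re.sub("%([a-zA-Z%])", repl, pattern)
# For example, _expand(Matcher.NAME_RGX['F']) gives r'\d\d\d\d-\d\d-\d\d'.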
|
from django.test import TestCase
from django.utils import timezone
from django.contrib.auth.models import User
from django.shortcuts import reverse
from django.db.models import Q
from web.models import Event
from datetime import timedelta
from bs4 import BeautifulSoup
class EventViewTest(TestCase):
@staticmethod
def get_event_list(response):
soup = BeautifulSoup(response.content, 'html.parser')
event_list = list()
for card_html in soup.find_all('div', {'class': 'event-card'}):
url = card_html.find('a', href=True)['href']
event_list.append(url)
return event_list
def setUp(self):
User.objects.create_user('standard_user')
User.objects.create_user('staff_user', is_staff=True)
User.objects.create_user('super_user', is_staff=True, is_superuser=True)
yesterday = timezone.now() - timedelta(days=1)
tomorrow = timezone.now() + timedelta(days=1)
Event.objects.create(name='Open Event', slug='open-event', open_date=yesterday)
Event.objects.create(name='Closed Event', slug='closed-event', open_date=tomorrow)
def test_open_event_shown_for_guest(self):
response = self.client.get('/open-event')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'web/pages/event.html')
def test_open_event_shown_for_all_users(self):
for user in User.objects.all():
self.client.force_login(user)
response = self.client.get('/open-event')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'web/pages/event.html')
self.client.logout()
def test_closed_event_redirect_to_home_for_guest(self):
response = self.client.get('/closed-event')
self.assertRedirects(response, reverse('home'), target_status_code=200)
def test_closed_event_redirect_to_home_for_standard_user(self):
self.client.force_login(User.objects.get(username='standard_user'))
response = self.client.get('/closed-event')
self.assertRedirects(response, reverse('home'), target_status_code=200)
def test_closed_event_shown_for_staff_user(self):
self.client.force_login(User.objects.get(username='staff_user'))
response = self.client.get('/closed-event')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'web/pages/event.html')
def test_closed_event_shown_for_super_user(self):
self.client.force_login(User.objects.get(username='super_user'))
response = self.client.get('/closed-event')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'web/pages/event.html')
    def test_open_event_listed_for_guest(self):
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'web/pages/events.html')
event_list = self.get_event_list(response)
self.assertTrue('/open-event' in event_list)
def test_open_event_listed_for_all_users(self):
for user in User.objects.all():
self.client.force_login(user)
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'web/pages/events.html')
event_list = self.get_event_list(response)
self.assertTrue('/open-event' in event_list)
def test_closed_event_hidden_for_guest(self):
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
event_list = self.get_event_list(response)
self.assertFalse('/closed-event' in event_list)
def test_closed_event_hidden_for_standard_user(self):
self.client.force_login(User.objects.get(username='standard_user'))
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
event_list = self.get_event_list(response)
self.assertFalse('/closed-event' in event_list)
def test_closed_event_listed_for_staff_user(self):
self.client.force_login(User.objects.get(username='staff_user'))
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
event_list = self.get_event_list(response)
self.assertTrue('/closed-event' in event_list)
def test_closed_event_listed_for_super_user(self):
self.client.force_login(User.objects.get(username='super_user'))
response = self.client.get(reverse('events'))
self.assertEqual(response.status_code, 200)
event_list = self.get_event_list(response)
self.assertTrue('/closed-event' in event_list)
|
from CommonCode.convertPbToJSON import ConvertPbToJSON
from CommonCode.strings import Strings
from Enums.databaseTables import Tables
from protobuff.workertype_pb2 import WorkerTypeEnum
class DatabaseHelper:
m_pbConvertor = ConvertPbToJSON()
BASE_QUERY = "SELECT * FROM"
BASE_UPDATE_QUERY = "UPDATE"
BASE_INSERT_QUERY = 'INSERT INTO'
BASE_RAW_DATA_QUERY = "SELECT raw_data FROM "
BASE_COUNT_RAW_DATA_QUERY = "SELECT count(raw_data) FROM "
def getBaseQuery(self):
return self.BASE_QUERY
def getQuotedString(self, data):
return '"' + data + '"'
def getSingleQuotedString(self, data):
return "'" + data + "'"
def getEntityQuery(self, data):
return self.BASE_QUERY + ' "' + data + '"'
def updateEntityQuery(self, data, value):
return self.BASE_UPDATE_QUERY + ' "' + data + '"' + " SET dbid = " + value + " WHERE id = 1"
def getInsertQuery(self, table, data):
if(table == Tables.WORKER_TYPE.name):
print(type(data.workerType))
return self.BASE_INSERT_QUERY + ' "' + table + '"' + "( dbid ,workertype, raw_data) " + " VALUES " + "(" + self.getSingleQuotedString(
data.dbInfo.id) + " , " + self.getSingleQuotedString(
str(WorkerTypeEnum.Name(data.workerType))) + " , " + self.m_pbConvertor.converPbtojsonString(
builder=data) + ");"
else:
return self.BASE_INSERT_QUERY + ' "' + table + '"' + "( dbid , raw_data) " + " VALUES " + "(" + self.getSingleQuotedString(
data.dbInfo.id) + " , " + self.m_pbConvertor.converPbtojsonString(
builder=data) + ");"
def getRowDataQuery(self, table, id):
        return self.BASE_RAW_DATA_QUERY + '"' + table + '" WHERE dbid = ' + self.getSingleQuotedString(id) + ";"
def updateRawDataEntityQuery(self, id, newPb, table):
return self.BASE_UPDATE_QUERY + ' "' + table + '"' + " SET raw_data = " + self.m_pbConvertor.converPbtojsonString(
builder=newPb) + " WHERE dbid = " + self.getSingleQuotedString(id) + " ;"
def getAllTableQuery(self):
return "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'"
def getCreateTableQuery(self, table):
        return 'CREATE TABLE '+'"'+table+'"'+' (id serial PRIMARY KEY,dbid VARCHAR (255) UNIQUE NOT NULL,raw_data json NOT NULL);'
def getCreateEntityTableQuery(self, table):
return 'CREATE TABLE '+'"'+table+'"'+' (id serial PRIMARY KEY,dbid VARCHAR (255) UNIQUE NOT NULL);'
def getWorkerTypeTableQuery(self,table):
        return 'CREATE TABLE '+'"'+table+'"'+' (id serial PRIMARY KEY,dbid VARCHAR (255) UNIQUE NOT NULL,workertype VARCHAR (255) UNIQUE NOT NULL,raw_data json NOT NULL);'
def getSearchQuery(self,table,subquery):
return self.BASE_RAW_DATA_QUERY +'"'+table+'" WHERE '+ subquery + ';'
def getCountQuery(self,table,subquery):
if(Strings.isEmpty(subquery)):
return self.BASE_COUNT_RAW_DATA_QUERY +'"'+table+'" WHERE '+ 'true' + ';'
else:
return self.BASE_COUNT_RAW_DATA_QUERY +'"'+table+'" WHERE '+ subquery + ';'
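# Illustrative usage sketch (not part of the original module; the table name and
# id below are made up). It only prints the SQL strings these helpers build and
# does not touch a database.
if __name__ == '__main__':
    helper = DatabaseHelper()
    print(helper.getEntityQuery('worker'))             # SELECT * FROM "worker"
    print(helper.getCreateEntityTableQuery('worker'))  # CREATE TABLE "worker" (...)
    print(helper.getRowDataQuery('worker', 'abc-123'))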
|
from distutils.util import strtobool
def boolean_argument(value):
"""Convert a string value to boolean."""
return bool(strtobool(value))
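if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): this helper
    # is typically handed to argparse as a `type=` converter; the '--verbose'
    # flag below is made up for the example.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', type=boolean_argument, default=False)
    args = parser.parse_args(['--verbose', 'yes'])
    print(args.verbose)  # True; 'y', 'yes', 't', 'true', 'on', '1' all map to True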
|
__version__ = "0.0.3dev0"
from . import dhn_from_osm # noqa: F401
from . import graph # noqa: F401
from . import helpers # noqa: F401
from . import input_output # noqa: F401
from . import model # noqa: F401
from . import network # noqa: F401
from . import plotting # noqa: F401
from . import simulation # noqa: F401
from .gistools import connect_points # noqa: F401
from .gistools import geometry_operations # noqa: F401
from .optimization import add_components # noqa: F401
from .optimization import dhs_nodes # noqa: F401
from .optimization import oemof_heatpipe # noqa: F401
from .optimization import optimization_models # noqa: F401
|
from PyQt5.QtWidgets import QWidget, QGridLayout, QLineEdit, \
QMessageBox, QSpinBox, QHBoxLayout, QPushButton, QDialog, QSplitter, \
QVBoxLayout, QLabel, QTextEdit, QSizePolicy
from PyQt5.QtGui import QFontMetrics, QIcon
from PyQt5.QtCore import Qt
from app.resources.resources import RESOURCES
from app.data.database import DB
from app.extensions.custom_gui import PropertyBox, ComboBox, QHLine
from app.extensions.list_widgets import AppendMultiListWidget, MultiDictWidget
from app.extensions.list_models import ReverseDoubleListModel
from app.extensions.multi_select_combo_box import MultiSelectComboBox
from app.editor.settings import MainSettingsController
from app.editor.tag_widget import TagDialog
from app.editor.stat_widget import StatListWidget, StatAverageDialog, ClassStatAveragesModel
from app.editor.weapon_editor.weapon_rank import WexpGainDelegate, WexpGainMultiAttrModel
from app.editor.learned_skill_delegate import LearnedSkillDelegate
from app.editor.icons import ItemIcon80
from app.editor.class_editor import class_model
from app.editor.map_sprite_editor import map_sprite_tab
from app.editor.combat_animation_editor import combat_animation_tab
from app.editor import timer
from app.utilities import str_utils
class ClassProperties(QWidget):
def __init__(self, parent, current=None):
super().__init__(parent)
self.window = parent
self.model = self.window.left_frame.model
self._data = self.window._data
self.settings = MainSettingsController()
theme = self.settings.get_theme(0)
if theme == 0:
icon_folder = 'icons/icons'
else:
icon_folder = 'icons/dark_icons'
self.current = current
top_section = QHBoxLayout()
main_section = QGridLayout()
self.icon_edit = ItemIcon80(self)
main_section.addWidget(self.icon_edit, 0, 0, 2, 2, Qt.AlignHCenter)
self.nid_box = PropertyBox("Unique ID", QLineEdit, self)
self.nid_box.edit.textChanged.connect(self.nid_changed)
self.nid_box.edit.editingFinished.connect(self.nid_done_editing)
main_section.addWidget(self.nid_box, 0, 2)
self.name_box = PropertyBox("Display Name", QLineEdit, self)
self.name_box.edit.setMaxLength(13)
self.name_box.edit.textChanged.connect(self.name_changed)
main_section.addWidget(self.name_box, 1, 2)
self.desc_box = PropertyBox("Description", QTextEdit, self)
self.desc_box.edit.textChanged.connect(self.desc_changed)
font_height = QFontMetrics(self.desc_box.edit.font())
self.desc_box.edit.setFixedHeight(font_height.lineSpacing() * 3 + 20)
main_section.addWidget(self.desc_box, 2, 0, 1, 2)
self.movement_box = PropertyBox("Movement Type", ComboBox, self)
self.movement_box.edit.addItems(DB.mcost.unit_types)
self.movement_box.edit.currentIndexChanged.connect(self.movement_changed)
main_section.addWidget(self.movement_box, 2, 2)
self.tier_box = PropertyBox("Tier", QSpinBox, self)
self.tier_box.edit.setRange(0, 5)
self.tier_box.edit.setAlignment(Qt.AlignRight)
self.tier_box.edit.valueChanged.connect(self.tier_changed)
main_section.addWidget(self.tier_box, 3, 0)
self.promotes_from_box = PropertyBox("Promotes From", ComboBox, self)
self.promotes_from_box.edit.addItems(["None"] + DB.classes.keys())
self.promotes_from_box.edit.activated.connect(self.promotes_from_changed)
main_section.addWidget(self.promotes_from_box, 3, 1)
self.max_level_box = PropertyBox("Max Level", QSpinBox, self)
self.max_level_box.edit.setRange(1, 255)
self.max_level_box.edit.setAlignment(Qt.AlignRight)
self.max_level_box.edit.valueChanged.connect(self.max_level_changed)
main_section.addWidget(self.max_level_box, 3, 2)
tag_section = QHBoxLayout()
self.turns_into_box = PropertyBox("Turns Into", MultiSelectComboBox, self)
self.turns_into_box.edit.setPlaceholderText("Promotion Options...")
self.turns_into_box.edit.addItems(DB.classes.keys())
self.turns_into_box.edit.updated.connect(self.turns_into_changed)
tag_section.addWidget(self.turns_into_box)
self.tag_box = PropertyBox("Tags", MultiSelectComboBox, self)
self.tag_box.edit.setPlaceholderText("No tag")
self.tag_box.edit.addItems(DB.tags.keys())
self.tag_box.edit.updated.connect(self.tags_changed)
tag_section.addWidget(self.tag_box)
self.tag_box.add_button(QPushButton('...'))
self.tag_box.button.setMaximumWidth(40)
self.tag_box.button.clicked.connect(self.access_tags)
stat_section = QGridLayout()
self.class_stat_widget = StatListWidget(self.current, "Stats", parent=self)
self.class_stat_widget.button.clicked.connect(self.display_averages)
self.class_stat_widget.model.dataChanged.connect(self.stat_list_model_data_changed)
self.averages_dialog = None
stat_section.addWidget(self.class_stat_widget, 1, 0, 1, 2)
weapon_section = QHBoxLayout()
attrs = ("usable", "nid", "wexp_gain")
default_weapons = {weapon_nid: DB.weapons.default() for weapon_nid in DB.weapons.keys()}
self.wexp_gain_widget = MultiDictWidget(
default_weapons, "Weapon Experience",
attrs, WexpGainDelegate, self, model=WexpGainMultiAttrModel)
self.wexp_gain_widget.model.checked_columns = {0} # Add checked column
weapon_section.addWidget(self.wexp_gain_widget)
skill_section = QHBoxLayout()
attrs = ("level", "skill_nid")
self.class_skill_widget = AppendMultiListWidget([], "Class Skills", attrs, LearnedSkillDelegate, self, model=ReverseDoubleListModel)
skill_section.addWidget(self.class_skill_widget)
self.map_sprite_label = QLabel()
self.map_sprite_label.setMaximumWidth(32)
self.map_sprite_box = QPushButton("Choose Map Sprite...")
self.map_sprite_box.clicked.connect(self.select_map_sprite)
self.map_sprite_auto_box = QPushButton()
self.map_sprite_auto_box.setIcon(QIcon(f"{icon_folder}/autoassign.png"))
self.map_sprite_auto_box.setMaximumWidth(32)
self.map_sprite_auto_box.setToolTip("Auto-assign map sprite with the same unique ID")
self.map_sprite_auto_box.clicked.connect(self.autoselect_map_sprite)
self.combat_anim_label = QLabel()
self.combat_anim_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
self.combat_anim_box = QPushButton("Choose Combat Animation...")
self.combat_anim_box.clicked.connect(self.select_combat_anim)
self.combat_anim_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.combat_anim_auto_box = QPushButton()
self.combat_anim_auto_box.setIcon(QIcon(f"{icon_folder}/autoassign.png"))
self.combat_anim_auto_box.setMaximumWidth(32)
self.combat_anim_auto_box.setToolTip("Auto-assign combat animation with the same unique ID")
self.combat_anim_auto_box.clicked.connect(self.autoselect_combat_anim)
self.combat_anim_auto_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
total_section = QVBoxLayout()
total_section.addLayout(top_section)
total_section.addLayout(main_section)
total_section.addLayout(tag_section)
total_section.addWidget(QHLine())
total_section.addLayout(stat_section)
total_widget = QWidget()
total_widget.setLayout(total_section)
right_section = QVBoxLayout()
right_section.addLayout(weapon_section)
right_section.addWidget(QHLine())
right_section.addLayout(skill_section)
map_sprite_section = QHBoxLayout()
map_sprite_section.addWidget(self.map_sprite_label)
map_sprite_section.addWidget(self.map_sprite_box)
map_sprite_section.addWidget(self.map_sprite_auto_box)
right_section.addLayout(map_sprite_section)
combat_anim_section = QHBoxLayout()
combat_anim_section.addWidget(self.combat_anim_label)
combat_anim_section.addWidget(self.combat_anim_box)
combat_anim_section.addWidget(self.combat_anim_auto_box)
right_section.addLayout(combat_anim_section)
right_widget = QWidget()
right_widget.setLayout(right_section)
self.splitter = QSplitter(self)
self.splitter.setChildrenCollapsible(False)
self.splitter.addWidget(total_widget)
self.splitter.addWidget(right_widget)
self.splitter.setStyleSheet("QSplitter::handle:horizontal {background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #eee, stop:1 #ccc); border: 1px solid #777; width: 13px; margin-top: 2px; margin-bottom: 2px; border-radius: 4px;}")
final_section = QHBoxLayout()
self.setLayout(final_section)
final_section.addWidget(self.splitter)
# final_section = QHBoxLayout()
# self.setLayout(final_section)
# final_section.addLayout(total_section)
# final_section.addWidget(QVLine())
# final_section.addLayout(right_section)
timer.get_timer().tick_elapsed.connect(self.tick)
def tick(self):
self.window.update_list()
def nid_changed(self, text):
self.current.nid = text
self.window.update_list()
def nid_done_editing(self):
# Check validity of nid!
other_nids = [d.nid for d in self._data.values() if d is not self.current]
if self.current.nid in other_nids:
QMessageBox.warning(self.window, 'Warning', 'Class ID %s already in use' % self.current.nid)
self.current.nid = str_utils.get_next_name(self.current.nid, other_nids)
self.model.on_nid_changed(self._data.find_key(self.current), self.current.nid)
self._data.update_nid(self.current, self.current.nid)
self.window.update_list()
def name_changed(self, text):
self.current.name = text
def desc_changed(self, text=None):
self.current.desc = self.desc_box.edit.toPlainText()
# self.current.desc = text
def tier_changed(self, val):
self.current.tier = val
def promotes_from_changed(self):
p = self.promotes_from_box.edit.currentText()
if p == "None":
self.current.promotes_from = None
else:
self.current.promotes_from = p
def movement_changed(self, index):
self.current.movement_group = self.movement_box.edit.currentText()
def max_level_changed(self, val):
self.current.max_level = val
def turns_into_changed(self):
self.current.turns_into = self.turns_into_box.edit.currentText()
def tags_changed(self):
self.current.tags = self.tag_box.edit.currentText()
def access_tags(self):
dlg = TagDialog.create(self)
result = dlg.exec_()
if result == QDialog.Accepted:
self.tag_box.edit.clear()
self.tag_box.edit.addItems(DB.tags.keys())
self.tag_box.edit.setCurrentTexts(self.current.tags)
else:
pass
# def access_stats(self):
# dlg = StatTypeDialog.create()
# result = dlg.exec_()
# if result == QDialog.Accepted:
# self.class_stat_widget.update_stats()
# else:
# pass
def display_averages(self):
if not self.current:
return
# Modeless dialog
if not self.averages_dialog:
self.averages_dialog = StatAverageDialog(self.current, "Class", ClassStatAveragesModel, self)
self.averages_dialog.show()
self.averages_dialog.raise_()
self.averages_dialog.activateWindow()
def close_averages(self):
if self.averages_dialog:
self.averages_dialog.done(0)
self.averages_dialog = None
def stat_list_model_data_changed(self, index1, index2):
if self.averages_dialog:
self.averages_dialog.update()
def select_map_sprite(self):
res, ok = map_sprite_tab.get()
if ok:
nid = res.nid
self.current.map_sprite_nid = nid
pix = class_model.get_map_sprite_icon(self.current, num=0)
self.map_sprite_label.setPixmap(pix)
self.window.update_list()
def autoselect_map_sprite(self):
nid = self.current.nid
res = RESOURCES.map_sprites.get(nid)
if res:
nid = res.nid
self.current.map_sprite_nid = nid
pix = class_model.get_map_sprite_icon(self.current, num=0)
self.map_sprite_label.setPixmap(pix)
self.window.update_list()
def select_combat_anim(self):
res, ok = combat_animation_tab.get_animations()
if res and ok:
nid = res.nid
self.current.combat_anim_nid = nid
pix = class_model.get_combat_anim_icon(self.current)
if pix:
self.combat_anim_label.setPixmap(pix)
self.window.update_list()
        else:  # Used to clear the combat animation, since that can be a reasonable choice
self.current.combat_anim_nid = None
self.combat_anim_label.clear()
def autoselect_combat_anim(self):
nid = self.current.nid
res = RESOURCES.combat_anims.get(nid)
if res:
nid = res.nid
self.current.combat_anim_nid = nid
pix = class_model.get_combat_anim_icon(self.current)
if pix:
self.combat_anim_label.setPixmap(pix)
self.window.update_list()
def set_current(self, current):
self.current = current
self.nid_box.edit.setText(current.nid)
self.name_box.edit.setText(current.name)
self.desc_box.edit.setText(current.desc)
self.tier_box.edit.setValue(current.tier)
self.max_level_box.edit.setValue(current.max_level)
self.movement_box.edit.setValue(current.movement_group)
# Reset promotes from box
self.promotes_from_box.edit.clear()
self.promotes_from_box.edit.addItems(["None"] + DB.classes.keys())
if current.promotes_from:
self.promotes_from_box.edit.setValue(current.promotes_from)
else:
self.promotes_from_box.edit.setValue("None")
# Need to make copies because otherwise ResetSelection calls
# self.tag_box.updated which resets the current.tags
turns_into = current.turns_into[:]
tags = current.tags[:]
self.turns_into_box.edit.clear()
self.turns_into_box.edit.addItems(DB.classes.keys())
self.turns_into_box.edit.setCurrentTexts(turns_into)
self.tag_box.edit.clear()
self.tag_box.edit.addItems(DB.tags.keys())
self.tag_box.edit.setCurrentTexts(tags)
self.class_stat_widget.update_stats()
self.class_stat_widget.set_new_obj(current)
if self.averages_dialog:
self.averages_dialog.set_current(current)
self.class_skill_widget.set_current(current.learned_skills)
self.wexp_gain_widget.set_current(current.wexp_gain)
self.icon_edit.set_current(current.icon_nid, current.icon_index)
pix = class_model.get_map_sprite_icon(self.current, num=0)
if pix:
self.map_sprite_label.setPixmap(pix)
else:
self.map_sprite_label.clear()
pix = class_model.get_combat_anim_icon(self.current)
if pix:
self.combat_anim_label.setPixmap(pix)
else:
self.combat_anim_label.clear()
def hideEvent(self, event):
self.close_averages()
|
import threading
import socket
#define host address, port number
host = '127.0.0.1' #localhost (can also be the ip of the server if it's running on web server)
port = 49800 #random port - not from well-known ports (0-1023) or registered ports (1024-49151)
#starting the server
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port)) #bind server to host and ip address
server.listen() #listen for incoming connections
#client and nickname lists
clients = []
nicknames = []
#broadcast function - sends message to all connected clients
def broadcast(msg):
for client in clients:
client.send(msg)
#handle function - handles messages from clients
def handle(client):
while True:
try:
special_msg = msg = client.recv(1024) #special_msg for kick or ban
if special_msg.decode('ascii').startswith('KICK'):
if nicknames[clients.index(client)] == 'admin':
user_to_kick = special_msg.decode('ascii')[5:] #after the first 5 characters (kick+space)
kick_user(user_to_kick)
else:
client.send('Command was refused!'.encode('ascii'))
elif special_msg.decode('ascii').startswith('BAN'):
if nicknames[clients.index(client)] == 'admin':
user_to_ban = special_msg.decode('ascii')[4:] #after the first 4 characters (ban+space)
ban_user(user_to_ban)
with open('banned_users.txt','a') as bu:
bu.write(f'{user_to_ban}\n')
print(f'{user_to_ban} was banned from the server!')
else:
client.send('Command was refused!'.encode('ascii'))
else:
broadcast(msg) #broadcast the message to all other clients
except:
if client in clients:
index = clients.index(client) #remove client from the list
clients.remove(client)
client.close()
nickname = nicknames[index]
nicknames.remove(nickname)
broadcast(f'{nickname} has left the chat.'.encode('ascii'))
break
#receive function
def receive():
while True:
client, address = server.accept() #accept method returns a client and his address
print("Connected with {}".format(str(address)))
client.send('nick'.encode('ascii')) #message visible only to the client to give his nickname
nickname = client.recv(1024).decode('ascii')
        try:
            with open('banned_users.txt', 'r') as bu:
                bans = bu.readlines()
        except FileNotFoundError:  # no one has been banned yet, so the file may not exist
            bans = []
if nickname+'\n' in bans: #refuse connection to banned client
client.send('BAN'.encode('ascii'))
client.close() #close connection to the client
continue
if nickname == 'admin':
client.send('PASS'.encode('ascii'))
password = client.recv(1024).decode('ascii')
if password != 'adminpwd':
client.send('REFUSE'.encode('ascii'))
client.close()
continue
nicknames.append(nickname) #add nickname to nicknames list
clients.append(client) #add client to clients list
print(f'Nickname of the client is {nickname}.')
broadcast(f'{nickname} has joined the chat!'.encode('ascii'))
client.send("Connected to the server!".encode('ascii')) #let the client know that he has connected successfully to the server
thread = threading.Thread(target=handle, args=(client,)) #one thread per client connected to handle them at the same time
thread.start()
def kick_user(user):
if user in nicknames:
user_index = nicknames.index(user) #find the position of user in nicknames which is the same position as the client
client_to_kick = clients[user_index]
clients.remove(client_to_kick)
client_to_kick.send('You were kicked from the server by the admin.'.encode('ascii'))
client_to_kick.close()
nicknames.remove(user)
broadcast(f'{user} was kicked from the server by the admin!'.encode('ascii'))
def ban_user(user):
if user in nicknames:
user_index = nicknames.index(user) #find the position of user in nicknames which is the same position as the client
client_to_ban = clients[user_index]
clients.remove(client_to_ban)
client_to_ban.send('You were banned from the server by the admin.'.encode('ascii'))
client_to_ban.close()
nicknames.remove(user)
broadcast(f'{user} was banned from the server by the admin!'.encode('ascii'))
print("Server is listening...")
receive()
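# Illustrative counterpart sketch (not part of this server script, and never
# reached here because receive() above blocks forever): a minimal receiving
# client that follows the handshake used above -- answer 'nick' with a
# nickname, answer 'PASS' with the admin password, stop on 'BAN'/'REFUSE', and
# otherwise print relayed messages. Intended to be saved as a separate script.
def run_client(nickname, host='127.0.0.1', port=49800):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    while True:
        msg = sock.recv(1024).decode('ascii')
        if not msg:                              # server closed the connection
            break
        if msg == 'nick':
            sock.send(nickname.encode('ascii'))  # server asks for our nickname
        elif msg == 'PASS':
            sock.send(input('Admin password: ').encode('ascii'))
        elif msg in ('BAN', 'REFUSE'):
            print('Connection refused by the server.')
            break
        else:
            print(msg)                           # broadcast chat message
    sock.close()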
|
from __future__ import absolute_import, division, print_function
from ete3 import Tree
import sys
#read in a rooted treelist, print out a .trees file that RootAnnotator might (?) like
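# Usage sketch (illustrative; file names are placeholders):
#   python this_script.py rooted_trees.nwk > rooted_trees.trees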
trees = []
inh = open(sys.argv[1])
for line in inh:
ct = Tree(line.rstrip())
trees.append(ct)
inh.close()
leaf_names = []
for leaf in trees[0]:
leaf_names.append(leaf.name)
num_taxa = str(len(leaf_names))
leaf_map = {}
index = 0
for element in leaf_names:
index += 1
leaf_map[element] = str(index)
#now print some basic guff
print('#NEXUS\n\nBegin taxa;\n\tDimensions ntax=' + num_taxa + ';\n\tTaxlabels')
for taxon in leaf_names:
print('\t\t' + taxon)
print('\t\t;\nEnd;\n\nBegin trees;\n\tTranslate')
for taxon in leaf_names:
if taxon == leaf_names[-1]:
print('\t\t' + leaf_map[taxon] + ' ' + taxon)
else:
print('\t\t' + leaf_map[taxon] + ' ' + taxon + ',')
print('\t\t;')
tree_count = 0
for t in trees:
tree_count += 1
for leaf in t:
leaf.name = leaf_map[leaf.name]
print('tree ' + str(tree_count) + ' = ' + str(t.write()))
print('End;')
|
# coding: utf-8
"""
OrganizationAuthorizationApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class OrganizationAuthorizationApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_orgauthorization_trustee(self, trustee_org_id, **kwargs):
"""
Delete Org Trust
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_orgauthorization_trustee(trustee_org_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_orgauthorization_trustee" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `delete_orgauthorization_trustee`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_orgauthorization_trustee_user(self, trustee_org_id, trustee_user_id, **kwargs):
"""
Delete Trustee User
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_orgauthorization_trustee_user(trustee_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_orgauthorization_trustee_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `delete_orgauthorization_trustee_user`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `delete_orgauthorization_trustee_user`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users/{trusteeUserId}'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_orgauthorization_trustee_user_roles(self, trustee_org_id, trustee_user_id, **kwargs):
"""
Delete Trustee User Roles
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_orgauthorization_trustee_user_roles(trustee_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_orgauthorization_trustee_user_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `delete_orgauthorization_trustee_user_roles`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `delete_orgauthorization_trustee_user_roles`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users/{trusteeUserId}/roles'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_orgauthorization_trustor(self, trustor_org_id, **kwargs):
"""
Delete Org Trust
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_orgauthorization_trustor(trustor_org_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustor_org_id: Trustor Organization Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustor_org_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_orgauthorization_trustor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustor_org_id' is set
if ('trustor_org_id' not in params) or (params['trustor_org_id'] is None):
raise ValueError("Missing the required parameter `trustor_org_id` when calling `delete_orgauthorization_trustor`")
resource_path = '/api/v2/orgauthorization/trustors/{trustorOrgId}'.replace('{format}', 'json')
path_params = {}
if 'trustor_org_id' in params:
path_params['trustorOrgId'] = params['trustor_org_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_orgauthorization_trustor_user(self, trustor_org_id, trustee_user_id, **kwargs):
"""
Delete Trustee User
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_orgauthorization_trustor_user(trustor_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustor_org_id: Trustor Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustor_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_orgauthorization_trustor_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustor_org_id' is set
if ('trustor_org_id' not in params) or (params['trustor_org_id'] is None):
raise ValueError("Missing the required parameter `trustor_org_id` when calling `delete_orgauthorization_trustor_user`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `delete_orgauthorization_trustor_user`")
resource_path = '/api/v2/orgauthorization/trustors/{trustorOrgId}/users/{trusteeUserId}'.replace('{format}', 'json')
path_params = {}
if 'trustor_org_id' in params:
path_params['trustorOrgId'] = params['trustor_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_pairing(self, pairing_id, **kwargs):
"""
Get Pairing Info
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_pairing(pairing_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str pairing_id: Pairing Id (required)
:return: TrustRequest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pairing_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_pairing" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'pairing_id' is set
if ('pairing_id' not in params) or (params['pairing_id'] is None):
raise ValueError("Missing the required parameter `pairing_id` when calling `get_orgauthorization_pairing`")
resource_path = '/api/v2/orgauthorization/pairings/{pairingId}'.replace('{format}', 'json')
path_params = {}
if 'pairing_id' in params:
path_params['pairingId'] = params['pairing_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustRequest',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustee(self, trustee_org_id, **kwargs):
"""
Get Org Trust
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustee(trustee_org_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:return: Trustee
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustee" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `get_orgauthorization_trustee`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Trustee',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustee_user(self, trustee_org_id, trustee_user_id, **kwargs):
"""
Get Trustee User
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustee_user(trustee_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: TrustUser
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustee_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `get_orgauthorization_trustee_user`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `get_orgauthorization_trustee_user`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users/{trusteeUserId}'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustUser',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustee_user_roles(self, trustee_org_id, trustee_user_id, **kwargs):
"""
Get Trustee User Roles
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustee_user_roles(trustee_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: UserAuthorization
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustee_user_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `get_orgauthorization_trustee_user_roles`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `get_orgauthorization_trustee_user_roles`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users/{trusteeUserId}/roles'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserAuthorization',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustee_users(self, trustee_org_id, **kwargs):
"""
The list of trustee users for this organization (i.e. users granted access to this organization).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustee_users(trustee_org_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param int page_size: Page size
:param int page_number: Page number
:return: TrustUserEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'page_size', 'page_number']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustee_users" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `get_orgauthorization_trustee_users`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustUserEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustees(self, **kwargs):
"""
The list of trustees for this organization (i.e. organizations granted access to this organization).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustees(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:return: TrustEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustees" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/orgauthorization/trustees'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustor(self, trustor_org_id, **kwargs):
"""
Get Org Trust
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustor(trustor_org_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustor_org_id: Trustor Organization Id (required)
:return: Trustor
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustor_org_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustor" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustor_org_id' is set
if ('trustor_org_id' not in params) or (params['trustor_org_id'] is None):
raise ValueError("Missing the required parameter `trustor_org_id` when calling `get_orgauthorization_trustor`")
resource_path = '/api/v2/orgauthorization/trustors/{trustorOrgId}'.replace('{format}', 'json')
path_params = {}
if 'trustor_org_id' in params:
path_params['trustorOrgId'] = params['trustor_org_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Trustor',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustor_user(self, trustor_org_id, trustee_user_id, **kwargs):
"""
Get Trustee User
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustor_user(trustor_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustor_org_id: Trustor Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: TrustUser
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustor_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustor_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustor_org_id' is set
if ('trustor_org_id' not in params) or (params['trustor_org_id'] is None):
raise ValueError("Missing the required parameter `trustor_org_id` when calling `get_orgauthorization_trustor_user`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `get_orgauthorization_trustor_user`")
resource_path = '/api/v2/orgauthorization/trustors/{trustorOrgId}/users/{trusteeUserId}'.replace('{format}', 'json')
path_params = {}
if 'trustor_org_id' in params:
path_params['trustorOrgId'] = params['trustor_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustUser',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustor_users(self, trustor_org_id, **kwargs):
"""
The list of users in the trustor organization (i.e. users granted access).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustor_users(trustor_org_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustor_org_id: Trustor Organization Id (required)
:param int page_size: Page size
:param int page_number: Page number
:return: TrustUserEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustor_org_id', 'page_size', 'page_number']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustor_users" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustor_org_id' is set
if ('trustor_org_id' not in params) or (params['trustor_org_id'] is None):
raise ValueError("Missing the required parameter `trustor_org_id` when calling `get_orgauthorization_trustor_users`")
resource_path = '/api/v2/orgauthorization/trustors/{trustorOrgId}/users'.replace('{format}', 'json')
path_params = {}
if 'trustor_org_id' in params:
path_params['trustorOrgId'] = params['trustor_org_id']
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustUserEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_orgauthorization_trustors(self, **kwargs):
"""
The list of organizations that have authorized/trusted your organization.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_orgauthorization_trustors(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:return: TrustorEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_orgauthorization_trustors" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/orgauthorization/trustors'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustorEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_orgauthorization_pairings(self, body, **kwargs):
"""
A pairing id is created by the trustee and given to the trustor to create a trust.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_orgauthorization_pairings(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TrustRequestCreate body: Pairing Info (required)
:return: TrustRequest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_orgauthorization_pairings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_orgauthorization_pairings`")
resource_path = '/api/v2/orgauthorization/pairings'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustRequest',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_orgauthorization_trustee_users(self, trustee_org_id, body, **kwargs):
"""
Add a user to the trust.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_orgauthorization_trustee_users(trustee_org_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param TrustMemberCreate body: Trust (required)
:return: TrustUser
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_orgauthorization_trustee_users" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `post_orgauthorization_trustee_users`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_orgauthorization_trustee_users`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustUser',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_orgauthorization_trustees(self, body, **kwargs):
"""
Create a new organization authorization trust. This is required to grant other organizations access to your organization.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_orgauthorization_trustees(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TrustCreate body: Trust (required)
:return: Trustee
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_orgauthorization_trustees" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_orgauthorization_trustees`")
resource_path = '/api/v2/orgauthorization/trustees'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Trustee',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_orgauthorization_trustees_audits(self, body, **kwargs):
"""
Get Org Trustee Audits
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_orgauthorization_trustees_audits(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TrusteeAuditQueryRequest body: Values to scope the request. (required)
:param int page_size: Page size
:param int page_number: Page number
:param str sort_by: Sort by
:param str sort_order: Sort order
:return: AuditQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'page_size', 'page_number', 'sort_by', 'sort_order']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_orgauthorization_trustees_audits" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_orgauthorization_trustees_audits`")
resource_path = '/api/v2/orgauthorization/trustees/audits'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AuditQueryResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_orgauthorization_trustor_audits(self, body, **kwargs):
"""
Get Org Trustor Audits
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_orgauthorization_trustor_audits(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TrustorAuditQueryRequest body: Values to scope the request. (required)
:param int page_size: Page size
:param int page_number: Page number
:param str sort_by: Sort by
:param str sort_order: Sort order
:return: AuditQueryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'page_size', 'page_number', 'sort_by', 'sort_order']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_orgauthorization_trustor_audits" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_orgauthorization_trustor_audits`")
resource_path = '/api/v2/orgauthorization/trustor/audits'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AuditQueryResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_orgauthorization_trustee(self, trustee_org_id, body, **kwargs):
"""
Update Org Trust
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_orgauthorization_trustee(trustee_org_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param TrustUpdate body: Client (required)
:return: Trustee
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_orgauthorization_trustee" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `put_orgauthorization_trustee`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `put_orgauthorization_trustee`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Trustee',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_orgauthorization_trustee_user_roledivisions(self, trustee_org_id, trustee_user_id, body, **kwargs):
"""
Update Trustee User Roledivisions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_orgauthorization_trustee_user_roledivisions(trustee_org_id, trustee_user_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:param RoleDivisionGrants body: Set of roles with corresponding divisions to apply (required)
:return: UserAuthorization
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'trustee_user_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_orgauthorization_trustee_user_roledivisions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `put_orgauthorization_trustee_user_roledivisions`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `put_orgauthorization_trustee_user_roledivisions`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `put_orgauthorization_trustee_user_roledivisions`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users/{trusteeUserId}/roledivisions'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserAuthorization',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_orgauthorization_trustee_user_roles(self, trustee_org_id, trustee_user_id, body, **kwargs):
"""
Update Trustee User Roles
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_orgauthorization_trustee_user_roles(trustee_org_id, trustee_user_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustee_org_id: Trustee Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:param list[str] body: List of roles (required)
:return: UserAuthorization
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustee_org_id', 'trustee_user_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_orgauthorization_trustee_user_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustee_org_id' is set
if ('trustee_org_id' not in params) or (params['trustee_org_id'] is None):
raise ValueError("Missing the required parameter `trustee_org_id` when calling `put_orgauthorization_trustee_user_roles`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `put_orgauthorization_trustee_user_roles`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `put_orgauthorization_trustee_user_roles`")
resource_path = '/api/v2/orgauthorization/trustees/{trusteeOrgId}/users/{trusteeUserId}/roles'.replace('{format}', 'json')
path_params = {}
if 'trustee_org_id' in params:
path_params['trusteeOrgId'] = params['trustee_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserAuthorization',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_orgauthorization_trustor_user(self, trustor_org_id, trustee_user_id, **kwargs):
"""
Add a Trustee user to the trust.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_orgauthorization_trustor_user(trustor_org_id, trustee_user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str trustor_org_id: Trustor Organization Id (required)
:param str trustee_user_id: Trustee User Id (required)
:return: TrustUser
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['trustor_org_id', 'trustee_user_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_orgauthorization_trustor_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'trustor_org_id' is set
if ('trustor_org_id' not in params) or (params['trustor_org_id'] is None):
raise ValueError("Missing the required parameter `trustor_org_id` when calling `put_orgauthorization_trustor_user`")
# verify the required parameter 'trustee_user_id' is set
if ('trustee_user_id' not in params) or (params['trustee_user_id'] is None):
raise ValueError("Missing the required parameter `trustee_user_id` when calling `put_orgauthorization_trustor_user`")
resource_path = '/api/v2/orgauthorization/trustors/{trustorOrgId}/users/{trusteeUserId}'.replace('{format}', 'json')
path_params = {}
if 'trustor_org_id' in params:
path_params['trustorOrgId'] = params['trustor_org_id']
if 'trustee_user_id' in params:
path_params['trusteeUserId'] = params['trustee_user_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustUser',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
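# Usage sketch (illustrative only, not part of the generated client): as the docstrings
# above describe, the pairing flow is trustee-driven. The trustee first creates a pairing
# id with post_orgauthorization_pairings and hands it to the trustor, who creates the
# trust (post_orgauthorization_trustees) and then adds users to it
# (post_orgauthorization_trustee_users). The `api` instance and the body objects named
# below are hypothetical placeholders; construct them from this module's API class and
# the corresponding model classes in your own code.
#
# pairing = api.post_orgauthorization_pairings(body=trust_request_create)
# trustee = api.post_orgauthorization_trustees(body=trust_create)
# trust_user = api.post_orgauthorization_trustee_users(trustee_org_id, body=trust_member_create)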
|
#!
import jax.numpy as np
from jax import jit, random, vmap
from jax.ops import index_add, index_update, index  # NOTE: removed in newer JAX releases (use x.at[idx].set()/add() instead); this script assumes an older JAX version
import matplotlib.pyplot as plt
import functools
import itertools
from scipy import optimize
from scipy.special import gamma
from tqdm import tqdm
import numpy as np2
import pandas as pd
import pickle
import os
from models import model
config_data = pd.read_csv('config.csv', sep=',', header=None, index_col=0)
figures_path = config_data.loc['figures_dir'][1]
results_path = config_data.loc['results_dir'][1]
params_data_path = config_data.loc['bogota_params_ages_data'][1]
ages_data_path = config_data.loc['bogota_age_data_dir'][1]
houses_data_path = config_data.loc['bogota_houses_data_dir'][1]
teachers_data_path = config_data.loc['bogota_teachers_data_dir'][1]
#from networks import networks
from networks import create_networks
import argparse
parser = argparse.ArgumentParser(description='Simulating interventions')
parser.add_argument('--res_id', default='ND', type=str,
help='Result ID for simulation save')
parser.add_argument('--population', default=500, type=int,
help='Specify the number of individuals')
parser.add_argument('--intervention', default=0.6, type=float,
help='Intervention efficacy')
parser.add_argument('--intervention_type', default='intervention', type=str,
help='Define the type of intervention [no_intervention, intervention, school_alternancy]')
parser.add_argument('--work_occupation', default=0.6, type=float,
help='Occupancy fraction at workplaces during the intervention')
parser.add_argument('--school_occupation', default=0.35, type=float,
help='Occupancy fraction in classrooms during the intervention')
parser.add_argument('--school_openings', default=20, type=int,
help='Day of the simulation on which schools open')
parser.add_argument('--ventilation_out', default=3, type=float,
help='Ventilation rate (h-1) defining how well ventilated a classroom is [2-15]')
parser.add_argument('--fraction_people_masks', default=1.0, type=float,
help='Fraction of people wearing masks')
parser.add_argument('--masks_type', default='N95', type=str,
help='Type of masks that individuals are using. Options are: cloth, surgical, N95')
parser.add_argument('--duration_event', default=6, type=float,
help='Duration of the event (e.g. classes/lectures) in hours per day')
parser.add_argument('--height_room', default=3.1, type=float,
help='Height of school classrooms')
parser.add_argument('--preschool_length_room', default=7.0, type=float,
help='Preschool length of classroom')
parser.add_argument('--preschool_width_room', default=7.0, type=float,
help='Preschool width of classroom')
parser.add_argument('--primary_length_room', default=10.0, type=float,
help='Primary length of classroom')
parser.add_argument('--primary_width_room', default=10.0, type=float,
help='Primary width of classroom')
parser.add_argument('--highschool_length_room', default=10.0, type=float,
help='Highschool length of classroom')
parser.add_argument('--highschool_width_room', default=10.0, type=float,
help='Highschool width of classroom')
parser.add_argument('--Tmax', default=200, type=int,
help='Length of simulation (days)')
parser.add_argument('--delta_t', default=0.08, type=float,
help='Simulation time step')
parser.add_argument('--number_trials', default=10, type=int,
help='Number of iterations per step')
parser.add_argument('--preschool_mean', default=9.4, type=float,
help='preschool degree distribution (mean)')
parser.add_argument('--preschool_std', default=1.8, type=float,
help='preschool degree distribution (standard deviation)')
parser.add_argument('--preschool_size', default=15, type=float,
help='Number of students per classroom')
parser.add_argument('--preschool_r', default=1, type=float,
help='Correlation in preschool layer')
parser.add_argument('--primary_mean', default=9.4, type=float,
help='primary degree distribution (mean)')
parser.add_argument('--primary_std', default=1.8, type=float,
help='primary degree distribution (standard deviation)')
parser.add_argument('--primary_size', default=35, type=float,
help='Number of students per classroom')
parser.add_argument('--primary_r', default=1, type=float,
help='Correlation in primary layer')
parser.add_argument('--highschool_mean', default=9.4, type=float,
help='highschool degree distribution (mean)')
parser.add_argument('--highschool_std', default=1.8, type=float,
help='highschool degree distribution (standard deviation)')
parser.add_argument('--highschool_size', default=35, type=float,
help='Number of students per classroom')
parser.add_argument('--highschool_r', default=1, type=float,
help='Correlation in highschool layer')
parser.add_argument('--work_mean', default=14.4/3, type=float,
help='Work degree distribution (mean)')
parser.add_argument('--work_std', default=6.2/3, type=float,
help='Work degree distribution (standard deviation)')
parser.add_argument('--work_size', default=10, type=float,
help='Approximation of a work place size')
parser.add_argument('--work_r', default=1, type=float,
help='Correlation in work layer')
parser.add_argument('--community_mean', default=4.3/2, type=float,
help='Community degree distribution (mean)')
parser.add_argument('--community_std', default=1.9/2, type=float,
help='Community degree distribution (standard deviation)')
parser.add_argument('--community_n', default=1, type=float,
help='Number of communities')
parser.add_argument('--community_r', default=0, type=float,
help='Correlation in community layer')
args = parser.parse_args()
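# Example invocation (the script file name below is a placeholder; all flags are defined above):
#   python run_intervention_sim.py --population 1000 --intervention_type school_alternancy \
#       --school_openings 20 --fraction_people_masks 0.8 --masks_type surgical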
number_nodes = args.population
pop = number_nodes
#--------------------------------------------------------------------------------------------------------------------------------
################################
########## Parameters ##########
# Model parameter values
# Means
IncubPeriod=5 #Incubation period, days
DurMildInf=6 #Duration of mild infections, days
DurSevereInf=6 #Duration of hospitalization (severe infection), days
DurCritInf=8 #Time from ICU admission to death/recovery (critical infection), days
# Standard deviations
std_IncubPeriod=4 #Incubation period, days
std_DurMildInf=2 #Duration of mild infections, days
std_DurSevereInf=4.5 #Duration of hospitalization (severe infection), days
std_DurCritInf=6 #Time from ICU admission to death/recovery (critical infection), days
FracSevere=0.15 #Fraction of infections that are severe
FracCritical=0.05 #Fraction of infections that are critical
CFR=0.02 #Case fatality rate (fraction of infections resulting in death)
FracMild=1-FracSevere-FracCritical #Fraction of infections that are mild
# Get gamma distribution parameters
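# Method-of-moments parameterisation: for a gamma distribution with mean m and standard
# deviation s, m = shape*scale and s**2 = shape*scale**2, hence shape = (m/s)**2 and
# scale = s**2/m, which is exactly what is computed below for each state.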
mean_vec = np.array(
[1., IncubPeriod, DurMildInf, DurSevereInf, DurCritInf, 1., 1.])
std_vec=np.array(
[1., std_IncubPeriod, std_DurMildInf, std_DurSevereInf, std_DurCritInf, 1., 1.])
shape_vec=(mean_vec/std_vec)**2# This will contain shape values for each state
scale_vec=(std_vec**2)/mean_vec # This will contain scale values for each state
# Define transition probabilities
# Define probability of recovering (as opposed to progressing or dying) from each state
recovery_probabilities = np.array([0., 0., FracMild, FracSevere / (FracSevere + FracCritical), 1. - CFR / FracCritical, 0., 0.])
# Define relative infectivity of each state
infection_probabilities = np.array([0., 0., 1.0, 0., 0., 0., 0.])
# Mask efficiencies in inhalation and exhalation taken from https://tinyurl.com/covid-estimator
mask_inhalation = {'cloth':0.5 , 'surgical':0.3, 'N95':0.9}
mask_exhalation = {'cloth':0.5 , 'surgical':0.65, 'N95':0.9}
inhalation_mask = mask_inhalation[args.masks_type]
exhalation_mask = mask_exhalation[args.masks_type]
#----------------------------------------------------------------------------------------------------------------------------------
def discrete_gamma(key, alpha, beta, shape=()):
shape_ = shape
if shape_ == ():
try:
shape_ = alpha.shape
except AttributeError:
shape_ = ()
return _discrete_gamma(key, alpha, beta, shape_)
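# _discrete_gamma draws a continuous gamma sample with the given shape, rescales it by
# 1/beta (as called from state_length_sampler below, beta = delta_t/scale, so the draw
# ends up expressed in units of the simulation time step) and rounds it to an integer
# number of steps.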
@functools.partial(jit, static_argnums=(3,))
def _discrete_gamma(key, alpha, beta, shape=()):
samples = np.round(random.gamma(key, alpha, shape=shape) / beta)
return samples.astype(np.int32)
@jit
def state_length_sampler(key, new_state):
"""Duration in transitional state. Must be at least 1 time unit."""
alphas = shape_vec[new_state]
betas = delta_t/scale_vec[new_state]
key, subkey = random.split(key)
lengths = 1 + discrete_gamma(subkey, alphas, betas) # Time must be at least 1.
return key, lengths * model.is_transitional(new_state) # Makes sure non-transitional states are returning 0.
#-----------------------------------------------------------------------------------------------------------------------------------
######################################
######## Teachers distribution #######
teachers_data_BOG = pd.read_csv(teachers_data_path, encoding= 'unicode_escape', delimiter=',')
total_teachers_BOG = int(teachers_data_BOG['Total'][1])
teachers_preschool_ = [int(teachers_data_BOG['Preescolar'][1])]
teachers_preschool = sum(teachers_preschool_)/total_teachers_BOG
teachers_primary_ = [int(teachers_data_BOG['Basica_primaria'][1])]
teachers_primary = sum(teachers_primary_)/total_teachers_BOG
teachers_highschool_ = [int(teachers_data_BOG['Basica_secundaria'][1])]
teachers_highschool = sum(teachers_highschool_)/total_teachers_BOG
#-----------------------------------------------------------------------------------------------------------------------------------
#################################
######## Age distribution #######
### Get age distribution
ages_data_BOG = pd.read_csv(ages_data_path, encoding= 'unicode_escape', delimiter=';')
total_pop_BOG = int(ages_data_BOG['Total.3'][17].replace('.',''))
# Ages 0-4 (0)
very_young_ = [int(ages_data_BOG['Total.3'][0].replace('.',''))]
very_young = sum(very_young_)/total_pop_BOG
# Ages 5-9 (1)
preschool_ = [int(ages_data_BOG['Total.3'][1].replace('.',''))]
preschool = sum(preschool_)/total_pop_BOG
# Ages 10-14 (2)
primary_ = [int(ages_data_BOG['Total.3'][2].replace('.',''))]
primary = sum(primary_)/total_pop_BOG
# Ages 15-19 (3)
highschool_ = [int(ages_data_BOG['Total.3'][3].replace('.',''))]
highschool = sum(highschool_)/total_pop_BOG
# Ages 20-24 (4)
university_ = [int(ages_data_BOG['Total.3'][4].replace('.',''))]
university = sum(university_)/total_pop_BOG
# Ages 25-64 (5,6,7,8,9,10,11,12)
work_ = [int(ages_data_BOG['Total.3'][i].replace('.','')) for i in range(5,12+1)]
work = sum(work_)/total_pop_BOG
# Ages 65+ (13,14,15,16)
elderly_ = [int(ages_data_BOG['Total.3'][i].replace('.','')) for i in range(13,16+1)]
elderly = sum(elderly_)/total_pop_BOG
# Community ages
community_ = very_young_ + preschool_ + primary_ + highschool_ + university_ + work_ + elderly_
community = sum(community_)/total_pop_BOG
# Adult classification
adults = np.arange(4,16+1,1) # age-group indices 4-16, i.e. ages 20 and above
#-----------------------------------------------------------------------------------------------------------------------------------
#################################
########### Age params ##########
### Get medians
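# The params file is assumed to hold per-iteration fitted values with (at least) the
# columns 'param', 'age_group' and 'median'. get_medians takes the last `last` rows of a
# slice already filtered to a single parameter and returns the median of its 'median'
# column; medians_params packs those values into a dict keyed by
# ['age','beta','IFR','RecPeriod','alpha','sigma'] for one age group.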
def get_medians(df_p,last):
df_res = df_p.iloc[-last:].groupby(['param']).median().reset_index()['median'][0]
return df_res
def medians_params(df_list,age_group,last):
params_def = ['age','beta','IFR','RecPeriod','alpha','sigma']
params_val = [age_group,get_medians(df_list[0],last),get_medians(df_list[1],last),
get_medians(df_list[2],last),get_medians(df_list[3],last),get_medians(df_list[4],last)]
res = dict(zip(params_def,params_val))
return res
params_data_BOG = pd.read_csv(params_data_path, encoding='unicode_escape', delimiter=',')
# Ages 0-19
young_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG['age_group']=='0-19'])
young_ages_beta = pd.DataFrame(young_ages_params[young_ages_params['param']=='contact_rate'])
young_ages_IFR = pd.DataFrame(young_ages_params[young_ages_params['param']=='IFR'])
young_ages_RecPeriod = pd.DataFrame(young_ages_params[young_ages_params['param']=='recovery_period'])
young_ages_alpha = pd.DataFrame(young_ages_params[young_ages_params['param']=='report_rate'])
young_ages_sigma = pd.DataFrame(young_ages_params[young_ages_params['param']=='relative_asymp_transmission'])
young_params = [young_ages_beta,young_ages_IFR,young_ages_RecPeriod,young_ages_alpha,young_ages_sigma]
# Ages 20-39
youngAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG['age_group']=='20-39'])
youngAdults_ages_beta = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params['param']=='contact_rate'])
youngAdults_ages_IFR = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params['param']=='IFR'])
youngAdults_ages_RecPeriod = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params['param']=='recovery_period'])
youngAdults_ages_alpha = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params['param']=='report_rate'])
youngAdults_ages_sigma = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params['param']=='relative_asymp_transmission'])
youngAdults_params = [youngAdults_ages_beta,youngAdults_ages_IFR,youngAdults_ages_RecPeriod,youngAdults_ages_alpha,youngAdults_ages_sigma]
# Ages 40-49
adults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG['age_group']=='40-49'])
adults_ages_beta = pd.DataFrame(adults_ages_params[adults_ages_params['param']=='contact_rate'])
adults_ages_IFR = pd.DataFrame(adults_ages_params[adults_ages_params['param']=='IFR'])
adults_ages_RecPeriod = pd.DataFrame(adults_ages_params[adults_ages_params['param']=='recovery_period'])
adults_ages_alpha = pd.DataFrame(adults_ages_params[adults_ages_params['param']=='report_rate'])
adults_ages_sigma = pd.DataFrame(adults_ages_params[adults_ages_params['param']=='relative_asymp_transmission'])
adults_params = [adults_ages_beta,adults_ages_IFR,adults_ages_RecPeriod,adults_ages_alpha,adults_ages_sigma]
# Ages 50-59
seniorAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG['age_group']=='50-59'])
seniorAdults_ages_beta = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params['param']=='contact_rate'])
seniorAdults_ages_IFR = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params['param']=='IFR'])
seniorAdults_ages_RecPeriod = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params['param']=='recovery_period'])
seniorAdults_ages_alpha = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params['param']=='report_rate'])
seniorAdults_ages_sigma = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params['param']=='relative_asymp_transmission'])
seniorAdults_params = [seniorAdults_ages_beta,seniorAdults_ages_IFR,seniorAdults_ages_RecPeriod,seniorAdults_ages_alpha,seniorAdults_ages_sigma]
# Ages 60-69
senior_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG['age_group']=='60-69'])
senior_ages_beta = pd.DataFrame(senior_ages_params[senior_ages_params['param']=='contact_rate'])
senior_ages_IFR = pd.DataFrame(senior_ages_params[senior_ages_params['param']=='IFR'])
senior_ages_RecPeriod = pd.DataFrame(senior_ages_params[senior_ages_params['param']=='recovery_period'])
senior_ages_alpha = pd.DataFrame(senior_ages_params[senior_ages_params['param']=='report_rate'])
senior_ages_sigma = pd.DataFrame(senior_ages_params[senior_ages_params['param']=='relative_asymp_transmission'])
senior_params = [senior_ages_beta,senior_ages_IFR,senior_ages_RecPeriod,senior_ages_alpha,senior_ages_sigma]
# Ages 70+
elderly_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG['age_group']=='70-90+'])
elderly_ages_beta = pd.DataFrame(elderly_ages_params[elderly_ages_params['param']=='contact_rate'])
elderly_ages_IFR = pd.DataFrame(elderly_ages_params[elderly_ages_params['param']=='IFR'])
elderly_ages_RecPeriod = pd.DataFrame(elderly_ages_params[elderly_ages_params['param']=='recovery_period'])
elderly_ages_alpha = pd.DataFrame(elderly_ages_params[elderly_ages_params['param']=='report_rate'])
elderly_ages_sigma = pd.DataFrame(elderly_ages_params[elderly_ages_params['param']=='relative_asymp_transmission'])
elderly_params = [elderly_ages_beta,elderly_ages_IFR,elderly_ages_RecPeriod,elderly_ages_alpha,elderly_ages_sigma]
young_params_medians = medians_params(young_params,'0-19',last=15) # Schools
youngAdults_params_medians = medians_params(youngAdults_params,'20-39',last=15) # Adults
adults_params_medians = medians_params(adults_params,'40-49',last=15) # Adults
seniorAdults_params_medians = medians_params(seniorAdults_params,'50-59',last=15) # Adults
senior_params_medians = medians_params(senior_params,'60-69',last=15) # Elders
elderly_params_medians = medians_params(elderly_params,'70-90+',last=15) # Elders
# Simplify, get medians of values
params_desc = ['age','beta','IFR','RecPeriod','alpha','sigma']
main_adults_params_values = ['20-59',
np2.median([youngAdults_params_medians['beta'],adults_params_medians['beta'],seniorAdults_params_medians['beta']]),
np2.median([youngAdults_params_medians['IFR'],adults_params_medians['IFR'],seniorAdults_params_medians['IFR']]),
np2.median([youngAdults_params_medians['RecPeriod'],adults_params_medians['RecPeriod'],seniorAdults_params_medians['RecPeriod']]),
np2.median([youngAdults_params_medians['alpha'],adults_params_medians['alpha'],seniorAdults_params_medians['alpha']]),
np2.median([youngAdults_params_medians['sigma'],adults_params_medians['sigma'],seniorAdults_params_medians['sigma']])]
main_adults_params_medians = dict(zip(params_desc,main_adults_params_values))
main_elders_params_values = ['60-90+',
np2.median([senior_params_medians['beta'],elderly_params_medians['beta']]),
np2.median([senior_params_medians['IFR'],elderly_params_medians['IFR']]),
np2.median([senior_params_medians['RecPeriod'],elderly_params_medians['RecPeriod']]),
np2.median([senior_params_medians['alpha'],elderly_params_medians['alpha']]),
np2.median([senior_params_medians['sigma'],elderly_params_medians['sigma']])]
main_elders_params_medians = dict(zip(params_desc,main_elders_params_values))
### Define parameters per layers
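# calculate_R0 combines reported and unreported transmission: a fraction alpha of
# infections is reported and transmits at rate beta, the remaining (1 - alpha) transmits
# at the reduced rate beta*sigma, both over RecPeriod days, and the whole term is scaled
# by the survival probability (1 - IFR). Equivalently,
# R0 = (1 - IFR) * beta * RecPeriod * (alpha + (1 - alpha) * sigma).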
def calculate_R0(IFR,alpha,beta,RecPeriod,sigma):
return (1-IFR)*(alpha*beta*RecPeriod+(1-alpha)*beta*sigma*RecPeriod)
def model_params(params_dict,layer):
layer_params = {'layer':layer,
'RecPeriod':params_dict['RecPeriod'],
'R0':calculate_R0(params_dict['IFR'],params_dict['alpha'],params_dict['beta'],
params_dict['RecPeriod'],params_dict['sigma'])}
return layer_params
school_params = model_params(young_params_medians,'schools')
adults_params = model_params(main_adults_params_medians,'adults')
elders_params = model_params(main_elders_params_medians,'elders')
params_def = ['layer','RecPeriod','R0']
run_params = [ [school_params['layer'],adults_params['layer'],elders_params['layer']],
[school_params['RecPeriod'],adults_params['RecPeriod'],elders_params['RecPeriod']],
[school_params['R0'],adults_params['R0'],elders_params['R0']] ]
run_params = dict(zip(params_def,run_params))
df_run_params = pd.DataFrame.from_dict(run_params)
#------------------------------------------------------------------------------------------------------------------------------------
################################
######## Household sizes #######
### Get household size distribution from 2018 census data
census_data_BOG = pd.read_csv(houses_data_path)
one_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 1.0)
two_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 2.0)
three_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 3.0)
four_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 4.0)
five_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 5.0)
six_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 6.0)
seven_house = np2.sum(census_data_BOG['HA_TOT_PER'] == 7.0)
total_house = one_house + two_house + three_house + four_house + five_house + six_house + seven_house
house_size_dist = np2.array([one_house,two_house,three_house,four_house,five_house,six_house,seven_house])/total_house
# House-hold sizes
household_sizes = []
household_sizes.extend(np2.random.choice(np.arange(1,8,1),p=house_size_dist,size=int(pop/3))) # This division is just to make the code faster
pop_house = sum(household_sizes)
while pop_house <= pop:
size = np2.random.choice(np.arange(1,8,1),p=house_size_dist,size=1)
household_sizes.extend(size)
pop_house += size[0]
household_sizes[-1] -= pop_house-pop # trim the last household so the total number of individuals equals pop
# Mean of household degree dist
mean_household = sum((np2.asarray(household_sizes)-1)*np2.asarray(household_sizes))/pop
# Keeping track of the household indx for each individual
house_indices = np2.repeat(np2.arange(0,len(household_sizes),1), household_sizes)
# Keeping track of the household size for each individual
track_house_size = np2.repeat(household_sizes, household_sizes)
#-----------------------------------------------------------------------------------------------------------------------------------------
###############################
######## Classify nodes #######
preschool_pop_ = preschool_ + teachers_preschool_
preschool_pop = sum(preschool_pop_)
primary_pop_ = primary_ + teachers_primary_
primary_pop = sum(primary_pop_)
highschool_pop_ = highschool_ + teachers_highschool_
highschool_pop = sum(highschool_pop_)
work_pop_no_teachers = sum(work_) - total_teachers_BOG
# Fraction of the population in each group: preschool, primary, highschool, working, and other
dist_of_pop = [preschool_pop/total_pop_BOG,
primary_pop/total_pop_BOG,
highschool_pop/total_pop_BOG,
work_pop_no_teachers/total_pop_BOG,
very_young+university+elderly]
dist_of_pop[-1] += 1-sum(dist_of_pop)
# Classifying each person
classify_pop = np2.random.choice(['preschool','primary','highschool','work','other'], size=pop, p=dist_of_pop)
# Save as df
classify_pop_df = pd.DataFrame()
classify_pop_df['indx'] = list(np.arange(0,pop,1))
classify_pop_df['type'] = list(classify_pop)
# Number of individuals in each group
state, counts = np2.unique(classify_pop, return_counts=True)
dict_of_counts = dict(zip(state,counts))
preschool_going = dict_of_counts['preschool']
primary_going = dict_of_counts['primary']
highschool_going = dict_of_counts['highschool']
working = dict_of_counts['work']
other = dict_of_counts['other']
# Indices of individuals in each group
preschool_indx = np2.where(classify_pop=='preschool')[0]
primary_indx = np2.where(classify_pop=='primary')[0]
highschool_indx = np2.where(classify_pop=='highschool')[0]
work_indx = np2.where(classify_pop=='work')[0]
other_indx = np2.where(classify_pop=='other')[0]
# Keep track of the age groups for each individual labelled from 0-16
age_tracker_all = np2.zeros(pop)
age_tracker = np2.zeros(pop)
#------------------------------------------------------------------------------------------------------------------------------------------
###############################
##### Degree distribution #####
### Community --------------------------------------------------------
# Degree dist. mean and std dev obtained from Prem et al. data, scaled by 1/2.5 in order to ensure that community+friends+school = community data in Prem et al.
mean, std = args.community_mean, args.community_std
p = 1-(std**2/mean)
n_binom = mean/p
community_degree = np2.random.binomial(n_binom, p, size = pop)
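# Moment-matching note (sketch): for a Binomial(n, p) draw, E[X] = n*p and
# Var[X] = n*p*(1-p). Setting p = 1 - std**2/mean and n_binom = mean/p as above
# recovers the target mean and variance; e.g. mean=8, std=2 gives p=0.5,
# n_binom=16, so n*p = 8 and n*p*(1-p) = 4 = std**2. (Requires std**2 < mean.)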
# No correlation between contacts
n_community = args.community_n
r_community = args.community_r
# Split the age groups of the population according to the proportions seen in the data
prob = []
for i in range(0,len(community_)):
prob.append(community_[i]/sum(community_))
age_group_community = np2.random.choice(np2.arange(0,len(community_),1),size=pop,p=prob,replace=True)
community_indx = np2.arange(0,pop,1)
for i in range(pop):
age_tracker_all[community_indx[i]] = age_group_community[i]
### Preschool -------------------------------------------------------
mean, std = args.preschool_mean, args.preschool_std
p = 1-(std**2/mean)
n_binom = mean/p
preschool_degree = np2.random.binomial(n_binom, p, size = preschool_going)
n_preschool = preschool_going/args.preschool_size
r_preschool = args.preschool_r
preschool_clroom = np2.random.choice(np.arange(0,n_preschool+1,1),size=preschool_going)
# Assign ages to the preschool going population acc. to their proportion from the census data
prob = []
preschool_pop_ = preschool_ + teachers_preschool_
preschool_pop = sum(preschool_pop_)
for i in range(0,len(preschool_pop_)):
prob.append(preschool_pop_[i]/preschool_pop)
age_group_preschool = np2.random.choice(np.array([1,7]),size=preschool_going,p=prob,replace=True)
for i in range(preschool_going):
age_tracker[preschool_indx[i]] = age_group_preschool[i]
### Primary ---------------------------------------------------------
mean, std = args.primary_mean, args.primary_std
p = 1-(std**2/mean)
n_binom = mean/p
primary_degree = np2.random.binomial(n_binom, p, size = primary_going)
n_primary = primary_going/args.primary_size
r_primary = args.primary_r
primary_clroom = np2.random.choice(np.arange(0,n_primary+1,1),size=primary_going)
# Assign ages to the primary going population acc. to their proportion from the census data
prob = []
primary_pop_ = primary_ + teachers_primary_
primary_pop = sum(primary_pop_)
for i in range(0,len(primary_pop_)):
prob.append(primary_pop_[i]/primary_pop)
age_group_primary = np2.random.choice(np.array([2,7]),size=primary_going,p=prob,replace=True)
for i in range(primary_going):
age_tracker[primary_indx[i]] = age_group_primary[i]
### Highschool -------------------------------------------------------
mean, std = args.highschool_mean, args.highschool_std
p = 1-(std**2/mean)
n_binom = mean/p
highschool_degree = np2.random.binomial(n_binom, p, size = highschool_going)
n_highschool = highschool_going/args.highschool_size
r_highschool = args.highschool_r
highschool_clroom = np2.random.choice(np.arange(0,n_highschool+1,1),size=highschool_going)
# Assign ages to the highschool going population acc. to their proportion from the census data
prob = []
highschool_pop_ = highschool_ + teachers_highschool_
highschool_pop = sum(highschool_pop_)
for i in range(0,len(highschool_pop_)):
prob.append(highschool_pop_[i]/highschool_pop)
age_group_highschool = np2.random.choice(np.array([3,7]),size=highschool_going,p=prob,replace=True)
for i in range(highschool_going):
age_tracker[highschool_indx[i]] = age_group_highschool[i]
### Work -----------------------------------------------------------
# Degree dist.: the mean and std dev are taken from the Potter et al. data. The factor of 1/3 converts to daily values and is chosen to match the work contact survey data
mean, std = args.work_mean, args.work_std
p = 1-(std**2/mean)
n_binom = mean/p
work_degree = np2.random.binomial(n_binom, p, size = working)
# Assuming that the average size of a workplace is ~10 people; the correlation is
# chosen so that the clustering coefficient is high, since the network in Potter et al. had a high value
work_place_size = args.work_size
n_work = working/work_place_size
r_work = args.work_r
# Assign each working individual a 'work place'
job_place = np2.random.choice(np.arange(0,n_work+1,1),size=working)
# Split the age groups of the working population according to the proportions seen in the data
p = []
work_pop_ = university_ + work_
work_pop = sum(work_pop_)
for i in range(0,len(work_pop_)):
p.append(work_pop_[i]/work_pop)
age_group_work = np2.random.choice(np.arange(4,12+1,1),size=working,p=p,replace=True)
for i in range(working):
age_tracker[work_indx[i]] = age_group_work[i]
#---------------------------------------------------------------------------------------------------------------------------------------
###############################
######## Create graphs ########
print('Creating graphs...')
## Households
matrix_household = create_networks.create_fully_connected(household_sizes,age_tracker_all,np2.arange(0,pop,1),df_run_params,args.delta_t)
## Preschool
matrix_preschool = create_networks.create_external_corr_schools(pop,preschool_going,preschool_degree,n_preschool,r_preschool,preschool_indx,preschool_clroom,age_tracker,df_run_params,args.delta_t
,args.preschool_length_room,args.preschool_width_room,args.height_room,args.ventilation_out,inhalation_mask,exhalation_mask,args.fraction_people_masks,args.duration_event)
## Primary
matrix_primary = create_networks.create_external_corr_schools(pop,primary_going,primary_degree,n_primary,r_primary,primary_indx,primary_clroom,age_tracker,df_run_params,args.delta_t
,args.primary_length_room,args.primary_width_room,args.height_room,args.ventilation_out,inhalation_mask,exhalation_mask,args.fraction_people_masks,args.duration_event)
## Highschool
matrix_highschool = create_networks.create_external_corr_schools(pop,highschool_going,highschool_degree,n_highschool,r_highschool,highschool_indx,highschool_clroom,age_tracker,df_run_params,args.delta_t
,args.highschool_length_room,args.highschool_width_room,args.height_room,args.ventilation_out,inhalation_mask,exhalation_mask,args.fraction_people_masks,args.duration_event)
## Work
matrix_work = create_networks.create_external_corr(pop,working,work_degree,n_work,r_work,work_indx,job_place,age_tracker,df_run_params,args.delta_t)
## Community
matrix_community = create_networks.create_external_corr(pop,pop,community_degree,n_community,r_community,np2.arange(0,pop,1),age_group_community,age_tracker,df_run_params,args.delta_t)
# Saves graphs
multilayer_matrix = [matrix_household,matrix_preschool,matrix_primary,matrix_highschool,matrix_work,matrix_community]
#--------------------------------------------------------------------------------------------------------------------------------------
#########################################
######## Create dynamical layers ########
# Time params
Tmax = args.Tmax
days_intervals = [1] * Tmax
delta_t = args.delta_t
step_intervals = [int(x/delta_t) for x in days_intervals]
total_steps = sum(step_intervals)
# Create dynamic
import networks.network_dynamics as nd
import networks.network_dynamics_no_interventions as nd_ni
print('Creating dynamics...')
if args.intervention_type == 'no_intervention':
time_intervals, ws = nd_ni.create_day_intervention_dynamics(multilayer_matrix,Tmax=Tmax,total_steps=total_steps,schools_day_open=0,
interv_glob=0,schl_occupation=1.0,work_occupation=1.0)
elif args.intervention_type == 'intervention':
time_intervals, ws = nd.create_day_intervention_dynamics(multilayer_matrix,Tmax=Tmax,total_steps=total_steps,schools_day_open=args.school_openings,
interv_glob=args.intervention,schl_occupation=args.school_occupation,work_occupation=args.work_occupation)
elif args.intervention_type == 'school_alternancy':
time_intervals, ws = nd.create_day_intervention_altern_schools_dynamics(multilayer_matrix,Tmax=Tmax,total_steps=total_steps,schools_day_open=args.school_openings,
interv_glob=args.intervention,schl_occupation=args.school_occupation,work_occupation=args.work_occupation)
else:
print('No valid intervention type')
#--------------------------------------------------------------------------------------------------------------------------------------
#########################################
############### SIMULATE ################
# Bogota data
cum_cases = 632532
cum_rec = 593329
mild_house = 17595
hosp_beds = 5369
ICU_beds = 1351
deaths = 13125
BOG_E = int( pop * (cum_cases-cum_rec-mild_house-deaths)/total_pop_BOG)
BOG_R = int( pop * 0.3 ) # Assuming that 30% of population is already recovered
BOG_I1 = int( pop * mild_house/total_pop_BOG )
BOG_I2 = int( pop * hosp_beds/total_pop_BOG )
BOG_I3 = int( pop * ICU_beds/total_pop_BOG )
BOG_D = int( pop * deaths/total_pop_BOG )
####################### RUN
print('Simulating...')
soln=np.zeros((args.number_trials,total_steps,7))
soln_cum=np.zeros((args.number_trials,total_steps,7))
soln_ind=np.zeros((args.number_trials,len(time_intervals),pop), dtype=np.int8)
for key in tqdm(range(args.number_trials), total=args.number_trials):
#Initial condition
init_ind_E = random.uniform(random.PRNGKey(key), shape=(BOG_E,), maxval=pop).astype(np.int32)
init_ind_I1 = random.uniform(random.PRNGKey(key), shape=(BOG_I1,), maxval=pop).astype(np.int32)
init_ind_I2 = random.uniform(random.PRNGKey(key), shape=(BOG_I2,), maxval=pop).astype(np.int32)
init_ind_I3 = random.uniform(random.PRNGKey(key), shape=(BOG_I3,), maxval=pop).astype(np.int32)
init_ind_D = random.uniform(random.PRNGKey(key), shape=(BOG_D,), maxval=pop).astype(np.int32)
init_ind_R = random.uniform(random.PRNGKey(key), shape=(BOG_R,), maxval=pop).astype(np.int32)
init_state = np.zeros(pop, dtype=np.int32)
init_state = index_update(init_state,init_ind_E,np.ones(BOG_E, dtype=np.int32)*1) # E
init_state = index_update(init_state,init_ind_I1,np.ones(BOG_I1, dtype=np.int32)*2) # I1
init_state = index_update(init_state,init_ind_I2,np.ones(BOG_I2, dtype=np.int32)*3) # I2
init_state = index_update(init_state,init_ind_I3,np.ones(BOG_I3, dtype=np.int32)*4) # I3
init_state = index_update(init_state,init_ind_D,np.ones(BOG_D, dtype=np.int32)*5) # D
init_state = index_update(init_state,init_ind_R,np.ones(BOG_R, dtype=np.int32)*6) # R
_, init_state_timer = state_length_sampler(random.PRNGKey(key), init_state)
#Run simulation
_, state, _, states_evolution, total_history = model.simulate_intervals(
ws, time_intervals, state_length_sampler, infection_probabilities,
recovery_probabilities, init_state, init_state_timer, key = random.PRNGKey(key), epoch_len=1)
history = np.array(total_history)[:, 0, :] # This unpacks current state counts
soln=index_add(soln,index[key,:, :],history)
soln_ind=index_add(soln_ind,index[key,:, :],states_evolution)
# cumulative_history = np.array(total_history)[:, 1, :]
# soln_cum=index_add(soln_cum,index[key,:, :],cumulative_history)
#------------------------------------------------------------------------------------------------------------------------------------------------
#########################################
############## Save Results #############
# Confidence intervals
loCI = 5
upCI = 95
soln_avg=np.average(soln,axis=0)
soln_loCI=np.percentile(soln,loCI,axis=0)
soln_upCI=np.percentile(soln,upCI,axis=0)
print('Saving results...')
# Save results
tvec = np.linspace(0,Tmax,total_steps)
hvec = np.arange(4,sum(time_intervals)+4,4)
df_soln_list = []
for i in range(args.number_trials):
df_results_soln_i = pd.DataFrame(columns=['iter','tvec','S','E','I1','I2','I3','D','R'])
df_results_soln_i['iter'] = [i] * len(tvec)
df_results_soln_i['tvec'] = list(tvec)
df_results_soln_i['S'] = list(soln[i,:,0])
df_results_soln_i['E'] = list(soln[i,:,1])
df_results_soln_i['I1'] = list(soln[i,:,2])
df_results_soln_i['I2'] = list(soln[i,:,3])
df_results_soln_i['I3'] = list(soln[i,:,4])
df_results_soln_i['D'] = list(soln[i,:,5])
df_results_soln_i['R'] = list(soln[i,:,6])
df_soln_list.append(df_results_soln_i)
df_results_soln = pd.concat(df_soln_list)
df_soln_cum_list = []
for i in range(args.number_trials):
df_results_soln_cum_i = pd.DataFrame(columns=['iter','tvec','S','E','I1','I2','I3','D','R'])
df_results_soln_cum_i['iter'] = [i] * len(tvec)
df_results_soln_cum_i['tvec'] = list(tvec)
df_results_soln_cum_i['S'] = list(soln_cum[i,:,0])
df_results_soln_cum_i['E'] = list(soln_cum[i,:,1])
df_results_soln_cum_i['I1'] = list(soln_cum[i,:,2])
df_results_soln_cum_i['I2'] = list(soln_cum[i,:,3])
df_results_soln_cum_i['I3'] = list(soln_cum[i,:,4])
df_results_soln_cum_i['D'] = list(soln_cum[i,:,5])
df_results_soln_cum_i['R'] = list(soln_cum[i,:,6])
df_soln_cum_list.append(df_results_soln_cum_i)
df_results_soln_cum = pd.concat(df_soln_cum_list)
df_soln_ind_list = []
for i in range(args.number_trials):
inds_indx = [str(n) for n in range(0,pop)]
cols = ['iter','tvec']
cols.extend(inds_indx)
df_results_soln_ind_i = pd.DataFrame(columns=cols)
df_results_soln_ind_i['iter'] = [i] * len(hvec)
df_results_soln_ind_i['tvec'] = list(hvec)
for ind in inds_indx:
df_results_soln_ind_i[ind] = list(soln_ind[i,:,int(ind)])
df_soln_ind_list.append(df_results_soln_ind_i)
df_results_soln_ind = pd.concat(df_soln_ind_list)
# df_results_history = pd.DataFrame(columns=['tvec','S','E','I1','I2','I3','D','R'])
# df_results_history['tvec'] = list(tvec)
# df_results_history['S'] = list(history[:,0])
# df_results_history['E'] = list(history[:,1])
# df_results_history['I1'] = list(history[:,2])
# df_results_history['I2'] = list(history[:,3])
# df_results_history['I3'] = list(history[:,4])
# df_results_history['D'] = list(history[:,5])
# df_results_history['R'] = list(history[:,6])
# df_results_com_history = pd.DataFrame(columns=['tvec','S','E','I1','I2','I3','D','R'])
# df_results_com_history['tvec'] = list(tvec)
# df_results_com_history['S'] = list(cumulative_history[:,0])
# df_results_com_history['E'] = list(cumulative_history[:,1])
# df_results_com_history['I1'] = list(cumulative_history[:,2])
# df_results_com_history['I2'] = list(cumulative_history[:,3])
# df_results_com_history['I3'] = list(cumulative_history[:,4])
# df_results_com_history['D'] = list(cumulative_history[:,5])
# df_results_com_history['R'] = list(cumulative_history[:,6])
intervention_save = None
if args.intervention_type == 'no_intervention':
intervention_save = 'no_intervention'
elif args.intervention_type == 'intervention':
intervention_save = 'intervention'
elif args.intervention_type == 'school_alternancy':
intervention_save = 'school_alternancy'
else:
print('No valid intervention type')
if not os.path.isdir( os.path.join(results_path, intervention_save, str(number_nodes)) ):
os.makedirs(os.path.join(results_path, intervention_save, str(number_nodes)))
path_save = os.path.join(results_path, intervention_save, str(number_nodes))
df_results_soln.to_csv(path_save+'/{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_{}_soln.csv'.format(str(number_nodes),str(args.intervention),str(args.school_occupation),args.masks_type,str(args.fraction_people_masks),str(args.ventilation_out),args.res_id), index=False)
df_results_soln_cum.to_csv(path_save+'/{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_{}_soln_cum.csv'.format(str(number_nodes),str(args.intervention),str(args.school_occupation),args.masks_type,str(args.fraction_people_masks),str(args.ventilation_out),args.res_id), index=False)
df_results_soln_ind.to_csv(path_save+'/{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_{}_soln_ind.csv'.format(str(number_nodes),str(args.intervention),str(args.school_occupation),args.masks_type,str(args.fraction_people_masks),str(args.ventilation_out),args.res_id), index=False)
classify_pop_df.to_csv(path_save+'/{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_{}_indIDS.csv'.format(str(number_nodes),str(args.intervention),str(args.school_occupation),args.masks_type,str(args.fraction_people_masks),str(args.ventilation_out),args.res_id), index=False)
# df_results_history.to_csv(path_save+'/{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_{}_history.csv'.format(str(number_nodes),str(args.intervention),str(args.school_occupation),args.masks_type,str(args.fraction_people_masks),str(args.ventilation_out),args.res_id), index=False)
# df_results_com_history.to_csv(path_save+'/{}_inter_{}_schoolcap_{}_mask_{}_peopleMasked_{}_ventilation_{}_ID_{}_com_history.csv'.format(str(number_nodes),str(args.intervention),str(args.school_occupation),args.masks_type,str(args.fraction_people_masks),str(args.ventilation_out),args.res_id), index=False)
print('Done! \n')
|
from collections import Counter
import json
import re
from bs4 import BeautifulSoup as Soup
import time
print 'using new'
class AnnotatedTextException(Exception):
pass
class AnnotatedText(object):
MATCH_TAG = re.compile(r'^\((\S+)\s*')
MATCH_END_BRACKET = re.compile(r'\s*\)\s*$')
MATCH_TEXT_ONLY = re.compile(r'^[^)(]*$')
EXCLUDE_NER_TYPES = set([
'TIME', 'DATE', 'NUMBER', 'DURATION', 'PERCENT', 'SET', 'ORDINAL',
'MONEY'
])
LEGAL_DEPENDENCY_TYPES = set([
'collapsed-ccprocessed', 'collapsed', 'basic'
])
def __init__(
self,
corenlp_xml=None,
aida_json=None,
dependencies='collapsed-ccprocessed',
exclude_ordinal_NERs=False,
exclude_long_mentions=False,
long_mention_threshold=5,
exclude_non_ner_coreferences=False
):
# If true, do not include NER's of the types listed in
# EXCLUDE_NER_TYPES
self.exclude_ordinal_NERs = exclude_ordinal_NERs
# If true, ignore coreference mentions that are more than
# long_mention_threshold number of tokens long
self.exclude_long_mentions = exclude_long_mentions
self.long_mention_threshold = long_mention_threshold
# If true, ignore coreference chains that don't have a Named Entity
# as their representative mention.
self.exclude_non_ner_coreferences = exclude_non_ner_coreferences
# User can choose the kind of dependency parse they wish to use
# Valid options listed below. Ensure that a valid option was
# chosen
if dependencies not in self.LEGAL_DEPENDENCY_TYPES:
raise ValueError(
'dependencies must be one of "basic", '
'"collapsed", or "collapsed-ccprocessed".'
)
self.dependencies = dependencies
# Parse the annotated article xml
if corenlp_xml is not None:
self._read_stanford_xml(corenlp_xml)
# Parse the AIDA JSON
if aida_json is not None:
self._read_aida_json(aida_json)
# User cannot provide AIDA data unless stanford xml is also
# provided
elif aida_json is not None:
raise ValueError(
'You cannot provide AIDA json without also providing Stanford'
' xml.'
)
def _read_stanford_xml(self, article_string):
'''
read in an article that has been annotated by coreNLP, and
represent it using python objects
'''
# a string representing the xml output by coreNLP
self.text = article_string
# Parse the CoreNLP xml using BeautifulSoup
self._beautiful_soup_parse()
# Build a Python representation of all the sentences
self._read_all_sentences()
# build a Python representation of the coreference chains
self._build_coreferences()
# Link AIDA disambiguations to corresponding coreference chains
#self._link_references()
def _beautiful_soup_parse(self):
# Before parsing the xml, we have to deal with a glitch in how
# Beautiful Soup parses xml: it doesn't allow <head></head> tags,
# which do appear in CoreNLP output. Work around this by converting
# these into <headword> tags instead.
head_replacer = re.compile(r'(?P<open_tag></?)\s*head\s*>')
self.text = head_replacer.sub('\g<open_tag>headword>', self.text)
# Parse the xml
self.soup = Soup(self.text, 'html.parser')
def _read_all_sentences(self):
'''
Process all of the sentence tags in the CoreNLP xml. Each
Sentence has tokens and a dependency parse. Read tokens'
attributes into Python types, and add links between tokens
representing the dependency tree.
'''
# Initialize some containers to hold data found in sentences
self.sentences = []
self.tokens = []
self.tokens_by_offset = {}
self.num_sentences = 0
# Tolerate an article having no sentences
try:
sent_tags = self.soup.find('sentences').find_all('sentence')
except AttributeError:
sent_tags = []
# Process each sentence tag
for s in sent_tags:
self.num_sentences += 1
self.sentences.append(self._read_sentence(s))
def _read_aida_json(self, json_string):
# Parse the json
aida_data = json.loads(json_string)
# Tie each mention disambiguated by aida to a corresponding mention
# in the stanford output
for aida_mention in aida_data['mentions']:
self._link_aida_mention(aida_mention, aida_data)
# For each referenece (group of mentions believed to refer to the
# same entity) check for inconsistent entities
self.disambiguated_references = []
for reference in self.references:
self._link_aida_reference(reference, aida_data)
def _link_aida_reference(self, reference, aida_data):
# Tally up the kbids that have been attached mentions within
# this reference
kbid_counter = Counter()
kbid_score_tally = Counter()
for mention in reference['mentions']:
try:
kbid_counter[mention['kbIdentifier']] += 1
kbid_score_tally[mention['kbIdentifier']] += mention[
'disambiguationScore']
except KeyError:
pass
# Sort the kbids based on the number of times a mention
# was linked to that kbid
kbids_by_popularity = kbid_counter.most_common()
# Fail if no kbids were linked to mentions in this reference
# IDEA: would it be more expected to set kbid to None rather
# than have it not exist at all?
if len(kbids_by_popularity) == 0:
return
# Pull out those kbids that received the most votes
majority_num_votes = kbids_by_popularity[0][1]
majority_vote_kbids = [
kbid for kbid, count
in kbids_by_popularity
if count == majority_num_votes
]
# Sort them by largest total confidence score to break ties
score_tallied_kbids = sorted([
(kbid_score_tally[kbid], kbid)
for kbid in majority_vote_kbids
], key=lambda x: x[0], reverse=True)
# Assign the highest confidence kbid to the reference
kbId = score_tallied_kbids[0][1]
reference['kbIdentifier'] = kbId
# Assign the YAGO taxonomy types associated to that entity
# Remove the "YAGO_" at the same time.
reference['types'] = [
t[len('YAGO_'):] for t in
aida_data['entityMetadata'][kbId]['type']
]
# Add this reference to the list of disambiguated references
self.disambiguated_references.append(reference)
def _link_aida_mention(self, aida_mention, aida_data):
# take the best matched entity found by AIDA for this mention
try:
kbid = (
aida_mention['bestEntity']['kbIdentifier']
.decode('unicode-escape')
)
score = float(aida_mention['bestEntity']['disambiguationScore'])
# Assign the YAGO taxonomy types. Remove the "YAGO_" prefix
# from them at the same time.
types = [
t[len('YAGO_'):] for t in
aida_data['entityMetadata'][kbid]['type']
]
# Fail if AIDA provided no entity
except KeyError:
return
# Find the corresponding Stanford-identified mention
mention = self._find_or_create_mention_by_offset_range(
aida_mention['offset'], aida_mention['length'])
# fail if no associated mention could be found:
if mention is None:
return
mention['kbIdentifier'] = kbid
mention['disambiguationScore'] = score
mention['types'] = types
def _find_or_create_mention_by_offset_range(self, start, length):
pointer = start
mention = None
found_tokens = []
while pointer <= start + length:
# find the next token
token = self._get_token_after(pointer)
# handle an edge case where a token that goes beyond
# the range is inadvertently accessed
if token['character_offset_end'] > start + length:
break
# Add the token to the list of spanned tokens
found_tokens.append(token)
# find the mention related to that token, if any
try:
mention = token['mention']
break
except KeyError:
pass
pointer = token['character_offset_end']
# If no tokens associated with the mention are found
# (a zero-token mention?), fail
if len(found_tokens) == 0:
return None
# If we found an existing mention, return it
if mention is not None:
return mention
# Otherwise, create a mention and a reference
sentence_id = found_tokens[0]['sentence_id']
sentence = self.sentences[sentence_id]
new_mention = {
'tokens': found_tokens,
'start': min([t['id'] for t in found_tokens]),
'end': max([t['id'] for t in found_tokens]),
'head': self.find_head(found_tokens),
'sentence_id': sentence_id,
'sentence': sentence
}
ref = {
'id': self._get_next_coref_id(),
'mentions': [new_mention],
'representative': new_mention
}
new_mention['reference'] = ref
self.references.append(ref)
# Add the mention to the sentence
try:
sentence['mentions'].append(new_mention)
except KeyError:
sentence['mentions'] = [new_mention]
# Add the mention to the tokens involved
for token in found_tokens:
token['mention'] = new_mention
# Add the reference to the sentence
try:
sentence['references'].append(ref)
except KeyError:
sentence['references'] = [ref]
return new_mention
def _get_token_after(self, pointer):
token = None
while token is None:
# Get the token at or after offset <pointer>
try:
token = self.tokens_by_offset[pointer]
except KeyError:
pointer += 1
# But if we reach the end of the text it's an error
if pointer > len(self.text):
raise
return token
def _get_next_coref_id(self):
'''
yield incrementing coreference ids.
'''
try:
self.next_coref_id += 1
except AttributeError:
self.next_coref_id = 1
return self.next_coref_id - 1
def _link_references(self):
'''
Create a link from each mention's tokens back to the mention, and
create a link from the sentence to the entities for which it has
mentions.
'''
for ref in self.references:
for mention in ref['mentions']:
# link the mention to its reference
mention['reference'] = ref
# link the tokens to the mention
for token in mention['tokens']:
token['mention'] = mention
# note the extent of the mention
mention['start'] = min(
[t['id'] for t in mention['tokens']])
mention['end'] = max([t['id'] for t in mention['tokens']])
# link the sentence to the mention
mention_sentence_id = mention['tokens'][0]['sentence_id']
sentence = self.sentences[mention_sentence_id]
try:
sentence['mentions'].append(mention)
except KeyError:
sentence['mentions'] = [mention]
# Get all the sentences (by id) for a given reference
ref_sentence_ids = set([
token['sentence_id']
for mention in ref['mentions']
for token in mention['tokens']
])
# link the sentence to the references
for s_id in ref_sentence_ids:
sentence = self.sentences[s_id]
try:
sentence['references'].append(ref)
except KeyError:
sentence['references'] = [ref]
def _standardize_coreferencing(self):
# Generate an identifying signature for each NER entity, which
# will be used to cross-reference with coreference mentions.
all_ner_signatures = set()
ner_entity_lookup = {}
for s in self.sentences:
for entity in s['entities']:
# the sentence id and id of the entity's head token
# uniquely identifies it, and is hashable
entity_signature = (
entity['sentence_id'], # idx of the sentence
entity['head']['id'], # idx of entity's head token
)
all_ner_signatures.add(entity_signature)
# keep a link back to the entity based on its signature
ner_entity_lookup[entity_signature] = entity
# Generate an identifying signature for each coreference and for
# each mention. We will then be able to cross-reference the
# coreference chains / mentions and the entities
all_coref_signatures = set()
coref_entity_lookup = {}
all_mention_signatures = set()
all_coref_tokens = set()
for coref in self.coreferences:
coref_signature = (
coref['representative']['sentence_id'],
coref['representative']['head']['id'],
)
coref_entity_lookup[coref_signature] = coref
all_coref_signatures.add(coref_signature)
for mention in coref['mentions']:
all_coref_tokens.update([
(mention['sentence_id'], t['id'])
for t in mention['tokens']
])
all_mention_signatures.add((
mention['sentence_id'],
mention['head']['id'],
))
# get the ner signatures which aren't yet among the coref mentions
novel_ner_signatures = all_ner_signatures - all_coref_tokens
# get the coref signatures that are actual ners
valid_coref_signatures = all_coref_signatures & all_ner_signatures
# In some cases, we want "coreferences" to mean only coreference
# chains whose representative mention is a NER. Otherwise,
# we'll take all coreferences. A coreference chain could, for
# example, refer to an entity mentioned several times using a
# common noun (e.g. "the police").
if self.exclude_non_ner_coreferences:
self.references = [
coref_entity_lookup[es] for es in valid_coref_signatures
]
else:
self.references = [coref for coref in self.coreferences]
# build the ners not yet among the corefs into same structure as
# corefs
for signature in novel_ner_signatures:
entity = ner_entity_lookup[signature]
self.references.append({
'id':self._get_next_coref_id(),
'mentions': [entity],
'representative': entity
})
def _build_coreferences(self):
self.coreferences = []
coref_tag_container = self.soup.find('coreference')
if coref_tag_container is None:
return
coreference_tags = coref_tag_container.find_all('coreference')
for ctag in coreference_tags:
coreference = {
'id': self._get_next_coref_id(),
'mentions':[],
}
# Process each mention in this coreference chain
for mention_tag in ctag.find_all('mention'):
# Recall that we convert 1-based ids to 0-based
sentence_id = int(mention_tag.find('sentence').text) - 1
sentence = self.sentences[sentence_id]
start = int(mention_tag.find('start').text) - 1
end = int(mention_tag.find('end').text) - 1
head = int(mention_tag.find('headword').text) - 1
mention = {
'sentence_id': sentence_id,
'tokens': sentence['tokens'][start:end],
'head': sentence['tokens'][head]
}
# Long mentions are typically nonsense
do_exclude = (
self.exclude_long_mentions and
len(mention['tokens']) > self.long_mention_threshold
)
if do_exclude:
continue
if 'representative' in mention_tag.attrs:
coreference['representative'] = mention
coreference['mentions'].append(mention)
# if there's no mentions left in the coreference, don't keep it
# (this can happen if we are excluding long mentions.)
if len(coreference['mentions']) < 1:
continue
# if we didn't assign a representative mention, assign it to
# the first mention
if 'representative' not in coreference:
coreference['representative'] = coreference['mentions'][0]
self.coreferences.append(coreference)
# When a named entity gets referred to only once, CoreNLP doesn't
# make a coreference chain for that named entity. This makes
# scripts more complicated. Things are simplified if all NERs are
# guaranteed to have a coreference chain representation, even if
# some "chains" contains only one mention.
self._standardize_coreferencing()
def filter_mention_tokens(self, tokens):
tokens_with_ner = [t['ner'] is not None for t in tokens]
try:
idx_at_first_ner_token = tokens_with_ner.index(True)
idx_after_last_ner_token = (
len(tokens_with_ner)
- list(reversed(tokens_with_ner)).index(True)
)
except ValueError:
return []
return tokens[idx_at_first_ner_token:idx_after_last_ner_token]
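# Example sketch: for per-token 'ner' values [None, 'PERSON', 'PERSON', None]
# this returns the slice covering the two middle tokens, i.e. everything from
# the first to the last NER-tagged token (inclusive).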
def print_dep_tree(self, root_token, depth):
depth += 1
if 'children' in root_token:
for relation, child in root_token['children']:
print '\t'*depth + relation + ' ' + child['word']
self.print_dep_tree(child, depth)
def print_tree(self, tree):
if len(tree['children']) == 0:
print '\n'+('\t'*tree['depth'])+tree['code']+ ' : ' + tree['word']
else:
print '\n' + ('\t'*tree['depth'])+tree['code']+ ' :'
for child in tree['children']:
self.print_tree(child)
def _read_sentence(self, sentence_tag):
'''
Convert sentence tags to python dictionaries.
'''
# Note that CoreNLP uses 1-based indexing for sentence ids. We
# convert to 0-based indexing.
sentence = Sentence({
'id': int(sentence_tag['id']) - 1,
'tokens': self._read_tokens(sentence_tag),
'root': Token(),
'parse': self.read_parse(sentence_tag.find('parse').text),
})
# Give the tokens the dependency tree relation
self._read_dependencies(sentence, sentence_tag)
# Group the named entities together, and find the headword within
sentence['entities'] = self._read_entities(sentence['tokens'])
# Add tokens to global list and to the token offset-lookup table
# Exclude the "null" tokens that simulate sentence head.
self.tokens.extend(sentence['tokens'])
token_offsets = dict([
(t['character_offset_begin'], t) for t in sentence['tokens']
])
self.tokens_by_offset.update(token_offsets)
return sentence
def _read_dependencies(self, sentence, sentence_tag):
if self.dependencies == 'collapsed-ccprocessed':
dependencies_type = 'collapsed-ccprocessed-dependencies'
elif self.dependencies == 'collapsed':
dependencies_type = 'collapsed-dependencies'
elif self.dependencies == 'basic':
dependencies_type = 'basic-dependencies'
else:
raise ValueError(
'dependencies must be one of "basic", '
'"collapsed", or "collapsed-ccprocessed".'
)
dependencies = sentence_tag.find(
'dependencies', type=dependencies_type
).find_all('dep')
for dep in dependencies:
dependent_idx = int(dep.find('dependent')['idx']) - 1
dependent = sentence['tokens'][dependent_idx]
governor_idx = int(dep.find('governor')['idx']) - 1
# When the governor idx is -1, it means that the dependent
# token is the root of the sentence. Simply mark it as such
# and continue to the next dependency entry
if governor_idx < 0:
sentence['root'] = dependent
dependent['parents'] = []
continue
# Otherwise there is a distinct governor token, and we'll
# need to build the two-way link between governor and dependent
else:
governor = sentence['tokens'][governor_idx]
# refuse to add a link which would create a cycle
if governor_idx in self.collect_descendents(dependent):
continue
dep_type = dep['type']
try:
governor['children'].append((dep_type, dependent))
except KeyError:
governor['children'] = [(dep_type, dependent)]
try:
dependent['parents'].append((dep_type, governor))
except KeyError:
dependent['parents'] = [(dep_type, governor)]
def collect_descendents(self, token):
descendents = [token['id']]
if 'children' not in token:
return descendents
for dep_type, child in token['children']:
descendents += self.collect_descendents(child)
return descendents
def read_parse(self, parse_text, parent=None, depth=0):
element = {'depth':depth}
# get the phrase or POS code
element['code'] = self.MATCH_TAG.match(parse_text).groups()[0]
# get the inner text
inner_text = self.MATCH_TAG.sub('', parse_text)
inner_text = self.MATCH_END_BRACKET.sub('', inner_text)
# if the inner text is just a word, get it, and don't recurse
if self.MATCH_TEXT_ONLY.match(inner_text):
element['word'] = inner_text.strip()
element['children'] = []
# if the inner text encodes child nodes, parse them recursively
else:
element['word'] = None
child_texts = self.split_parse_text(inner_text)
element['children'] = [
self.read_parse(ct, element, depth+1) for ct in child_texts]
element['parent'] = parent
return element
def split_parse_text(self, text):
if text[0] != '(':
raise ValueError('expected "(" at beginning of sentence node.')
depth = 0
strings = []
curstring = ''
for c in text:
# skip whitespace between nodes
if depth == 0 and c.strip() == '':
continue
curstring += c
if c == '(':
depth += 1
if c == ')':
depth -= 1
if depth == 0:
strings.append(curstring)
curstring = ''
return strings
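# Example sketch: split_parse_text('(DT the) (NN dog)') yields
# ['(DT the)', '(NN dog)'] -- one string per top-level child node.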
def _read_entities(self, tokens):
'''
collect the entities into a mention-like object
'''
entities = []
last_entity_type = None
cur_entity = None
entity_idx = -1
for token in tokens:
exclude = False
if self.exclude_ordinal_NERs:
if token['ner'] in self.EXCLUDE_NER_TYPES:
exclude = True
if token['ner'] is None or exclude:
token['entity_idx'] = None
# this might be the end of an entity
if cur_entity is not None:
entities.append(cur_entity)
cur_entity = None
elif token['ner'] == last_entity_type:
cur_entity['tokens'].append(token)
token['entity_idx'] = entity_idx
else:
# begins a new entity. Possibly ends an old one
if cur_entity is not None:
entities.append(cur_entity)
cur_entity = None
entity_idx += 1
cur_entity = {
'tokens':[token],
'sentence_id': int(token['sentence_id'])
}
token['entity_idx'] = entity_idx
last_entity_type = token['ner']
# if sentence end coincides with entity end, be sure to add entity
if cur_entity is not None:
entities.append(cur_entity)
# Now that we have the entities, find the headword for each
for entity in entities:
entity['head'] = self.find_head(entity['tokens'])
# filter out entities that have no head
entities = [e for e in entities if e['head'] is not None]
return entities
def find_head(self, tokens):
head = None
# If there is only one token, that's the head
if len(tokens) == 1:
head = tokens[0]
else:
# otherwise iterate over all the tokens to find the head
for token in tokens:
# if this token has no parents or children it's not part
# of the dependency tree (it's a preposition, e.g.)
if 'parents' not in token and 'children' not in token:
continue
# if this token has any parents that are among the tokens list
# it's not the head!
try:
if any([t[1] in tokens for t in token['parents']]):  # each t is (dep_type, governor)
continue
except KeyError:
pass
# otherwise it is the head
else:
head = token
# NOTE: head may be none
return head
def _read_tokens(self, sentence_tag):
'''
Convert token tag to python dictionary.
'''
# Note, in CoreNLP's xml, token ids and sentence ids are 1-based.
# We convert to 0-based indices.
sentence_id = int(sentence_tag['id']) - 1
tokens = []
for token_tag in sentence_tag.find_all('token'):
# The "Speaker" property can be missing, so handle that case
if token_tag.find('Speaker') is not None:
speaker = token_tag.find('Speaker').text
else:
speaker = None
# Get rest of the token's properties and make a Token object
token = Token({
'id': int(token_tag['id']) - 1,
'sentence_id': sentence_id,
'word': self.fix_word(token_tag.find('word').text),
'lemma': (token_tag.find('lemma').text).encode('utf8'),
'pos': token_tag.find('pos').text,
'ner': (
None if token_tag.find('ner').text == 'O'
else token_tag.find('ner').text),
'character_offset_begin': int(
token_tag.find('characteroffsetbegin').text),
'character_offset_end': int(
token_tag.find('characteroffsetend').text),
'speaker': speaker
})
tokens.append(token)
return tokens
def fix_word(self, word):
if word == '-LRB-':
return '('
if word == '-RRB-':
return ')'
return word.encode('utf8')#.decode('unicode-escape')
def __str__(self):
sentence_strings = []
for i, s in enumerate(self.sentences):
tokens = ' '.join([t['word'] for t in s['tokens']])
sentence_string = 'Sentence %d:\n%s' % (i, tokens)
sentence_strings.append(sentence_string)
return '\n\n'.join(sentence_strings)
def __repr__(self):
return self.__str__()
class Sentence(dict):
def __init__(self, *args, **kwargs):
super(Sentence, self).__init__(*args, **kwargs)
mandatory_listy_attributes = [
'tokens', 'entities', 'references', 'mentions']
for attr in mandatory_listy_attributes:
if attr not in self:
self[attr] = []
def as_string(self):
'''
return a simple single-line string made from all the tokens in
the sentence. This is basically the way the sentence actually
occurred in the text, but whitespace and certain punctuation get
normalized.
'''
# join all the tokens with single spaces (whitespace gets normalized)
return ' '.join([t['word'] for t in self['tokens']])
def __str__(self):
string = 'Sentence %d:\n' % self['id']
for t in self['tokens']:
string += '\t%s\n' % str(t)
return string
def __repr__(self):
return self.__str__()
def shortest_path(self, source, target):
'''
find the shortest path between source and target by performing a
breadth first from source, until target is seen
'''
source_node = {'id': source['id'], 'prev':None, 'next':[]}
ptr = 0
queue = [source_node]
seen = set([source['id']])
path = None
while ptr < len(queue):
cur_node = queue[ptr]
cur_token = self['tokens'][cur_node['id']]
if cur_node['id'] == target['id']:
path = self.trace_back(cur_node)
break
next_tokens = cur_token.get_children() + cur_token.get_parents()
for relation, next_token in next_tokens:
if next_token['id'] in seen:
continue
seen.add(next_token['id'])
next_node = {'id':next_token['id'], 'prev':cur_node, 'next':[]}
cur_node['next'].append(next_node)
queue.append(next_node)
ptr += 1
if path is None:
return path
# path is a list of token ids. Convert it to list of actual tokens
path = [self['tokens'][i] for i in path]
return path
def trace_back(self, target):
path = [target['id']]
cur = target
while cur['prev'] is not None:
cur = cur['prev']
path.append(cur['id'])
path.reverse()
return path
def dep_tree_str(self):
if 'tokens' not in self:
return '[no tokens!]'
string = str(self['root']) + '\n'
string += self._dep_tree_str(self['root'])
return string
def get_text(self):
return ' '.join([t['word'] for t in self['tokens']])
def _dep_tree_str(self, root_token, depth=0):
depth += 1
string = ''
if 'children' in root_token:
for relation, child in root_token['children']:
string += (
'\t'*depth + '<' + relation + '> ' + str(child) + '\n')
string += self._dep_tree_str(child, depth)
return string
class Token(dict):
def __str__(self):
offset = '(%d,%d)' % (
self['character_offset_begin'],
self['character_offset_end']
)
ner = self['ner'] if self['ner'] is not None else '-'
description = '%2d: %s %s %s %s' % (
self['id'], self['word'], offset, self['pos'], ner
)
description = description.encode('utf8')
return description
def __repr__(self):
return self.__str__()
def get_parents(self):
return self['parents'] if 'parents' in self else []
def get_children(self):
return self['children'] if 'children' in self else []
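# Minimal usage sketch (hypothetical file name; assumes the xml is CoreNLP output):
#
#     with open('article.corenlp.xml') as f:
#         annotated = AnnotatedText(f.read(), dependencies='basic')
#     for sentence in annotated.sentences:
#         print sentence.as_string()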
|
import os
from pydub import AudioSegment
path = os.getcwd()
print(path)
# On Windows the root path is different: change it to the right path
os.chdir("C:\\Users\\s157874\\Documents\\GitHub\\wavegan\\data\\dir_long_audio_files\\houspa")
path = os.getcwd()
audio_files = os.listdir()
print(audio_files)
# You don't need the number of files in the folder; just iterate over them directly:
for file in audio_files:
# splitting the file into the name and the extension
name, ext = os.path.splitext(file)
if ext == ".mp3":
mp3_sound = AudioSegment.from_mp3(file)
# export to wav using the old name + ".wav"
mp3_sound.export("{0}.wav".format(name), format="wav")
|
import pandas as pd
from autoscalingsim.deltarepr.node_group_delta import NodeGroupDelta
from autoscalingsim.utils.error_check import ErrorChecker
from .platform_scaling_info import PlatformScalingInfo
class PlatformScalingModel:
def __init__(self, simulation_step : pd.Timedelta):
self.platform_scaling_infos = dict()
self.simulation_step = simulation_step
def add_provider(self, provider : str, node_scaling_infos_raw : list):
self.platform_scaling_infos[provider] = PlatformScalingInfo(provider, node_scaling_infos_raw)
def delay(self, node_group_delta : NodeGroupDelta):
"""
Implements the delay operation at the platform level. Returns the delay
together with the enforced node group delta. Since the node group delta
contains only one group, applying the delay yields another single group.
"""
delay = pd.Timedelta(0, unit = 'ms')
enforced_node_group_delta = None
if node_group_delta.in_change:
provider = node_group_delta.provider
node_type = node_group_delta.node_type
delay = self.platform_scaling_infos[provider].termination_duration_for_node_type(node_type) if node_group_delta.is_scale_down \
else self.platform_scaling_infos[provider].booting_duration_for_node_type(node_type)
enforced_node_group_delta = node_group_delta.enforce()
return (delay, enforced_node_group_delta)
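# Minimal usage sketch (the provider name, raw scaling info and the node group
# delta are assumptions, shown only to illustrate the call sequence):
#
#     model = PlatformScalingModel(simulation_step = pd.Timedelta(10, unit = 's'))
#     model.add_provider('aws', node_scaling_infos_raw)
#     delay, enforced_delta = model.delay(node_group_delta)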
|
'''
These are tests to assist with creating :class:`.LinearOperator`.
'''
import numpy as np
from numpy.testing import assert_allclose
def adjointTest(O, rtol=1e-7):
''' Test for verifying forward and adjoint functions in LinearOperator.
adjointTest verifies correctness for the forward and adjoint functions
for an operator via asserting :math:`<A^H y, x> = <y, A x>`
Parameters
----------
O : LinearOperator
The LinearOperator to test.
rtol : float, optional
Relative tolerance passed to numpy.testing.assert_allclose (default 1e-7).
Examples
--------
>>> from pyop import LinearOperator
>>> A = LinearOperator((4,4), lambda _, x: x, lambda _, x: x)
>>> adjointTest(A)
>>> B = LinearOperator((4,4), lambda _, x: x, lambda _, x: 2*x)
>>> adjointTest(B)
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AssertionError:
'''
x = np.random.rand(O.shape[1])
y = np.random.rand(O.shape[0])
assert_allclose(O.T(y).dot(x), y.dot(O(x)), rtol = rtol)
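# Additional sketch (reuses the LinearOperator call pattern shown in the
# docstring above; A_mat is an arbitrary explicit matrix):
#
#     A_mat = np.random.rand(3, 4)
#     O = LinearOperator((3, 4), lambda _, x: A_mat.dot(x),
#                        lambda _, x: A_mat.T.dot(x))
#     adjointTest(O)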
|
import base64
import hashlib
# Imports required by the __main__ case below
import sys
def cqlDigestGenerator(schemaPath):
buff = ""
with open(schemaPath) as schema:
for line in schema:
realLine = line.strip()
if len(realLine) == 0 or realLine.isspace():
continue
buff += (" " if len(buff) > 0 else "")
buff += realLine
if realLine.endswith(';'):
m = hashlib.sha256()
m.update(buff.encode('utf-8'))
# We want to return a string containing the base64 representation
# so that the user doesn't have to mess around with bytestrings
yield (buff, base64.b64encode(m.digest()).decode('utf-8'))
buff = ""
if __name__ == "__main__":
"""Preserving this for validation of the logic above"""
for (cql, digest) in cqlDigestGenerator(sys.argv[1]):
print("Digest: {}, CQL: {}".format(digest,cql))
|
################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
##################################################
# ./SecurityuserimportImplService_services.py
# generated by ZSI.wsdl2python
#
#
##################################################
import urlparse, types
from ZSI.TCcompound import Struct
from ZSI import client
import ZSI
class SecurityuserimportImplServiceInterface:
def getSecurityuserimportImpl(self, portAddress=None, **kw):
raise NotImplementedError, "method not implemented"
class SecurityuserimportImplServiceLocator(SecurityuserimportImplServiceInterface):
SecurityuserimportImpl_address = "https://gama.nbcr.net:9443/axis/services/SecurityUserImportService"
def getSecurityuserimportImplAddress(self):
return SecurityuserimportImplServiceLocator.SecurityuserimportImpl_address
def getSecurityuserimportImpl(self, portAddress=None, **kw):
return SecurityUserImportServiceSoapBindingSOAP(portAddress or SecurityuserimportImplServiceLocator.SecurityuserimportImpl_address, **kw)
class SecurityUserImportServiceSoapBindingSOAP:
def __init__(self, addr, **kw):
netloc = (urlparse.urlparse(addr)[1]).split(":") + [80,]
if not kw.has_key("host"):
kw["host"] = netloc[0]
if not kw.has_key("port"):
kw["port"] = int(netloc[1])
if not kw.has_key("url"):
kw["url"] = urlparse.urlparse(addr)[2]
self.binding = client.Binding(**kw)
def listUsers(self, request):
"""
@param: request to listUsersRequest
@return: response from listUsersResponse::
_listUsersReturn: str
"""
if not isinstance(request, listUsersRequest) and\
not issubclass(listUsersRequest, request.__class__):
raise TypeError, "%s incorrect request type" %(request.__class__)
kw = {}
response = self.binding.Send(None, None, request, soapaction="", **kw)
response = self.binding.Receive(listUsersResponseWrapper())
if not isinstance(response, listUsersResponse) and\
not issubclass(listUsersResponse, response.__class__):
raise TypeError, "%s incorrect response type" %(response.__class__)
return response
class listUsersRequest (ZSI.TCcompound.Struct):
def __init__(self, name=None, ns=None):
oname = None
if name:
oname = name
if ns:
oname += ' xmlns="%s"' % ns
ZSI.TC.Struct.__init__(self, listUsersRequest, [], pname=name, aname="%s" % name, oname=oname )
class listUsersRequestWrapper(listUsersRequest):
"""wrapper for rpc:encoded message"""
typecode = listUsersRequest(name='listUsers', ns='urn:axis')
def __init__( self, name=None, ns=None, **kw ):
listUsersRequest.__init__( self, name='listUsers', ns='urn:axis' )
class listUsersResponse (ZSI.TCcompound.Struct):
def __init__(self, name=None, ns=None):
self._listUsersReturn = None
oname = None
if name:
oname = name
if ns:
oname += ' xmlns="%s"' % ns
ZSI.TC.Struct.__init__(self, listUsersResponse, [ZSI.TC.String(pname="listUsersReturn",aname="_listUsersReturn",optional=1),], pname=name, aname="%s" % name, oname=oname )
class listUsersResponseWrapper(listUsersResponse):
"""wrapper for rpc:encoded message"""
typecode = listUsersResponse(name='listUsersResponse', ns='urn:axis')
def __init__( self, name=None, ns=None, **kw ):
listUsersResponse.__init__( self, name='listUsersResponse', ns='urn:axis' )
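# Minimal usage sketch (network access to the service endpoint is assumed):
#
#     locator = SecurityuserimportImplServiceLocator()
#     port = locator.getSecurityuserimportImpl()
#     response = port.listUsers(listUsersRequestWrapper())
#     print response._listUsersReturn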
|
#
# The MIT License (MIT)
#
# This file is part of RLScore
#
# Copyright (c) 2008 - 2016 Tapio Pahikkala, Antti Airola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import scipy.sparse
from rlscore.utilities import linalg
from rlscore.utilities import array_tools
from rlscore.utilities import adapter
from rlscore.measure.measure_utilities import UndefinedPerformance
from rlscore.predictor import PredictorInterface
from rlscore.measure import cindex
from rlscore.utilities.cross_validation import grid_search
from rlscore.measure.measure_utilities import qids_to_splits
class QueryRankRLS(PredictorInterface):
"""RankRLS algorithm for learning to rank
Implements the learning algorithm for learning from query-structured
data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix
Y : {array-like}, shape = [n_samples] or [n_samples, n_labels]
Training set labels
qids : list of query ids, shape = [n_samples]
Training set qids
regparam : float, optional
regularization parameter, regparam > 0 (default=1.0)
kernel : {'LinearKernel', 'GaussianKernel', 'PolynomialKernel', 'PrecomputedKernel', ...}
kernel function name, imported dynamically from rlscore.kernel
basis_vectors : {array-like, sparse matrix}, shape = [n_bvectors, n_features], optional
basis vectors (typically a randomly chosen subset of the training data)
Other Parameters
----------------
Typical kernel parameters include:
bias : float, optional
LinearKernel: the model is w*x + bias*w0, (default=1.0)
gamma : float, optional
GaussianKernel: k(xi,xj) = e^(-gamma*<xi-xj,xi-xj>) (default=1.0)
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=1.0)
coef0 : float, optional
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=0.)
degree : int, optional
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=2)
Attributes
-----------
predictor : {LinearPredictor, KernelPredictor}
trained predictor
Notes
-----
Computational complexity of training:
m = n_samples, d = n_features, l = n_labels, b = n_bvectors
O(m^3 + dm^2 + lm^2): basic case
O(md^2 +lmd): Linear Kernel, d < m
O(mb^2 +lmb): Sparse approximation with basis vectors
The RankRLS algorithm was first introduced in [1]; an extended version of the work and the
efficient leave-query-out cross-validation method implemented in
the method 'holdout' can be found in [2].
References
----------
[1] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jorma Boberg and Tapio Salakoski
Learning to rank with pairwise regularized least-squares.
In Thorsten Joachims, Hang Li, Tie-Yan Liu, and ChengXiang Zhai, editors,
SIGIR 2007 Workshop on Learning to Rank for Information Retrieval, pages 27--33, 2007.
[2] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jouni Jarvinen, and Jorma Boberg.
An efficient algorithm for learning to rank from preference graphs.
Machine Learning, 75(1):129-165, 2009.
"""
def __init__(self, X, Y, qids, regparam = 1.0, kernel='LinearKernel', basis_vectors = None, **kwargs):
kwargs["bias"] = 0.
kwargs['kernel'] = kernel
kwargs['X'] = X
if basis_vectors is not None:
kwargs['basis_vectors'] = basis_vectors
self.svdad = adapter.createSVDAdapter(**kwargs)
self.Y = np.mat(array_tools.as_2d_array(Y))
self.regparam = regparam
self.svals = np.mat(self.svdad.svals)
self.svecs = self.svdad.rsvecs
self.size = self.Y.shape[0]
self.qids = map_qids(qids)
self.qidlist = qids_to_splits(self.qids)
self.solve(self.regparam)
def solve(self, regparam=1.0):
"""Trains the learning algorithm, using the given regularization parameter.
Parameters
----------
regparam : float (regparam > 0)
regularization parameter
Notes
-----
Computational complexity of re-training:
m = n_samples, d = n_features, l = n_labels, b = n_bvectors
O(lm^2): basic case
O(lmd): Linear Kernel, d < m
O(lmb): Sparse approximation with basis vectors
"""
if not hasattr(self, "D"):
qidlist = self.qids
objcount = max(qidlist) + 1
labelcounts = np.mat(np.zeros((1, objcount)))
Pvals = np.ones(self.size)
for i in range(self.size):
qid = qidlist[i]
labelcounts[0, qid] = labelcounts[0, qid] + 1
D = np.mat(np.ones((1, self.size), dtype=np.float64))
#The centering matrix way (HO computations should be modified accordingly too)
for i in range(self.size):
qid = qidlist[i]
Pvals[i] = 1. / np.sqrt(labelcounts[0, qid])
#The old Laplacian matrix way
#for i in range(self.size):
# qid = qidlist[i]
# D[0, i] = labelcounts[0, qid]
P = scipy.sparse.coo_matrix((Pvals, (np.arange(0, self.size), qidlist)), shape=(self.size,objcount))
P_csc = P.tocsc()
P_csr = P.tocsr()
#Eigenvalues of the kernel matrix
#evals = np.multiply(self.svals, self.svals)
#Temporary variables
ssvecs = np.multiply(self.svecs, self.svals)
#These are cached for later use in solve and holdout functions
ssvecsTLssvecs = (np.multiply(ssvecs.T, D) - (ssvecs.T * P_csc) * P_csr.T) * ssvecs
LRsvals, LRevecs = linalg.eig_psd(ssvecsTLssvecs)
LRsvals = np.mat(LRsvals)
LRevals = np.multiply(LRsvals, LRsvals)
LY = np.multiply(D.T, self.Y) - P_csr * (P_csc.T * self.Y)
self.multipleright = LRevecs.T * (ssvecs.T * LY)
self.multipleleft = ssvecs * LRevecs
self.LRevals = LRevals
self.LRevecs = LRevecs
self.D = D
self.regparam = regparam
#Compute the eigenvalues determined by the given regularization parameter
self.neweigvals = 1. / (self.LRevals + regparam)
self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
self.predictor = self.svdad.createModel(self)
def holdout(self, indices):
"""Computes hold-out predictions for a trained RLS.
Parameters
----------
indices : list of indices, shape = [n_hsamples]
list of indices of training examples belonging to the set for which the
hold-out predictions are calculated. Should correspond to one query.
Returns
-------
F : array, shape = [n_hsamples, n_labels]
holdout query predictions
Notes
-----
Computational complexity of holdout:
m = n_samples, d = n_features, l = n_labels, b = n_bvectors, h=n_hsamples
O(h^3 + lmh): basic case
O(min(h^3 + lh^2, d^3 + ld^2) +ldh): Linear Kernel, d < m
O(min(h^3 + lh^2, b^3 + lb^2) +lbh): Sparse approximation with basis vectors
"""
indices = array_tools.as_index_list(indices, self.Y.shape[0])
if len(indices) == 0:
raise IndexError('Hold-out predictions can not be computed for an empty hold-out set.')
if len(indices) != len(np.unique(indices)):
raise IndexError('Hold-out can have each index only once.')
hoqid = self.qids[indices[0]]
for ind in indices:
if not hoqid == self.qids[ind]:
raise IndexError('All examples in the hold-out set must have the same qid.')
indlen = len(indices)
Qleft = self.multipleleft[indices]
sqrtQho = np.multiply(Qleft, np.sqrt(self.neweigvals))
Qho = sqrtQho * sqrtQho.T
Pho = np.mat(np.ones((len(indices),1))) / np.sqrt(len(indices))
Yho = self.Y[indices]
Dho = self.D[:, indices]
LhoYho = np.multiply(Dho.T, Yho) - Pho * (Pho.T * Yho)
RQY = Qleft * np.multiply(self.neweigvals.T, self.multipleright) - Qho * LhoYho
sqrtRQRTLho = np.multiply(Dho.T, sqrtQho) - Pho * (Pho.T * sqrtQho)
if sqrtQho.shape[0] <= sqrtQho.shape[1]:
RQRTLho = sqrtQho * sqrtRQRTLho.T
I = np.mat(np.identity(indlen))
return np.squeeze(np.array((I - RQRTLho).I * RQY))
else:
RQRTLho = sqrtRQRTLho.T * sqrtQho
I = np.mat(np.identity(sqrtQho.shape[1]))
return np.squeeze(np.array(RQY + sqrtQho * ((I - RQRTLho).I * (sqrtRQRTLho.T * RQY))))
class LeaveQueryOutRankRLS(PredictorInterface):
"""RankRLS algorithm for learning to rank with query-structured data. Selects
automatically regularization parameter using leave-query-out cross-validation.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix
Y : {array-like}, shape = [n_samples] or [n_samples, n_labels]
Training set labels
qids : list of query ids, shape = [n_samples]
Training set qids
regparam : float, optional
regularization parameter, regparam > 0 (default=1.0)
kernel : {'LinearKernel', 'GaussianKernel', 'PolynomialKernel', 'PrecomputedKernel', ...}
kernel function name, imported dynamically from rlscore.kernel
basis_vectors : {array-like, sparse matrix}, shape = [n_bvectors, n_features], optional
basis vectors (typically a randomly chosen subset of the training data)
Other Parameters
----------------
Typical kernel parameters include:
bias : float, optional
LinearKernel: the model is w*x + bias*w0, (default=1.0)
gamma : float, optional
GaussianKernel: k(xi,xj) = e^(-gamma*<xi-xj,xi-xj>) (default=1.0)
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=1.0)
coef0 : float, optional
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=0.)
degree : int, optional
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=2)
Attributes
-----------
predictor : {LinearPredictor, KernelPredictor}
trained predictor
cv_performances : array, shape = [grid_size]
leave-query-out performances for each grid point
cv_predictions : list of 1D or 2D arrays, shape = [grid_size, n_queries]
predictions for each query, shapes [query_size] or [query_size, n_labels]
regparam : float
regparam from grid with best performance
    Notes
    -----
Uses fast solve and holdout algorithms, complexity depends on the sizes of the queries.
Complexity is:
m = n_samples, d = n_features, l = n_labels, b = n_bvectors, r=grid_size, k = n_queries
O(m^3 + dm^2 + r*(m^3/k^2 + lm^2)): basic case
O(md^2 + r*(min(m^3/k^2 + lm^2/k, kd^3 + kld^2) + ldm)): Linear Kernel, d < m
O(mb^2 + r*(min(m^3/k^2 + lm^2/k, kb^3 + klb^2) + lbm)): Sparse approximation with basis vectors
RankRLS algorithm was first introduced in [1], extended version of the work and the
efficient leave-query-out cross-validation method implemented in
the method 'holdout' are found in [2].
References
----------
[1] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jorma Boberg and Tapio Salakoski
Learning to rank with pairwise regularized least-squares.
In Thorsten Joachims, Hang Li, Tie-Yan Liu, and ChengXiang Zhai, editors,
SIGIR 2007 Workshop on Learning to Rank for Information Retrieval, pages 27--33, 2007.
[2] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jouni Jarvinen, and Jorma Boberg.
An efficient algorithm for learning to rank from preference graphs.
Machine Learning, 75(1):129-165, 2009.
"""
def __init__(self, X, Y, qids, kernel='LinearKernel', basis_vectors = None, regparams=None, measure=None, **kwargs):
if regparams is None:
grid = [2**x for x in range(-15, 15)]
else:
grid = regparams
if measure is None:
self.measure = cindex
else:
self.measure = measure
learner = QueryRankRLS(X, Y, qids, grid[0], kernel, basis_vectors, **kwargs)
        crossvalidator = LQOCV(learner, self.measure)
self.cv_performances, self.cv_predictions, self.regparam = grid_search(crossvalidator, grid)
self.predictor = learner.predictor
class LQOCV(object):
def __init__(self, learner, measure):
self.rls = learner
self.measure = measure
def cv(self, regparam):
rls = self.rls
measure = self.measure
rls.solve(regparam)
Y = rls.Y
performances = []
predictions = []
folds = rls.qidlist
for fold in folds:
P = rls.holdout(fold)
predictions.append(P)
try:
performance = measure(Y[fold], P)
performances.append(performance)
except UndefinedPerformance:
pass
if len(performances) > 0:
performance = np.mean(performances)
else:
raise UndefinedPerformance("Performance undefined for all folds")
return performance, predictions
def map_qids(qids):
qidmap = {}
i = 0
for qid in qids:
if not qid in qidmap:
qidmap[qid] = i
i+=1
new_qids = []
for qid in qids:
new_qids.append(qidmap[qid])
return new_qids
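
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the QueryRankRLS learner defined above might
# be used: synthetic data, three queries, training, then leave-query-out
# predictions for one query via holdout(). All shapes and qids below are made
# up for illustration only.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 5)                 # 30 samples, 5 features
    Y_demo = rng.randn(30)                    # scalar relevance labels
    qids_demo = [i // 10 for i in range(30)]  # queries 0, 1, 2 with 10 samples each
    learner = QueryRankRLS(X_demo, Y_demo, qids_demo, regparam=1.0)
    holdout_preds = learner.holdout(list(range(10)))  # hold out the first query
    print(holdout_preds.shape)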
|
##############################
# spider for school of business
# filename:spider_bs.py
# author: liwei
# StuID: 1711350
# date: 2019.12.1
##############################
import scrapy
import os
from teacherInfo.items import TeacherinfoItem
import re
snapshots_path = '../query_system/templates/snapshots' # path where page snapshots are saved
class HTTeacherInfoSpider(scrapy.Spider):
name = "bs"
    # Create the folders that store the crawled data
    if not os.path.exists('../docs/%s'%name): # main folder
        os.mkdir('../docs/%s'%name)
    if not os.path.exists('../docs/%s/imgs'%name): # folder for photos
        os.mkdir('../docs/%s/imgs'%name)
    if not os.path.exists('../docs/%s/m_text' % name): # folder for anchor text files
        os.mkdir('../docs/%s/m_text' % name)
    if not os.path.exists('%s/%s' % (snapshots_path,name)): # folder for page snapshots
os.mkdir('%s/%s' % (snapshots_path,name))
# if os.path.exists('../docs/%s/index.txt'%name):
# os.remove('../docs/%s/index.txt'%name)
baseurl = 'https://bs.nankai.edu.cn/'
img_name_dict = {}
def start_requests(self):
urls = [
'https://bs.nankai.edu.cn/bm/list.htm',
'https://bs.nankai.edu.cn/caccounting/list.htm',
'https://bs.nankai.edu.cn/cmarketing/list.htm',
'https://bs.nankai.edu.cn/financial/list.htm',
'https://bs.nankai.edu.cn/hr/list.htm',
'https://bs.nankai.edu.cn/mse/list.htm',
'https://bs.nankai.edu.cn/irm/list.htm',
'https://bs.nankai.edu.cn/mrc/list.htm'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
        # Get the anchor text list
teacherItems = response.xpath('//ul[@class="wp_article_list"]')
print(response.url)
        # Parse the anchor text of each entry to get the link to the teacher's detail page
nexturls = teacherItems.xpath('.//li')
for urlt in nexturls:
            # Store the anchor text
            item = {
                "m_text":urlt.get(), # anchor text
                "parentUrl":response.url, # parent page
                "xueyuan":"南开大学商学院" # school (Nankai University Business School)
}
nurl = str(self.baseurl+urlt.xpath(".//a/@href").get()).replace('\n','').replace(' ','').replace('\r','').replace('\t','')
            # Recursive callback into the parser that extracts the teacher's info
print(nurl)
request = scrapy.Request(url=nurl, callback=self.parseTeacher)
request.meta['item'] = item
yield request
def parseImg(self, response):
item = response.meta['item']
last = str(item['image_url']).split('.')[-1]
        #if last == 'gif' : # gif images are empty placeholders
# return
with open('../docs/%s/imgs/%s.%s'%(self.name,item['image_name'],last),'wb') as f:
f.write(response.body)
f.close()
def parseTeacher(self,response):
#/html/body/div[3]/div/div/div
data = response.meta['item']
        # Save the main content of the page
details = response.xpath('//div[@portletmode="simpleArticleAttri"]')
filename=str(details.xpath('.//div[@class="name"]/text()').get()).replace('\n','').replace(' ','').replace('\r','')
        f = open('../docs/%s/%s.txt'%(self.name,filename),'w',encoding='utf-8') # file holding the teacher's homepage info
f.write(filename+'\n')
for item in details.xpath('.//div[@class = "lxfs-info"]').xpath('.//div[@class="info"]'):
#print(item)
for text in item.xpath('.//text()').getall():
f.write(str(text).replace('\n','').replace(' ','').replace('\r',''))
f.write('\n')
for item in details.xpath('.//div[@class="layui-tab layui-tab-brief"]'):
#print(item)
for text in item.xpath('.//text()').getall():
f.write(str(text).replace('\n','').replace(' ','').replace('\r',''))
f.write('\n')
f.close()
        # Store the mapping between teacher name and URL
        file = open('../docs/%s/index.txt'%self.name,'a',encoding='utf-8')
        # Index of basic info: name, page URL, school, parent link
file.write(filename+ "," + response.url+','+data["xueyuan"]+","+data['parentUrl']+ '\n')
file.close()
        # Save the anchor text
m_f = open('../docs/%s/m_text/%s_m.txt'%(self.name,filename),'w',encoding='utf-8')
m_f.write(str(data["m_text"]))
m_f.close()
        # Save the page snapshot
with open('%s/%s/%s.html' % (snapshots_path,self.name, filename), 'wb')as s_f:
s_f.write(response.body)
        # Recursive callback to save the photo
imgurl = details.xpath('.//img/@src').get()
item = TeacherinfoItem()
item['image_name'] = filename
item['image_url'] = self.baseurl + imgurl
request = scrapy.Request(url=item['image_url'], callback=self.parseImg)
request.meta['item'] = item
yield request
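
# --- Illustrative usage sketch (not part of the original project) ---
# A hedged way to run this spider programmatically instead of the usual
# `scrapy crawl bs`; it assumes this is executed inside the scrapy project so
# that the project settings can be discovered.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    process = CrawlerProcess(get_project_settings())
    process.crawl(HTTeacherInfoSpider)
    process.start()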
|
import multiprocessing
from mrhttp import Application
from mrhttp import Request
import mrhttp
import socket
import mrjson as json
import asyncpg
import weakref
import random
from jinja2 import Template
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers
# wrk -t4 -c32 -d5s http://localhost:8080/ -s msgqueue.lua
nc = NATS()
app = Application()
@app.on('at_start')
async def dbsetup():
bstr = "[1,2,3,4,5,6,7,8,9,10]".encode("utf-8")
bstr = bstr*10
#bstr = b"fart"
app.bstr = bstr
await nc.connect(io_loop=app.loop)
@app.route('/', type="text")
async def hello(request):
await nc.publish("foo", app.bstr)
return 'Hello World!'
app.run(debug=True, cores=4)
|
# Define a class attribute
class Dog(object):
tooth = 30
# Create objects
wangcai = Dog()
xiaohei = Dog()
# Access the class attribute via the class and via instances
print(Dog.tooth)
print(wangcai.tooth)
print(xiaohei.tooth)
|
from datetime import datetime
from rest_framework import serializers
from mangacache.models import Chapter, Manga, Author
class MangaDetailSerializer(serializers.Serializer):
name = serializers.CharField(required=True, allow_blank=False, max_length=255)
url = serializers.URLField()
description = serializers.CharField(max_length=65525)
def create(self, validated_data):
return Manga.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get("name", instance.name)
instance.url = validated_data.get("url", instance.url)
instance.description = validated_data.get("description", instance.description)
return instance
class AuthorSerializer(serializers.ModelSerializer):
name = serializers.CharField(max_length=100)
url = serializers.URLField()
description = serializers.CharField(max_length=1000)
manga = MangaDetailSerializer(many=True)
class Meta:
model = Author
def create(self, validated_data):
author = Author.objects.create(
name=validated_data["name"],
url=validated_data["url"],
description=validated_data["description"]
)
for item in validated_data["manga"]:
item["author"] = author
            Manga.objects.create(**item)
return author
class ChapterSerializer(serializers.ModelSerializer):
class Meta:
model = Chapter
def create(self, validated_data):
        chapter = Chapter.objects.create(**validated_data)
        return chapter
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.tom = validated_data.get('tom', instance.tom)
instance.number = validated_data.get('number', instance.number)
instance.added = validated_data.get('added', instance.added)
        for item in validated_data["pages"]:
            instance.pages.update(**item)
return instance
class MangaSerializer(serializers.Serializer):
name = serializers.CharField(required=True, allow_blank=False, max_length=255)
chapters = ChapterSerializer(many=True)
def create(self, validated_data):
        manga, _ = Manga.objects.get_or_create(name=validated_data["name"])
for item in validated_data['chapters']:
chapter = Chapter.objects.create(
name=item.get("name", ""),
tom=item.get("tom", 0),
number=item.get("number", 0),
added=item.get("added", datetime.now()),
manga=manga
)
chapter.save()
return manga
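
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how MangaSerializer defined above might be driven from a
# view or a management command. It assumes a configured Django project; the
# payload below is made up for illustration only.
def example_create_manga():
    payload = {
        "name": "Example Manga",
        "chapters": [
            {"name": "Chapter 1", "tom": 1, "number": 1},
        ],
    }
    serializer = MangaSerializer(data=payload)
    if serializer.is_valid():
        return serializer.save()  # dispatches to MangaSerializer.create()
    return serializer.errors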
|
from typing import Dict, Any, Tuple
from datetime import datetime
import re
def checkName(name: str) -> bool:
"""
Checks if a string is a valid name
Only spaces and alphabets allowed
Parameters
----------
name: str
The name to be Tested
Returns
-------
bool
True/False according to the validity of the name
"""
return type(name) == str and bool(re.match(r'[a-zA-Z\s]+$', name))
def checkDate(date: str) -> bool:
"""
    Checks if a string is a valid date (YYYY-MM-DD)
    Parameters
    ----------
    date: str
        The date string to be tested
Returns
-------
bool
        True/False according to the validity of the date
"""
if not type(date) == str:
return False
try:
datetime.strptime(date, "%Y-%m-%d")
except Exception:
return False
return True
def compareDates(date1: str, date2: str) -> Tuple[bool, str]:
"""
    Compares two date strings (YYYY-MM-DD)
    Parameters
    ----------
    date1: str
        The first date to be compared
    date2: str
        The second date to be compared
Returns
-------
Tuple(bool, str)
True/False according to the validity of the dates
"<"|">"|"="|"Error" is the sign between 1st and 2nd date
"""
if checkDate(date1) and checkDate(date2):
dateObj1 = datetime.strptime(date1, "%Y-%m-%d")
dateObj2 = datetime.strptime(date2, "%Y-%m-%d")
if dateObj1 > dateObj2:
return (True, ">")
elif dateObj1 < dateObj2:
return (True, "<")
else:
return (True, "=")
else:
return (False, "Error")
def checkEmail(email: str) -> bool:
"""
Checks if a string is a valid email
Parameters
----------
email: str
The email to be Tested
Returns
-------
bool
True/False according to the validity of the email
"""
return type(email) == str and bool(re.match(r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$', email))
def checkPhone(phone: str) -> bool:
"""
Checks if a phone number is valid
Expected Countrycode<space>Number +91 12345678901
Parameters
----------
phone: str
The string to be Tested
Returns
-------
bool
True/False according to the validity of the phone number
"""
return type(phone) == str and phone.split()[-1].isnumeric()
def testAllInput(Inputs: Dict[str, Any]) -> Tuple[bool, str]:
"""
Tests All Inputs for their validity
Parameters
----------
Inputs: Dict[str, Any]
The Set of Key value Pair holding field names and their values
Returns
-------
bool
False if any Input is invalid, True if all tests pass
str
The field for which the test failed
"""
if "name" in Inputs:
if not checkName(Inputs["name"]):
return (False, "name")
if "organization" in Inputs:
        if not checkName(Inputs["organization"]):
            return (False, "organization")
if "dob" in Inputs:
if not checkDate(Inputs["dob"]):
return (False, "dob")
if "phone" in Inputs:
if not checkPhone(Inputs["phone"]):
return (False, "phone")
if "email" in Inputs:
if not checkEmail(Inputs["email"]):
return (False, "email")
if "personalEmail" in Inputs:
if not checkEmail(Inputs["personalEmail"]):
return (False, "personalEmail")
return (True, "None")
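
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving testAllInput() with a made-up record; the keys
# follow the field names the function checks above.
if __name__ == "__main__":
    record = {
        "name": "Ada Lovelace",
        "dob": "1815-12-10",
        "email": "ada@example.com",
        "phone": "+44 2012345678",
    }
    ok, failed_field = testAllInput(record)
    print(ok, failed_field)  # expected: True None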
|
import json
import pandas
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from utils.tweepy.helpers import TweePy
@login_required(login_url='/signin')
def home(request):
return render(request, 'home.html')
def sign_in(request):
if request.user.is_authenticated:
return HttpResponseRedirect('/')
return render(request, 'login.html')
@login_required
def trends(request):
tp = TweePy(request.user)
trends = tp.get_trends()
df = pandas.DataFrame(trends[0]['trends'])
return HttpResponse(df['name'].to_json(orient='values'))
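
# --- Illustrative sketch (not part of the original file) ---
# A hypothetical URLconf wiring for the views above; the route names are
# assumptions chosen to match the login_url used in the decorators.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('signin', views.sign_in, name='signin'),
#       path('trends', views.trends, name='trends'),
#   ]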
|
from time import sleep, time
from random import randint
import os
import sys
_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../../"))+"/schema"
sys.path.append(_path)
from brain.queries.writes import transition_expired
CHECKING_PERIOD = 3 # seconds
if __name__ == "__main__":
while True:
try:
transition_expired(time())
except ValueError:
pass # forget it and restart
sleep(randint(CHECKING_PERIOD-1, CHECKING_PERIOD+1)) # thundering herd
|
from django.db import models
import base64
import numpy
import pickle
import datetime
from django.db.models import Q, Max, Min
import matplotlib.pyplot as plt
import filename_database.models
from io import BytesIO
from Key import Key
import plot_constants
CHARGE = 'chg'
DISCHARGE = 'dchg'
POLARITIES = [(CHARGE, 'CHARGE'), (DISCHARGE, 'DISCHARGE')]
def id_dict_from_id_list(id_list):
n = len(id_list)
id_dict = {}
for i in range(n):
id_dict[id_list[i]] = i
return id_dict
def get_files_for_cell_id(cell_id):
return CyclingFile.objects.filter(
database_file__deprecated = False
).filter(database_file__valid_metadata__cell_id = cell_id)
def clamp(a, x, b):
x = min(x, b)
x = max(x, a)
return x
def make_voltage_grid(min_v, max_v, n_samples, my_cell_ids):
if n_samples < 2:
n_samples = 2
all_cycs = Cycle.objects.filter(
discharge_group__cell_id__in = my_cell_ids, valid_cycle = True,
)
my_max = max(
all_cycs.aggregate(Max("chg_maximum_voltage"))[
"chg_maximum_voltage__max"
],
all_cycs.aggregate(Max("dchg_maximum_voltage"))[
"dchg_maximum_voltage__max"
]
)
my_min = min(
all_cycs.aggregate(Min("chg_minimum_voltage"))[
"chg_minimum_voltage__min"
],
all_cycs.aggregate(Min("dchg_minimum_voltage"))[
"dchg_minimum_voltage__min"
]
)
my_max = clamp(min_v, my_max, max_v)
my_min = clamp(min_v, my_min, max_v)
delta = (my_max - my_min) / float(n_samples - 1)
return numpy.array([my_min + delta * float(i) for i in range(n_samples)])
def make_current_grid(min_c, max_c, n_samples, my_cell_ids):
if n_samples < 2:
n_samples = 2
all_cycs = Cycle.objects.filter(
discharge_group__cell_id__in = my_cell_ids, valid_cycle = True,
)
my_max = max(
abs(
all_cycs.aggregate(Max("chg_maximum_current"))[
"chg_maximum_current__max"
]
),
abs(
all_cycs.aggregate(Max("dchg_maximum_current"))[
"dchg_maximum_current__max"
]
)
)
my_min = min(
abs(all_cycs.aggregate(Min("chg_minimum_current"))[
"chg_minimum_current__min"
]),
abs(all_cycs.aggregate(Min("dchg_minimum_current"))[
"dchg_minimum_current__min"
])
)
my_max = clamp(min_c, my_max, max_c)
my_min = clamp(min_c, my_min, max_c)
my_max = current_to_log_current(my_max)
my_min = current_to_log_current(my_min)
delta = (my_max - my_min) / float(n_samples - 1.)
return numpy.array([my_min + delta * float(i) for i in range(n_samples)])
def current_to_log_current(current):
return numpy.log(abs(current) + 1e-5)
# def temperature_to_arrhenius(temp):
# return numpy.exp(-1. / (temp + 273))
def make_sign_grid():
return numpy.array([1., -1.])
# TODO(harvey): replace magic numbers with constants
def make_temperature_grid(min_t, max_t, n_samples, my_cell_ids):
if n_samples < 2:
n_samples = 2
my_files = CyclingFile.objects.filter(
database_file__deprecated = False
).filter(database_file__valid_metadata__cell_id__in = my_cell_ids)
my_max = my_files.aggregate(
Max("database_file__valid_metadata__temperature")
)["database_file__valid_metadata__temperature__max"]
my_min = my_files.aggregate(
Min("database_file__valid_metadata__temperature")
)["database_file__valid_metadata__temperature__min"]
my_max = clamp(min_t, my_max, max_t)
if my_max < 55.:
my_max = 55.
my_min = clamp(min_t, my_min, max_t)
if my_min > 20.:
my_min = 20.
delta = (my_max - my_min) / float(n_samples - 1)
return numpy.array([my_min + delta * float(i) for i in range(n_samples)])
def compute_from_database(
cell_id, lower_cycle = None, upper_cycle = None, valid = True,
):
files_cell_id = CyclingFile.objects.filter(
database_file__deprecated = False,
database_file__valid_metadata__cell_id = cell_id,
).order_by("database_file__last_modified")
polarity = DISCHARGE
groups = {}
for cycle_group in get_discharge_groups_from_cell_id(cell_id):
q_curves = []
for f in files_cell_id:
offset_cycle = f.database_file.valid_metadata.start_cycle
filters = Q(valid_cycle = valid) & Q(cycling_file = f)
if not (lower_cycle is None and upper_cycle is None):
filters = filters & Q(
cycle_number__range = (
lower_cycle - offset_cycle, upper_cycle - offset_cycle,
),
)
if polarity == DISCHARGE:
filters = Q(discharge_group = cycle_group) & filters
elif polarity == CHARGE:
filters = Q(charge_group = cycle_group) & filters
cycles = Cycle.objects.filter(filters)
if cycles.exists():
q_curves += list([
(
float(cyc.cycle_number + offset_cycle),
-cyc.dchg_total_capacity,
)
for cyc in cycles.order_by("cycle_number")
])
if len(q_curves) > 0:
groups[(
cycle_group.constant_rate, cycle_group.end_rate_prev,
cycle_group.end_rate, cycle_group.end_voltage,
cycle_group.end_voltage_prev, cycle_group.polarity,
)] = numpy.array(
q_curves,
dtype = [
(Key.N, 'f4'),
("last_cc_capacity", 'f4'),
],
)
return groups
def make_file_legends_and_vertical(
ax, cell_id, lower_cycle = None, upper_cycle = None, show_invalid = False,
vertical_barriers = None, list_all_options = None, leg1 = None,
):
files_cell_id = CyclingFile.objects.filter(
database_file__deprecated = False,
database_file__valid_metadata__cell_id = cell_id,
).order_by("database_file__last_modified")
file_leg = []
if len(files_cell_id) >= 1:
for f_i, f in enumerate(files_cell_id):
offset_cycle = f.database_file.valid_metadata.start_cycle
if show_invalid:
min_cycle = offset_cycle + Cycle.objects.filter(
cycling_file = f
).aggregate(Min("cycle_number"))["cycle_number__min"]
max_cycle = offset_cycle + Cycle.objects.filter(
cycling_file = f
).aggregate(Max("cycle_number"))["cycle_number__max"]
else:
min_cycle = offset_cycle + Cycle.objects.filter(
cycling_file = f, valid_cycle = True,
).aggregate(Min("cycle_number"))["cycle_number__min"]
max_cycle = offset_cycle + Cycle.objects.filter(
cycling_file = f, valid_cycle = True,
).aggregate(Max("cycle_number"))["cycle_number__max"]
if lower_cycle is not None:
if min_cycle < lower_cycle:
min_cycle = lower_cycle - .5
if min_cycle > upper_cycle:
continue
if upper_cycle is not None:
if max_cycle > upper_cycle:
max_cycle = upper_cycle + .5
if max_cycle < lower_cycle:
continue
bla = plt.axvspan(
min_cycle, max_cycle, ymin = .05 * (1 + f_i),
ymax = .05 * (2 + f_i),
facecolor = plot_constants.COLORS[f_i],
alpha = 0.1
)
file_leg.append(
(
bla,
"File {} Last Modif: {}-{}-{}. Size: {}KB".format(
f_i,
f.database_file.last_modified.year,
f.database_file.last_modified.month,
f.database_file.last_modified.day,
int(f.database_file.filesize / 1024),
),
)
)
if vertical_barriers is not None:
for index_set_i in range(len(vertical_barriers) + 1):
col = ["1.", ".1"][index_set_i % 2]
if index_set_i == 0 and len(vertical_barriers) > 0:
min_x, max_x = (lower_cycle - 0.5, vertical_barriers[0])
elif index_set_i == 0 and len(vertical_barriers) == 0:
min_x, max_x = (lower_cycle - 0.5, upper_cycle + 0.5)
elif index_set_i == len(vertical_barriers):
min_x, max_x = (vertical_barriers[-1], upper_cycle + 0.5)
else:
min_x, max_x = (
vertical_barriers[index_set_i - 1],
vertical_barriers[index_set_i],
)
print(min_x, max_x)
ax.axvspan(min_x, max_x, facecolor = col, alpha = 0.1)
plt.text(
0.9 * min_x + .1 * max_x,
.99 * ax.get_ylim()[0] + .01 * ax.get_ylim()[1],
list_all_options[index_set_i],
size = 18,
)
for index_set_i in range(len(list_all_options) - 1):
plt.axvline(
x = vertical_barriers[index_set_i],
color = "k", linestyle = "--",
)
ax.tick_params(
direction = "in", length = 7, width = 2, labelsize = 11,
bottom = True, top = True, left = True, right = True,
)
if len(file_leg) > 0:
if list_all_options is None:
loc = "lower left"
else:
loc = "upper left"
ax.legend([x[0] for x in file_leg], [x[1] for x in file_leg], loc = loc)
ax.add_artist(leg1)
def get_byte_image(fig, dpi):
buf = BytesIO()
plt.savefig(buf, format = "png", dpi = dpi)
image_base64 = base64.b64encode(
buf.getvalue()
).decode("utf-8").replace("\n", "")
buf.close()
plt.close(fig)
return image_base64
# TODO(sam): use common mechanism as in compile_dataset/ml_smoothing for ordering
# TODO(sam): set default color rules in the UI.
def get_discharge_groups_from_cell_id(cell_id):
return list(
CycleGroup.objects.filter(
cell_id = cell_id, polarity = DISCHARGE,
).order_by("constant_rate")
)
class CyclingFile(models.Model):
database_file = models.OneToOneField(
filename_database.models.DatabaseFile, on_delete = models.CASCADE,
)
import_time = models.DateTimeField(default = datetime.datetime(1970, 1, 1))
process_time = models.DateTimeField(default = datetime.datetime(1970, 1, 1))
def get_cycles_array(self, fil = Q()):
return numpy.array(
[
(
cyc.id,
cyc.cycle_number,
cyc.chg_total_capacity,
cyc.chg_average_voltage,
cyc.chg_minimum_voltage,
cyc.chg_maximum_voltage,
cyc.chg_average_current_by_capacity,
cyc.chg_average_current_by_voltage,
cyc.chg_minimum_current,
cyc.chg_maximum_current,
cyc.chg_duration,
cyc.dchg_total_capacity,
cyc.dchg_average_voltage,
cyc.dchg_minimum_voltage,
cyc.dchg_maximum_voltage,
cyc.dchg_average_current_by_capacity,
cyc.dchg_average_current_by_voltage,
cyc.dchg_minimum_current,
cyc.dchg_maximum_current,
cyc.dchg_duration,
)
for cyc in self.cycle_set.filter(fil).order_by("cycle_number")
],
dtype = [
("id", int),
("cycle_number", int),
("chg_total_capacity", float),
("chg_average_voltage", float),
("chg_minimum_voltage", float),
("chg_maximum_voltage", float),
("chg_average_current_by_capacity", float),
("chg_average_current_by_voltage", float),
("chg_minimum_current", float),
("chg_maximum_current", float),
("chg_duration", float),
("dchg_total_capacity", float),
("dchg_average_voltage", float),
("dchg_minimum_voltage", float),
("dchg_maximum_voltage", float),
("dchg_average_current_by_capacity", float),
("dchg_average_current_by_voltage", float),
("dchg_minimum_current", float),
("dchg_maximum_current", float),
("dchg_duration", float),
]
)
class CycleGroup(models.Model):
cell_id = models.IntegerField()
constant_rate = models.FloatField()
end_rate = models.FloatField()
end_rate_prev = models.FloatField()
end_voltage = models.FloatField()
end_voltage_prev = models.FloatField()
polarity = models.CharField(
max_length = 4, choices = POLARITIES, blank = True,
)
class Cycle(models.Model):
cycling_file = models.ForeignKey(CyclingFile, on_delete = models.CASCADE)
cycle_number = models.IntegerField()
def get_offset_cycle(self):
"""
Really important that this only be called when the file is known to be valid!!!
"""
return self.cycle_number + float(
self.cycling_file.database_file.valid_metadata.start_cycle
)
def get_temperature(self):
"""
Really important that this only be called when the file is known to be valid!!!
"""
return float(self.cycling_file.database_file.valid_metadata.temperature)
charge_group = models.ForeignKey(
CycleGroup, null = True,
on_delete = models.SET_NULL, related_name = 'charge_group',
)
discharge_group = models.ForeignKey(
CycleGroup, null = True,
on_delete = models.SET_NULL, related_name = 'discharge_group'
)
valid_cycle = models.BooleanField(default = True)
processed = models.BooleanField(default = False)
chg_total_capacity = models.FloatField(null = True)
chg_average_voltage = models.FloatField(null = True)
chg_minimum_voltage = models.FloatField(null = True)
chg_maximum_voltage = models.FloatField(null = True)
chg_average_current_by_capacity = models.FloatField(null = True)
chg_average_current_by_voltage = models.FloatField(null = True)
chg_minimum_current = models.FloatField(null = True)
chg_maximum_current = models.FloatField(null = True)
chg_duration = models.FloatField(null = True)
dchg_total_capacity = models.FloatField(null = True)
dchg_average_voltage = models.FloatField(null = True)
dchg_minimum_voltage = models.FloatField(null = True)
dchg_maximum_voltage = models.FloatField(null = True)
dchg_average_current_by_capacity = models.FloatField(null = True)
dchg_average_current_by_voltage = models.FloatField(null = True)
dchg_minimum_current = models.FloatField(null = True)
dchg_maximum_current = models.FloatField(null = True)
dchg_duration = models.FloatField(null = True)
def get_first_discharge_step(self):
steps = self.step_set.filter(step_type__contains = "CC_DChg").order_by(
"cycle__cycle_number", "step_number",
)
if len(steps) == 0:
return None
else:
return steps[0]
def get_first_charge_step(self):
steps = self.step_set.filter(
step_type__contains = "CC_Chg"
).order_by("cycle__cycle_number", "step_number")
if len(steps) == 0:
steps = self.step_set.filter(
step_type__contains = "CCCV_Chg"
).order_by("cycle__cycle_number", "step_number")
if len(steps) == 0:
return None
else:
return steps[0]
class Step(models.Model):
cycle = models.ForeignKey(Cycle, on_delete = models.CASCADE)
step_number = models.IntegerField()
step_type = models.CharField(max_length = 200)
start_time = models.DateTimeField()
second_accuracy = models.BooleanField()
total_capacity = models.FloatField(null = True)
average_voltage = models.FloatField(null = True)
minimum_voltage = models.FloatField(null = True)
maximum_voltage = models.FloatField(null = True)
average_current_by_capacity = models.FloatField(null = True)
average_current_by_voltage = models.FloatField(null = True)
minimum_current = models.FloatField(null = True)
maximum_current = models.FloatField(null = True)
duration = models.FloatField(null = True)
constant_voltage = models.FloatField(null = True)
end_voltage = models.FloatField(null = True)
end_voltage_prev = models.FloatField(null = True)
constant_current = models.FloatField(null = True)
end_current = models.FloatField(null = True)
end_current_prev = models.FloatField(null = True)
"""
numpy list, float, voltages (V)
numpy list, float, currents (mA)
numpy list, float, capacities (mAh)
numpy list, float, absolute times (h), delta t between now and the first cycle.
"""
v_c_q_t_data = models.BinaryField(null = True)
def get_v_c_q_t_data(self):
return pickle.loads(base64.decodebytes(self.v_c_q_t_data))
def set_v_c_q_t_data(self, v_c_q_t_data):
np_bytes = pickle.dumps(v_c_q_t_data)
np_base64 = base64.b64encode(np_bytes)
self.v_c_q_t_data = np_base64
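
# --- Illustrative sketch (not part of the original module) ---
# The grid helpers above (make_voltage_grid, make_current_grid,
# make_temperature_grid) all clamp the observed extrema and then build a
# uniform grid between them. The snippet below reproduces that spacing with
# clamp() and checks it against numpy.linspace, without touching the database.
# The numbers are made up for illustration only.
if __name__ == "__main__":
    lo, hi, n = 2.5, 4.3, 5
    observed_max = clamp(lo, 4.5, hi)  # clamped down to 4.3
    observed_min = clamp(lo, 2.9, hi)  # stays at 2.9
    delta = (observed_max - observed_min) / float(n - 1)
    grid = numpy.array([observed_min + delta * float(i) for i in range(n)])
    assert numpy.allclose(grid, numpy.linspace(observed_min, observed_max, n))
    print(grid)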
|
#!/usr/bin/python
import sys
import os
import re
import codecs
import operator
import datetime
import nltk
import warnings
from unidecode import unidecode
def usage():
print '''
tokenize a directory of text and count unigrams.
usage:
%s input_dir ../data/english_wikipedia.txt
input_dir is the root directory where sentence files live. Each file should contain
one sentence per line, with punctuation. This script will walk the directory recursively,
looking for text files. For each text file, it will tokenize each sentence into words and
add them to a global unigram count, outputted to output.txt of the form:
word count
word count
...
in descending order of count.
For speed, tokenization is done w/ Penn Treebank regexes via nltk's port:
http://www.cis.upenn.edu/~treebank/tokenizer.sed
http://www.nltk.org/api/nltk.tokenize.html#module-nltk.tokenize.treebank
For input sentences, this script allows for the format output by WikiExtractor.py
https://github.com/attardi/wikiextractor
That is,
- lines starting with <doc... are ignored
- lines starting with </doc> are ignored
- blank lines are ignored
To obtain wikipedia dumps, visit: https://dumps.wikimedia.org/enwiki
And download the file ending in '-pages-articles.xml.bz2'. This includes wikipedia pages
and articles but not previous revisions, edit history, and metadata.
Then run:
./WikiExtractor.py -o en_sents --no-templates enwiki-20151002-pages-articles.xml.bz2
''' % sys.argv[0]
SENTENCES_PER_BATCH = 500000 # after each batch, delete all counts with count == 1 (hapax legomena)
PRE_SORT_CUTOFF = 300 # before sorting, discard all words with less than this count
ALL_NON_ALPHA = re.compile(r'^[\W\d]*$', re.UNICODE)
SOME_NON_ALPHA = re.compile(r'[\W\d]', re.UNICODE)
class TopTokenCounter(object):
def __init__(self):
self.count = {}
self.legomena = set()
self.discarded = set()
def add_tokens(self, tokens, split_hyphens=True):
for token in tokens:
# add eg 'marxist-leninist' as two tokens instead of one
if split_hyphens and token.count('-') in [1, 2]:
for subtoken in token.split('-'):
self.add_token(subtoken)
else:
self.add_token(token)
def add_token(self, token):
if not self.should_include(token):
self.discarded.add(token)
return
token = self.normalize(token)
if token in self.count:
self.legomena.discard(token)
self.count[token] += 1
else:
self.legomena.add(token)
self.count[token] = 1
def should_include(self, token):
if len(token) < 2:
return False
if len(token) <= 2 and SOME_NON_ALPHA.search(token):
# B., '', (), ...
return False
if ALL_NON_ALPHA.match(token):
# 1,000, <<>>, ...
return False
if token.startswith('/'):
# eg //en.wikipedia.org/wiki, /doc
return False
if token.endswith('='):
# id=, title=, ...
return False
return True
def normalize(self, token):
return token.lower()
def batch_prune(self):
for token in self.legomena:
del self.count[token]
self.legomena = set()
def pre_sort_prune(self):
under_cutoff = set()
for token, count in self.count.iteritems():
if count < PRE_SORT_CUTOFF:
under_cutoff.add(token)
for token in under_cutoff:
del self.count[token]
self.legomena = set()
def get_sorted_pairs(self):
return sorted(self.count.items(), key=operator.itemgetter(1), reverse=True)
def get_ts(self):
return datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")
def get_stats(self):
ts = self.get_ts()
return "%s keys(count): %d" % (ts, len(self.count))
def main(input_dir_str, output_filename):
counter = TopTokenCounter()
print counter.get_ts(), 'starting...'
lines = 0
for root, dirs, files in os.walk(input_dir_str, topdown=True):
if not files:
continue
for fname in files:
path = os.path.join(root, fname)
for line in codecs.open(path, 'r', 'utf8'):
with warnings.catch_warnings():
                    # unidecode() occasionally (rarely but enough to clog terminal output)
# complains about surrogate characters in some wikipedia sentences.
# ignore those warnings.
warnings.simplefilter('ignore')
line = unidecode(line)
tokens = nltk.word_tokenize(line)
counter.add_tokens(tokens)
lines += 1
if lines % SENTENCES_PER_BATCH == 0:
counter.batch_prune()
print counter.get_stats()
print 'processing: %s' % path
print counter.get_stats()
print 'deleting tokens under cutoff of', PRE_SORT_CUTOFF
counter.pre_sort_prune()
print 'done'
print counter.get_stats()
print counter.get_ts(), 'sorting...'
sorted_pairs = counter.get_sorted_pairs()
print counter.get_ts(), 'done'
print 'writing...'
with codecs.open(output_filename, 'w', 'utf8') as f:
for token, count in sorted_pairs:
f.write('%-18s %d\n' % (token, count))
sys.exit(0)
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(0)
else:
main(*sys.argv[1:])
|
_base_ = './retinanet_r50_fpn_1x_bdd100k.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
|
"""Tests for inmcm4 fixes."""
import unittest
from iris.cube import Cube
from cf_units import Unit
from esmvalcore.cmor.fix import Fix
from esmvalcore.cmor._fixes.cmip5.inmcm4 import Gpp, Lai, Nbp
class TestGpp(unittest.TestCase):
"""Test gpp fixes."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='gpp', units='J')
self.fix = Gpp()
def test_get(self):
"""Test fix get"""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'INMCM4', 'gpp'), [Gpp()])
    def test_fix_data(self):
        """Test data fix."""
cube = self.fix.fix_data(self.cube)
self.assertEqual(cube.data[0], -1)
self.assertEqual(cube.units, Unit('J'))
class TestLai(unittest.TestCase):
"""Test lai fixes."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='lai', units='J')
self.fix = Lai()
def test_get(self):
"""Test fix get"""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'INMCM4', 'lai'), [Lai()])
def test_fix_data(self):
"""Test data fix."""
cube = self.fix.fix_data(self.cube)
self.assertEqual(cube.data[0], 1.0 / 100.0)
self.assertEqual(cube.units, Unit('J'))
class TestNbp(unittest.TestCase):
"""Tests for nbp."""
def setUp(self):
"""Prepare tests."""
self.cube = Cube([1.0], var_name='nbp')
self.fix = Nbp()
def test_get(self):
"""Test fix get"""
self.assertListEqual(
Fix.get_fixes('CMIP5', 'INMCM4', 'nbp'), [Nbp()])
def test_fix_metadata(self):
"""Test fix on nbp files to set standard_name."""
new_cube = self.fix.fix_metadata([self.cube])[0]
self.assertEqual(
new_cube.standard_name,
'surface_net_downward_mass_flux_of_carbon_dioxide_'
'expressed_as_carbon_due_to_all_land_processes')
|
#
# Generated by erpcgen 1.7.3 on Tue Mar 24 16:58:24 2020.
#
# AUTOGENERATED - DO NOT EDIT
#
import erpc
from . import common, interface
# Client for Bootloader
class BootloaderClient(interface.IBootloader):
def __init__(self, manager):
super(BootloaderClient, self).__init__()
self._clientManager = manager
def bl_ping(self):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_PING_ID,
sequence=request.sequence,
protocol=0))
# Send request and process reply.
self._clientManager.perform_request(request)
# TODO: It looks like this doesn't respect the max_length value - I'm surprised, I thought this would enforce it on the sender side...
# TODO: Enforce max length
# TODO: The generator code uses a 32-bit value for the list index regardless of the actual type of the variable (e.g. if you use a u8 as the length it still encodes a u32 in the Python)
def bl_writePageBuffer(self, offset, data):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_WRITEPAGEBUFFER_ID,
sequence=request.sequence,
protocol=0))
# XXX: LOGAN: Insert padding to make this aligned
# XXX: codec.write_uint8(0x00)
# LOGAN: Manually packing by rearranging arguments
if data is None:
raise ValueError("data is None")
codec.write_uint8(len(data))
if offset is None:
raise ValueError("offset is None")
codec.write_uint16(offset)
# Write list
for _i0 in data:
codec.write_uint8(_i0)
# Send request and process reply.
self._clientManager.perform_request(request)
_result = codec.read_int8()
return _result
def bl_erasePageBuffer(self):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_ERASEPAGEBUFFER_ID,
sequence=request.sequence,
protocol=0))
# Send request and process reply.
self._clientManager.perform_request(request)
def bl_eraseApp(self, app_id):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_ERASEAPP_ID,
sequence=request.sequence,
protocol=0))
if app_id is None:
raise ValueError("app_id is None")
# LOGAN: Manually resize enum to u8
codec.write_uint8(app_id)
# Send request and process reply.
self._clientManager.perform_request(request)
_result = codec.read_int8()
return _result
def bl_writePage(self, app_id, page_no, crc):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_WRITEPAGE_ID,
sequence=request.sequence,
protocol=0))
if app_id is None:
raise ValueError("app_id is None")
# XXX: LOGAN: Insert padding to make this aligned
codec.write_uint8(app_id)
# LOGAN: Reorder arguments for alignment
if crc is None:
raise ValueError("crc is None")
codec.write_uint32(crc)
if page_no is None:
raise ValueError("page_no is None")
codec.write_uint16(page_no)
# Send request and process reply.
self._clientManager.perform_request(request)
_result = codec.read_int8()
return _result
def bl_setBootAction(self, action):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_SETBOOTACTION_ID,
sequence=request.sequence,
protocol=0))
if action is None:
raise ValueError("action is None")
# LOGAN: Manually resize enum to u8
codec.write_uint8(action)
# Send request and process reply.
self._clientManager.perform_request(request)
_result = codec.read_int8()
return _result
def bl_boot(self):
# Build remote function invocation message.
request = self._clientManager.create_request()
codec = request.codec
codec.start_write_message(erpc.codec.MessageInfo(
type=erpc.codec.MessageType.kSingleNormal,
service=self.SERVICE_ID,
request=self.BL_BOOT_ID,
sequence=request.sequence,
protocol=0))
# Send request and process reply.
self._clientManager.perform_request(request)
_result = codec.read_int8()
return _result
|
#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5
import math
import torch.nn as nn
import torch
class Hyper_synthesis(nn.Module):
def __init__(self, num_filters=128):
super(Hyper_synthesis, self).__init__()
self.conv1 = nn.ConvTranspose2d(num_filters, num_filters, 3, stride=1, padding=1)
self.leaky_relu1 = nn.LeakyReLU()
self.conv2 = nn.ConvTranspose2d(num_filters, num_filters, 3, stride=2, padding=1, output_padding=1)
self.leaky_relu2 = nn.LeakyReLU()
self.conv3 = nn.ConvTranspose2d(num_filters, int(num_filters*1.5), 3, stride=1, padding=1)
self.leaky_relu3 = nn.LeakyReLU()
self.conv4 = nn.ConvTranspose2d(int(num_filters*1.5), int(num_filters*1.5), 3, stride=2, padding=1, output_padding=1)
self.leaky_relu4 = nn.LeakyReLU()
self.conv5 = nn.ConvTranspose2d(int(num_filters*1.5), num_filters*2, 3, stride=1, padding=1)
def forward(self, x):
x = self.leaky_relu1(self.conv1(x))
x = self.leaky_relu2(self.conv2(x))
x = self.leaky_relu3(self.conv3(x))
x = self.leaky_relu4(self.conv4(x))
x = self.conv5(x)
return x
if __name__ == "__main__":
hyper_synthesis = Hyper_synthesis()
input_image = torch.zeros([1,128, 8, 12])
result = hyper_synthesis(input_image)
print("result: ", result.shape)
|
from motion import Motion
from message_receiver import MessageReceiver
from image_slicer import ImageSender
from remote_control import RemoteControl
from config import Config
remote_control = RemoteControl(Config.ws_connection_url)
ImageSender(remote_control)
MessageReceiver(remote_control, Motion())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_experiment
----------------------------------
Tests for `Experiment` class provided in pypsych.experiment module.
"""
import unittest
import pandas as pd
import numpy as np
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from pkg_resources import resource_filename
from pypsych.config import Config
from pypsych.schedule import Schedule
from pypsych.experiment import Experiment
class ExperimentLoadingTestCases(unittest.TestCase):
"""
Asserts that bad config and schedule yaml files cannot be loaded.
"""
def setUp(self):
# Load a config and a schedule
self.config_path = resource_filename('tests.config', 'config.yaml')
self.schedule_path = resource_filename('tests.schedule',
'schedule.yaml')
self.data_path = 'tests/data'
def test_create_experiment(self):
"""Should not throw errors when correct configuration is loaded."""
experiment = Experiment(config_path=self.config_path,
schedule_path=self.schedule_path,
data_path=self.data_path)
# Check that biopac instance matches
self.assertIsInstance(experiment, Experiment)
experiment.load()
def test_compile_experiment(self):
"""Test that compilation works."""
experiment = Experiment(config_path=self.config_path,
schedule_path=self.schedule_path,
data_path=self.data_path)
experiment.load()
experiment.compile()
class ExperimentProcessingTestCases(unittest.TestCase):
"""
Asserts that experiment can consume a schedule.
"""
def setUp(self):
# Load a config and a schedule
self.config_path = resource_filename('tests.config', 'config.yaml')
self.schedule_path = resource_filename('tests.schedule',
'schedule.yaml')
self.data_path = 'tests/data'
self.experiment = Experiment(config_path=self.config_path,
schedule_path=self.schedule_path,
data_path=self.data_path)
self.experiment.load()
self.experiment.compile()
def test_process_experiment(self):
"""Consume a schedule."""
self.experiment.process()
if __name__ == '__main__':
unittest.main()
|
from numpy import diagonal, transpose
from modules.helpers import anti_diagonal, noop
SIGN = {
'0': 'X',
'X': '0'
}
WINNING_FUNCTIONS = [diagonal, anti_diagonal, transpose, noop]
|
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import unittest
import subprocess
tool_directory = '../../tools/LTE/scripts'
class smoke_ledger(unittest.TestCase):
def test_FAB_9708_LevelDB_VaryNumTxs(self):
'''
In this smoke test, we conduct a single, hopefully short-lived,
test-run to verify LTE with LevelDB is working
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_Smoke_LevelDB_VaryNumTxs.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_smoke_CI.sh "
"varyNumTxs",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="VaryNumTxs "
"performance test failed. \nPlease check the logfile "
+logfile.name+" for more details.")
def test_FAB_9708_CouchDB_VaryNumTxs(self):
'''
In this smoke test, we conduct a single, hopefully short-lived,
test-run to verify LTE with CouchDB is working
Passing criteria: Underlying LTE test completed successfully with
exit code 0
'''
logfile = open("output_Smoke_CouchDB_VaryNumTxs.log", "w")
returncode = subprocess.call(
"./runbenchmarks.sh -f parameters_couchdb_smoke_CI.sh "
"varyNumTxs",
shell=True, stderr=subprocess.STDOUT, stdout=logfile,
cwd=tool_directory)
logfile.close()
self.assertEqual(returncode, 0, msg="VaryNumTxs "
"performance test failed. \nPlease check the logfile "
+logfile.name+" for more details.")
|
from abaqusConstants import *
class Cell:
"""Cells are volumetric regions of geometry.
Attributes
----------
index: int
An Int specifying the index of the cell in the CellArray.
isReferenceRep: Boolean
A Boolean specifying whether the cell belongs to the reference representation of the
Part or Instance.
pointOn: float
A tuple of Floats specifying the **X**-, **Y**-, and **Z**-coordinates of a point located on
the cell.
    featureName: str
        A String specifying the name of the feature that created this cell.
    instanceName: str
        A String specifying the name of the part instance for this cell (if
        applicable).
Notes
-----
This object can be accessed by:
.. code-block:: python
import part
mdb.models[name].parts[name].allInternalSets[name].cells[i]
mdb.models[name].parts[name].allSets[name].cells[i]
mdb.models[name].parts[name].cells[i]
mdb.models[name].parts[name].sets[name].cells[i]
import assembly
mdb.models[name].rootAssembly.allInstances[name].cells[i]
mdb.models[name].rootAssembly.allInstances[name].sets[name].cells[i]
mdb.models[name].rootAssembly.allInternalSets[name].cells[i]
mdb.models[name].rootAssembly.allSets[name].cells[i]
mdb.models[name].rootAssembly.instances[name].cells[i]
mdb.models[name].rootAssembly.instances[name].sets[name].cells[i]
mdb.models[name].rootAssembly.modelInstances[i].sets[name].cells[i]
mdb.models[name].rootAssembly.sets[name].cells[i]
"""
# An Int specifying the index of the cell in the CellArray.
index: int = None
# A Boolean specifying whether the cell belongs to the reference representation of the
# Part or Instance.
isReferenceRep: Boolean = OFF
# A tuple of Floats specifying the *X*-, *Y*-, and *Z*-coordinates of a point located on
# the cell.
pointOn: float = None
    # A String specifying the name of the feature that created this cell.
    featureName: str = None
    # A String specifying the name of the part instance for this cell (if
    # applicable).
    instanceName: str = None
def getSize(self, printResults: Boolean = True):
"""This method returns a Float indicating the volume of the cell.
Parameters
----------
printResults
A Boolean that determines whether a verbose output is to be printed. The default is
True.
Returns
-------
A Float.
"""
pass
def getFaces(self):
"""This method returns a sequence consisting of the face IDs of the faces which bound the
cell.
Returns
-------
A tuple of integers.
"""
pass
def getEdges(self):
"""This method returns a sequence consisting of the edge IDs of the edges on the cell.
Returns
-------
A tuple of integers.
"""
pass
def getVertices(self):
"""This method returns a sequence consisting of the vertex IDs of the vertices on the cell.
Returns
-------
A tuple of integers.
"""
pass
def getAdjacentCells(self):
"""This method returns an array of cell objects that share at least one face of the cell.
Returns
-------
A CellArray object which is a sequence of Cell objects.
"""
pass
def getNodes(self):
"""This method returns an array of node objects that are associated with the cell.
Returns
-------
A MeshNodeArray object which is a sequence of MeshNode objects.
"""
pass
def getElements(self):
"""This method returns an array of element objects that are associated with the cell.
Returns
-------
A MeshElementArray object which is a sequence of MeshElement objects.
"""
pass
def getCADAttributes(self):
"""This method returns an array of CAD attribute strings associated with the cell when the
part was created from CAD data.
Returns
-------
An array of String.
"""
pass
|
# import keras
import collections
import numpy as np
from tensorflow import keras
from utils.compute_overlap import compute_overlap
class Anchor(object):
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level = 3,
max_level = 7,
num_scales = None,
scales=np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]),
aspect_ratios = np.array([1, 1/2, 2.,1/3,3,1/6,6,1/9,9]),
anchor_size = 4,
image_size = (1024,1024)):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
            on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
        aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
            For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
[height, width] of the input image size.The image_size should be divided
by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.scales = scales
self.aspect_ratios = aspect_ratios
        self.anchor_size = anchor_size
self.image_size = image_size
self._boxes = self.multi_level_generator()
self._anchor_boundaries = self._compute_anchor_boundaries()
def single_level_generator(self,level):
aspect_ratios_sqrt = np.sqrt(self.aspect_ratios)
aspect_ratios_sqrt = np.expand_dims(aspect_ratios_sqrt,axis=0)
if self.num_scales:
scales = np.expand_dims(np.arange(self.num_scales),axis=1)
            intermediate_scale = (2 ** (scales / self.num_scales)).astype(np.float32)
        else:
            intermediate_scale = np.expand_dims(self.scales, axis=1)
        stride = 2 ** level
        k = intermediate_scale.shape[0] * len(self.aspect_ratios)
        base_anchor_size = self.anchor_size * stride * intermediate_scale
half_anchor_heights = np.matmul(base_anchor_size , 1/aspect_ratios_sqrt)*.5
half_anchor_widths = np.matmul(base_anchor_size , aspect_ratios_sqrt)*.5
half_anchor_heights = np.reshape(half_anchor_heights, [1, 1, k])
half_anchor_widths = np.reshape(half_anchor_widths, [1, 1, k])
cx = np.arange(0.5 * stride, self.image_size[0], stride)
cy = np.arange(0.5 * stride, self.image_size[1], stride)
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid = np.expand_dims(cx_grid, axis=-1)
cy_grid = np.expand_dims(cy_grid, axis=-1)
boxes =np.stack([cx_grid - half_anchor_widths, cy_grid - half_anchor_heights,
cx_grid + half_anchor_widths, cy_grid + half_anchor_heights],
axis=-1)
return boxes.astype(np.float32)
def unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
unpacked_labels = collections.OrderedDict()
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = self.image_size[0] // 2**level
feat_size_x = self.image_size[1] // 2**level
steps = feat_size_y * feat_size_x * self.anchors_per_location
unpacked_labels[level] = np.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
def multi_level_generator(self):
return np.concatenate([np.reshape(self.single_level_generator(level),
[-1,self.single_level_generator(level).shape[-1]])
for level in range(self.min_level, self.max_level + 1) ],axis=0)
@property
def boxes(self):
return self._boxes
@property
def anchor_boundaries(self):
return self._anchor_boundaries
@property
def anchors_per_location(self):
if self.num_scales:
return self.num_scales * len(self.aspect_ratios)
else:
return self.scales.shape[0] * len(self.aspect_ratios)
def _compute_anchor_boundaries(self):
boundaries = [0]
for i in range(3, 8):
num_anchors = (
np.math.ceil(self.image_size[0] / 2**i) *
np.math.ceil(self.image_size[1] / 2**i) * self.anchors_per_location)
boundaries += [boundaries[-1] + num_anchors]
return boundaries
class AnchorParameters:
"""
The parameters that define how anchors are generated.
Args
sizes : List of sizes to use. Each size corresponds to one feature level.
strides : List of strides to use. Each stride correspond to one feature level.
ratios : List of ratios to use per location in a feature map.
scales : List of scales to use per location in a feature map.
"""
def __init__(self, sizes=(32, 64, 128, 256, 512),
strides=(8, 16, 32, 64, 128),
ratios=(1, 0.5, 2),
scales=(2 ** 0, 2 ** (1. / 3.), 2 ** (2. / 3.))):
self.sizes = sizes
self.strides = strides
self.ratios = np.array(ratios, dtype=keras.backend.floatx())
self.scales = np.array(scales, dtype=keras.backend.floatx())
def num_anchors(self):
return len(self.ratios) * len(self.scales)
"""
The default anchor parameters.
"""
AnchorParameters.default = AnchorParameters(
sizes=[32, 64, 128, 256, 512],
strides=[8, 16, 32, 64, 128],
# ratio=h/w
ratios=np.array([1., 1/2, 2., 1/4, 4., 1/8, 8. ], keras.backend.floatx()),
scales=np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx()),)
# AnchorParameters.default = AnchorParameters(
# sizes=[32, 64, 128, 256, 512],
# strides=[8, 16, 32, 64, 128],
# # ratio=h/w
# ratios=np.array([1,1/2,2,1/3,3,1/6,6,1/9,9], keras.backend.floatx()),
# scales=np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx()),)
# AnchorParameters.default = AnchorParameters(
# sizes=[16, 32, 64, 128, 256],
# strides=[8, 16, 32, 64, 128],
# # ratio=h/w
# ratios=np.array([.2,.3,.5,1.,2,3,5], keras.backend.floatx()),
# scales=np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0),2], keras.backend.floatx()),
# )
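
# --- Illustrative sketch (not part of the original module) ---
# With the default AnchorParameters above, every feature-map location gets
# len(ratios) * len(scales) = 7 * 3 = 21 anchors. A quick hedged check:
if __name__ == "__main__":
    print(AnchorParameters.default.num_anchors())  # expected: 21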
def anchor_targets_bbox(
anchors,
image_group,
annotations_group,
num_classes,
negative_overlap=0.4,
positive_overlap=0.5,
detect_quadrangle=False
):
"""
Generate anchor targets for bbox detection.
Args
anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
image_group: List of BGR images.
annotations_group: List of annotations (np.array of shape (N, 5) for (x1, y1, x2, y2, label)).
num_classes: Number of classes to predict.
        detect_quadrangle: If True, the regression targets also include the angle values from the annotations.
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
labels_batch: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
where N is the number of anchors for an image and the last column defines the anchor state
(-1 for ignore, 0 for bg, 1 for fg).
regression_batch: batch that contains bounding-box regression targets for an image & anchor states
(np.array of shape (batch_size, N, 4 + 1), where N is the number of anchors for an image,
the first 4 columns define regression targets for (x1, y1, x2, y2) and the last column defines
anchor states (-1 for ignore, 0 for bg, 1 for fg).
"""
assert (len(image_group) == len(annotations_group)), "The length of the images and annotations need to be equal."
assert (len(annotations_group) > 0), "No data received to compute anchor targets for."
for annotations in annotations_group:
assert ('bboxes' in annotations), "Annotations should contain bboxes."
assert ('labels' in annotations), "Annotations should contain labels."
batch_size = len(image_group)
if detect_quadrangle:
regression_batch = np.zeros((batch_size, anchors.shape[0], 5 + 1), dtype=np.float32)
else:
regression_batch = np.zeros((batch_size, anchors.shape[0], 4 + 1), dtype=np.float32)
labels_batch = np.zeros((batch_size, anchors.shape[0], num_classes + 1), dtype=np.float32)
# compute labels and regression targets
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
if annotations['bboxes'].shape[0]:
# obtain indices of gt annotations with the greatest overlap
# argmax_overlaps_inds: id of ground truth box has greatest overlap with anchor
# (N, ), (N, ), (N, ) N is num_anchors
positive_indices, ignore_indices, argmax_overlaps_inds = compute_gt_annotations(anchors,
annotations['bboxes'],
negative_overlap,
positive_overlap)
labels_batch[index, ignore_indices, -1] = -1
labels_batch[index, positive_indices, -1] = 1
regression_batch[index, ignore_indices, -1] = -1
regression_batch[index, positive_indices, -1] = 1
# compute target class labels
labels_batch[
index, positive_indices, annotations['labels'][argmax_overlaps_inds[positive_indices]].astype(int)] = 1
# regression_batch[index, :, :4] = bbox_transform(anchors, annotations['bboxes'][argmax_overlaps_inds, :])
regression_batch[index, :, :4] = annotations['bboxes'][argmax_overlaps_inds, :]
if detect_quadrangle:
regression_batch[index, :, 4:5] = annotations['angles'][argmax_overlaps_inds, :]
# regression_batch[index, :, 4:6] = annotations['alphas'][argmax_overlaps_inds, :]
# regression_batch[index, :, 6] = annotations['ratios'][argmax_overlaps_inds]
# regression_batch[index, :, 6:10] = annotations['bboxes-q'][argmax_overlaps_inds, :]
# ignore anchors outside of image
if image.shape:
anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
indices = np.logical_or(anchors_centers[:, 0] >= image.shape[1], anchors_centers[:, 1] >= image.shape[0])
labels_batch[index, indices, -1] = -1
regression_batch[index, indices, -1] = -1
return labels_batch, regression_batch
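# Hedged usage sketch (not part of the original module). It only illustrates the
# expected input layout; `compute_overlap` must be importable for the call to work,
# and the image/box values below are made up:
#
#   image = np.zeros((512, 512, 3), dtype=np.uint8)
#   annotations = {'bboxes': np.array([[10., 20., 110., 220.]]),  # (x1, y1, x2, y2)
#                  'labels': np.array([0])}
#   anchors = anchors_for_shape(image.shape[:2])
#   labels_batch, regression_batch = anchor_targets_bbox(
#       anchors, [image], [annotations], num_classes=1)
#   # labels_batch[0, :, -1] is the anchor state: -1 ignore, 0 background, 1 foreground.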
def compute_gt_annotations(
anchors,
annotations,
negative_overlap=0.4,
positive_overlap=0.5
):
"""
Obtain indices of gt annotations with the greatest overlap.
Args
anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
annotations: np.array of shape (K, 5) for (x1, y1, x2, y2, label).
negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
positive_indices: indices of positive anchors, (N, )
ignore_indices: indices of ignored anchors, (N, )
        argmax_overlaps_inds: index of the ground-truth box with the highest overlap for each anchor, (N, )
"""
# (N, K)
overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
# (N, )
argmax_overlaps_inds = np.argmax(overlaps, axis=1)
# (N, )
max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
# assign "dont care" labels
# (N, )
positive_indices = max_overlaps >= positive_overlap
    # in case some gt boxes have no matched positive anchors, force-match each gt box to its best-overlapping anchor
nonzero_inds = np.nonzero(overlaps == np.max(overlaps, axis=0))
positive_indices[nonzero_inds[0]] = 1
# (N, )
ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices
return positive_indices, ignore_indices, argmax_overlaps_inds
def layer_shapes(image_shape, model):
"""
Compute layer shapes given input image shape and the model.
Args
image_shape: The shape of the image.
model: The model to use for computing how the image shape is transformed in the pyramid.
Returns
A dictionary mapping layer names to image shapes.
"""
shape = {
model.layers[0].name: (None,) + image_shape,
}
for layer in model.layers[1:]:
nodes = layer._inbound_nodes
for node in nodes:
input_shapes = [shape[inbound_layer.name] for inbound_layer in node.inbound_layers]
if not input_shapes:
continue
shape[layer.name] = layer.compute_output_shape(input_shapes[0] if len(input_shapes) == 1 else input_shapes)
return shape
def make_shapes_callback(model):
"""
Make a function for getting the shape of the pyramid levels.
"""
def get_shapes(image_shape, pyramid_levels):
shape = layer_shapes(image_shape, model)
image_shapes = [shape["P{}".format(level)][1:3] for level in pyramid_levels]
return image_shapes
return get_shapes
def guess_shapes(image_shape, pyramid_levels):
"""
Guess shapes based on pyramid levels.
Args
image_shape: The shape of the image.
pyramid_levels: A list of what pyramid levels are used.
Returns
A list of image shapes at each pyramid level.
"""
image_shape = np.array(image_shape[:2])
image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]
return image_shapes
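# Hedged worked example (not part of the original module): for a 480x640 image and
# the default pyramid levels, guess_shapes ceil-divides the spatial size by 2**level:
#
#   guess_shapes((480, 640, 3), [3, 4, 5, 6, 7])
#   # -> [array([60, 80]), array([30, 40]), array([15, 20]), array([8, 10]), array([4, 5])]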
def anchors_for_shape(
image_shape,
pyramid_levels=None,
anchor_params=None,
shapes_callback=None,
):
"""
    Generate anchors for a given image shape.
Args
image_shape: The shape of the image.
pyramid_levels: List of ints representing which pyramids to use (defaults to [3, 4, 5, 6, 7]).
anchor_params: Struct containing anchor parameters. If None, default values are used.
shapes_callback: Function to call for getting the shape of the image at different pyramid levels.
Returns
np.array of shape (N, 4) containing the (x1, y1, x2, y2) coordinates for the anchors.
"""
if pyramid_levels is None:
pyramid_levels = [3, 4, 5, 6, 7]
if anchor_params is None:
anchor_params = AnchorParameters.default
if shapes_callback is None:
shapes_callback = guess_shapes
feature_map_shapes = shapes_callback(image_shape, pyramid_levels)
# compute anchors over all pyramid levels
all_anchors = np.zeros((0, 4), dtype=np.float32)
for idx, p in enumerate(pyramid_levels):
anchors = generate_anchors(
base_size=anchor_params.sizes[idx],
ratios=anchor_params.ratios,
scales=anchor_params.scales
)
shifted_anchors = shift(feature_map_shapes[idx], anchor_params.strides[idx], anchors)
all_anchors = np.append(all_anchors, shifted_anchors, axis=0)
return all_anchors.astype(np.float32)
def shift(feature_map_shape, stride, anchors):
"""
Produce shifted anchors based on shape of the map and stride size.
Args
feature_map_shape : Shape to shift the anchors over.
stride : Stride to shift the anchors with over the shape.
anchors: The anchors to apply at each location.
"""
# create a grid starting from half stride from the top left corner
shift_x = (np.arange(0, feature_map_shape[1]) + 0.5) * stride
shift_y = (np.arange(0, feature_map_shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel()
)).transpose()
A = anchors.shape[0]
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
return all_anchors
def generate_anchors(base_size=16, ratios=None, scales=None):
"""
Generate anchor (reference) windows by enumerating aspect ratios X scales w.r.t. a reference window.
Args:
base_size:
ratios:
scales:
Returns:
"""
if ratios is None:
ratios = AnchorParameters.default.ratios
if scales is None:
scales = AnchorParameters.default.scales
num_anchors = len(ratios) * len(scales)
# initialize output anchors
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(np.repeat(scales, len(ratios))[None], (2, 1)).T
areas = anchors[:, 2] * anchors[:, 3]
# correct for ratios
anchors[:, 2] = np.sqrt(areas / np.tile(ratios, len(scales)))
anchors[:, 3] = anchors[:, 2] * np.tile(ratios, len(scales))
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
return anchors
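# Hedged note (not part of the original module): with the 7 ratios and 3 scales in
# AnchorParameters.default above, generate_anchors returns a (7 * 3, 4) = (21, 4)
# array of (x1, y1, x2, y2) windows centred on the origin, e.g.
#
#   base = generate_anchors(base_size=32)   # shape (21, 4)
#   heights = base[:, 3] - base[:, 1]
#   widths = base[:, 2] - base[:, 0]
#   # heights / widths reproduces the configured ratios (ratio = h / w), and
#   # widths * heights equals (32 * scale) ** 2 for the corresponding scale.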
def bbox_transform(anchors, gt_boxes, scale_factors=None):
wa = anchors[:, 2] - anchors[:, 0]
ha = anchors[:, 3] - anchors[:, 1]
cxa = anchors[:, 0] + wa / 2.
cya = anchors[:, 1] + ha / 2.
w = gt_boxes[:, 2] - gt_boxes[:, 0]
h = gt_boxes[:, 3] - gt_boxes[:, 1]
cx = gt_boxes[:, 0] + w / 2.
cy = gt_boxes[:, 1] + h / 2.
# Avoid NaN in division and log below.
ha += 1e-7
wa += 1e-7
h += 1e-7
w += 1e-7
tx = (cx - cxa) / wa
ty = (cy - cya) / ha
tw = np.log(w / wa)
th = np.log(h / ha)
if scale_factors:
ty /= scale_factors[0]
tx /= scale_factors[1]
th /= scale_factors[2]
tw /= scale_factors[3]
targets = np.stack([ty, tx, th, tw], axis=1)
return targets
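# Hedged sketch (not part of the original module): bbox_transform implements the
# usual anchor-relative box encoding
#   tx = (cx - cxa) / wa,  ty = (cy - cya) / ha,  tw = log(w / wa),  th = log(h / ha)
# and returns the targets in (ty, tx, th, tw) order. For example:
#
#   anchors = np.array([[0., 0., 100., 100.]])
#   gt = np.array([[10., 10., 110., 110.]])
#   bbox_transform(anchors, gt)
#   # -> approximately [[0.1, 0.1, 0.0, 0.0]]  (shifted by 10 px, same size)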
|
#!/usr/bin/env python3
import io
import numpy as np
import cv2
import fcntl, os
from PIL import Image
import pycrow as crow
import threading
crow.create_udpgate(12, 10011)
crow.set_crowker(".12.192.168.1.93:10009")
thr = threading.Thread(target=crow.spin, args=())
thr.start()
#crow.diagnostic_enable()
data = None
def handler(pack):
global data
data = pack.data()
#print(data[-40:-1])
crow.subscribe("video_stream", handler, qos=0, ackquant=200, rqos=0, rackquant=200)
while(True):
if (data is not None):
#print(len(data))
tmpFile = io.BytesIO()
tmpFile.write(data)
tmpFile.seek(0)
img = Image.open(tmpFile)
I = np.asarray(img)
        I = cv2.flip(I, -1)
#I = cv2.resize(I,(int(640), int(480)))
I = cv2.resize(I,(int(640*3/2), int(480*3/2)))
cv2.imshow('frame',I)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
|
###################################################################################
# Nosetest settings
###################################################################################
# biothings specific options - these should be identical to the production server
# you are testing for... For example, JSONLD_CONTEXT_URL should point to a file
# with contents identical to the file pointed to by JSONLD_CONTEXT_PATH on the
# production server (if your intention is to test the production server).
JSONLD_CONTEXT_URL = ""
API_VERSION = "v1"
QUERY_ENDPOINT = "query"
ANNOTATION_ENDPOINT = "drugs"
# This is the name of the environment variable to load for testing
HOST_ENVAR_NAME = ""
# This is the URL of the production server, if the above envar can't be loaded, nosetest defaults to this
NOSETEST_DEFAULT_URL = "http://{% base_url %}"
###################################################################################
# Nosetests used in tests.py, fill these in with IDs/queries.
###################################################################################
# This is the test for fields in the {% annotation_endpoint %} object. You should pick an ID
# with a representative set of root level annotations associated with it.
ANNOTATION_OBJECT_ID = ''
# This is the list of expected keys in the JSON object returned for the ID above
ANNOTATION_OBJECT_EXPECTED_ATTRIBUTE_LIST = []
# -----------------------------------------------------------------------------------
# This is a list of IDs (& options) to test a GET to the /{% annotation_endpoint %} endpoint
ANNOTATION_GET_IDS = []
# -----------------------------------------------------------------------------------
# This is a list of dictionaries to test a POST to the /{% annotation_endpoint %} endpoint
ANNOTATION_POST_DATA = []
# -----------------------------------------------------------------------------------
# This is a list of query strings (& options) to test a GET to the /{% query_endpoint %} endpoint
QUERY_GETS = []
# -----------------------------------------------------------------------------------
# This is a list of dictionaries to test a POST to the /{% query_endpoint %} endpoint
QUERY_POST_DATA = []
# -----------------------------------------------------------------------------------
# This is a sample ID that will have non-ascii characters injected into it to test non-ascii
# handling on the /{% annotation_endpoint %} endpoint
ANNOTATION_NON_ASCII_ID = ''
# -----------------------------------------------------------------------------------
# This is a sample query that will have non-ascii characters injected into it to test
# non-ascii handling on the /{% query_endpoint %} endpoint
QUERY_NON_ASCII = ''
# -----------------------------------------------------------------------------------
# This is a sample query to test the callback function
QUERY_CALLBACK_TEST = ''
# -----------------------------------------------------------------------------------
# This is a sample query to test the query size cap. This query should be one that has more than 1000 total hits.
QUERY_SIZE_TEST = ''
# -----------------------------------------------------------------------------------
# This is the minimum number of unique field keys (from /metadata/fields)
MINIMUM_NUMBER_OF_ACCEPTABLE_FIELDS = 0
# -----------------------------------------------------------------------------------
# Some random fields to spot check the return from /metadata/fields
TEST_FIELDS_GET_FIELDS_ENDPOINT = []
# -----------------------------------------------------------------------------------
# Any additional fields added for check_fields subset test
CHECK_FIELDS_SUBSET_ADDITIONAL_FIELDS = []
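# -----------------------------------------------------------------------------------
# Hedged illustration only (the values below are hypothetical placeholders, not real
# IDs or queries for any particular deployment); a filled-in config might look like:
# ANNOTATION_OBJECT_ID = 'example-id-0001'
# ANNOTATION_GET_IDS = ['example-id-0001', 'example-id-0002?fields=name']
# QUERY_GETS = ['name:aspirin', 'name:aspirin&size=5']
# MINIMUM_NUMBER_OF_ACCEPTABLE_FIELDS = 10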
|
import numpy as np
from numpy.fft import fft2, ifft2, fftshift, ifftshift
import skimage.io as io
import matplotlib.pyplot as plt
from skimage.metrics import peak_signal_noise_ratio as compute_psnr
from pypher.pypher import psf2otf
from pdb import set_trace
from fspecial import fspecial_gaussian_2d
img = io.imread('birds_gray.png').astype(float)/255
# Task 2b - Wiener filtering
c = fspecial_gaussian_2d((35, 35), 5.)
cFT = psf2otf(c, img.shape)
# Blur image with kernel
blur = np.real(ifft2(fft2(img) * cFT))
sigmas = [0, 0.001, 0.01, 0.1]
for sigma in sigmas:
# Add noise to blurred image
unfilt = blur + sigma * np.random.randn(*blur.shape)
### Your code here ###
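    # --- Hedged sketch of one possible completion (not the course's reference solution) ---
    # Wiener deconvolution in the Fourier domain, assuming 1/SNR can be approximated
    # by sigma**2 (clamped to a tiny value when sigma == 0, which reduces to inverse filtering).
    snr_inv = max(sigma ** 2, 1e-12)
    wiener_ft = np.conj(cFT) / (np.abs(cFT) ** 2 + snr_inv)
    deblurred = np.real(ifft2(fft2(unfilt) * wiener_ft))
    psnr = compute_psnr(img, np.clip(deblurred, 0, 1), data_range=1)
    print(f'sigma={sigma}: PSNR = {psnr:.2f} dB')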
|
"""Linting module."""
import shlex
import subprocess
from dataclasses import dataclass
from functools import wraps
from typing import Callable, List, Optional, Union, overload
import typer
from typing_extensions import Literal
PACKAGE_NAME = 'roboto'
def run_command(
command: Union[str, List[str]], *args, **kwargs
) -> subprocess.CompletedProcess:
"""Wrapper for subprocess.run to support passing the command as a string."""
split_command = shlex.split(command) if isinstance(command, str) else command
return subprocess.run(split_command, check=True, *args, **kwargs)
def command_output(command: Union[str, List[str]]) -> str:
"""Run a command and get its stdout."""
process = run_command(command, stdout=subprocess.PIPE)
return process.stdout.decode('utf8')
@dataclass
class CommandError:
"""Represent that a given command failed."""
command_line: str
exit_code: int
@dataclass
class CommandSuccess:
"""Represent that a given command ran successfully."""
command_line: str
Result = Union[CommandError, CommandSuccess]
def check_commands(f: Callable[..., List[Result]]) -> Callable[..., List[Result]]:
"""Make a function that returns Results terminate the app if any of them failed."""
@wraps(f)
def _inner(*args, **kwargs) -> List[Result]:
results = f(*args, **kwargs)
failed_results = [r for r in results if isinstance(r, CommandError)]
if failed_results:
for failed in failed_results:
print(
f'Command "{failed.command_line}" failed with error code '
f'{failed.exit_code}.'
)
raise typer.Exit(code=1)
return results
return _inner
@overload
def execute(
command: Union[str, List[str]], *, raise_error: Literal[True] = True
) -> CommandSuccess:
"""Overload for when raise_error is True.
In this case, we never return CommandError (we raise the subprocess
exception)."""
@overload
def execute(command: Union[str, List[str]], *, raise_error: Literal[False]) -> Result:
"""Overload for when raise_error is True.
In this case, we never raise, and instead we return CommandError."""
def execute(command: Union[str, List[str]], *, raise_error: bool = True) -> Result:
"""Echo and run a command."""
command_str = command if isinstance(command, str) else ' '.join(command)
print(f'### Executing: {command_str}')
try:
run_command(command)
except subprocess.CalledProcessError as e:
if raise_error:
raise
return CommandError(command_str, e.returncode)
return CommandSuccess(command_str)
EVERYTHING = [
'roboto',
'tests',
'bot_tester',
'develop.py',
'tasks.py',
]
EXCLUDES: List[str] = []
def apply_excludes(files: List[str]):
"""Apply exclusions to a list of files."""
return [f for f in files if not any(e in f for e in EXCLUDES)]
APP = typer.Typer()
@APP.command()
def install_dev_tools(
ci: bool = typer.Option( # noqa: B008
default=False, help='Avoid installing tools that are unneeded for CI jobs.'
)
):
"""Install development tools."""
extra_deps = ['pre-commit'] if not ci else []
execute(
[
'poetry',
'run',
'pip',
'install',
'black',
'flake8',
'flake8-bugbear',
'isort',
'mypy',
'pylint',
'pylint-quotes',
*extra_deps,
],
)
if not ci:
execute('pre-commit install')
def test(
coverage: bool = typer.Option( # noqa: B008
default=False, help='Generate coverage information.'
),
html: bool = typer.Option( # noqa: B008
default=False, help='Generate an html coverage report.'
),
) -> List[Result]:
"""Run tests."""
coverage_flag = [f'--cov={PACKAGE_NAME}'] if coverage else []
return [
execute(['pytest', *coverage_flag, 'tests'], raise_error=False),
*(coverage_html() if coverage and html else ()),
]
APP.command()(check_commands(test))
def lint(
files: Optional[List[str]] = typer.Argument(default=None,), # noqa: B008
*,
full_report: bool = typer.Option( # noqa: B008
default=True, help='Print detailed reports.'
),
) -> List[Result]:
"""Run all linters.
    If files is omitted, everything is linted.
"""
subject = apply_excludes(files if files else EVERYTHING)
if not subject:
return []
pylint_reports = ['-r', 'y'] if full_report else ['-r', 'n']
return [
execute(['mypy', *subject], raise_error=False),
execute(['flake8', *subject], raise_error=False),
execute(['pylint', *pylint_reports, *subject], raise_error=False),
]
APP.command()(check_commands(lint))
def format( # pylint: disable=redefined-builtin
files: Optional[List[str]] = typer.Argument(default=None), # noqa: B008
check: bool = typer.Option( # noqa: B008
default=False, help='Only checks instead of modifying.'
),
) -> List[Result]:
"""Run all formatters.
    If files is omitted, everything is formatted.
"""
black_check_flag = ['--check'] if check else []
isort_check_flag = ['-c'] if check else []
subject = apply_excludes(files if files else EVERYTHING)
if not subject:
return []
return [
execute(['black', '-q', *black_check_flag, *subject], raise_error=False),
execute(
['isort', '-rc', '-y', '-q', *isort_check_flag, *subject], raise_error=False
),
]
APP.command()(check_commands(format))
def coverage_html():
"""Generate an html coverage report."""
return [
execute('coverage html', raise_error=False),
]
APP.command()(check_commands(coverage_html))
def static_checks() -> List[Result]:
"""Run all static checks over all code."""
return [
*lint([]),
*format([], check=True),
]
APP.command()(check_commands(static_checks))
def all_checks() -> List[Result]:
"""Run all checks (static checks and tests) over all code."""
return [
*static_checks(),
*test(),
]
APP.command()(check_commands(all_checks))
if __name__ == '__main__':
APP()
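# Hedged usage examples (assuming this module is saved as the develop.py / tasks.py
# entry point listed in EVERYTHING above; typer turns underscores into dashes):
#
#   python develop.py install-dev-tools
#   python develop.py lint roboto tests
#   python develop.py format --check
#   python develop.py all-checks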
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def splitListToParts(self, root, k):
        # Count the nodes (the problem guarantees at most 1000 of them).
        cur = root
        for N in range(1001):
            if not cur:
                break
            cur = cur.next
        # Each part gets `width` nodes; the first `remainder` parts get one extra.
        width, remainder = divmod(N, k)
        ans = []
        cur = root
        for i in range(k):
            # Build a copy of the next part behind a dummy head.
            head = write = ListNode(None)
            for j in range(width + (i < remainder)):
                write.next = write = ListNode(cur.val)
                if cur:
                    cur = cur.next
            ans.append(head.next)
        return ans
|
from gpiozero.pins.mock import MockFactory
from gpiozero import Device, LED
import config.ENVIRONMENT as Config
import sqlite3
import sys
from time import sleep
if Config.SHOULD_MOCK_GPIOZERO:
Device.pin_factory = MockFactory()
GPIO_REGISTRY_LED = dict()
for gpioId in Config.GPIO_REGISTRY:
GPIO_REGISTRY_LED[gpioId] = LED(gpioId)
print(f'Observing schedule for {gpioId}')
SQL_GET_GPIO_OPEN_NOW = '''SELECT DISTINCT name FROM schedule_led WHERE DATETIME("now", "localtime") BETWEEN open_at AND close_at;'''
def getOpenGPIOIdNow(db):
    raOpenGPIOId = []
    cursor = None
    try:
        cursor = db.cursor()
        cursor.execute(SQL_GET_GPIO_OPEN_NOW)
        records = cursor.fetchall()
        for row in records:
            raOpenGPIOId.append(row[0])
    except Exception:
        print("Unexpected error", sys.exc_info()[0])
    finally:
        if cursor:
            cursor.close()
return raOpenGPIOId
while True:
    db = None
    try:
        db = sqlite3.connect(Config.DATABASE_FILE)
raOpenGPIOId = getOpenGPIOIdNow(db)
for gpioId, led in GPIO_REGISTRY_LED.items():
shouldBeOpen = gpioId in raOpenGPIOId
isOn = led.is_lit
if shouldBeOpen and not isOn:
led.on()
print(f'{gpioId} is now OPEN')
elif shouldBeOpen and isOn:
print(f'{gpioId} is OPEN')
else:
led.off()
if isOn:
print(f'{gpioId} is now CLOSED')
else:
print(f'{gpioId} is CLOSED')
except sqlite3.Error as error:
print("sqlite3.Error", error)
finally:
if (db):
db.close()
sleep(Config.SLEEP_CYCLE_S)
print("------")
|
from django.contrib.admin import AdminSite
class RecruitrAdminSite(AdminSite):
site_header = 'Recruitr Administration'
admin_site = RecruitrAdminSite()
|
# Predigame Levels
from copy import deepcopy
from .Globals import Globals
class Level:
def __init__(self):
self.globals = None
def setup(self):
""" setup all the sprites needed for this level """
raise NotImplementedError('Level.setup() cannot be called directly')
def completed(self):
""" execute an objective function to determine if this level is complete """
return False
def next(self):
""" return the next level """
return None
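# Hedged example (not part of the original module): a minimal Level subclass showing
# how the three hooks are meant to be overridden; the sprite-creation helpers a real
# game would call in setup() are omitted.
#
#   class FirstLevel(Level):
#       def setup(self):
#           self.goal_reached = False   # create sprites and wire callbacks here
#
#       def completed(self):
#           return self.goal_reached
#
#       def next(self):
#           return None                 # no further levels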
|
import struct
import typing
from typing import Optional
from .config import Config
from .dao import AirCon, Device, get_device_by_aircon, AirConStatus
from .base_bean import BaseBean
from .ctrl_enum import EnumCmdType, EnumDevice, EnumControl, EnumFanDirection, EnumFanVolume
class Encode:
def __init__(self):
self._fmt = '<'
self._len = 0
self._list = []
def write1(self, d):
self._fmt += 'B'
self._len += 1
self._list.append(d)
def write2(self, d):
self._fmt += 'H'
self._len += 2
self._list.append(d)
def write4(self, d):
self._fmt += 'I'
self._len += 4
self._list.append(d)
    def writes(self, d):
        self._fmt += str(len(d)) + 's'
        self._len += len(d)
        self._list.append(d)
def pack(self, rewrite_length: bool = True):
if rewrite_length:
self._list[1] = self._len - 4
return struct.pack(self._fmt, *self._list)
@property
def len(self):
return self._len
class Param(BaseBean):
cnt = 0
def __init__(self, device_type: EnumDevice, cmd_type: EnumCmdType, has_result: bool):
Param.cnt += 1
BaseBean.__init__(self, Param.cnt, device_type, cmd_type)
self._has_result = has_result
def generate_subbody(self, s):
return
def to_string(self):
s = Encode()
        s.write1(2)                     # byte 0: reserved
        s.write2(16)                    # bytes 1~2: length, excluding the leading/trailing reserved bytes and the length field itself
        s.write1(13)                    # byte 3: reserved
        s.write1(0)                     # byte 4: reserved
        s.write1(self.subbody_ver)      # byte 5: sub-body version
        s.write1(0)                     # byte 6: reserved
        s.write4(self.cmd_id)           # bytes 7~10: auto-incrementing command id
        s.write1(self.target.value[0])  # byte 11: device type
        s.write4(self.target.value[1])  # bytes 12~15: device type id
        s.write1(self.need_ack)         # byte 16: whether an ack is required
        s.write2(self.cmd_type.value)   # bytes 17~18: command type id
        self.generate_subbody(s)
        s.write1(3)                     # last byte: reserved
return s.pack()
@property
def has_result(self):
return self._has_result
class HeartbeatParam(Param):
def __init__(self):
super().__init__(EnumDevice.SYSTEM, EnumCmdType.SYS_ACK, False)
def to_string(self):
s = Encode()
s.write1(2)
s.write2(0)
s.write1(3)
return s.pack()
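# Hedged note (not part of the original module): the heartbeat frame is the smallest
# packet Encode produces. pack() rewrites the second field to len - 4 = 0, so:
#
#   HeartbeatParam().to_string()   # -> b'\x02\x00\x00\x03'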
class SystemParam(Param):
def __init__(self, cmd_type, has_result):
Param.__init__(self, EnumDevice.SYSTEM, cmd_type, has_result)
class HandShakeParam(SystemParam):
def __init__(self):
SystemParam.__init__(self, EnumCmdType.SYS_HAND_SHAKE, True)
class GetGWInfoParam(SystemParam):
def __init__(self):
SystemParam.__init__(self, EnumCmdType.SYS_GET_GW_INFO, True)
class GetRoomInfoParam(SystemParam):
def __init__(self):
SystemParam.__init__(self, EnumCmdType.SYS_GET_ROOM_INFO, True)
self._room_ids: typing.List[int] = []
self.type: int = 1
self.subbody_ver: int = 1
def generate_subbody(self, s):
s.write1(len(self.room_ids))
for r in self.room_ids:
s.write2(r)
if self.subbody_ver == 1 and r != 65535:
s.write1(self.type)
@property
def room_ids(self):
return self._room_ids
class Sensor2InfoParam(Param):
def __init__(self):
        # todo: not compatible with gateways running firmware older than 02.04.00
Param.__init__(self, EnumDevice.SENSOR, EnumCmdType.SENSOR2_INFO, True)
# self._sensor_type: int = 1
def generate_subbody(self, s):
s.write1(255)
class AirconParam(Param):
    def __init__(self, cmd_type, has_result):
        Param.__init__(self, EnumDevice.AIRCON, cmd_type, has_result)
class AirConCapabilityQueryParam(AirconParam):
def __init__(self):
AirconParam.__init__(self, EnumCmdType.AIR_CAPABILITY_QUERY, True)
self._aircons: typing.List[AirCon] = []
def generate_subbody(self, s):
s.write1(len(self._aircons))
for i in self._aircons:
s.write1(i.room_id)
s.write1(1)
s.write1(0)
@property
def aircons(self):
return self._aircons
@aircons.setter
def aircons(self, value):
self._aircons = value
class AirConRecommendedIndoorTempParam(AirconParam):
def __init__(self):
super().__init__(EnumCmdType.AIR_RECOMMENDED_INDOOR_TEMP, True)
class AirConQueryStatusParam(AirconParam):
def __init__(self):
super().__init__(EnumCmdType.QUERY_STATUS, True)
self._device = None # type: Optional[AirCon]
def generate_subbody(self, s):
s.write1(self._device.room_id)
s.write1(self._device.unit_id)
t = EnumControl.Type
flag = t.SWITCH | t.MODE | t.SETTED_TEMP
dev = self.device
if dev is not None:
if dev.fan_volume != EnumFanVolume.NO:
flag = flag | t.AIR_FLOW
if Config.is_new_version:
if dev.fan_direction1 != EnumFanDirection.FIX and dev.fan_direction2 != EnumFanDirection.FIX:
flag = flag | t.FAN_DIRECTION
if dev.bath_room:
flag = flag | t.BREATHE
elif dev.three_d_fresh_allow:
flag = flag | t.BREATHE
flag = flag | t.HUMIDITY
if dev.hum_fresh_air_allow:
flag = flag | t.FRESH_AIR_HUMIDIFICATION
s.write1(flag)
@property
def device(self):
return self._device
@device.setter
def device(self, v: AirCon):
self._device = v
class AirConControlParam(AirconParam):
def __init__(self, aircon: AirCon, new_status: AirConStatus):
super().__init__(EnumCmdType.CONTROL, False)
self.target = get_device_by_aircon(aircon)
self._aircon = aircon
self._new_status = new_status
def generate_subbody(self, s):
aircon = self._aircon
status = self._new_status
s.write1(aircon.room_id)
s.write1(aircon.unit_id)
li = []
flag = 0
if status.switch is not None:
flag = flag | EnumControl.Type.SWITCH
li.append((1, status.switch.value))
if status.mode is not None:
flag = flag | EnumControl.Type.MODE
li.append((1, status.mode.value))
if status.air_flow is not None:
flag = flag | EnumControl.Type.AIR_FLOW
li.append((1, status.air_flow.value))
if status.current_temp is not None:
flag = flag | EnumControl.Type.CURRENT_TEMP
li.append((2, status.current_temp))
if status.setted_temp is not None:
flag = flag | EnumControl.Type.SETTED_TEMP
li.append((2, status.setted_temp))
if Config.is_new_version:
if self.target != EnumDevice.BATHROOM:
if status.fan_direction1 is not None:
flag = flag | EnumControl.Type.FAN_DIRECTION
li.append((1, status.fan_direction1 | status.fan_direction2 << 4))
if self.target == EnumDevice.NEWAIRCON:
if status.humidity is not None:
flag = flag | EnumControl.Type.HUMIDITY
li.append((1, status.humidity))
s.write1(flag)
for bit, val in li:
if bit == 1:
s.write1(val)
elif bit == 2:
s.write2(val)
|
import logging
import unittest
from mdssdk.connection_manager.errors import CLIError
from mdssdk.vsan import Vsan
from mdssdk.zone import Zone
from mdssdk.zoneset import ZoneSet
from tests.test_zoneset.vars import *
log = logging.getLogger(__name__)
class TestZoneSetRemoveMembers(unittest.TestCase):
def __init__(self, testName, sw):
super().__init__(testName)
self.switch = sw
def setUp(self) -> None:
log.debug(self.switch.version)
log.debug(self.switch.ipaddr)
self.vsandb = self.switch.vsans
while True:
self.id = get_random_id()
if self.id not in self.vsandb.keys():
break
self.v = Vsan(switch=self.switch, id=self.id)
self.v.create()
self.zone = Zone(self.switch, "test_zone", self.id)
self.zone.create()
self.zoneset = ZoneSet(self.switch, "test_zoneset", self.id)
self.zoneset.create()
def test_remove_members_nonexisting(self):
self.assertEqual({}, self.zoneset.members)
with self.assertRaises(CLIError) as e:
self.zoneset.remove_members([self.zone])
self.assertEqual(
'The command " zoneset name test_zoneset vsan '
+ str(self.id)
+ ' ; no member test_zone " gave the error " Zone not present ".',
str(e.exception),
)
def test_remove_members(self):
zone1 = self.zone
zone2 = Zone(self.switch, "test_zone2", self.id)
zone2.create()
self.zoneset.add_members([zone1, zone2])
self.assertIsNotNone(self.zoneset.members)
self.zoneset.remove_members([zone1, zone2])
self.assertEqual({}, self.zoneset.members)
def tearDown(self) -> None:
self.v.delete()
|
print("validação masculina/ feminina")
resp = 1
while resp ==1:
sexo = str(input("informe o se sexo: [F/M]")).strip().upper()
if sexo == 'F' or sexo == 'M':
resp = resp +1
else:
print("os dados informados estão incorretos, digite novamente:")
if sexo == "F":
print("tenha um bom dia senhora!")
else:
print("tenha um bom dia senhor!")
|
import copy
import datetime
import logging
import os
import torch
from tqdm import tqdm
from .datasets import ToTensorflow
from .evaluation import evaluate as e
from .utils import load_dataset, load_model
logger = logging.getLogger(__name__)
MAX_NUM_MODELS_IN_CACHE = 3
def device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ModelEvaluator:
def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs):
"""
Evaluate Model on the given dataset and return the accuracy.
Args:
model_name:
model:
dataset:
*args:
**kwargs:
"""
logging_info = f"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator"
logger.info(logging_info)
print(logging_info)
for metric in dataset.metrics:
metric.reset()
with torch.no_grad():
result_writer = e.ResultPrinter(model_name=model_name,
dataset=dataset)
for images, target, paths in tqdm(dataset.loader):
images = images.to(device())
logits = model.forward_batch(images)
softmax_output = model.softmax(logits)
if isinstance(target, torch.Tensor):
batch_targets = model.to_numpy(target)
else:
batch_targets = target
predictions = dataset.decision_mapping(softmax_output)
for metric in dataset.metrics:
metric.update(predictions,
batch_targets,
paths)
if kwargs["print_predictions"]:
result_writer.print_batch_to_csv(object_response=predictions,
batch_targets=batch_targets,
paths=paths)
def _tensorflow_evaluator(self, model_name, model, dataset, *args, **kwargs):
"""
Evaluate Model on the given dataset and return the accuracy.
Args:
model_name:
model:
dataset:
*args:
**kwargs:
Returns:
accuracy
"""
logging_info = f"Evaluation model {model_name} on dataset {dataset.name} using Tensorflow Evaluator"
logger.info(logging_info)
print(logging_info)
result_writer = e.ResultPrinter(model_name=model_name,
dataset=dataset)
for metric in dataset.metrics:
metric.reset()
for images, target, paths in tqdm(dataset.loader):
logits = model.forward_batch(images)
softmax_output = model.softmax(logits)
predictions = dataset.decision_mapping(softmax_output)
for metric in dataset.metrics:
metric.update(predictions,
target,
paths)
if kwargs["print_predictions"]:
result_writer.print_batch_to_csv(object_response=predictions,
batch_targets=target,
paths=paths)
def _get_datasets(self, dataset_names, *args, **kwargs):
dataset_list = []
for dataset in dataset_names:
dataset = load_dataset(dataset, *args, **kwargs)
dataset_list.append(dataset)
return dataset_list
def _to_tensorflow(self, datasets):
datasets = copy.deepcopy(datasets)
new_datasets = []
for dataset in datasets:
dataset.loader = ToTensorflow(dataset.loader)
new_datasets.append(dataset)
return new_datasets
def _get_evaluator(self, framework):
if framework == "tensorflow":
return self._tensorflow_evaluator
elif framework == 'pytorch':
return self._pytorch_evaluator
else:
raise NameError("Unsupported evaluator")
def _remove_model_from_cache(self, framework, model_name):
def _format_name(name):
return name.lower().replace("-", "_")
try:
if framework == "pytorch":
cachedir = "/root/.cache/torch/checkpoints/"
downloaded_models = os.listdir(cachedir)
for dm in downloaded_models:
if _format_name(dm).startswith(_format_name(model_name)):
os.remove(os.path.join(cachedir, dm))
except:
pass
def __call__(self, models, dataset_names, *args, **kwargs):
"""
Wrapper call to _evaluate function.
Args:
models:
dataset_names:
*args:
**kwargs:
Returns:
"""
logging.info("Model evaluation.")
_datasets = self._get_datasets(dataset_names, *args, **kwargs)
for model_name in models:
datasets = _datasets
model, framework = load_model(model_name, *args)
evaluator = self._get_evaluator(framework)
if framework == 'tensorflow':
datasets = self._to_tensorflow(datasets)
logger.info(f"Loaded model: {model_name}")
for dataset in datasets:
# start time
time_a = datetime.datetime.now()
evaluator(model_name, model, dataset, *args, **kwargs)
for metric in dataset.metrics:
logger.info(str(metric))
print(metric)
# end time
time_b = datetime.datetime.now()
c = time_b - time_a
if kwargs["print_predictions"]:
# print performances to csv
for metric in dataset.metrics:
e.print_performance_to_csv(model_name=model_name,
dataset_name=dataset.name,
performance=metric.value,
metric_name=metric.name)
if len(models) >= MAX_NUM_MODELS_IN_CACHE:
self._remove_model_from_cache(framework, model_name)
logger.info("Finished evaluation.")
|
l = float(input('What is the width of the wall in meters? '))
a = float(input('What is the height of the wall in meters? '))
area = l*a
tinta = area/2
print(f'The wall area is {area} square meters, so {tinta} liters of paint will be needed')
|
from PIL import Image
import pytesseract
image = Image.open("ocr_example.jpg")
text_convert = pytesseract.image_to_string(image, lang = 'eng')
print(text_convert)
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from rest_framework.views import Response
from api.base import permissions as base_permissions
from api.base.exceptions import Gone
from api.base.views import JSONAPIBaseView
from api.base.renderers import PlainTextRenderer
from api.wikis.permissions import ContributorOrPublic, ExcludeWithdrawals
from api.wikis.serializers import (
WikiSerializer,
NodeWikiDetailSerializer,
RegistrationWikiDetailSerializer,
)
from framework.auth.oauth_scopes import CoreScopes
from addons.wiki.models import NodeWikiPage
class WikiMixin(object):
"""Mixin with convenience methods for retrieving the wiki page based on the
URL. By default, fetches the wiki page based on the wiki_id kwarg.
"""
serializer_class = WikiSerializer
wiki_lookup_url_kwarg = 'wiki_id'
def get_wiki(self, check_permissions=True):
pk = self.kwargs[self.wiki_lookup_url_kwarg]
wiki = NodeWikiPage.load(pk)
if not wiki:
raise NotFound
if wiki.is_deleted:
raise Gone
# only show current wiki versions
if not wiki.is_current:
raise NotFound
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, wiki)
return wiki
class WikiDetail(JSONAPIBaseView, generics.RetrieveAPIView, WikiMixin):
"""Details about a specific wiki. *Read-only*.
###Permissions
Wiki pages on public nodes are given read-only access to everyone. Wiki pages on private nodes are only visible to
contributors and administrators on the parent node.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
##Attributes
OSF wiki entities have the "wikis" `type`.
name type description
======================================================================================================
        name                        string             name of the wiki page
path string the path of the wiki page
materialized_path string the path of the wiki page
date_modified iso8601 timestamp timestamp when the wiki was last updated
content_type string MIME-type
current_user_can_comment boolean Whether the current user is allowed to post comments
extra object
version integer version number of the wiki
##Relationships
###User
The user who created the wiki.
###Node
The project that the wiki page belongs to.
###Comments
The comments created on the wiki page.
##Links
self: the canonical api endpoint of this wiki
info: the canonical api endpoint of this wiki
        download: the link to retrieve the contents of the wiki page
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.WIKI_BASE_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeWikiDetailSerializer
view_category = 'wikis'
view_name = 'wiki-detail'
def get_serializer_class(self):
if self.get_wiki().node.is_registration:
return RegistrationWikiDetailSerializer
return NodeWikiDetailSerializer
# overrides RetrieveAPIView
def get_object(self):
return self.get_wiki()
class WikiContent(JSONAPIBaseView, generics.RetrieveAPIView, WikiMixin):
""" View for rendering wiki page content."""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.WIKI_BASE_READ]
required_write_scopes = [CoreScopes.NULL]
renderer_classes = (PlainTextRenderer, )
view_category = 'wikis'
view_name = 'wiki-content'
def get_serializer_class(self):
return None
def get(self, request, **kwargs):
wiki = self.get_wiki()
return Response(wiki.content)
|