content
stringlengths 5
1.05M
|
|---|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API client library for Cloud DNS operatoins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
class Client(object):
  """Thin wrapper around the Cloud DNS managed-zone operations service."""

  _API_NAME = 'dns'

  def __init__(self, version, client, messages=None):
    self.version = version
    self.client = client
    # cache the operations service for the request helpers below
    self._service = client.managedZoneOperations
    self.messages = messages if messages else client.MESSAGES_MODULE

  @classmethod
  def FromApiVersion(cls, version):
    """Build a Client backed by a generated API client for *version*."""
    return cls(version, apis.GetClientInstance('dns', version))

  def Get(self, operation_ref):
    """Fetch a single managed-zone operation by resource reference."""
    request = self.messages.DnsManagedZoneOperationsGetRequest(
        operation=operation_ref.Name(),
        managedZone=operation_ref.managedZone,
        project=operation_ref.project)
    return self._service.Get(request)

  def List(self, zone_ref, limit=None):
    """Yield operations for a managed zone, paging through results."""
    return list_pager.YieldFromList(
        self._service,
        self.messages.DnsManagedZoneOperationsListRequest(
            managedZone=zone_ref.Name(),
            project=zone_ref.project),
        limit=limit,
        field='operations')
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.patches import Polygon
from ... import units as u
from ...coordinates.representation import UnitSphericalRepresentation
from ...coordinates.matrix_utilities import rotation_matrix, matrix_product
__all__ = ['SphericalCircle']
def _rotate_polygon(lon, lat, lon0, lat0):
    """
    Given a polygon with vertices defined by (lon, lat), rotate the polygon
    such that the North pole of the spherical coordinates is now at (lon0,
    lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
    polygon should initially be drawn around the North pole.
    """
    # Rotation that tilts the pole down to latitude lat0, then spins to lon0.
    tilt = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
    spin = rotation_matrix(-lon0, axis='z')
    combined = matrix_product(spin, tilt)

    # Rotate in Cartesian space, then convert back to spherical coordinates.
    cartesian = UnitSphericalRepresentation(lon=lon, lat=lat).to_cartesian()
    rotated = UnitSphericalRepresentation.from_cartesian(
        cartesian.transform(combined))
    return rotated.lon, rotated.lat
class SphericalCircle(Polygon):
    """
    Create a patch representing a spherical circle - that is, a circle that is
    formed of all the points that are within a certain angle of the central
    coordinates on a sphere. Here we assume that latitude goes from -90 to +90
    This class is needed in cases where the user wants to add a circular patch
    to a celestial image, since otherwise the circle will be distorted, because
    a fixed interval in longitude corresponds to a different angle on the sky
    depending on the latitude.
    Parameters
    ----------
    center : tuple or `~astropy.units.Quantity`
        This can be either a tuple of two `~astropy.units.Quantity` objects, or
        a single `~astropy.units.Quantity` array with two elements.
    radius : `~astropy.units.Quantity`
        The radius of the circle
    resolution : int, optional
        The number of points that make up the circle - increase this to get a
        smoother circle.
    vertex_unit : `~astropy.units.Unit`
        The units in which the resulting polygon should be defined - this
        should match the unit that the transformation (e.g. the WCS
        transformation) expects as input.
    Notes
    -----
    Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
    """

    def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):

        # Accept either a 2-tuple of Quantities or a 2-element Quantity array.
        center_lon, center_lat = center

        # Draw the circle around the North pole: evenly spaced longitudes at a
        # constant co-latitude equal to the requested radius.
        pole_lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
        pole_lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian

        # Rotate the polygon so it is centered on (center_lon, center_lat).
        lon, lat = _rotate_polygon(pole_lon, pole_lat, center_lon, center_lat)

        # Build the (N, 2) vertex array in the requested units.
        vertices = np.stack([lon.to_value(vertex_unit),
                             lat.to_value(vertex_unit)], axis=-1)

        super().__init__(vertices, **kwargs)
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 13 18:05:13 2017
@author: azkei
Arithmetic Operations using NumPy
"""
import numpy as np  # BUG FIX: np was used throughout but never imported

# Generate an array
a = np.arange(4)
# Sum (elementwise with a scalar; does not modify a)
a + 4
# Multiplication (elementwise with a scalar)
a * 2
b = np.arange(4, 8)
# Add 2 arrays together
a + b
# Minus
a - b
# Multiply
a * b
# Multiply an array with the sine or square root of the elements
# Sine
a * np.sin(b)
# Square Root
a * np.sqrt(b)
# Moving to a multi-dimensional case
A = np.arange(0, 9).reshape(3, 3)
B = np.ones((3, 3))
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# input hand shape should be [5,4,6]
def show_hand(hand, fname=None, show_origin=False, view_point="front", scale="fit"):
    """Render a [5, 4, 6] hand array as a 3D skeleton plot.

    Each of the 5 fingers has 4 bones; every bone row interleaves two
    (x, y, z) endpoints, so strided slices ::3, 1::3, 2::3 pick out the
    coordinate pairs. Shows the figure, or saves it when fname is given.
    """
    palette = ["#000055", "#111155", "#222255", "#333355", "#444455"]
    fig, (ax) = plt.subplots(nrows=1, ncols=1, figsize=[7, 5],
                             subplot_kw=dict(projection='3d'))

    # One line per bone; thicker lines toward the base of each finger.
    for fidx, color in enumerate(palette):
        for bidx in range(4):
            ax.plot(xs=hand[fidx, bidx, ::3],
                    ys=hand[fidx, bidx, 1::3],
                    zs=hand[fidx, bidx, 2::3],
                    color=color, lw=6 - bidx)

    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    if show_origin:
        ax.scatter(0, 0, 0, s=5)

    if scale == "fit":
        # Tight limits from the data itself.
        ax.set_xlim(np.min(hand[:, :, ::3]), np.max(hand[:, :, ::3]))
        ax.set_ylim(np.min(hand[:, :, 1::3]), np.max(hand[:, :, 1::3]))
        ax.set_zlim(np.min(hand[:, :, 2::3]), np.max(hand[:, :, 2::3]))
    elif scale == "unit":
        # Fixed unit cube around the origin.
        view_scale = 1
        ax.set_xlim([-view_scale, view_scale])
        ax.set_ylim([-view_scale, view_scale])
        ax.set_zlim([-view_scale, view_scale])

    if view_point == "front":
        ax.view_init(elev=90, azim=-90)
    elif view_point == "side":
        ax.view_init(elev=0, azim=0)

    if fname is None:
        plt.show()
    else:
        plt.savefig(fname)
|
# Sublime Text 3 plugin to place the full path of the currently selected file in the status bar
# Place this file in ~/Library/Aplication Support/Sublime Text 3/Packages/User/
import sublime, sublime_plugin
class FilePathInStatusBar(sublime_plugin.EventListener):
    """Display the full path of the active file in Sublime's status bar."""

    def on_activated(self, view):
        # Unsaved buffers have no file name; show an empty path for them.
        path = view.file_name()
        view.set_status('currentPath', path if path is not None else "")
|
#!/usr/bin/env python
"""
New Drawing class to create new mark and style on axes.
"""
# from copy import deepcopy, copy
from decimal import Decimal
import numpy as np
import toyplot
# from .Admixture import AdmixEdges
# for setting values from iterables
ITERABLE = (list, tuple, np.ndarray)
class GridSetup:
    """
    Build a toyplot Canvas plus a grid of Cartesian axes to fit many trees.
    """

    def __init__(self, nrows, ncols, width, height, layout):
        # style args can include height/width, nrows, ncols, shared, ...
        self.nrows = nrows
        self.ncols = ncols
        self.width = width
        self.height = height
        self.layout = layout

        # dimensions must be resolved before the canvas is created
        self.get_tree_dims()
        self.get_canvas_and_axes()

    def get_canvas_and_axes(self):
        """Create .canvas and one cartesian axes object per grid cell."""
        self.canvas = toyplot.Canvas(height=self.height, width=self.width)
        ncells = self.nrows * self.ncols
        self.axes = [
            self.canvas.cartesian(
                grid=(self.nrows, self.ncols, cell),
                padding=10,
                margin=25,
            )
            for cell in range(ncells)
        ]

    def get_tree_dims(self):
        """Fill in .height and .width with defaults when not set by the user."""
        # small grids get larger per-cell dimensions
        if self.ncols * self.nrows < 4:
            cell_major, cell_minor = 250, 250
        else:
            cell_major, cell_minor = 200, 140

        if self.layout in ("d", "u"):
            # wider than tall
            if not self.width:
                self.width = min(750, cell_major * self.ncols)
            if not self.height:
                self.height = min(750, cell_minor * self.nrows)
        else:
            if not self.height:
                self.height = min(750, cell_major * self.nrows)
            if not self.width:
                self.width = min(750, cell_minor * self.ncols)
class CanvasSetup:
    """
    Returns Canvas and Cartesian axes objects.

    If an existing axes object is supplied no canvas is created and the
    external axes are styled in place; otherwise a new Canvas sized from
    the tree/style is created and filled with a cartesian axes.
    """
    def __init__(self, tree, axes, style):
        # args includes axes
        self.tree = tree
        self.axes = axes
        self.style = style
        self.canvas = None
        # True when the caller provided the axes (we then avoid toggling .show)
        self.external_axis = False

        # get the longest name for dimension fitting
        self.lname = 0
        if not all([i is None for i in self.style.tip_labels]):
            self.lname = max([len(str(i)) for i in self.style.tip_labels])

        # ntips and shape to fit with provided args
        self.get_dims_from_tree_size()

        # fills canvas and axes
        self.get_canvas_and_axes()

        # expand the domain/extents for the text
        # self.fit_tip_labels()

        # ticks for tree and scalebar
        self.add_axes_style()

    def get_dims_from_tree_size(self):
        """
        Calculate reasonable canvas height and width for tree given N tips
        """
        if self.style.layout == "c":
            # circular layout: a single radius drives both dimensions
            radius = max(
                [0] + [i for i in [self.style.height, self.style.width] if i])
            if not radius:
                radius = 400
            self.style.width = self.style.height = radius
            return

        if self.style.layout in ("r", "l"):
            # height fit by tree size
            if not self.style.height:
                self.style.height = max(275, min(1000, 18 * self.tree.ntips))
            # width fit by name size
            if not self.style.width:
                self.style.width = max(250, min(500, 250 + 5 * self.lname))
        else:
            # height fit by name size
            if not self.style.height:
                self.style.height = max(250, min(500, 250 + 5 * self.lname))
            # width fit by tree size
            if not self.style.width:
                self.style.width = max(350, min(1000, 18 * self.tree.ntips))

    def get_canvas_and_axes(self):
        """
        Set .canvas and .axes, reusing externally supplied axes when present.
        """
        if self.axes is not None:
            self.canvas = None
            self.external_axis = True
        else:
            self.canvas = toyplot.Canvas(
                height=self.style.height,
                width=self.style.width,
            )
            self.axes = self.canvas.cartesian(
                padding=self.style.padding
            )

    def add_axes_style(self):
        """
        Apply padding, visibility, and scalebar tick styling to .axes.
        """
        # style axes with padding and show axes
        self.axes.padding = self.style.padding
        if not self.external_axis:
            self.axes.show = True
            if not self.style.scalebar:
                self.axes.show = False

        # scalebar
        if self.style.scalebar:
            if self.style.layout in ("r", "l"):
                # roughly one tick per 100px, at least 3
                nticks = max((3, np.floor(self.style.width / 100).astype(int)))
                self.axes.y.show = False
                self.axes.x.show = True
                self.axes.x.ticks.show = True

                # generate locations spanning baseline -> tree height
                if self.style.use_edge_lengths:
                    th = self.tree.treenode.height
                else:
                    th = self.tree.treenode.get_farthest_leaf(True)[1] + 1
                if self.style.layout == "r":
                    top = self.style.xbaseline - th
                else:
                    top = self.style.xbaseline + th
                locs = np.linspace(self.style.xbaseline, top, nticks)

                # auto-formatter for axes ticks labels: precision from the
                # decimal exponent of the first non-baseline tick
                zer = abs(min(0, Decimal(locs[1]).adjusted()))
                fmt = "{:." + str(zer) + "f}"
                self.axes.x.ticks.locator = toyplot.locator.Explicit(
                    locations=locs,
                    labels=[fmt.format(i) for i in np.abs(locs)],
                )

            elif self.style.layout in ("u", "d"):
                # same as above but ticks go on the y axis
                nticks = max((3, np.floor(self.style.height / 100).astype(int)))
                self.axes.x.show = False
                self.axes.y.show = True
                self.axes.y.ticks.show = True

                # generate locations
                if self.style.use_edge_lengths:
                    th = self.tree.treenode.height
                else:
                    th = self.tree.treenode.get_farthest_leaf(True)[1] + 1
                if self.style.layout == "d":
                    top = self.style.ybaseline + th
                else:
                    top = self.style.ybaseline - th
                locs = np.linspace(self.style.ybaseline, top, nticks)

                # auto-formatter for axes ticks labels
                zer = abs(min(0, Decimal(locs[1]).adjusted()))
                fmt = "{:." + str(zer) + "f}"
                self.axes.y.ticks.locator = toyplot.locator.Explicit(
                    locations=locs,
                    labels=[fmt.format(i) for i in np.abs(locs)],
                )
# elif self.style.layout == "d":
# nticks = max((3, np.floor(self.style.height / 100).astype(int)))
# self.axes.x.show = False
# self.axes.y.show = True
# self.axes.y.ticks.show = True
# # generate locations
# locs = np.linspace(0, self.tree.treenode.height, nticks)
# # auto-formatter for axes ticks labels
# zer = abs(min(0, Decimal(locs[1]).adjusted()))
# fmt = "{:." + str(zer) + "f}"
# self.axes.y.ticks.locator = toyplot.locator.Explicit(
# locations=locs,
# labels=[fmt.format(i) for i in np.abs(locs)],
# )
# def fit_tip_labels(self):
# """
# DEPRECATED SINCE V2 since Mark now sets its own extents correctly.
# Modifies display range to ensure tip labels fit. This is a bit hackish
# still. The problem is that the 'extents' range of the rendered text
# is not totally correct. So we add a little buffer here. Should add for
# user to be able to modify this if needed. If not using edge lengths
# then need to use unit length for treeheight.
# """
# # bail on unrooted for now; TODO
# if self.style.layout == "c":
# return
# # if names
# if self.lname:
# # get ratio of names to tree in plot
# ratio = max(self.lname / 10, 0.15)
# # have tree figure make up 85% of plot
# if self.style.use_edge_lengths:
# addon = self.tree.treenode.height
# else:
# addon = self.tree.treenode.get_farthest_leaf(True)[1] + 1
# addon *= ratio
# # modify display for layout
# if self.style.layout == "r":
# self.axes.x.domain.max = (addon / 2.) + self.style.xbaseline
# elif self.style.layout == "l":
# self.axes.x.domain.min = (-addon / 2.) + self.style.xbaseline
# # self.axes.x.domain.min -= self.style.xbaseline
# elif self.style.layout == "d":
# self.axes.y.domain.min = (-addon / 2.) + self.style.ybaseline
# elif self.style.layout == "u":
# self.axes.y.domain.max = (addon / 2.) + self.style.ybaseline
# # print(addon, ratio, self.axes.x.domain.min, self.axes.x.domain.max)
|
import pymongo
import time

# Maintenance script: connect to the fx database and drop the
# backtest/simulation/practice collections to reset their state.
# NOTE(review): host and port are hard-coded — confirm environment before use.
client = pymongo.MongoClient('openshift.flg.jp', 30017)
db = client.fx

# drop() is a no-op when the collection does not exist
db.fxsetting_log.drop()
db.result_backtest.drop()
db.result_trade_sim.drop()
db.trade_practice.drop()
db.trade_practice_weekly.drop()
|
"""Idea comes from here:
https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
"""
import os
import pickle
import numpy as np
from manager import Manager
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.models import load_model
from chatbot.config import PATHS
from chatbot.train import limit_gpu_memory, create_model_attention
# Vocabulary is the ten digit characters '0'..'9'.
vocab = [str(i) for i in range(10)]
vocab_size = len(vocab)
# lookup tables between vocabulary indices and digit characters
index2word = {i: w for i, w in zip(range(vocab_size), vocab)}
word2index = {w: i for i, w in zip(range(vocab_size), vocab)}

# Two 3-digit numbers concatenated -> 6 input characters; their sum is
# zero-padded to 4 output characters.
n_timesteps_input = 6
n_timesteps_output = 4

PREPROCESSING_PARAMS = {
    'max_seq_length_input': n_timesteps_input,
    'max_seq_length_output': n_timesteps_output,
    'full_vocab_size': vocab_size
}
HPARAMS = {
    'hidden_units': 512,
    'batch_size': 128,
    'num_epochs': 100
}

# CLI command registry; functions below register themselves via decorators.
manager = Manager()
def text_to_input(input_text):
    """One-hot encode a digit string into shape (1, n_timesteps_input, vocab_size)."""
    digit_indices = np.array(list(input_text)).astype(int)
    print(digit_indices)
    encoded = np.zeros((1, n_timesteps_input, vocab_size))
    # set one 1 per timestep at the digit's vocabulary index
    encoded[0, np.arange(n_timesteps_input), digit_indices] = 1
    print(encoded)
    return encoded
def create_batch(batch_size, hidden_units):
    """Build one random training batch for the addition task.

    Returns ([onehot_inputs, s0, c0], output_list) where output_list has one
    (batch_size, vocab_size) array per output timestep, and s0/c0 are zero
    initial states for the attention decoder.
    """
    # create random samples: pairs of 3-digit numbers and their sums
    numbers = np.random.randint(100, 1000, batch_size * 2)
    numbers1 = numbers[:batch_size]
    numbers2 = numbers[batch_size:]
    numbers_result = numbers1 + numbers2

    # inputs: the two numbers concatenated as a 6-character digit string
    input_str = [str(i) + str(j) for i, j in zip(numbers1, numbers2)]
    indices_input = [np.array(list(s)).astype(int) for s in input_str]
    onehot_inputs = np.zeros((batch_size, n_timesteps_input, vocab_size))
    for i in range(batch_size):
        onehot_inputs[i, np.arange(n_timesteps_input), indices_input[i]] = 1

    # outputs: the sum zero-padded to 4 digits, one-hot per timestep
    output_str = [f'{i:0{n_timesteps_output}}' for i in numbers_result]
    indices_output = [np.array(list(s)).astype(int) for s in output_str]
    onehot_outputs = np.zeros((batch_size, n_timesteps_output, vocab_size))
    for i in range(batch_size):
        onehot_outputs[i, np.arange(n_timesteps_output), indices_output[i]] = 1

    # split outputs into a list of per-timestep arrays (model has one output per step)
    output_list = []
    for t in range(onehot_outputs.shape[1]):
        output_list.append(onehot_outputs[:, t, :])

    s0 = np.zeros((batch_size, hidden_units))
    c0 = np.zeros((batch_size, hidden_units))
    return [onehot_inputs, s0, c0], output_list
@manager.command()
def create_dataset(size):
    """CLI command: generate one batch of `size` samples and pickle it to disk."""
    size = int(size)  # CLI arguments arrive as strings
    dataset = create_batch(size, HPARAMS['hidden_units'])
    path = os.path.join(PATHS['data_dir'], 'summation_data.pickle')
    pickle.dump(dataset, open(path, "wb"))
def generate_batch(batch_size, hidden_units):
    """Infinite generator of fresh random batches for fit_generator."""
    while True:
        yield create_batch(batch_size, hidden_units)
@manager.command()
def start_attention_gen(name):
    """CLI command: train the attention model on endlessly generated batches.

    `name` tags the TensorBoard run directory and checkpoint filenames.
    """
    limit_gpu_memory()
    # create model
    print("Creating model...")
    model = create_model_attention(PREPROCESSING_PARAMS, HPARAMS, for_inference=False, use_embedding=False)
    print("Compiling model...")
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Callbacks
    tensorboard = TensorBoard(log_dir="{}/{}".format(PATHS['log_dir'], name), write_grads=True,
                              write_graph=True, write_images=True)
    file_path = os.path.join(PATHS['models_dir'], f"{name}")
    # checkpoint every 10 epochs, tagged with epoch number and val_loss
    checkpoint = ModelCheckpoint(file_path + "-{epoch:02d}-{val_loss:.2f}.h5", verbose=1, period=10)

    train_gen = generate_batch(HPARAMS['batch_size'], HPARAMS['hidden_units'])
    test_gen = generate_batch(HPARAMS['batch_size'], HPARAMS['hidden_units'])
    train_num_batches = 1000
    test_num_batches = 100
    model.fit_generator(
        generator=train_gen, steps_per_epoch=train_num_batches,
        epochs=HPARAMS['num_epochs'],
        verbose=1, validation_data=test_gen, validation_steps=test_num_batches,
        callbacks=[tensorboard, checkpoint],
    )
@manager.command()
def start_attention(name, file_name=None):
    """CLI command: train the (bidirectional) attention model on a pickled dataset.

    `name` tags the TensorBoard run and checkpoints; `file_name` optionally
    resumes from saved weights.
    """
    limit_gpu_memory()
    # read data produced by create_dataset
    path = os.path.join(PATHS['data_dir'], 'summation_data.pickle')
    dataset = pickle.load(open(path, "rb"))

    # create model
    print("Creating model...")
    model = create_model_attention(PREPROCESSING_PARAMS, HPARAMS, for_inference=False, use_embedding=False,
                                   bilstm=True)
    print("Compiling model...")
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    if file_name is not None:
        print("Loading model's weights...")
        model.load_weights(os.path.join(PATHS['models_dir'], file_name))

    # Callbacks
    tensorboard = TensorBoard(log_dir="{}/{}".format(PATHS['log_dir'], name), write_grads=True,
                              write_graph=True, write_images=True)
    file_path = os.path.join(PATHS['models_dir'], f"{name}")
    # checkpoint every 10 epochs, tagged with epoch number and val_loss
    checkpoint = ModelCheckpoint(file_path + "-{epoch:02d}-{val_loss:.2f}.h5", verbose=1, period=10)

    model.fit(x=dataset[0], y=dataset[1], batch_size=128, validation_split=0.05, shuffle=True,
              epochs=HPARAMS['num_epochs'],
              verbose=1,
              callbacks=[tensorboard, checkpoint],
              )
def reply_attention(input_text, model, preprocessing_params, hparams):
    """Run the attention model on a digit string and decode its prediction.

    NOTE(review): preprocessing_params is currently unused — confirm intent.
    """
    encoder_input = text_to_input(input_text)
    s0 = np.zeros((1, hparams['hidden_units']))
    c0 = np.zeros((1, hparams['hidden_units']))
    predicted_indices = np.argmax(model.predict([encoder_input, s0, c0]), axis=-1)
    tokens = [index2word[int(i)] for i in predicted_indices]
    return ' '.join(tokens)
@manager.command()
def do_inference_attention(file_name=None):
    """CLI command: interactive loop that feeds typed digit strings to the model."""
    limit_gpu_memory()
    if file_name is not None:
        path = os.path.join(PATHS['models_dir'], file_name)
    else:
        path = PATHS["model"]
    model = create_model_attention(PREPROCESSING_PARAMS, HPARAMS, use_embedding=False, for_inference=False)
    model.load_weights(path)

    finished = False
    while not finished:
        text = input("Input text (to finish enter 'f'): ")
        if text == 'f':
            finished = True
            continue
        replies = reply_attention(text, model, PREPROCESSING_PARAMS, HPARAMS)
        # replies_without_unk = [r for r in replies if PREPROCESSING_PARAMS['unk'] not in r]
        # print(len(replies_without_unk))
        # for r in replies_without_unk:
        print(replies)
# Entry point: dispatch to the registered CLI commands above.
if __name__ == '__main__':
    manager.main()

# i = 0
# for g in generate_batch(3, 3):
#     i += 1
#     print(g)
#     if i > 1:
#         break
|
# Package version string.
__version__ = '0.1.46'
# Django (<3.2) hook: default AppConfig used when the app is installed.
default_app_config = 'wallets.apps.WalletsConfig'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 11:15:31 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os
import utils
# project utility: log/time the start of this feature-engineering job
utils.start(__file__)
# ==============================================================================
PREF = 'prev_102_'  # prefix for all output feature columns
KEY = 'SK_ID_CURR'  # join key between application and previous_application

# remove stale outputs from a previous run of this script
os.system(f'rm ../feature/t*_{PREF}*')
# =============================================================================
#
# =============================================================================
prev = utils.read_pickles('../data/previous_application')
# one row per applicant; features below are assigned onto this index
base = prev[[KEY]].drop_duplicates().set_index(KEY)

# groupbys restricted to approved vs refused previous applications
gr_app = prev[prev['NAME_CONTRACT_STATUS']=='Approved'].groupby(KEY)
gr_ref = prev[prev['NAME_CONTRACT_STATUS']=='Refused'].groupby(KEY)

col = ['AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY',
       'AMT_CREDIT-dby-AMT_ANNUITY', 'DAYS_BIRTH']
train = utils.load_train([KEY]+col)
test = utils.load_test([KEY]+col)

# prefix application columns with 'app_' to distinguish them after the merge
train.columns = [KEY] + ['app_'+c for c in train.columns[1:]]
test.columns = [KEY] + ['app_'+c for c in test.columns[1:]]
col_init = train.columns.tolist()
# =============================================================================
# feature
# =============================================================================
base['cnt_approved'] = gr_app.size()
base['DAYS_DECISION_app_min'] = gr_app['DAYS_DECISION'].min()
base['DAYS_DECISION_app_max'] = gr_app['DAYS_DECISION'].max()
base['paid_sum'] = gr_app['amt_paid'].sum()
base['paid_sum_ratio'] = (base['paid_sum'] / gr_app['AMT_CREDIT'].sum())
base['debt_sum'] = (gr_app['AMT_CREDIT'].sum() - base['paid_sum'])
base['cnt_refused'] = gr_ref.size()
base['DAYS_DECISION_ref_min'] = gr_ref['DAYS_DECISION'].min()
base['DAYS_DECISION_ref_max'] = gr_ref['DAYS_DECISION'].max()
# BUG FIX: denominator must be parenthesized. The original expression
# `a / a + r` evaluates as `(a / a) + r` == `1 + cnt_refused`, which is
# not a ratio at all; approved / (approved + refused) is the intended share.
base['approved_ratio'] = base['cnt_approved'] / (base['cnt_approved'] + base['cnt_refused'])
base.reset_index(inplace=True)
# =============================================================================
# merge
# =============================================================================
def mk_feature(df):
    """Add derived previous-application features to `df` in place.

    Columns are added in the same order as the original implementation:
    four DAYS_DECISION deltas against birth date, then income-normalized
    sums, then annuity-normalized sums.
    """
    # days between each decision extreme and the applicant's birth date
    for side in ('app', 'ref'):
        for bound in ('min', 'max'):
            src = f'DAYS_DECISION_{side}_{bound}'
            df[f'{src}-m-DAYS_BIRTH'] = df[src] - df['app_DAYS_BIRTH']
    # paid/debt sums scaled by total income
    for amt in ('paid_sum', 'debt_sum'):
        df[f'{amt}-dby-AMT_INCOME_TOTAL'] = df[amt] / df['app_AMT_INCOME_TOTAL']
    # paid/debt sums scaled by annuity
    for amt in ('paid_sum', 'debt_sum'):
        df[f'{amt}-dby-AMT_ANNUITY'] = df[amt] / df['app_AMT_ANNUITY']
    return
# left-join the aggregated features onto train/test, then derive ratios
train2 = pd.merge(train, base, on=KEY, how='left')
mk_feature(train2)
test2 = pd.merge(test, base, on=KEY, how='left')
mk_feature(test2)
# =============================================================================
# output
# =============================================================================
# drop the raw application columns; only derived features are exported
train2.drop(col_init, axis=1, inplace=True)
test2.drop(col_init, axis=1, inplace=True)
utils.to_feature(train2.add_prefix(PREF), '../feature/train')
utils.to_feature(test2.add_prefix(PREF), '../feature/test')
# ==============================================================================
# mark job end (project utility: logging/timing)
utils.end(__file__)
|
import os
from unittest import TestCase
class Test(TestCase):
    """Base test case providing access to fixture files in ./files."""

    def load_resource(self, path):
        """Return the raw bytes of ``files/<path>`` relative to this module.

        Uses a context manager so the file handle is closed promptly
        (the original left the handle open until garbage collection).
        """
        base_path = os.path.split(__file__)[0]
        resource_path = os.path.join(base_path, "files", path)
        with open(resource_path, 'rb') as handle:
            return handle.read()
|
"""General file method and JSON handling"""
import copy
import glob
import json
import imghdr
import os
import warnings
import numpy as np
from .data_methods import num_digits
__author__ = "Sean Mullan"
__copyright__ = "Sean Mullan"
__license__ = "mit"
def ensure_dir(*paths):
    """Check if a given directory exists, and create it if it doesn't. Multiple directories can be passed as a top-to-bottom path structure.
    Parameters
    ----------
    *paths : iterable
        One or more nested directories to ensure
    Returns
    -------
    str
        Joined filepath of all ensured directories
    """
    current = ''
    for segment in paths:
        current = os.path.join(current, str(segment))
        if os.path.isdir(current):
            # this level already exists as a directory; nothing to do
            continue
        if os.path.exists(current):
            # a non-directory entry occupies this path component
            raise ValueError("A file without an extension is blocking directory creation at {}".format(current))
        os.makedirs(current, exist_ok=True)
    return current
def next_filename(filename):
    """Check if a given file exists, and return a new filename for a numbered copy if it does.

    BUG FIX: the original used ``filename.rsplit('.', 1)`` which raised a
    ValueError for existing files without an extension; ``os.path.splitext``
    handles both cases ('a.txt' -> 'a_2.txt', 'plain' -> 'plain_2').
    """
    filename = str(filename)
    if not os.path.isfile(filename):
        return filename
    root, extension = os.path.splitext(filename)
    i = 2
    while True:
        next_check = '{}_{}{}'.format(root, i, extension)
        if os.path.isfile(next_check):
            i += 1
        else:
            return next_check
# JSON methods
def load_json(filename):
    """Load a JSON file, and re-form any numpy arrays if :func:`~gouda.save_json` was used to write them."""
    with open(filename, 'r') as f:
        data = json.load(f)
    # A bare slice saved at top level is stored as a dict of its parts.
    if isinstance(data, dict) and 'slice_start' in data:
        data = slice(data['slice_start'], data['slice_stop'], data['slice_step'])
    elif isinstance(data, list):
        # save_json appends a marker string telling us where array payloads live.
        # NOTE(review): data[-1] raises IndexError for an empty list — confirm
        # empty lists are never written by save_json.
        if data[-1] == 'numpy':
            # arrays were saved uncompressed in a sibling *_array.npz file
            np_filename = filename.rsplit('.', 1)[0] + '_array.npz'
            arrays = np.load(np_filename)
            data = data[0]
        elif data[-1] == 'numpy_zip':
            # arrays were saved compressed in a sibling *_arrayzip.npz file
            np_filename = filename.rsplit('.', 1)[0] + '_arrayzip.npz'
            arrays = np.load(np_filename)
            data = data[0]
        elif data[-1] == 'numpy_embed':
            # arrays were embedded directly in the JSON as lists
            data = data[0]
        # else:
        #     return data

    def renumpy(_data):
        # Recursively rebuild numpy scalars/arrays, sets, and slices from
        # the tagged structures save_json produces. `arrays` (closure above)
        # is only defined when a 'numpy'/'numpy_zip' marker was present.
        if isinstance(_data, list):
            if len(_data) == 2 and 'numpy.' in _data[0]:
                # ['numpy.<dtype>', value] -> numpy scalar
                _data = np.dtype(_data[0][6:]).type(_data[1])
            elif len(_data) == 2 and 'set.' == _data[0]:
                _data = set(renumpy(_data[1]))
            else:
                for i in range(len(_data)):
                    _data[i] = renumpy(_data[i])
        elif isinstance(_data, dict):
            if 'numpy_array' in _data:
                if isinstance(_data['numpy_array'], list):
                    # embedded array: rebuild from the list + stored dtype/shape
                    new_data = np.array(_data['numpy_array'], dtype=_data['dtype']).reshape(_data['shape'])
                else:
                    # placeholder: look the array up in the sidecar npz file
                    new_data = arrays[_data['numpy_array']]
                    if new_data.dtype != _data['dtype'] or list(new_data.shape) != _data['shape']:
                        raise ValueError("Numpy array file doesn't match expected stored numpy array data")
                return new_data
            elif 'slice_start' in _data:
                _data = slice(_data['slice_start'], _data['slice_stop'], _data['slice_step'])
            else:
                for key in _data.keys():
                    _data[key] = renumpy(_data[key])
        return _data

    # if len(data) == 1:
    #     data = data[0]
    data = renumpy(data)
    return data
def is_jsonable(data):
    """Check to see if data is JSON serializable"""
    try:
        json.dumps(data)
    except (TypeError, OverflowError):
        return False
    return True
def save_json(data, filename, embed_arrays=True, compressed=False):
    """Save a list/dict/numpy.ndarray as a JSON file.
    Parameters
    ----------
    data : [list, dict, numpy.ndarray]
        Data to save as a JSON file
    filename : string
        Path to write the JSON to
    embed_arrays : bool [defaults to True]
        Whether to embed any numpy arrays into the JSON as lists with metadata. If false saves them to a separate file with placeholders in the JSON.
    compressed : bool [defaults to False]
        If saving numpy arrays in a separate file, this determines if they are zipped or not.
    NOTE
    ----
    JSON files saved this way can be read with any JSON reader, but will have an extra numpy tag at the end that is used to tell :func:`~gouda.load_json` how to read the arrays back in.
    """
    # arrays destined for the sidecar .npz file, keyed 'array_<n>'
    out_arrays = {}
    # single-element list so the nested function can mutate the flag
    used_arrays = [False]
    if embed_arrays and compressed:
        warnings.warn('Cannot compress an array that is embedded in a JSON', UserWarning)
        compressed = False

    def unnumpy(_data):
        # Recursively convert numpy/sets/slices into JSON-friendly tagged
        # structures that load_json's renumpy() can reverse.
        if isinstance(_data, list):
            new_data = []
            for i in range(len(_data)):
                new_data.append(unnumpy(_data[i]))
        elif isinstance(_data, set):
            new_data = ['set.', unnumpy(list(_data))]
        elif isinstance(_data, dict):
            new_data = {}
            for key in _data.keys():
                new_data[key] = unnumpy(_data[key])
        elif isinstance(_data, slice):
            new_data = {'slice_start': _data.start, 'slice_stop': _data.stop, 'slice_step': _data.step}
        elif isinstance(_data, np.ndarray):
            used_arrays[0] = True
            if embed_arrays:
                # store the array inline with enough metadata to rebuild it
                new_data = {"numpy_array": _data.tolist(), "dtype": str(_data.dtype), "shape": _data.shape}
            else:
                # store a placeholder; the array itself goes to the npz file
                new_data = {"numpy_array": 'array_{}'.format(len(out_arrays)), 'dtype': str(_data.dtype), 'shape': _data.shape}
                out_arrays['array_{}'.format(len(out_arrays))] = _data
        elif 'numpy' in str(type(_data)):
            # numpy scalar: save as ['numpy.<dtype>', python value]
            dtype = str(_data.dtype)
            if np.issubdtype(_data, np.integer):
                _data = int(_data)
            elif np.issubdtype(_data, np.floating):
                _data = float(_data)
            new_data = ['numpy.' + dtype, _data]
        else:
            new_data = copy.copy(_data)
        return new_data

    data = unnumpy(data)
    if used_arrays[0]:
        # wrap the payload and append a marker telling load_json how the
        # arrays were stored (see load_json)
        data = [data]
        if compressed:
            data.append('numpy_zip')
        elif embed_arrays:
            data.append('numpy_embed')
        else:
            data.append('numpy')
    with open(filename, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
    if len(out_arrays) != 0:
        # write the sidecar array file next to the JSON
        np_filename = filename.rsplit('.', 1)[0]
        if compressed:
            np.savez_compressed(np_filename + '_arrayzip.npz', **out_arrays)
        else:
            np.savez(np_filename + '_array.npz', **out_arrays)
def is_image(path):
    """Check if the path is an image file"""
    try:
        detected = imghdr.what(str(path))
    except IsADirectoryError:
        return False
    return detected is not None
def basicname(path):
    """Return the basename of the path without the extension"""
    stem, _ = os.path.splitext(os.path.basename(path))
    return stem
def get_sorted_filenames(pattern, sep='_', ending=True, reverse=False):
    """Sort filenames based on their numeric index component.

    Parameters
    ----------
    pattern : str
        The pattern used with glob to find files
    sep : str
        The separator between the filename and the indexing value (the default is '_')
    ending : bool
        Whether the indexing value is at the end of the filename or the start (the default is True)
    reverse : bool
        Whether to reverse the order of the returned filenames

    Notes
    -----
    ending=True with sep='_' would look like 'filename_1.txt', and ending=False would look like '1_filename.txt'.
    This method is only useful in the case where you have file_2.txt and file_10.txt, where file_10 would be sorted first with other methods because the 1 is at the same index as the 2.
    """
    def get_copy_num(x):
        # numeric index of a filename, or -1 when it has none
        x = basicname(x)
        item = x.rsplit(sep, 1) if ending else x.split(sep, 1)
        if len(item) != 2:
            return -1
        item = item[int(ending)]
        if str.isdigit(item):
            return int(item)
        else:
            return -1

    pattern = str(pattern)
    files = glob.glob(pattern)
    # widest index determines zero-padding so lexicographic order == numeric order
    max_num = -1
    for item in files:
        max_num = max(max_num, get_copy_num(item))
    digits = num_digits(max_num)
    key_string = "{:0" + str(digits) + "d}"

    def get_copy_key(x):
        # rebuild the name with a zero-padded index so string sorting is numeric
        x = basicname(x)
        item = x.rsplit(sep, 1) if ending else x.split(sep, 1)
        if len(item) != 2:
            return x
        key = item[int(ending)]
        path = item[int(~ending)]
        if str.isdigit(key):
            key = key_string.format(int(key))
        return sep.join([path, key]) if ending else sep.join([key, path])

    # BUG FIX: the caller's ``reverse`` flag is now passed through
    # (it was hard-coded to False, silently ignoring the parameter).
    return sorted(files, key=get_copy_key, reverse=reverse)
|
import csv
from Bio import SeqIO
from collections import Counter
def read_in_vars(var_handle):
    """Parse a comma-separated variants file, skipping the header row."""
    with open(var_handle) as fi:
        lines = fi.readlines()
    # one list of fields per data line
    return [line.strip().split(',') for line in lines[1:]]
def read_in_genome(genome_handle):
    """Load a FASTA file into {record id: sequence}, rejecting duplicate ids."""
    sequences = {}
    for record in SeqIO.parse(genome_handle, 'fasta'):
        # duplicate ids would silently overwrite sequences; fail loudly instead
        assert record.id not in sequences
        sequences[record.id] = record.seq
    return sequences
def char_vars(vars, genome, guppy):
    """Characterise indel variants relative to homopolymer runs in the genome.

    For each length-changing (indel) variant we ask:
      1. what nucleotide follows the variant position?
      2. is there a homopolymer of that nucleotide?
         a. if so, how long is it?

    :param vars: variant records [chrom, pos, ref, alt, ...] from read_in_vars()
    :param genome: mapping of contig id -> sequence, from read_in_genome()
    :param guppy: guppy version label (only referenced by commented-out debug prints)
    :return: dict mapping each nucleotide to a Counter of homopolymer lengths
    """
    insertion_error_in_homopolymer = 0
    deletion_error_in_homopolymer = 0
    nt_count_dict = {'A': [], 'T': [], 'C': [], 'G': []}
    for var in vars:
        #print(var)
        # Only consider indels: ref (var[2]) and alt (var[3]) differ in length.
        if len(var[2]) != len(var[3]):
            pos = int(var[1])  # 1-based position from the CSV
            ref_nt = genome[var[0]][pos - 1]
            following_nt = genome[var[0]][pos]
            # non-matching insertions into homopolymers e.g. AAA -> ATAA
            alt_string = ''.join(genome[var[0]][pos - 1: pos + 100])
            alt_string = alt_string.replace(var[2], var[3], 1)
            if alt_string.startswith(('AA', 'TT', 'CC', 'GG')):
                k = -(len(var[2]) - len(var[3]))
                # k = 0
                for nt in alt_string:
                    if nt == ref_nt:
                        # print(genome[var[0]][pos:pos + 2])
                        k += 1
                    else:
                        # print(i)
                        if k > 1:
                            insertion_error_in_homopolymer += 1
                        # if i == 1:
                        # print()
                        # print(var)
                        # print('remember that the count on the line below refers to the alt, not the ref which is the sequence below')
                        # print(f'The following nt is {ref_nt} & its count is {k}')
                        # print(f"Finding homopolymer of length 1 on position {(pos - 1)} in the genome" )
                        # print(genome[var[0]][pos-1:pos+10])
                        # print(guppy)
                        break
            # this checks for if the variant position is the same as the following base.
            # if it is, then this error is likely to be an insertion of a different base within the homopolymer
            # deletions within homopolymers e.g. ATAA -> AAA
            elif genome[var[0]][pos - 1] == genome[var[0]][pos]:
                # check that the last base of the alt isn't the same as the ref
                # not sure about this logic...
                if var[3][-1] != var[2][0]:
                    # see explanation of j below where we set i
                    j = -(len(var[2]) - len(var[3]))
                    # j = 0
                    for nt in genome[var[0]][pos - 1:pos + 1000]:
                        if nt == following_nt:
                            # print(genome[var[0]][pos:pos + 2])
                            j += 1
                        else:
                            # print(i)
                            if j > 1:
                                deletion_error_in_homopolymer += 1
                            # print()
                            # print(var)
                            # print(f'The following nt is {following_nt} & its count is {j}')
                            # print('remember that the count on the line below refers to the alt, not the ref which is the sequence below')
                            # print(f"Finding homopolymer of length 1 on position {(pos - 1)} in the genome" )
                            # print(genome[var[0]][pos-1:pos+10])
                            # print(guppy)
                            break
            else:
                # print()
                # print(var)
                # print(genome[var[0]][pos - 1: pos + 10])
                # print(guppy)
                # print(genome[var[0]][pos - 1])
                # since we are using the nanopore genome as the reference, the alt actually contains information about
                # the true length of the variants. therefore, we need to change the length of the homopolymers we're
                # reporting in order to reflect the alt, rather than the ref.
                # we do this by taking away the length of the alt from the length of the ref and then flipping the sign
                # of the answer.
                i = -(len(var[2]) - len(var[3]))
                for nt in genome[var[0]][pos:pos + 1000]:
                    if nt == following_nt:
                        # print(genome[var[0]][pos:pos + 2])
                        i += 1
                    else:
                        # print(i)
                        nt_count_dict[following_nt].append(i)
                        # if i == 1:
                        # print()
                        # print(var)
                        # print('remember that the count on the line below refers to the alt, not the ref which is the sequence below')
                        # print(f'The following nt is {following_nt} & its count is {i}')
                        # print(f"Finding homopolymer of length 1 on position {(pos - 1)} in the genome" )
                        # print(genome[var[0]][pos-1:pos+10])
                        break
    # pprint.pprint(nt_count_dict)
    # Collapse the per-nucleotide length lists into Counters for reporting.
    output_dict = {}
    for nt in nt_count_dict:
        output_dict.update({nt: Counter(nt_count_dict[nt])})
        # print(nt, Counter(nt_count_dict[nt]))
    return output_dict
def write_results(char_vars_output, guppy, output):
    """Write homopolymer-length counts to a CSV file.

    :param char_vars_output: Output of char_vars() i.e a dict where nt are keys & counter obj are values for each nt
    :param guppy: guppy version label, embedded in the count column header
    :param output: name of csv file to be produced
    :return: None
    """
    # newline='' is required when handing a file object to csv.writer;
    # without it rows are double-spaced on platforms that translate newlines.
    with open(output, 'w', newline='') as out:
        writer = csv.writer(out)
        writer.writerow(['Nuc', 'Homopolymer Length', f'{guppy}count'])
        for nt in char_vars_output.keys():
            # char_vars_output will be dict obj with key == nt & value == Counter obj
            for length in char_vars_output[nt]:  # Homopolymer length in Counter obj
                writer.writerow([nt, length, char_vars_output[nt][length]])
def run_for_each_guppy(gcsv, gfasta, guppy_version, output):
    """Characterise the variants for one guppy basecaller run and save a CSV.

    :param gcsv: path to the normalised variant CSV
    :param gfasta: path to the consensus FASTA used as reference
    :param guppy_version: label for this basecaller version (e.g. 'g3')
    :param output: path of the CSV report to write
    """
    # Arguments evaluate left-to-right: variants first, then the genome.
    characterised = char_vars(read_in_vars(gcsv), read_in_genome(gfasta), guppy_version)
    print("Writing to csv file")
    write_results(characterised, guppy_version, output)
def main():
    # Batch processing over every species / guppy version is kept below for
    # reference but is currently disabled; only the ad-hoc genome slice
    # inspection at the bottom is live.
    # # var_handle = '/Users/malcolmorian/acinetoBacterTestCase/acinetobacter_g5normed.csv'
    # #
    # # genome_handle = '/Users/malcolmorian/acinetoBacterTestCase/acineto_g5.fasta'
    # species = ['1_Acinetobacter_baumannii_J9','2_Citrobacter_koseri_MINF_9D','3_Enterobacter_kobei_MSB1_1B',
    #            '4_Haemophilus_unknown_M1C132_1','5_Klebsiella_oxytoca_MSB1_2C','6_CHF10J',
    #            '7_Klebsiella_variicola_INF345','8_Serratia_marcescens_17-147-1671']
    #
    # species_ref3_dir = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/2021.08.17_NAPA/Indel_Xtrization/g3'
    # species_ref5_dir = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/2021.08.17_NAPA/Indel_Xtrization/g5'
    #
    # for sp in species:
    #     out3 = f'{species_ref3_dir}/{sp}/{sp}_g3indelChecked.csv'
    #     out5 = f'{species_ref5_dir}/{sp}/{sp}_g5indelChecked.csv'
    #     run_for_each_guppy(f'{species_ref3_dir}/{sp}/{sp}_g3norm.csv',
    #                        f'{species_ref3_dir}/{sp}/consensus.fasta', 'g3', out3)
    #     run_for_each_guppy(f'{species_ref5_dir}/{sp}/{sp}_g5norm.csv', f'{species_ref5_dir}/{sp}/consensus.fasta', 'g5',
    #                        out5)
    # NOTE(review): hardcoded local path -- this only runs on the author's machine.
    acit = read_in_genome('/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/2021.08.17_NAPA/Indel_Xtrization/g3/1_Acinetobacter_baumannii_J9/consensus.fasta')
    print(acit['contig_3'][4325:4346])


if __name__ == '__main__':
    main()
|
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file was compiled from a KSY format file downloaded from:
# https://github.com/kaitai-io/kaitai_struct_formats
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
# Refuse to load against an older runtime API than this module was generated for.
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))


class BytesWithIo(KaitaiStruct):
    """Helper type to work around Kaitai Struct not providing an `_io` member for plain byte arrays.
    """
    SEQ_FIELDS = ["data"]

    def __init__(self, _io, _parent=None, _root=None):
        # NOTE: __init__ does not call self._read() here -- presumably this was
        # generated in debug mode, where the caller drives _read(); confirm
        # against the generator options before relying on it.
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, recorded for debugging.
        self._debug = collections.defaultdict(dict)

    def _read(self):
        # The single `data` field consumes every remaining byte of the stream.
        self._debug['data']['start'] = self._io.pos()
        self.data = self._io.read_bytes_full()
        self._debug['data']['end'] = self._io.pos()
|
'''
Feather the IRAM and NOEMA data together.
'''
import numpy as np
from spectral_cube import SpectralCube
import astropy.units as u
import scipy.ndimage as nd
from paths import iram_matched_data_path, noema_data_path
from constants import co21_freq
from cube_analysis.feather_cubes import feather_cube
# Load the NOEMA cube and the IRAM cube already regridded onto its spatial grid.
noema_cube = SpectralCube.read(noema_data_path('M33-ARM05_yclean.tc_final.image.pbcor.K.26regrid.fits'))
iram_cube = SpectralCube.read(iram_matched_data_path("m33.co21_iram.noema_regrid.spatial.fits"))
# Cut the IRAM cube to the extent of the NOEMA data
iram_cube = iram_cube.spectral_slab(noema_cube.spectral_extrema[0],
                                    noema_cube.spectral_extrema[1])
# Convert the NOEMA cube to K
# noema_cube = noema_cube.to(u.K)
# Also need the pb map. It is constant across the channels so grab the first
# channel
noema_pb = SpectralCube.read(noema_data_path('yclean_05/M33-ARM05_yclean.tc_final.pb.fits'))[0]
# Define a mask that will be used to smoothly taper the IRAM data near the
# map edges. Otherwise the comparison is dominated by ringing in the Fourier
# transform.
weight_arr = (noema_pb > 0.4).astype(float)
# Taper the edges
weight_arr = nd.gaussian_filter(weight_arr, 10)
feather_cube(noema_cube, iram_cube,
             verbose=True, save_feather=True,
             save_name=noema_data_path('M33-ARM05_yclean.tc_final.image.pbcor.K.26regrid.feather.fits', no_check=True),
             num_cores=1, chunk=100,
             restfreq=co21_freq,
             weights=weight_arr)
# Now do the same for the 0.5 km/s data
noema_cube = SpectralCube.read(noema_data_path('M33-ARM05_yclean.tc_final.image.pbcor.K.fits'))
iram_cube = SpectralCube.read(iram_matched_data_path("m33.co21_iram.noema_regrid.fits"))
# Convert the NOEMA cube to K
# noema_cube.allow_huge_operations = True
# noema_cube = noema_cube.to(u.K)
# NOTE(review): weight_arr computed from the 26regrid pb map is reused here --
# assumes the 0.5 km/s cubes share that spatial grid; confirm.
feather_cube(noema_cube, iram_cube,
             verbose=True, save_feather=True,
             save_name=noema_data_path('M33-ARM05_yclean.tc_final.image.pbcor.K.feather.fits', no_check=True),
             num_cores=1, chunk=100,
             restfreq=co21_freq,
             weights=weight_arr)
|
# -*- coding: utf-8 -*-#
# (c) Copyright IBM Corp. 2020. All Rights Reserved.
from resilient_lib.ui import Datatable, Tab, Field, create_tab
class QRadarTab(Tab):
    """Declarative layout for the "QRadar Offense Details" incident tab."""

    SECTION = "fn_qradar_integration"
    NAME = "QRadar Offense Details"
    UUID = "d1ca8936-897b-4a83-8225-01c58db0470b"
    # Fields and datatables rendered on the tab, in display order.
    CONTAINS = [
        Field("qradar_id"),
        Field("qr_offense_index_type"),
        Field("qr_offense_index_value"),
        Field("qr_offense_source"),
        Field("qr_source_ip_count"),
        Field("qr_destination_ip_count"),
        Field("qr_event_count"),
        Field("qr_flow_count"),
        Field("qr_assigned"),
        Field("qr_magnitude"),
        Field("qr_credibility"),
        Field("qr_relevance"),
        Field("qr_severity"),
        Datatable("qr_offense_top_events"),
        Datatable("qr_triggered_rules"),
        Datatable("qr_top_destination_ips"),
        Datatable("qr_top_source_ips"),
        Datatable("qr_categories"),
        Datatable("qr_assets")
    ]
    # Only display the tab once an offense id has been populated.
    SHOW_IF = [
        Field("qradar_id").conditions.has_value()
    ]


# Apply the tab definition, updating any existing copy in place.
create_tab(QRadarTab, update_existing=True)
|
from itertools import combinations, product
import pytest
from permuta import Perm
from permuta.misc.union_find import UnionFind
from tilings import GriddedPerm, Tiling
from tilings.algorithms import (
Factor,
FactorWithInterleaving,
FactorWithMonotoneInterleaving,
)
# ------------------------------------------------
# Fixture and utility
# ------------------------------------------------
@pytest.fixture
def tiling1():
    # 4x3 tiling whose cells split into two factor components:
    # {(0, 0), (3, 0)} and {(1, 1), (1, 2), (2, 2)} (see test_get_components).
    t = Tiling(
        obstructions=(
            GriddedPerm(Perm((2, 1, 0)), ((0, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((1, 2),) * 3),
            GriddedPerm(Perm((2, 0, 1)), ((3, 0),) * 3),
            GriddedPerm(Perm((1, 0)), ((1, 1),) * 2),
            GriddedPerm(Perm((1, 0)), ((2, 2),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1), (2, 2))),
        )
    )
    return t


@pytest.fixture
def tiling2():
    # 5x4 tiling with requirement lists as well as obstructions.
    t = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((3, 3),) * 2),
            # NOTE(review): the next obstruction is listed twice -- presumably a
            # harmless duplicate (Tiling appears to normalise); confirm intent.
            GriddedPerm(Perm((0, 1)), ((4, 3),) * 2),
            GriddedPerm(Perm((0, 1)), ((4, 3),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((2, 3),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((2, 2),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((3, 2),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((4, 2),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (1, 1))),
            GriddedPerm(Perm((0, 1, 2)), ((2, 2), (3, 2), (4, 2))),
            GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
        ],
        requirements=[
            [GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1)))],
            [
                GriddedPerm(Perm((0, 1)), ((2, 3), (3, 3))),
                GriddedPerm(Perm((0, 1)), ((3, 3), (4, 3))),
            ],
        ],
    )
    return t


@pytest.fixture
def not_fact_tiling():
    # Single-column tiling that must not be factorable (see test_factorable).
    not_fact_tiling = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0), (0, 0))),
            GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1))),
            GriddedPerm(Perm((0, 1)), ((0, 1), (0, 1))),
        ]
    )
    return not_fact_tiling


# Factor objects (plain and interleaving variants) over the two tilings.
@pytest.fixture
def factor1(tiling1):
    return Factor(tiling1)


@pytest.fixture
def factor2(tiling2):
    return Factor(tiling2)


@pytest.fixture
def factor1_with_int(tiling1):
    return FactorWithInterleaving(tiling1)


@pytest.fixture
def factor2_with_int(tiling2):
    return FactorWithInterleaving(tiling2)


@pytest.fixture
def factor1_with_mon_int(tiling1):
    return FactorWithMonotoneInterleaving(tiling1)


@pytest.fixture
def factor2_with_mon_int(tiling2):
    return FactorWithMonotoneInterleaving(tiling2)
# ------------------------------------------------
# Test for the class Factor
# ------------------------------------------------
def test_init(tiling1):
    # The constructor records the tiling, its active cells, and builds a
    # fresh UnionFind over the cells.
    f = Factor(tiling1)
    assert f._tiling == tiling1
    assert f._active_cells == frozenset([(0, 0), (1, 1), (2, 2), (1, 2), (3, 0)])
    assert isinstance(f._cell_unionfind, UnionFind)


# _cell_to_int / _int_to_cell must be mutually inverse bijections over the
# 4x3 grid of tiling1.
def test_cell_to_int(factor1):
    for c in product(range(4), range(3)):
        assert factor1._int_to_cell(factor1._cell_to_int(c)) == c
    assert set(range(12)) == set(
        factor1._cell_to_int(c) for c in product(range(4), range(3))
    )


def test_int_to_cell(factor1):
    for i in range(12):
        assert factor1._cell_to_int(factor1._int_to_cell(i)) == i


def test_get_cell_representative(factor1):
    # Before any union every cell is its own representative: 12 distinct reps.
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 12


def test_unite_two_cells(factor1):
    # Uniting two cells merges their classes, leaving one fewer representative.
    cells = [(0, 0), (1, 1)]
    factor1._unite_cells(cells)
    assert factor1._get_cell_representative(
        cells[0]
    ) == factor1._get_cell_representative(cells[1])
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 11


def test_unite_single_cell(factor1):
    # Uniting a singleton is a no-op.
    cells = [(0, 0)]
    factor1._unite_cells(cells)
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 12


def test_unite_multiple_cells(factor1):
    # Uniting four cells collapses them into one class: 12 - 3 = 9 reps.
    cells = [(0, 0), (1, 1), (2, 2), (3, 2)]
    factor1._unite_cells(cells)
    for c1, c2 in combinations(cells, r=2):
        assert factor1._get_cell_representative(c1) == factor1._get_cell_representative(
            c2
        )
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 9


def test_unite_no_cell(factor1):
    # With no unions performed, all 12 cells stay distinct.
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 12
def test_unite_obstruction(factor1, factor2):
    # Obstructions spanning several cells glue those cells together.
    comp1 = [(1, 1), (2, 2)]
    factor1._unite_obstructions()
    assert factor1._get_cell_representative(
        comp1[0]
    ) == factor1._get_cell_representative(comp1[1])
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 11
    comp1 = [(0, 0), (0, 1), (1, 0), (1, 1)]
    comp2 = [(2, 2), (3, 2), (4, 2)]
    factor2._unite_obstructions()
    for c1, c2 in combinations(comp1, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
    for c1, c2 in combinations(comp2, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
    all_rep = set(
        factor2._get_cell_representative(c) for c in product(range(5), range(4))
    )
    assert len(all_rep) == 15


def test_unite_requirements(factor2):
    # Requirement lists likewise glue together every cell they touch.
    comp1 = [(0, 0), (0, 1)]
    comp2 = [(2, 3), (3, 3), (4, 3)]
    factor2._unite_requirements()
    for c1, c2 in combinations(comp1, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
    for c1, c2 in combinations(comp2, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
    all_rep = set(
        factor2._get_cell_representative(c) for c in product(range(5), range(4))
    )
    assert len(all_rep) == 17


def test_same_row_or_col(factor1):
    # True when the two cells share an x-coordinate or a y-coordinate.
    assert factor1._same_row_or_col((0, 0), (2, 0))
    assert not factor1._same_row_or_col((0, 3), (2, 0))
    assert factor1._same_row_or_col((2, 0), (2, 0))
    assert factor1._same_row_or_col((2, 0), (2, 2))
def test_unite_rows_and_cols(factor1, factor2):
    # For plain Factor, active cells sharing a row or column get united.
    factor1._unite_rows_and_cols()
    comp1 = [(1, 1), (1, 2), (2, 2)]
    for c1, c2 in combinations(comp1, r=2):
        print(c1, c2)
        assert factor1._get_cell_representative(c1) == factor1._get_cell_representative(
            c2
        )
    assert factor1._get_cell_representative((0, 0)) == factor1._get_cell_representative(
        (3, 0)
    )
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 9
    factor2._unite_rows_and_cols()
    comp1 = [(0, 0), (0, 1), (1, 0), (1, 1)]
    comp2 = [(2, 2), (3, 2), (4, 2), (2, 3), (3, 3), (4, 3)]
    all_rep = set(
        factor2._get_cell_representative(c) for c in product(range(5), range(4))
    )
    assert len(all_rep) == 12
    for c1, c2 in combinations(comp1, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
    for c1, c2 in combinations(comp2, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )


def test_unite_all(factor1, factor2):
    # _unite_all combines the obstruction, requirement and row/column passes;
    # the resulting classes match test_unite_rows_and_cols for these tilings.
    factor1._unite_all()
    comp1 = [(1, 1), (1, 2), (2, 2)]
    all_rep = set(
        factor1._get_cell_representative(c) for c in product(range(4), range(3))
    )
    assert len(all_rep) == 9
    for c1, c2 in combinations(comp1, r=2):
        print(c1, c2)
        assert factor1._get_cell_representative(c1) == factor1._get_cell_representative(
            c2
        )
    assert factor1._get_cell_representative((0, 0)) == factor1._get_cell_representative(
        (3, 0)
    )
    factor2._unite_all()
    comp1 = [(0, 0), (0, 1), (1, 0), (1, 1)]
    comp2 = [(2, 2), (3, 2), (4, 2), (2, 3), (3, 3), (4, 3)]
    all_rep = set(
        factor2._get_cell_representative(c) for c in product(range(5), range(4))
    )
    assert len(all_rep) == 12
    for c1, c2 in combinations(comp1, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
    for c1, c2 in combinations(comp2, r=2):
        print(c1, c2)
        assert factor2._get_cell_representative(c1) == factor2._get_cell_representative(
            c2
        )
def test_get_components(factor1, factor2):
    """get_components() returns the sets of cells in each factor component."""
    comp1 = {(1, 1), (1, 2), (2, 2)}
    comp2 = {(0, 0), (3, 0)}
    assert comp1 in factor1.get_components()
    assert comp2 in factor1.get_components()
    assert len(factor1.get_components()) == 2
    comp1 = {(0, 0), (0, 1), (1, 0), (1, 1)}
    comp2 = {(2, 2), (3, 2), (4, 2), (2, 3), (3, 3), (4, 3)}
    assert comp1 in factor2.get_components()
    assert comp2 in factor2.get_components()
    # BUG FIX: this previously re-checked len(factor1.get_components()); the
    # count asserted here must be factor2's, whose components are checked above.
    assert len(factor2.get_components()) == 2
    # Degenerate cases: an empty tiling has no components, a point tiling one.
    empty_tiling = Tiling()
    assert Factor(empty_tiling).get_components() == tuple()
    point_tiling = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0), (0, 0))),
            GriddedPerm(Perm((1, 0)), ((0, 0), (0, 0))),
        ],
        requirements=[[GriddedPerm(Perm((0,)), ((0, 0),))]],
    )
    assert Factor(point_tiling).get_components() == ({(0, 0)},)
def test_get_factor_obs_and_reqs(factor1, factor2):
    # Each factor is returned as an (obstructions, requirements, assumptions)
    # triple, still expressed in the original tiling's cell coordinates.
    obs1 = tuple(
        sorted(
            [
                GriddedPerm(Perm((2, 1, 0)), ((0, 0),) * 3),
                GriddedPerm(Perm((2, 0, 1)), ((3, 0),) * 3),
            ]
        )
    )
    obs2 = tuple(
        sorted(
            [
                GriddedPerm(Perm((0, 1, 2)), ((1, 2),) * 3),
                GriddedPerm(Perm((1, 0)), ((1, 1),) * 2),
                GriddedPerm(Perm((1, 0)), ((2, 2),) * 2),
                GriddedPerm(Perm((0, 1)), ((1, 1), (2, 2))),
            ]
        )
    )
    f1_obs_and_reqs = factor1._get_factors_obs_and_reqs()
    assert len(f1_obs_and_reqs) == 2
    assert (obs1, tuple(), tuple()) in f1_obs_and_reqs
    assert (obs2, tuple(), tuple()) in f1_obs_and_reqs
    obs1 = tuple(
        sorted(
            [
                GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
                GriddedPerm(Perm((0, 1)), ((0, 1),) * 2),
                GriddedPerm(Perm((0, 1)), ((1, 0),) * 2),
                GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
                GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (1, 1))),
                GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
            ]
        )
    )
    obs2 = tuple(
        sorted(
            [
                GriddedPerm(Perm((0, 1)), ((3, 3),) * 2),
                GriddedPerm(Perm((0, 1)), ((4, 3),) * 2),
                GriddedPerm(Perm((0, 1, 2)), ((2, 3),) * 3),
                GriddedPerm(Perm((0, 1, 2)), ((2, 2),) * 3),
                GriddedPerm(Perm((0, 1, 2)), ((3, 2),) * 3),
                GriddedPerm(Perm((0, 1, 2)), ((4, 2),) * 3),
                GriddedPerm(Perm((0, 1, 2)), ((2, 2), (3, 2), (4, 2))),
            ]
        )
    )
    reqs1 = ((GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1))),),)
    reqs2 = (
        (
            GriddedPerm(Perm((0, 1)), ((2, 3), (3, 3))),
            GriddedPerm(Perm((0, 1)), ((3, 3), (4, 3))),
        ),
    )
    f2_obs_and_reqs = factor2._get_factors_obs_and_reqs()
    assert len(f2_obs_and_reqs) == 2
    assert (obs1, reqs1, tuple()) in f2_obs_and_reqs
    assert (obs2, reqs2, tuple()) in f2_obs_and_reqs


def test_factorable(factor1, factor2, not_fact_tiling):
    # A tiling is factorable only when it splits into more than one component.
    assert factor1.factorable()
    assert factor2.factorable()
    empty_tiling = Tiling()
    assert not Factor(empty_tiling).factorable()
    point_tiling = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0), (0, 0))),
            GriddedPerm(Perm((1, 0)), ((0, 0), (0, 0))),
        ],
        requirements=[[GriddedPerm(Perm((0,)), ((0, 0),))]],
    )
    assert not Factor(point_tiling).factorable()
    assert not Factor(not_fact_tiling).factorable()
def test_factor(factor1, factor2):
    # factors() returns each component as a standalone tiling with its cells
    # re-indexed from (0, 0) (e.g. original (3, 0) becomes (1, 0) below).
    f1 = Tiling(
        obstructions=[
            GriddedPerm(Perm((2, 1, 0)), ((0, 0),) * 3),
            GriddedPerm(Perm((2, 0, 1)), ((1, 0),) * 3),
        ]
    )
    f2 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1, 2)), ((0, 1),) * 3),
            GriddedPerm(Perm((1, 0)), ((0, 0),) * 2),
            GriddedPerm(Perm((1, 0)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 0), (1, 1))),
        ]
    )
    assert len(factor1.factors()) == 2
    assert f1 in factor1.factors()
    assert f2 in factor1.factors()
    f1 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (1, 1))),
            GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
        ],
        requirements=[[GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1)))]],
    )
    f2 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((2, 1),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((0, 1),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((1, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((2, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (2, 0))),
        ],
        requirements=[
            [
                GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
                GriddedPerm(Perm((0, 1)), ((1, 1), (2, 1))),
            ],
        ],
    )
    assert len(factor2.factors()) == 2
    assert f1 in factor2.factors()
    assert f2 in factor2.factors()


def test_reducible_factorisations():
    # With three independent cells, pairing two factors and leaving one gives
    # a reducible factorisation; the fully-split one must not be returned.
    t = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((2, 2),) * 2),
        ]
    )
    fo = Factor(t)
    f1 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
        ]
    )
    f2 = Tiling(obstructions=[GriddedPerm(Perm((0, 1)), ((0, 0),) * 2)])
    assert set([f1, f2]) in map(set, fo.reducible_factorisations())
    assert [f2, f2, f2] not in fo.reducible_factorisations()


def test_factor_empty_tiling():
    # An empty tiling has no components and is not factorable, but factors()
    # still returns the tiling itself.
    t = Tiling(obstructions=[GriddedPerm(Perm(), [])])
    for fac_algo in [Factor, FactorWithInterleaving, FactorWithMonotoneInterleaving]:
        assert fac_algo(t).get_components() == tuple()
        assert not fac_algo(t).factorable()
        assert fac_algo(t).factors() == (t,)
# ------------------------------------------------------------
# Test for the class FactorWithMonotoneInterleaving
# ------------------------------------------------------------
def test_unite_rows_and_cols_monotone_interleaving(
    factor1_with_mon_int, factor2_with_mon_int
):
    # The monotone-interleaving variant unites fewer row/column pairs than the
    # plain Factor (11 classes here vs 9 in test_unite_rows_and_cols).
    factor1_with_mon_int._unite_rows_and_cols()
    all_rep = set(
        factor1_with_mon_int._get_cell_representative(c)
        for c in product(range(4), range(3))
    )
    assert len(all_rep) == 11
    assert factor1_with_mon_int._get_cell_representative(
        (0, 0)
    ) == factor1_with_mon_int._get_cell_representative((3, 0))
    factor2_with_mon_int._unite_rows_and_cols()
    comp1 = [(2, 2), (3, 2), (4, 2), (2, 3)]
    all_rep = set(
        factor2_with_mon_int._get_cell_representative(c)
        for c in product(range(5), range(4))
    )
    assert len(all_rep) == 17
    for c1, c2 in combinations(comp1, r=2):
        assert factor2_with_mon_int._get_cell_representative(
            c1
        ) == factor2_with_mon_int._get_cell_representative(c2)


def test_mon_int_factor(factor1_with_mon_int, factor2_with_mon_int):
    # tiling1 splits into three factors under monotone interleaving (versus
    # two for plain Factor); tiling2 still splits into the same two.
    f1 = Tiling(
        obstructions=[
            GriddedPerm(Perm((2, 1, 0)), ((0, 0),) * 3),
            GriddedPerm(Perm((2, 0, 1)), ((1, 0),) * 3),
        ]
    )
    f2 = Tiling(
        obstructions=[
            GriddedPerm(Perm((1, 0)), ((0, 0),) * 2),
            GriddedPerm(Perm((1, 0)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 0), (1, 1))),
        ]
    )
    f3 = Tiling(obstructions=[GriddedPerm(Perm((0, 1, 2)), ((0, 1),) * 3)])
    assert len(factor1_with_mon_int.factors()) == 3
    assert f1 in factor1_with_mon_int.factors()
    assert f2 in factor1_with_mon_int.factors()
    assert f3 in factor1_with_mon_int.factors()
    f1 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (1, 1))),
            GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
        ],
        requirements=[[GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1)))]],
    )
    f2 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((2, 1),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((0, 1),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((1, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((2, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (2, 0))),
        ],
        requirements=[
            [
                GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
                GriddedPerm(Perm((0, 1)), ((1, 1), (2, 1))),
            ],
        ],
    )
    assert len(factor2_with_mon_int.factors()) == 2
    assert f1 in factor2_with_mon_int.factors()
    assert f2 in factor2_with_mon_int.factors()
# ------------------------------------------------------------
# Test for the class FactorWithInterleaving
# ------------------------------------------------------------
def test_unite_rows_and_cols_interleaving(factor1_with_int, factor2_with_int):
    # With full interleaving, sharing a row/column alone never unites cells:
    # every cell keeps its own representative after this pass.
    factor1_with_int._unite_rows_and_cols()
    all_rep = set(
        factor1_with_int._get_cell_representative(c)
        for c in product(range(4), range(3))
    )
    assert len(all_rep) == 12
    factor2_with_int._unite_rows_and_cols()
    all_rep = set(
        factor2_with_int._get_cell_representative(c)
        for c in product(range(5), range(4))
    )
    assert len(all_rep) == 20


def test_unite_all_interleaving(factor1_with_int):
    # Only the crossing obstruction between (1, 1) and (2, 2) unites cells.
    print(factor1_with_int._tiling)
    factor1_with_int._unite_all()
    assert factor1_with_int._get_cell_representative(
        (1, 1)
    ) == factor1_with_int._get_cell_representative((2, 2))
    all_rep = set(
        factor1_with_int._get_cell_representative(c)
        for c in product(range(4), range(3))
    )
    assert len(all_rep) == 11


def test_int_factor(factor1_with_int, factor2_with_int):
    # tiling1 splits into four factors under full interleaving; tiling2 into
    # three.
    f1 = Tiling(obstructions=[GriddedPerm(Perm((2, 1, 0)), ((0, 0),) * 3)])
    f2 = Tiling(obstructions=[GriddedPerm(Perm((2, 0, 1)), ((1, 0),) * 3)])
    f3 = Tiling(
        obstructions=[
            GriddedPerm(Perm((1, 0)), ((0, 0),) * 2),
            GriddedPerm(Perm((1, 0)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 0), (1, 1))),
        ]
    )
    f4 = Tiling(obstructions=[GriddedPerm(Perm((0, 1, 2)), ((0, 1),) * 3)])
    assert len(factor1_with_int.factors()) == 4
    assert f1 in factor1_with_int.factors()
    assert f2 in factor1_with_int.factors()
    assert f3 in factor1_with_int.factors()
    assert f4 in factor1_with_int.factors()
    f1 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((0, 1),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((1, 1),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (1, 1))),
            GriddedPerm(Perm((0, 1)), ((0, 1), (1, 1))),
        ],
        requirements=[[GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1)))]],
    )
    f2 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1, 2)), ((0, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((1, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((2, 0),) * 3),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (2, 0))),
        ]
    )
    f3 = Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((1, 0),) * 2),
            GriddedPerm(Perm((0, 1)), ((2, 0),) * 2),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0),) * 3),
        ],
        requirements=[
            [
                GriddedPerm(Perm((0, 1)), ((0, 0), (1, 0))),
                GriddedPerm(Perm((0, 1)), ((1, 0), (2, 0))),
            ],
        ],
    )
    assert len(factor2_with_int.factors()) == 3
    assert f1 in factor2_with_int.factors()
    assert f2 in factor2_with_int.factors()
    assert f3 in factor2_with_int.factors()
|
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestRates(McrouterTestCase):
    """End-to-end checks of mcrouter's --destination-rate-limiting behaviour.

    Expected rates/bursts come from test_rates.json; a rejected set leaves
    the previously stored value in place, which is what the asserts verify.
    """
    config = './mcrouter/test/test_rates.json'
    extra_args = ['--destination-rate-limiting']

    def setUp(self):
        # The order here must correspond to the order of hosts in the .json
        self.mc = self.add_server(Memcached())

    def get_mcrouter(self):
        return self.add_mcrouter(
            self.config,
            extra_args=self.extra_args)

    def test_basic(self):
        mcrouter = self.get_mcrouter()
        # Wait just over a second so the limiter starts from a clean window.
        time.sleep(1.1)
        key = "basic"
        # 2 pass, 3rd rate limited
        mcrouter.set(key, "1")
        self.assertEqual(self.mc.get(key), "1")
        mcrouter.set(key, "2")
        self.assertEqual(self.mc.get(key), "2")
        mcrouter.set(key, "3")
        self.assertEqual(self.mc.get(key), "2")

    def test_burst(self):
        mcrouter = self.get_mcrouter()
        # Wait just over a second so the limiter starts from a clean window.
        time.sleep(1.1)
        key = "burst"
        # even though rate is 4/s, only 3 pass (max burst)
        mcrouter.set(key, "1")
        self.assertEqual(self.mc.get(key), "1")
        mcrouter.set(key, "2")
        self.assertEqual(self.mc.get(key), "2")
        mcrouter.set(key, "3")
        self.assertEqual(self.mc.get(key), "3")
        mcrouter.set(key, "4")
        self.assertEqual(self.mc.get(key), "3")
|
import pandas as pd
from pipedown.nodes.base.metric import Metric
from pipedown.utils.urls import get_node_url
class MeanAbsolutePercentageError(Metric):
    """Metric node that computes the mean absolute percentage error (MAPE)."""

    CODE_URL = get_node_url("metrics/mean_absolute_percentage_error.py")

    def run(self, y_pred: pd.Series, y_true: pd.Series):
        """Return the MAPE, in percent, of predictions against true values."""
        absolute_error = (y_pred - y_true).abs()
        relative_error = absolute_error / y_true
        return 100 * relative_error.mean()

    def get_metric_name(self):
        """Identifier under which this metric is reported."""
        return "mean_absolute_percentage_error"
|
'''
Authentication demo example for Jeeves with confidentiality policies.
'''
#from macropy.case_classes import macros, case
import JeevesLib
class User:
    # Minimal principal: identified solely by userId.
    def __init__(self, userId):
        self.userId = userId


class AuctionContext():
    # Output-channel context: the viewing user, the current time, and the
    # bids known to that viewer.
    def __init__(self, user, time, bids):
        self.user = user
        self.time = time
        self.bids = bids


class Bid:
    def __init__(self, value, owner, policy):
        # Confidential bid value: labelled so that an output channel sees the
        # real value only if it is the owner or satisfies `policy`; all other
        # viewers see the default -1.
        lab = JeevesLib.mkLabel ()
        # TODO: Add policy that the output channel has to be either the owner or
        # satisfy the policy on it (policy(oc)).
        JeevesLib.restrict(lab
            , lambda oc: JeevesLib.jor(
                lambda: oc.user == owner, lambda: policy(oc)))
        self.value = JeevesLib.mkSensitive(lab, value, -1)
        self.owner = owner
|
import unittest
from brigitte.card import Card
class TestCard(unittest.TestCase):
    """Unit tests for brigitte.card.Card."""

    # Fixture values shared by every test case.
    value = 2
    sign = '♣'

    # Rank -> comparison weight. order_level differs only for '2' (see below).
    _WEIGHTS = {
        '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
        '9': 9, '10': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14,
    }

    def _make_card(self):
        # Fresh card built from the shared fixture values.
        return Card(self.value, self.sign)

    def test_init(self):
        card = self._make_card()
        self.assertEqual(card.value, str(self.value))
        self.assertEqual(card.sign, self.sign)
        self.assertIsNotNone(card.id)

    def test_str(self):
        card = self._make_card()
        self.assertEqual(str(card), card.value + card.sign)

    def test_eq(self):
        card = self._make_card()
        # Equality requires matching id as well as value/sign.
        self.assertEqual(card, Card(self.value, self.sign, card.id))
        self.assertNotEqual(card, Card(self.value, self.sign, '123'))

    def test_weight(self):
        for rank, expected_weight in self._WEIGHTS.items():
            self.assertEqual(Card(rank, self.sign).weight(), expected_weight)

    def test_order_level(self):
        # Identical to the weight table except '2' orders above an ace.
        expected_levels = {**self._WEIGHTS, '2': 15}
        for rank, expected_level in expected_levels.items():
            self.assertEqual(Card(rank, self.sign).order_level(), expected_level)

    def test_to_dict(self):
        card = self._make_card()
        payload = card.to_dict()
        self.assertEqual(payload['id'], card.id)
        self.assertEqual(payload['value'], card.value)
        self.assertEqual(payload['sign'], card.sign)

    def test_from_dict(self):
        original = self._make_card()
        restored = Card.from_dict(original.to_dict())
        self.assertEqual(restored.id, original.id)
        self.assertEqual(restored.sign, original.sign)
        self.assertEqual(restored.value, original.value)
if __name__ == '__main__':
    # Allow running this test module directly (python test_card.py).
    unittest.main()
|
from django import forms
from tally_ho.apps.tally.models.clearance import Clearance
from tally_ho.libs.models.enums import actions_prior
from tally_ho.libs.models.enums import clearance_resolution
class ClearanceForm(forms.ModelForm):
    """Model form for creating/updating a Clearance record.

    Three model fields are overridden below the Meta class to relax
    validation: `other` becomes optional free text, and the two
    recommendation fields become optional typed choices coerced to int.
    """
    class Meta:
        model = Clearance
        fields = [
            'center_name_missing',
            'center_name_mismatching',
            'center_code_missing',
            'center_code_mismatching',
            'form_already_in_system',
            'form_incorrectly_entered_into_system',
            'other',
            # Recommendations
            'action_prior_to_recommendation',
            'resolution_recommendation',
            # Comments
            'team_comment',
            'supervisor_comment']
    # Optional free-text notes.
    other = forms.CharField(required=False)
    # Enum-backed choices stored as integers; optional on submission.
    action_prior_to_recommendation = forms.TypedChoiceField(
        required=False, choices=actions_prior.ACTION_CHOICES, coerce=int)
    resolution_recommendation = forms.TypedChoiceField(
        required=False, choices=clearance_resolution.CLEARANCE_CHOICES,
        coerce=int)
|
import torch.utils.data as data
import os
import os.path
import torch
import numpy as np
import cv2
import json
from DatasetGeneration import dividePointCloud
# Root of the raw pix3d dataset (contains the images and pix3d.json).
data_dir_imgs = '/home/dream/study/codes/PCCompletion/datasets/pix3d/pix3d'
HEIGHT = 128  # target image height in pixels (not used in this module's visible code)
WIDTH = 128  # target image width in pixels
PAD = 35  # padding in pixels
# Five direction vectors -- presumably the crop/view centers used by
# dividePointCloud when generating partial clouds; TODO confirm.
choice = [torch.Tensor([1, 0, 0]), torch.Tensor([0, 0, 1]), torch.Tensor([1, 0, 1]), torch.Tensor([-1, 0, 0]),
          torch.Tensor([-1, 1, 0])]
def get_pix3d_models(cat):
    """Return pix3d.json metadata entries for category `cat`.

    Entries flagged as truncated, occluded, or slightly occluded are
    excluded so the evaluation list only contains clean views.
    """
    with open(os.path.join(data_dir_imgs, 'pix3d.json'), 'r') as f:
        all_entries = json.load(f)
    wanted_categories = [cat]  # e.g. 'sofa', 'chair', 'table'
    models = [
        entry for entry in all_entries
        if entry['category'] in wanted_categories
        and not entry['truncated']
        and not entry['occluded']
        and not entry['slightly_occluded']
    ]
    print('Total models = {}\n'.format(len(models)))
    return models
def rotate(xyz, xangle=0, yangle=0, zangle=0):
    """Rotate an (N, 3) point array about the x, y, then z axes (radians).

    Note: the y-axis matrix carries -sin in the top-right corner (the
    transpose of the usual right-handed Ry) -- kept exactly as the original
    to preserve behaviour.
    """
    cx, sx = np.cos(xangle), np.sin(xangle)
    cy, sy = np.cos(yangle), np.sin(yangle)
    cz, sz = np.cos(zangle), np.sin(zangle)
    rot_x = np.array([
        [1.0, 0.0, 0.0],
        [0.0, cx, -sx],
        [0.0, sx, cx],
    ])
    rot_y = np.array([
        [cy, 0.0, -sy],
        [0.0, 1.0, 0.0],
        [sy, 0.0, cy],
    ])
    rot_z = np.array([
        [cz, -sz, 0.0],
        [sz, cz, 0.0],
        [0.0, 0.0, 1.0],
    ])
    # Same composition order as chaining .dot() from the identity: X @ Y @ Z.
    rotmat = rot_x.dot(rot_y).dot(rot_z)
    return xyz.dot(rotmat)
def pc_normalize(pc):
    """Centre a point cloud and scale it into the unit sphere.

    Parameters
    ----------
    pc : np.ndarray, shape (N, C)
        Point cloud, one point per row.

    Returns
    -------
    np.ndarray, shape (N, C)
        The cloud translated so its centroid is at the origin and scaled so
        the farthest point lies at distance 1.
    """
    # (fix: removed the unused local `l = pc.shape[0]`)
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    # Maximum distance from the centroid defines the scale.
    m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    pc = pc / m
    return pc
catname_lower={'Chair':"chair","Table":'table'}
data_out_father='/home/dream/study/codes/PCCompletion/datasets/my_pix3d/'
class Pix3DMultiDataset(data.Dataset):
    """Pix3D dataset yielding (incomplete, gt, image, dense_gt, image_path).

    Reads pre-generated point clouds (256/1024/2025 points) and cleaned
    images from `<data_out_father>/four/<category>/`.
    Fixes over the original: removed the unused `model_path, file` split and
    factored the triplicated .pts-loading code into one helper.
    """

    def __init__(self, class_choice=None):
        # class_choice: 'Chair' or 'Table' (a key of catname_lower).
        self.cat = class_choice
        self.models = get_pix3d_models(catname_lower[self.cat])
        self.father_path = os.path.join(data_out_father, "four")

    def _load_pts(self, subdir, idx):
        # Load one .pts file as a float32 torch tensor.
        path = os.path.join(self.father_path, catname_lower[self.cat],
                            subdir, str(idx) + '.pts')
        return torch.from_numpy(np.loadtxt(path).astype(np.float32))

    def __getitem__(self, index):
        pcl_2025 = self._load_pts('pc2025', index)   # dense ground truth
        gt = self._load_pts('pc256', index)          # 256-point ground truth
        incomplete = self._load_pts('pc1024', index) # 1024-point partial input
        image_path = os.path.join(self.father_path, catname_lower[self.cat],
                                  'image_clean', str(index) + '.png')
        # OpenCV loads BGR; convert to RGB, then HWC -> CHW for torch.
        ip_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        image = torch.from_numpy(np.transpose(ip_image, (2, 0, 1)))
        return incomplete, gt, image, pcl_2025, image_path

    def __len__(self):
        return len(self.models)
class Pix3DSingleDataset(data.Dataset):
    """Pix3D dataset with 5 partial views per model (directory `three`).

    Index layout: model = index // 5. The dense cloud (pc2025) and the image
    are stored per model; pc256 and pc1024 are stored per (model, view)
    index -- this asymmetry is preserved from the original code.
    Fixes: removed the unused `model_path`/`center` locals; floor division
    instead of int(index/5).
    """

    def __init__(self, class_choice=None):
        self.cat = class_choice
        self.models = get_pix3d_models(catname_lower[self.cat])
        self.father_path = os.path.join(data_out_father, "three")

    def _load_pts(self, subdir, idx):
        # Load one .pts file as a float32 torch tensor.
        path = os.path.join(self.father_path, catname_lower[self.cat],
                            subdir, str(idx) + '.pts')
        return torch.from_numpy(np.loadtxt(path).astype(np.float32))

    def __getitem__(self, index):
        model_index = index // 5  # five partial views per model
        pcl_2025 = self._load_pts('pc2025', model_index)
        gt = self._load_pts('pc256', index)
        incomplete = self._load_pts('pc1024', index)
        image_path = os.path.join(self.father_path, catname_lower[self.cat],
                                  'image_clean', str(model_index) + '.png')
        ip_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        image = torch.from_numpy(np.transpose(ip_image, (2, 0, 1)))
        return incomplete, gt, image, pcl_2025, image_path

    def __len__(self):
        return len(self.models) * 5
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Smoke test: load the first sample of the Table split and dump the point
    # clouds to disk for manual inspection.
    dset = Pix3DSingleDataset(class_choice='Table')
    assert dset
    #dataloader = torch.utils.data.DataLoader(dset, batch_size=16, shuffle=False, num_workers=16)
    # for epoch in range(0, 10):
    #     for i, data in enumerate(dataloader, 0):
    #         incomplete, gt, image, cmpgt = data
    #         np.savetxt('cmppc' + '.pts', cmpgt[0], fmt = "%f %f %f")
    #     break
    incomplete, gt, image, cmpgt, image_path = dset[0]
    print(cmpgt.size())
    print(gt.size())
    print(image.size())
    # NOTE(review): nothing has been plotted before this call, so the figure
    # is empty -- presumably leftover from earlier debugging; confirm.
    plt.show()
    np.savetxt('cmppc' + '.pts', cmpgt, fmt="%f %f %f")
    np.savetxt('incomplete' + '.pts', incomplete, fmt="%f %f %f")
    np.savetxt('gt' + '.pts', gt, fmt="%f %f %f")
    # d = PartDataset( root='./dataset/shapenetcore_partanno_segmentation_benchmark_v0/',classification=False, class_choice=None, npoints=4096, split='test')
    # print(len(dset))
    # incomplete,gt, image, filename= dset[1000]
    #
    # print(incomplete.size())
    # print(gt.size())
    # print(image.size())
    # print(incomplete)
    # ps = ps.numpy()
    #np.savetxt('ps'+'.xyz', incomplete, fmt = "%f %f %f")
|
from interactions.aop import AffordanceObjectPair
from interactions.context import InteractionContext, InteractionSource
from interactions.priority import Priority
from sims4.tuning.tunable import TunableSimMinute, TunableReference
from sims4.utils import classproperty
from situations.situation import Situation
from situations.situation_complex import CommonInteractionCompletedSituationState, CommonSituationState, SituationComplexCommon, SituationStateData
from situations.situation_types import SituationSerializationOption
from tunable_time import TunableTimeOfDay
import alarms
import clock
import services
class CelebrateState(CommonSituationState):
    # Terminal "celebrate" state; all behaviour comes from
    # CommonSituationState and the tuned factory in CountdownSituation.
    pass
class CountdownState(CommonSituationState):
    """Pre-celebration state: repeatedly pushes a 'count' mixer on every sim
    running the countdown interaction, then transitions to the celebrate
    state at the tuned time of day."""
    FACTORY_TUNABLES = {'countdown_affordance': TunableReference(manager=services.affordance_manager()), 'count_mixer': TunableReference(manager=services.affordance_manager()), 'celebrate_time': TunableTimeOfDay(description='\n Time of Day to Celebrate\n ', default_hour=0), 'time_to_start_count': TunableTimeOfDay(description='\n Time to start performing the Count.\n ', default_hour=11, default_minute=30), 'interval_between_counts': TunableSimMinute(description='\n The interval between each count animation.\n ', minimum=1, default=5)}
    def __init__(self, *args, countdown_affordance=None, count_mixer=None, celebrate_time=None, time_to_start_count=None, interval_between_counts=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.countdown_affordance = countdown_affordance
        self.count_mixer = count_mixer
        self.celebrate_time = celebrate_time
        self.time_to_start_count = time_to_start_count
        self.interval_between_counts = interval_between_counts
        # Alarm handles: created in on_activate, cancelled in on_deactivate.
        self._celebrate_timer = None
        self._count_timer = None
    def _count_callback(self, _):
        # Push the count mixer on every sim currently running the countdown
        # super-interaction; sims without it are skipped.
        for sim in self.owner.all_sims_in_situation_gen():
            parent_si = sim.si_state.get_si_by_affordance(self.countdown_affordance)
            if parent_si is not None:
                interaction_context = InteractionContext(sim, InteractionSource.PIE_MENU, Priority.Critical)
                aop = AffordanceObjectPair(self.count_mixer, None, self.countdown_affordance, parent_si)
                aop.test_and_execute(interaction_context)
    def _celebrate_callback(self, _):
        # Celebration time reached: advance the owning situation's state.
        self._change_state(self.owner.celebrate_state())
    def on_activate(self, reader=None):
        # Schedule the repeating count alarm and the one-shot celebrate alarm.
        super().on_activate(reader)
        now = services.game_clock_service().now()
        time_till_first_count = now.time_till_next_day_time(self.time_to_start_count)
        time_till_celebration = now.time_till_next_day_time(self.celebrate_time)
        repeat_time_span = clock.interval_in_sim_minutes(self.interval_between_counts)
        if time_till_first_count > time_till_celebration:
            # The next occurrence of the count start time is after the
            # celebration, i.e. the counting window already began: anchor to
            # yesterday's start time and snap the first alarm onto the next
            # whole interval boundary.
            time_of_first_count = now + time_till_first_count + clock.interval_in_sim_days(-1)
            time_since_first_count = now - time_of_first_count
            time_of_next_count = time_of_first_count + repeat_time_span*(int(time_since_first_count.in_ticks()/repeat_time_span.in_ticks()) + 1)
            time_till_first_count = time_of_next_count - now
        self._count_timer = alarms.add_alarm(self, time_till_first_count, self._count_callback, repeating=True, repeating_time_span=repeat_time_span)
        self._celebrate_timer = alarms.add_alarm(self, time_till_celebration, self._celebrate_callback)
    def on_deactivate(self):
        # Cancel outstanding alarms so callbacks never fire on a dead state.
        super().on_deactivate()
        if self._count_timer is not None:
            alarms.cancel_alarm(self._count_timer)
            self._count_timer = None
        if self._celebrate_timer is not None:
            alarms.cancel_alarm(self._celebrate_timer)
            self._celebrate_timer = None
class CountdownSituation(SituationComplexCommon):
    """Two-state situation: count down (repeating mixer pushes) and then
    celebrate at a tuned time of day."""
    INSTANCE_TUNABLES = {'celebrate_state': CelebrateState.TunableFactory(locked_args={'time_out': None, 'allow_join_situation': None}), 'countdown_state': CountdownState.TunableFactory(locked_args={'time_out': None, 'allow_join_situation': None})}
    REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
    @classmethod
    def _states(cls):
        # State 1 (countdown) is entered first via start_situation below.
        return (SituationStateData(1, CountdownState, factory=cls.countdown_state), SituationStateData(2, CelebrateState, factory=cls.celebrate_state))
    @classproperty
    def situation_serialization_option(cls):
        # This situation is never persisted across saves.
        return SituationSerializationOption.DONT
    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        return list(cls.countdown_state._tuned_values.job_and_role_changes.items())
    @classmethod
    def default_job(cls):
        # No default job for this situation.
        pass
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): these mirror CountdownState's alarm handles but are
        # never assigned elsewhere in this class -- possibly vestigial.
        self._celebrate_timer = None
        self._count_timer = None
    def start_situation(self):
        super().start_situation()
        self._change_state(self.countdown_state())
|
import random
from operator import add, sub, mul
from brain_games.constants import MINIMAL_RANDOM, MAXIMAL_RANDOM
def greeting():
    """Return the rules prompt shown before the calculator game starts."""
    prompt = 'What is the result of the expression?'
    return prompt
def main_action():
    """Build one game round: an 'a op b' question string and its answer.

    Returns a (question, answer) pair; the answer is stringified for
    comparison against user input.
    """
    # Keep the RNG consumption order of the original implementation:
    # two randint calls, then one choice call.
    first_el = random.randint(MINIMAL_RANDOM, MAXIMAL_RANDOM)
    second_el = random.randint(MINIMAL_RANDOM, MAXIMAL_RANDOM)
    operations = (
        ('+', add),
        ('-', sub),
        ('*', mul)
    )
    symbol, function = random.choice(operations)
    str_to_question = '{} {} {}'.format(
        first_el,
        symbol,
        second_el)
    correct_ans = str(function(first_el, second_el))
    return str_to_question, correct_ans
|
# This file is designed to perform unit and integration tests on all functions
# in parabolic_pde_solver.py
# Test have been written with TDD in mind where possible
import pytest
import numpy as np
from math import pi, isclose
from unittest.mock import MagicMock
from parabolic_pde_solver import pde_solve, diags_m, create_A_CN
from parabolic_pde_solver import create_B_CN, create_A_BE, create_A_FE
from parabolic_pde_solver import solve_BE, solve_FE, solve_CN
from scipy.sparse import diags, csr_matrix
base_mx = 100 # number of gridpoints in space
base_mt = 1000 # number of gridpoints in time
base_L = 11 # length of spatial domain
base_T = 0.5 # total time to solve for
base_u_I = lambda x: np.sin(pi*x/base_L) # initial temperature distribution
def test_unit_ensure_returns():
    """pde_solve with the base parameters must return a non-None result."""
    # Act
    res = pde_solve(base_L, base_T, base_u_I, base_mx, base_mt)
    # Assert -- identity comparison, not `!= None` (PEP 8 / E711).
    assert res is not None
def test_unit_throws_if_not_passed_specified_params():
    """Calling pde_solve() with no arguments must raise.

    Missing required positional arguments raise TypeError; pytest.raises is
    both stricter and clearer than the original try/except-Exception/flag
    pattern, which accepted ANY exception type.
    """
    with pytest.raises(TypeError):
        pde_solve()
def test_integration_ensures_that_u_I_is_called_multiple_times():
    u_I_spy = MagicMock()
    pde_solve(base_L, base_T, u_I_spy, base_mx, base_mt)
    # The solver samples the initial condition once per spatial gridpoint.
    assert u_I_spy.call_count == base_mx

def test_unit_ensures_that_u_I_is_called_with_correct_args():
    # The assertion lives inside the fake: every x handed to u_I is a float.
    def fake_u_I(x):
        assert isinstance(x, float)
    pde_solve(base_L, base_T, fake_u_I, base_mx, base_mt)

def test_int_ensures_the_bcf_func_is_called_corrrectly():
    bcf_spy = MagicMock(return_value=[0, 0])
    pde_solve(base_L, base_T, base_u_I, base_mx, base_mt, bcf=bcf_spy)
    # One boundary-condition evaluation per timestep.
    assert bcf_spy.call_count == base_mt

def test_unit_ensures_bcf_is_called_with_the_correct_params():
    # The assertion lives inside the fake: t arrives as a numpy float64.
    def fake_bcf(t):
        assert type(t) == np.float64
        return [0, 0]
    pde_solve(base_L, base_T, base_u_I, base_mx, base_mt, bcf=fake_bcf)
def test_E2E_agaist_exact_solution_to_heat_equation():
    """The numerical solution must match the analytic heat-equation solution
    pointwise to within 1e-3."""
    kappa = 1   # diffusion constant
    L = 11      # length of spatial domain
    T = 0.5     # total time to solve for
    mx = 100    # number of gridpoints in space
    mt = 1000   # number of gridpoints in time

    def u_I(x):
        # initial temperature distribution
        return np.sin(pi * x / L)

    def u_exact(x, t):
        # analytic solution of the heat equation for this initial condition
        return np.exp(-kappa * (pi ** 2 / L ** 2) * t) * np.sin(pi * x / L)

    [u_j, x, t] = pde_solve(L, T, u_I, mx, mt, f_kappa=lambda x: 1)

    checked_any = False
    for i, numeric_value in enumerate(u_j):
        assert isclose(numeric_value, u_exact(x[i], T), abs_tol=1e-3)
        checked_any = True
    # Guard against a silently-empty solution vector.
    assert checked_any

def test_E2E_agaist_heat_equation_varying_bcf():
    """Time-varying Dirichlet BCs: at t=T both boundary values equal T."""
    kappa = 1   # diffusion constant (unused in this test)
    L = 11      # length of spatial domain
    T = 0.5     # total time to solve for
    mx = 10     # number of gridpoints in space
    mt = 10     # number of gridpoints in time

    def u_I(x):
        # initial temperature distribution
        return np.sin(pi * x / L)

    def bcf(t):
        # both boundaries track the current time
        return [t, t]

    [u_j, x, t] = pde_solve(L, T, u_I, mx, mt, bcf=bcf)

    assert u_j[0] == T
    assert u_j[-1] == T
def test_E2E_agaist_heat_equation_varying_diffusion_coefficient():
    # NOTE(review): a second function with this exact name is defined further
    # down in this module, so this definition is rebound at import time and
    # this test never runs under pytest.
    # Arrange
    # set problem parameters/functions
    L=11 # length of spatial domain
    T=0.5 # total time to solve for
    # set numerical parameters
    mx = 10 # number of gridpoints in space
    mt = 1000 # number of gridpoints in time
    # define initial params
    def u_I(x):
        # initial temperature distribution
        y = np.sin(pi*x/L)
        return y
    # Act
    # solve the heat equation with a spatially-varying diffusion coefficient
    [u_j, x, t] = pde_solve(L, T, u_I, mx, mt, f_kappa=lambda x: x)
    print("u_j {}".format(u_j))
    # regression check against precomputed values
    assert np.isclose(u_j, [0, 0.26789984, 0.40498984, 0.49342122, 0.52927611, 0.5140508,
        0.45421537, 0.36018634, 0.24470462, 0.12086154, 0]).all()
def test_E2E_agaist_heat_equation_with_heat_source():
    """Heat equation with a piecewise-constant source term.

    Renamed (fix): this function previously duplicated the name of the
    varying-diffusion test above, shadowing it so only one of the two
    actually ran under pytest.
    """
    # set problem parameters/functions
    L = 11     # length of spatial domain
    T = 0.5    # total time to solve for
    # set numerical parameters
    mx = 10    # number of gridpoints in space
    mt = 1000  # number of gridpoints in time

    def u_I(x):
        # initial temperature distribution
        return np.sin(pi * x / L)

    def heat_source(x, t):
        # -1 on the left half of the domain, +1 on the right half
        return np.piecewise(x, [x < 5, x >= 5], [-1, 1])

    # Act: solve the heat equation with the source term applied
    [u_j, x, t] = pde_solve(L, T, u_I, mx, mt, heat_source=heat_source)
    print("u_j {}".format(u_j))
    # regression check against precomputed values
    assert np.isclose(u_j, [0, -863.00360932, -983.72884818, -972.80346348,-751.16896889,753.06057621,974.46691545,984.96351132,863.66058543,0]).all()
""" The below section contains test for diags_m"""
def test_unit_tri_diag_returns_a_grid_of_the_correct_size():
# Arrange
m = 2
n = 1
# Act
M = diags_m(m, n)
# Assert
assert M.shape == (2,1)
def test_unit_expected_output_1():
# Arrange
# Act
M = diags_m(2, 2, [-1, 0, 1], [1, 2, 3])
# Assert
np.testing.assert_array_equal(M, [[2, 3], [1, 2]])
def test_unit_expected_output_2():
# Arrange
# Act
M = diags_m(2, 2, [-1, 0, 1], [5, 3, 4])
# Assert
np.testing.assert_array_equal(M, [[3, 4], [5, 3]])
"""The above section contains tests for diags_m"""
"""The below contains tests for create_ACN"""
def test_E2E_createACN_outputs_the_correct_matrix1():
# Arrange
x=np.ones(10)
f_kappa=lambda x: 1
deltax=1
deltat=1
# Act
ACN = create_A_CN(x, f_kappa, deltat, deltax, logger=False)
# Assert
looped = False
dense = ACN.todense()
[x, y] = dense.shape
for i in range(x):
for j in range(y):
if i==j:
assert dense.item((i,j)) == 2
looped = True
elif abs(i-j) == 1:
assert dense.item((i,j)) == -0.5
else:
assert dense.item((i,j)) == 0
assert looped == True
def test_E2E_createACN_outputs_the_correct_matrix2():
# Arrange
x=np.ones(10)
f_kappa=lambda x: 2*x
deltax=1
deltat=1
# Act
ACN = create_A_CN(x, f_kappa, deltat, deltax, logger=False)
# Assert
looped = False
dense = ACN.todense()
[x, y] = dense.shape
for i in range(x):
for j in range(y):
if i==j:
assert dense.item((i,j)) == 3
looped = True
elif abs(i-j) == 1:
assert dense.item((i,j)) == -1
else:
assert dense.item((i,j)) == 0
assert looped == True
"""The above contains test for create_ACN"""
"""The below contains tests for create_BCN"""
def test_E2E_create_BCN_outputs_the_correct_matrix1():
# Arrange
x=np.ones(10)
f_kappa=lambda x: 1
deltax=1
deltat=1
# Act
BCN = create_B_CN(x, f_kappa, deltat, deltax, logger=False)
# Assert
looped = False
dense = BCN.todense()
[x, y] = dense.shape
for i in range(x):
for j in range(y):
if i==j:
assert dense.item((i,j)) == 0
looped = True
elif abs(i-j) == 1:
assert dense.item((i,j)) == 0.5
else:
assert dense.item((i,j)) == 0
assert looped == True
def test_E2E_createBCN_outputs_the_correct_matrix2():
# Arrange
x=np.ones(10)
f_kappa=lambda x: 2*x
deltax=1
deltat=1
# Act
BCN = create_B_CN(x, f_kappa, deltat, deltax, logger=False)
# Assert
looped = False
dense = BCN.todense()
[x, y] = dense.shape
for i in range(x):
for j in range(y):
if i==j:
assert dense.item((i,j)) == -1
looped = True
elif abs(i-j) == 1:
assert dense.item((i,j)) == 1
else:
assert dense.item((i,j)) == 0
assert looped == True
"""The above contains tests for create_BCN"""
"""The below contains test for create_ABE"""
def test_E2E_create_ABE_outputs_the_correct_matrix1():
# Arrange
x=np.ones(10)
f_kappa=lambda x: 1
deltax=1
deltat=1
# Act
ABE = create_A_BE(x, f_kappa, deltat, deltax, logger=False)
# Assert
looped = False
dense = ABE.todense()
[x, y] = dense.shape
for i in range(x):
for j in range(y):
if i==j:
assert dense.item((i,j)) == 3
looped = True
elif abs(i-j) == 1:
assert dense.item((i,j)) == -1
else:
assert dense.item((i,j)) == 0
assert looped == True
"""The above contains tests for create_ABE"""
"""The below contains tests for create AFE"""
def test_E2E_create_AFE_outputs_the_correct_matrix1():
# Arrange
x=np.ones(10)
f_kappa=lambda x: 1
deltax=1
deltat=1
# Act
[AFE, lmbda_v] = create_A_FE(x, f_kappa, deltat, deltax, logger=False)
# Assert
looped = False
dense = AFE.todense()
[x, y] = dense.shape
for i in range(x):
assert lmbda_v[i] == 1
for j in range(y):
if i==j:
assert dense.item((i,j)) == -1
looped = True
elif abs(i-j) == 1:
assert dense.item((i,j)) == 1
else:
assert dense.item((i,j)) == 0
assert looped == True
"""The above contains tests for create_AFE"""
"""The below contains tests for solve_CN"""
def test_E2E_solve_CN_outputs_the_correct_result():
# Arrange
u_j=[2, 3, 4]
A_CN=csr_matrix([[1, -2, 0],[-2, 1, -2],[0, -2, 1]])
B_CN=csr_matrix([[2,3,6],[5,6,9],[1,2,2]])
heat_j=[7,1,5]
# Act
u_jp1 = solve_CN(u_j, A_CN, B_CN, heat_j)
# Assert
assert np.allclose(u_jp1, [ -4.57142857, -23.28571429, -27.57142857])
"""The above contains tests for solve_CN"""
"""The above contains tests for solve_FE"""
def test_E2E_solve_FE_outputs_the_correct_result():
# Arrange
u_j=[0,0,0]
A_BE=csr_matrix([[1, -2, 0],[-2, 1, -2],[0, -2, 1]])
heat_j=csr_matrix([0,1,0])
lmbda=8
bc1=6
bc2=5
# Act
u_jp1 = solve_FE(u_j, A_BE, heat_j, lmbda, bc1,bc2)
# Assert
assert np.allclose(u_jp1, [48, 1, 40])
"""The above contains tests for solve_FE"""
"""The below contains tests for solve_BE"""
def test_E2E_solve_BE_outputs_the_correct_result():
# Arrange
u_j=[5,8,9]
A_BE=csr_matrix([[1, -2, 0],[-2, 1, -2],[0, -2, 1]])
heat_j=[0,1,0]
# Act
u_jp1 = solve_BE(u_j, A_BE, heat_j)
# Assert
assert np.allclose(u_jp1, [-5.28571429, -4.14285714, -1.28571429])
"""The above contains tests for solve_BE"""
|
from utils.collectors import GitHubVersionCollector
from utils.configuration import Configuration
class NginxVersionCollector(GitHubVersionCollector):
    """Version collector backed by the nginx/nginx GitHub repository."""

    owner = "nginx"
    repo = "nginx"

    def __init__(self, config: Configuration):
        super().__init__(config, self.owner, self.repo)

    @staticmethod
    def get_application_name() -> str:
        return "nginx"
class CalicoVersionCollector(GitHubVersionCollector):
    """Version collector backed by the projectcalico/calico GitHub repository."""

    owner = "projectcalico"
    repo = "calico"

    def __init__(self, config: Configuration):
        super().__init__(config, self.owner, self.repo)

    @staticmethod
    def get_application_name() -> str:
        return "calico"
class HaproxyVersionCollector(GitHubVersionCollector):
    """Version collector backed by the haproxy/haproxy GitHub repository."""

    owner = "haproxy"
    repo = "haproxy"

    def __init__(self, config: Configuration):
        super().__init__(config, self.owner, self.repo)

    @staticmethod
    def get_application_name() -> str:
        return "haproxy"
class IstioVersionCollector(GitHubVersionCollector):
    """Version collector backed by the istio/istio GitHub repository."""

    owner = "istio"
    repo = "istio"

    def __init__(self, config: Configuration):
        super().__init__(config, self.owner, self.repo)

    @staticmethod
    def get_application_name() -> str:
        return "istio"
|
import pytest
from amshared.driverpack import DriverPack
def test_driverpack_singleton(drvpack):
    # Singleton mode: the first instantiation of each driver is cached and
    # returned on every later access (identity-equal).
    dp = DriverPack(drvpack, singleton=True, autoinject=False)
    dp.update(x=1)
    dp.update(y=2)
    dp.update(a='one', b='two')
    dp.update(z=3, c='three') # extra
    fun_inst = dp['fun']
    assert fun_inst is dp['fun']
    del dp['x']
    assert ' is ' in dp['fun']() # 'x' not required: driver instance reused
    cls_inst = dp['cls']
    assert cls_inst is dp['cls']
    del dp['y']
    assert ' means ' in dp['cls']()
def test_driverpack_attributes_empty(drvpack):
    # Default configuration: attribute access for injected keys yields None.
    dp = DriverPack(drvpack, singleton=True)
    dp.update(x=1, y=2)
    assert dp.x is None
    assert dp.y is None
def test_driverpack_attributes_none(drvpack):
    # keys_as_attributes=None: injected keys are readable as attributes.
    dp = DriverPack(drvpack, singleton=True, keys_as_attributes=None)
    dp.update(x=1, y=2)
    assert dp.x == 1
    assert dp.y == 2
def test_driverpack_attributes_list(drvpack):
    # Explicit list: only the listed keys are readable as attributes.
    dp = DriverPack(drvpack, singleton=True, keys_as_attributes=['x'])
    dp.update(x=1, y=2)
    assert dp.x == 1
    assert dp.y is None
def test_driverpack_fails(drvpack):
    # Non-singleton mode: each access builds a fresh instance, and a missing
    # required key raises TypeError naming the key.
    dp = DriverPack(drvpack, singleton=False, autoinject=False)
    dp.update(x=1)
    dp.update(y=2)
    dp.update(a='one', b='two')
    dp.update(z=3, c='three') # extra
    fun_inst = dp['fun']
    assert fun_inst is not dp['fun']
    cls_inst = dp['cls']
    del dp['y']
    with pytest.raises(TypeError, match='y'):
        print(dp['cls'])
    assert ' means ' in cls_inst()
def test_driverpack_autoinject(drvpack):
    # autoinject: missing driver arguments are produced by factories
    # registered in dp.pack; removing a factory makes access raise.
    dp = DriverPack(drvpack, singleton=False, autoinject=True)
    dp.pack.update(x=lambda: 11)
    dp.pack.update(a=lambda: 'eleven')
    fun_inst = dp['fun']
    assert fun_inst() == "'eleven' is 11"
    del dp.pack['a']
    with pytest.raises(TypeError, match='a'):
        fun_inst = dp['fun']
    def multiplied(x, a=2):
        return x * a
    dp.pack.update(mult=multiplied)
    # Default parameter used when 'a' is not injected...
    assert dp['mult'] == 22
    dp.update(a=0)
    # ...but an explicitly injected value takes precedence over the default.
    assert dp['mult'] == 0
def test_driverpack_cascade(drvpack):
    # cascade_delete('a') removes the key and every cached driver built from
    # it ('fun'), while unrelated keys ('x') survive.
    dp = DriverPack(drvpack, singleton=True, autoinject=True)
    dp['x'] = 11
    dp['a'] = 'eleven'
    fun_inst = dp['fun']
    assert fun_inst() == "'eleven' is 11"
    dp.cascade_delete('a')
    assert 'x' in dp
    assert 'fun' not in dp
def test_driverpack_close(drvpack):
    # Deleting a driver closes it: the instance loses its `content` state but
    # keeps its methods.
    dp = DriverPack(drvpack, singleton=True, autoinject=True)
    dp['content'] = 'Secret'
    box = dp['secret']
    assert box.content == 'Secret'
    del dp['secret']
    assert hasattr(box, 'reveal') is True
    assert hasattr(box, 'content') is False
def test_driverpack_with(drvpack):
    # Using DriverPack as a context manager closes instantiated drivers on exit.
    with DriverPack(drvpack, singleton=True, autoinject=True) as dp:
        dp['content'] = 'Secret'
        box = dp['secret']
    assert hasattr(box, 'reveal') is True
    assert hasattr(box, 'content') is False
|
from .parser import *
from .node import *
|
import logging
import hocort.execute as exe
from hocort.classifiers.classifier import Classifier
logger = logging.getLogger(__file__)
class Kraken2(Classifier):
    """
    Kraken2 implementation of the Classifier abstract base class.
    """
    def build_index(path_out, fasta_in, threads=1, options=None, **kwargs):
        """
        Builds an index by running the four kraken2-build stages in order:
        download-taxonomy, add-to-library, build, clean.

        Parameters
        ----------
        path_out : string
            Path where the output index is written.
        fasta_in : string
            Path where the input FASTA file is located.
        threads : int
            Number of threads to use.
        options : list
            An options list where additional arguments may be specified.
            Default changed from a shared mutable [] to None (same effective
            behaviour; avoids the mutable-default pitfall).
            NOTE(review): accepted but never forwarded to any kraken2-build
            invocation in the original code either -- confirm intent.

        Returns
        -------
        returncode : int
            0 on success; otherwise the returncode of the first failing
            stage, or 1 when a required argument is missing.
        """
        if not path_out or not fasta_in: return 1
        # Every stage shares the 'kraken2-build --threads N ... --db path_out'
        # skeleton; only the log message and stage-specific flags differ.
        stages = (
            ('Downloading taxonomy, this may take a while...', ['--download-taxonomy']),
            ('Adding reference fasta to library...', ['--add-to-library', fasta_in]),
            ('Building database...', ['--build']),
            ('Cleaning up...', ['--clean']),
        )
        for message, stage_args in stages:
            logger.info(message)
            cmd = ['kraken2-build', '--threads', str(threads)] + stage_args + ['--db', path_out]
            returncode, stdout, stderr = exe.execute(cmd)
            logger.info('\n' + stdout)
            logger.info('\n' + stderr)
            if returncode != 0:
                # Abort the pipeline on the first failing stage.
                return returncode
        return 0

    def classify(index, seq1, classified_out, unclassified_out, seq2=None, threads=1, options=None):
        """
        Matches sequences to a reference database and classifies them.

        Parameters
        ----------
        index : string
            Path where the index (database) is located.
        seq1 : string
            Path where the first input FastQ file is located.
        classified_out : string
            Path where the output FastQ file with matching sequences is written.
        unclassified_out : string
            Path where the output FastQ file with non-matching sequences is written.
        seq2 : string
            Path where the second input FastQ file is located (paired mode).
        threads : int
            Number of threads to use.
        options : list
            An options list where additional arguments may be specified.
            Default changed from a shared mutable [] to None.

        Returns
        -------
        returncode : int
            Resulting returncode after the process is finished, or 1 when a
            required argument is missing.
        """
        if not index or not seq1 or not classified_out or not unclassified_out: return 1
        extra_options = [] if options is None else list(options)
        cmd = ['kraken2', '--threads', str(threads), '--db', index,
               '--classified-out', classified_out,
               '--unclassified-out', unclassified_out] + extra_options
        if seq2:
            # Paired-end mode: both mates supplied after --paired.
            cmd += ['--paired', seq1, seq2]
        else:
            cmd += [seq1]
        returncode, stdout, stderr = exe.execute(cmd)
        #logger.info('\n' + stdout)
        logger.info('\n' + stderr)
        return returncode
|
import uuid
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView, DetailView, CreateView, TemplateView
from .models import PostJobModel, ApplicationModel
from user_profile.models import UserProfileInfo
from users.models import CustomUser
from .forms import PostJobForm
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.urls import reverse_lazy
from django.db.models import Q
from django.shortcuts import render, redirect, HttpResponseRedirect, reverse
@login_required
def createJobView(request):
    """Render and process the job-post form.

    POST: validate the form, create the PostJobModel row, flag the user as
    having posted, and redirect to the listing. GET: render an empty form.
    Invalid submissions render an error page with the form errors.
    """
    if request.method == "POST":
        form = PostJobForm(request.POST)
        if form.is_valid():
            Job_title = form.cleaned_data['Job_title']
            Company = form.cleaned_data['Company']
            Job_location = form.cleaned_data['Job_location']
            Employee_type = form.cleaned_data['Employee_type']
            Description = form.cleaned_data['Description']
            Add_skills = form.cleaned_data['Add_skills']
            p = PostJobModel.objects.create(Job_title=Job_title, Company=Company,
            Job_location=Job_location, Employee_type=Employee_type, Description=Description,
            Add_skills=Add_skills, Job_author_id=request.user.id)
            # NOTE(review): objects.create() already persists the row, so this
            # save() issues a redundant second write -- confirm before removing.
            p.save()
            CustomUser.objects.filter(id=request.user.id).update(is_posted_job=True)
            return redirect('onlyRedirect')
        else:
            print(form.errors)
            message = form.errors
            return render(request, "message/error.html", {'message': message})
    else:
        form = PostJobForm()
    return render(request, 'jobs/post_job.html', {'form':form})
def onlyRedirect(request):
    """Render the approved-job listing with a 'waiting for approval' banner."""
    context = {
        'message': 'Your job post is waiting for approval',
        'job_list': PostJobModel.objects.filter(Is_approved=True),
    }
    return render(request, 'jobs/job_list.html', context)
class JobListView(ListView):
    """Public listing of approved job posts."""
    model = PostJobModel
    context_object_name = 'job_list'
    template_name = 'jobs/job_list.html'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Only approved posts are shown, overriding the default queryset.
        context['job_list'] = PostJobModel.objects.filter(Is_approved=True)
        if self.request.user.id:
            # Whether the logged-in user has already posted a job --
            # presumably consumed by the template; confirm usage there.
            context['is_posted_job'] = CustomUser.objects.filter(id=self.request.user.id).values('is_posted_job')[0]["is_posted_job"]
        return context
class JobsDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single job post (login required)."""
    model = PostJobModel
    context_object_name = 'job_detail'
    template_name = 'jobs/job_detail.html'

    def get_context_data(self, **kwargs):
        """Add a flag telling the template whether the user has a profile."""
        # Call the base implementation first to get a context.
        context = super().get_context_data(**kwargs)
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # whole queryset via truthiness, and replaces the verbose if/else
        # that assigned literal True/False.
        context['is_user_profile_created'] = UserProfileInfo.objects.filter(
            id_id=self.request.user.id).exists()
        return context
class SearchResultsListView(ListView):
    """Search approved job posts by keyword (q1) and location (q2)."""
    model = PostJobModel
    context_object_name = 'search_job_list'
    template_name = 'jobs/search_results.html'

    def get_queryset(self):
        """Approved posts whose title/skills/company match q1 and whose
        location matches q2 (both case-insensitive substring matches)."""
        keyword = self.request.GET.get('q1')
        location = self.request.GET.get('q2')
        keyword_match = (
            Q(Job_title__icontains=keyword)
            | Q(Add_skills__icontains=keyword)
            | Q(Company__icontains=keyword)
        )
        # FIX: Is_approved is a boolean field -- `icontains=True` coerced it
        # to a string LIKE match; a direct equality lookup is correct.
        return PostJobModel.objects.filter(
            keyword_match
            & Q(Job_location__icontains=location)
            & Q(Is_approved=True)
        )
@login_required
def applicantCreateView(request):
    """Create an application for the posted job id, rejecting duplicates.

    Expects a POST with an 'id' field holding the job's UUID; a user may
    apply to a given job at most once.
    """
    Job_id = uuid.UUID(request.POST.get('id'))
    Job_title = PostJobModel.objects.filter(Job_id=Job_id)[0].Job_title
    Applicant_id = request.user.id
    a = ApplicationModel.objects.filter(Applicant_id=request.user.id, Job_id=Job_id)
    if a.exists():
        return render(request, 'jobs/already_applied.html', {'message': 'You have already applied to this post.'})
    else:
        p = ApplicationModel.objects.create(Job_id=Job_id, Job_title=Job_title,
        Applicant_id=Applicant_id, first_name=request.user.first_name, last_name=request.user.last_name)
        # NOTE(review): objects.create() already persists the row; this save()
        # issues a redundant second write -- confirm before removing.
        p.save()
        return redirect('application_done')
class ApplicationDone(TemplateView):
    # Static "application submitted" confirmation page.
    template_name = 'jobs/application_done.html'
class ApplicantList(LoginRequiredMixin, ListView):
    """Applicants for job posts authored by the logged-in user."""
    model = ApplicationModel
    context_object_name = 'applicant_list'
    template_name = 'jobs/applicant_list.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Restrict to applications whose job was authored by the current user.
        context['applicant_list'] = ApplicationModel.objects.filter(Job__Job_author_id=self.request.user.id)
        return context
|
import torch
from torchvision.transforms import ToPILImage
from torchvision.utils import make_grid
from matplotlib import pyplot as plt
try:
    import plotly.graph_objects as go
except ImportError:
    # Plotly is optional: only the plotly_* helpers below require it.
    # Narrow except replaces the bare `except:` that would also have
    # swallowed KeyboardInterrupt/SystemExit.
    print('failed to import plotly')
from .data import UnannotatedDataset
def to_image(tensor, adaptive=False):
    """Convert a CHW tensor (or the first image of an NCHW batch) to PIL.

    With adaptive=False the tensor is assumed to live in [-1, 1]; with
    adaptive=True it is min-max normalised to [0, 1] first.
    """
    if len(tensor.shape) == 4:
        tensor = tensor[0]
    if adaptive:
        low, high = tensor.min(), tensor.max()
        tensor = (tensor - low) / (high - low)
    else:
        tensor = ((tensor + 1) / 2).clamp(0, 1)
    as_uint8 = (255 * tensor.cpu().detach()).to(torch.uint8)
    return ToPILImage()(as_uint8)
def to_image_grid(tensor, adaptive=False, **kwargs):
    """Arrange a batch into a grid (kwargs go to make_grid) and return PIL."""
    grid = make_grid(tensor, **kwargs)
    return to_image(grid, adaptive)
def overlayed(img, mask, channel=1, nrow=1):
    """Blend a mask into an image grid, highlighting it on one channel.

    Images are clamped to [-1, 1], rescaled to [0, 1], dimmed where the
    mask is active, and the mask is added back on ``channel``.
    """
    image_grid = make_grid(torch.clamp(img, -1, 1), nrow=nrow, padding=5)
    mask_grid = make_grid(mask, nrow=nrow, padding=5, pad_value=0.5).cpu()[0]
    blended = 0.5 * (1 + image_grid.clone().cpu()) - 0.5 * mask_grid.unsqueeze(0)
    blended[channel] += mask_grid
    return torch.clamp(blended, 0, 1)
def draw_with_mask(img, masks, names=None, horizontal=True):
    """Plot an image batch next to one or more mask overlays.

    img: image batch tensor in [-1, 1]; masks: a mask batch or a list of
    mask batches (a single tensor of rank < 5 is wrapped into a list);
    names: optional per-overlay subplot titles. Returns the figure.
    """
    # A bare mask tensor means "one set of masks".
    if isinstance(masks, torch.Tensor) and len(masks.shape) < 5:
        masks = [masks,]
    if horizontal:
        fig, axs = plt.subplots(ncols=len(masks) + 1, figsize=(3 * len(masks) + 3, 3), dpi=250)
    else:
        fig, axs = plt.subplots(nrows=len(masks) + 1, figsize=(len(img), len(masks) + 3), dpi=250)
    # In vertical mode each batch element gets its own grid row.
    nrow = 1 if horizontal else img.shape[0]
    axs[0].axis('off')
    axs[0].set_title('original', fontsize=8)
    axs[0].imshow(to_image(make_grid(torch.clamp(img, -1, 1), nrow=nrow, padding=5)))
    for i, mask in enumerate(masks):
        # First overlay is drawn on channel 0, the rest on channel 1.
        overlay = overlayed(img, mask, int(i > 0), nrow)
        ax = axs[i + 1]
        ax.axis('off')
        if names is not None:
            ax.set_title(names[i], fontsize=8)
        ax.imshow(to_image(overlay, True))
    return fig
class SamplesGrid(object):
    """Renders a random grid of samples from an image directory."""

    def __init__(self, dataset_dir, size):
        self.dataset_dir = dataset_dir
        self.set_size(size)

    def __call__(self):
        """Draw one fresh shuffled batch and return it as a PIL grid image."""
        batch = next(iter(self.dataloader))
        return to_image(make_grid(batch, nrow=self.grid_size[0]))

    def set_size(self, size):
        """Store the grid size and rebuild the shuffled loader to match."""
        self.grid_size = size
        batch_size = size[0] * size[1]
        self.dataloader = torch.utils.data.DataLoader(
            UnannotatedDataset(self.dataset_dir), batch_size, shuffle=True)
def plotly_lines(fig, ticks, values, name, color, opacity=1, dash='dash'):
    """Add one styled line trace to a plotly figure.

    ``color`` is an (r, g, b) triple; the trace is hidden from the legend
    when ``name`` is None.
    """
    rgba = 'rgba({}, {}, {}, {})'.format(*color, opacity)
    trace = go.Scatter(
        x=ticks,
        y=values,
        name=name,
        showlegend=name is not None,
        line=dict(color=rgba, width=3, dash=dash),
    )
    fig.add_trace(trace)
def plotly_prepare_fig(fig):
    """Apply the project's standard publication styling to a plotly figure.

    Sets the white template, axis line/grid styling, a Times New Roman
    legend anchored inside the plot area, fixed 280x300 px dimensions and
    tight margins. The figure is mutated in place; nothing is returned.
    """
    fig.update_layout(
        template='plotly_white',
        xaxis=dict(
            showline=True,
            showgrid=True,
            showticklabels=True,
            linewidth=2,
            linecolor='rgb(204, 204, 204)',
            # ticks='outside',
        ),
        yaxis=dict(
            showgrid=True,
            zeroline=True,
            showline=True,
            linecolor='rgb(204, 204, 204)',
            showticklabels=True,
        ),
        legend=go.layout.Legend(
            traceorder="normal",
            font=dict(
                size=18,
                family='Times New Roman',
                color='Black',
            ),
            x=0.25, y=0.25,
            bgcolor="rgba(255, 255, 255, 0.0)",
            orientation="v",
        ),
        # autosize=True,
        width=280,
        height=300,
        margin=dict(
            autoexpand=False,
            l=70,
            r=0,
            t=0,
            b=55,
        ),
        showlegend=True,
        plot_bgcolor='white'
    )
    # Axis titles and tick labels share the same serif styling.
    fig.update_xaxes(
        color='Black',
        title_font=dict(family='Times New Roman', size=24),
        tickfont=dict(size=16,
                      family='Times New Roman',
                      color='Black'),
    )
    fig.update_yaxes(
        title_font=dict(family='Times New Roman', size=24),
        color='Black',
        tickfont=dict(size=16,
                      family='Times New Roman',
                      color='Black'),
    )
|
import os
def main(filepath='data/raw'):
    """Delete files whose name contains 'swo' from *filepath*.

    The directory is now a parameter (defaulting to the originally
    hard-coded 'data/raw'); the original also re-hard-coded 'data/raw'
    inside the loop instead of reusing its own `filepath` variable.

    NOTE(review): the substring test keeps the original behaviour — it
    matches any name containing 'swo', not only '.swo' swap files.
    """
    for item in os.listdir(filepath):
        if 'swo' in item:
            os.remove(os.path.join(filepath, item))


if __name__ == '__main__':
    main()
|
from datetime import datetime, timedelta
from pprint import pprint
import requests
def next_search_date(start):
    """Return the calendar date exactly one week after *start* (a datetime)."""
    return start.date() + timedelta(days=7)
def url_for_date(search_date):
    """Build the Playtomic availability URL covering all of *search_date*."""
    base = ("https://playtomic.io/api/v1/availability?user_id=me"
            "&tenant_id=da776daf-43b3-11e8-8674-52540049669c&sport_id=PADEL"
            "&local_start_min={0}T00%3A00%3A00&local_start_max={0}T23%3A59%3A59")
    return base.format(search_date)
def get_data(url):
    """GET *url* and return the decoded JSON payload."""
    response = requests.get(url)
    return response.json()
def transform(data):
    """Summarise the raw availability response.

    *data* is a list of dicts shaped like::

        {'resource_id': 'f457dda9-...',
         'slots': [{'duration': 90, 'price': '7.5 EUR',
                    'start_time': '10:30:00'}],
         'start_date': '2021-02-26'}

    Returns ``[{'name': <court name>, 'available_slots': <count>}, ...]``.
    """
    return [
        {'name': get_paddle_court_name(element['resource_id']),
         'available_slots': len(element['slots'])}
        for element in data
    ]
# 'bdbd8388-7447-4675-b01e-7f26b2d723af' = 'paddle 1',
# 'ffec2bf4-9914-4201-9cef-b4d1bd277b1a' = 'paddle 2',
# 'f457dda9-3a49-455b-91cb-f6dd47f412bd' = 'paddle 3',
# '91c341b0-bbc2-4671-9c54-8ddf8816a0d1' = 'paddle 4',
# 'f9c1edd7-4c52-45ce-b5b0-755b8d73ea26' = 'paddle 5',
def get_paddle_court_name(resource_id):
    """Map a Playtomic resource UUID to a human-readable court name.

    Unknown UUIDs map to '' (same fall-through as the original chain).
    """
    court_by_uuid = {
        'bdbd8388-7447-4675-b01e-7f26b2d723af': 'Paddle 1',
        'ffec2bf4-9914-4201-9cef-b4d1bd277b1a': 'Paddle 2',
        'f457dda9-3a49-455b-91cb-f6dd47f412bd': 'Paddle 3',
        '91c341b0-bbc2-4671-9c54-8ddf8816a0d1': 'Paddle 4',
        'f9c1edd7-4c52-45ce-b5b0-755b8d73ea26': 'Paddle 5',
    }
    return court_by_uuid.get(resource_id, '')
def save(data, dt):
    """Append availability rows to sunday_paddle.txt.

    Each row is '<run timestamp>, <court name>, <slot count>'; a dashed
    separator line closes the batch.
    """
    stamp = dt.strftime('%m/%d/%Y %H:%M')
    lines = ['{}, {}, {}\n'.format(stamp, row['name'], row['available_slots'])
             for row in data]
    lines.append('-' * 20 + '\n')
    with open('sunday_paddle.txt', 'a') as f:
        f.writelines(lines)
def run():
    """Fetch next week's availability and append it to the report file."""
    now = datetime.now()
    target_date = next_search_date(now)
    payload = get_data(url_for_date(target_date))
    save(transform(payload), now)


if __name__ == '__main__':
    run()
|
# NOTE(review): this appears to be a generated Skype4Py language-resource
# module (Simplified Chinese): enumeration names mapped to translated UI
# strings. Confirm the generator before editing any values by hand.

# API attachment statuses.
apiAttachAvailable = u'API\u53ef\u4f9b\u4f7f\u7528'
apiAttachNotAvailable = u'\u4e0d\u4f9b\u4f7f\u7528'
apiAttachPendingAuthorization = u'\u5f85\u6388\u6743'
apiAttachRefused = u'\u62d2\u7edd'
apiAttachSuccess = u'\u6210\u529f'
apiAttachUnknown = u'\u4e0d\u8be6'
# Buddy (contact) statuses.
budDeletedFriend = u'\u5df2\u4ece\u670b\u53cb\u540d\u5355\u4e2d\u5220\u9664'
budFriend = u'\u670b\u53cb'
budNeverBeenFriend = u'\u4ece\u672a\u52a0\u5165\u670b\u53cb\u540d\u5355'
budPendingAuthorization = u'\u5f85\u6388\u6743'
budUnknown = u'\u4e0d\u8be6'
# Call failure reasons.
cfrBlockedByRecipient = u'\u901a\u8bdd\u88ab\u63a5\u6536\u65b9\u5c01\u9501'
cfrMiscError = u'\u5176\u5b83\u7c7b\u9519\u8bef'
cfrNoCommonCodec = u'\u65e0\u5e38\u89c1\u7f16\u89e3\u7801\u5668'
cfrNoProxyFound = u'\u627e\u4e0d\u5230\u4ee3\u7406\u670d\u52a1\u5668'
cfrNotAuthorizedByRecipient = u'\u5f53\u524d\u7528\u6237\u672a\u7ecf\u63a5\u6536\u65b9\u6388\u6743'
cfrRecipientNotFriend = u'\u63a5\u6536\u65b9\u4e0d\u662f\u670b\u53cb\u3002'
cfrRemoteDeviceError = u'\u8fdc\u7a0b\u58f0\u97f3\u8bbe\u5907\u9519\u8bef'
cfrSessionTerminated = u'\u4f1a\u8bdd\u7ed3\u675f'
cfrSoundIOError = u'\u97f3\u54cd\u8f93\u5165/\u8f93\u51fa\u9519\u8bef'
cfrSoundRecordingError = u'\u97f3\u54cd\u5f55\u97f3\u9519\u8bef'
cfrUnknown = u'\u4e0d\u8be6'
cfrUserDoesNotExist = u'\u7528\u6237/\u7535\u8bdd\u53f7\u7801\u4e0d\u5b58\u5728'
cfrUserIsOffline = u'\u5979\u6216\u4ed6\u5904\u4e8e\u8131\u673a\u72b6\u6001'
# Chat (conversation) status values.
chsAllCalls = u'\u65e7\u7248\u5bf9\u8bdd'
chsDialog = u'\u5bf9\u8bdd'
chsIncomingCalls = u'\u591a\u4eba\u5bf9\u8bdd\u5f85\u63a5\u53d7'
chsLegacyDialog = u'\u65e7\u7248\u5bf9\u8bdd'
chsMissedCalls = u'\u5bf9\u8bdd'
chsMultiNeedAccept = u'\u591a\u4eba\u5bf9\u8bdd\u5f85\u63a5\u53d7'
chsMultiSubscribed = u'\u591a\u4eba\u52a0\u5165'
chsOutgoingCalls = u'\u591a\u4eba\u52a0\u5165'
chsUnknown = u'\u4e0d\u8be6'
chsUnsubscribed = u'\u5df2\u9000\u51fa'
# Call statuses.
clsBusy = u'\u5fd9'
clsCancelled = u'\u5df2\u53d6\u6d88'
clsEarlyMedia = u'\u6b63\u5728\u64ad\u653e\u65e9\u671f\u4fe1\u53f7\uff08Early Media\uff09'
clsFailed = u'\u5bf9\u4e0d\u8d77\uff0c\u547c\u53eb\u5931\u8d25\uff01'
clsFinished = u'\u5b8c\u6bd5'
clsInProgress = u'\u6b63\u5728\u8fdb\u884c\u901a\u8bdd'
clsLocalHold = u'\u672c\u65b9\u6682\u5019'
clsMissed = u'\u4e2a\u672a\u5e94\u7b54\u547c\u53eb'
clsOnHold = u'\u4fdd\u6301'
clsRefused = u'\u62d2\u7edd'
clsRemoteHold = u'\u5bf9\u65b9\u6682\u5019'
clsRinging = u'\u547c\u53eb'
clsRouting = u'\u6b63\u5728\u63a5\u901a'
clsTransferred = u'\u4e0d\u8be6'
clsTransferring = u'\u4e0d\u8be6'
clsUnknown = u'\u4e0d\u8be6'
clsUnplaced = u'\u4ece\u672a\u62e8\u6253'
clsVoicemailBufferingGreeting = u'\u6b63\u5728\u51c6\u5907\u95ee\u5019\u8bed'
clsVoicemailCancelled = u'\u8bed\u97f3\u7559\u8a00\u5df2\u53d6\u6d88'
clsVoicemailFailed = u'\u8bed\u97f3\u90ae\u4ef6\u5931\u8d25'
clsVoicemailPlayingGreeting = u'\u6b63\u5728\u64ad\u653e\u95ee\u5019\u8bed'
clsVoicemailRecording = u'\u5f55\u5236\u8bed\u97f3\u90ae\u4ef6'
clsVoicemailSent = u'\u8bed\u97f3\u7559\u8a00\u5df2\u53d1\u9001'
clsVoicemailUploading = u'\u6b63\u5728\u4e0a\u8f7d\u8bed\u97f3\u7559\u8a00'
# Call types (P2P vs PSTN, incoming vs outgoing).
cltIncomingP2P = u'\u62e8\u5165\u5bf9\u7b49\u7535\u8bdd'
cltIncomingPSTN = u'\u62e8\u5165\u7535\u8bdd'
cltOutgoingP2P = u'\u62e8\u51fa\u5bf9\u7b49\u7535\u8bdd'
cltOutgoingPSTN = u'\u62e8\u51fa\u7535\u8bdd'
cltUnknown = u'\u4e0d\u8be6'
# Chat message events.
cmeAddedMembers = u'\u6240\u6dfb\u6210\u5458'
cmeCreatedChatWith = u'\u66fe\u4e0e\u6b64\u4eba\u804a\u5929'
cmeEmoted = u'\u4e0d\u8be6'
cmeLeft = u'\u79bb\u5f00'
cmeSaid = u'\u5df2\u8bf4\u8fc7'
cmeSawMembers = u'\u770b\u5230\u6210\u5458'
cmeSetTopic = u'\u8bbe\u5b9a\u4e3b\u9898'
cmeUnknown = u'\u4e0d\u8be6'
# Chat message statuses.
cmsRead = u'\u5df2\u8bfb\u53d6'
cmsReceived = u'\u5df2\u63a5\u6536'
cmsSending = u'\u6b63\u5728\u53d1\u9001...'
cmsSent = u'\u5df2\u53d1\u9001'
cmsUnknown = u'\u4e0d\u8be6'
# Connection statuses.
conConnecting = u'\u6b63\u5728\u8fde\u63a5'
conOffline = u'\u8131\u673a'
conOnline = u'\u8054\u673a'
conPausing = u'\u6682\u505c\u4e2d'
conUnknown = u'\u4e0d\u8be6'
# Current-user online statuses.
cusAway = u'\u79bb\u5f00'
cusDoNotDisturb = u'\u8bf7\u52ff\u6253\u6270'
cusInvisible = u'\u9690\u8eab'
cusLoggedOut = u'\u8131\u673a'
cusNotAvailable = u'\u4e0d\u4f9b\u4f7f\u7528'
cusOffline = u'\u8131\u673a'
cusOnline = u'\u8054\u673a'
cusSkypeMe = u'Skype Me'
cusUnknown = u'\u4e0d\u8be6'
# Call video statuses.
cvsBothEnabled = u'\u89c6\u9891\u53d1\u9001\u548c\u63a5\u6536'
cvsNone = u'\u65e0\u89c6\u9891'
cvsReceiveEnabled = u'\u89c6\u9891\u63a5\u6536'
cvsSendEnabled = u'\u89c6\u9891\u53d1\u9001'
cvsUnknown = u''
# Contact group types.
grpAllFriends = u'\u6240\u6709\u670b\u53cb'
grpAllUsers = u'\u6240\u6709\u7528\u6237'
grpCustomGroup = u'\u81ea\u5b9a\u4e49'
grpOnlineFriends = u'\u8054\u673a\u670b\u53cb'
grpPendingAuthorizationFriends = u'\u5f85\u6388\u6743'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'\u6700\u8fd1\u8054\u7cfb\u8fc7\u7684\u7528\u6237'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Skype\u670b\u53cb'
grpSkypeOutFriends = u'SkypeOut\u670b\u53cb'
grpUngroupedFriends = u'\u672a\u5206\u7ec4\u7684\u670b\u53cb'
grpUnknown = u'\u4e0d\u8be6'
grpUsersAuthorizedByMe = u'\u7ecf\u6211\u6388\u6743'
grpUsersBlockedByMe = u'\u88ab\u6211\u5c01\u9501'
grpUsersWaitingMyAuthorization = u'\u6b63\u7b49\u5f85\u6211\u7684\u6388\u6743'
# Friend-add (authorization) outcomes.
leaAddDeclined = u'\u6dfb\u52a0\u906d\u62d2'
leaAddedNotAuthorized = u'\u88ab\u6dfb\u52a0\u4eba\u987b\u7ecf\u6388\u6743'
leaAdderNotFriend = u'\u6dfb\u52a0\u4eba\u987b\u4e3a\u670b\u53cb'
leaUnknown = u'\u4e0d\u8be6'
leaUnsubscribe = u'\u5df2\u9000\u51fa'
leaUserIncapable = u'\u7528\u6237\u4e0d\u80fd\u4f7f\u7528'
leaUserNotFound = u'\u7528\u6237\u672a\u627e\u5230'
# Online statuses.
olsAway = u'\u79bb\u5f00'
olsDoNotDisturb = u'\u8bf7\u52ff\u6253\u6270'
olsNotAvailable = u'\u4e0d\u4f9b\u4f7f\u7528'
olsOffline = u'\u8131\u673a'
olsOnline = u'\u8054\u673a'
olsSkypeMe = u'Skype Me'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'\u4e0d\u8be6'
# SMS message statuses/types and per-target statuses (left untranslated
# in this locale).
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
# User sex values.
usexFemale = u'\u5973'
usexMale = u'\u7537'
usexUnknown = u'\u4e0d\u8be6'
# Voicemail result codes.
vmrConnectError = u'\u8fde\u63a5\u9519\u8bef'
vmrFileReadError = u'\u6587\u4ef6\u8bfb\u53d6\u9519\u8bef'
vmrFileWriteError = u'\u6587\u4ef6\u5199\u5165\u9519\u8bef'
vmrMiscError = u'\u5176\u5b83\u7c7b\u9519\u8bef'
vmrNoError = u'\u65e0\u9519\u8bef'
vmrNoPrivilege = u'\u65e0\u4f7f\u7528\u8bed\u97f3\u7559\u8a00\u6743\u9650'
vmrNoVoicemail = u'\u4e0d\u5b58\u5728\u8be5\u8bed\u97f3\u7559\u8a00'
vmrPlaybackError = u'\u64ad\u653e\u9519\u8bef'
vmrRecordingError = u'\u5f55\u97f3\u9519\u8bef'
vmrUnknown = u'\u4e0d\u8be6'
# Voicemail statuses.
vmsBlank = u'\u7a7a\u767d\u7559\u8a00'
vmsBuffering = u'\u6b63\u5728\u7f13\u51b2'
vmsDeleting = u'\u6b63\u5728\u5220\u9664'
vmsDownloading = u'\u6b63\u5728\u4e0b\u8f7d'
vmsFailed = u'\u5931\u8d25'
vmsNotDownloaded = u'\u672a\u4e0b\u8f7d'
vmsPlayed = u'\u5df2\u64ad\u653e\u7559\u8a00'
vmsPlaying = u'\u6b63\u5728\u64ad\u653e'
vmsRecorded = u'\u5df2\u5f55\u97f3\u7559\u8a00'
vmsRecording = u'\u5f55\u5236\u8bed\u97f3\u90ae\u4ef6'
vmsUnknown = u'\u4e0d\u8be6'
vmsUnplayed = u'\u672a\u64ad\u653e\u7684\u7559\u8a00'
vmsUploaded = u'\u4e0a\u8f7d\u5b8c\u6bd5'
vmsUploading = u'\u6b63\u5728\u4e0a\u8f7d'
# Voicemail types.
vmtCustomGreeting = u'\u81ea\u5b9a\u4e49\u95ee\u5019\u8bed'
vmtDefaultGreeting = u'\u9ed8\u8ba4\u95ee\u5019\u8bed'
vmtIncoming = u'\u63a5\u6536\u8bed\u97f3\u90ae\u4ef6'
vmtOutgoing = u'\u5916\u51fa'
vmtUnknown = u'\u4e0d\u8be6'
# Video session statuses.
vssAvailable = u'\u53ef\u4f9b\u4f7f\u7528'
vssNotAvailable = u'\u4e0d\u4f9b\u4f7f\u7528'
vssPaused = u'\u6682\u505c'
vssRejected = u'\u62d2\u7edd\u53d7\u8bdd'
vssRunning = u'\u8fd0\u884c\u4e2d'
vssStarting = u'\u5f00\u59cb'
vssStopping = u'\u505c\u6b62\u4e2d'
vssUnknown = u'\u4e0d\u8be6'
|
import yaml
from usecases.configuration.exceptions \
import UnknownConfigurationFileTypeException
from usecases.configuration.exceptions \
import InvalidConfigurationFileException
class ConfigurationManager():
    """Loads and validates a screenshot-environment configuration file."""

    def __init__(self, configuration_file):
        self.configuration_file = configuration_file
        # Empty until parse_configuration() succeeds (kept as a list for
        # backward compatibility with callers that test truthiness).
        self.configuration = []

    def parse_configuration(self):
        """Read the file, validate it and store the parsed configuration."""
        configuration = self.read_configuration()
        self.validate_configuration(configuration)
        self.configuration = configuration

    def get_configuration(self):
        """Return the last successfully parsed configuration."""
        return self.configuration

    def read_configuration(self):
        """Dispatch on file extension; only YAML is supported.

        Raises UnknownConfigurationFileTypeException for anything else.
        """
        # endswith() accepts a tuple, replacing the original `or` chain.
        if self.configuration_file.endswith(('.yml', '.yaml')):
            return self.__read_yaml_configuration()
        raise UnknownConfigurationFileTypeException()

    def __read_yaml_configuration(self):
        with open(self.configuration_file, 'r') as stream:
            return yaml.safe_load(stream)

    def validate_configuration(self, configuration):
        """Ensure every environment has a name and well-formed screens.

        Raises InvalidConfigurationFileException on the first violation.
        """
        if not configuration.get('environments'):
            raise InvalidConfigurationFileException()
        for environment in configuration['environments']:
            if not environment.get('name'):
                raise InvalidConfigurationFileException()
            if not environment.get('screens'):
                raise InvalidConfigurationFileException()
            for screen in environment['screens']:
                # The original additionally tested `screen['data'] is
                # None` for the data key, but a None value is already
                # falsy, so one truthiness check per key covers it.
                for required_key in ('url', 'format', 'data', 'main'):
                    if not screen.get(required_key):
                        raise InvalidConfigurationFileException()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
import logging
def get_logger(level):
    """Configure and return the "azure" SDK logger at *level*.

    Also raises the "uamqp" logger to INFO with the same stdout handler.
    Unlike the original, handlers are only attached when the logger has
    none yet, so repeated calls no longer stack duplicate handlers (which
    produced duplicated log lines).
    """
    azure_logger = logging.getLogger("azure")
    azure_logger.setLevel(level)
    uamqp_logger = logging.getLogger("uamqp")
    uamqp_logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
    if not azure_logger.handlers:
        azure_logger.addHandler(handler)
    if not uamqp_logger.handlers:
        uamqp_logger.addHandler(handler)
    return azure_logger
|
import logging
import sys
import os
import numpy as np
import pickle
import tvm
import topi
from topi.testing import conv2d_nchw_python
from tvm import te
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import tvm.contrib.graph_runtime as runtime
#from tvm.autotvm.task.topi_integration import deserialize_args
from collections import namedtuple
import argparse
#import logging
#logging.getLogger('autotvm').setLevel(logging.DEBUG)
#logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
# Limit the TVM runtime thread pool via the environment; environ values
# must be strings, hence the str() conversion.
num_threads = 32
os.environ["TVM_NUM_THREADS"] = str(num_threads)
def tune_kernels(args, N, H, W, CO, CI, KH, KW, strides, padding, dilation, trials, key,
                 measure_option,
                 tuner,
                 early_stopping,
                 ):
    """Tune one conv2d workload with AutoTVM and pickle the best config.

    Creates an autotvm task for the requested search-space size, runs an
    XGBTuner for up to `trials` configurations `args.num_iters` times, and
    writes [best_config, task, saved_features] to one pickle per run.

    Note: `tuner` and `early_stopping` are accepted for compatibility with
    the tuning_option dict but a fresh XGBTuner is always constructed; the
    `dilation` parameter is likewise unused here (the task is created with
    dilation fixed at 1), matching the original behaviour.
    """
    data = ('TENSOR', (N, CI, H, W), 'float32')
    kernel = ('TENSOR', (CO, CI, KH, KW), 'float32')
    origin_layout = 'NCHW'
    feature_type = args.feature
    print('Feature:', feature_type)
    # Map the requested search-space size to the registered task name;
    # anything unrecognised falls back to the default NCHWc schedule.
    space_to_task = {
        'small': 'conv2d_NCHWc_small.x86',
        'mid': 'conv2d_NCHWc_mid.x86',
        'wide': 'conv2d_NCHWc_wide.x86',
        'huge': 'conv2d_NCHWc_huge.x86',
    }
    func_create = space_to_task.get(args.search_size, 'conv2d_NCHWc.x86')
    count = args.num_iters
    likwid_event = args.likwid_event
    random = args.random
    sa_n_iter = args.sa_num_iters
    save_features = not args.no_save_features
    task = autotvm.task.create(func_create,
                               args=(data, kernel, strides, padding, 1, origin_layout, origin_layout, 'float32'),
                               target='llvm -mcpu=core-avx2')
    print(task.config_space)
    trials = min(trials, len(task.config_space))
    for i in range(count):
        if random:
            log_filename = '%s_%i_%s_%s_%icore_rand.log' % (key, i, feature_type, args.search_size, num_threads)
        else:
            log_filename = '%s_%i_%s_%s_%icore.log' % (key, i, feature_type, args.search_size, num_threads)
        # Identity comparison (`is not None`) replaces the original `!= None`.
        if args.key_id is not None and count == 1:
            save_ind = int(args.key_id)
        else:
            save_ind = i
        # Compose the pickle name from the run flags; the original's four
        # duplicated branches collapse into an optional prefix.
        prefix = ('likwid_' if likwid_event is not None else '') + ('rand_' if random else '')
        pickle_file = 'data/conv/%s%s_%s_features_%icore_%i_%s_%i.pkl' % (
            prefix, key, feature_type, num_threads, trials, args.search_size, save_ind)
        if os.path.exists(pickle_file):
            print('File exists', pickle_file)
            continue
        tuner = autotvm.tuner.XGBTuner(task, feature_type=feature_type, loss_type='rank', plan_size=80, sa_n_iter=sa_n_iter)
        tuner.tune(n_trial=trials,
                   measure_option=measure_option,
                   callbacks=[
                       autotvm.callback.progress_bar(trials),
                       autotvm.callback.log_to_file(log_filename)],
                   likwid_event=likwid_event, save_features=save_features, random=random)
        dispatch_context = autotvm.apply_history_best(log_filename)
        best_config = dispatch_context.query(task.target, task.workload)
        print("\nBest config:")
        print(best_config)
        try:
            os.remove(log_filename)
        except OSError:
            # Best-effort cleanup only; the narrow except replaces a bare
            # `except:` that also swallowed KeyboardInterrupt/SystemExit.
            pass
        if save_features:
            with open(pickle_file, 'wb') as output:
                pickle.dump([best_config, task, tuner.cost_model.saved_features], output, pickle.HIGHEST_PROTOCOL)
def tune_and_evaluate():
    """Parse CLI arguments, select a benchmark conv2d workload and tune it.

    The original began with a dead `dilation = 1;` statement (overwritten
    below before any use); it has been removed.
    """
    print("Start tuning...")
    # Workload table: name -> [N, H, W, CO, CI, KH, KW]. Commented rows
    # are kept as a record of the full benchmark suite.
    benchmarks = {
        #'vision0':[1, 224, 224, 64, 3, 3, 3],
        #'vision1':[1, 112, 112, 128, 64, 3, 3],
        #'vision2':[1, 56, 56, 256, 128, 3, 3],
        #'vision3':[1, 28, 28, 512, 256, 3, 3],
        #'vision4':[1, 14, 14, 512, 512, 3, 3],
        #'OCR1':[1, 480, 48, 16, 1, 3, 3],
        #'OCR2':[1, 240, 24, 32, 16, 3, 3],
        #'OCR3':[1, 120, 12, 64, 32, 3, 3],
        #'OCR4':[1, 60, 6, 128, 64, 3, 3],
        'yolo0':[1, 544, 544, 32, 3, 3, 3],
        'yolo2':[1, 272, 272, 64, 32, 3, 3],
        'yolo2_L3':[2, 272, 272, 64, 32, 3, 3],
        'yolo4':[1, 136, 136, 128, 64, 3, 3],
        'yolo4_L3':[4, 136, 136, 128, 64, 3, 3],
        'yolo5':[1, 136, 136, 64, 128, 1, 1],
        'yolo5_L3':[4, 136, 136, 64, 128, 1, 1],
        'yolo7':[1, 68, 68, 256, 128, 3, 3],
        'yolo7_L3':[8, 68, 68, 256, 128, 3, 3],
        'yolo9':[1, 68, 68, 128, 256, 1, 1],
        'yolo9_L3':[8, 68, 68, 128, 256, 1, 1],
        'yolo12':[1, 34, 34, 512, 256, 3, 3],
        'yolo12_L3':[16, 34, 34, 512, 256, 3, 3],
        'yolo13':[1, 34, 34, 256, 512, 1, 1],
        'yolo13_L3':[16, 34, 34, 256, 512, 1, 1],
        'yolo17':[1, 17, 17, 1024, 512, 3, 3],
        'yolo17_L3':[32, 17, 17, 1024, 512, 3, 3],
        'yolo19':[1, 17, 17, 512, 1024, 1, 1],
        'yolo19_L3':[32, 17, 17, 512, 1024, 1, 1],
        'yolo23':[1, 17, 17, 28269, 1024, 1, 1],
    }
    parser = argparse.ArgumentParser(description='Run conv2d benchmarks in TVM')
    parser.add_argument('-b', '--benchmark', help="Which benchmark to use, int from 0-19", default=0, type=int)
    parser.add_argument('-f', '--feature', help="Type of feature to use, one of 'datavol', 'itervar', 'datavol_itervar', 'itervar_silent_dv'", default='itervar', type=str)
    parser.add_argument('-s', '--search_size', help="Type of search space to use, one of 'small', 'mid', 'wide'", default='huge', type=str)
    parser.add_argument('-n', '--num_iters', help="Int. number of times to run training", default=1, type=int)
    parser.add_argument('-t', '--trials', help="Int. Number of trials to sample", default=2000, type=int)
    parser.add_argument('-l', '--likwid_event', help='Likwid event to capture during training', default=None)
    parser.add_argument('-r', '--random', help="Use XGB+SA to select samples, or randomly select", default=False, action='store_true')
    parser.add_argument('-k', '--key_id', help="Key ID for RPC server.", default=None, type=str)
    parser.add_argument('--sa_num_iters', help="Number of iterations of simulated annealing", default=500, type=int)
    parser.add_argument('--no_save_features', help="Should save features", default=False, action='store_true')
    args = parser.parse_args()
    trials = args.trials
    key = list(benchmarks.keys())[args.benchmark]
    N, H, W, CO, CI, KH, KW = benchmarks[key]
    strides, padding, dilation = 1, 1, 1
    # 1x1 kernels need no padding to preserve spatial dimensions.
    if KH == 1:
        padding = 0
    tuning_option = {
        'tuner': 'xgb',
        'early_stopping': None,
        'measure_option': autotvm.measure_option(
            builder=autotvm.LocalBuilder(timeout=10, n_parallel=80),
            runner=autotvm.LocalRunner(repeat=10, number=4),
        ),
    }
    print("N, H, W, CO, CI, KH, KW, strides, padding \n", N, H, W, CO, CI, KH, KW, strides, padding)
    tune_kernels(args, N, H, W, CO, CI, KH, KW, strides, padding, dilation, trials, key, **tuning_option)


if __name__ == '__main__':
    # Guarded entry point: the original called tune_and_evaluate()
    # unconditionally, which started tuning as a side effect of import.
    tune_and_evaluate()
|
# -*- coding: utf-8 -*-
"""
ITU-R BT.2020 Colourspace
=========================
Defines the *ITU-R BT.2020* colourspace:
- :attr:`colour.models.BT2020_COLOURSPACE`.
References
----------
- :cite:`InternationalTelecommunicationUnion2015h` : International
Telecommunication Union. (2015). Recommendation ITU-R BT.2020 - Parameter
values for ultra-high definition television systems for production and
international programme exchange. Retrieved from https://www.itu.int/\
dms_pubrec/itu-r/rec/bt/R-REC-BT.2020-2-201510-I!!PDF-E.pdf
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, normalised_primary_matrix,
oetf_BT2020, eotf_BT2020)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'BT2020_PRIMARIES', 'BT2020_WHITEPOINT_NAME', 'BT2020_WHITEPOINT',
'BT2020_TO_XYZ_MATRIX', 'XYZ_TO_BT2020_MATRIX', 'BT2020_COLOURSPACE'
]
BT2020_PRIMARIES = np.array([
    [0.7080, 0.2920],
    [0.1700, 0.7970],
    [0.1310, 0.0460],
])
"""
*ITU-R BT.2020* colourspace primaries.
BT2020_PRIMARIES : ndarray, (3, 2)
"""
BT2020_WHITEPOINT_NAME = 'D65'
"""
*ITU-R BT.2020* colourspace whitepoint name.
BT2020_WHITEPOINT_NAME : unicode
"""
BT2020_WHITEPOINT = (
    ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][BT2020_WHITEPOINT_NAME])
"""
*ITU-R BT.2020* colourspace whitepoint.
BT2020_WHITEPOINT : ndarray
"""
BT2020_TO_XYZ_MATRIX = normalised_primary_matrix(BT2020_PRIMARIES,
                                                 BT2020_WHITEPOINT)
"""
*ITU-R BT.2020* colourspace to *CIE XYZ* tristimulus values matrix.
BT2020_TO_XYZ_MATRIX : array_like, (3, 3)
"""
# Computed as the matrix inverse of the normalised primary matrix above.
XYZ_TO_BT2020_MATRIX = np.linalg.inv(BT2020_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *ITU-R BT.2020* colourspace matrix.
XYZ_TO_BT2020_MATRIX : array_like, (3, 3)
"""
# Bundles the primaries, whitepoint, conversion matrices and the BT.2020
# opto-electronic / electro-optical transfer functions into one object.
BT2020_COLOURSPACE = RGB_Colourspace(
    'ITU-R BT.2020',
    BT2020_PRIMARIES,
    BT2020_WHITEPOINT,
    BT2020_WHITEPOINT_NAME,
    BT2020_TO_XYZ_MATRIX,
    XYZ_TO_BT2020_MATRIX,
    oetf_BT2020,
    eotf_BT2020,
)
BT2020_COLOURSPACE.__doc__ = """
*ITU-R BT.2020* colourspace.
References
----------
:cite:`InternationalTelecommunicationUnion2015h`
BT2020_COLOURSPACE : RGB_Colourspace
"""
|
#!/usr/bin/python
import json
import argparse
def parseArgs():
    """Parse the command-line arguments of the README generator."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-r', '--readmeDataFile', required=True,
                        help='Absolute path of the README data file.')
    parser.add_argument('-m', '--moduleDataFile', required=True,
                        help='Absolute path of the module data file.')
    parser.add_argument('-o', '--outputReadmeFile', required=True,
                        help='Absolute path of the output README file.')
    return parser.parse_args()
def readJSONFile(fileName):
    """Load and return the JSON content of *fileName*."""
    with open(fileName) as handle:
        return json.load(handle)
# Entry point: merge the README sections and the module table into one
# Markdown file at the requested output path.
if __name__ == "__main__":
    args = parseArgs( )
    readmeData = readJSONFile( args.readmeDataFile )
    moduleData = readJSONFile( args.moduleDataFile )
    with open( args.outputReadmeFile, 'w' ) as output_file:
        output_file.write( '# {}\n'.format( readmeData[ 'title' ] ) )
        # Each section becomes an optional "##" heading plus its text.
        for section in readmeData[ 'sections' ]:
            if section[ 'title' ]:
                output_file.write( '## {}\n'.format( section[ 'title' ] ) )
            output_file.write( '\n'.join( section[ 'text' ] ) )
            output_file.write( '\n\n' )
        # Modules are rendered as a Markdown table, one row per module.
        output_file.write( '# Modules\n' )
        output_file.write( '|Command|Description|Difficulty|Status|\n' )
        output_file.write( '|-------|-----------|----------|------|\n' )
        for module in moduleData:
            output_file.write( '|{}|{}|{}|{}|\n'.format( module[ 'name' ], module[ 'description' ], module[ 'difficulty' ], module[ 'status' ] ) )
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Action metadata consumed by the Komand/InsightConnect SDK (file is
    # generated; do not hand-edit values).
    DESCRIPTION = "Get a Google document"
class Input:
    # Name of the action's single input field (generated constant).
    DOCUMENT_ID = "document_id"
class Output:
    # Name of the action's single output field (generated constant).
    DOCUMENT = "document"
class GetDocumentInput(komand.Input):
    # Generated JSON schema: one required string property, document_id.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "document_id": {
      "type": "string",
      "title": "Document ID",
      "description": "Document ID",
      "order": 1
    }
  },
  "required": [
    "document_id"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class GetDocumentOutput(komand.Output):
    # Generated JSON schema: a required "document" object described by
    # the inline "document" definition (body, IDs, styles, title, etc.).
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "document": {
      "$ref": "#/definitions/document",
      "title": "Document",
      "description": "Document Object",
      "order": 1
    }
  },
  "required": [
    "document"
  ],
  "definitions": {
    "document": {
      "type": "object",
      "title": "document",
      "properties": {
        "body": {
          "type": "object",
          "title": "Body",
          "description": "Body",
          "order": 1
        },
        "documentId": {
          "type": "string",
          "title": "Document ID",
          "description": "Document ID",
          "order": 5
        },
        "documentStyle": {
          "type": "object",
          "title": "Document Style",
          "description": "Document Style",
          "order": 2
        },
        "namedStyles": {
          "type": "object",
          "title": "Named Styles",
          "description": "Named styles",
          "order": 7
        },
        "revisionId": {
          "type": "string",
          "title": "Revision ID",
          "description": "Revision ID",
          "order": 4
        },
        "suggestionsViewMode": {
          "type": "string",
          "title": "Suggestions View Mode",
          "description": "Suggestions view mode",
          "order": 6
        },
        "title": {
          "type": "string",
          "title": "Title",
          "description": "Title",
          "order": 3
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
|
class BaseEncounter(object):
    """Base encounter class.

    Instantiating an encounter immediately plays it: if it "happens"
    (see check_if_happens, or unconditionally when check_if_happens is
    False), the per-game encounter counter is bumped, the screen is
    refreshed and run() executes.
    """

    def __init__(self, player, check_if_happens=True):
        self.p = self.player = player
        # Equivalent to the original
        # `(check_if_happens and self.check_if_happens()) or not check_if_happens`
        # but without the duplicated flag test: when the flag is False the
        # check is skipped entirely (same short-circuit behaviour).
        if not check_if_happens or self.check_if_happens():
            enc_name = self.__class__.__name__
            enc_dict = self.p.game.enc_count_dict
            # dict.get() replaces the original if/else counter branches.
            enc_dict[enc_name] = enc_dict.get(enc_name, 0) + 1
            self.p.refresh_screen()
            self.run()

    def check_if_happens(self):
        """Return True when the encounter should occur; subclasses override."""
        return True

    def run(self):
        """Encounter body; subclasses override."""
        pass
class Guaranteed(object):
    """Mixin that forces an encounter to always happen."""
    @staticmethod
    def check_if_happens():
        # Overrides BaseEncounter's hook unconditionally.
        return True
|
# coding=UTF-8
import tensorflow as tf
import numpy as np
import tools
class Net(object):
    """HED-style (holistically-nested edge detection) network with a
    refinement head.

    ``train_mode`` selects what trains:
      - 'pred':   train only the HED backbone (refine output excluded),
      - 'refine': freeze the backbone, train only the refine head,
      - anything else ('all'): train everything.
    """

    def __init__(self, inputs, labels=None, keep_prop=1.0,
                 trainable=True, training=True, reuse=False, train_mode='all'):
        """Build the graph; when labels are given also build loss/accuracy.

        inputs: image batch, NHWC with 3 channels; assumed to be 0..255 RGB
            since a per-channel mean is subtracted -- TODO confirm against
            the input pipeline.
        labels: optional edge-map batch, same spatial shape, values 0..255.
        """
        self.trainable = trainable
        self.training = training
        self.keep_prop = keep_prop
        self.train_mode = train_mode
        with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
            self.outputs = self.buildHedNet(inputs, trainable, training)
        if labels is not None:
            # self.y = labels/255.0
            # Binarise the label map: pixels < 32 become 0, the rest 1.
            self.y = tf.where(tf.less(labels, 32.), tf.fill(tf.shape(labels), 0.), tf.fill(tf.shape(labels), 1.))
            # NOTE(review): buildLoss/buildAccuracy ignore the passed
            # arguments and read self.outputs / self.y internally.
            self.loss = self.buildLoss(self.outputs, labels)
            self.accuracy = self.buildAccuracy(self.outputs, labels)

    def buildHedNet(self, inputs, trainable, training):
        """Construct the backbone, the five side outputs with their fusion,
        and the refinement head; return the final edge-probability map."""
        with tf.variable_scope("preprocess"):
            # Subtract the per-channel ImageNet mean (RGB order assumed).
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='mean')
            net = inputs - mean
        with tf.variable_scope("hed"):
            # Freeze the backbone when only the refine head is training.
            trainable = trainable and (self.train_mode != "refine")
            print("hed trainable: ", trainable)
            W_init = tf.truncated_normal_initializer(0.0, 0.01)
            b_init = tf.zeros_initializer()
            # Five conv stages; each stage's final activation is kept as a
            # side output (dsn1..dsn5) before the 2x max-pool downsamples.
            with tf.variable_scope("stage_1"):
                net = tools.conv('conv1', net, 16, [7, 7], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv2', net, 16, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                dsn1 = net
                net = tools.pool('pool1', net, [2, 2], [2, 2], padding='SAME',
                                 is_max_pool=True, print_shape=training)
            with tf.variable_scope("stage_2"):
                net = tools.conv('conv1', net, 32, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv2', net, 32, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                dsn2 = net
                net = tools.pool('pool1', net, [2, 2], [2, 2], padding='SAME',
                                 is_max_pool=True, print_shape=training)
            with tf.variable_scope("stage_3"):
                net = tools.conv('conv1', net, 48, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv2', net, 48, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv3', net, 48, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                dsn3 = net
                net = tools.pool('pool1', net, [2, 2], [2, 2], padding='SAME',
                                 is_max_pool=True, print_shape=training)
            with tf.variable_scope("stage_4"):
                net = tools.conv('conv1', net, 64, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv2', net, 64, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv3', net, 64, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                dsn4 = net
                net = tools.pool('pool1', net, [2, 2], [2, 2], padding='SAME',
                                 is_max_pool=True, print_shape=training)
            with tf.variable_scope("stage_5"):
                net = tools.conv('conv1', net, 96, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv2', net, 96, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                net = tools.conv('conv3', net, 96, [3, 3], [1, 1], act=tf.nn.relu,
                                 padding='SAME', W_init=W_init, b_init=b_init,
                                 trainable=trainable, print_shape=training)
                dsn5 = net
        with tf.variable_scope("fusion"):
            # 1x1 convs collapse every side output to a single channel; the
            # pooled stages are deconvolved back to the input resolution.
            with tf.variable_scope("dsn1"):
                dsn1 = tools.conv('dsn1', dsn1, 1, [1, 1], [1, 1],
                                  padding='SAME', W_init=W_init, b_init=b_init,
                                  trainable=trainable, print_shape=training)
                # dsn1 = tools.up_sampling('dsn1_unpool', dsn1, [tf.shape(inputs)[1], tf.shape(inputs)[2]])
                dsn1_sigmoid = tools.activate("sigmoid", dsn1, tf.nn.sigmoid)
            with tf.variable_scope("dsn2"):
                dsn2 = tools.conv('dsn2', dsn2, 1, [1, 1], [1, 1],
                                  padding='SAME', W_init=W_init, b_init=b_init,
                                  trainable=trainable, print_shape=training)
                dsn2 = tools.deconv_hed('deconv', dsn2, [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], 1], [4, 4], [2, 2],
                                        padding='SAME', W_init=W_init, b_init=b_init,
                                        trainable=trainable, print_shape=training)
                # dsn2 = tools.up_sampling('dsn2_unpool', dsn2, [tf.shape(inputs)[1], tf.shape(inputs)[2]])
                dsn2_sigmoid = tools.activate("sigmoid", dsn2, tf.nn.sigmoid,
                                              trainable=trainable, print_shape=training)
            with tf.variable_scope("dsn3"):
                dsn3 = tools.conv('dsn3', dsn3, 1, [1, 1], [1, 1],
                                  padding='SAME', W_init=W_init, b_init=b_init,
                                  trainable=trainable, print_shape=training)
                dsn3 = tools.deconv_hed('deconv', dsn3, [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], 1], [8, 8], [4, 4],
                                        padding='SAME', W_init=W_init, b_init=b_init,
                                        trainable=trainable, print_shape=training)
                # dsn3 = tools.up_sampling('dsn3_unpool', dsn3, [tf.shape(inputs)[1], tf.shape(inputs)[2]])
                dsn3_sigmoid = tools.activate("sigmoid", dsn3, tf.nn.sigmoid,
                                              trainable=trainable, print_shape=training)
            with tf.variable_scope("dsn4"):
                dsn4 = tools.conv('dsn4', dsn4, 1, [1, 1], [1, 1],
                                  padding='SAME', W_init=W_init, b_init=b_init,
                                  trainable=trainable, print_shape=training)
                dsn4 = tools.deconv_hed('deconv', dsn4, [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], 1], [16, 16], [8, 8],
                                        padding='SAME', W_init=W_init, b_init=b_init,
                                        trainable=trainable, print_shape=training)
                # dsn4 = tools.up_sampling('dsn4_unpool', dsn4, [tf.shape(inputs)[1], tf.shape(inputs)[2]])
                dsn4_sigmoid = tools.activate("sigmoid", dsn4, tf.nn.sigmoid,
                                              trainable=trainable, print_shape=training)
            with tf.variable_scope("dsn5"):
                dsn5 = tools.conv('dsn5', dsn5, 1, [1, 1], [1, 1],
                                  padding='SAME', W_init=W_init, b_init=b_init,
                                  trainable=trainable, print_shape=training)
                dsn5 = tools.deconv_hed('deconv', dsn5, [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], 1], [32, 32], [16, 16],
                                        padding='SAME', W_init=W_init, b_init=b_init,
                                        trainable=trainable, print_shape=training)
                # dsn5 = tools.up_sampling('dsn5_unpool', dsn5, [tf.shape(inputs)[1], tf.shape(inputs)[2]])
                dsn5_sigmoid = tools.activate("sigmoid", dsn5, tf.nn.sigmoid,
                                              trainable=trainable, print_shape=training)
            with tf.variable_scope("dsn_fusion"):
                # Weighted fusion of the five side outputs; each starts with
                # equal weight 0.2 (five inputs summing to 1).
                dsn_fusion = tf.concat([dsn1, dsn2, dsn3, dsn4, dsn5], axis=3, name='concat')
                fusion_init = tf.constant_initializer(0.2)
                dsn_fusion = tools.conv('fusion', dsn_fusion, 1, [1, 1], [1, 1],
                                        padding='SAME', W_init=fusion_init,
                                        trainable=trainable, print_shape=training)
                dsn_fusion_sigmoid = tools.activate("sigmoid", dsn_fusion, tf.nn.sigmoid,
                                                    trainable=trainable, print_shape=training)
        with tf.variable_scope("refine"):
            # The refine head trains whenever we are not in pure HED
            # ('pred') mode; zero initialisation makes it start as a no-op
            # correction on top of the fused HED output.
            trainable = self.trainable and (self.train_mode != "pred")
            print("refine trainable: ", trainable)
            W_init = tf.zeros_initializer()
            b_init = tf.zeros_initializer()
            # Tower 1: produces an additive edge-strengthening map.
            net = tools.conv('conv1', inputs, 8, [3, 3], [1, 1], act=tf.nn.relu,
                             padding='SAME', W_init=W_init, b_init=b_init,
                             trainable=trainable, print_shape=training)
            net = tools.conv('conv2', net, 8, [3, 3], [1, 1], act=tf.nn.relu,
                             padding='SAME', W_init=W_init, b_init=b_init,
                             trainable=trainable, print_shape=training)
            net = tools.conv('conv3', net, 8, [3, 3], [1, 1], act=tf.nn.relu,
                             padding='SAME', W_init=W_init, b_init=b_init,
                             trainable=trainable, print_shape=training)
            refine_add = tools.conv('conv4', net, 1, [1, 1], [1, 1], act=tf.nn.sigmoid,
                                    padding='SAME', W_init=W_init, b_init=b_init,
                                    trainable=trainable, print_shape=training)
            # Tower 2: produces a subtractive noise-suppression map.
            net = tools.conv('conv5', inputs, 8, [3, 3], [1, 1], act=tf.nn.relu,
                             padding='SAME', W_init=W_init, b_init=b_init,
                             trainable=trainable, print_shape=training)
            net = tools.conv('conv6', net, 8, [3, 3], [1, 1], act=tf.nn.relu,
                             padding='SAME', W_init=W_init, b_init=b_init,
                             trainable=trainable, print_shape=training)
            net = tools.conv('conv7', net, 8, [3, 3], [1, 1], act=tf.nn.relu,
                             padding='SAME', W_init=W_init, b_init=b_init,
                             trainable=trainable, print_shape=training)
            refine_sub = tools.conv('conv8', net, 1, [1, 1], [1, 1], act=tf.nn.sigmoid,
                                    padding='SAME', W_init=W_init, b_init=b_init,
                                    trainable=trainable, print_shape=training)
        with tf.variable_scope("hed-out"):
            # Re-export every intermediate under stable attribute names for
            # summaries and inference.
            self.dsn1_sigmoid = tf.identity(dsn1_sigmoid)
            self.dsn2_sigmoid = tf.identity(dsn2_sigmoid)
            self.dsn3_sigmoid = tf.identity(dsn3_sigmoid)
            self.dsn4_sigmoid = tf.identity(dsn4_sigmoid)
            self.dsn5_sigmoid = tf.identity(dsn5_sigmoid)
            self.dsn_fusion_sigmoid = tf.identity(dsn_fusion_sigmoid)
            self.dsn1 = tf.identity(dsn1)
            self.dsn2 = tf.identity(dsn2)
            self.dsn3 = tf.identity(dsn3)
            self.dsn4 = tf.identity(dsn4)
            self.dsn5 = tf.identity(dsn5)
            self.dsn_fusion = tf.identity(dsn_fusion)
            self.refine_add = tf.identity(refine_add)
            self.refine_sub = tf.identity(refine_sub)
        outputs = tf.identity(dsn_fusion_sigmoid)
        if self.train_mode != "pred":
            outputs = outputs + refine_add - refine_sub  # deepen edges, subtract the excess
        return outputs

    def sigmoid_cross_entropy(self, logits, y):
        """Plain sigmoid cross-entropy between logits and binary targets y."""
        # count_neg = tf.maximum(tf.reduce_sum(1. - y), 1) # the number of 0 in y
        # count_pos = tf.maximum(tf.reduce_sum(y), 1) # the number of 1 in y (less than count_neg)
        # pos_weight = tf.minimum(count_neg/count_pos, 5)
        # # targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits))
        # cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)
        # # cost = tf.reduce_mean(cost)
        # cost = tf.reduce_mean(cost * tf.maximum(count_pos/(count_neg+count_pos), 0.2))
        # # cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
        cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
        return cost

    def buildLoss(self, outputs, labels):
        """Deep-supervision HED loss in 'pred' mode; otherwise a smoothed
        L1-style loss on the refined output (arguments are unused)."""
        with tf.variable_scope("loss"):
            if self.train_mode == "pred":
                # Fusion loss plus equally weighted per-side-output losses.
                cost_fusion = self.sigmoid_cross_entropy(self.dsn_fusion, self.y)
                cost_dsn1 = self.sigmoid_cross_entropy(self.dsn1, self.y)
                cost_dsn2 = self.sigmoid_cross_entropy(self.dsn2, self.y)
                cost_dsn3 = self.sigmoid_cross_entropy(self.dsn3, self.y)
                cost_dsn4 = self.sigmoid_cross_entropy(self.dsn4, self.y)
                cost_dsn5 = self.sigmoid_cross_entropy(self.dsn5, self.y)
                λ = 1.0
                return cost_fusion + λ*cost_dsn1 + λ*cost_dsn2 + λ*cost_dsn3 + λ*cost_dsn4 + λ*cost_dsn5
            else:
                # sqrt(diff^2 + eps): smooth, differentiable |outputs - y|.
                return tf.reduce_mean(tf.sqrt(tf.square(self.outputs - self.y) + 1e-12))

    def buildAccuracy(self, outputs, labels):
        """Pixel accuracy after thresholding at 0.25; also records separate
        edge-pixel and background-pixel accuracies on self (labels unused)."""
        with tf.variable_scope("accuracy"):
            outputs_b = tf.where(tf.less(outputs, 0.25), tf.fill(tf.shape(outputs), 0.), tf.fill(tf.shape(outputs), 1.))
            diff = tf.abs(outputs_b-self.y)
            accuracy = 1.0 - tf.reduce_mean(diff)
        with tf.variable_scope("edge_accuracy"):
            # +0.5 guards against division by zero on edge-free labels.
            edge_sum = tf.reduce_sum(self.y)+0.5
            edge_accuracy = 1.0 - tf.reduce_sum(diff*tf.abs(self.y))/edge_sum
        with tf.variable_scope("bg_accuracy"):
            bg_sum = tf.reduce_sum(1.-self.y)+0.5
            bg_accuracy = 1.0 - tf.reduce_sum(diff*tf.abs(1.-self.y))/bg_sum
        self.edge_accuracy = tf.identity(edge_accuracy)
        self.bg_accuracy = tf.identity(bg_accuracy)
        return accuracy
|
# Human-readable error messages returned by the payment-callback validator.
# NOTE(review): these strings may be matched by callers/tests — do not edit
# their wording without checking usages.
MESSAGE_EMPTY_HASHPARAMSVAL = "Empty HASH or HASHPARAMSVAL"
HASH_VALUES_DO_NOT_MATCH = "Hash values do not match"
HASHES_DO_NOT_MATCHED = "Hashes do not match"
PAYMENT_ALREADY_PROCESSED_OR_NOT_EXISTS = "Payment already processed or not exists"
INVALID_CURRENCY_CODE = "Wrong currency code"
INVALID_ORDER_ID = "Invalid order id"
INVALID_AMOUNT_VALUE = "Invalid amount value"
|
from __future__ import annotations
import threading
import random
import string
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Optional, Callable, Any
from ..constants import ALL_COMPLETED
__all__ = ("Executor", "ThreadManager")
class Executor(ThreadPoolExecutor):
    """Thread pool that remembers every submitted future and tags its
    worker threads with a session id for later lookup."""

    def __init__(self, *args, **kwargs):
        # 'session_id' and 'thread_name' are mandatory keyword arguments;
        # everything else is forwarded to ThreadPoolExecutor.
        self._session_id = kwargs.pop("session_id")
        self._futures = []
        base_name = kwargs.pop("thread_name")
        self._thread_name = base_name + f":session_id={self.session_id}:task_number="
        super().__init__(thread_name_prefix=self._thread_name, *args, **kwargs)

    @property
    def futures(self):
        """All futures submitted since the last purge."""
        return self._futures

    @property
    def threads(self):
        # Worker-thread set maintained internally by ThreadPoolExecutor.
        return self._threads

    @property
    def session_id(self):
        return self._session_id

    def submit(self, fn: Callable, *args: Any, **kwargs: Any):
        """Submit *fn* to the pool and record the resulting future."""
        fut = super().submit(fn, *args, **kwargs)
        self._futures.append(fut)
        return fut

    def clear_futures(self):
        """Forget every recorded future."""
        self._futures.clear()

    def wait_for_futures(
        self,
        *,
        timeout: Optional[int] = None,
        return_when=ALL_COMPLETED,
        purge: bool = True,
    ):
        """Block until the recorded futures satisfy *return_when*.

        Returns None when nothing was submitted; otherwise the
        (done, not_done) result of concurrent.futures.wait. With
        purge=True the recorded futures are cleared afterwards.
        """
        if not self._futures:
            return None
        outcome = wait(self._futures, timeout, return_when)
        if purge:
            self.clear_futures()
        return outcome
class ThreadManager:
@property
def active_threads(self) -> list:
return threading.enumerate()
def create_new_executor(self, *, max_workers: int = 100, thread_name: str = "", session_id: str = None) -> Executor:
return Executor(
max_workers,
thread_name=thread_name,
session_id=session_id or self.generate_thread_session(),
)
def get_threads(self, session_id):
threads = []
for t in self.active_threads:
if session_id in t.name:
threads.append(t)
return threads
def generate_thread_session(self):
return "".join((random.sample(string.ascii_lowercase, 10)))
|
import os
import re
from datetime import datetime
# Advent of Code 2020 day 16, part 1: sum every ticket value that
# satisfies no field rule at all.
use_sample = 0
input_file = os.path.join(os.path.dirname(
    __file__), "sample.txt" if use_sample else "input.txt")

# BUG FIX: the file handle was never closed; 'with' guarantees cleanup.
with open(input_file, "r") as f:
    lines = f.read().splitlines()

starttime = datetime.now()

# Parse the three blank-line-separated sections:
# field rules, "your ticket", "nearby tickets".
rules = []       # flat list of [low, high] inclusive ranges (two per field)
my_ticket = []
tickets = []
section = 0
for line in lines:
    if line == "":
        section = section + 1
    elif line == "your ticket:" or line == "nearby tickets:":
        pass  # section headers carry no data
    elif section == 0:
        (k, rest) = line.split(": ", 1)
        (rule1, rule2) = rest.split(" or ", 1)
        rules.append([int(x) for x in rule1.split("-")])
        rules.append([int(x) for x in rule2.split("-")])
    elif section == 1:
        my_ticket = [int(x) for x in line.split(",")]
    else:
        tickets.append([int(x) for x in line.split(",")])

# A value contributes to the error rate when it falls outside every range.
error_rate = 0
for ticket in tickets:
    for value in ticket:
        fails = [value < rule[0] or value > rule[1] for rule in rules]
        if all(fails):
            error_rate = error_rate + value

print(f"Ticket scanning error rate: {error_rate}")
endtime = datetime.now()
spent = endtime-starttime
print(f"Time taken: {spent}")
|
"""Contains the class for Remote PLIST attributes."""
import logging
import os
import tempfile
# pylint: disable=relative-import
try:
import bad_wolf
import config
import curl_requests
import misc
import option_packs
import package
import plist
except ImportError:
from . import bad_wolf
from . import config
from . import curl_requests
from . import misc
from . import option_packs
from . import package
from . import plist
# pylint: enable=relative-import
LOG = logging.getLogger(__name__)
class RemotePlist(object):
    """Class for remote plist as a source."""

    def __init__(self, obj):
        """obj: the plist file name identifying a release (joined onto the
        failover URL, so presumably a bare basename — TODO confirm)."""
        self._plist = obj
        # Per-bundle temporary directory used as the download destination.
        self._tmp_dir = os.path.join(tempfile.gettempdir(), config.BUNDLE_ID)
        self._plist_url_path = misc.plist_url_path(self._plist)
        self._plist_failover_url_path = os.path.join(config.AUDIOCONTENT_FAILOVER_URL, 'lp10_ms3_content_2016', self._plist)
        # Empty attr for option packs; populated inside _read_remote_plist().
        # BUG FIX: this must be initialised *before* _read_remote_plist() is
        # called — the old order assigned None afterwards, clobbering the
        # option packs that _read_remote_plist() had just stored.
        self.option_packs = None
        self._all_packages = self._read_remote_plist()

    def _read_remote_plist(self):
        """Download and parse the property list; returns a set of
        LoopPackage objects (or None when parsing fails)."""
        result = None
        _basename = os.path.basename(self._plist_url_path)
        _tmp_file = os.path.join(self._tmp_dir, _basename)
        _bad_wolf_fixes = bad_wolf.BAD_WOLF_PKGS.get(_basename, None)
        _bwd = None
        _req = curl_requests.CURL(url=self._plist_url_path)
        # NOTE 2019-11-04: Seems that using the 'resume' capability in cURL does not
        # work here now for some reason, so don't resume.
        if _req.status in config.HTTP_OK_STATUS:
            _req.get(url=self._plist_url_path, output=_tmp_file, resume=False)
        else:
            # Primary URL unreachable: fall back to the failover mirror.
            _req.get(url=self._plist_failover_url_path, output=_tmp_file, resume=False)
        _root = plist.readPlist(_tmp_file)
        if _root:
            result = set()
            # Apply 'Bad Wolf' patches
            for _pkg in _root['Packages']:
                _new_pkg = _root['Packages'][_pkg].copy()  # Work on copy
                # Create a new key called 'PackageName' that
                # contains the value '_pkg' for use with content packs.
                _new_pkg['PackageName'] = _pkg
                if _bad_wolf_fixes:
                    _bwd = _bad_wolf_fixes.get(_pkg, None)  # A dictionary from '_bad_wolf_fixes'
                # Merge new/existing keys from matching '_bwd'
                if _bwd:
                    _new_pkg.update(_bwd)
                _pkg_obj = package.LoopPackage(**_new_pkg)
                # pylint: disable=no-member
                # Only add/process packages that are _not_ 'BadWolfIgnore = True'
                if not _pkg_obj.BadWolfIgnore:
                    result.add(_pkg_obj)
                # pylint: enable=no-member
            # Now process option packs
            _opt_packs = option_packs.OptionPack(source=_root, release=_basename)
            self.option_packs = _opt_packs.option_packs
        misc.clean_up(file_path=_tmp_file)
        return result

    @property
    def mandatory_pkgs(self):
        """Returns the mandatory packages as objects in a set."""
        return set([_pkg for _pkg in self._all_packages if _pkg.IsMandatory])

    @property
    def optional_pkgs(self):
        """Returns the optional packages as objects in a set."""
        return set([_pkg for _pkg in self._all_packages if not _pkg.IsMandatory])
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import torch
class AntTruncatedObsEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """
    External forces (sim.data.cfrc_ext) are removed from the observation.
    Otherwise identical to Ant-v2 from
    https://github.com/openai/gym/blob/master/gym/envs/mujoco/ant.py
    """

    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)
        utils.EzPickle.__init__(self)
        # Episode cap enforced by the caller (MujocoEnv itself does not use it).
        self._max_episode_steps=1000

    def step(self, a):
        """Advance one step; reward = forward progress - control/contact
        costs + survive bonus. Also returns a safety 'cost' in the info dict."""
        xposbefore = self.get_body_com("torso")[0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.get_body_com("torso")[0]
        forward_reward = (xposafter - xposbefore)/self.dt
        ctrl_cost = .5 * np.square(a).sum()
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        survive_reward = 1.0
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self.state_vector()
        # Episode ends when the state blows up or torso height leaves [0.2, 1.0].
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self._get_obs()
        # Safety cost: ob[0] is qpos[1] (qpos[0] is dropped from the obs);
        # penalise drift beyond 0.2 from zero along that coordinate.
        cost = (abs(ob[0])-0.2)*(abs(ob[0])>=0.2)
        return ob, reward, done, dict(
            cost = cost,
            reward_forward=forward_reward,
            reward_ctrl=-ctrl_cost,
            reward_contact=-contact_cost,
            reward_survive=survive_reward)

    def _get_obs(self):
        # Observation = qpos without its first component, plus qvel.
        # External contact forces are deliberately excluded (see class doc).
        return np.concatenate([
            self.sim.data.qpos.flat[1:],
            self.sim.data.qvel.flat,
            # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])

    def reset_model(self):
        # Small uniform/Gaussian perturbations around the initial state.
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()

    def viewer_setup(self):
        self.viewer.cam.distance = self.model.stat.extent * 0.5

    def termination_function(self,obs, act, next_obs):
        """Batched done-flag computation mirroring step()'s check.

        next_obs[:, 1] is qpos[2] (torso height) because _get_obs() drops
        qpos[0] — the same component step() tests via state[2].
        Returns a (batch, 1) array/tensor of 0/1 done flags.
        """
        x = next_obs[:, 1]
        if torch.is_tensor(next_obs):
            not_done = torch.isfinite(next_obs).all(dim=-1).float()\
                * (x >= .2).float() \
                * (x<=1.0).float()
        else:
            not_done = np.isfinite(next_obs).all(axis=-1) \
                * (x >= 0.2) \
                * (x <= 1.0)
        done = 1-not_done
        done = done[:,None]
        return done

    def get_cost_trajectory(self, trajectory):
        """
        Reward function definition.

        Sums the per-step safety cost (drift of obs[0] beyond 0.2) over a
        trajectory. Supports both 2-D (steps, obs) and 3-D
        (steps, batch, obs) inputs, as numpy arrays or torch tensors.
        """
        # print(observation.shape)
        # cost = np.any((abs(observation[:,0])>0.5))*5
        thres = 0.2
        if trajectory.ndim>2:
            if torch.is_tensor(trajectory):
                cost = (abs(trajectory[:,:,0])-thres)* (abs(trajectory[:,:,0])>=thres)
                # cost = (abs(trajectory[:,:,0])-thres)**2 *(abs(trajectory[:,:,0])>=thres)
                traj_cost = torch.sum(cost,dim=0)
                return traj_cost
            cost = (abs(trajectory[:,:,0])-thres)* (abs(trajectory[:,:,0])>=thres)
            traj_cost = np.sum(cost,axis=0)
            return traj_cost
        else:
            if torch.is_tensor(trajectory):
                traj_cost = torch.sum((abs(trajectory[:,0])-thres)*(abs(trajectory[:,0])>=thres))
            else:
                traj_cost = np.sum((abs(trajectory[:,0])-thres)*(abs(trajectory[:,0])>=thres))
            return traj_cost
# class HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):
# def __init__(self):
# dir_path = os.path.dirname(os.path.realpath(__file__))
# mujoco_env.MujocoEnv.__init__(self, '%s/assets/hopper.xml' % dir_path, 4)
# self.prev_qpos = None
# # mujoco_env.MujocoEnv.__init__(self, 'hopper.xml', 4)
# utils.EzPickle.__init__(self)
# self._max_episode_steps=1000
# def step(self, a):
# self.prev_qpos = self.sim.data.qpos[0]
# posbefore = self.sim.data.qpos[0]
# self.do_simulation(a, self.frame_skip)
# posafter, height, ang = self.sim.data.qpos[0:3]
# alive_bonus = 1.0
# reward = (posafter - posbefore) / self.dt
# reward += alive_bonus
# reward -=3* (height-1.3)**2
# reward -= 0.1 * np.square(a).sum()
# s = self.state_vector()
# # done = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and
# # (height > .7) and (abs(ang) < .2))
# done = False
# ob = self._get_obs()
# info = {'redundant_reward':-3* (height-1.3)**2 }
# return ob, reward, done, info
# def _get_obs(self):
# return np.concatenate([
# (self.sim.data.qpos.flat[:1]-self.prev_qpos)/self.dt,
# self.sim.data.qpos.flat[1:],
# np.clip(self.sim.data.qvel.flat, -10, 10)
# ])
# def reset_model(self):
# qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq)
# qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
# self.set_state(qpos, qvel)
# self.prev_qpos = np.copy(self.sim.data.qpos[0])
# return self._get_obs()
# def viewer_setup(self):
# self.viewer.cam.trackbodyid = 2
# self.viewer.cam.distance = self.model.stat.extent * 0.75
# self.viewer.cam.lookat[2] = 1.15
# self.viewer.cam.elevation = -20
# class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
# def __init__(self):
# self.prev_qpos = None
# dir_path = os.path.dirname(os.path.realpath(__file__))
# mujoco_env.MujocoEnv.__init__(self, '%s/assets/half_cheetah.xml' % dir_path, 5)
# utils.EzPickle.__init__(self)
# self._max_episode_steps=1000
# def step(self, action):
# self.prev_qpos = np.copy(self.sim.data.qpos.flat)
# self.do_simulation(action, self.frame_skip)
# ob = self.get_obs()
# reward_ctrl = -0.1 * np.square(action).sum()
# reward_run = ob[0] - 0.0 * np.square(ob[2])
# reward = reward_run + reward_ctrl
# done = False
# return ob, reward, done, {}
# def get_obs(self):
# return np.concatenate([
# (self.sim.data.qpos.flat[:1] - self.prev_qpos[:1]) / self.dt,
# self.sim.data.qpos.flat[1:],
# self.sim.data.qvel.flat,
# ])
# def reset_model(self):
# qpos = self.init_qpos + np.random.normal(loc=0, scale=0.001, size=self.model.nq)
# qvel = self.init_qvel + np.random.normal(loc=0, scale=0.001, size=self.model.nv)
# self.set_state(qpos, qvel)
# self.prev_qpos = np.copy(self.sim.data.qpos.flat)
# return self.get_obs()
# def viewer_setup(self):
# self.viewer.cam.distance = self.model.stat.extent * 0.25
# self.viewer.cam.elevation = -55
|
"""
Creating randomly moving turtle.
"""
import turtle as t
import random as ri
#Creating def for Boundary so that it doesn't go out of the window
def inside_window():
    """Return True while the turtle is more than 100 px from every window edge."""
    half_w = t.window_width() / 2
    half_h = t.window_height() / 2
    x, y = t.pos()
    # Shrink the usable area by a 100 px margin on every side.
    return (-half_w + 100) < x < (half_w - 100) and (-half_h + 100) < y < (half_h - 100)
#Function for random movement of turtle
def move_turtle():
    """Turn and advance by random amounts, or back away from the edge."""
    if not inside_window():
        # Too close to the border: retreat toward the centre.
        t.backward(200)
        return
    t.right(ri.randint(0, 180))
    t.forward(ri.randint(0, 200))
# One-time turtle/screen configuration.
t.shape('turtle')
t.fillcolor('green')
t.bgcolor('black')
t.speed(1)
t.pensize(3)
# Wander forever; move_turtle() bounces back from the window margin.
while True:
    move_turtle()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
from pandas_datareader import data
import statsmodels.api as sm
from statsmodels.tsa.seasonal import STL
#dn = np.random.randint(2, size=100)*2-1
#gwalk = np.cumprod(np.exp(dn*0.01))*100
def get_stock(stock, start, end):
    """Fetch daily closing prices for *stock* from Stooq and return the
    chronologically ordered slice between *start* and *end*."""
    closes = data.DataReader(stock, 'stooq', start)["Close"]
    # Stooq returns newest-first; flip to oldest-first before slicing.
    return closes.iloc[::-1][start:end]
# Ticker codes (Tokyo listings): Sony 6758, JAL 9201, Sumitomo Mitsui
# Financial 8316, docomo 9437, ANA 9202, Nissan 7201, Fast Retailing 9983,
# Mizuho 8411, Sumitomo Chemical 4005, Towa Pharmaceutical 4553, NTT 9432.
stock0 = '6758'
stock = stock0 + '.JP'  # Stooq symbol suffix for Japanese listings
bunseki = "series"  # analysis label used in the output file name
start = dt.date(2020,1,1)
end = dt.date(2020,6,5)
df = pd.DataFrame(get_stock(stock, start, end))
# Closing prices as a plain list for plotting / EMA input.
gwalk = df['Close'].values.tolist()
#gwalk = df['Close']
print(gwalk[0:3])
def EMA1(x, n):
    """Exponential moving average of *x* with the standard smoothing
    factor alpha = 2 / (n + 1) for an n-period EMA."""
    alpha = 2 / (n + 1)
    return pd.Series(x).ewm(alpha=alpha).mean()
# MACD: difference between the 12- and 26-period EMAs of the close.
y12 = EMA1(gwalk, 12)
y26 = EMA1(gwalk, 26)
MACD = y12 -y26
# Signal line = 9-period EMA of MACD; histogram = MACD - signal.
signal = EMA1(MACD, 9)
hist_=MACD-signal
ind = np.arange(len(signal))
# Top axis: price and both EMAs; bottom axis: MACD, signal and histogram.
fig, ax = plt.subplots(2,1)
ax[0].plot(gwalk,label="gwalk")
ax[0].plot(y12,label="y12")
ax[0].plot(y26,label="y26")
ax[1].plot(MACD,label="MACD")
ax[1].plot(signal,label="signal")
ax[1].bar(ind,hist_)
ax[0].legend()
ax[1].legend()
ax[0].grid()
ax[1].grid()
# NOTE(review): assumes ./stock/<ticker>/ already exists — savefig will
# raise otherwise.
plt.savefig("./stock/{}/ema_close_%5K%25D_{}_{}now{}.png".format(stock0,stock,bunseki,start))
plt.pause(1)
plt.close()
|
"""Helper functions and types to aid with Python 2.5 - 3 support."""
import sys
import pymongo
# Feature flag: pymongo 3.x changed several APIs, so downstream code
# branches on IS_PYMONGO_3.
if pymongo.version_tuple[0] < 3:
    IS_PYMONGO_3 = False
else:
    IS_PYMONGO_3 = True
PY3 = sys.version_info[0] == 3

if PY3:
    import codecs
    from io import BytesIO as StringIO

    # return s converted to binary. b('test') should be equivalent to b'test'
    def b(s):
        return codecs.latin_1_encode(s)[0]

    bin_type = bytes
    txt_type = str
else:
    # FIX: the old try/except fallback imported the identical module in
    # both branches; a single import is equivalent.
    from io import StringIO

    # Conversion to binary only necessary in Python 3
    def b(s):
        return s

    bin_type = str
    txt_type = str

# Types that count as "string-like" on this interpreter.
str_types = (bin_type, txt_type)
|
#!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap
class CallbackSubscriptionMap(BaseMap):
    """Field map for a callback-subscription API resource.

    Attribute names mirror the payload fields; values are populated by the
    BaseMap machinery at runtime.
    """
    url = None     # callback URL to notify
    user = None    # user credential associated with the callback
    expiry = None  # subscription expiry
    status = None  # current subscription status
|
#!/usr/bin/env python3
import random
from time import sleep, time
import smbus
from .hsv7segment import HSV7Segment
# Module load timestamp.
start_time = time()

# Standard HTML colour names mapped to (hue, saturation, value) triples,
# each component normalised to the 0..1 range (value capped at 0.5 to
# limit LED brightness).
HTML_COLORS = {
    # name hue sat val
    'white': (0, 0, 0.25),
    'black': (0, 0, 0),
    'red': (0, 1, 0.5),
    'maroon': (0, 1, 0.25),
    'yellow': (0.16666666, 1, 0.5),
    'olive': (0.16666666, 1, 0.25),
    'lime': (0.33333333, 1, 0.5),
    'green': (0.33333333, 1, 0.25),
    'aqua': (0.5, 1, 0.5),
    'teal': (0.5, 1, 0.25),
    'blue': (0.66666666, 1, 0.5),
    'navy': (0.66666666, 1, 0.25),
    'fuchsia': (0.83333333, 1, 0.5),
    'purple': (0.83333333, 1, 0.25)
}


class NumberDisplay(object):
    """Drive a single HSV-colour 7-segment display.

    Characters are rendered by enabling subsets of the segments a-g (plus
    the decimal point); the active colour is an (h, s, v) triple with every
    component in the 0..1 range.
    """

    def __init__(self, i2c_bus=None, ic_address=None, colors=HTML_COLORS):
        self.hsv7seg = HSV7Segment(i2c_bus=i2c_bus, ic_address=ic_address, buffered=True)
        # Which segments are currently lit.
        self.segments_enabled = {'a':False, 'b':False, 'c':False, 'd':False, 'e':False, 'f':False, 'g':False, 'dp':False}
        self.color = colors['white']
        self.color_off = colors['black']
        self.colors = colors
        # Segment maps for every renderable character.
        self.characters = {
            '': {'a':False, 'b':False, 'c':False, 'd':False, 'e':False, 'f':False, 'g':False},
            '0': {'a':True, 'b':True, 'c':True, 'd':True, 'e':True, 'f':True, 'g':False},
            '1': {'a':False, 'b':True, 'c':True, 'd':False, 'e':False, 'f':False, 'g':False},
            '2': {'a':True, 'b':True, 'c':False, 'd':True, 'e':True, 'f':False, 'g':True},
            '3': {'a':True, 'b':True, 'c':True, 'd':True, 'e':False, 'f':False, 'g':True},
            '4': {'a':False, 'b':True, 'c':True, 'd':False, 'e':False, 'f':True, 'g':True},
            '5': {'a':True, 'b':False, 'c':True, 'd':True, 'e':False, 'f':True, 'g':True},
            '6': {'a':True, 'b':False, 'c':True, 'd':True, 'e':True, 'f':True, 'g':True},
            '7': {'a':True, 'b':True, 'c':True, 'd':False, 'e':False, 'f':False, 'g':False},
            '8': {'a':True, 'b':True, 'c':True, 'd':True, 'e':True, 'f':True, 'g':True},
            '9': {'a':True, 'b':True, 'c':True, 'd':True, 'e':False, 'f':True, 'g':True},
            'A': {'a':True, 'b':True, 'c':True, 'd':False, 'e':True, 'f':True, 'g':True},
            'C': {'a':True, 'b':False, 'c':False, 'd':True, 'e':True, 'f':True, 'g':False},
            'E': {'a':True, 'b':False, 'c':False, 'd':True, 'e':True, 'f':True, 'g':True},
            'F': {'a':True, 'b':False, 'c':False, 'd':False, 'e':True, 'f':True, 'g':True},
            'H': {'a':False, 'b':True, 'c':True, 'd':False, 'e':True, 'f':True, 'g':True},
            'J': {'a':False, 'b':True, 'c':True, 'd':True, 'e':True, 'f':False, 'g':False},
            'L': {'a':False, 'b':False, 'c':False, 'd':True, 'e':True, 'f':True, 'g':False},
            'P': {'a':True, 'b':True, 'c':False, 'd':False, 'e':True, 'f':True, 'g':True},
            'R': {'a':True, 'b':True, 'c':True, 'd':False, 'e':True, 'f':True, 'g':True},
            'U': {'a':False, 'b':True, 'c':True, 'd':True, 'e':True, 'f':True, 'g':False},
        }
        # Letters with no good 7-segment shape reuse the closest look-alike.
        self.characters['B'] = self.characters['8']
        self.characters['D'] = self.characters['0']
        self.characters['G'] = self.characters['6']
        self.characters['I'] = self.characters['1']
        self.characters['K'] = self.characters['H']
        self.characters['M'] = self.characters['H']
        self.characters['N'] = self.characters['H']
        self.characters['O'] = self.characters['0']
        self.characters['Q'] = self.characters['0']
        self.characters['S'] = self.characters['5']
        self.characters['T'] = self.characters['7']
        self.characters['V'] = self.characters['U']
        self.characters['W'] = self.characters['U']
        self.characters['X'] = self.characters['H']
        self.characters['Y'] = self.characters['4']
        self.characters['Z'] = self.characters['2']

    def update(self):
        """Push the enabled/disabled segment colours to the hardware."""
        for segment in self.hsv7seg.segments:
            if not self.segments_enabled[segment]:
                self.hsv7seg.segments[segment].hsv = self.color_off
            else:
                self.hsv7seg.segments[segment].hsv = self.color
        self.hsv7seg.update()

    def set_color(self, color):
        """Set the active segment colour to an (h, s, v) triple and repaint.

        Accepts any 3-element tuple or list (breathing() passes a list);
        each component must be within [0, 1].
        """
        # BUG FIX: the original condition used 'and', which let
        # wrongly-sized tuples slip past validation and fail later with an
        # IndexError instead of a clear ValueError.
        if not isinstance(color, (tuple, list)) or len(color) != 3:
            raise ValueError('set_color(color) must be passed a tuple consisting of (hue, saturation, value)')
        if not (0 <= color[0] <= 1):
            raise ValueError('set_color(color): hue must be between 0 and 1.')
        if not (0 <= color[1] <= 1):
            raise ValueError('set_color(color): saturation must be between 0 and 1.')
        if not (0 <= color[2] <= 1):
            raise ValueError('set_color(color): value must be between 0 and 1.')
        self.color = color
        self.update()

    def display(self, character='', color=None):
        """Render *character* (digit or letter), optionally changing colour.

        Unknown characters blank the display.
        """
        character = str(character).upper()
        if character not in self.characters:
            #print('Warning: Character not found!')
            character = ''
        if color:
            self.set_color(color)
        self.segments_enabled.update(self.characters[character])
        self.update()

    def breathing(self):
        """Implementation of an LED breathing effect.

        This function blocks and should be called in a thread or something.
        """
        color = list(self.color)
        top_pause_time = .2
        bottom_pause_time = .4
        while True:
            print('Color:', color)
            # Ramp brightness up in 1/256 steps, pause, then ramp back down.
            x = 0
            while x < 1:
                color[2] = x
                self.set_color(color)
                x += 0.00390625
            sleep(top_pause_time)
            while x >= 0:
                color[2] = x
                self.set_color(color)
                x -= 0.00390625
            sleep(bottom_pause_time)
            # Pick a new random hue/saturation for the next breath.
            color = [random.random(), random.random(), 0]
if __name__ == '__main__':
    # Demo routines: flip the 'if' flags below to choose one.
    # (The redundant local 'import random' was removed; the module is
    # already imported at the top of the file.)
    display = NumberDisplay()
    # Display the alphabet in random colors
    if False:
        for c in 'abcdefghijklmnopqrstuvwxyz':
            while True:
                color = random.choice(list(display.colors.keys()))
                if color != 'black':
                    break
            display.display(c, display.colors[color])
            sleep(.5)
        display.display()
    # Fade a number in and out (blocks forever in breathing()).
    if True:
        color = (1, 0, 0)
        display.display(8, color)
        display.breathing()
    # Display our colors
    if False:
        for color in sorted(HTML_COLORS):
            if color == 'black':
                continue
            display.display('A', HTML_COLORS[color])
            # BUG FIX: raw_input() is Python 2; this file targets Python 3.
            input('This color is %s. Press enter for the next color.' % color)
        display.display('')
    # Countdown from 9 to 0 in progressively worrying colors
    if False:
        for i in range(9, -1, -1):
            if i > 6:
                display.display(i, display.colors['green'])
            elif i > 3:
                display.display(i, display.colors['yellow'])
            else:
                display.display(i, display.colors['red'])
            sleep(1)
        sleep(4)
        display.display()
    # Count from 0 to 9 in random colors
    if False:
        for i in range(10):
            while True:
                color = random.choice(list(display.colors.keys()))
                if color != 'black':
                    break
            display.display(i, display.colors[color])
            sleep(1)
        display.display()
|
from molfunc.molecules import FragmentMolecule
from molfunc.exceptions import MolFuncCritical
import os
here = os.path.dirname(os.path.abspath(__file__))
fragments_dir = os.path.join(here, 'fragments_lib')
class LibFragmentMolecule(FragmentMolecule):
    """A library fragment: a FragmentMolecule annotated with its SMILES
    string and a list of alternative names (aliases)."""

    def __init__(self, name, filename):
        super().__init__(name=name, xyz_filename=filename)
        # Both attributes are filled in later by get_smiles_aliases().
        self.aliases = []
        self.smiles = None
def fragment_molecule_from_file(filename):
    """From a fragment in fragments_lib get a FragmentMolecule.

    The fragment name is the file's basename without its .xyz extension.
    """
    # BUG FIX: str.rstrip('.xyz') strips *characters* from the set
    # {., x, y, z}, mangling names such as 'box' -> 'bo'; splitext removes
    # only the extension.
    name = os.path.splitext(os.path.basename(filename))[0]
    mol = LibFragmentMolecule(name=name, filename=filename)
    mol.smiles, mol.aliases = get_smiles_aliases(filename)

    return mol
def get_smiles_aliases(filename):
    """For a fragment molecule in a file get the aliases for it
    e.g. Me has aliases methyl and ch3.

    :param filename: (str) path to the fragment .xyz file
    :return: (tuple) SMILES string, list of alias strings
    :raises MolFuncCritical: if the second line does not hold aliases
    """
    # Use a context manager so the file handle is always closed.
    with open(filename, 'r') as xyz_file:
        for i, line in enumerate(xyz_file):
            # Aliases on the second line in the file
            if i == 1 and len(line.split()) == 2:
                # Line should be in the format: "SMILES alias1,alias2,alias3"
                smiles, aliases_string = line.split()
                return smiles, aliases_string.split(',')
    # Include the offending filename so the failure is actionable.
    raise MolFuncCritical(f'Fragment molecule in {filename} had no aliases')
def get_fragment_molecule(name=None, smiles=None):
    """From a name e.g. Me get the corresponding FragmentMolecule.

    :param name: (str | None) alias of the fragment
    :param smiles: (str | None) SMILES string of the fragment
    :return: matching fragment, or None when nothing in the library matches
    :raises MolFuncCritical: if neither a name nor a SMILES string is given
    """
    if name is None and smiles is None:
        raise MolFuncCritical('Cannot get the fragment')
    wanted_alias = name.lower() if name is not None else None
    # Return the first library fragment matching either on alias or SMILES
    for candidate in fragments:
        if wanted_alias is not None and wanted_alias in candidate.aliases:
            return candidate
        if smiles is not None and smiles == candidate.smiles:
            return candidate
    return None
# Populated when imported...
xyz_filepaths = [os.path.join(fragments_dir, fn)
                 for fn in os.listdir(fragments_dir) if fn.endswith('.xyz')]

# From all the xyz files populate fragments
fragments = [fragment_molecule_from_file(fn) for fn in xyz_filepaths]

# Flat list of every alias, for fast existence checks
all_aliases = [alias for fragment in fragments for alias in fragment.aliases]
all_smiles = [fragment.smiles for fragment in fragments]
|
import numpy as np
import os
import sys
import random
import torch
import torchvision
import torchvision.transforms as transforms
from utils.dataset_utils import check, separate_data, split_data, save_file
from torchvision.datasets import ImageFolder, DatasetFolder
# Fix all RNG seeds so the generated partition is reproducible across runs.
random.seed(1)
np.random.seed(1)
num_clients = 20
num_classes = 200  # Tiny-ImageNet has 200 classes
dir_path = "Tiny-imagenet/"

# http://cs231n.stanford.edu/tiny-imagenet-200.zip
# https://github.com/QinbinLi/MOON/blob/6c7a4ed1b1a8c0724fa2976292a667a828e3ff5d/datasets.py#L148
class ImageFolder_custom(DatasetFolder):
    """ImageFolder variant that can restrict itself to a subset of sample
    indices (``dataidxs``) — used to hand each federated client its share."""

    def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None):
        self.root = root
        self.dataidxs = dataidxs  # indices of the samples kept (None = all)
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        # Build the full folder dataset, then keep only the requested rows.
        imagefolder_obj = ImageFolder(self.root, self.transform, self.target_transform)
        self.loader = imagefolder_obj.loader
        if self.dataidxs is not None:
            self.samples = np.array(imagefolder_obj.samples)[self.dataidxs]
        else:
            self.samples = np.array(imagefolder_obj.samples)

    def __getitem__(self, index):
        path = self.samples[index][0]
        target = self.samples[index][1]
        # np.array stores the (path, class) pairs as strings; cast back to int.
        target = int(target)
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target

    def __len__(self):
        if self.dataidxs is None:
            return len(self.samples)
        else:
            return len(self.dataidxs)
# Allocate data to users
def generate_dataset(dir_path, num_clients, num_classes, niid, real, partition, balance):
    """Build the federated Tiny-ImageNet split and write it to disk.

    dir_path (str): output directory; must contain rawdata/tiny-imagenet-200/
    num_clients (int): number of federated clients to partition across
    num_classes (int): number of label classes (200 for Tiny-ImageNet)
    niid (bool): use a non-IID partition when True
    real (bool): use the "real world" partition variant
    partition (str/None): partition strategy name passed to separate_data
    balance (bool): balance per-client dataset sizes
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # Setup directory for train/test data
    config_path = dir_path + "config.json"
    train_path = dir_path + "train/train.json"
    test_path = dir_path + "test/test.json"

    # Skip regeneration when an identical configuration already exists.
    if check(config_path, train_path, test_path, num_clients, num_classes, niid, real, partition):
        return

    # Get data
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset = ImageFolder_custom(root=dir_path+'rawdata/tiny-imagenet-200/train/', transform=transform)
    testset = ImageFolder_custom(root=dir_path+'rawdata/tiny-imagenet-200/val/', transform=transform)
    # batch_size == dataset size: the single batch below materialises the
    # whole dataset as one tensor (memory heavy, but keeps the code simple).
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=len(trainset), shuffle=False)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=len(testset), shuffle=False)

    for _, train_data in enumerate(trainloader, 0):
        trainset.data, trainset.targets = train_data
    for _, test_data in enumerate(testloader, 0):
        testset.data, testset.targets = test_data

    dataset_image = []
    dataset_label = []

    # Pool train and test together; per-client re-splitting happens below.
    dataset_image.extend(trainset.data.cpu().detach().numpy())
    dataset_image.extend(testset.data.cpu().detach().numpy())
    dataset_label.extend(trainset.targets.cpu().detach().numpy())
    dataset_label.extend(testset.targets.cpu().detach().numpy())
    dataset_image = np.array(dataset_image)
    dataset_label = np.array(dataset_label)

    # dataset = []
    # for i in range(num_classes):
    #     idx = dataset_label == i
    #     dataset.append(dataset_image[idx])

    X, y, statistic = separate_data((dataset_image, dataset_label), num_clients, num_classes,
                                    niid, real, partition, balance)
    train_data, test_data = split_data(X, y)
    save_file(config_path, train_path, test_path, train_data, test_data, num_clients, num_classes,
              statistic, niid, real, partition)
if __name__ == "__main__":
    # CLI: <noniid|iid> <realworld|-> <partition|-> <balance|->
    # Comparisons already yield bools; `True if X else False` was redundant.
    niid = sys.argv[1] == "noniid"
    real = sys.argv[2] == "realworld"
    partition = sys.argv[3] if sys.argv[3] != "-" else None
    balance = sys.argv[4] == "balance"

    generate_dataset(dir_path, num_clients, num_classes, niid, real, partition, balance)
|
from visualizer import Visualizer

# Render the vector field (cos(x), sin(y)) and save it as a PNG.
v = Visualizer("cos(x)", "sin(y)")
# NOTE(review): skip=2 presumably thins the plotted grid — confirm in Visualizer.
plt = v.plot_color(skip=2)
plt.savefig("./img/vector_field.png")
|
# stdlib
from typing import Any
# proj
from arpeggio import *
def test_zeroormore_eolterm() -> None:
    """ZeroOrMore with eolterm=True must stop matching at the end of line."""

    def grammar() -> Any: return first, second, EOF
    def first() -> Any: return ZeroOrMore(["a", "b"], eolterm=True)
    def second() -> Any: return "a"

    # first rule should match only first line
    # so that second rule will match "a" on the new line
    # (renamed from `input`, which shadowed the builtin)
    content = """a a b a b b
a"""
    parser = ParserPython(grammar, reduce_tree=False)
    result = parser.parse(content)
    assert result
def test_oneormore_eolterm() -> None:
    """OneOrMore with eolterm=True must stop matching at the end of line."""

    def grammar() -> Any: return first, second, EOF
    def first() -> Any: return OneOrMore(["a", "b"], eolterm=True)
    def second() -> Any: return "a"

    # first rule should match only first line
    # so that second rule will match "a" on the new line
    # (renamed from `input`, which shadowed the builtin)
    content = """a a a b a
a"""
    parser = ParserPython(grammar, reduce_tree=False)
    result = parser.parse(content)
    assert result
|
# stdlib (urllib.parse / urllib.request must be imported explicitly on Py3)
import json
import urllib
import urllib.parse
import urllib.request

# Sublime Text API
import sublime
import sublime_plugin
class EmailInlineCommand(sublime_plugin.TextCommand):
    """Inline the CSS of the current HTML buffer via the Premailer web API.

    Sends the whole buffer to premailer.dialect.ca, fetches the inlined
    HTML document the service produces and replaces the buffer with it.
    """

    def run(self, edit):
        # Grab the entire buffer.
        region = sublime.Region(0, self.view.size())
        content = self.view.substr(region)

        api_url = 'http://premailer.dialect.ca/api/0.1/documents'
        # Options come from the view settings; see the Premailer API docs.
        values = { 'html' : content,
                   'adapter' : self.view.settings().get('ei_premailer_adapter'),
                   'base_url' : self.view.settings().get('ei_premailer_base_url'),
                   'link_query_string' : self.view.settings().get('ei_premailer_link_query_string'),
                   'preserve_styles' : self.view.settings().get('ei_premailer_preserve_styles'),
                   'remove_ids' : self.view.settings().get('ei_premailer_remove_ids'),
                   # BUG FIX: the API parameter is 'remove_classes'; the old
                   # misspelling 'remove_clases' was silently ignored.
                   'remove_classes' : self.view.settings().get('ei_premailer_remove_classes'),
                   'remove_comments' : self.view.settings().get('ei_premailer_remove_comments') }

        data = urllib.parse.urlencode(values)
        data = data.encode('utf-8')
        req = urllib.request.urlopen(api_url, data)
        api_response = req.read().decode('utf-8')
        obj = json.loads(api_response)

        # The API responds with a URL pointing at the inlined document.
        html_url = obj['documents']['html']
        content_req = urllib.request.urlopen(html_url)
        inlined_content = content_req.read().decode('utf-8')
        self.view.replace(edit, region, inlined_content)
|
import matplotlib.pyplot as plt
def plot_drift_vertical_lines(log_size, resp_drift=None, label="Concept drift ground truth", lw=3, alpha=0.9):
    """
    Plots vertical lines corresponding to ground truth drifts.

    Parameters:
    ------------
        log_size (int): Size of the event log to plot a drift at every 10%
        resp_drift (int/None): Plot a vertical line at every 'resp_drift' traces
        label (str): Label to write in the plot
        lw (int): Line width
        alpha (float): Line color transparency
    """
    plt.rcParams["font.family"] = "Times New Roman"
    if resp_drift is None:
        # Default: one drift marker every 10% of the log.
        resp_drift = int(log_size * 0.1)
    labelled = False
    for position in range(resp_drift, log_size, resp_drift):
        if not labelled:
            # Attach the legend label to the first line only.
            plt.axvline(x=position, ls='--', lw=lw, c='darkgreen', label=label, alpha=alpha)
            labelled = True
        else:
            plt.axvline(x=position, ls='--', lw=lw, c='darkgreen', alpha=alpha)
def plot_deteccao_drift(
    run_df, col, detected_drifts, y_true, rolling_means, lowers, uppers, save_png=""
):
    """
    Plots the execution of the drift detection method with the tolerance
    boundaries and the rolling mean used to detect a drift.

    Parameters:
    ------------
        run_df (pd.DataFrame): Result of the trace clustering step, with the
            values from the features of tracking the trace clustering evolution
        col (str): Column in 'run_df' to be considered in the analysis
        detected_drifts (list): List of index of detected drifts
        y_true (list): List of index of ground truth drifts
        rolling_means (list): Rolling average of values of 'col'
        lowers (list): List of lower tolerance boundaries over the traces
        uppers (list): List of upper tolerance boundaries over the traces
        save_png (str): Name of png file. If == "" does not save as file
    """
    if save_png != "":
        # Switch to non-interactive mode while rendering to file.
        plt.ioff()
    plt.rcParams["font.family"] = "Times New Roman"
    fig = plt.figure(figsize=(18, 4))
    ax = plt.gca()
    # Feature curve, its rolling mean, and the shaded tolerance band.
    ax.plot(run_df.index, run_df[col], c='#ff5f54', lw=5, label=col)
    ax.plot(run_df.index, rolling_means, c='#35b588', linestyle='-', lw=4, marker='.', markeredgewidth=4, label="Rolling average")
    ax.fill_between(run_df.index, lowers, uppers, facecolor='#52adff', alpha=0.1, label="Tolerance boundaries")
    ax.plot(run_df.index, uppers, c='#52adff', alpha=0.5, marker='v', markeredgewidth=4)
    ax.plot(run_df.index, lowers, c='#52adff', alpha=0.5, marker='^', markeredgewidth=4)
    # Label each vertical-line family only once so the legend stays compact.
    first = True
    for val in y_true:
        if first:
            first = False
            ax.axvline(x=val, ls='--', lw=4, c='darkgreen', alpha=0.8, label="True concept drift")
        else:
            ax.axvline(x=val, ls='--', lw=4, c='darkgreen', alpha=0.8)
    first = True
    for val in detected_drifts:
        if first:
            first = False
            ax.axvline(x=val, ls='-.', lw=4, c='#deb100', alpha=0.8, label="Detected concept drift")
        else:
            ax.axvline(x=val, ls='-.', lw=4, c='#deb100', alpha=0.8)
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(30)
    leg = plt.legend(fontsize=32, loc='upper center', bbox_to_anchor=(0.48, -0.15),
                     fancybox=True, shadow=False, prop={"family":"Times New Roman", "size":"26"},
                     frameon=False, ncol=3, labelspacing=0.25, columnspacing=1)
    for line in leg.get_lines():
        line.set_linewidth(5)
    if save_png != "":
        plt.savefig(save_png, dpi=100, transparent=False)
        plt.close(plt.gcf())
        plt.ion()
|
"""Get invoice by profile id API method."""
from ibsng.handler.handler import Handler
class getInvoiceProfileByID(Handler):
    """Get invoice by profile id method class.

    Declarative IBSng API handler: setup() stores the request parameters,
    control() validates them before the request is issued.
    """

    def control(self):
        """Validate inputs after setup method.

        :return: None
        :rtype: None
        """
        self.is_valid(self.profile_id, int)

    def setup(self, profile_id):
        """Setup required parameters.

        :param int profile_id: profile id
        :return: None
        :rtype: None
        """
        self.profile_id = profile_id
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
The dummy model encodes the model defined by django in backends.djsite
using SQLAlchemy.
This is done to query the database with more performant ORM of SA.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# pylint: disable=no-name-in-module, import-error, invalid-name
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (Column, Table, ForeignKey, UniqueConstraint, select)
from sqlalchemy.types import (
Integer,
String,
DateTime,
Float,
Boolean,
Text,
)
from sqlalchemy.orm import (relationship, backref, sessionmaker)
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.dialects.postgresql import UUID
# MISC
from aiida.common import timezone
from aiida.common.utils import get_new_uuid
Base = declarative_base()
# pylint: disable=missing-docstring, too-few-public-methods
class DbLink(Base):
    """Directed, typed link between two DbNodes (input -> output)."""
    __tablename__ = "db_dblink"

    id = Column(Integer, primary_key=True)
    input_id = Column(Integer, ForeignKey('db_dbnode.id', deferrable=True, initially="DEFERRED"))
    output_id = Column(Integer, ForeignKey('db_dbnode.id', ondelete="CASCADE", deferrable=True, initially="DEFERRED"))
    type = Column(String(255))
    input = relationship("DbNode", primaryjoin="DbLink.input_id == DbNode.id")
    output = relationship("DbNode", primaryjoin="DbLink.output_id == DbNode.id")
    label = Column(String(255), index=True, nullable=False)
class DbAttribute(Base):
    """Node attribute in EAV form: one typed value column per datatype."""
    __tablename__ = "db_dbattribute"

    id = Column(Integer, primary_key=True)
    dbnode_id = Column(Integer, ForeignKey('db_dbnode.id'))
    key = Column(String(255))
    # Selects which of the typed value columns below holds the value.
    datatype = Column(String(10))
    tval = Column(String, default='')
    fval = Column(Float, default=None, nullable=True)
    ival = Column(Integer, default=None, nullable=True)
    bval = Column(Boolean, default=None, nullable=True)
    dval = Column(DateTime, default=None, nullable=True)
class DbExtra(Base):
    """Node extra in EAV form; same layout as DbAttribute."""
    __tablename__ = "db_dbextra"

    id = Column(Integer, primary_key=True)
    dbnode_id = Column(Integer, ForeignKey('db_dbnode.id'))
    key = Column(String(255))
    # Selects which of the typed value columns below holds the value.
    datatype = Column(String(10))
    tval = Column(String, default='')
    fval = Column(Float, default=None, nullable=True)
    ival = Column(Integer, default=None, nullable=True)
    bval = Column(Boolean, default=None, nullable=True)
    dval = Column(DateTime, default=None, nullable=True)
class DbComputer(Base):
    """Computer on which calculations run."""
    __tablename__ = "db_dbcomputer"

    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), default=get_new_uuid)
    name = Column(String(255), unique=True, nullable=False)
    hostname = Column(String(255))
    description = Column(Text, nullable=True)
    transport_type = Column(String(255))
    scheduler_type = Column(String(255))
    transport_params = Column(String(255))
    # Mapped as _metadata because 'metadata' is reserved by SQLAlchemy's Base.
    _metadata = Column('metadata', String(255), default="{}")
class DbUser(Base):
    """User account (mirrors the django auth-style user table)."""
    __tablename__ = "db_dbuser"

    id = Column(Integer, primary_key=True)
    email = Column(String(254), unique=True, index=True)
    password = Column(String(128))  # Clear text password ?
    first_name = Column(String(254), nullable=True)
    last_name = Column(String(254), nullable=True)
    institution = Column(String(254), nullable=True)
    is_staff = Column(Boolean, default=False)
    is_active = Column(Boolean, default=False)
    last_login = Column(DateTime(timezone=True), default=timezone.now)
    date_joined = Column(DateTime(timezone=True), default=timezone.now)
# Association table for the many-to-many DbGroup <-> DbNode relation.
table_groups_nodes = Table(
    'db_dbgroup_dbnodes', Base.metadata, Column('id', Integer, primary_key=True),
    Column('dbnode_id', Integer, ForeignKey('db_dbnode.id', deferrable=True, initially="DEFERRED")),
    Column('dbgroup_id', Integer, ForeignKey('db_dbgroup.id', deferrable=True, initially="DEFERRED")))
class DbGroup(Base):
    """Named group of nodes; (label, type_string) pairs are unique."""
    __tablename__ = "db_dbgroup"

    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), default=get_new_uuid)
    label = Column(String(255), index=True)
    type_string = Column(String(255), default="", index=True)
    time = Column(DateTime(timezone=True), default=timezone.now)
    description = Column(Text, nullable=True)
    user_id = Column(Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially="DEFERRED"))
    user = relationship('DbUser', backref=backref('dbgroups', cascade='merge'))
    # lazy='dynamic' so group membership can be queried without loading all nodes.
    dbnodes = relationship('DbNode', secondary=table_groups_nodes, backref="dbgroups", lazy='dynamic')

    __table_args__ = (UniqueConstraint('label', 'type_string'),)

    def __str__(self):
        return '<DbGroup [type: {}] "{}">'.format(self.type_string, self.label)
class DbNode(Base):
    """Provenance graph node, connected to other nodes through DbLink."""
    __tablename__ = "db_dbnode"

    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), default=get_new_uuid)
    node_type = Column(String(255), index=True)
    process_type = Column(String(255), index=True)
    label = Column(String(255), index=True, nullable=True)
    description = Column(Text(), nullable=True)
    ctime = Column(DateTime(timezone=True), default=timezone.now)
    mtime = Column(DateTime(timezone=True), default=timezone.now)
    dbcomputer_id = Column(
        Integer, ForeignKey('db_dbcomputer.id', deferrable=True, initially="DEFERRED"), nullable=True)
    dbcomputer = relationship('DbComputer', backref=backref('dbnodes', passive_deletes=True))
    user_id = Column(Integer, ForeignKey('db_dbuser.id', deferrable=True, initially="DEFERRED"), nullable=False)
    user = relationship('DbUser', backref='dbnodes')
    public = Column(Boolean, default=False)
    nodeversion = Column(Integer, default=1)
    attributes = relationship('DbAttribute', uselist=True, backref='dbnode')
    extras = relationship('DbExtra', uselist=True, backref='dbnode')

    # Nodes reachable through outgoing links; the backref creates the
    # reverse 'inputs' relation on the other side of the link table.
    outputs = relationship(
        "DbNode",
        secondary="db_dblink",
        primaryjoin="DbNode.id == DbLink.input_id",
        secondaryjoin="DbNode.id == DbLink.output_id",
        backref=backref("inputs", passive_deletes=True),
        passive_deletes=True)

    @hybrid_property
    def user_email(self):
        """
        Returns: the email of the user
        """
        return self.user.email

    @user_email.expression
    def user_email(self):
        """
        Returns: the email of the user at a class level (i.e. in the database)
        """
        return select([DbUser.email]).where(DbUser.id == self.user_id).label('user_email')

    # Computer name
    @hybrid_property
    def computer_name(self):
        """
        Returns: the name of the computer
        """
        return self.dbcomputer.name

    @computer_name.expression
    def computer_name(self):
        """
        Returns: the name of the computer at a class level (i.e. in the database)
        """
        return select([DbComputer.name]).where(DbComputer.id == self.dbcomputer_id).label('computer_name')
class DbAuthInfo(Base):
    """Per-(user, computer) authentication settings; the pair is unique."""
    __tablename__ = "db_dbauthinfo"

    id = Column(Integer, primary_key=True)
    aiidauser_id = Column(Integer, ForeignKey(
        'db_dbuser.id', ondelete='CASCADE', deferrable=True, initially="DEFERRED"))
    aiidauser = relationship('DbUser', backref=backref('dbauthinfo', cascade='merge'))
    dbcomputer_id = Column(Integer,
                           ForeignKey('db_dbcomputer.id', ondelete='CASCADE', deferrable=True, initially="DEFERRED"))
    dbcomputer = relationship('DbComputer', backref=backref('dbauthinfo', passive_deletes=True))
    # Mapped as _metadata because 'metadata' is reserved by SQLAlchemy's Base.
    _metadata = Column('metadata', String(255), default="{}")
    auth_params = Column('auth_params', String(255), default="{}")
    enabled = Column(Boolean, default=True)

    __table_args__ = (UniqueConstraint("aiidauser_id", "dbcomputer_id"),)
class DbLog(Base):
    """Log record, optionally attached to a node."""
    __tablename__ = "db_dblog"

    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), default=get_new_uuid)
    time = Column(DateTime(timezone=True), default=timezone.now)
    loggername = Column(String(255), index=True)
    levelname = Column(String(255), index=True)
    dbnode_id = Column(Integer, ForeignKey('db_dbnode.id', deferrable=True, initially="DEFERRED"), nullable=True)
    dbnode = relationship('DbNode', backref=backref('dblogs', passive_deletes=True))
    message = Column(Text(), nullable=True)
    # Mapped as _metadata because 'metadata' is reserved by SQLAlchemy's Base.
    _metadata = Column('metadata', String(255), default="{}")
class DbComment(Base):
    """User comment attached to a node."""
    __tablename__ = "db_dbcomment"

    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), default=get_new_uuid)
    dbnode_id = Column(Integer, ForeignKey('db_dbnode.id', ondelete="CASCADE", deferrable=True, initially="DEFERRED"))
    ctime = Column(DateTime(timezone=True), default=timezone.now)
    mtime = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now)
    user_id = Column(Integer, ForeignKey('db_dbuser.id', ondelete="CASCADE", deferrable=True, initially="DEFERRED"))
    content = Column(Text, nullable=True)
    dbnode = relationship('DbNode', backref='dbcomments')
    user = relationship("DbUser")
def get_aldjemy_session():
    """
    Use aldjemy to make a session

    .. note:
        Use only in this case. In normal production mode
        it is safer make session explictly because it is more robust
    """
    from aldjemy.core import get_engine
    session_factory = sessionmaker(bind=get_engine())
    return session_factory()
# Module-level session created once at import time (dummy-model querying only).
session = get_aldjemy_session()
|
from typing import Tuple
from tempo.serve.utils import pipeline, predictmethod
def test_class_func_class():
    """A @pipeline class exposes its @predictmethod both via .predict and __call__."""

    @pipeline(
        name="classifier",
        models=[],
    )
    class MyPipeline:
        @predictmethod
        def predict(self, X: str) -> str:
            return X

    instance = MyPipeline()
    assert instance.predict("hello") == "hello"
    assert instance("hello") == "hello"
def test_class_func():
    """A @pipeline-decorated plain function stays directly callable."""

    @pipeline(
        name="classifier",
        models=[],
    )
    def predict(X: str) -> str:
        return X

    assert predict("hello") == "hello"
def test_clear_state_func():
    """A pipeline may wrap another pipeline instance as one of its models."""

    @pipeline(
        name="classifier",
        models=[],
    )
    class MyPipeline:
        def __init__(self):
            self.cleared = False

        @predictmethod
        def predict(self, X: str) -> str:
            return X

    inner = MyPipeline()

    @pipeline(
        name="classifier",
        models=[inner],
    )
    class MyPipeline2:
        def __init__(self):
            self.cleared = False

        @predictmethod
        def predict(self, X: str) -> str:
            return inner(X=X)

    outer = MyPipeline2()
    outer(X="hello")
def test_class_two_outputs():
    """A predictmethod may return a tuple; both elements pass through intact."""

    @pipeline(
        name="classifier",
        models=[],
    )
    class MyPipeline:
        @predictmethod
        def predict(self, X: str) -> Tuple[str, str]:
            return X, X

    instance = MyPipeline()
    first, second = instance.predict("hello")
    assert first == "hello"
    assert second == "hello"
|
import warnings
if __name__ == '__main__':
    # Give immediate feedback before the heavy imports below load.
    print('Loading...')
    warnings.filterwarnings("ignore")
import pyfiglet
import time
from rich.console import Console
from rich import print as rprint
from sklearn.linear_model import Ridge
from pypinyin import lazy_pinyin
import nagisa
import jieba
import regex
import numpy as np
import gtts
import sounddevice as sd
import librosa
import langdetect
from typing import Any, Callable, Dict, List, Tuple, Union
from pathlib import Path
import random
import copy
import re
import json
import hashlib
from tqdm import tqdm
def simple_digest(s: str):
    """Short human-readable digest: first 10 chars of *s* plus its MD5 hex."""
    prefix = s[:10]
    checksum = hashlib.md5(s.encode('utf-8')).hexdigest()
    return f'{prefix}-{checksum}'
class SerializableObjectMeta(type):
    """Metaclass that collects annotated fields (and their defaults) from the
    whole inheritance chain into __serializableobject_fields__ and
    __serializableobject_values__ on each class it creates."""

    def __new__(cls, name, bases, attrs):
        annotations = {}
        default_values = {}
        # Walk bases in reverse so nearer bases override farther ones.
        for base in bases[::-1]:
            annotations.update(base.__dict__.get('__annotations__', {}))
            default_values.update(base.__dict__)
        annotations.update(attrs.get('__annotations__', {}))
        default_values.update(attrs)
        # Keep defaults only for declared (annotated) fields.
        default_values = {k: v for k,
                          v in default_values.items() if k in annotations}
        attrs['__serializableobject_fields__'] = annotations
        attrs['__serializableobject_values__'] = default_values
        return type.__new__(cls, name, bases, attrs)
class SerializableObject(metaclass=SerializableObjectMeta):
    '''Abstract base class: subclasses can call ``serialize`` to flatten
    themselves into a plain dict, for front-end exchange and persistence.'''

    def __init__(self, *args:Tuple[Any], **kws:Dict[str, Any]):
        # Fill every declared field, in priority order: positional args
        # (annotation order), then keyword args, then the class default.
        for i, k in enumerate(self.__serializableobject_fields__):
            if i < len(args):
                setattr(self, k, args[i])
            elif k in kws:
                setattr(self, k, kws[k])
            elif k in self.__serializableobject_values__:
                setattr(self, k, self.__serializableobject_values__[k])
            else:
                raise ValueError(f"param {k} not specified")

    def serialize(self):
        """Recursively flatten this object's declared fields into a dict."""
        ret = {}
        for k, v in self.__serializableobject_fields__.items():
            if type(v) is SerializableObjectMeta:
                # Nested SerializableObject field: recurse.
                ret[k] = self.__dict__[k].serialize()
            else:
                ret[k] = self.__dict__[k]
        return ret

    @classmethod
    def deserialize(cls, data):
        """Rebuild an instance from a dict produced by ``serialize``."""
        state_dict = {}
        for k, v in cls.__serializableobject_fields__.items():
            if type(v) is SerializableObjectMeta:
                state_dict[k] = v.deserialize(data[k])
            elif k in data:
                state_dict[k] = data[k]
        return cls(**state_dict)

    def __str__(self):
        msg = self.__class__.__name__ + '('
        msg += ', '.join([k + '=' + str(self.__dict__[k])
                          for k in self.__serializableobject_fields__])
        msg += ')'
        return msg

    def __repr__(self):
        return str(self)

    def __eq__(self, rhs):
        # Equal iff rhs is also a SerializableObject with the same field
        # set and equal field values.
        if type(rhs.__class__) is not SerializableObjectMeta:
            return False
        for k in self.__serializableobject_fields__:
            if k not in rhs.__serializableobject_fields__:
                return False
            if rhs.__dict__[k] != self.__dict__[k]:
                return False
        return True

    def __ne__(self, rhs):
        return not self.__eq__(rhs)
class MemoryStat(SerializableObject):
    """Spaced-repetition state for one item; add_stat follows the
    SuperMemo-2 (SM-2) scheduling update."""

    EF: float = 2.5      # easiness factor (SM-2), floored at 1.3
    interval: int = 0    # current repetition interval, in review ticks
    upcoming: int = 0    # ticks remaining until the item is due again

    def decrease_tick(self) -> bool:
        """Advance time by one tick; return True when the item becomes due."""
        self.upcoming = max(self.upcoming - 1, 0)
        return self.upcoming == 0

    def is_active(self) -> bool:
        """True when the item is due for review now."""
        return self.upcoming == 0

    def add_stat(self, q: int):
        """Record a review of quality q (0-5) and reschedule per SM-2."""
        if q < 3:
            # Failed recall: restart the repetition schedule.
            self.interval = 0
            self.upcoming = 0
        self.EF = max(self.EF + (0.1 - (5 - q) * (0.08 + (5 - q) * 0.02)), 1.3)
        if self.interval == 0:
            self.interval = 1
        elif self.interval == 1:
            self.interval = 6
        else:
            self.interval = int(round(self.EF * self.interval))
        self.upcoming = self.interval
class Question(SerializableObject):
    """One flash-card question plus its scheduling/matching metadata."""

    title: str = ''                  # prompt shown (or spoken) to the user
    answer: str = ''                 # ground-truth answer
    language: str = ''               # language code; auto-detected when empty
    autoplay: bool = False           # speak the title automatically
    memory_stat: MemoryStat = MemoryStat()
    question_id: int = 0
    reconstruct_pattern: str = '**Question** {title}\n{answer}'
    match_method: Union[List[str], None] = None   # whitelist of matcher names
    match_ignore: Union[List[str], None] = None   # matcher names to skip
    invisible: bool = False          # hide the answer text (dictation mode)

    def __init__(self, *args, **kws):
        super().__init__(*args, **kws)
        # BUG FIX: the class-level default is a single shared MemoryStat
        # instance; give each question its own so reviewing one question
        # does not mutate the schedule of every other question.
        if self.memory_stat is Question.memory_stat:
            self.memory_stat = MemoryStat()
        # Detect the language from the combined text when not given.
        if not self.language:
            self.language = langdetect.detect(self.title + self.answer)
        self.title = self.title.strip()
        self.answer = self.answer.strip()

    def get_uid(self):
        """Stable identifier derived from title+answer (survives reordering)."""
        return simple_digest(self.title + '#' + self.answer)
class AudioManager:
    """Synthesises (via gTTS) and plays per-phrase audio, cached as mp3 files."""

    def __init__(self, cache_dir: Path):
        cache_dir.mkdir(exist_ok=True, parents=True)
        self.cache_dir = cache_dir
        # Total size of the mp3 cache in bytes, scanned once at startup and
        # tracked incrementally afterwards.
        self.cache_size = sum(f.stat().st_size for f in cache_dir.glob('*.mp3'))

    def get_cache_size(self):
        """Tracked cache size in bytes (not re-scanned from disk)."""
        return self.cache_size

    def get_audio(self, title: str, force_download: bool = False, **params: Dict[str, Any]) -> Path:
        """Return the cached mp3 path for ``title``, synthesising it if missing."""
        name = simple_digest(title)
        path = self.cache_dir.joinpath(f'{name}.mp3')
        if not path.exists() or force_download:
            # '*' is markdown emphasis in titles; don't read it aloud.
            tts = gtts.gTTS(title.replace('*', ''), **params)
            tts.save(path)
            if not force_download:
                # NOTE(review): size is not re-counted on forced re-downloads,
                # presumably to avoid double counting an existing file — confirm.
                self.cache_size += path.stat().st_size
        return path

    def play_audio(self, data: str, force_download: bool = False, **params: Dict[str, Any]):
        """Stop any current playback and play the audio for ``data``."""
        sd.stop()
        path = self.get_audio(data, force_download, **params)
        data, fs = librosa.load(path)
        sd.play(data, fs, blocking=False)
# Warm up jieba's lazily-initialised dictionary at import time.
list(jieba.cut('测试结巴分词'))
class MatchManager:
    """Checks a user's answer against a question's ground truth with a
    cascade of increasingly lenient matching strategies."""

    def __init__(self):
        # Ordered (name, matcher) pairs, tried in order until one matches.
        # (Annotation fixed: this is a list of tuples, not a dict.)
        self.match_method: List[Tuple[str, Callable[[Question, str], Tuple[bool, str]]]] = [
            ('full-match', self.full_match),
            ('token-match', self.token_match),
            ('pinyin-match', self.pinyin_match),
            ('char-match', self.char_match),
        ]

    def clean_word(self, s):
        """Collapse punctuation and whitespace runs into single spaces."""
        return regex.sub(r'[\p{P}\s]+', ' ', s.strip())

    def split_word_zh(self, s):
        """Tokenise Chinese text (lower-cased) with jieba."""
        return list(jieba.cut(s.lower()))

    def split_word_ja(self, s):
        """Tokenise Japanese text with nagisa."""
        return list(nagisa.tagging(s).words)

    def first_pinyin_zh(self, s):
        """Per Chinese word, upper-cased initials of its pinyin.

        NOTE(review): the positional 4 presumably selects pypinyin's
        FIRST_LETTER style — confirm against the pypinyin docs.
        """
        words = self.split_word_zh(s)
        return [''.join(lazy_pinyin(w, 4)).upper() for w in words]

    def pattern_match(self, patterns: List[str], data: Union[str, List], tag: str = '') -> Tuple[bool, str]:
        '''
        Check that every element of ``patterns`` occurs in ``data``.

        Returns (all_matched, message); the message renders matched spans
        with markdown emphasis and lists unmatched patterns in braces.
        '''
        match_mask = np.zeros(len(data))
        unmatched = []
        for pat in patterns:
            if pat in data:
                i = data.index(pat)
                if isinstance(data, str):
                    # Substring match: mark the whole matched span.
                    match_mask[i:i+len(pat)] = 1
                else:
                    match_mask[i] = 1
            else:
                unmatched.append(pat)
        msg = f"**{tag}** "
        # Edges of the mask mark where emphasis markers ('*') are inserted.
        edge = np.diff(match_mask, prepend=0, append=0)
        if edge[0]:
            msg += "*"
        for c, e in zip(data, edge[1:]):
            msg += c
            if e:
                msg += "*"
        if unmatched:
            msg += ' {' + ' '.join(unmatched) + '}'
        return len(unmatched) == 0, msg

    def clean_first(func) -> Callable[..., Any]:
        '''
        Decorator: strip punctuation/whitespace from question.answer and the
        user's answer, then pass them to the wrapped function as ground
        truth and answer respectively.
        '''
        def inner(self, question: Question, answer: str):
            gt = self.clean_word(question.answer)
            answer = self.clean_word(answer)
            return func(self, answer, gt, question)
        return inner

    def then_pattern_match(tag) -> Callable[..., Tuple[bool, str]]:
        '''Decorator factory: the wrapped function returns (patterns, data);
        return the result of calling pattern_match on them, labelled tag.'''
        def wrapper(func):
            def inner(self, *args, **kws):
                pat, data = func(self, *args, **kws)
                return self.pattern_match(pat, data, tag)
            return inner
        return wrapper

    @clean_first
    def full_match(self, answer: str, gt: str, question: Question):
        """Exact (cleaned) string equality."""
        if answer == gt:
            return True, "**full-match** success"
        else:
            return False, "**full-match** failed"

    @clean_first
    @then_pattern_match("char-match")
    def char_match(self, answer: str, gt: str, question: Question):
        """Character-by-character containment check."""
        return list(answer), list(gt)

    @clean_first
    @then_pattern_match("token-match")
    def token_match(self, answer: str, gt: str, question: Question):
        """Word-token containment, using a language-specific tokenizer if any."""
        func_name = 'split_word_' + question.language
        if hasattr(self, func_name):
            func = getattr(self, func_name)
            return func(answer), func(gt)
        else:
            # No tokenizer for this language: fall back to characters.
            return list(answer), list(gt)

    @clean_first
    @then_pattern_match("pinyin-match")
    def pinyin_match(self, answer: str, gt: str, question: Question):
        """Match the answer tokens against the ground truth's pinyin initials."""
        return answer.split(), ''.join(self.first_pinyin_zh(gt))

    def match_answer(self, question: Question, answer: str):
        """Try each enabled matcher in order; return the first success,
        or (False, all failure messages) when none matches."""
        all_msg = []
        for name, method in self.match_method:
            # A matcher runs if whitelisted, not blacklisted, or by default.
            need_match = False
            if question.match_method:
                if name in question.match_method:
                    need_match = True
            elif question.match_ignore:
                if name not in question.match_ignore:
                    need_match = True
            else:
                need_match = True
            if need_match:
                mat, msg = method(question, answer)
                if mat:
                    return mat, msg
                else:
                    all_msg.append(msg)
        return False, '\n'.join(all_msg)

    def auto_score(self, question: Question, answer: str, speed_score: float):
        """Combine correctness and speed into an SM-2 style quality score:
        a match scores at least 3, a miss at most 3."""
        match, match_msg = self.match_answer(question, answer)
        if match:
            return max(speed_score, 3), match_msg
        else:
            return min(speed_score, 3), match_msg
class LLRegresser(SerializableObject):
    """Ridge regression over a bounded sliding window of samples; features
    are augmented with their log(x + 1) transform before fitting."""

    max_history: int = 500   # keep at most this many samples
    alpha: float = 1         # Ridge regularisation strength
    X: Union[List[List[float]], None] = None
    # Annotation fixed: targets are scalars, not feature rows.
    y: Union[List[float], None] = None

    def add_data(self, X: List[float], y: float):
        """Append one sample, evicting the oldest once over max_history."""
        if self.X is None:
            self.X = []
        if self.y is None:
            self.y = []
        self.X.append(X)
        self.y.append(y)
        if len(self.X) > self.max_history:
            self.X.pop(0)
            self.y.pop(0)

    def estimate(self, X: List[float]) -> float:
        """Predict the target for feature vector X, floored at 0.01.

        Falls back to a crude magnitude-based heuristic while there is no
        usable training data.
        """
        cur_X = np.asarray(X)[np.newaxis, :]
        cur_X = np.concatenate([cur_X, np.log(cur_X + 1)], axis=1)
        try:
            X = np.asarray(self.X)
            X = np.concatenate([X, np.log(X + 1)], axis=1)
            y = np.asarray(self.y)
            return max(Ridge(alpha=self.alpha).fit(X, y).predict(cur_X)[0], 0.01)
        except Exception:
            # No data yet (self.X is None) or the fit failed: rough estimate
            # proportional to the feature magnitudes. (Was a bare `except:`,
            # which would also swallow KeyboardInterrupt/SystemExit.)
            return cur_X.sum() * 0.1
class SpeedEstimator:
    """Estimates how long a user should need to answer a question, per
    language, by regressing observed answer times on simple length features."""

    def __init__(self, path: Path):
        path.parent.mkdir(exist_ok=True, parents=True)
        self.path = path
        # One regressor per language identifier, persisted as JSON at `path`.
        self.estimators: Dict[str, LLRegresser] = {}
        if self.path.exists():
            self.load()

    def save(self):
        """Persist all per-language regressors to disk as JSON."""
        with self.path.open('w') as f:
            json.dump({k: v.serialize()
                       for k, v in self.estimators.items()}, f)

    def load(self):
        """Restore the per-language regressors from the JSON file."""
        with self.path.open() as f:
            data = json.load(f)
        self.estimators = {k: LLRegresser.deserialize(
            v) for k, v in data.items()}

    @staticmethod
    def get_feature(question: Question, answer: str):
        """Length-based features of the title, ground truth and user answer."""
        def str_feat(s):
            # Raw strings for regexes: '\s' in a plain literal is an invalid
            # escape sequence (DeprecationWarning on modern Pythons).
            return [
                len(s),
                min(map(len, re.split(r'\s+', s))),
                len(re.split(r'\s+', s)),
                min(map(len, s.split('\n'))),
                len(s.split('\n'))
            ]
        return str_feat(question.title) + str_feat(question.answer) + str_feat(answer)

    def add_data(self, question: Question, answer: str, timing: float):
        """Record one observed (question, answer, elapsed seconds) sample."""
        identifier = question.language
        if identifier not in self.estimators:
            self.estimators[identifier] = LLRegresser()
        X = self.get_feature(question, answer)
        y = timing
        self.estimators[identifier].add_data(X, y)

    def estimate(self, question: Question, answer: str):
        """Predict the expected answering time for this question/answer."""
        identifier = question.language
        if identifier not in self.estimators:
            self.estimators[identifier] = LLRegresser()
        X = self.get_feature(question, answer)
        return self.estimators[identifier].estimate(X)

    def speed_score(self, question: Question, answer: str, timing: float):
        """Map expected/actual time ratio onto a quality score, capped at 5."""
        y = self.estimate(question, answer)
        return min(int(y / timing * 7), 5)
class DataSource:
    """Parses the question markdown file and tracks the nested parse state."""

    # Baseline parse state; a copy is taken per DataSource instance.
    default_state = {
        'autoplay': False,      # speak the title automatically
        'question': False,
        'inline': False,
        'language': '',
        'invisible': False,     # hide the answer text (dictation mode)
        'match_method': None,   # explicit whitelist of matcher names
        'match_ignore': None    # matcher names to skip
    }
    # State overlays toggling dictation behaviour on/off.
    dictation_preset = {
        'autoplay': True,
        'invisible': True
    }
    no_dictation_preset = {
        'autoplay': False,
        'invisible': False
    }
    def __init__(self, markdown_file: Path, database_file: Path):
        markdown_file.parent.mkdir(exist_ok=True, parents=True)
        database_file.parent.mkdir(exist_ok=True, parents=True)
        self.markdown_file = markdown_file
        self.database_file = database_file
        self.q: List[Question] = []        # questions parsed from markdown
        self.db: Dict[str, Question] = {}  # persisted questions keyed by uid
        self._state = copy.copy(self.default_state)
        self.state_stack = []              # pushed/popped nested parse states
        self.cmd_pattern = re.compile('(.*?)=(.*?)')
        self.reconstruct_pattern = []
        self.parse_message = []
        self.current_line = 0
        self.question_id = -1              # incremented before each question
        self.current_question: Union[Question, None] = None
        # Overrides applied on top of every state. ("froce" [sic] — the
        # misspelling is used consistently across the class, so it is kept.)
        self.froce_state = {}
    @property
    def state(self):
        # Force-overrides always win over the parsed state.
        self._state.update(self.froce_state)
        return self._state

    @state.setter
    def state(self, value):
        self._state = copy.copy(value)
        self._state.update(self.froce_state)

    def set_force_dictation(self):
        """Force dictation mode (audio on, answer hidden) for all questions."""
        self.froce_state.update(self.dictation_preset)

    def set_force_no_dictation(self):
        """Force dictation mode off for all questions."""
        self.froce_state.update(self.no_dictation_preset)

    def set_force_voice(self):
        """Force audio autoplay for all questions."""
        self.froce_state.update(autoplay=True)

    def set_force_no_voice(self):
        """Disable audio autoplay for all questions."""
        self.froce_state.update(autoplay=False)
def set_config(self, presets=[], forces=[]):
for preset in presets:
name = 'set_preset_' + preset.replace('-', '_')
if hasattr(self, name):
self.add_message('preset ' + preset)
getattr(self, name)()
for force in forces:
name = 'set_force_' + force.replace('-', '_')
if hasattr(self, name):
self.add_message('force ' + force)
getattr(self, name)()
def push_stack(self):
self.state_stack.append(copy.copy(self.state))
def pop_stack(self):
self.state = self.state_stack.pop()
def update_stack(self, *args, **kws):
self.state.update(*args, **kws)
self.state.update(self.froce_state)
def handle_voice(self):
self.update_stack(autoplay=True)
def handle_question(self):
self.update_stack(question=True)
def handle_inline(self):
self.update_stack(inline=True)
def handle_invisible(self):
self.update_stack(invisible=True)
def handle_language(self, lang):
self.update_stack(language=lang)
def handle_match_method(self, *params):
self.update_stack(match_method=params)
def handle_match_ignore(self, *params):
self.update_stack(match_ignore=params)
def handle_end_all(self):
self.state = self.state_stack[0]
self.state_stack = []
def handle_end(self):
self.pop_stack()
def handle_dictation(self):
self.update_stack(**self.dictation_preset)
def add_question(self, title, answer, **kws):
wrap_params = ['language', 'autoplay',
'match_method', 'match_ignore', 'invisible']
wrap_dict = {k: self.state[k] for k in wrap_params}
self.question_id += 1
q = Question(
title=title,
answer=answer,
questoin_id=self.question_id,
**wrap_dict,
**kws
)
if (uid := q.get_uid()) in self.db:
q.memory_stat = self.db[uid].memory_stat
self.current_question = q
self.q.append(q)
self.reconstruct_pattern.append(None)
return q
def add_message(self, msg):
self.parse_message.append(
f'File {self.markdown_file.stem} Line {self.current_line + 1}: {msg}')
def parse_markdown(self, md):
self.state.update(self.froce_state)
ignore_raw_text = False
for self.current_line, line in enumerate(md.split('\n')):
line = line.strip()
if line.startswith('```'):
ignore_raw_text = not ignore_raw_text
if ignore_raw_text:
self.reconstruct_pattern.append(line)
continue
inline_command = False
inline_depth = 0
ctrl_cmd = ''
if line.endswith('-->'):
idx = line.index('<!--')
line_new, ctrl_cmd = line[:idx], line[idx:]
for ctrl_part in re.findall('\s*<!--(.*?)-->\s*', ctrl_cmd):
part_tokens = ctrl_part.split('&')
if any([x.strip() == 'end-all' for x in part_tokens]):
self.handle_end_all()
elif any([x.strip() == 'end' for x in part_tokens]):
self.handle_end()
else:
self.push_stack()
inline_depth += 1
for ctrl in part_tokens:
ctrl_tokens = ctrl.strip().split('=')
cmd = ctrl_tokens[0].replace('-', '_')
if len(ctrl_tokens) > 1:
params = [x.strip()
for x in ctrl.split('=')[1].split(',')]
else:
params = []
if hasattr(self, 'handle_' + cmd):
getattr(self, 'handle_' + cmd)(*params)
else:
self.add_message(
f'Unknown Control Command {ctrl}')
if line_new:
line = line_new
inline_command = True
else:
self.reconstruct_pattern.append(line)
continue
if self.state['inline']:
tokens = re.split('\s+', line)
self.add_question(
title=tokens[0],
answer=' '.join(tokens[1:]),
reconstruct_pattern='{title} {answer} %s ' % ctrl_cmd.strip(
)
)
self.current_question = None
elif self.state['question']:
if mat := re.fullmatch(r'\*\*(.*?)\*\*(.*?)', line):
label, title = mat.groups()
self.current_question = self.add_question(
title, '', reconstruct_pattern='**%s** {title} %s\n{answer}' % (label, ctrl_cmd))
elif self.current_question is not None:
if line:
self.current_question.answer += line + '\n'
elif line != '':
self.add_message(f'Ignore line: {line}')
else:
self.reconstruct_pattern.append(line)
if inline_command:
for _ in range(inline_depth):
self.pop_stack()
def load(self):
if self.database_file.exists():
with self.database_file.open() as f:
for prob in json.load(f):
q: Question = Question.deserialize(prob)
self.db[q.get_uid()] = q
with self.markdown_file.open() as f:
self.parse_markdown(f.read())
return '\n'.join(self.parse_message)
def get_questions(self):
return self.q
def generate_markdown(self):
questions = sorted(self.q, key=lambda x: x.question_id)
ques_out = []
for q in questions:
ques_out.append(q.reconstruct_pattern.format(
title=q.title, answer=q.answer))
qidx = 0
reconstructed = []
for r in self.reconstruct_pattern:
if r is None:
reconstructed.append(ques_out[qidx])
qidx += 1
else:
reconstructed.append(r)
return '\n'.join(reconstructed)
def save(self) -> Dict[str, Any]:
with self.markdown_file.open('w') as f:
f.write(self.generate_markdown())
with self.database_file.open('w') as f:
json.dump([x.serialize() for x in self.q],
f, ensure_ascii=False, indent=4)
class HistoryStat(SerializableObject):
    """Lifetime answering statistics persisted by StatManager."""
    # Cumulative counters across all sessions.
    total_problems: int = 0
    total_failed_problems: int = 0
    total_answering: int = 0
    total_failed_answering: int = 0
    # Histogram over scores 0..5; created lazily in __init__.
    score_distribution: Union[None, List[int]] = None
    max_combo: int = 0
    total_using_time: float = 0

    def __init__(self, *args, **kws):
        super().__init__(*args, **kws)
        if self.score_distribution is None:
            # One bucket per possible score: 0 through 5.
            self.score_distribution = [0, 0, 0, 0, 0, 0]
class StatManager:
    """Persists a HistoryStat to a JSON file and proxies attribute access so
    stat fields (e.g. ``total_problems``) can be read/written directly on
    the manager."""

    def __init__(self, file: Path):
        file.parent.mkdir(exist_ok=True, parents=True)
        self.file = file
        self.history_stat = HistoryStat()
        self.load()

    def load(self):
        """Reset the usage-time tick and reload stats from disk if present."""
        self.last_tick = time.time()
        if self.file.exists():
            with self.file.open() as f:
                self.history_stat = HistoryStat.deserialize(json.load(f))

    def save(self):
        """Accumulate elapsed usage time since the last tick and write stats."""
        with self.file.open('w') as f:
            self.history_stat.total_using_time += time.time() - self.last_tick
            self.last_tick = time.time()
            json.dump(self.history_stat.serialize(), f)

    def __getattr__(self, key):
        # Only invoked when normal lookup fails: delegate to history_stat.
        if (hs := self.__dict__.get('history_stat', None)) is not None:
            if key in hs.__dict__:
                return hs.__dict__[key]
        # Fix: raise AttributeError, not KeyError — __getattr__ must raise
        # AttributeError so hasattr()/getattr(default)/copy work. The old
        # `self.__dict__[key]` always raised KeyError here, since __getattr__
        # only runs when the key is absent from the instance dict.
        raise AttributeError(key)

    def __setattr__(self, key, value):
        hs = self.__dict__.get('history_stat', None)
        if hs is not None and key in hs.__dict__:
            # Route stat fields to history_stat only. The old code also wrote
            # to self.__dict__, leaving stale shadow values after load()
            # replaces history_stat with a fresh object.
            hs.__dict__[key] = value
        else:
            self.__dict__[key] = value

    def get_data(self):
        """Return the serialized stats as a plain dict."""
        return self.history_stat.serialize()
class Session:
    """One drill session over the questions of several DataSources.

    A small state machine: loaded -> configuring -> ready -> answering ->
    (ready | round-end)* -> end.  State-restricted methods are guarded by
    the @when decorator and return dicts carrying 'state' and 'result'.
    """
    def __init__(self, data_srcs: List[DataSource], audio_manager: AudioManager, speed_estimator: SpeedEstimator, match_manager: MatchManager, stat_manager: StatManager):
        self.state = 'loaded'
        self.data_srcs = data_srcs
        self.questions: List[Question] = []
        for src in data_srcs:
            self.questions += src.get_questions()
        self.active_questions: List[Question] = []
        self._decrease_tick()
        self.prob_idx = 0
        self.current_round = []
        self.next_round = []      # failed questions queued for the next round
        self.first_round = True
        self.current_prob: Question = None
        self.failed_probs = []
        self.audio_manager = audio_manager
        self.speed_estimator = speed_estimator
        self.cache_autoplay_audio()
        self.score_func = match_manager.auto_score
        self.total_error = 0
        self.combo = 0
        self.current_timing = None
        self.show_all = False
        self.stat_manager = stat_manager
    def cache_all_audio(self, force_download=False):
        """Pre-fetch audio for every question."""
        for prob in tqdm(self.questions, desc='cacheing audio'):
            self.audio_manager.get_audio(
                title=prob.title, force_download=force_download, lang=prob.language)
    def cache_autoplay_audio(self, force_download=False):
        """Pre-fetch audio only for questions marked autoplay."""
        for prob in tqdm(self.questions, desc='cacheing audio'):
            if prob.autoplay:
                self.audio_manager.get_audio(
                    title=prob.title, force_download=force_download, lang=prob.language)
    def _decrease_tick(self):
        # Advance every question's memory clock and collect the due ones.
        for q in self.questions:
            q.memory_stat.decrease_tick()
            if q.memory_stat.is_active():
                self.active_questions.append(q)
    def save(self):
        """Persist all sources, the speed estimator, and the stats."""
        for src in self.data_srcs:
            src.save()
        self.speed_estimator.save()
        self.stat_manager.save()
    def error_msg(self, reason=''):
        return {'state': self.state, "result": 'fail', 'reason': reason}
    def success_msg(self, data):
        data = data or {}
        data['state'] = self.state
        data['result'] = 'success'
        return data
    # Decorator defined in the class body (note: no self). Restricts the
    # wrapped method to the given states and wraps its return in success_msg.
    def when(*state):
        def wrapper(func):
            def inner(self, *args, **kws):
                if self.state not in state:
                    return self.error_msg('can only be called at {}'.format(','.join(state)))
                msg = func(self, *args, **kws)
                return self.success_msg(msg)
            return inner
        return wrapper
    @when("loaded")
    def start(self) -> Dict[str, bool]:
        """Enter configuration; returns the config options to present."""
        config = {
            'showall': False,
            'shuffle': True,
        }
        if len(self.active_questions) == 0:
            # No question is due: offer fast-forwarding the memory clock.
            config['fastforward'] = False
        self.state = 'configuring'
        return config
    @when("configuring")
    def set_config(self, config):
        """Apply the chosen config and build the first round."""
        if 'fastforward' in config and config['fastforward']:
            while not self.active_questions:
                self._decrease_tick()
        if config['showall']:
            self.active_questions = self.questions
            self.show_all = True
        if config['shuffle']:
            random.shuffle(self.active_questions)
        self.state = 'ready'
        self.current_round = self.active_questions
        self.prob_idx = 0
    @when("ready", "round-end")
    def next_prob(self):
        """Advance to the next question, or roll over to the next round/end."""
        if self.prob_idx < len(self.current_round):
            self.current_prob = self.current_round[self.prob_idx]
            self.state = 'answering'
            data = self.current_prob.serialize()
            data['combo'] = self.combo
            data['total_error'] = self.total_error
            data['round_idx'] = self.prob_idx
            data['round_total'] = len(self.current_round)
            data['round_remain'] = len(self.current_round) - self.prob_idx
            data['total_remain'] = len(
                self.current_round) + len(self.next_round) - self.prob_idx
            data['next_round'] = len(self.next_round)
            return data
        else:
            # Round exhausted: the failed questions become the next round.
            self.current_round = self.next_round
            random.shuffle(self.current_round)
            self.next_round = []
            self.first_round = False
            if not self.current_round:
                self.state = 'end'
            else:
                self.state = 'round-end'
            self.prob_idx = 0
    @when("answering")
    def score(self, answer: str, timing: float):
        """Auto-score the given answer; returns score and a message."""
        if answer == '':
            return {'score': 0, 'message': 'No input'}
        estimate = self.speed_estimator.estimate(self.current_prob, answer)
        speed_score = self.speed_estimator.speed_score(
            self.current_prob, answer, timing)
        self.current_answer = answer
        self.current_timing = timing
        score, msg = self.score_func(self.current_prob, answer, speed_score)
        msg = f'Timing: {timing:.2f}, Esti. {estimate:.2f}\n' + msg
        return {'score': score, 'message': msg}
    @when("answering")
    def answer(self, q):
        """Commit the (possibly user-adjusted) score q (0..5) and update stats."""
        if self.first_round:
            self.stat_manager.total_problems += 1
            if not self.show_all:
                self.current_prob.memory_stat.add_stat(q)
            if q <= 3:
                self.stat_manager.total_failed_problems += 1
                self.failed_probs.append(self.current_prob)
        self.stat_manager.total_answering += 1
        self.stat_manager.score_distribution[q] += 1
        if q >= 4:
            self.combo += 1
        else:
            self.combo = 0
        self.stat_manager.max_combo = max(self.stat_manager.max_combo, self.combo)
        if q <= 3:
            # Failed: re-queue for the next round.
            self.stat_manager.total_failed_answering += 1
            self.next_round.append(self.current_prob)
            self.total_error += 1
        if q >= 3 and self.current_timing is not None:
            # Good-enough answers train the speed estimator.
            self.speed_estimator.add_data(
                self.current_prob, self.current_answer, self.current_timing)
        self.current_answer = None
        self.current_timing = None
        if q >= 2:
            # Scores below 2 repeat the same question immediately.
            self.prob_idx += 1
        self.state = 'ready'
        return {'combo': self.combo}
    @when("round-end")
    def get_failed_last_round(self):
        return {'failed_probs': [prob.serialize() for prob in self.current_round]}
    @when("answering")
    def modify_answer(self, answer):
        """Replace the reference answer of the current question."""
        self.current_prob.answer = answer
    def get_failed(self):
        return {'failed_probs': [prob.serialize() for prob in self.failed_probs]}
    def get_state(self):
        return {'state': self.state}
class Server:
    """Discovers markdown question files and builds Sessions over them,
    sharing the audio/speed/match/stat managers across sessions."""

    def __init__(self, root_path, search_path=None):
        self.root_path = Path(root_path)
        # Fall back to root_path when no separate search directory is given.
        self.search_path = Path(search_path) if search_path else self.root_path
        self.files: List[Path] = list(self.search_path.glob('*.md'))
        self.speed_estimator = SpeedEstimator(
            self.root_path.joinpath('.mhelper', '.speed-estimator.json'))
        self.audio_manager = AudioManager(
            self.root_path.joinpath('.mhelper', '.audio'))
        self.match_manager = MatchManager()
        self.stat_manager = StatManager(self.root_path.joinpath('.mhelper', '.stat.json'))

    def get_file_names(self):
        """Return the display name (stem) of every discovered markdown file."""
        return [x.stem for x in self.files]

    def new_session(self, indices, presets=(), forces=()):
        """Build a Session from the files at the given indices.

        presets/forces are passed through to DataSource.set_config.
        (Defaults are tuples, not lists, to avoid shared mutable defaults.)
        """
        srcs = []
        for idx in indices:
            md_file = self.files[idx]
            db_file = md_file.parent.joinpath(
                '.mhelper', md_file.stem + '.json')
            data_src = DataSource(md_file, db_file)
            data_src.set_config(presets=presets, forces=forces)
            # load() returns parse diagnostics; surface them on the console.
            print(data_src.load())
            # (Removed a dead `data_src.add_message` statement — it accessed
            # the method without calling it, a no-op.)
            data_src.save()
            srcs.append(data_src)
        return Session(
            audio_manager=self.audio_manager,
            data_srcs=srcs,
            speed_estimator=self.speed_estimator,
            match_manager=self.match_manager,
            stat_manager=self.stat_manager
        )
class ConsoleFrontend:
    """Interactive terminal UI (rich + pyfiglet) driving Server/Session.

    A simple state machine: each value of ``self.state`` names a method
    (welcome/statistics/about/rounding) that start() dispatches to; an
    empty state exits the loop.
    """
    best_console_size = (70, 20)
    # --- UI string constants (user-facing, partly Chinese) ---
    logo = '[blue]' + pyfiglet.figlet_format("MemoryHelper") + '[/blue]'
    hint_ver = r' [red]v3.0[/red]'
    file_head = r'[green]---------------------------- 请选择文件 ----------------------------[/green]'
    ques_head = r'[green]---------------------------- 问  题 ----------------------------[/green]'
    anse_head = r'[green]---------------------------- 回  答 ----------------------------[/green]'
    scor_head = r'[green]---------------------------- 得  分 ----------------------------[/green]'
    roun_head = r'[green]---------------------------- 一轮结束了 ----------------------------[/green]'
    resu_head = r'[green]---------------------------- 答  案 ----------------------------[/green]'
    end_head = r'[green]---------------------------- 结  束 ----------------------------[/green]'
    stat_head = r'[green]---------------------------- 统  计 ----------------------------[/green]'
    err_input_int = r'[red]请输入一个整数[/red]'
    err_input_set = r'[red]请输入 {intset} 中的一个数[/red]'
    entry_quit = r'退出'
    entry_select_all = r'[green]都来一遍[/green]'
    entry_stat = r'统计'
    entry_about = r'关于'
    hint_sel_file = r'请选择文件(空格间隔多个文件):'
    hint_retry = r'[blue]重试[/blue]'
    # Celebration banners shown when the combo counter hits these values.
    hint_combos = {
        10: '[blue]' + pyfiglet.figlet_format('Comb 10\n   Good!') + '[/blue]',
        20: '[yellow]' + pyfiglet.figlet_format(' Comb 20\nVery Good!') + '[/yellow]',
        30: '[purple]' + pyfiglet.figlet_format('Comb 30\nPerfect!') + '[/purple]',
        40: '[green]' + pyfiglet.figlet_format('Comb 40\n   Excellent!') + '[/green]',
        50: '[pink]' + pyfiglet.figlet_format(' Comb 50\n   You Made It!') + '[/pink]',
        100: '[red]' + pyfiglet.figlet_format(' Comb 100\n  Unbelievable!') + '[/red]',
        200: '[red][bold]' + pyfiglet.figlet_format(' Comb 200\n   Superman!') + '[/bold][/red]',
    }
    hint_invisible = r'[yellow]-- invisible --[/yellow]'
    hint_round = r'本轮: {round_idx}/{round_total} 下一轮: {next_round} 合计错误: {total_error} Combo: {combo}'
    hint_modify = r'[yellow]请输入修改后的答案[/yellow]'
    hint_esti_score = r'[purple]Score:[/purple] {score}'
    hint_input_score = r'InputScore(0~5):'
    hint_show_fails = r'是否查看上一轮错题[0/1]:'
    hint_show_fails_answer = r'用Ctrl-D查看答案'
    hint_round_end = r'休息一下,开始下一轮'
    hint_force_exit = r'[red][bold]强制退出MemoryHelper[/bold][/red]'
    hint_duration = r'[yellow]在{hour:02d}:{min:02d}内回答了{cnt}个问题[/yellow]'
    hint_max_combo = r'[pink] Max Combo: {combo} [/pink]'
    hint_loading = r'[green]读取中...[/green]'
    config_dictation = r'听写选项([bold]0默认[/bold],1强制听写,2强制不听写):'
    config_force_voice = r'声音选项([bold]0默认[/bold],1强制有声,2强制无声):'
    config_showall = r'全部都来一遍([bold]0[/bold]/1): '
    config_shuffle = r'打乱顺序([bold]1[/bold]/0)?'
    config_fastforward = r'没有问题会出现强行要做([bold]0[/bold]/1)?'
    stat_pattern = \
        r'''滚过的总问题数:{total_problems}
失败的总问题数:{total_failed_problems}
总回答数:{total_answering}
失败回答数:{total_failed_answering}
历史最大combo:{max_combo}
使用时间:{total_using_time}
分数分布:{score_distribution}
'''
    about_message = \
        r'''
    [purple]KEKE[/purple]的死记硬背辅助软件
    最开始是设计来背政治课的,[blue]思修[bold]军理[/bold]史纲[bold]马原[/bold]离谱性[bold]递增[/bold][/blue]
    后来加入了[bold]中文匹配[/bold]和[bold]文本到语音转换[/bold],用来听写单词了
    [yellow]现在的版本可以支持通用的问答记忆[/yellow]
    非常适合[green]打字远快于手写[/green]的程序猿朋友
    项目主页: https://github.com/KEKE046/memory-helper
    软件遵循 [blue]Apache 2.0[/blue] 协议开源,欢迎提Issue
    但提的Issue可能被KEKE[grey]鸽掉[/grey],想要新功能可以自己先尝试写一写'''
    def __init__(self, root_path: Path, search_path: Path):
        self.root_path = Path(root_path)
        self.search_path = Path(search_path) if search_path else self.root_path
        self.server = Server(self.root_path, self.search_path)
        self.state = 'welcome'
        self.session = None
        self.console = Console()
        self.start_time = None
    def get_int(self, msg, in_set=[], default=0, multiple=False):
        """Prompt until a valid integer (or list of integers when multiple)
        from in_set is entered; empty input returns default."""
        while True:
            try:
                rprint(msg, end='')
                data = input().strip()
                if not data:
                    return default
                if multiple:
                    data = [int(x) for x in re.split('\s+', data)]
                    if in_set and all([x in in_set for x in data]):
                        return data
                else:
                    data = int(data)
                    if in_set and data in in_set:
                        return data
            except KeyboardInterrupt:
                rprint(self.hint_force_exit)
                exit(0)
            except:
                rprint(self.err_input_int)
            rprint(self.err_input_set.format(
                intset=','.join(map(str, in_set))))
    def statistics(self):
        """Show lifetime statistics; any keypress returns to the menu."""
        self.console.clear()
        rprint(self.stat_head)
        stat = self.server.stat_manager.get_data()
        rprint(self.stat_pattern.format(**stat))
        input()
        self.state = 'welcome'
    def about(self):
        """Show the about screen; any keypress returns to the menu."""
        self.console.clear()
        rprint(self.logo)
        rprint(self.hint_ver)
        rprint()
        rprint(self.about_message)
        input()
        self.state = 'welcome'
    def welcome(self):
        """Main menu: pick files / show-all / statistics / about / quit."""
        self.console.clear()
        rprint(self.logo)
        rprint(self.hint_ver)
        rprint(self.file_head)
        files = self.server.get_file_names()
        rprint(f'[00] {self.entry_quit}')
        for i, f in enumerate(files):
            rprint(f'[{i + 1:02d}] {f}')
        # Menu entries after the file list.
        idx_sel_all = len(files) + 1
        idx_stat = len(files) + 2
        idx_about = len(files) + 3
        rprint(f'[{idx_sel_all:02d}] {self.entry_select_all}')
        rprint(f'[{idx_stat:02d}] {self.entry_stat}')
        rprint(f'[{idx_about:02d}] {self.entry_about}')
        indices = self.get_int(self.hint_sel_file, range(0, idx_about + 1), default=[0], multiple=True)
        if 0 in indices:
            self.state = ''
        elif idx_stat in indices:
            self.state = 'statistics'
        elif idx_about in indices:
            self.state = 'about'
        else:
            if idx_sel_all in indices:
                indices = range(len(files))
            rprint(self.hint_loading)
            dictation = self.get_int(self.config_dictation, [0, 1, 2])
            forces = [[], ["dictation"], ["no-dictation"]][dictation]
            if dictation != 1:
                voice = self.get_int(self.config_force_voice, [0, 1, 2])
                forces += [[], ["voice"], ["no-voice"]][voice]
            # Menu indices are 1-based; Server wants 0-based file indices.
            self.session = self.server.new_session(
                [x - 1 for x in indices], forces=forces)
            self.state = 'rounding'
    def start(self):
        """Run the state machine until the state becomes empty."""
        while self.state:
            getattr(self, self.state)()
    @staticmethod
    def wrap_markdown(s):
        """Convert markdown emphasis into rich color markup."""
        s = re.sub(r'\*\*(.*?)\*\*',
                   lambda x: '[yellow]' + x.group(1) + '[/yellow]', s)
        s = re.sub(
            r'\*(.*?)\*', lambda x: '[blue]' + x.group(1) + '[/blue]', s)
        return s
    def get_long_input(self):
        """Read a multi-line answer (terminated by an empty line).

        Returns (text, seconds_taken); EOF restarts the input, Ctrl-C exits.
        """
        while True:
            data = []
            try:
                start_time = time.time()
                while True:
                    user_input = input()
                    if user_input == '':
                        break
                    data.append(user_input)
                end_time = time.time()
                data = '\n'.join(data)
                timing = end_time - start_time
                break
            except EOFError:
                rprint('\n' + self.hint_retry, end='')
                input()
            except KeyboardInterrupt:
                rprint(self.hint_force_exit)
                exit(0)
        return data, timing
    def show_combo(self, combo):
        """Show a celebration banner when the combo hits a milestone."""
        self.console.clear()
        if combo in self.hint_combos:
            rprint(self.hint_combos[combo])
            input()
    def print_question(self, q):
        """Present one question, collect/score the answer, commit the score.

        Entering -1 at the score prompt lets the user edit the reference
        answer and re-runs this question (recursively).
        """
        rprint(self.ques_head)
        if q['invisible']:
            rprint(self.hint_invisible)
        else:
            rprint(self.wrap_markdown(q['title']))
        rprint('')
        rprint(self.anse_head)
        if q['autoplay']:
            self.session.audio_manager.play_audio(q['title'])
        answer, timing = self.get_long_input()
        ret = self.session.score(answer, timing)
        score = ret['score']
        score_msg = ret['message']
        rprint(self.scor_head)
        rprint(self.wrap_markdown(score_msg))
        rprint(self.resu_head)
        if q['invisible']:
            rprint(self.wrap_markdown(q['title']))
        rprint(self.wrap_markdown(q['answer']))
        rprint()
        rprint(self.hint_round.format(**q))
        rprint(self.hint_esti_score.format(score=score))
        score = self.get_int(self.hint_input_score,
                             range(-1, 6), default=score)
        if score == -1:
            rprint(self.hint_modify)
            answer, _ = self.get_long_input()
            self.session.modify_answer(answer)
            q['answer'] = answer
            self.console.clear()
            self.print_question(q)
        else:
            combo = self.session.answer(score)['combo']
            self.max_combo = max(self.max_combo, combo)
            self.show_combo(combo)
    def print_round_end_msg(self):
        """Optionally review the failed questions, then start the next round."""
        rprint(self.roun_head)
        if self.get_int(self.hint_show_fails, [0, 1]):
            failed = self.session.get_failed_last_round()['failed_probs']
            rprint(self.hint_show_fails_answer)
            for prob in failed:
                rprint(self.wrap_markdown(prob['title']))
                try:
                    input()
                except EOFError:
                    # Ctrl-D reveals the answer for this question.
                    rprint(self.wrap_markdown(prob['answer']))
                    rprint()
        rprint(self.hint_round_end)
        input()
    def print_end_msg(self):
        """Show session duration and the best combo banner reached."""
        rprint(self.end_head)
        duration = self.end_time - self.start_time
        rprint(self.hint_duration.format(hour=int(duration//60//60),
                                         min=int(duration//60 % 60), cnt=int(self.total_answered)))
        input()
        keys = sorted(list(self.hint_combos.keys()))
        if any([x <= self.max_combo for x in keys]):
            rprint(self.hint_max_combo.format(combo=self.max_combo))
            combo = max([x for x in keys if x <= self.max_combo])
            rprint(self.hint_combos[combo])
            input()
    def rounding(self):
        """Drive one full session: configure, then loop questions to the end."""
        self.start_time = time.time()
        self.total_answered = 0
        self.max_combo = 0
        config = self.session.start()
        if 'showall' in config:
            config['showall'] = bool(self.get_int(
                self.config_showall, [0, 1], default=0))
        if 'shuffle' in config:
            config['shuffle'] = bool(self.get_int(
                self.config_shuffle, [0, 1], default=1))
        if not config['showall'] and 'fastforward' in config:
            config['fastforward'] = bool(self.get_int(
                self.config_fastforward, [0, 1], default=0))
        self.session.set_config(config)
        while True:
            self.console.clear()
            ret = self.session.next_prob()
            if ret['state'] == 'end':
                break
            elif ret['state'] == 'answering':
                self.print_question(ret)
            elif ret['state'] == 'round-end':
                self.print_round_end_msg()
            # Save progress after every question/round boundary.
            self.session.save()
            self.total_answered += 1
        self.end_time = time.time()
        self.print_end_msg()
        self.state = 'welcome'
if __name__ == '__main__':
    import sys
    # Optional CLI argument: directory to search for markdown question files.
    search_dir = sys.argv[1] if len(sys.argv) > 1 else '.'
    ConsoleFrontend(Path(__file__).parent, search_dir).start()
|
import numpy as np
from multiagent.core import World, Agent, Landmark, Goal
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    """Single-agent navigation: one agent, one landmark target, and one
    goal entity used only for visualization."""

    def make_world(self):
        """Create the world with one agent, one landmark and one goal."""
        world = World()
        # add agents
        world.agents = [Agent()]
        for idx, agent in enumerate(world.agents):
            agent.name = 'agent %d' % idx
            agent.collide = False
            agent.silent = True
        # add landmarks
        world.landmarks = [Landmark()]
        for idx, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % idx
            landmark.collide = False
            landmark.movable = False
        # add goals (visualization only)
        world.goals = [Goal()]
        for idx, goal in enumerate(world.goals):
            goal.name = 'goal %d' % idx
            goal.collide = False
            goal.movable = False
        # make initial conditions
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Assign colors and randomize positions; the landmark is re-sampled
        until it starts at least the check_distance threshold from the agent."""
        for agent in world.agents:
            agent.color = np.array([0.25, 0.25, 0.25])
        for landmark in world.landmarks:
            landmark.color = np.array([0.75, 0.75, 0.75])
        world.landmarks[0].color = np.array([0.75, 0.25, 0.25])
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for landmark in world.landmarks:
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            # Re-draw until the landmark is far enough from the agent.
            while self.check_distance(world.agents, landmark.state.p_pos):
                landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        for goal in world.goals:
            goal.state.p_pos = np.zeros(world.dim_p) - 2  # parked outside the arena
            goal.state.p_vel = np.zeros(world.dim_p)
            goal.color = np.array([0.0, 1.0, 0.0])

    def check_distance(self, agents, landmark_p_pos):
        """Return True when the (single) agent is closer than 1.0 to the landmark."""
        assert len(agents) == 1
        threshold = 1.
        agent = agents[0]
        return bool(np.linalg.norm(agent.state.p_pos - landmark_p_pos) < threshold)

    def reward(self, agent, world):
        """Negative squared distance from the agent to the landmark."""
        delta = agent.state.p_pos - world.landmarks[0].state.p_pos
        return -np.sum(np.square(delta))

    def observation(self, agent, world):
        """Agent velocity followed by landmark positions relative to the agent."""
        relative = [lm.state.p_pos - agent.state.p_pos for lm in world.landmarks]
        return np.concatenate([agent.state.p_vel] + relative)
|
from ceefax.fonts import get_font, font_list
import pytest
@pytest.mark.parametrize("fontname", font_list)
def test_fonts(fontname):
    """Smoke-test every registered font: rendering sample text must yield a str."""
    font = get_font(fontname)
    text = font.text_to_ascii("Hello! ")
    assert isinstance(text, str)
|
################################################################################
# Copyright 2021-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from ..Component import NotLocalFullTileElements
class NotLocalFullTileElementsVALU(NotLocalFullTileElements):
    """Partition thread-tile into writeElements for store code (VALU path).

    Creates the writeElement mapping for full tiles (ie non-edge cases).
    (The original placed this text after the ``kernel`` assignment, where it
    was a bare string expression and never became the class docstring.)
    """
    # Component dispatch key: selected when matrix instructions are disabled.
    kernel = {"EnableMatrixInstruction": False}

    def __call__(self, writer, kernel, edge):
        """Return (vectorwidth, elements) — the store vector width and the
        ordered (tt1, tt0, vc1, vc0) write elements for a full tile."""
        # Vector stores only when enabled; edge tiles are further limited by
        # the guaranteed free-dim element multiple.
        vectorwidth = kernel["VectorWidth"] if kernel["_VectorStore"] else 1
        if edge:
            vectorwidth = min(vectorwidth, writer.maxGwvw(kernel), kernel["AssertFree0ElementMultiple"])
            assert kernel["VectorWidth"] % vectorwidth == 0
        else:
            vectorwidth = min(vectorwidth, writer.maxGwvw(kernel))
        # Full tile loop:
        elements = []
        for tt1 in range(0, kernel["ThreadTile1"] // kernel["VectorWidth"]):
            for vc1 in range(0, kernel["VectorWidth"]):
                for tt0 in range(0, kernel["ThreadTile0"] // kernel["VectorWidth"]):
                    for vc0 in range(0, kernel["VectorWidth"], vectorwidth):  # note step by fullVw
                        elements.append((tt1, tt0, vc1, vc0))
        return (vectorwidth, elements)
class NotLocalFullTileElementsMFMA(NotLocalFullTileElements):
    """Partition thread-tile into writeElements for store code (MFMA path).

    Creates the writeElement mapping for full tiles (ie non-edge cases).
    (The original placed this text after the ``kernel`` assignment, where it
    was a bare string expression and never became the class docstring.)
    """
    # Component dispatch key: selected when matrix instructions are enabled.
    kernel = {"EnableMatrixInstruction": True}

    def __call__(self, writer, kernel, edge):
        """Return (storeVectorWidth, elements) — the store vector width and
        the ordered (tt1, tt0, vc1, vc0) write elements for a full tile."""
        # Vector stores only when enabled; edge tiles are further limited by
        # the guaranteed free-dim element multiple.
        storeVectorWidth = kernel["StoreVectorWidth"] if kernel["_VectorStore"] else 1
        if edge:
            storeVectorWidth = min(storeVectorWidth, writer.maxGwvw(kernel), kernel["AssertFree0ElementMultiple"])
        else:
            storeVectorWidth = min(storeVectorWidth, writer.maxGwvw(kernel))
        # handle mfma 4x4 instruction: 4x4 folds the B block count into the
        # effective instruction size.
        matrixInstM = kernel["MatrixInstM"] * kernel["MatrixInstBM"] if (kernel["MatrixInstM"] == 4) else kernel["MatrixInstM"]
        matrixInstN = kernel["MatrixInstN"] * kernel["MatrixInstBN"] if (kernel["MatrixInstN"] == 4) else kernel["MatrixInstN"]
        matrixInstBM = 1 if (kernel["MatrixInstM"] == 4) else kernel["MatrixInstBM"]
        matrixInstBN = 1 if (kernel["MatrixInstN"] == 4) else kernel["MatrixInstBN"]
        outputsPerThread = matrixInstM * matrixInstN // kernel["WavefrontSize"]
        # handle SourceSwap: the per-thread outputs land on the other tile dim.
        totalTT0 = matrixInstBM * kernel["MIWaveTile"][0]
        totalTT1 = matrixInstBN * kernel["MIWaveTile"][1]
        totalTT0 = totalTT0 if kernel["SourceSwap"] else (totalTT0 * outputsPerThread)
        totalTT1 = (totalTT1 * outputsPerThread) if kernel["SourceSwap"] else totalTT1
        vectorWidth0 = kernel["VectorWidth"] if kernel["SourceSwap"] else kernel["MIOutputVectorWidth"]
        MIOutputVectorWidthAdj = writer.lrvwB if writer.allowLRVWforTLUandMI else kernel["MIOutputVectorWidth"]
        vectorWidth1 = MIOutputVectorWidthAdj if kernel["SourceSwap"] else 1
        elements = []
        for tt1 in range(0, totalTT1 // vectorWidth1):
            for vc1 in range(0, vectorWidth1):
                for tt0 in range(0, totalTT0 // vectorWidth0):
                    for vc0 in range(0, vectorWidth0, storeVectorWidth):  # note step by storeVectorWidth
                        elements.append((tt1, tt0, vc1, vc0))
        return (storeVectorWidth, elements)
|
import json
from labboard.BaiduExpress import BaiduExpress
from flask import (
    Blueprint, request, jsonify, current_app, render_template
)
# All routes below are mounted under /express.
bp = Blueprint("express", __name__, url_prefix="/express")
# Shared client instance used by every route in this blueprint.
baidu_express = BaiduExpress()
@bp.route("/getExpressCompany", methods=["POST"])
def get_express_company():
    """Return (as JSON) the candidate courier companies for a tracking number."""
    if request.method != "POST":
        return
    tracking_number = request.form["number"]
    return jsonify(baidu_express.get_express_company(tracking_number))
@bp.route("/getExpressState", methods=["POST"])
def get_express_state(number=None, company=None):
    """Query tracking state and render it.

    The route only accepts POST (form fields override the optional
    arguments); the number/company parameters allow direct in-process calls,
    which previously raised NameError because `result` was only assigned
    inside the POST branch.
    """
    if request.method == "POST":
        number = request.form["number"]
        company = request.form["company"]
    result = baidu_express.get_express_state(number, company)
    result["number"] = number
    # HTML for form posts; the raw dict for direct calls.
    return render_template("express/package_state.html", **{"item": result}) if (request.method == "POST") else result
@bp.route("/addPackage", methods=["POST"])
def add_package():
    """Append a package record unless an identical one is already stored."""
    if request.method == "POST":
        package_info = {
            "number": request.form["number"],
            "company": request.form["company"],
            "name": request.form["name"]
        }
        with open(current_app.config["RECORD_FILE"], "r") as f:
            record = json.load(f)
        packages = record.get("packages")
        if packages:
            # Reject exact duplicates before appending.
            if any(p == package_info for p in packages):
                return jsonify({"status": "duplicated"})
            packages.append(package_info)
        else:
            record["packages"] = [package_info]
        with open(current_app.config["RECORD_FILE"], "w") as f:
            json.dump(record, f)
        return jsonify({"status": "ok"})
@bp.route("/getPackages", methods=["GET", "POST"])
def get_packages():
    """Return every recorded package as JSON (empty list when none)."""
    with open(current_app.config["RECORD_FILE"], "r") as f:
        record = json.load(f)
    packages = record.get("packages")
    return jsonify(packages if packages else [])
@bp.route("/deletePackage", methods=["POST"])
def delete_package():
    """Remove every recorded package matching the posted number AND name."""
    if request.method == "POST":
        package_info = {
            "number": request.form["number"],
            "name": request.form["name"]
        }
        with open(current_app.config["RECORD_FILE"], "r") as f:
            record = json.load(f)
        if record.get("packages"):
            # Rebuild the list instead of list.remove() while iterating
            # (which skips elements), and compare against the posted name —
            # the original compared p["name"] to itself (always True), so it
            # effectively matched on number alone.
            record["packages"] = [
                p for p in record["packages"]
                if not (p["number"] == package_info["number"]
                        and p["name"] == package_info["name"])
            ]
            with open(current_app.config["RECORD_FILE"], "w") as f:
                json.dump(record, f)
            return jsonify({"status": "ok"})
        else:
            return jsonify({"status": "no_such_package"})
|
import random

# Generate a random test case file "E.in": a header line "n k" followed by
# n random integers in [1, 1000000] separated by spaces.
n = 20000
k = 100
# Context manager guarantees the file is flushed and closed even on error
# (the original used a bare open()/close() pair).
with open("E.in", "w") as f:
    f.write("%d %d\n" % (n, k))
    for i in range(n):
        f.write("%d " % random.randint(1, 1000000))
    f.write("\n")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, text
from sqlalchemy.dialects.mysql import BIGINT, TINYINT
from trest.db import Model as Base
class UserCertification(Base):
    """ORM model for the user real-name certification table.

    One row per user (user_id is both PK and FK to the user table); holds
    the submitted identity data plus the review status/result.
    """
    __tablename__ = 'user_certification'
    user_id = Column(BIGINT(20), primary_key=True, server_default=text("'0'"), comment='主键,user表 id')
    realname = Column(String(40), nullable=False, server_default=text("''"), comment='登录名、昵称')
    idcardno = Column(String(40), nullable=False, server_default=text("''"), comment='身份证号码')
    idcard_img = Column(String(200), nullable=False, server_default=text("''"), comment='手持身份证照片一张(要求头像清晰,身份证号码清晰)')
    authorized = Column(TINYINT(1), nullable=False, server_default=text("'0'"), comment='认证状态:( 0 待审核;1 审核通过, 2 审核失败)')
    client = Column(String(20), comment='客户端:web wechat android ios mobile')
    ip = Column(String(40), comment='添加记录的IP地址')
    updated_at = Column(BIGINT(13), comment='更新记录UTC时间')
    created_at = Column(BIGINT(13), comment='创建记录UTC时间')
    status = Column(TINYINT(1), nullable=False, server_default=text("'1'"), comment='状态:( 0 禁用;1 启用, 默认1)')
    remark = Column(String(200), comment='备注;如果审核不通过,填写原因')
    authorized_user_id = Column(BIGINT(20), comment='审核管理员ID,user 表 uuid')
|
import nltk
from libinit import preper_
from nltk.tokenize import word_tokenize
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
class Sentiment():
    """Compute VADER sentiment scores for song lyrics.

    Relies on nltk's SentimentIntensityAnalyzer and the project's
    `preper_` helper (which splits raw lyrics into lines/sentences).
    """

    def __init__(self):
        self.sia = SentimentIntensityAnalyzer()
        self.preper = preper_

    def _singleSent(self, sentence):
        """Return the VADER polarity dict (pos/neg/neu/compound) for one
        sentence."""
        # polarity_scores() already returns a plain dict; copy it instead of
        # rebuilding it key by key.
        return dict(self.sia.polarity_scores(sentence))

    def _getSentList(self, lylist):
        """Score every entry of lylist (e.g. preper.prepSongLyrics(lyrics))
        and return the list of per-line score dicts."""
        return [self._singleSent(elt) for elt in lylist]

    def _overallSongSent(self, sentlist):
        """Average the per-line score dicts into one overall score dict.

        BUGFIX: returns all-zero scores for an empty list instead of
        raising ZeroDivisionError.
        """
        if not sentlist:
            return {"pos": 0, "neg": 0, "neu": 0, "compound": 0}
        count = len(sentlist)
        return {
            key: sum(el[key] for el in sentlist) / count
            for key in ("pos", "neg", "neu", "compound")
        }

    def _getPosNeg(self, compound):
        """Classify an averaged compound score using VADER's conventional
        0.05 positivity threshold."""
        if compound >= 0.05:
            return "POSITIVE"
        return "NEGATIVE"

    def songSent(self, song):
        """Return the overall sentiment dict for a song record.

        Expects `song` to be a mapping with keys 'track', 'artist' and
        'lyrics'.
        """
        print("### Sentiment analysis for: %s by %s ###" %(song["track"], song["artist"]))
        lylist = self.preper.prepSongLyrics(song["lyrics"])
        sentList = self._getSentList(lylist)
        oSent = self._overallSongSent(sentList)
        return oSent
|
#!/usr/bin/python
import socket

# Fixed message size (bytes) assumed by mysend()/myrecv() below.
MSGLEN=65536

class MySocket(socket.socket):
    # NOTE(review): this class is broken legacy (Python 2 era) code:
    #   - the first accept() body is a truncated fragment ("sock, a");
    #   - the second accept() (which shadows the first) references
    #     _socketobject and _realsocket, neither of which is defined in
    #     this file -- the class-level __doc__ assignment raises NameError
    #     as soon as the class body executes, so this module cannot be
    #     imported as-is;
    #   - myrecv() compares a recv() result against '' (Python 2 string
    #     semantics).
    def accept(self):
        # Truncated fragment: evaluates two undefined names and discards
        # the result; certainly not the intended body.
        sock, a
    def accept(self):
        # Shadows the def above; presumably lifted from Python 2's
        # socket.py wrapper -- TODO confirm intent.
        sock, addr = self._sock.accept()
        return _socketobject(_sock=sock), addr
    accept.__doc__ = _realsocket.accept.__doc__
    def mysend(self, msg):
        # Send exactly MSGLEN bytes of msg, looping until all are sent.
        totalsent = 0
        while totalsent < MSGLEN:
            sent = self.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent
    def myrecv(self):
        # Receive exactly MSGLEN bytes, in chunks of at most 2048.
        chunks = []
        bytes_recd = 0
        while bytes_recd < MSGLEN:
            chunk = self.recv(min(MSGLEN - bytes_recd, 2048))
            if chunk == '':
                raise RuntimeError("socket connection broken")
            chunks.append(chunk)
            bytes_recd = bytes_recd + len(chunk)
        return ''.join(chunks)
    # Dead code preserved as a bare string literal: a commented-out demo
    # __init__/connect pair (from the Python socket HOWTO's example class).
    '''
    demonstration class only
      - coded for clarity, not efficiency
    def __init__(self, sock=None):
        if sock is None:
            self.sock = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
    def connect(self, host, port):
        self.sock.connect((host, port))
    '''
|
# -*- coding: utf-8 -*-
"""
EvMore - pager mechanism
This is a pager for displaying long texts and allows stepping up and
down in the text (the name comes from the traditional 'more' unix
command).
To use, simply pass the text through the EvMore object:
from evennia.utils.evmore import EvMore
text = some_long_text_output()
EvMore(caller, text, always_page=False, session=None, justify_kwargs=None, **kwargs)
One can also use the convenience function msg from this module:
from evennia.utils import evmore
text = some_long_text_output()
evmore.msg(caller, text, always_page=False, session=None, justify_kwargs=None, **kwargs)
Where always_page decides if the pager is used also if the text is not
long enough to need to scroll, session is used to determine which session to relay to
and justify_kwargs are kwargs to pass to utils.utils.justify in order to change the formatting
of the text. The remaining **kwargs will be passed on to the
caller.msg() construct every time the page is updated.
"""
from builtins import object, range
from django.conf import settings
from evennia import Command, CmdSet
from evennia.commands import cmdhandler
from evennia.utils.utils import justify
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
# we need to use NAWS for this
_SCREEN_WIDTH = settings.CLIENT_DEFAULT_WIDTH
_SCREEN_HEIGHT = settings.CLIENT_DEFAULT_HEIGHT
# text
_DISPLAY = \
"""{text}
(|wmore|n [{pageno}/{pagemax}] retur|wn|n|||wb|nack|||wt|nop|||we|nnd|||wq|nuit)"""
class CmdMore(Command):
    """
    Pager navigation command.

    Bound to the empty-input key so that a bare <return> pages forward;
    the aliases cover quit/back/top/end navigation.
    """
    key = _CMD_NOINPUT
    aliases = ["quit", "q", "abort", "a", "next", "n",
               "back", "b", "top", "t", "end", "e"]
    auto_help = False

    def func(self):
        """
        Route the triggering alias to the matching pager action.
        """
        caller = self.caller
        pager = caller.ndb._more
        if not pager and hasattr(caller, "account"):
            pager = caller.account.ndb._more
        if not pager:
            caller.msg("Error in loading the pager. Contact an admin.")
            return
        dispatch = (
            (("abort", "a", "q"), "page_quit"),
            (("back", "b"), "page_back"),
            (("top", "t", "look", "l"), "page_top"),
            (("end", "e"), "page_end"),
        )
        for keys, action in dispatch:
            if self.cmdstring in keys:
                getattr(pager, action)()
                return
        # bare <return> or next/n: page forward
        pager.page_next()
class CmdMoreLook(Command):
    """
    Replacement 'look' while paging: redraws the current page instead of
    letting the normal OOC look command fire.
    """
    key = "look"
    aliases = ["l"]
    auto_help = False

    def func(self):
        """
        Redisplay the current pager page.
        """
        caller = self.caller
        pager = caller.ndb._more
        if not pager and hasattr(caller, "account"):
            pager = caller.account.ndb._more
        if pager:
            pager.display()
            return
        caller.msg("Error in loading the pager. Contact an admin.")
class CmdSetMore(CmdSet):
    """
    Temporary cmdset holding the pager commands; merged onto the caller
    while the pager is active.
    """
    key = "more_commands"
    priority = 110

    def at_cmdset_creation(self):
        """
        Populate the set with the pager commands.
        """
        for command_cls in (CmdMore, CmdMoreLook):
            self.add(command_cls())
class EvMore(object):
    """
    The main pager object
    """
    def __init__(self, caller, text, always_page=False, session=None,
                 justify_kwargs=None, exit_on_lastpage=False, **kwargs):
        """
        Initialization of the text handler.

        Args:
            caller (Object or Account): Entity reading the text.
            text (str): The text to put under paging.
            always_page (bool, optional): If `False`, the
                pager will only kick in if `text` is too big
                to fit the screen.
            session (Session, optional): If given, this session will be used
                to determine the screen width and will receive all output.
            justify_kwargs (dict, bool or None, optional): If given, this should
                be valid keyword arguments to the utils.justify() function. If False,
                no justification will be done.
            exit_on_lastpage (bool, optional): If reaching the last page without the
                page being completely filled, exit pager immediately. If unset,
                another move forward is required to exit. If set, the pager
                exit message will not be shown.
            kwargs (any, optional): These will be passed on
                to the `caller.msg` method.
        """
        self._caller = caller
        self._kwargs = kwargs
        self._pages = []
        # BUGFIX: these used to be initialized as lists, and the position
        # was stored under a dead name `_npos` that display() never read
        # (display() reads `_pos`).  Use the integer defaults the rest of
        # the class expects, so display() is safe even before page_top()
        # runs (e.g. on the early-return path below).
        self._npages = 0
        self._pos = 0
        self.exit_on_lastpage = exit_on_lastpage
        self._exit_msg = "Exited |wmore|n pager."
        if not session:
            # if not supplied, use the first session to
            # determine screen size
            sessions = caller.sessions.get()
            if not sessions:
                return
            session = sessions[0]
        self._session = session
        # set up individual pages for different sessions
        height = max(4, session.protocol_flags.get("SCREENHEIGHT", {0: _SCREEN_HEIGHT})[0] - 4)
        width = session.protocol_flags.get("SCREENWIDTH", {0: _SCREEN_WIDTH})[0]
        if justify_kwargs is False:
            # no justification. Simple division by line
            lines = text.split("\n")
        else:
            # we must break very long lines into multiple ones
            justify_kwargs = justify_kwargs or {}
            width = justify_kwargs.get("width", width)
            justify_kwargs["width"] = width
            justify_kwargs["align"] = justify_kwargs.get("align", 'l')
            justify_kwargs["indent"] = justify_kwargs.get("indent", 0)
            lines = []
            for line in text.split("\n"):
                if len(line) > width:
                    lines.extend(justify(line, **justify_kwargs).split("\n"))
                else:
                    lines.append(line)
        # always limit number of chars to 10 000 per page
        height = min(10000 // max(1, width), height)
        self._pages = ["\n".join(lines[i:i + height]) for i in range(0, len(lines), height)]
        self._npages = len(self._pages)
        if self._npages <= 1 and not always_page:
            # no need for paging; just pass-through.
            caller.msg(text=text, session=self._session, **kwargs)
        else:
            # go into paging mode
            # first pass on the msg kwargs
            caller.ndb._more = self
            caller.cmdset.add(CmdSetMore)
            # goto top of the text
            self.page_top()

    def display(self):
        """
        Pretty-print the page.
        """
        pos = self._pos
        text = self._pages[pos]
        page = _DISPLAY.format(text=text,
                               pageno=pos + 1,
                               pagemax=self._npages)
        # check to make sure our session is still valid
        sessions = self._caller.sessions.get()
        if not sessions:
            self.page_quit()
            return
        # this must be an 'is', not == check
        if not any(ses for ses in sessions if self._session is ses):
            self._session = sessions[0]
        self._caller.msg(text=page, session=self._session, **self._kwargs)

    def page_top(self):
        """
        Display the top page
        """
        self._pos = 0
        self.display()

    def page_end(self):
        """
        Display the bottom page.
        """
        self._pos = self._npages - 1
        self.display()

    def page_next(self):
        """
        Scroll the text to the next page. Quit if already at the end
        of the page.
        """
        if self._pos >= self._npages - 1:
            # exit if we are already at the end
            self.page_quit()
        else:
            self._pos += 1
            self.display()
            if self.exit_on_lastpage and self._pos >= self._npages - 1:
                self.page_quit()

    def page_back(self):
        """
        Scroll the text back up, at the most to the top.
        """
        self._pos = max(0, self._pos - 1)
        self.display()

    def page_quit(self):
        """
        Quit the pager
        """
        del self._caller.ndb._more
        self._caller.msg(text=self._exit_msg, **self._kwargs)
        self._caller.cmdset.remove(CmdSetMore)
def msg(caller, text="", always_page=False, session=None, justify_kwargs=None, **kwargs):
    """
    Convenience wrapper that pages `text` to `caller` through EvMore,
    mimicking the normal msg method.

    Args:
        caller (Object or Account): Entity reading the text.
        text (str): The text to put under paging.
        always_page (bool, optional): When `False`, paging only activates
            if `text` does not fit the screen.
        session (Session, optional): Session used to determine the screen
            width; it receives all output.
        justify_kwargs (dict, bool or None, optional): Keyword arguments
            forwarded to utils.justify(); `False` disables justification.
        kwargs (any, optional): Forwarded to the `caller.msg` method.
    """
    EvMore(
        caller,
        text,
        always_page=always_page,
        session=session,
        justify_kwargs=justify_kwargs,
        **kwargs
    )
|
'''
## To work around a conflict between PIL and Image.
## Source: David Goodger's comment at http://sourceforge.net/p/docutils/bugs/137/
try:
import PIL.Image
import sys
sys.modules['Image'] = PIL.Image
except ImportError: pass
'''
import Image
from numpy import *
import numpy
def image_stack_from_filenames( filenames, size_mismatch_behavior = None, convert = None, tile = None, dtype = None ):
'''
Given a list of pathnames to images 'filenames',
optional parameter 'size_mismatch_behavior' which can be one of 'skip', 'error', or 'crop-upperleft' (default: 'error'), and
optional parameter 'convert' to specify what pixel format to convert the loaded image into ('L' for grayscale (default), 'RGB', or 'RGBA'),
optional parameter 'dtype' to decide which dtype format to return (values between 0 and 1 if floating point, otherwise between 0 and 255) (default: float),
returns a numpy.array containing all images (the first dimension selects the image).
Optional parameter tile, if present is a sequence of four integers.
The first two integers are the row and column of the tile, and the second two
integers specify how many pixel rows and columns are in each tile.
The resulting stack will have a zero in its shape's 1-th or 2-th entry
if the requested tile is outside of the images.
NOTE: 'size_mismatch_behavior' 'skip' means to skip frames whose size differs from filenames[0]'s size.
'''
assert len( filenames ) > 0
if size_mismatch_behavior is None: size_mismatch_behavior = 'error'
if convert is None: convert = 'L'
if dtype is None: dtype = float
assert issubdtype( dtype, float ) or issubdtype( dtype, integer )
stack = None
shapes = []
for frame_count, fname in enumerate( filenames ):
print 'Loading "%s"' % ( fname, )
img = Image.open( fname ).convert( convert )
arr = asarray( img, dtype = dtype )
if issubdtype( dtype, float ): arr /= 255.
shapes.append( arr.shape )
if tile is not None:
rows, cols = arr.shape[:2]
row,col, pixels_per_row, pixels_per_column = tile
assert row >= 0
assert col >= 0
assert pixels_per_row > 0
assert pixels_per_column > 0
arr = arr[
row*pixels_per_row : (row+1)*pixels_per_row,
col*pixels_per_column : (col+1)*pixels_per_column
]
## If this is our first image, allocate the stack.
if stack is None:
stack = empty( (len(filenames),) + arr.shape, dtype = dtype )
if arr.shape != stack.shape[1:]:
if 'error' == size_mismatch_behavior:
raise RuntimeError( 'Images have varying sizes.' )
elif 'skip' == size_mismatch_behavior:
print 'Ignoring image with the wrong size:', filename
continue
elif 'crop-upperleft' == size_mismatch_behavior:
minshape = tuple( array( [ arr.shape, stack.shape[1:] ] ).min(0) )
if stack.shape[1:] != minshape:
stack = stack[ :, :minshape[0], :minshape[1], :minshape[2] ]
arr = arr[ :minshape[0], :minshape[1], :minshape[2] ]
else:
raise NotImplementedError( 'Unknown size mismatch behavior: %s' % size_mismatch_behavior )
stack[ frame_count ] = arr
return stack
def arr2img( arr ):
    '''
    Given a numpy.array 'arr' representing a floating point image (one whose
    values are between 0 and 1), returns 'arr' converted to a PIL Image,
    quantized to 8 bits per channel.
    '''
    assert arr.dtype in ( float32, float64, float )
    eight_bit = asarray( ( arr * 255 ).round(0).clip( 0, 255 ), dtype = uint8 )
    return Image.fromarray( eight_bit )
def process_tiled_image_stack_from_filenames( process_image_stack, tile, filenames, size_mismatch_behavior = None, convert = None, dtype = None ):
    '''
    Given a function 'process_image_stack' that takes an image stack as would
    be returned from image_stack_from_filenames(),
    a 2-element 'tile' parameter specifying the row-and-column pixel dimensions
    into which to tile the image stack that would be returned from
    image_stack_from_filenames(),
    and optional parameters 'size_mismatch_behavior' and 'convert' and 'dtype' to pass to
    image_stack_from_filenames(),
    returns the untiled result of process_image_stack() applied to the tiled image stack.

    NOTE: For convenience, if the 'tile' parameter is None, this function skips the tiling and untiling step.
    NOTE: For convenience, if the 'tile' parameter is 'auto', this function will assume square images
    and choose a tiling that uses less than 1GB ram.
    '''
    ## NOTE(review): `dtype` is accepted but never forwarded to
    ## image_stack_from_filenames() below -- possible oversight, confirm.
    if tile is None:
        return process_image_stack( image_stack_from_filenames( filenames, size_mismatch_behavior = size_mismatch_behavior, convert = convert ) )
    if tile == 'auto':
        tile = tile_parameter_for_1GB( len( filenames ), size_mismatch_behavior = size_mismatch_behavior, convert = convert, dtype = dtype )
    assert len( tile ) == 2
    tile = tuple( tile )
    assert tile[0] > 0
    assert tile[1] > 0
    ## tiles[r] collects the processed results for tile row r, in column order.
    tiles = [[]]
    row = 0
    col = 0
    ## Walk tiles left-to-right, top-to-bottom until we run off the image.
    while True:
        stack = image_stack_from_filenames( filenames, size_mismatch_behavior = size_mismatch_behavior, convert = convert, tile = (row,col) + tile )
        print 'row, col, stack.shape:', row, col, stack.shape
        ## We can't average here, because one of the shape entries might be 0.
        ## If we fell off the columns,
        ## move to the next row.
        if stack.shape[2] == 0:
            tiles.append( [] )
            row += 1
            col = 0
        ## If we fell off the rows, we're done.
        elif stack.shape[1] == 0:
            ## We will have added a final, unused row.
            assert len( tiles[-1] ) == 0
            del tiles[-1]
            break
        ## Otherwise, we found a good tile.
        else:
            processed = process_image_stack( stack )
            tiles[-1].append( processed )
            col += 1
        ## Free the memory as soon as possible.
        del stack
    ## Stitch the per-tile results back into one array.
    result = untile( tiles )
    return result
def untile( tiles ):
    '''
    Given a sequence of sequences of numpy.arrays 'tiles'
    representing a row-major 2D grid of tiles,
    where the arrays in each row of 'tiles' have the same number of rows,
    the arrays in each column of 'tiles' have the same number of columns,
    and every array has the same number of higher dimension (color channels)
    returns the arrays of the tile as a single, larger array composed of
    the tile arrays in a layout corresponding to their layout in 'tiles'.
    '''
    ## Copy the shape and overwrite the first two entries: total rows come
    ## from the first column of tiles, total columns from the first row.
    ## (Rewritten from `xrange(len(...))` index loops -- a Python-2-only
    ## builtin and a non-idiomatic iteration pattern -- to direct iteration,
    ## which behaves identically and also runs under Python 3.)
    shape = list( tiles[0][0].shape )
    shape[0] = sum( row_tiles[0].shape[0] for row_tiles in tiles )
    shape[1] = sum( col_tile.shape[1] for col_tile in tiles[0] )
    result = zeros( tuple( shape ), dtype = float )
    ## Paste each tile at its running row/column offset.
    rowoff = 0
    for row in tiles:
        coloff = 0
        for tile in row:
            result[ rowoff : rowoff + tile.shape[0], coloff : coloff + tile.shape[1] ] = tile
            coloff += tile.shape[1]
        rowoff += tile.shape[0]
    return result
def tile_parameter_for_1GB( num_images, GB = 1, size_mismatch_behavior = None, convert = None, dtype = None ):
    '''
    Returns a 'tile' parameter (a 2-tuple of pixel rows, pixel columns)
    suitable for passing to image_stack_from_filenames()
    that conservatively limits the total RAM usage to 1 GB.

    Optional parameter 'GB', if present specifies a different number of GB.
    Optional parameters 'convert' and 'dtype' match the same-named parameter to
    image_stack_from_filenames().
    '''
    assert GB > 0
    if convert is None: convert = 'L'
    if dtype is None: dtype = numpy.dtype( float )
    assert issubdtype( dtype, float ) or issubdtype( dtype, integer )
    bytes_per_channel = dtype.itemsize
    ## One channel per letter of the PIL mode string ('L' = 1, 'RGB' = 3, ...).
    num_channels = len( convert )
    ## 'crop-upperleft' may need a second in-memory copy of the stack.
    overhead_factor = 2 if size_mismatch_behavior == 'crop-upperleft' else 1
    ## Let's use a maximum of 1 GB of RAM for a stack.
    ## Let N be the number of images.
    ## Let M be the amount of memory each image should contribute: 1 GB / N.
    ## Tiles' pixel edge lengths should then be
    ## sqrt( 1 GB / (N * channels * bytes_per_channel) ) / overhead_factor.
    pixel_edge_length = max( int( sqrt( float( GB*1024*1024*1024 ) / ( num_images * num_channels*bytes_per_channel ) )/overhead_factor ), 1 )
    print( 'Automatic tiling with tiles of size: %d by %d' % ( pixel_edge_length, pixel_edge_length ) )
    ## BUGFIX: this used to return the string '%d/%d' % (...), but the
    ## caller (process_tiled_image_stack_from_filenames) asserts
    ## len(tile) == 2 and concatenates it to a (row, col) tuple, so the
    ## tile parameter must be a 2-tuple of integers.
    return ( pixel_edge_length, pixel_edge_length )
|
import chess.svg
import requests
import scipy.stats as st
import numpy as np
import time
import config
def calc_percs(white, black, draws):
    """Return (white_share, black_share, draw_share, total_games) for the
    given result counts, or (None, None, None, 0) when no games exist."""
    total_games = white + black + draws
    if total_games > 0:
        return (white / total_games,
                black / total_games,
                draws / total_games,
                total_games)
    return None, None, None, 0
def calc_value(p, n):
    """Return (p, lower, upper, n): a normal-approximation confidence
    interval around proportion p (clamped at 0), using config.ALPHA.

    Proportions backed by 5 or fewer games are zeroed out entirely.
    """
    if n <= 5:
        return 0, 0, 0, n
    z = st.norm.ppf(1 - config.ALPHA / 2)
    margin = z * np.sqrt(p * (1 - p) / n)
    return p, max(0, p - margin), max(0, p + margin), n
class Node():
    """One position in an opening tree, backed by the lichess opening
    explorer API.

    NOTE(review): constructing a Node immediately performs a blocking HTTP
    request (with naive 429 retry) -- building many Nodes is slow.
    """
    def __init__(self, fen, lastmove = '', san = '', ):
        self.fen = fen
        # NOTE(review): drops the last 4 *characters* of the FEN, not the
        # last 4 fields -- confirm this is the intended "short fen".
        self.short_fen = fen[:-4]
        self.lastmove = lastmove
        self.san = san
        # '#' in the SAN means the move that reached this node gave mate.
        self.terminal = '#' in san
        self.explored = False
        self.best_move = None
        self.board = chess.Board(fen)
        # Fetch and annotate the explorer statistics for this position.
        self.stats = self.call_api()
        self.parse_stats()
    def show(self):
        """Render the board as SVG; works only where display() exists
        (IPython/Jupyter) and silently does nothing elsewhere."""
        try:
            print('')
            display(chess.svg.board(self.board, lastmove = self.lastmove, size = 400))
            print('')
        except:
            pass
    def play(self, san):
        """Return a new Node for the position after playing `san` here
        (triggers another API call via Node.__init__)."""
        board = chess.Board(self.fen)
        move = board.push_san(san)
        node = Node(board.fen(), lastmove = move, san = san)
        return node
    def call_api(self):
        """Query the lichess opening explorer for this position.

        Retries every 10 seconds while rate-limited (HTTP 429); the final
        request URL is kept on self.opening_url for debugging.
        """
        variant = config.VARIANT
        speeds = config.SPEEDS
        ratings = config.RATINGS
        moves = config.MOVES
        # We only need aggregate stats, not example games.
        recentGames = 0
        topGames = 0
        play = ""
        url = 'https://explorer.lichess.ovh/lichess?'
        url += f'variant={variant}&'
        for speed in speeds:
            url += f'speeds[]={speed}&'
        for rating in ratings:
            url += f'ratings[]={rating}&'
        url += f'recentGames={recentGames}&'
        url += f'topGames={topGames}&'
        url += f'moves={moves}&'
        url += f'play={play}&'
        url += f'fen={self.fen}'
        self.opening_url = url
        while True:
            r = requests.get(url)
            if r.status_code == 429:
                print('Rate limited - waiting 10s...')
                time.sleep(10)
            else:
                response = r.json()
                break
        return response
    def parse_stats(self, move = None):
        """Annotate the raw API stats (and each candidate move) with
        win/draw shares and total game counts.

        NOTE(review): the `move` parameter is unused.
        """
        stats = self.stats
        stats['white_perc'], stats['black_perc'], stats['draw_perc'], stats['total_games'] = calc_percs(stats['white'], stats['black'], stats['draws'])
        for m in self.stats['moves']:
            m['white_perc'], m['black_perc'], m['draw_perc'], m['total_games'] = calc_percs(m['white'], m['black'], m['draws'])
    def score_moves(self):
        """Score each candidate move for the side to move and pick the one
        with the best lower confidence bound.

        Returns:
            (moves_dict, best_move_san, value, (lower, upper), n_games)
        """
        moves = {}
        best_lb_value = -np.inf
        best_move = None
        for move in self.stats['moves']:
            # Score from the perspective of the side to move.
            if self.board.turn == chess.WHITE:
                value, lb_value, ub_value, n = calc_value(move['white_perc'], move['total_games'])
            else:
                value, lb_value, ub_value, n = calc_value(move['black_perc'], move['total_games'])
            key = move['san']
            moves[key] = {
                'value': value
                , 'lb_value': lb_value
                , 'ub_value': ub_value
                , 'n': n
            }
        # Pick the move maximizing the lower confidence bound.
        lb_potencies = {k:v['lb_value'] for k,v in moves.items()}
        best_move = max(lb_potencies, key=lb_potencies.get)
        potency = moves[best_move]['value']
        lb_potency = moves[best_move]['lb_value']
        ub_potency = moves[best_move]['ub_value']
        n = moves[best_move]['n']
        return moves, best_move, potency, (lb_potency, ub_potency), n
    def find_move(self, move):
        """Look up `move`'s entry in the explorer stats.

        Returns (move_stats, chance) where chance is the fraction of games
        from this position in which the move was played.
        """
        try: # Try to find the next move in the opening stats from the API
            # The explorer encodes castling as king-to-rook squares
            # (e1h1 etc.), so translate standard castling UCI first.
            if move.uci() == 'e8g8':
                move_uci = 'e8h8'
            elif move.uci() == 'e1g1':
                move_uci = 'e1h1'
            elif move.uci() == 'e8c8':
                move_uci = 'e8a8'
            elif move.uci() == 'e1c1':
                move_uci = 'e1a1'
            else:
                move_uci = move.uci()
            move_stats = next(item for item in self.stats['moves'] if item["uci"] == move_uci)
        except:
            raise Exception(f'Cannot find move {move_uci} in opening explorer API response')
        chance = move_stats['total_games'] / self.stats['total_games']
        return move_stats, chance
    def create_children(self):
        """Build (and fetch stats for) a child Node for every candidate
        move the explorer lists from this position.

        NOTE(review): the enumerate index `i` is unused.
        """
        children = []
        for i, m in enumerate(self.stats['moves']):
            node = self.play(m['san'])
            children.append(node)
        return children
|
import base64
from Crypto.Cipher import AES

# Create the encryption key.
# NOTE(review): a hard-coded key and AES in ECB mode are insecure; flagged
# here only, code left unchanged.
chave = "0123456789ABCDEF"
aes = AES.new(chave, AES.MODE_ECB)
# Read the name of the file to encrypt (Python 2 raw_input).
arquivo = raw_input()
# Read the file contents; AES requires the length to be a multiple of 16.
arq_entrada = open(arquivo, "r")
arq_entrada = arq_entrada.read()
# If the length is not a multiple of 16, pad with '#' characters until it is.
cryptoSaida = arq_entrada+'#'*(16-len(arq_entrada)%16)
# Encrypt the padded contents, then base32-encode the ciphertext so the
# output contains no awkward characters.
texto_cifrado = base64.b32encode(aes.encrypt(cryptoSaida))
# Apply the same pad/encrypt/encode steps to the file name itself to build
# the output file name.
titulo_novo=base64.b32encode(aes.encrypt(arquivo+'#'*(16-len(arquivo)%16)))
arq_saida = open(titulo_novo,'w')
arq_saida.write(texto_cifrado)
arq_saida.close()
|
from . import CC
from . import default_settings
from . import m2det320_resnet101
from . import m2det320_vgg
from . import m2det512_vgg
from . import m2det704_vgg
from . import m2det800_vgg
|
# Wind Speed
from gpiozero import Button
class RainGauge:
    '''
    Count tipping-bucket rain gauge state changes and convert them to
    millimetres of rain.

    NOTE(review): callbacks run on gpiozero's background thread while
    read_and_reset() runs on the caller's thread; no locking is done, so a
    tip arriving between the read and the reset can be lost -- confirm
    whether that is acceptable.
    '''
    RAIN_GAUGE_BUTTON = 24  # GPIO number (Physical pin 18)
    RAIN_UNIT = 0.518  # mm per button going to/from on/off (need to test this value)

    def __init__(self, button=RAIN_GAUGE_BUTTON):
        '''
        Set up callbacks to count edge changes of the rain gauge.
        Our gauge flips from on to off every RAIN_UNIT mm of rain, so both
        edges are counted.
        '''
        self.rain_gauge_sensor = Button(button)
        self.rain_gauge_count = 0
        self.rain_gauge_sensor.when_pressed = self.flop
        self.rain_gauge_sensor.when_released = self.flop

    def flop(self):
        '''
        Edge-change callback: count one state change.

        BUGFIX: the original lacked `self` and contained the invalid
        statement `global self.wind_count` (a SyntaxError).
        '''
        self.rain_gauge_count += 1

    def read_and_reset(self):
        '''
        Return the rainfall (mm) accumulated since the last call and reset
        the counter to 0.

        BUGFIX: the original lacked `self`, read the non-existent attribute
        `self.wind_count`, and multiplied by the undefined name RAIN_UNITS.
        '''
        count = self.rain_gauge_count
        self.rain_gauge_count = 0
        return count * self.RAIN_UNIT
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-17 14:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11, 2017-05-17).

    Adds the Company model and a polymorphic Project hierarchy
    (Project / ArtProject / ResearchProject via multi-table inheritance),
    links Company to Project (FK + M2M), and re-points Comment.entry with
    an explicit related_name.  Generated code: keep operations intact.
    """

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('example', '0002_taggeditem'),
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topic', models.CharField(max_length=30)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AlterField(
            model_name='comment',
            name='entry',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='example.Entry'),
        ),
        # ArtProject/ResearchProject subclass Project via a parent link
        # (multi-table inheritance).
        migrations.CreateModel(
            name='ArtProject',
            fields=[
                ('project_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='example.Project')),
                ('artist', models.CharField(max_length=30)),
            ],
            options={
                'abstract': False,
            },
            bases=('example.project',),
        ),
        migrations.CreateModel(
            name='ResearchProject',
            fields=[
                ('project_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='example.Project')),
                ('supervisor', models.CharField(max_length=30)),
            ],
            options={
                'abstract': False,
            },
            bases=('example.project',),
        ),
        # Content-type pointer used by django-polymorphic to recover the
        # concrete subclass of each Project row.
        migrations.AddField(
            model_name='project',
            name='polymorphic_ctype',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_example.project_set+', to='contenttypes.ContentType'),
        ),
        migrations.AddField(
            model_name='company',
            name='current_project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='companies', to='example.Project'),
        ),
        migrations.AddField(
            model_name='company',
            name='future_projects',
            field=models.ManyToManyField(to='example.Project'),
        ),
    ]
|
from subprocess import CalledProcessError
from typing import List, Tuple
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed, CancelledError
import traceback
import os
import shutil
import datetime
from flacmirror.misc import format_date
from .encode import encode_flac
from .options import Options
from .files import source_is_newer, get_all_files, generate_output_path
def job_required(src_file: Path, dst_file: Path, options: Options) -> bool:
    """Decide whether src_file must be processed into dst_file.

    True when the destination is missing, when overwrite mode is "all", or
    when it is "old" and the source is newer than the destination.
    """
    if not dst_file.exists():
        return True
    if options.overwrite == "all":
        return True
    if options.overwrite == "old" and source_is_newer(src_file, dst_file):
        return True
    return False
def generate_jobs(options: Options) -> Tuple[List["Job"], List["JobDelete"]]:
    """Scan the source tree and build the copy/encode jobs, plus (when
    deletion is enabled) the deletion jobs for stale destination files."""
    extensions = ["flac"]
    if options.copy_ext is not None:
        # Accept extensions given either with or without a leading dot.
        extensions += [e[1:] if e.startswith(".") else e for e in options.copy_ext]
    src_files = get_all_files(
        options.src_dir, extensions=extensions, allowed_names=options.copy_file
    )
    # Every valid destination path, whether or not a job is needed for it;
    # used below to decide which existing destination files are stale.
    dst_files: List[Path] = []
    # Copy jobs stay interleaved with encode jobs; deletions get their own
    # list so they can be confirmed separately.
    jobs: List["Job"] = []
    src_root = options.src_dir.absolute()
    dst_root = options.dst_dir.absolute()
    for src_file in src_files:
        dst_file = generate_output_path(
            base=dst_root,
            input_suffix=".flac",
            suffix=".ogg",
            file=src_file.relative_to(src_root),
        )
        dst_files.append(dst_file)
        if not job_required(src_file, dst_file, options):
            continue
        # FLAC sources are encoded; everything else is copied verbatim.
        job_cls = JobEncode if src_file.suffix == ".flac" else JobCopy
        jobs.append(job_cls(src_file, dst_file))
    if not options.delete:
        return jobs, []
    # Anything found under dst_dir that no source file maps to is stale.
    valid = set(bytes(p) for p in dst_files)
    jobs_delete = [
        JobDelete(found)
        for found in get_all_files(options.dst_dir, extensions=None)
        if bytes(found) not in valid
    ]
    return jobs, jobs_delete
class Job:
    # Base class for all work items; subclasses implement run().
    def run(self, options: Options):
        """Execute the job. No-op in the base class."""
        pass
class JobEncode(Job):
    """Encode one source FLAC file to the mirrored destination path."""

    def __init__(self, src_file: Path, dst_file: Path):
        self.src_file = src_file
        self.dst_file = dst_file

    def run(self, options: Options):
        """Announce the job; unless dry-running, create the target
        directory and perform the encode."""
        print(f"Encoding: {str(self.src_file)}\nOutput : {str(self.dst_file)}")
        if options.dry_run:
            return
        self.dst_file.parent.mkdir(parents=True, exist_ok=True)
        encode_flac(self.src_file, self.dst_file, options)
class JobCopy(Job):
    """Copy one non-FLAC source file to the mirrored destination path."""

    def __init__(self, src_file: Path, dst_file: Path):
        self.src_file = src_file
        self.dst_file = dst_file

    def run(self, options: Options):
        """Announce the job; unless dry-running, create the target
        directory and copy the file."""
        print(f"Copying {str(self.src_file)}\n to {str(self.dst_file)}")
        if options.dry_run:
            return
        self.dst_file.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(str(self.src_file), str(self.dst_file))
class JobDelete(Job):
    """Delete a stale file from the destination tree."""

    def __init__(self, file: Path):
        self.file = file

    def run(self, options: Options):
        """Remove the file after re-checking it really lives under dst_dir."""
        # Safety net: never delete anything outside the destination tree.
        assert options.dst_dir.absolute() in self.file.absolute().parents
        print(f"Deleting from dst:{self.file}")
        if options.dry_run:
            return
        self.file.unlink()
class JobQueue:
    """Build and execute all jobs derived from the configured options."""

    def __init__(self, options: Options):
        self.options = options
        print("Scanning files and calculating jobs...")
        self.jobs, self.jobs_delete = generate_jobs(options)
        # Futures of the currently running batch; kept so cancel() can
        # reach them.
        self.futures = []

    def run_singlethreaded(self):
        """Run all copy/encode jobs sequentially (deletions are not run)."""
        for job in self.jobs:
            job.run(self.options)

    def run(self):
        """Run deletions (after interactive confirmation unless --yes),
        then all copy/encode jobs on a thread pool, aborting the batch on
        the first failure."""
        start_time = datetime.datetime.now()
        if self.jobs_delete:
            for job in self.jobs_delete:
                print(f"Marked for deletion: {job.file}")
            if not self.options.yes:
                # prompt to ask for permission to delete
                while True:
                    inp = input(
                        "Warning! The files listed above will be deleted. "
                        "Do you want to proceed? (y/[n]):"
                    )
                    if inp == "y":
                        break
                    elif inp == "n" or inp == "":
                        # Declining deletion aborts the whole run.
                        return
            print("Deleting...")
            for job in self.jobs_delete:
                job.run(self.options)
        if self.options.num_threads is not None:
            num_threads = self.options.num_threads
        else:
            num_threads = os.cpu_count()
        print("Running copy/encode jobs...")
        with ThreadPoolExecutor(max_workers=num_threads) as e:
            self.futures = [e.submit(job.run, self.options) for job in self.jobs]
            for future in as_completed(self.futures):
                try:
                    future.result()
                except CancelledError:
                    # Raised for futures we cancelled ourselves; ignore.
                    pass
                # NOTE(review): `e` below shadows the executor bound by the
                # `with` statement; harmless since the executor is not
                # referenced afterwards.
                except CalledProcessError as e:
                    print(f"\nError when calling: {e.cmd}")
                    print(f"Process returned code: {e.returncode}")
                    # print(f"stdout:\n{e.stdout}")
                    print(f"stderr:\n{e.stderr.decode()}")
                    self.cancel()
                    # do not check all the other futures and print their errors
                    break
                except Exception as e:  # pylint: disable=broad-except
                    print("Error:")
                    print(traceback.format_exc())
                    self.cancel()
                    # do not check all the other futures and print their errors
                    break
        stop_time = datetime.datetime.now()
        print(f"All jobs done. Took {format_date(stop_time - start_time)}.")

    def cancel(self):
        """Cancel all not-yet-started jobs; already-running ones finish."""
        print("Stopping pending jobs and finishing running jobs...")
        for future in self.futures:
            # Cancel still pending Futures if we stop early
            future.cancel()
|
#
# PySNMP MIB module CISCO-CABLE-DIAG-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-CABLE-DIAG-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:34:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated MIB code.  `mibBuilder` is intentionally not
# imported here; pysnmp executes generated modules inside a namespace that
# already provides it, so do not "fix" this by adding an import.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
NotificationGroup, ModuleCompliance, AgentCapabilities = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "AgentCapabilities")
Integer32, iso, NotificationType, Counter64, ObjectIdentity, Gauge32, Counter32, Unsigned32, ModuleIdentity, Bits, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "iso", "NotificationType", "Counter64", "ObjectIdentity", "Gauge32", "Counter32", "Unsigned32", "ModuleIdentity", "Bits", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity: CISCO-CABLE-DIAG-CAPABILITY under OID 1.3.6.1.4.1.9.7.394.
ciscoCableDiagCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 394))
ciscoCableDiagCapability.setRevisions(('2004-02-03 00:00',))
if mibBuilder.loadTexts: ciscoCableDiagCapability.setLastUpdated('200402030000Z')
if mibBuilder.loadTexts: ciscoCableDiagCapability.setOrganization('Cisco Systems, Inc.')
ciscoCableDiagCapCatOSV08R0301 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 394, 1))
# setProductRelease/setStatus only exist on newer pysmi/pysnmp builds,
# hence the version guards below.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoCableDiagCapCatOSV08R0301 = ciscoCableDiagCapCatOSV08R0301.setProductRelease('Cisco CatOS 8.3(1) on Catalyst 6000/6500\n and Cisco 7600 series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoCableDiagCapCatOSV08R0301 = ciscoCableDiagCapCatOSV08R0301.setStatus('current')
mibBuilder.exportSymbols("CISCO-CABLE-DIAG-CAPABILITY", PYSNMP_MODULE_ID=ciscoCableDiagCapability, ciscoCableDiagCapability=ciscoCableDiagCapability, ciscoCableDiagCapCatOSV08R0301=ciscoCableDiagCapCatOSV08R0301)
|
import cupy
import torch
import re
import math
kernel_AdaCoF_updateOutput = '''
extern "C" __global__ void kernel_AdaCoF_updateOutput(
const int n,
const float* input,
const float* weight,
const float* offset_i,
const float* offset_j,
float* output
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
float dblOutput = 0.0;
const int intSample = ( intIndex / SIZE_3(output) / SIZE_2(output) / SIZE_1(output) ) % SIZE_0(output);
const int c = ( intIndex / SIZE_3(output) / SIZE_2(output) ) % SIZE_1(output);
const int i = ( intIndex / SIZE_3(output) ) % SIZE_2(output);
const int j = ( intIndex ) % SIZE_3(output);
for (int k = 0; k < F_SIZE; k += 1) {
for (int l = 0; l < F_SIZE; l += 1) {
float w = VALUE_4(weight, intSample, k*F_SIZE+l, i, j);
float alpha = VALUE_4(offset_i, intSample, k*F_SIZE+l, i, j);
float beta = VALUE_4(offset_j, intSample, k*F_SIZE+l, i, j);
int A = (int) alpha;
int B = (int) beta;
int i_k_A = i+k*DILATION+A;
if(i_k_A < 0)
i_k_A = 0;
if(i_k_A > SIZE_2(input) - 1)
i_k_A = SIZE_2(input) - 1;
int j_l_B = j+l*DILATION+B;
if(j_l_B < 0)
j_l_B = 0;
if(j_l_B > SIZE_3(input) - 1)
j_l_B = SIZE_3(input) - 1;
int i_k_A_1 = i+k*DILATION+A+1;
if(i_k_A_1 < 0)
i_k_A_1 = 0;
if(i_k_A_1 > SIZE_2(input) - 1)
i_k_A_1 = SIZE_2(input) - 1;
int j_l_B_1 = j+l*DILATION+B+1;
if(j_l_B_1 < 0)
j_l_B_1 = 0;
if(j_l_B_1 > SIZE_3(input) - 1)
j_l_B_1 = SIZE_3(input) - 1;
dblOutput += w * (
VALUE_4(input, intSample, c, i_k_A, j_l_B)*(1-(alpha-(float)A))*(1-(beta-(float)B)) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B)*(alpha-(float)A)*(1-(beta-(float)B)) +
VALUE_4(input, intSample, c, i_k_A, j_l_B_1)*(1-(alpha-(float)A))*(beta-(float)B) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B_1)*(alpha-(float)A)*(beta-(float)B)
);
}
}
output[intIndex] = dblOutput;
} }
'''
kernel_AdaCoF_updateGradWeight = '''
extern "C" __global__ void kernel_AdaCoF_updateGradWeight(
const int n,
const float* gradLoss,
const float* input,
const float* offset_i,
const float* offset_j,
float* gradWeight
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
float floatOutput = 0.0;
const int intSample = ( intIndex / SIZE_3(gradWeight) / SIZE_2(gradWeight) / SIZE_1(gradWeight) ) % SIZE_0(gradWeight);
const int intDepth = ( intIndex / SIZE_3(gradWeight) / SIZE_2(gradWeight) ) % SIZE_1(gradWeight);
const int i = ( intIndex / SIZE_3(gradWeight) ) % SIZE_2(gradWeight);
const int j = ( intIndex ) % SIZE_3(gradWeight);
int k = intDepth / F_SIZE;
int l = intDepth % F_SIZE;
for (int c = 0; c < 3; c++)
{
float delta = VALUE_4(gradLoss, intSample, c, i, j);
float alpha = VALUE_4(offset_i, intSample, k*F_SIZE+l, i, j);
float beta = VALUE_4(offset_j, intSample, k*F_SIZE+l, i, j);
int A = (int) alpha;
int B = (int) beta;
int i_k_A = i+k*DILATION+A;
if(i_k_A < 0)
i_k_A = 0;
if(i_k_A > SIZE_2(input) - 1)
i_k_A = SIZE_2(input) - 1;
int j_l_B = j+l*DILATION+B;
if(j_l_B < 0)
j_l_B = 0;
if(j_l_B > SIZE_3(input) - 1)
j_l_B = SIZE_3(input) - 1;
int i_k_A_1 = i+k*DILATION+A+1;
if(i_k_A_1 < 0)
i_k_A_1 = 0;
if(i_k_A_1 > SIZE_2(input) - 1)
i_k_A_1 = SIZE_2(input) - 1;
int j_l_B_1 = j+l*DILATION+B+1;
if(j_l_B_1 < 0)
j_l_B_1 = 0;
if(j_l_B_1 > SIZE_3(input) - 1)
j_l_B_1 = SIZE_3(input) - 1;
floatOutput += delta * (
VALUE_4(input, intSample, c, i_k_A, j_l_B)*(1-(alpha-(float)A))*(1-(beta-(float)B)) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B)*(alpha-(float)A)*(1-(beta-(float)B)) +
VALUE_4(input, intSample, c, i_k_A, j_l_B_1)*(1-(alpha-(float)A))*(beta-(float)B) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B_1)*(alpha-(float)A)*(beta-(float)B)
);
}
gradWeight[intIndex] = floatOutput;
} }
'''
kernel_AdaCoF_updateGradAlpha = '''
extern "C" __global__ void kernel_AdaCoF_updateGradAlpha(
const int n,
const float* gradLoss,
const float* input,
const float* weight,
const float* offset_i,
const float* offset_j,
float* gradOffset_i
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
float floatOutput = 0.0;
const int intSample = ( intIndex / SIZE_3(gradOffset_i) / SIZE_2(gradOffset_i) / SIZE_1(gradOffset_i) ) % SIZE_0(gradOffset_i);
const int intDepth = ( intIndex / SIZE_3(gradOffset_i) / SIZE_2(gradOffset_i) ) % SIZE_1(gradOffset_i);
const int i = ( intIndex / SIZE_3(gradOffset_i) ) % SIZE_2(gradOffset_i);
const int j = ( intIndex ) % SIZE_3(gradOffset_i);
int k = intDepth / F_SIZE;
int l = intDepth % F_SIZE;
for (int c = 0; c < 3; c++)
{
float delta = VALUE_4(gradLoss, intSample, c, i, j);
float w = VALUE_4(weight, intSample, k*F_SIZE+l, i, j);
float alpha = VALUE_4(offset_i, intSample, k*F_SIZE+l, i, j);
float beta = VALUE_4(offset_j, intSample, k*F_SIZE+l, i, j);
int A = (int) alpha;
int B = (int) beta;
int i_k_A = i+k*DILATION+A;
if(i_k_A < 0)
i_k_A = 0;
if(i_k_A > SIZE_2(input) - 1)
i_k_A = SIZE_2(input) - 1;
int j_l_B = j+l*DILATION+B;
if(j_l_B < 0)
j_l_B = 0;
if(j_l_B > SIZE_3(input) - 1)
j_l_B = SIZE_3(input) - 1;
int i_k_A_1 = i+k*DILATION+A+1;
if(i_k_A_1 < 0)
i_k_A_1 = 0;
if(i_k_A_1 > SIZE_2(input) - 1)
i_k_A_1 = SIZE_2(input) - 1;
int j_l_B_1 = j+l*DILATION+B+1;
if(j_l_B_1 < 0)
j_l_B_1 = 0;
if(j_l_B_1 > SIZE_3(input) - 1)
j_l_B_1 = SIZE_3(input) - 1;
floatOutput += delta * w * (
- VALUE_4(input, intSample, c, i_k_A, j_l_B)*(1-(beta-(float)B)) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B)*(1-(beta-(float)B)) -
VALUE_4(input, intSample, c, i_k_A, j_l_B_1)*(beta-(float)B) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B_1)*(beta-(float)B)
);
}
gradOffset_i[intIndex] = floatOutput;
} }
'''
kernel_AdaCoF_updateGradBeta = '''
extern "C" __global__ void kernel_AdaCoF_updateGradBeta(
const int n,
const float* gradLoss,
const float* input,
const float* weight,
const float* offset_i,
const float* offset_j,
float* gradOffset_j
) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
float floatOutput = 0.0;
const int intSample = ( intIndex / SIZE_3(gradOffset_j) / SIZE_2(gradOffset_j) / SIZE_1(gradOffset_j) ) % SIZE_0(gradOffset_j);
const int intDepth = ( intIndex / SIZE_3(gradOffset_j) / SIZE_2(gradOffset_j) ) % SIZE_1(gradOffset_j);
const int i = ( intIndex / SIZE_3(gradOffset_j) ) % SIZE_2(gradOffset_j);
const int j = ( intIndex ) % SIZE_3(gradOffset_j);
int k = intDepth / F_SIZE;
int l = intDepth % F_SIZE;
for (int c = 0; c < 3; c++)
{
float delta = VALUE_4(gradLoss, intSample, c, i, j);
float w = VALUE_4(weight, intSample, k*F_SIZE+l, i, j);
float alpha = VALUE_4(offset_i, intSample, k*F_SIZE+l, i, j);
float beta = VALUE_4(offset_j, intSample, k*F_SIZE+l, i, j);
int A = (int) alpha;
int B = (int) beta;
int i_k_A = i+k*DILATION+A;
if(i_k_A < 0)
i_k_A = 0;
if(i_k_A > SIZE_2(input) - 1)
i_k_A = SIZE_2(input) - 1;
int j_l_B = j+l*DILATION+B;
if(j_l_B < 0)
j_l_B = 0;
if(j_l_B > SIZE_3(input) - 1)
j_l_B = SIZE_3(input) - 1;
int i_k_A_1 = i+k*DILATION+A+1;
if(i_k_A_1 < 0)
i_k_A_1 = 0;
if(i_k_A_1 > SIZE_2(input) - 1)
i_k_A_1 = SIZE_2(input) - 1;
int j_l_B_1 = j+l*DILATION+B+1;
if(j_l_B_1 < 0)
j_l_B_1 = 0;
if(j_l_B_1 > SIZE_3(input) - 1)
j_l_B_1 = SIZE_3(input) - 1;
floatOutput += delta * w * (
- VALUE_4(input, intSample, c, i_k_A, j_l_B)*(1-(alpha-(float)A)) -
VALUE_4(input, intSample, c, i_k_A_1, j_l_B)*(alpha-(float)A) +
VALUE_4(input, intSample, c, i_k_A, j_l_B_1)*(1-(alpha-(float)A)) +
VALUE_4(input, intSample, c, i_k_A_1, j_l_B_1)*(alpha-(float)A)
);
}
gradOffset_j[intIndex] = floatOutput;
} }
'''
def cupy_kernel(strFunction, intFilterSize, intDilation, objectVariables):
    """Return the CUDA source named *strFunction* with all placeholders expanded.

    SIZE_d(tensor) becomes the concrete size of dimension d; VALUE_d(tensor,
    i0, ..) becomes a strided flat-index access; F_SIZE and DILATION become
    the given filter size and dilation.  Tensor sizes/strides are looked up
    in *objectVariables* by name.
    """
    source = globals()[strFunction]
    # Expand every SIZE_d(tensor) into the concrete dimension size.
    while True:
        match = re.search(r'(SIZE_)([0-4])(\()([^\)]*)(\))', source)
        if match is None:
            break
        dim = int(match.group(2))
        tensor_name = match.group(4)
        sizes = objectVariables[tensor_name].size()
        source = source.replace(match.group(), str(sizes[dim]))
    # Expand every VALUE_d(tensor, idx...) into a strided element access.
    while True:
        match = re.search(r'(VALUE_)([0-4])(\()([^\)]+)(\))', source)
        if match is None:
            break
        rank = int(match.group(2))
        call_args = match.group(4).split(',')
        tensor_name = call_args[0]
        strides = objectVariables[tensor_name].stride()
        index_terms = [
            '((' + call_args[axis + 1].replace('{', '(').replace('}', ')').strip() + ')*' + str(strides[axis]) + ')'
            for axis in range(rank)
        ]
        source = source.replace(match.group(0), tensor_name + '[' + '+'.join(index_terms) + ']')
    source = source.replace('F_SIZE', str(intFilterSize))
    return source.replace('DILATION', str(intDilation))
@cupy.memoize(for_each_device=True)
def cupy_launch(strFunction, strKernel):
    """Compile *strKernel* and return the kernel named *strFunction* (memoized per device)."""
    module = cupy.cuda.compile_with_cache(strKernel)
    return module.get_function(strFunction)
class FunctionAdaCoF(torch.autograd.Function):
    """Custom autograd function for the AdaCoF operator (CUDA only).

    forward() computes, per output pixel, a weighted sum over a dilated
    F_SIZE x F_SIZE window whose taps are displaced by the learned
    (offset_i, offset_j) and bilinearly interpolated.  backward() launches
    one kernel each for the weight and the two offset gradients; the
    gradient w.r.t. the image itself is returned as zeros (never filled in
    by a kernel here).
    """

    @staticmethod
    def forward(ctx, input, weight, offset_i, offset_j, dilation):
        """Run the forward kernel.  All tensors must be contiguous CUDA tensors.

        input: (N, C, H, W); weight/offset_i/offset_j: (N, F*F, outH, outW).
        Raises NotImplementedError for CPU tensors.
        """
        ctx.save_for_backward(input, weight, offset_i, offset_j)
        ctx.dilation = dilation
        intSample = input.size(0)
        intInputDepth = input.size(1)
        intInputHeight = input.size(2)
        intInputWidth = input.size(3)
        # weight carries F*F taps per output pixel; recover the window size.
        intFilterSize = int(math.sqrt(weight.size(1)))
        intOutputHeight = weight.size(2)
        intOutputWidth = weight.size(3)
        # The input must be exactly large enough for the dilated window.
        assert (intInputHeight - ((intFilterSize - 1) * dilation + 1) == intOutputHeight - 1)
        assert (intInputWidth - ((intFilterSize - 1) * dilation + 1) == intOutputWidth - 1)
        assert (input.is_contiguous() == True)
        assert (weight.is_contiguous() == True)
        assert (offset_i.is_contiguous() == True)
        assert (offset_j.is_contiguous() == True)
        output = input.new_zeros(intSample, intInputDepth, intOutputHeight, intOutputWidth)
        if input.is_cuda == True:
            class Stream:
                # cupy expects an object exposing the raw stream pointer.
                ptr = torch.cuda.current_stream().cuda_stream
            n = output.nelement()
            cupy_launch('kernel_AdaCoF_updateOutput', cupy_kernel('kernel_AdaCoF_updateOutput', intFilterSize, dilation, {
                'input': input,
                'weight': weight,
                'offset_i': offset_i,
                'offset_j': offset_j,
                'output': output
            }))(
                grid=tuple([int((n + 512 - 1) / 512), 1, 1]),
                block=tuple([512, 1, 1]),
                args=[n, input.data_ptr(), weight.data_ptr(), offset_i.data_ptr(), offset_j.data_ptr(), output.data_ptr()],
                stream=Stream
            )
        elif input.is_cuda == False:
            raise NotImplementedError()
        return output

    @staticmethod
    def backward(ctx, gradOutput):
        """Compute gradients w.r.t. (input, weight, offset_i, offset_j, dilation).

        gradInput stays zeros (kept for interface compatibility); dilation
        gets None.  Each grad tensor is only allocated/computed when the
        corresponding input actually requires grad.
        """
        input, weight, offset_i, offset_j = ctx.saved_tensors
        dilation = ctx.dilation
        intSample = input.size(0)
        intInputDepth = input.size(1)
        intInputHeight = input.size(2)
        intInputWidth = input.size(3)
        intFilterSize = int(math.sqrt(weight.size(1)))
        intOutputHeight = weight.size(2)
        intOutputWidth = weight.size(3)
        assert (intInputHeight - ((intFilterSize - 1) * dilation + 1) == intOutputHeight - 1)
        assert (intInputWidth - ((intFilterSize - 1) * dilation + 1) == intOutputWidth - 1)
        assert (gradOutput.is_contiguous() == True)
        gradInput = input.new_zeros(intSample, intInputDepth, intInputHeight, intInputWidth) if ctx.needs_input_grad[0] == True else None
        gradWeight = input.new_zeros(intSample, intFilterSize ** 2, intOutputHeight, intOutputWidth) if ctx.needs_input_grad[1] == True else None
        gradOffset_i = input.new_zeros(intSample, intFilterSize ** 2, intOutputHeight, intOutputWidth) if ctx.needs_input_grad[2] == True else None
        # BUGFIX: offset_j is forward argument index 3; this previously
        # checked needs_input_grad[2] (offset_i's slot).
        gradOffset_j = input.new_zeros(intSample, intFilterSize ** 2, intOutputHeight, intOutputWidth) if ctx.needs_input_grad[3] == True else None
        if input.is_cuda == True:
            class Stream:
                ptr = torch.cuda.current_stream().cuda_stream
            # weight grad (skipped when the weight does not require grad,
            # which previously crashed on gradWeight.nelement()).
            if gradWeight is not None:
                n_w = gradWeight.nelement()
                cupy_launch('kernel_AdaCoF_updateGradWeight', cupy_kernel('kernel_AdaCoF_updateGradWeight', intFilterSize, dilation, {
                    'gradLoss': gradOutput,
                    'input': input,
                    'offset_i': offset_i,
                    'offset_j': offset_j,
                    'gradWeight': gradWeight
                }))(
                    grid=tuple([int((n_w + 512 - 1) / 512), 1, 1]),
                    block=tuple([512, 1, 1]),
                    args=[n_w, gradOutput.data_ptr(), input.data_ptr(), offset_i.data_ptr(), offset_j.data_ptr(), gradWeight.data_ptr()],
                    stream=Stream
                )
            # alpha (vertical offset) grad
            if gradOffset_i is not None:
                n_i = gradOffset_i.nelement()
                cupy_launch('kernel_AdaCoF_updateGradAlpha', cupy_kernel('kernel_AdaCoF_updateGradAlpha', intFilterSize, dilation, {
                    'gradLoss': gradOutput,
                    'input': input,
                    'weight': weight,
                    'offset_i': offset_i,
                    'offset_j': offset_j,
                    'gradOffset_i': gradOffset_i
                }))(
                    grid=tuple([int((n_i + 512 - 1) / 512), 1, 1]),
                    block=tuple([512, 1, 1]),
                    args=[n_i, gradOutput.data_ptr(), input.data_ptr(), weight.data_ptr(), offset_i.data_ptr(), offset_j.data_ptr(), gradOffset_i.data_ptr()],
                    stream=Stream
                )
            # beta (horizontal offset) grad
            if gradOffset_j is not None:
                n_j = gradOffset_j.nelement()
                cupy_launch('kernel_AdaCoF_updateGradBeta', cupy_kernel('kernel_AdaCoF_updateGradBeta', intFilterSize, dilation, {
                    'gradLoss': gradOutput,
                    'input': input,
                    'weight': weight,
                    'offset_i': offset_i,
                    'offset_j': offset_j,
                    'gradOffset_j': gradOffset_j
                }))(
                    grid=tuple([int((n_j + 512 - 1) / 512), 1, 1]),
                    block=tuple([512, 1, 1]),
                    args=[n_j, gradOutput.data_ptr(), input.data_ptr(), weight.data_ptr(), offset_i.data_ptr(), offset_j.data_ptr(), gradOffset_j.data_ptr()],
                    stream=Stream
                )
        elif input.is_cuda == False:
            raise NotImplementedError()
        return gradInput, gradWeight, gradOffset_i, gradOffset_j, None
|
import torch.nn as nn
from model.bilstm import BiLSTM
from model.crf import CRF
class BiLSTM_CRF(nn.Module):
    """Character-level BiLSTM encoder topped with a CRF decoding layer."""

    def __init__(self, data):
        super(BiLSTM_CRF, self).__init__()
        print("build batched lstmcrf...")
        self.gpu = data.HP_gpu
        # The downstream LSTM must score two extra labels (START/END) for the
        # CRF transitions, while the CRF itself keeps the original label size.
        label_size = data.label_alphabet_size
        data.label_alphabet_size += 2
        self.lstm = BiLSTM(data)
        self.crf = CRF(label_size, self.gpu)

    def neg_log_likelihood_loss(self, gaz_list, char_inputs, bichar_inputs, char_seq_lengths, batch_label, mask):
        """Return (negative log-likelihood loss, Viterbi tag sequence) for a gold-labelled batch."""
        emission_scores = self.lstm.get_output_score(gaz_list, char_inputs, bichar_inputs, char_seq_lengths)
        total_loss = self.crf.neg_log_likelihood_loss(emission_scores, mask, batch_label)
        _, tag_seq = self.crf._viterbi_decode(emission_scores, mask)
        return total_loss, tag_seq

    def forward(self, gaz_list, char_inputs, bichar_inputs, char_seq_lengths, mask):
        """Decode and return the most likely tag sequence for a batch."""
        emission_scores = self.lstm.get_output_score(gaz_list, char_inputs, bichar_inputs, char_seq_lengths)
        _, tag_seq = self.crf._viterbi_decode(emission_scores, mask)
        return tag_seq

    def get_lstm_features(self, gaz_list, char_inputs, bichar_inputs, char_seq_lengths):
        """Expose the raw BiLSTM features (before the CRF layer)."""
        return self.lstm.get_lstm_features(gaz_list, char_inputs, bichar_inputs, char_seq_lengths)
|
import discord
from discord.ext import commands
import json
# Read the Discord bot token from the auth config.  pathlib builds the path
# portably (the original hard-coded Windows backslashes, breaking on POSIX);
# on Windows the resulting path is equivalent.
from pathlib import Path

with open(Path("cogs") / "etc" / "Auth.json", "r") as Auth:
    loadjson = json.load(Auth)
    token = loadjson['DiscordToken']
class Bot(commands.Bot):
    """Discord bot configured to respond to the '&' command prefix."""

    def __init__(self):
        super().__init__(command_prefix="&")
def load_cogs(bot):
    """Load every cog extension into *bot* and return the list of those that failed."""
    extensions = [
        'cogs.status',
        'cogs.solo5x5',
        'cogs.flexsr',
        'cogs.tft',
        'cogs.events',
    ]
    failed = []
    for extension in extensions:
        try:
            bot.load_extension(extension)
        except Exception as err:
            # Report the failure but keep loading the remaining cogs.
            print(f"{err.__class__.__name__}: {str(err)}")
            failed.append(extension)
    if failed:
        print(f"\n{' '.join(failed)}를 로드하는데 실패했습니다.\n")
    return failed
# Script entry point: build the bot, load all cogs, then start the blocking
# gateway connection with the token read from the auth config above.
if __name__ == '__main__':
    bot = Bot()
    load_cogs(bot)
    bot.run(token)
|
"""
test_application
~~~~~~~~~~~~~~~~
Test the Sphinx class.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from unittest.mock import Mock
import pytest
from docutils import nodes
from sphinx.errors import ExtensionError
from sphinx.testing.util import strip_escseq
from sphinx.util import logging
def test_events(app, status, warning):
    """Exercise connect/emit/disconnect and the error paths of the event API."""
    def noop():
        pass
    # Connecting to an unknown event must fail loudly.
    with pytest.raises(ExtensionError) as excinfo:
        app.connect("invalid", noop)
    assert "Unknown event name: invalid" in str(excinfo.value)

    app.add_event("my_event")
    # Registering the same event twice is an error.
    with pytest.raises(ExtensionError) as excinfo:
        app.add_event("my_event")
    assert "Event 'my_event' already present" in str(excinfo.value)

    emit_args = (1, 3, "string")

    def on_my_event(a_app, *args):
        assert a_app is app
        assert args == emit_args
        return "ret"

    listener_id = app.connect("my_event", on_my_event)
    assert app.emit("my_event", *emit_args) == ["ret"], "Callback not called"
    app.disconnect(listener_id)
    assert app.emit("my_event", *emit_args) == [], \
        "Callback called when disconnected"
def test_emit_with_nonascii_name_node(app, status, warning):
    """Emitting an event with a node whose name is non-ASCII must not blow up."""
    section = nodes.section(names=['\u65e5\u672c\u8a9e'])
    app.emit('my_event', section)
def test_extensions(app, status, warning):
    """Loading a module without a setup() function produces a warning."""
    app.setup_extension('shutil')
    warning_text = strip_escseq(warning.getvalue())
    assert "extension 'shutil' has no setup() function" in warning_text
def test_extension_in_blacklist(app, status, warning):
    """Blacklisted extensions are refused with an explanatory warning."""
    app.setup_extension('sphinxjp.themecore')
    warning_text = strip_escseq(warning.getvalue())
    assert warning_text.startswith("WARNING: the extension 'sphinxjp.themecore' was")
@pytest.mark.sphinx(testroot='add_source_parser')
@pytest.mark.filterwarnings('ignore:The config variable "source_parsers"')
@pytest.mark.filterwarnings('ignore:app.add_source_parser\\(\\) does not support suffix')
def test_add_source_parser(app, status, warning):
    """Source suffixes/parsers can be registered via config and via the API."""
    assert set(app.config.source_suffix) == {'.rst', '.md', '.test'}
    # .rst; only in :confval:`source_suffix`
    assert '.rst' not in app.registry.get_source_parsers()
    assert app.registry.source_suffix['.rst'] is None
    # .md; configured by :confval:`source_suffix` and :confval:`source_parsers`
    assert '.md' in app.registry.get_source_parsers()
    assert app.registry.source_suffix['.md'] == '.md'
    assert app.registry.get_source_parsers()['.md'].__name__ == 'DummyMarkdownParser'
    # .test; configured by API
    assert app.registry.source_suffix['.test'] == 'test'
    assert 'test' in app.registry.get_source_parsers()
    assert app.registry.get_source_parsers()['test'].__name__ == 'TestSourceParser'
@pytest.mark.sphinx(testroot='extensions')
def test_add_is_parallel_allowed(app, status, warning):
    """Check parallel read/write flags for each extension declaration style.

    The phases below are order-dependent: each extension is installed,
    checked, then popped before the next one, and the warning stream is
    truncated between phases.
    """
    logging.setup(app, status, warning)
    # Baseline: no extensions, everything parallel, no warnings.
    assert app.is_parallel_allowed('read') is True
    assert app.is_parallel_allowed('write') is True
    assert warning.getvalue() == ''
    # Extension declaring parallel_read_safe=True.
    app.setup_extension('read_parallel')
    assert app.is_parallel_allowed('read') is True
    assert app.is_parallel_allowed('write') is True
    assert warning.getvalue() == ''
    app.extensions.pop('read_parallel')
    # Extension declaring only write parallelism: read safety is unknown,
    # so parallel read is disallowed and a warning is emitted.
    app.setup_extension('write_parallel')
    assert app.is_parallel_allowed('read') is False
    assert app.is_parallel_allowed('write') is True
    assert ("the write_parallel extension does not declare if it is safe "
            "for parallel reading, assuming it isn't - please ") in warning.getvalue()
    app.extensions.pop('write_parallel')
    warning.truncate(0)  # reset warnings
    # Extension explicitly not read-safe.
    app.setup_extension('read_serial')
    assert app.is_parallel_allowed('read') is False
    assert "the read_serial extension is not safe for parallel reading" in warning.getvalue()
    warning.truncate(0)  # reset warnings
    assert app.is_parallel_allowed('write') is True
    assert warning.getvalue() == ''
    app.extensions.pop('read_serial')
    # Extension explicitly not write-safe; read safety again unknown.
    app.setup_extension('write_serial')
    assert app.is_parallel_allowed('read') is False
    assert app.is_parallel_allowed('write') is False
    assert ("the write_serial extension does not declare if it is safe "
            "for parallel reading, assuming it isn't - please ") in warning.getvalue()
    app.extensions.pop('write_serial')
    warning.truncate(0)  # reset warnings
@pytest.mark.sphinx('dummy', testroot='root')
def test_build_specific(app):
    """build() filters the requested paths down to buildable source documents."""
    app.builder.build = Mock()
    requested = [
        app.srcdir / 'index.txt',                      # normal
        app.srcdir / 'images',                         # without suffix
        app.srcdir / 'notfound.txt',                   # not found
        app.srcdir / 'img.png',                        # unknown suffix
        '/index.txt',                                  # external file
        app.srcdir / 'subdir',                         # directory
        app.srcdir / 'subdir/includes.txt',            # file on subdir
        app.srcdir / 'subdir/../subdir/excluded.txt',  # not normalized
    ]
    app.build(False, requested)
    app.builder.build.assert_called_with(
        ['index', 'img.png', 'subdir/includes', 'subdir/excluded'],
        method='specific',
        summary='4 source files given on command line')
|
import numpy as np

# Multiply a 3x4 matrix A by a 4x3 matrix C and compare the product against
# the reference values in x (the second print shows the residual).
A = np.array([[0.588679, 0.674133, -0.147233, -0.220814],
              [-0.799857, -0.0946959, 0.00393738, -0.0674015],
              [0.830193, 0.269214, 0.209219, -0.265506]])
C = np.array([[-0.144183, 0.539951, 0.827288],
              [0.258239, -0.533043, 0.480274],
              [0.561599, -0.350697, -0.85463],
              [0.801968, 0.579835, -0.14939]])
prod = A @ C
x = np.array([[-0.170562, -0.117886, 0.969592],
              [0.0390291, -0.421869, -0.700488],
              [-0.145608, 0.0774382, 0.676964]])
print(prod)
print(prod - x)
|
################################################################################
# Copyright 2016-2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from .Common import printExit, printWarning, versionIsCompatible
from .SolutionStructs import Solution, ProblemSizes, ProblemType
from . import __version__
from . import Common
from . import SolutionLibrary
# PyYAML is mandatory: every Tensile config/logic file is YAML.
try:
    import yaml
except ImportError:
    printExit("You must install PyYAML to use Tensile (to parse config files). See http://pyyaml.org/wiki/PyYAML for installation instructions.")
# msgpack is optional; only the MessagePack output backend needs it.
try:
    import msgpack
except ImportError:
    print("Message pack python library not detected. Must use YAML backend instead.")
###################
# Writing functions
###################
def write(filename_noExt, data, format="yaml"):
    """Serialize *data* to *filename_noExt* plus a format-specific extension.

    Supported formats: "yaml" (.yaml) and "msgpack" (.dat); anything else
    aborts via printExit.
    """
    if format == "yaml":
        writeYAML(filename_noExt + ".yaml", data)
    elif format == "msgpack":
        writeMsgPack(filename_noExt + ".dat", data)
    else:
        printExit("Unrecognized format {}".format(format))
def writeYAML(filename, data, **kwargs):
    """Dump *data* as YAML to *filename*.

    Unless overridden by the caller, documents get explicit start/end
    markers and the default flow style.
    """
    kwargs.setdefault("explicit_start", True)
    kwargs.setdefault("explicit_end", True)
    kwargs.setdefault("default_flow_style", None)
    with open(filename, "w") as stream:
        yaml.dump(data, stream, **kwargs)
def writeMsgPack(filename, data):
    """Dump *data* to *filename* in MessagePack binary format."""
    with open(filename, "wb") as stream:
        msgpack.pack(data, stream)
def writeSolutions(filename, problemSizes, solutions):
    """Write a solutions YAML file (version header, problem sizes, solutions).

    *solutions* is a list of lists (grouped by hardcoded parameters); each
    Solution object is flattened into plain nested dicts before dumping.
    """
    def _flattenSolution(solution):
        # Convert a Solution object to a plain dict, replacing the enum-like
        # data type members with their raw values so YAML can serialize them.
        state = solution.getAttributes()
        state["ProblemType"] = state["ProblemType"].state
        for key in ("DataType", "DestDataType", "ComputeDataType"):
            state["ProblemType"][key] = state["ProblemType"][key].value
        return state

    solutionStates = [_flattenSolution(solution)
                      for hardcoded in solutions
                      for solution in hardcoded]
    with open(filename, "w") as f:
        f.write("- MinimumRequiredVersion: %s\n" % __version__)
        f.write("- ProblemSizes:\n")
        if problemSizes:
            for sizeRange in problemSizes.ranges:
                f.write("  - Range: %s\n" % sizeRange)
            for problemExact in problemSizes.exacts:
                # FIXME-problem, this ignores strides:
                f.write("  - Exact: %s\n" % str(problemExact))
        yaml.dump(solutionStates, f, default_flow_style=None)
###############################
# Reading and parsing functions
###############################
def readYAML(filename):
    """Load and return the YAML document stored in *filename* (safe loader)."""
    with open(filename, "r") as f:
        return yaml.load(f, yaml.SafeLoader)
def parseSolutionsFile(filename):
    """Read *filename* and parse it as a solutions file."""
    data = readYAML(filename)
    return parseSolutionsData(data, filename)
def parseSolutionsData(data, srcFile="?"):
    """Parse problem sizes and solutions from the data of a solutions file.

    *data* layout: [{"MinimumRequiredVersion": ...}, {"ProblemSizes": ...},
    solutionState, ...].  Returns a (ProblemSizes, [Solution]) tuple.
    """
    if len(data) < 3:
        # BUGFIX: the closing parenthesis was missing from this message.
        printExit("Solution file {} is missing required fields (len = {} < 3)".format(srcFile, len(data)))
    versionString = data[0]["MinimumRequiredVersion"]
    if not versionIsCompatible(versionString):
        # BUGFIX: format arguments were swapped -- srcFile filled the
        # version slot and versionString the file-name slot.
        printWarning("Version = {} in solution file {} does not match Tensile version = {}" \
            .format(versionString, srcFile, __version__) )
    if "ProblemSizes" not in data[1]:
        printExit("Solution file {} doesn't begin with ProblemSizes".format(srcFile))
    problemSizesConfig = data[1]["ProblemSizes"]
    solutions = []
    for solutionState in data[2:]:
        # Force re-derivation of parameters so old-version logic yamls can
        # still be validated.
        solutionState["AssignedProblemIndependentDerivedParameters"] = False
        solutionState["AssignedDerivedParameters"] = False
        solutions.append(Solution(solutionState))
    problemType = solutions[0]["ProblemType"]
    problemSizes = ProblemSizes(problemType, problemSizesConfig)
    return (problemSizes, solutions)
def parseLibraryLogicFile(filename):
    """Read *filename* and parse it as a library logic file."""
    data = readYAML(filename)
    return parseLibraryLogicData(data, filename)
def parseLibraryLogicData(data, srcFile="?"):
    """Parse a library logic document into its typed components.

    Returns (scheduleName, deviceNames, problemType, solutions, indexOrder,
    exactLogic, rangeLogic, masterLibrary, architectureName).
    """
    if len(data) < 9:
        printExit("Library logic file {} is missing required fields (len = {} < 9)".format(srcFile, len(data)))
    versionString = data[0]["MinimumRequiredVersion"]
    scheduleName = data[1]
    # Newer files store the architecture in a mapping; older ones as a plain string.
    architectureName = data[2] if isinstance(data[2], str) else data[2]["Architecture"]
    deviceNames = data[3]
    problemTypeState = data[4]
    solutionStates = data[5]
    indexOrder = data[6]
    exactLogic = data[7]
    rangeLogic = data[8]
    if not versionIsCompatible(versionString):
        # BUGFIX: format arguments were swapped -- srcFile filled the
        # version slot and versionString the file-name slot.
        printWarning("Version = {} in library logic file {} does not match Tensile version = {}" \
            .format(versionString, srcFile, __version__) )
    # unpack problemType
    problemType = ProblemType(problemTypeState)
    # unpack solutions
    solutions = []
    for solutionState in solutionStates:
        # Assembly kernels need the concrete ISA tuple of the target architecture.
        if solutionState["KernelLanguage"] == "Assembly":
            solutionState["ISA"] = Common.gfxArch(architectureName)
        else:
            solutionState["ISA"] = (0, 0, 0)
        # Force re-derivation of parameters so old-version logic yamls can
        # still be validated.
        solutionState["AssignedProblemIndependentDerivedParameters"] = False
        solutionState["AssignedDerivedParameters"] = False
        solutionObject = Solution(solutionState)
        if solutionObject["ProblemType"] != problemType:
            printExit("ProblemType in library logic file {} doesn't match solution: {} != {}" \
                .format(srcFile, problemType, solutionObject["ProblemType"]))
        solutions.append(solutionObject)
    newLibrary = SolutionLibrary.MasterSolutionLibrary.FromOriginalState(data, solutions)
    return (scheduleName, deviceNames, problemType, solutions, indexOrder, \
        exactLogic, rangeLogic, newLibrary, architectureName)
def rawLibraryLogic(data):
    """Return the positional fields of a raw library logic document as a tuple.

    The first nine entries are fixed; anything beyond them is collected
    into *otherFields* untouched.
    """
    versionString = data[0]
    scheduleName = data[1]
    architectureName = data[2]
    deviceNames = data[3]
    problemTypeState = data[4]
    solutionStates = data[5]
    indexOrder = data[6]
    exactLogic = data[7]
    rangeLogic = data[8]
    otherFields = list(data[9:])
    return (versionString, scheduleName, architectureName, deviceNames,
            problemTypeState, solutionStates, indexOrder, exactLogic,
            rangeLogic, otherFields)
#################
# Other functions
#################
def createLibraryLogic(schedulePrefix, architectureName, deviceNames, logicTuple):
    """Build the YAML-serializable document for a library logic file.

    logicTuple = (problemType, solutions, indexOrder, exactLogic, rangeLogic
    [, tileSolutions, tileSelectionIndices, extraField]).  Returns the list
    of documents in the fixed positional order expected by the parsers.
    """
    def _flattenSolution(solution):
        # Convert a Solution object to a plain dict, replacing the enum-like
        # data type members with their raw values so YAML can serialize them.
        state = solution.getAttributes()
        state["ProblemType"] = state["ProblemType"].state
        for key in ("DataType", "DestDataType", "ComputeDataType"):
            state["ProblemType"][key] = state["ProblemType"][key].value
        return state

    problemType = logicTuple[0]
    solutions = logicTuple[1]
    indexOrder = logicTuple[2]
    exactLogic = logicTuple[3]
    rangeLogic = logicTuple[4]
    tileSelection = len(logicTuple) > 5 and bool(logicTuple[5])

    data = []
    # Tensile version
    data.append({"MinimumRequiredVersion": __version__})
    # schedule name (e.g. vega10 rather than Tensile) and target architecture
    data.append(schedulePrefix)
    data.append(architectureName)
    # schedule device names
    data.append(deviceNames)
    # problem type, with enum data types replaced by their raw values
    problemTypeState = problemType.state
    for key in ("DataType", "DestDataType", "ComputeDataType"):
        problemTypeState[key] = problemTypeState[key].value
    data.append(problemTypeState)
    # solutions (plus tile-selection solutions when present)
    solutionList = [_flattenSolution(solution) for solution in solutions]
    if tileSelection:
        solutionList.extend(_flattenSolution(solution) for solution in logicTuple[5])
    data.append(solutionList)
    # index order
    data.append(indexOrder)
    # exact logic: mapping -> list of [problemSize, result] pairs
    data.append([[list(key), value] for key, value in exactLogic.items()])
    # range logic
    data.append(rangeLogic)
    # tile selection indices (or a None placeholder to keep positions fixed)
    if tileSelection:
        data.append({"TileSelectionIndices": logicTuple[6]})
    else:
        data.append(None)
    data.append(logicTuple[7])
    return data
|
import re
import requests
# Scrape a video link from the site's video listing page.
respose=requests.get('http://www.xiaohuar.com/v/')
print(respose.status_code)  # response status code
print(respose.content)  # raw response bytes
print(respose.text)  # decoded text body
# re.S lets '.' match newlines, so the whole document is matched as one line.
urls=re.findall(r'class="items".*?href="(.*?)"',respose.text,re.S)
# NOTE(review): the original comment said "the 5th url" but index 6 is used -- verify.
url=urls[6]
result=requests.get(url)
# Extract the actual video address from the item page.
mp4_url=re.findall(r'id="media".*?src="(.*?)"',result.text,re.S)[0]
print(mp4_url)
# The commented-out code below would download the video to D:\a.mp4:
# video=requests.get(mp4_url)
# with open('D:\\a.mp4','wb') as f:
#     f.write(video.content)
|
from typing import List, Optional, Generator, Tuple
from copy import deepcopy
from curses import ascii
from .pane import Pane
from .mark import Highlight, Mark
from .ui import UI
from .utils import flatten
class BreakTheLoop(Exception):
    """Control-flow signal used to exit the interactive render loop."""
class PanesRenderer:
    """Renders panes with marks and handles user_input"""

    def __init__(self, ui: UI, panes: List[Pane]):
        self.ui = ui
        self.panes = panes
        self.secondary_mode = False

    def loop(self) -> None:
        """Draw panes and hints, narrowing candidate marks by the typed input."""
        typed = ''
        while True:
            panes = _discard_marks_that_dont_match_user_input(deepcopy(self.panes), typed)
            if typed:
                chosen_mark = _the_only_mark_left(panes)
                if chosen_mark:
                    # Exactly one candidate remains: act on it and quit.
                    if self.secondary_mode:
                        chosen_mark.perform_secondary_action()
                    else:
                        chosen_mark.perform_primary_action()
                    break
            for pane in panes:
                self._render_pane_text(pane)
                self._overlay_marks(pane, typed)
            try:
                typed = self._handle_user_input(typed)
            except BreakTheLoop:
                break

    def _handle_user_input(self, user_input: str) -> str:
        """Process one keypress and return the updated input buffer.

        ESC quits; backspace trims one character (or quits when the buffer
        is already empty); space toggles primary/secondary action mode;
        anything else is appended to the buffer.
        """
        char = self.ui.getch()
        if char == ascii.ESC:
            raise BreakTheLoop
        # backspace (ascii.BS does not work for some reason)
        if char == 127:
            if not user_input:
                raise BreakTheLoop
            return user_input[:-1]
        if char == ascii.SP:
            self.secondary_mode = not self.secondary_mode
            return user_input
        return user_input + chr(char)

    def _render_top_border(self, pane: Pane) -> None:
        width = pane.right - pane.left + 1
        self.ui.render_line(pane.top - 1, pane.left, '─' * width, self.ui.DIM)

    def _render_left_border(self, pane: Pane) -> None:
        height = pane.bottom - pane.top + 1
        for row in range(height):
            self.ui.render_line(pane.top + row, pane.left - 1, '│', self.ui.DIM)

    def _render_pane_text(self, pane: Pane) -> None:
        """Draw the pane body, with borders when it doesn't touch a screen edge."""
        if pane.top > 0:
            self._render_top_border(pane)
        if pane.left > 0:
            self._render_left_border(pane)
        for row, line in enumerate(pane.text.split('\n')):
            self.ui.render_line(pane.top + row, pane.left, line, self.ui.DIM)

    def _overlay_marks(self, pane: Pane, user_input: str) -> None:
        """Draw highlighted mark text and the not-yet-typed part of each hint."""
        for line_start, line_top, highlight in _get_highlights(pane):
            mark_left = pane.left + highlight.start - line_start
            self.ui.render_line(line_top, mark_left, highlight.text, self.ui.BOLD)
            if isinstance(highlight, Mark) and highlight.hint:
                # Hide the hint prefix the user has already typed.
                remaining_hint = highlight.hint[len(user_input):]
                bg = self.ui.BLACK_ON_YELLOW if self.secondary_mode else self.ui.BLACK_ON_CYAN
                self.ui.render_line(line_top, mark_left + len(user_input), remaining_hint, bg | self.ui.BOLD)
def _get_highlights(pane: Pane) -> Generator[Tuple[int, int, Highlight], None, None]:
    """Yield (line_start_offset, screen_row, highlight) for each pane mark.

    Offsets index into pane.text with newline characters excluded (each
    line contributes len(line) to the running total).  A mark that runs
    past the end of its line is split: the head is yielded on the current
    row and the tail as a plain Highlight on the next row.
    NOTE(review): the split handles a single wrapped row only — a mark
    spanning three or more rows keeps its whole remaining tail on one row.
    """
    running_character_total = 0
    for ln, line in enumerate(pane.text.split('\n')):
        line_start = running_character_total
        running_character_total += len(line)
        line_end = running_character_total
        line_top = pane.top + ln
        marks_that_start_on_current_line: List[Highlight] = [
            m for m in pane.marks if line_end > m.start >= line_start
        ]
        for mark in marks_that_start_on_current_line:
            if mark.end > line_end:
                # Mark wraps: truncate it in place, emit the overflow as a
                # separate highlight on the following row.
                tail_length = mark.end - line_end
                tail_text = mark.text[-tail_length:]
                mark.text = mark.text[:-tail_length]
                yield (line_start, line_top, mark)
                wrapped_mark_tail = Highlight(text=tail_text, start=line_end)
                yield (line_end, line_top + 1, wrapped_mark_tail)
            else:
                yield (line_start, line_top, mark)
def _discard_marks_that_dont_match_user_input(panes: List[Pane], user_input: str) -> List[Pane]:
    """Keep only hinted marks whose hint starts with what the user typed."""
    for pane in panes:
        kept = []
        for mark in pane.marks:
            if mark.hint and mark.hint.startswith(user_input):
                kept.append(mark)
        pane.marks = kept
    return panes
def _the_only_mark_left(panes: List[Pane]) -> Optional[Mark]:
    """Return the single remaining mark across all panes, else None."""
    marks_left = flatten([list(p.marks) for p in panes])
    if len(marks_left) != 1:
        return None
    return marks_left[0]
|
import urllib.request
import json
import getpass
import configparser
from player_if import Player_IF
class IF_Mopidy(Player_IF):
    """Player interface that talks to a Mopidy server over JSON-RPC."""
    # Display name and accent colour for this player backend.
    name = "Mopidy"
    color = "#00A2E8"
    # Class-level defaults; overwritten per instance in __init__.
    useMopidy = False
    artistName = ""
    albumName = ""
    def __init__(self, config):
        """Read the [Player] section of *config* and, when Mopidy is
        enabled, build the JSON-RPC endpoint URL and load mopidy.conf."""
        self.config = config
        self.useMopidy = self.config.getboolean('Player', 'UseMopidy', fallback=False)
        if self.useMopidy:
            print('Initialise Mopidy interface ... ', end='')
            server = config.get('Player', 'MopidyServer', fallback="localhost")
            port = config.get('Player', 'MopidyPort', fallback="6680")
            self.url = 'http://' + server + ':' + port + '/mopidy/rpc'
            self.SaveCoverInSongFolder = config.getboolean('Cover', 'SaveCoverInSongFolder', fallback=False)
            self.initMopidyConfig()
            print('OK')
        else:
            print('Mopidy not in use')
    def initMopidyConfig(self):
        """Load Mopidy's own config file (system-wide for root, otherwise
        the per-user file) so media_dir can be looked up later."""
        username = getpass.getuser()
        if username == "root":
            mopidyConfigFile = "/etc/mopidy/mopidy.conf"
        else:
            mopidyConfigFile = "/home/" + username + "/.config/mopidy/mopidy.conf"
        self.mopidyConfig = configparser.ConfigParser()
        self.mopidyConfig.read(mopidyConfigFile)
    def executeCommand(self, command):
        """POST a raw JSON-RPC request string to Mopidy; returns the
        http.client response object."""
        #print('Send to Mopidy: ' + self.url + ' ' + command)
        value_bytes = str.encode(command)
        return urllib.request.urlopen(self.url, value_bytes)
    def isPlaying(self):
        """Return True when Mopidy reports state "playing".

        NOTE(review): returns the string 'Exception' on connection errors
        and implicitly None when Mopidy is disabled — callers must handle
        all three result kinds.
        """
        if self.useMopidy:
            try:
                resp = self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.get_state"}')
            except urllib.error.URLError as e:
                #print(__name__, "getStatus", e.reason)
                return 'Exception' # working in main?
            string = resp.read().decode('utf-8')
            #print(string)
            json_obj = json.loads(string)
            return json_obj['result'] == "playing"
    def getSongAttributes(self):
        """Query the current track and return (artist, album, track,
        length_ms); also caches artist/album/uri on the instance.

        Returns the string 'Exception' on connection errors.
        """
        try:
            resp = self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.get_current_track"}')
        except urllib.error.URLError as e:
            print(__name__, "getSongAttributes", e.reason)
            return 'Exception'
        string = resp.read().decode('utf-8')
        #print(string)
        json_obj = json.loads(string)
        artist = ''
        album = ''
        track = ''
        uri = ''
        songLength_ms = 0
        # Every field is optional in Mopidy's reply; fall back to defaults.
        if 'result' in json_obj and json_obj['result'] is not None:
            if 'artists' in json_obj['result'] and 'name' in json_obj['result']['artists'][0]:
                artist = json_obj['result']['artists'][0]['name']
            if 'album' in json_obj['result'] and 'name' in json_obj['result']['album']:
                album = json_obj['result']['album']['name']
            if 'name' in json_obj['result']:
                track = json_obj['result']['name']
            if 'length' in json_obj['result']:
                songLength_ms = json_obj['result']['length']
            if 'uri' in json_obj['result']:
                uri = json_obj['result']['uri']
        # Cache for later cover/path lookups.
        self.artistName = artist
        self.albumName = album
        self.uri = uri
        return artist, album, track, songLength_ms
    def getSongPosition(self):
        # not implemented yet
        return 0
    def getCoverImage(self, coverLoader):
        """Fetch the cover via the loader, optionally caching it in the
        song's folder (per the SaveCoverInSongFolder setting)."""
        if self.SaveCoverInSongFolder:
            return coverLoader.getCoverFromLastFmOrLocal(self.artistName, self.albumName, self.getSongPath())
        else:
            return coverLoader.getCoverFromLastFmOrLocal(self.artistName, self.albumName)
    def getSongPath(self):
        """Derive the album folder on disk from Mopidy's media_dir and the
        cached track URI (strips the scheme prefix and the file name)."""
        # get media directory path from mopidy config
        mediaPath = self.mopidyConfig.get('local', 'media_dir')
        # URI layout assumed: <scheme>:<album path>/<file> — slice between
        # the last ':' and the last '/' to drop scheme and file name.
        startIdx = self.uri.rfind(':')
        endIdx = self.uri.rfind('/')
        if startIdx == -1:
            startIdx = 0
        if endIdx == -1:
            endIdx = 0
        # create complete song path from media path and album path from URI from song attributes
        songPath = mediaPath + '/' + self.uri[startIdx+1:endIdx]
        # decode percent-escapes left in the URI path
        songPath = self.transAllHexToCharacter(songPath)
        return songPath
    # transform all hex codes in string to according character (eg %20 = space)
    def transAllHexToCharacter(self, str):
        # NOTE(review): the parameter shadows the builtin `str`; also the
        # isdigit() guard only decodes escapes whose two hex digits are
        # 0-9 (e.g. %20), so %2F and the like are left untouched — confirm
        # whether that is intentional.
        i = 0
        while i < len(str):
            if str[i] == '%':
                hex = str[i+1:i+3]
                if hex[0] is not None and hex[0].isdigit() and hex[1] is not None and hex[1].isdigit():
                    char = bytearray.fromhex(hex).decode()
                    # replaces every occurrence of this escape, not just
                    # the one at position i
                    str = str.replace('%'+hex, char)
            i += 1
        return str
    def play(self):
        """Resume playback."""
        self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.play"}')
    def pause(self):
        """Pause playback."""
        self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.pause"}')
    def stop(self):
        """Stop playback."""
        self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.stop"}')
    def next(self):
        """Skip to the next track."""
        self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.next"}')
    def prev(self):
        """Return to the previous track."""
        self.executeCommand('{"jsonrpc": "2.0", "id": 1, "method": "core.playback.previous"}')
    def getPlayImg(self):
        # Mopidy-branded play icon.
        return self.config.get('System','Path') + "/images/play_mopi_70.png"
    def getPauseImg(self):
        # Mopidy-branded pause icon.
        return self.config.get('System','Path') + "/images/pause_mopi_70.png"
    def getColor(self):
        return IF_Mopidy.color
    def getPlayerName(self):
        return IF_Mopidy.name
|
from pathlib import Path
import random
from babeval.vocab import get_vocab
# Sampling sizes for building the probe sentences (see main()).
NUM_SUBJECT_NOUNS_FROM_EACH_LIST = 50  # some number smaller than length of both singular and plural noun lists
NUM_OBJECT_NOUNS_FROM_EACH_LIST = 8  # some number smaller than length of both singular and plural noun lists
NUM_ADJECTIVES = 4
NUM_PREPOSITIONS = 2
# Slots: subject noun, prepositional phrase, adjective.
template = 'the {} {} [MASK] {} .'
# Each word list is loaded from disk and filtered to words that are in
# the model vocabulary.
nouns_plural = (Path(__file__).parent / 'word_lists' / 'nouns_plural_annotator2.txt').open().read().split()
nouns_plural = [w for w in nouns_plural if w in get_vocab()]
nouns_singular = (Path(__file__).parent / 'word_lists' / 'nouns_singular_annotator2.txt').open().read().split()
nouns_singular = [w for w in nouns_singular if w in get_vocab()]
prepositions = (Path(__file__).parent / 'word_lists' / 'prepositions_annotator2.txt').open().read().split()
prepositions = [w for w in prepositions if w in get_vocab()]
adjectives = (Path(__file__).parent / 'word_lists' / 'adjectives_annotator2.txt').open().read().split()
adjectives = [w for w in adjectives if w in get_vocab()]
def main():
    """Generate probe sentences like "the dog on the mat [MASK] brown".

    considerations:
    1. use equal proportion of sentences containing plural vs. singular subject nouns
    2. use equal proportion of plural vs. singular object nouns in singular vs. plural sentences
    3. use the same prepositional phrases for sentences with singular and plural subject nouns
    """
    random.seed(3)
    assert NUM_ADJECTIVES <= len(adjectives)
    assert NUM_PREPOSITIONS <= len(prepositions)
    assert NUM_SUBJECT_NOUNS_FROM_EACH_LIST < len(nouns_singular)
    assert NUM_SUBJECT_NOUNS_FROM_EACH_LIST < len(nouns_plural)
    # The order of the random.sample() calls is fixed so the seeded RNG
    # reproduces the same sentence set on every run.
    nouns_subject_balanced = (random.sample(nouns_singular, k=NUM_SUBJECT_NOUNS_FROM_EACH_LIST)
                              + random.sample(nouns_plural, k=NUM_SUBJECT_NOUNS_FROM_EACH_LIST))
    nouns_object_balanced = (random.sample(nouns_singular, k=NUM_OBJECT_NOUNS_FROM_EACH_LIST)
                             + random.sample(nouns_plural, k=NUM_OBJECT_NOUNS_FROM_EACH_LIST))
    adjectives_sample = random.sample(adjectives, k=NUM_ADJECTIVES)
    prepositions_sample = random.sample(prepositions, k=NUM_PREPOSITIONS)
    # Cross every sampled preposition with every object noun.
    prepositional_phrases = [
        preposition + ' ' + 'the' + ' ' + noun_object
        for preposition in prepositions_sample
        for noun_object in nouns_object_balanced
    ]
    print(f'Made {len(prepositional_phrases)} prepositional phrases')
    for noun_subject in nouns_subject_balanced:
        for prepositional_phrase in prepositional_phrases:
            for adjective in adjectives_sample:
                yield template.format(noun_subject, prepositional_phrase, adjective)
|
# $Id: sccp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Skinny Client Control Protocol."""
import dpkt
# SCCP message-type IDs (values of the little-endian msgid header field);
# those present in SCCP._msgsw get their body decoded automatically.
KEYPAD_BUTTON = 0x00000003
OFF_HOOK = 0x00000006
ON_HOOK = 0x00000007
OPEN_RECEIVE_CHANNEL_ACK = 0x00000022
START_TONE = 0x00000082
STOP_TONE = 0x00000083
SET_LAMP = 0x00000086
SET_SPEAKER_MODE = 0x00000088
START_MEDIA_TRANSMIT = 0x0000008A
STOP_MEDIA_TRANSMIT = 0x0000008B
CALL_INFO = 0x0000008F
DEFINE_TIME_DATE = 0x00000094
DISPLAY_TEXT = 0x00000099
OPEN_RECEIVE_CHANNEL = 0x00000105
CLOSE_RECEIVE_CHANNEL = 0x00000106
SELECT_SOFTKEYS = 0x00000110
CALL_STATE = 0x00000111
DISPLAY_PROMPT_STATUS = 0x00000112
CLEAR_PROMPT_STATUS = 0x00000113
ACTIVATE_CALL_PLANE = 0x00000116
class ActivateCallPlane(dpkt.Packet):
    """Body of an ACTIVATE_CALL_PLANE message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('line_instance', 'I', 0),
    )
class CallInfo(dpkt.Packet):
    """Body of a CALL_INFO message: calling/called party details."""
    __byte_order__ = '<'
    __hdr__ = (
        ('calling_party_name', '40s', ''),
        ('calling_party', '24s', ''),
        ('called_party_name', '40s', ''),
        ('called_party', '24s', ''),
        ('line_instance', 'I', 0),
        ('call_id', 'I', 0),
        ('call_type', 'I', 0),
        ('orig_called_party_name', '40s', ''),
        ('orig_called_party', '24s', '')
    )
class CallState(dpkt.Packet):
    """Body of a CALL_STATE message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('call_state', 'I', 12), # 12: Proceed, 15: Connected
        ('line_instance', 'I', 1),
        ('call_id', 'I', 0)
    )
class ClearPromptStatus(dpkt.Packet):
    """Body of a CLEAR_PROMPT_STATUS message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('line_instance', 'I', 1),
        ('call_id', 'I', 0)
    )
class CloseReceiveChannel(dpkt.Packet):
    """Body of a CLOSE_RECEIVE_CHANNEL message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('conference_id', 'I', 0),
        ('passthruparty_id', 'I', 0),
    )
class DisplayPromptStatus(dpkt.Packet):
    """Body of a DISPLAY_PROMPT_STATUS message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('msg_timeout', 'I', 0),
        ('display_msg', '32s', ''),
        ('line_instance', 'I', 1),
        ('call_id', 'I', 0)
    )
class DisplayText(dpkt.Packet):
    """Body of a DISPLAY_TEXT message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('display_msg', '36s', ''),
    )
class KeypadButton(dpkt.Packet):
    """Body of a KEYPAD_BUTTON message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('button', 'I', 0),
    )
class OpenReceiveChannel(dpkt.Packet):
    """Body of an OPEN_RECEIVE_CHANNEL message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('conference_id', 'I', 0),
        ('passthruparty_id', 'I', 0),
        ('ms_packet', 'I', 0),
        ('payload_capability', 'I', 4), # 4: G.711 u-law 64k
        ('echo_cancel_type', 'I', 4),
        ('g723_bitrate', 'I', 0),
    )
class OpenReceiveChannelAck(dpkt.Packet):
    """Body of an OPEN_RECEIVE_CHANNEL_ACK message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('channel_status', 'I', 0),
        ('ip', '4s', ''),
        ('port', 'I', 0),
        ('passthruparty_id', 'I', 0),
    )
class SelectStartKeys(dpkt.Packet):
    """Soft-key selection message body.

    NOTE(review): not registered in SCCP._msgsw, so it is never decoded
    automatically — confirm whether it should map to SELECT_SOFTKEYS.
    """
    __byte_order__ = '<'
    __hdr__ = (
        ('line_id', 'I', 1),
        ('call_id', 'I', 0),
        ('softkey_set', 'I', 8),
        # The original default "0xffffffffL" used the Python 2 long-integer
        # suffix, which is a SyntaxError on Python 3; the value is unchanged.
        ('softkey_map', 'I', 0xffffffff)
    )
class SetLamp(dpkt.Packet):
    """Body of a SET_LAMP message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('stimulus', 'I', 9), # 9: Line
        ('stimulus_instance', 'I', 1),
        ('lamp_mode', 'I', 1),
    )
class SetSpeakerMode(dpkt.Packet):
    """Body of a SET_SPEAKER_MODE message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('speaker', 'I', 2), # 2: SpeakerOff
    )
class StartMediaTransmission(dpkt.Packet):
    """Body of a START_MEDIA_TRANSMIT message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('conference_id', 'I', 0),
        ('passthruparty_id', 'I', 0),
        ('remote_ip', '4s', ''),
        ('remote_port', 'I', 0),
        ('ms_packet', 'I', 0),
        ('payload_capability', 'I', 4), # 4: G.711 u-law 64k
        ('precedence', 'I', 0),
        ('silence_suppression', 'I', 0),
        ('max_frames_per_pkt', 'I', 1),
        ('g723_bitrate', 'I', 0),
    )
class StartTone(dpkt.Packet):
    """Body of a START_TONE message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('tone', 'I', 0x24), # 0x24: AlertingTone
    )
class StopMediaTransmission(dpkt.Packet):
    """Body of a STOP_MEDIA_TRANSMIT message."""
    __byte_order__ = '<'
    __hdr__ = (
        ('conference_id', 'I', 0),
        ('passthruparty_id', 'I', 0),
    )
class SCCP(dpkt.Packet):
    """SCCP (Skinny) header: length, reserved word, message ID, body."""
    __byte_order__ = '<'
    __hdr__ = (
        ('len', 'I', 0),
        ('rsvd', 'I', 0),
        ('msgid', 'I', 0),
        ('msg', '0s', ''),
    )
    # msgid -> body class; used by unpack() to decode self.msg.
    _msgsw = {
        KEYPAD_BUTTON: KeypadButton,
        OPEN_RECEIVE_CHANNEL_ACK: OpenReceiveChannelAck,
        START_TONE: StartTone,
        SET_LAMP: SetLamp,
        START_MEDIA_TRANSMIT: StartMediaTransmission,
        STOP_MEDIA_TRANSMIT: StopMediaTransmission,
        CALL_INFO: CallInfo,
        DISPLAY_TEXT: DisplayText,
        OPEN_RECEIVE_CHANNEL: OpenReceiveChannel,
        CLOSE_RECEIVE_CHANNEL: CloseReceiveChannel,
        CALL_STATE: CallState,
        DISPLAY_PROMPT_STATUS: DisplayPromptStatus,
        CLEAR_PROMPT_STATUS: ClearPromptStatus,
        ACTIVATE_CALL_PLANE: ActivateCallPlane,
    }
    def unpack(self, buf):
        """Unpack the header, slice out the message body, and — when the
        message ID is known — attach a decoded body as an attribute named
        after its class (e.g. self.callinfo)."""
        dpkt.Packet.unpack(self, buf)
        # Body size: 'len' apparently counts msgid plus body, hence the -4
        # for the msgid word. NOTE(review): confirm against the protocol.
        n = self.len - 4
        if n > len(self.data):
            raise dpkt.NeedData('not enough data')
        self.msg, self.data = self.data[:n], self.data[n:]
        try:
            p = self._msgsw[self.msgid](self.msg)
            setattr(self, p.__class__.__name__.lower(), p)
        except (KeyError, dpkt.UnpackError):
            # Unknown message type or truncated body: keep raw self.msg only.
            pass
|
# Print "Yes" when the input line contains exactly two distinct characters.
distinct_chars = set(input())
print("Yes" if len(distinct_chars) == 2 else "No")
|
import sqlite3
from bank_bot.settings import TRANSACTION_MODEL_DATA
class Transaction(object):
    """A transfer between two users, identified by their hashes.

    Thin model wrapper: all persistence is delegated to the database
    object passed into the classmethod helpers.
    """
    def __init__(
        self, sender_hash, recepient_hash, amount, transaction_hash, created_time=None
    ):
        # NOTE(review): "recepient" (sic) is kept — it matches the spelling
        # used across the rest of the codebase.
        self.sender_hash = sender_hash
        self.recepient_hash = recepient_hash
        self.amount = amount
        self.transaction_hash = transaction_hash
        self.created_time = created_time
    def __str__(self):
        # TRANSACTION_MODEL_DATA is a Template-like object (.substitute);
        # keyword names here must match its placeholders.
        return TRANSACTION_MODEL_DATA.substitute(
            sender_hash=self.sender_hash, reciever_hash=self.recepient_hash,
            amount=self.amount, created=self.created_time, transaction_hash=self.transaction_hash
        )
    @classmethod
    def create_transaction(cls, sender_hash, recepient_hash, amount, database):
        """Persist a new transaction via *database*."""
        return database.create_transaction(sender_hash, recepient_hash, amount)
    @classmethod
    def list_transactions(cls, user_hash, is_sender, database):
        """List transactions where the user is sender or recipient."""
        return database.inspect_transactions(user_hash, is_sender, cls)
    @classmethod
    def list_all_transactions(cls, user_hash, database):
        """List every transaction involving the user."""
        return database.inspect_all_transactions(user_hash, cls)
    @classmethod
    def list_pair_history_transactions(cls, sender_hash, recepient_hash, database):
        """List the transaction history between a specific pair of users."""
        return database.inspect_pair_history_transactions(sender_hash, recepient_hash, cls)
|
# -*- coding: utf-8 -*-
# Copyright 2018 Alexandre Freitas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Python module to provide access to a Micro Focus™ Project and Portfolio Management Center server."""
__all__ = ['foundation', 'notification', 'request', 'server', 'session']
# Explicit relative imports: the original implicit form
# ("from server import Server") only worked on Python 2 and fails inside
# a package on Python 3 (PEP 328).
from .server import Server
from .session import Session
def get_languages(url):
    """Returns a list of available languages.
    Returns a list of all languages available to the user on the login page.
    Parameters
    ----------
    url : str
        Application server URL.
    Returns
    -------
    list of str
        List of all languages available on the login page.
    """
    # Stateless delegation to the Server helper; no session is required.
    return Server.get_languages(url)
def logon(url, username, password, language):
    """Logs onto the application server.
    Parameters
    ----------
    url : str
        Application server URL.
    username : str
        Username to log in.
    password : str
        Password to log in.
    language : str
        Language used by the user. Must be one of the languages returned by get_languages().
    Returns
    -------
    :obj:`Session`
        New user session on application server.
    """
    # Session performs the actual authentication in its constructor.
    return Session(url, username, password, language)
|
#!/usr/bin/env python
"""
Core elements of the model for conference resolution for SemEval 2018
This module will contain the dict objects for reference through out the
application. Any constant/format/list that is needed across files should
be housed here. No functions will be defined here as this will be the root
file for the model.
The standard Conll format is used for all input data where the default form is:
# Document ID: /<name of the show>-<season ID><episode ID> (e.g., /friends-s01e01).
# Scene ID: the ID of the scene within the episode.
# Token ID: the ID of the token within the sentence.
# Word form: the tokenized word.
# Part-of-speech tag: the part-of-speech tag of the word (auto generated).
# Constituency tag: the Penn Treebank style constituency tag (auto generated).
# Lemma: the lemma of the word (auto generated).
# Frameset ID: not provided (always "_").
# Word sense: not provided (always "_").
# Speaker: the speaker of this sentence.
# Named entity tag: the named entity tag of the word (auto generated).
# Entity ID: the entity ID of the mention, that is consistent across all documents.
"""
import semEval_core_functions
__author__ = 'Casey Beaird'
__credits__ = ['Casey Beaird', 'Chase Greco', 'Brandon Watts']
__license__ = 'MIT'
__version__ = '0.1'
# possible conll columns can be configured in any order but can only be specified once
DOCUMENT_ID = 'doc_id'
SCENE_ID = 'scene_id'
TOKEN_ID = 'token_id'
WORD = 'word'
POS = 'pos'
CONSTITUENCY = 'constituency'
LEMMA = 'lemma'
FRAMESET_ID = 'frame_id'
WORD_SENSE = 'ws'
SPEAKER = 'speaker'
NE = 'ne'
ENTITY_ID = 'e_id'
# model elements this is a fixed list of items expected in a model additional elements can be
# added by extending the model definition
MODEL_ENTITY_MAP = 'e'
MODEL_SPEAKERS = 's'
MODEL_WORDS = 'w'
MODEL_DISTRIBUTIONS = 'd'
# Column order matching the CoNLL layout described in the module docstring.
DEFAULT_HEADINGS = (DOCUMENT_ID, SCENE_ID, TOKEN_ID, WORD, POS, CONSTITUENCY,
                    LEMMA, FRAMESET_ID, WORD_SENSE, SPEAKER, NE, ENTITY_ID)
# Placeholder used for empty CoNLL cells.
EMPTY = '-'
# entity ID key'd dictionary for entity ID's and string names
entity_map = None
# Module-level mutable state, populated elsewhere at runtime.
model_path = None
model = None
# function pointers for the model objects
updater_functions = {MODEL_ENTITY_MAP: semEval_core_functions.update_entities,
                     MODEL_SPEAKERS: semEval_core_functions.update_speakers,
                     MODEL_WORDS: semEval_core_functions.update_words,
                     MODEL_DISTRIBUTIONS: semEval_core_functions.update_dist_counts}
nn_model = None
|
import pandas as pd
import requests
from bs4 import BeautifulSoup
import utilities as utils
def get_page_text(url):
    """Download *url* and return the visible text from common content tags.

    Only text whose direct parent is a whitelisted tag (paragraphs,
    emphasis/bold/italic/underline, h1-h3 headers) is kept, each chunk
    followed by a space; layout characters (CR, LF, tab, non-breaking
    space) are stripped from the result.
    """
    html_page = requests.get(url).content
    soup = BeautifulSoup(html_page, 'lxml')
    whitelist = ['p', 'strong', 'em', 'b', 'u', 'i', 'h1', 'h2', 'h3']
    # Collect chunks and join once (the original string "+=" loop is
    # quadratic in the page size).
    parts = ['{} '.format(t) for t in soup.find_all(text=True)
             if t.parent.name in whitelist]
    out = ''.join(parts)
    # Delete all layout/escape characters in a single C-level pass.
    return out.translate(str.maketrans('', '', '\r\n\t\xa0'))
def clean(text):
    """Basic-clean then tokenize *text* using the shared NLP utilities."""
    return utils.nlp_tokenize(utils.nlp_basic_clean(text))
def sl(text):
    """Remove stopwords (plus "reuters") and lemmatize *text*."""
    without_stopwords = utils.nlp_remove_stopwords(text, extra_words=["reuters"])
    return utils.nlp_lemmatize(without_stopwords)
|
def neural_network(inputs, weight):
    """Predict by delegating to the weighted-sum helper."""
    return weighted_sum(inputs, weight)
def weighted_sum(inputs, weight):
    """Return the dot product of *inputs* and *weight*.

    Bug fix: the original body read the module-level global ``weights``
    instead of the ``weight`` parameter, silently ignoring the argument.
    """
    output = 0
    for value, w in zip(inputs, weight):
        output += value * w
    return output
# Toy example: counts of each player type in a cricket squad.
number_of_all_rounders = 3
number_of_batsmen = 4
number_of_bowlers = 3
inputs = [number_of_all_rounders, number_of_batsmen, number_of_bowlers]
# One weight per input category.
weights = [0.01, 0.2, 0.03]
print(neural_network(inputs, weights))
|
"""pyezviz camera api."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any
from .constants import DeviceCatagories, DeviceSwitchType, SoundMode
from .exceptions import PyEzvizError
if TYPE_CHECKING:
from .client import EzvizClient
class EzvizCamera:
    """Initialize Ezviz camera object."""
    def __init__(
        self, client: EzvizClient, serial: str, device_obj: dict | None = None
    ) -> None:
        """Initialize the camera object.

        client: authenticated API client used for every remote call.
        serial: camera serial number identifying the device.
        device_obj: optional pre-fetched device info; when omitted the
        info is fetched from the API.
        """
        self._client = client
        self._serial = serial
        # Motion-trigger state, refreshed by _motion_trigger().
        self._alarmmotiontrigger: dict[str, Any] = {
            "alarm_trigger_active": False,
            "timepassed": None,
        }
        self._device = (
            device_obj if device_obj else self._client.get_device_infos(self._serial)
        )
        self._last_alarm: dict[str, Any] = {}
        # Map switch type id -> enabled flag for quick lookups in status().
        self._switch: dict[int, bool] = {
            switch["type"]: switch["enable"]
            for switch in self._device.get("SWITCH", {})
        }
    def _detection_sensibility(self) -> Any:
        """load detection sensibility

        Returns "Hibernate" when auto-sleep is on, "Unknown" as fallback,
        otherwise the value reported by the API (battery cameras use a
        dedicated type code "3").
        """
        result = "Unknown"
        if self._switch.get(DeviceSwitchType.AUTO_SLEEP.value) is not True:
            if (
                self._device["deviceInfos"]["deviceCategory"]
                == DeviceCatagories.BATTERY_CAMERA_DEVICE_CATEGORY.value
            ):
                result = self._client.get_detection_sensibility(
                    self._serial,
                    "3",
                )
            else:
                result = self._client.get_detection_sensibility(self._serial)
        if self._switch.get(DeviceSwitchType.AUTO_SLEEP.value) is True:
            result = "Hibernate"
        return result
    def _alarm_list(self) -> None:
        """get last alarm info for this camera's self._serial"""
        _alarmlist = self._client.get_alarminfo(self._serial)
        # NOTE(review): assumes "page"/"totalResults" are always present in
        # the reply; a missing or None totalResults would raise here.
        if _alarmlist["page"].get("totalResults") > 0:
            self._last_alarm = _alarmlist["alarms"][0]
            return self._motion_trigger()
    def _local_ip(self) -> Any:
        """Fix empty ip value for certain cameras"""
        if self._device.get("WIFI"):
            if (
                self._device["WIFI"].get("address")
                and self._device["WIFI"]["address"] != "0.0.0.0"
            ):
                return self._device["WIFI"]["address"]
        # Seems to return none or 0.0.0.0 on some.
        if self._device.get("CONNECTION"):
            if (
                self._device["CONNECTION"].get("localIp")
                and self._device["CONNECTION"]["localIp"] != "0.0.0.0"
            ):
                return self._device["CONNECTION"]["localIp"]
        return "0.0.0.0"
    def _motion_trigger(self) -> None:
        """Create motion sensor based on last alarm time."""
        if not self._last_alarm.get("alarmStartTimeStr"):
            return
        _today_date = datetime.date.today()
        _now = datetime.datetime.now().replace(microsecond=0)
        # The API may report "Today HH:MM:SS"; substitute today's date so
        # strptime can parse it.
        _last_alarm_time = datetime.datetime.strptime(
            self._last_alarm["alarmStartTimeStr"].replace("Today", str(_today_date)),
            "%Y-%m-%d %H:%M:%S",
        )
        # returns a timedelta object
        timepassed = _now - _last_alarm_time
        # Active when the last alarm happened within the past 60 seconds.
        self._alarmmotiontrigger = {
            "alarm_trigger_active": bool(timepassed < datetime.timedelta(seconds=60)),
            "timepassed": timepassed.total_seconds(),
        }
    def _is_alarm_schedules_enabled(self) -> bool:
        """Checks if alarm schedules enabled"""
        # Type 2 entries in TIME_PLAN appear to be the alarm schedules.
        _alarm_schedules = [
            item for item in self._device.get("TIME_PLAN", {}) if item.get("type") == 2
        ]
        if _alarm_schedules:
            return bool(_alarm_schedules[0].get("enable"))
        return False
    def status(self) -> dict[Any, Any]:
        """Return the status of the camera."""
        # Refresh last-alarm info (and hence the motion trigger) first.
        self._alarm_list()
        return {
            "serial": self._serial,
            "name": self._device["deviceInfos"].get("name"),
            "version": self._device["deviceInfos"].get("version"),
            # 3 presumably means "upgrade available" — TODO confirm.
            "upgrade_available": bool(
                self._device["UPGRADE"].get("isNeedUpgrade") == 3
            ),
            "status": self._device["deviceInfos"].get("status"),
            "device_category": self._device["deviceInfos"].get("deviceCategory"),
            "device_sub_category": self._device["deviceInfos"].get("deviceSubCategory"),
            "sleep": self._switch.get(DeviceSwitchType.SLEEP.value)
            or self._switch.get(DeviceSwitchType.AUTO_SLEEP.value),
            "privacy": self._switch.get(DeviceSwitchType.PRIVACY.value),
            "audio": self._switch.get(DeviceSwitchType.SOUND.value),
            "ir_led": self._switch.get(DeviceSwitchType.INFRARED_LIGHT.value),
            "state_led": self._switch.get(DeviceSwitchType.LIGHT.value),
            "follow_move": self._switch.get(DeviceSwitchType.MOBILE_TRACKING.value),
            "alarm_notify": bool(self._device["STATUS"].get("globalStatus")),
            "alarm_schedules_enabled": self._is_alarm_schedules_enabled(),
            "alarm_sound_mod": SoundMode(
                self._device["STATUS"].get("alarmSoundMode")
            ).name,
            "encrypted": bool(self._device["STATUS"].get("isEncrypt")),
            "local_ip": self._local_ip(),
            "wan_ip": self._device["CONNECTION"].get("netIp"),
            "mac_address": self._device["deviceInfos"].get("mac"),
            # Some devices report 0 for the RTSP port; fall back to 554.
            "local_rtsp_port": self._device["CONNECTION"].get("localRtspPort", "554")
            if self._device["CONNECTION"].get("localRtspPort", "554") != 0
            else "554",
            "supported_channels": self._device["deviceInfos"].get("channelNumber"),
            "detection_sensibility": self._detection_sensibility(),
            "battery_level": self._device["STATUS"]
            .get("optionals", {})
            .get("powerRemaining"),
            "PIR_Status": self._device["STATUS"].get("pirStatus"),
            "Motion_Trigger": self._alarmmotiontrigger.get("alarm_trigger_active"),
            "Seconds_Last_Trigger": self._alarmmotiontrigger.get("timepassed"),
            "last_alarm_time": self._last_alarm.get("alarmStartTimeStr"),
            # Placeholder logo when the alarm has no snapshot.
            "last_alarm_pic": self._last_alarm.get(
                "picUrl",
                "https://eustatics.ezvizlife.com/ovs_mall/web/img/index/EZVIZ_logo.png?ver=3007907502",
            ),
            "last_alarm_type_code": self._last_alarm.get("alarmType", "0000"),
            "last_alarm_type_name": self._last_alarm.get("sampleName", "NoAlarm"),
            "wifiInfos": self._device.get("WIFI"),
            "switches": self._switch,
        }
    def move(self, direction: str, speed: int = 5) -> bool:
        """Move camera.

        direction must be one of right/left/down/up; raises PyEzvizError
        otherwise.
        """
        if direction not in ["right", "left", "down", "up"]:
            raise PyEzvizError(f"Invalid direction: {direction} ")
        # launch the start command
        self._client.ptz_control(str(direction).upper(), self._serial, "START", speed)
        # launch the stop command
        self._client.ptz_control(str(direction).upper(), self._serial, "STOP", speed)
        return True
    def move_coordinates(self, x: float, y: float):
        """Move camera to specified coordinates."""
        self._client.ptz_control_coordinates(self._serial, x, y)
        return True
    def alarm_notify(self, enable: int) -> bool:
        """Enable/Disable camera notification when movement is detected."""
        return self._client.set_camera_defence(self._serial, enable)
    def alarm_sound(self, sound_type: int) -> bool:
        """Enable/Disable camera sound when movement is detected."""
        # we force enable = 1 , to make sound...
        return self._client.alarm_sound(self._serial, sound_type, 1)
    def alarm_detection_sensibility(
        self, sensibility: int, type_value: int = 0
    ) -> bool | str:
        """Set the motion-detection sensibility level on the device."""
        return self._client.detection_sensibility(self._serial, sensibility, type_value)
    def switch_device_audio(self, enable: int = 0) -> bool:
        """Switch audio status on a device."""
        return self._client.switch_status(
            self._serial, DeviceSwitchType.SOUND.value, enable
        )
    def switch_device_state_led(self, enable: int = 0) -> bool:
        """Switch led status on a device."""
        return self._client.switch_status(
            self._serial, DeviceSwitchType.LIGHT.value, enable
        )
    def switch_device_ir_led(self, enable: int = 0) -> bool:
        """Switch ir status on a device."""
        return self._client.switch_status(
            self._serial, DeviceSwitchType.INFRARED_LIGHT.value, enable
        )
    def switch_privacy_mode(self, enable: int = 0) -> bool:
        """Switch privacy mode on a device."""
        return self._client.switch_status(
            self._serial, DeviceSwitchType.PRIVACY.value, enable
        )
    def switch_sleep_mode(self, enable: int = 0) -> bool:
        """Switch sleep mode on a device."""
        return self._client.switch_status(
            self._serial, DeviceSwitchType.SLEEP.value, enable
        )
    def switch_follow_move(self, enable: int = 0) -> bool:
        """Switch follow move."""
        return self._client.switch_status(
            self._serial, DeviceSwitchType.MOBILE_TRACKING.value, enable
        )
    def change_defence_schedule(self, schedule: str, enable: int = 0) -> bool:
        """Change defence schedule. Requires json formatted schedules."""
        return self._client.api_set_defence_schedule(self._serial, schedule, enable)
|
# -*- coding: utf-8 -*-
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from config import cfg
def services_buttons(callback_data_func):
    """Build an inline keyboard with one button row per configured service."""
    rows = [
        [InlineKeyboardButton(
            text="%s (%s)" % (service.name, service.mode),
            callback_data=callback_data_func(service),
        )]
        for service in cfg.get("services")
    ]
    return InlineKeyboardMarkup(rows)
|
from warnings import warn
from pyhf import Model
# Public API of this module.
__all__ = ["correlated_background", "uncorrelated_background"]
def __dir__():
    # Restrict dir(module) to the public API (PEP 562 module __dir__).
    return __all__
def correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None):
    r"""
    Construct a simple single channel :class:`~pyhf.pdf.Model` with a
    :class:`~pyhf.modifiers.histosys` modifier representing a background
    with a fully correlated bin-by-bin uncertainty.
    Args:
        signal (:obj:`list`): The data in the signal sample.
        bkg (:obj:`list`): The data in the background sample.
        bkg_up (:obj:`list`): The background sample under an upward variation
         corresponding to :math:`\alpha=+1`.
        bkg_down (:obj:`list`): The background sample under a downward variation
         corresponding to :math:`\alpha=-1`.
        batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.
    Returns:
        ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.
    Example:
        >>> import pyhf
        >>> pyhf.set_backend("numpy")
        >>> model = pyhf.simplemodels.correlated_background(
        ...     signal=[12.0, 11.0],
        ...     bkg=[50.0, 52.0],
        ...     bkg_up=[45.0, 57.0],
        ...     bkg_down=[55.0, 47.0],
        ... )
        >>> model.schema
        'model.json'
        >>> model.config.channels
        ['single_channel']
        >>> model.config.samples
        ['background', 'signal']
        >>> model.config.parameters
        ['correlated_bkg_uncertainty', 'mu']
        >>> model.expected_data(model.config.suggested_init())
        array([62., 63.,  0.])
    """
    # Build the workspace spec by hand: one channel with a free-floating
    # signal normalization and a histosys-varied background.
    spec = {
        "channels": [
            {
                "name": "single_channel",
                "samples": [
                    {
                        "name": "signal",
                        "data": signal,
                        # mu: unconstrained signal-strength normalization.
                        "modifiers": [
                            {"name": "mu", "type": "normfactor", "data": None}
                        ],
                    },
                    {
                        "name": "background",
                        "data": bkg,
                        # histosys interpolates between the up/down templates.
                        "modifiers": [
                            {
                                "name": "correlated_bkg_uncertainty",
                                "type": "histosys",
                                "data": {"hi_data": bkg_up, "lo_data": bkg_down},
                            }
                        ],
                    },
                ],
            }
        ]
    }
    return Model(spec, batch_size=batch_size)
def uncorrelated_background(signal, bkg, bkg_uncertainty, batch_size=None):
    """
    Construct a simple single channel :class:`~pyhf.pdf.Model` with a
    :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated
    background uncertainty.
    Example:
        >>> import pyhf
        >>> pyhf.set_backend("numpy")
        >>> model = pyhf.simplemodels.uncorrelated_background(
        ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
        ... )
        >>> model.schema
        'model.json'
        >>> model.config.channels
        ['singlechannel']
        >>> model.config.samples
        ['background', 'signal']
        >>> model.config.parameters
        ['mu', 'uncorr_bkguncrt']
        >>> model.expected_data(model.config.suggested_init())
        array([ 62.        ,  63.        , 277.77777778,  55.18367347])
    Args:
        signal (:obj:`list`): The data in the signal sample
        bkg (:obj:`list`): The data in the background sample
        bkg_uncertainty (:obj:`list`): The statistical uncertainty on the background sample counts
        batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute
    Returns:
        ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema
    """
    # Single channel: free-floating signal strength plus a per-bin
    # (uncorrelated) shapesys uncertainty on the background.
    spec = {
        'channels': [
            {
                'name': 'singlechannel',
                'samples': [
                    {
                        'name': 'signal',
                        'data': signal,
                        # mu: unconstrained signal-strength normalization.
                        'modifiers': [
                            {'name': 'mu', 'type': 'normfactor', 'data': None}
                        ],
                    },
                    {
                        'name': 'background',
                        'data': bkg,
                        'modifiers': [
                            {
                                'name': 'uncorr_bkguncrt',
                                'type': 'shapesys',
                                'data': bkg_uncertainty,
                            }
                        ],
                    },
                ],
            }
        ]
    }
    return Model(spec, batch_size=batch_size)
# Deprecated APIs
def _deprecated_api_warning(
    deprecated_api, new_api, deprecated_release, remove_release
):
    """Emit a DeprecationWarning pointing callers at the replacement API."""
    message = (
        f"{deprecated_api} is deprecated in favor of {new_api} as of pyhf v{deprecated_release} and will be removed in release {remove_release}."
        + f" Please use {new_api}."
    )
    # stacklevel=3 attributes the warning to the user's call site.
    warn(message, DeprecationWarning, stacklevel=3)
def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):
    """
    .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`
       instead.
    .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in
       ``pyhf`` ``v0.7.0``.
    """
    _deprecated_api_warning(
        "pyhf.simplemodels.hepdata_like",
        "pyhf.simplemodels.uncorrelated_background",
        "0.6.2",
        "0.7.0",
    )
    # Arguments map 1:1 onto the replacement function.
    return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.