content stringlengths 5 1.05M |
|---|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
CustomerOrderCreateAPIView,
AcceptOrderCreateAPIView,
)
# URL routes for the orders API; each route maps to a DRF class-based view.
urlpatterns = [
    # Customer-facing endpoint for creating a new order.
    # NOTE(review): the route name 'order create' contains a space, which is
    # unusual for reverse() lookups -- confirm it matches existing callers.
    path('customer/create/', CustomerOrderCreateAPIView.as_view(), name='order create'),
    # Endpoint for accepting an existing order.
    path('accept/', AcceptOrderCreateAPIView.as_view(), name='accept'),
]
# Let clients request explicit renderers via format suffixes (e.g. ``.json``).
urlpatterns = format_suffix_patterns(urlpatterns)
|
# https://www.acmicpc.net/problem/12865
# 2020-11-1 Chul-Woong Yang
# 0-1 knapsack
import sys
from collections import namedtuple
# `NamedTuple` and `List` are referenced below (class Item, knapsack
# signature), so this import must be active, not commented out.
from typing import List, NamedTuple
def mi():
    """Read one line from stdin and return an iterator over its ints."""
    return map(int, sys.stdin.readline().split())
def milines():
    """Read all remaining stdin lines, converting each whole line to an int."""
    return map(int, sys.stdin.readlines())
class Item(NamedTuple):
    """A knapsack item: `weight` consumed vs. `value` gained if taken."""
    weight: int
    value: int
def knapsack(l: List[Item], capacity: int) -> int:
    """l = (weight, value), Sum(weight) <= capacity, maximize total value."""
    n = len(l)
    # memo[i][room] caches the best value using items i.. with `room` left;
    # -1 marks "not computed yet" (all real answers are >= 0).
    memo = [[-1] * (capacity + 1) for _ in range(n)]

    def best(i: int, room: int) -> int:
        if i >= n or room <= 0:
            return 0
        if memo[i][room] < 0:
            skip = best(i + 1, room)
            if l[i].weight <= room:
                take = best(i + 1, room - l[i].weight) + l[i].value
                memo[i][room] = take if take > skip else skip
            else:
                memo[i][room] = skip
        return memo[i][room]

    return best(0, capacity)
def main() -> None:
    """Read `n k`, then n (weight, value) pairs, and print the best value."""
    n, k = mi()
    l = [Item(*mi()) for _ in range(n)]
    print(knapsack(l, k))
def test_knapsack() -> None:
    """Sanity-check knapsack() on the BOJ 12865 sample data."""
    items = [Item(w, v) for w, v in [(6, 13), (4, 8), (3, 6), (5, 12)]]
    assert knapsack(items, 7) == 14
if __name__ == '__main__':
    main()

# Sample input and expected output (BOJ 12865) used by the pytest checks below.
INPUT = '''
4 7
6 13
4 8
3 6
5 12
'''
OUTPUT = '''
14
'''
# pytest
import sys # noqa: E402
import io # noqa: E402
def test_main(capsys) -> None:  # noqa: E302
    """End-to-end pytest check: feed INPUT on stdin, compare stdout to OUTPUT."""
    # Swap stdin for an in-memory buffer holding the sample input.
    sys.stdin = io.StringIO(INPUT.strip())
    main()
    sys.stdin = sys.__stdin__  # always restore the real stdin
    out, err = capsys.readouterr()
    # Echo the captured streams (after readouterr, so they aren't re-captured).
    print(out)
    eprint(err)
    assert out == OUTPUT.lstrip()
def eprint(*args, **kwargs):  # noqa: E302
    """print() wrapper that writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
|
import subprocess
import csv
from pathlib import Path
from os import listdir
from os.path import isdir, isfile
def getTestCasesNames(dir):
    """Return the names of all regular files (test cases) inside `dir`.

    Bug fix: `isdir(file)` checked the bare name against the current working
    directory; it must be joined with `dir` to actually detect subdirectories.
    """
    files = []
    for file in listdir(dir):
        if not isdir(dir + "/" + file):
            files.append(file)
    return files
def readFile(dir, file):
    """Return the text content of dir/file, or None when it is not a file."""
    content = None
    path = dir + "/" + file
    if isfile(path):
        # `with` guarantees the handle is closed even if read() raises;
        # the old open()/close() pair leaked the handle on error.
        with open(path, "r") as f:
            content = f.read()
    return content
def getResultCode(main_exe, dir, file):
    """Run `main_exe`, feeding the test-case file on stdin; return its exit code."""
    stdin_data = readFile(dir, file).encode('utf-8')
    completed = subprocess.run([main_exe], stdout=subprocess.PIPE,
                               input=stdin_data)
    return completed.returncode
def getGoldCodes(csv_path):
    """Load the expected exit code per test from a CSV with 'test'/'result' columns."""
    with open(csv_path) as csvfile:
        return {row['test']: int(row['result'])
                for row in csv.DictReader(csvfile)}
"""
========== MAIN SCRIPT ==========
"""
script_path = Path(__file__).parent.absolute().as_posix()
main_exe = script_path + "/../etapa4"
dir = script_path + "/test_cases"
cases = getTestCasesNames(dir)
golds = getGoldCodes(script_path + "/gold.csv")
passed = []
invalid = []
cases.sort()
if (not isfile(main_exe)):
print("ERROR! Executable does not exist or is in wrong directory!")
print("Try running 'make'")
exit()
print("Running tests...")
print("")
print("Errors:")
for test in cases:
if not test in golds:
print(' - ', test, ': ', "GOLD NOT FOUND", sep='')
else:
result = int(getResultCode(main_exe, dir, test))
if result == golds[test]:
passed.append(test)
# print(' - ', test, ': ', "PASS", sep='')
else:
invalid.append(test)
print(' - ', test, ': ', "ERROR: (expected: ", golds[test], ", found: ", result, ")", sep='')
if(len(invalid) == 0):
final_result = "PASS"
else:
final_result = "ERROR"
print("")
print("")
print("FINAL RESULT:", final_result)
print("")
exit(len(invalid))
|
from collections import defaultdict
import random
import copy
import numpy as np
from torch.utils.data.sampler import Sampler
class TripletSampler(Sampler):
    """Sampler for triplet/batch-hard style training: each batch holds `p`
    distinct labels with `k` samples per label.

    Labels <= 0 are treated as invalid/background and are never sampled.
    """

    def __init__(self, labels, batch_size, p, k):
        super(TripletSampler, self).__init__(None)
        self.labels = labels
        self.batch_size = batch_size
        self.p = p  # number of labels (identities) per batch
        self.k = k  # number of samples per label
        # Map each positive label to the dataset indices carrying it.
        self.label_dict = defaultdict(list)
        for index in range(len(self.labels)):
            if self.labels[index] > 0:
                self.label_dict[self.labels[index]].append(index)
        # Estimate number of examples in an epoch: each label contributes a
        # multiple of k (labels with fewer than k samples are oversampled to k).
        length = 0
        self.label_list = list(np.unique(self.labels))
        self.label_list = [label for label in self.label_list if label > 0]
        for label in self.label_list:
            num = len(self.label_dict[label])
            if num < self.k:
                num = self.k
            length += num - num % self.k
        self.length = length

    def __iter__(self):
        # Slice each label's shuffled indices into chunks of exactly k.
        batch_idxs_dict = defaultdict(list)
        for label in self.label_list:
            idxs = copy.deepcopy(self.label_dict[label])
            if len(idxs) < self.k:
                # Oversample with replacement so the label fills one chunk.
                idxs = np.random.choice(idxs, size=self.k, replace=True)
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if len(batch_idxs) == self.k:
                    batch_idxs_dict[label].append(batch_idxs)
                    batch_idxs = []
        # Repeatedly draw p labels and emit one k-chunk from each, until
        # fewer than p labels still have chunks left.
        avai_labels = copy.deepcopy(self.label_list)
        final_idxs = []
        while len(avai_labels) >= self.p:
            selected_labels = random.sample(avai_labels, self.p)
            for label in selected_labels:
                batch_idxs = batch_idxs_dict[label].pop(0)
                final_idxs.extend(batch_idxs)
                if len(batch_idxs_dict[label]) == 0:
                    avai_labels.remove(label)
        # Record the exact number of indices produced this epoch.
        self.length = len(final_idxs)
        return iter(final_idxs)

    def __len__(self):
        return self.length
|
# Because Python is a reference counted language, it's important not to create
# reference cycles! Weakref is used to hold a reference to the parent
# GameState object.
import weakref
# Everything that happens is associated with an event. The event processing
# system keeps events to be processed in a queue.
from collections import deque
# Levels assign their own arbitrary unique IDs.
from uuid import uuid4
# You should really go read the docs on this:
# http://steveasleep.com/clubsandwich/api_event_dispatcher.html
from clubsandwich.event_dispatcher import EventDispatcher
# "Better to ask forgiveness than beg for permission" - LevelState's approach
# to querying cell data
from clubsandwich.tilemap import CellOutOfBoundsError
# Implementation of recursive shadowcasting for FoV
from clubsandwich.line_of_sight import get_visible_points
# The rest of the imports will be explained later.
from .entity import Entity, Item
from .behavior import (
CompositeBehavior,
BEHAVIORS_BY_ID,
)
from .const import (
EnumEventNames,
monster_types,
item_types,
)
# LevelState stores all information related to a single map and its
# inhabitants. It also handles the event loop.
class LevelState:
    """All information related to a single map and its inhabitants; also
    handles the event loop for that level."""

    def __init__(self, tilemap, game_state):
        # Things that don't change. The GameState is held via weakref to
        # avoid a parent<->child reference cycle.
        self._game_state = weakref.ref(game_state)
        self.tilemap = tilemap
        self.uuid = uuid4().hex
        # Things that do change
        self.event_queue = deque()
        self.entity_by_position = {}
        self.items_by_position = {}
        self._is_applying_events = False
        # The dispatcher remembers who wants to know about what, and what
        # methods to call when things do happen.
        self.dispatcher = EventDispatcher()
        # Register every possible event up front; this makes typos easy to
        # catch when firing.
        for name in EnumEventNames:
            self.dispatcher.register_event_type(name)
        # Player is special, create them explicitly
        self.player = None
        self.player = self.create_entity(
            monster_types.PLAYER,
            self.tilemap.points_of_interest['stairs_up'])
        # The level generator has told us where all the items should go.
        for item_data in self.tilemap.points_of_interest['items']:
            self.drop_item(Item(item_data.item_type), item_data.position)
        # ...and about all the monsters, too.
        for monster_data in self.tilemap.points_of_interest['monsters']:
            self.create_entity(monster_data.monster_type, monster_data.position)
        # Two point sets: visible right now (self.los_cache) and ever seen
        # (self.level_memory_cache); update_los_cache() keeps both current.
        self.level_memory_cache = set()
        self.update_los_cache()

    # Expose the GameState weakref as a property for convenience
    @property
    def game_state(self):
        return self._game_state()

    def update_los_cache(self):
        """Recompute field of view from the player's position and fold it
        into the player's level memory."""
        self.los_cache = get_visible_points(self.player.position, self.get_can_see)
        self.level_memory_cache.update(self.los_cache)

    def create_entity(self, monster_type, position, behavior_state=None):
        """Create an entity, instantiate its behaviors, put it on the map."""
        mt = monster_type
        if mt.id == 'PLAYER':
            # Only create one player at a time!
            assert self.player is None
        # No overlapping entities, please
        assert position not in self.entity_by_position
        entity = Entity(monster_type=mt)
        entity.position = position
        entity.behavior_state = behavior_state or {}
        # Populate its inventory
        for it_id in entity.monster_type.items:
            entity.inventory.append(Item(item_types[it_id]))
        # Instantiate the entity's behaviors (see behavior.py). A single
        # behavior is used directly; several get wrapped in CompositeBehavior.
        if len(mt.behaviors) == 1:
            entity.add_behavior(BEHAVIORS_BY_ID[mt.behaviors[0]](entity, self))
        else:
            entity.add_behavior(CompositeBehavior(entity, self, [
                BEHAVIORS_BY_ID[behavior_id](entity, self)
                for behavior_id in mt.behaviors]))
        # Remember the entity, subscribe its behaviors, index its position
        self.add_entity(entity)
        return entity

    def add_entity(self, entity):
        """Subscribe the entity's behaviors and index it by position."""
        # Behaviors are buckets of event handlers attached to entities; they
        # know how to subscribe themselves to dispatchers.
        for behavior in entity.behaviors:
            behavior.add_to_event_dispatcher(self.dispatcher)
        if entity.position:
            self.entity_by_position[entity.position] = entity

    def remove_entity(self, entity):
        """Unsubscribe the entity's behaviors and drop it from the index."""
        for behavior in entity.behaviors:
            behavior.remove_from_event_dispatcher(self.dispatcher)
        if entity.position:
            del self.entity_by_position[entity.position]
            entity.position = None

    def drop_item(self, item, point, entity=None):
        """Add `item` to the flat per-cell item list; fire an event when an
        entity (rather than the level generator) dropped it."""
        self.items_by_position.setdefault(point, [])
        self.items_by_position[point].append(item)
        if entity is not None:
            self.fire(EnumEventNames.entity_dropped_item, data=item, entity=entity)
        return True

    ### event stuff ###

    def fire(self, name, data=None, entity=None):
        """Queue an event for later dispatch. Events are never dispatched
        immediately, so a handler of event X can't observe event Y before the
        rest of X's handlers have run."""
        self.event_queue.append((name, entity, data))

    def consume_events(self):
        """Dispatch every queued event. Called once per frame by GameScene."""
        # Overly paranoid check that this doesn't get called inside itself.
        assert not self._is_applying_events
        self._is_applying_events = True
        while self.event_queue:
            (name, entity, data) = self.event_queue.popleft()
            # Handlers that fire new events simply append to the queue and
            # are processed in this same loop.
            self.dispatcher.fire(name, entity, data)
        self._is_applying_events = False

    ### action helper methods ###

    def fire_player_took_action_if_alive(self, position=None):
        """Fire player_took_action unless the player is dead. A dead player
        is off the map, and firing the event then breaks the monsters that
        move in response to it."""
        if self.player.position is None:
            return
        position = position or self.player.position
        self.fire(EnumEventNames.player_took_action, data=position, entity=None)

    def get_can_player_see(self, point):
        """True if the player can currently see `point`."""
        return point in self.los_cache

    def get_can_player_remember(self, point):
        """True if the player has ever seen `point`."""
        return point in self.level_memory_cache

    def test_line_of_sight(self, source, dest):  # both args are entities
        """True if the two entities can see each other."""
        if source.position is None or dest.position is None:
            return False  # someone is dead, so they can't see each other
        if source.position.manhattan_distance_to(dest.position) > 30:
            # Arbitrary distance limit on sight
            return False
        # All points on a bresenham line between the two must be unblocked.
        for point in source.position.points_bresenham_to(dest.position):
            if not self.get_can_see(point):
                return False
        return True

    def get_entity_at(self, position):
        try:
            return self.entity_by_position[position]
        except KeyError:
            return None

    def get_items_at(self, position):
        try:
            return self.items_by_position[position]
        except KeyError:
            return []

    def get_is_terrain_passable(self, point):
        try:
            return self.tilemap.cell(point).terrain.walkable
        except CellOutOfBoundsError:
            return False

    def get_can_move(self, entity, position, allow_player=False):
        """True if `entity` may move into `position`. Occupied cells block,
        except the player's cell when `allow_player` is set (i.e. the mover is
        allowed to attack). Only used by monster AI; players just attempt
        moves, and the move action fails if it's impossible."""
        try:
            if self.entity_by_position[position] == self.player and not allow_player:
                return False
            elif self.entity_by_position[position] != self.player:
                return False
        except KeyError:
            pass
        try:
            cell = self.tilemap.cell(position)
        except CellOutOfBoundsError:
            return False
        return cell.terrain.walkable

    def get_can_see(self, position):
        """True if light passes through the cell at `position`."""
        try:
            cell = self.tilemap.cell(position)
            return cell.terrain.lightable
        except CellOutOfBoundsError:
            return False

    def get_can_open_door(self, entity):
        # Only players can open doors. In practice this doesn't matter
        # because enemies don't follow the player through doors; their AI
        # relies on line of sight.
        return entity.is_player

    def get_passable_neighbors(self, entity, allow_player=True):
        """Return the orthogonal+diagonal neighbor points `entity` can move to.

        Bug fix: the `allow_player` argument used to be ignored (the call
        below hard-coded allow_player=True); it is now forwarded."""
        return [
            p for p in
            list(entity.position.neighbors) + list(entity.position.diagonal_neighbors)
            if self.get_can_move(entity, p, allow_player=allow_player)]
|
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as ss
from bcodes.ratevector import create_rate_vector
from bcodes.stoichiometrymatrix import build_stoichiometry_matrix
from model.batch_growth import batch_growth as m
###############################################################################
# INPUTS
t_odes = [0, 9]  # [t0 t_final]
t_sim = np.arange(1, 9.5, 0.5)  # Times to simulate after fitting
i_obs = [8, 10, 12, 14]  # 5, 6, 7 and 8h indices for the observed measurements
###############################################################################
# STATEMENTS
# Stoichiometry matrix S and rate vector v(y), built from the model module.
S = build_stoichiometry_matrix(m.id_sp, m.id_rs, m.mass_balances)
v = create_rate_vector(m.id_sp, m.id_rs, m.rates, m.params)

def odes(t, y):
    # Mass balances: dy/dt = S . v(y)
    return np.dot(S, v(y))

sol = solve_ivp(odes, t_odes, m.init, method='LSODA', vectorized=True,
                t_eval=t_sim)
# Collecting observations and adding noise to them
obs = sol.y.T[i_obs]
t_obs = t_sim[i_obs]
# NOTE(review): both draws use random_state=42, so dw_err and glc_err are the
# same normals at different scales (perfectly correlated) -- confirm intended.
dw_err = ss.norm.rvs(loc=0, scale=0.5, size=len(t_obs), random_state=42)
glc_err = ss.norm.rvs(loc=0, scale=2, size=len(t_obs), random_state=42)
obs[:, 0] += glc_err  # column 0: glucose (see plot labels below)
obs[:, 1] += dw_err   # column 1: dry weight
if __name__ == '__main__':
    # Visual check: simulated trajectories with the noisy observations.
    fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(8, 4))
    ax[0].plot(sol.t, sol.y.T[:, 0])
    ax[0].scatter(t_obs, obs[:, 0], color='k')
    ax[0].set_ylabel('Glucose (mmol)')
    ax[1].plot(sol.t, sol.y.T[:, 1])
    ax[1].scatter(t_obs, obs[:, 1], color='k')
    ax[1].set_ylabel('Dry weight (g)')
    ax[0].set_xlabel('Time (h)')
    ax[1].set_xlabel('Time (h)')
    fig.tight_layout()
    plt.show()
|
#!/usr/bin/env python
import argparse
import os
import sys
import tempfile
def compare_pattern(pattern, filename, offset):
    """Return True if `filename` contains ASCII `pattern` at byte `offset`.

    Opens read-only ("rb"): the previous "r+b" mode requested write access
    this function never uses, failing needlessly on read-only files.
    """
    with open(filename, "rb") as fd:
        fd.seek(offset)
        return pattern == fd.read(len(pattern)).decode("ascii")
def write_pattern(pattern, filename, offset):
    """Create/truncate `filename` and write ASCII `pattern` at byte `offset`.

    Bytes before `offset` (if any) are left as a zero-filled hole.
    """
    with open(filename, "w+b") as out:
        out.seek(offset)
        out.write(pattern.encode("ascii"))
# The full alphanumeric alphabet: a-z, A-Z, 0-9 (in that order).
ALPHANUMERIC_PATTERN = (
    "".join(map(chr, range(ord("a"), ord("z") + 1)))
    + "".join(map(chr, range(ord("A"), ord("Z") + 1)))
    + "".join(map(chr, range(ord("0"), ord("9") + 1)))
)
def run(filename, pattern, offset=0):
    """Write `pattern` into `filename` at `offset`, then verify it round-trips."""
    write_pattern(pattern, filename, offset=offset)
    assert compare_pattern(pattern, filename, offset=offset)
def main(argv=None):
    """Write a long alphanumeric pattern to a temp file, dump it, clean up.

    Parameters
    ----------
    argv : list of str, optional
        Full argv including the program name; defaults to sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--offset", default=0, type=int)
    # Bug fix: `argv` used to be ignored, so main(argv=...) silently parsed
    # sys.argv anyway. Strip argv[0] (the program name) before parsing.
    args = parser.parse_args(argv[1:] if argv is not None else None)
    pattern = ALPHANUMERIC_PATTERN * 30
    filename = tempfile.NamedTemporaryFile(delete=True).name
    try:
        run(filename, pattern, offset=args.offset)
    finally:
        if os.path.exists(filename):
            with open(filename, "r+b") as fd:
                print(fd.read().decode("ascii"))
            # Bug fix: the old "%r" % repr(...) double-converted, printing a
            # quoted repr string instead of the stat result itself.
            print("\n%r" % (os.stat(filename),))
            os.unlink(filename)
if __name__ == "__main__":
    main(argv=sys.argv)
|
from tvm import cpp as tvm
def test_basic():
    """max(a, b) should format with both operands' variable names."""
    a = tvm.Var('a', 0)
    b = tvm.Var('b', 0)
    z = tvm.max(a, b)
    assert tvm.format_str(z) == 'max(%s, %s)' % (a.name, b.name)
if __name__ == "__main__":
    test_basic()
|
# Number of entries each of the lists below is expected to hold.
DATA_LIST_SIZE = 10
# Hex color codes, one per data slot.
COLOR_CODE_LIST = ["#CD2626", "#FF7F0F", "#FF9CEE", "#6EB5FF", "#B10501",
                   "#4B11A8", "#1A73E8", "#617C58", "#827B60", "#C47451"]
# One numeric offer per slot -- presumably discount percentages; confirm
# against the consuming code.
OFFER_LIST = [5, 7, 10, 12, 15, 20, 25, 30, 35, 40]
|
"""
Some usefuls objects for doing unit tests.
Async test system is based on https://github.com/kwarunek/aiounittest
Copyright (C) 2017 The Pylp Authors.
This file is under the MIT License.
"""
import unittest
import asyncio
import inspect
from pylp.lib.runner import TaskEndTransformer
from pylp import Stream
# Create a function to wait for the end of the stream
def wait_processed(self):
"""Wait until the stream have processed the files."""
future = asyncio.Future()
self.pipe(TaskEndTransformer(future))
return future
# Set this function as a method of Stream
Stream.wait_processed = wait_processed
def async_test(func):
    """Wrap coroutine function `func` so it runs to completion synchronously."""
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        # A previous test may have closed the loop; install a fresh one.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    def test_func():
        loop.run_until_complete(asyncio.ensure_future(func(), loop=loop))

    test_func.__doc__ = func.__doc__
    return test_func
class AsyncTestCase(unittest.TestCase):
    """A test case that supports asynchronous test methods."""

    def __getattribute__(self, name):
        # Transparently wrap coroutine test_* methods so the unittest
        # machinery can call them like ordinary functions.
        attr = super().__getattribute__(name)
        needs_wrapping = name.startswith('test_') and inspect.iscoroutinefunction(attr)
        return async_test(attr) if needs_wrapping else attr
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/beta_utils.ipynb (unless otherwise specified).
# Public API of this autogenerated module (kept in sync with the notebook).
__all__ = ['get_beta_parameters', 'convolve_betas_fft', 'convolve_betas', 'balanced_accuracy_expected', 'beta_sum_pdf',
           'beta_avg_pdf', 'beta_sum_cdf', 'beta_avg_cdf', 'beta_avg_inv_cdf', 'recall', 'precision', 'mpba_score',
           'micro_f1_score']
# Cell
import numpy as np
from scipy.stats import beta
from scipy.integrate import trapz
from scipy.optimize import brentq
from sklearn import metrics
import numpy.fft
# Cell
def get_beta_parameters(confusion):
    """ Extract the beta parameters from a confusion matrix.

    Parameters
    ----------
    confusion : array, shape = [n_classes, n_classes]
        Where entry c_{ij} is the number of observations in class i but
        are classified as class j.

    Returns
    -------
    parameters: array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution
        that corresponds to class i.
    """
    k = len(confusion)
    # Row totals (observations per true class), computed once instead of
    # re-summing the whole matrix on every loop iteration.
    row_totals = confusion.sum(axis=1)
    # alpha: 1 + correctly classified; beta: 1 + misclassified in row i.
    alphas = [1 + confusion[i, i] for i in range(k)]
    betas = [1 + row_totals[i] - confusion[i, i] for i in range(k)]
    return list(zip(alphas, betas))
def convolve_betas_fft(parameters, res=0.001):
    """Convolve the Beta pdfs given by `parameters` via an FFT product.

    Each pdf is sampled on the grid [0, k] with step `res`; the pointwise
    product of their DFTs is inverted and the result renormalised so it
    sums to 1/res (i.e. integrates to one).
    """
    k = len(parameters)
    # Common support grid: the sum of k probabilities lives in [0, k].
    x = np.arange(0, k + res, res)
    # Stack the sampled pdfs as rows of a single 2-d array.
    rows = numpy.zeros((k, len(x)))
    for i, (a, b) in enumerate(parameters):
        rows[i] = beta.pdf(x, a, b)
    # Multiply in the frequency domain, then invert the transform.
    spectrum_product = numpy.fft.fft(rows).prod(axis=0)
    convolution = numpy.fft.ifft(spectrum_product)
    # Inputs are real, so the imaginary residue is numerical noise.
    return convolution.real / (sum(convolution.real) * res)
def convolve_betas(parameters, res=0.001):
    """ Convolves k Beta distributions.

    Parameters
    ----------
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) parameterises one Beta distribution.
    res : float, optional (default=0.001)
        The precision of the resulting convolution, measured as step size
        in the support.

    Returns
    -------
    convolution : array, shape = [k / res]
        The pdf of the sum of the k Beta variables, sampled on [0, k].
    """
    k = len(parameters)
    # The sum of k probabilities lives on [0, k].
    x = np.arange(0, k + res, res)
    pdfs = [beta.pdf(x, a, b) for a, b in parameters]
    # Chain the pairwise convolutions.
    convolution = pdfs[0]
    for pdf in pdfs[1:]:
        convolution = np.convolve(convolution, pdf)
    # Reduce to the [0, k] support and normalise so values sum to 1/res.
    convolution = convolution[0:len(x)]
    return convolution / (sum(convolution) * res)
def balanced_accuracy_expected(confusion, fft=False):
    """ Compute the expected value of the posterior balanced accuracy.

    Parameters
    ----------
    confusion : array, shape = [n_classes, n_classes]
        Where entry c_{ij} is the number of observations in class i but
        are classified as class j.
    fft : bool
        Use efficient fft convolution.

    Returns
    -------
    bal_accuracy_expected : float
    """
    # number of classes (the original computed this twice)
    k = len(confusion)
    # extract beta distribution parameters from the confusion matrix
    parameters = get_beta_parameters(confusion)
    res = 0.001
    x = np.arange(0, k + res, res)
    # convolve the per-class Beta posteriors to get the pdf of their sum
    if fft:
        bal_accuracy = convolve_betas_fft(parameters, res)
    else:
        bal_accuracy = convolve_betas(parameters, res)
    # E[mean accuracy] = (1/k) * E[sum] = (1/k) * sum(x * pdf(x) * res)
    return (1 / k) * np.dot(x, bal_accuracy * res)
def beta_sum_pdf(x, parameters, res=0.001, fft=False):
    """ Compute the pdf of the sum of beta distributions.

    Parameters
    ----------
    x : array
        A subset of the domain where we want evaluate the pdf.
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in
        the support.
    fft : bool
        Use the FFT-based convolution.

    Returns
    -------
    y : array
        The pdf evaluated at x.
    """
    if fft:
        convolution = convolve_betas_fft(parameters, res)
    else:
        convolution = convolve_betas(parameters, res)
    x = np.array(x)
    k = len(parameters)  # upper bound of the support
    # Start everything at NaN, zero out points outside [0, k], then fill the
    # rest by nearest-grid-point lookup into the convolution.
    y = np.array([np.nan] * len(x))
    y[(x < 0) | (x > k)] = 0
    c_index = np.int_(x / res)
    inside = np.isnan(y)
    y[inside] = convolution[c_index[inside]]
    return y
def beta_avg_pdf(x, parameters, res=0.001, fft=False):
    """ Compute the pdf of the average of the k beta distributions.

    Parameters
    ----------
    x : array
        A subset of the domain where we want evaluate the pdf.
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in
        the support.
    fft : bool
        Use the FFT-based convolution.

    Returns
    -------
    y : array
        The pdf evaluated at x.
    """
    k = len(parameters)
    # If S = sum of the k betas and A = S / k, then f_A(x) = k * f_S(k x).
    return k * beta_sum_pdf(k * np.array(x), parameters, res, fft)
def beta_sum_cdf(x, parameters, res=0.001):
    """ Compute the cdf of the sum of the k beta distributions.

    Parameters
    ----------
    x : array
        A subset of the domain where we want evaluate the cdf.
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in
        the support.

    Returns
    -------
    y : array
        The cdf evaluated at x.
    """
    convolution = convolve_betas(parameters, res)
    n = len(convolution)
    y = np.empty(len(x), dtype=float)
    for i, xi in enumerate(x):
        idx = int(round(xi / res))
        if idx <= 0:
            y[i] = 0  # below the support
        elif idx >= n:
            y[i] = 1  # above the support
        else:
            # Integrate the pdf up to xi by the trapezoidal rule.
            y[i] = trapz(convolution[:idx + 1], dx=res)
    return y
def beta_avg_cdf(x, parameters, res=0.001):
    """ Compute the cdf of the average of the k beta distributions.

    Parameters
    ----------
    x : array
        A subset of the domain where we want evaluate the cdf.
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in
        the support.

    Returns
    -------
    y : array
        The cdf evaluated at x.
    """
    k = len(parameters)
    # P(avg <= x) = P(sum <= k * x)
    return beta_sum_cdf(k * np.array(x), parameters, res)
def beta_avg_inv_cdf(y, parameters, res=0.001):
    """ Compute the inverse cdf of the average of the k beta distributions.

    Parameters
    ----------
    y : float
        A float between 0 and 1 (the range of the cdf).
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in
        the support.

    Returns
    -------
    x : float
        The inverse cdf of y.
    """
    # Root-find cdf(x) - y on [0, 1]; the cdf is monotone, so brentq applies.
    def cdf_offset(x):
        return beta_avg_cdf([x], parameters, res)[0] - y
    return brentq(cdf_offset, 0, 1)
def recall(confusion):
    """ Compute the recall from a confusion matrix.

    Parameters
    ----------
    confusion : array, shape = [n_classes, n_classes]
        Where entry c_{ij} is the number of observations in class i but
        are classified as class j.

    Returns
    -------
    recalls : array
        A list of recalls, one for each class.
    """
    # Row totals (observations per true class), computed once instead of
    # re-summing the matrix on every loop iteration.
    row_totals = confusion.sum(axis=1)
    return [confusion[i, i] / row_totals[i] for i in range(len(confusion))]
def precision(confusion, classes=None, classifiers=None):
    """ Compute the precision from a confusion matrix.

    Parameters
    ----------
    confusion : array, shape = [n_classes, n_classes]
        Where entry c_{ij} is the number of observations in class i but
        are classified as class j.
    classes, classifiers : unused
        Never read by this function; kept (now optional, defaulting to
        None) for backward compatibility with existing callers.

    Returns
    -------
    precisions : array
        A list of precisions, one for each class.
    """
    # Column totals (predictions per class), computed once.
    col_totals = confusion.sum(axis=0)
    return [confusion[i, i] / col_totals[i] for i in range(len(confusion))]
def mpba_score(y_true, y_pred):
    """ Compute the MPBA score of a classifier based on some test set.

    Parameters
    ----------
    y_true : array
        Ground truth (correct) labels.
    y_pred : array
        Predicted labels, as returned by a classifier.

    Returns
    -------
    balanced_accuracy_expected : float
        The expected balanced accuracy rate on the test set.
    """
    # Build the confusion matrix, then take the expected posterior
    # balanced accuracy over it.
    confusion_test = metrics.confusion_matrix(y_true, y_pred)
    return balanced_accuracy_expected(confusion_test)
def micro_f1_score(y_true, y_pred, n_classes=None):
    """ Compute the Micro-F1 score of a classifier based on some test set.

    Parameters
    ----------
    y_true : array
        Ground truth (correct) labels.
    y_pred : array
        Predicted labels, as returned by a classifier.
    n_classes : int, optional
        Number of classes; inferred from y_true when omitted.

    Returns
    -------
    f1_score : float
        The F1 score on the test set.
    """
    if n_classes is None:
        n_classes = len(np.unique(y_true))
    # Two classes -> plain binary F1; otherwise micro-averaged F1.
    average = 'binary' if n_classes == 2 else 'micro'
    return metrics.f1_score(y_true, y_pred, average=average)
"""
Exposes regular shell commands as services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/shell_command/
"""
import logging
import subprocess
from homeassistant.util import slugify
DOMAIN = 'shell_command'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """Setup the shell_command component."""
    conf = config.get(DOMAIN)
    if not isinstance(conf, dict):
        _LOGGER.error('Expected configuration to be a dictionary')
        return False
    # Service names must already be slugs; report the first offender.
    bad_name = next((name for name in conf.keys() if name != slugify(name)),
                    None)
    if bad_name is not None:
        _LOGGER.error('Invalid service name: %s. Try %s',
                      bad_name, slugify(bad_name))
        return False

    def service_handler(call):
        """Execute a shell command service."""
        try:
            # NOTE: shell=True runs the configured command string through the
            # shell -- acceptable only because commands come from the user's
            # own configuration, never from untrusted input.
            subprocess.call(conf[call.service], shell=True,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except subprocess.SubprocessError:
            _LOGGER.exception('Error running command')

    # One shared handler; the fired service name selects the command.
    for name in conf.keys():
        hass.services.register(DOMAIN, name, service_handler)
    return True
|
# This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from .annotation import NamedDestination
from .warnings import warn
__all__ = ['DocumentElement', 'Location']
class Location(object):
    """Coarse description of where a document element came from, used when
    no real source node is available for warning messages."""

    def __init__(self, document_element):
        # Fall back to the element's class name as its "location".
        self.location = document_element.__class__.__name__
class DocumentElement(object):
    """An element that is directly or indirectly part of a :class:`Document`
    and is eventually rendered to the output."""

    def __init__(self, id=None, parent=None, source=None):
        """Initialize this document element as a child of `parent`
        (:class:`DocumentElement`) if it is not a top-level :class:`Flowable`
        element. `source` should point to a node in the input's document tree
        corresponding to this document element. It is used to point to a
        location in the input file when warnings or errors are generated (see
        the :meth:`warn` method).

        Both parameters are optional, and can be set at a later point by
        assigning to the identically named instance attributes."""
        self.id = id
        self.secondary_ids = []
        self.parent = parent
        self.source = source  # goes through the `source` property setter

    def get_id(self, document, create=True):
        # Prefer the explicitly assigned id, falling back to the document's
        # registry; register on demand when `create` is set (otherwise an
        # unregistered element yields None).
        try:
            return self.id or document.ids_by_element[self]
        except KeyError:
            if create:
                return document.register_element(self)

    def get_ids(self, document):
        # Yield the primary id first, then any secondary ids.
        primary_id = self.get_id(document)
        yield primary_id
        for id in self.secondary_ids:
            yield id

    @property
    def source(self):
        """The source element this document element was created from."""
        # Walk up the parent chain until an explicit source is found; fall
        # back to a generic Location naming this element's class.
        if self._source is not None:
            return self._source
        elif self.parent is not None:
            return self.parent.source
        else:
            return Location(self)

    @source.setter
    def source(self, source):
        """Set `source` as the source element of this document element."""
        self._source = source

    @property
    def elements(self):
        # By default an element yields only itself; composites override this.
        yield self

    def build_document(self, flowable_target):
        """Set document metadata and populate front and back matter"""
        pass

    def prepare(self, flowable_target):
        """Determine number labels and register references with the document"""
        if self.get_id(flowable_target.document, create=False):
            flowable_target.document.register_element(self)

    def create_destination(self, container, at_top_of_container=False):
        """Create a destination anchor in the `container` to direct links to
        this :class:`DocumentElement` to."""
        # Delegates to the module-level create_destination() helper.
        create_destination(self, container, at_top_of_container)

    def warn(self, message, container=None):
        """Present the warning `message` to the user, adding information on the
        location of the related element in the input file."""
        if self.source is not None:
            message = '[{}] '.format(self.source.location) + message
        if container is not None:
            # Not every container has a page yet; add the page number when
            # it is available.
            try:
                message += ' (page {})'.format(container.page.formatted_number)
            except AttributeError:
                pass
        warn(message)
def create_destination(flowable, container, at_top_of_container=False):
    """Create a destination anchor in the `container` to direct links to
    `flowable` to."""
    if at_top_of_container:
        vertical_position = 0
    else:
        vertical_position = container.cursor
    id_strings = (str(id) for id in flowable.get_ids(container.document))
    destination = NamedDestination(*id_strings)
    # Anchor spans the container width at the computed vertical position.
    container.canvas.annotate(destination, 0, vertical_position,
                              container.width, None)
    container.document.register_page_reference(container.page, flowable)
|
# coding: utf-8
import argparse
import time
import math
import torch
import torch.nn as nn
from torch import optim
import data
import sys
from utils import *
from setproctitle import setproctitle
from splitcross import *
sys.path.append("../")
from model import TrellisNetModel
# ---------------------------------------------------------------------------
# Command-line configuration for TrellisNet word-level LM training (WT103).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch TrellisNet Language Model')
parser.add_argument('--data', type=str, default='./data/wikitext-103',
                    help='location of the data corpus')
parser.add_argument('--name', type=str, default='Trellis_wordWT103',
                    help='name of the process')
parser.add_argument('--emsize', type=int, default=512,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1600,
                    help='number of hidden units per layer')
parser.add_argument('--nout', type=int, default=512,
                    help='number of output units')
parser.add_argument('--lr', type=float, default=1e-3,
                    help='initial learning rate (default: 1e-3)')
parser.add_argument('--clip', type=float, default=0.07,
                    help='gradient clipping (default: 0.07)')
parser.add_argument('--epochs', type=int, default=25,
                    help='upper epoch limit (default: 25)')
parser.add_argument('--batch_size', type=int, default=40, metavar='N',
                    help='batch size (default: 40)')

# For most of the time, you should change these two together
parser.add_argument('--nlevels', type=int, default=75,
                    help='levels of the network')
parser.add_argument('--horizon', type=int, default=75,
                    help='The effective history size')

parser.add_argument('--dropout', type=float, default=0.1,
                    help='output dropout (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.1,
                    help='input dropout (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.0,
                    help='dropout applied to weights (0 = no dropout)')
parser.add_argument('--emb_dropout', type=float, default=0.0,
                    help='dropout applied to embedding layer (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.1,
                    help='dropout applied to hidden layers (0 = no dropout)')
parser.add_argument('--wdecay', type=float, default=0,
                    help='weight decay (default: 0)')
# NOTE(review): the action='store_false' flags below default to True, and
# PASSING the flag turns the feature OFF — counter-intuitive but deliberate
# in this codebase; confirm before changing.
parser.add_argument('--tied', action='store_false',
                    help='tie the word embedding and softmax weights (default: True)')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--anneal', type=int, default=5,
                    help='learning rate annealing criteria (default: 5)')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--wnorm', action='store_false',
                    help='use weight normalization (default: True)')
parser.add_argument('--temporalwdrop', action='store_false',
                    help='only drop the temporal weights (default: True)')
parser.add_argument('--optim', type=str, default='Adam',
                    help='optimizer to use (default: Adam)')
parser.add_argument('--repack', action='store_false',
                    help='use repackaging (default: True)')
parser.add_argument('--aux', type=float, default=0.1,
                    help='use auxiliary loss (default: 0.1), -1 means no auxiliary loss used')
parser.add_argument('--aux_freq', type=float, default=25,
                    help='auxiliary loss frequency (default: 25)')
parser.add_argument('--seq_len', type=int, default=0,
                    help='total sequence length; if this is 0 then it defaults to args.horizon (default: 0)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='report interval')
parser.add_argument('--when', nargs='+', type=int, default=[15, 20],
                    help='When to decay the learning rate')
parser.add_argument('--ksize', type=int, default=2,
                    help='conv kernel size (default: 2)')
parser.add_argument('--dilation', nargs='+', type=int, default=[1],
                    help='dilation rate (default: [1])')
parser.add_argument('--n_experts', type=int, default=0,
                    help='number of softmax experts (default: 0)')
parser.add_argument('--load', type=str, default='',
                    help='path to load the model')
args = parser.parse_args()
# Checkpoint filename is derived from the run name.
args.save = args.name + ".pt"

# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
setproctitle(args.name)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    # NOTE(review): because --cuda is store_false (default True), this branch
    # fires only when the user explicitly passed --cuda, so the warning text
    # appears inverted relative to the flag semantics — confirm intent.
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################

import os
import hashlib

# Cache the tokenized corpus keyed by an MD5 of the data path, so repeated
# runs skip the (slow) Corpus construction.
fn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())
if os.path.exists(fn):
    corpus = torch.load(fn)
else:
    print('Processing dataset...')
    corpus = data.Corpus(args.data)
    torch.save(corpus, fn)

# Evaluation/test use batch size 1; `batchify` comes from utils.
eval_batch_size = 1
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, test_batch_size)
class Logger(object):
    """Tee: mirror everything written to stdout into logs/<name>.log."""

    def __init__(self):
        self.terminal = sys.stdout
        self.log = open("logs/" + args.name + ".log", "a")

    def write(self, message):
        # Echo to the console and append to the log file.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Required so callers of sys.stdout.flush() keep working on Python 3;
        # forward the flush to both underlying streams.
        self.log.flush()
        self.terminal.flush()
# Mirror all subsequent stdout output into the per-run log file.
sys.stdout = Logger()

###############################################################################
# Build the model
###############################################################################

ntokens = len(corpus.dictionary)
if len(args.load) > 0:
    # Resume from a previously saved full-model checkpoint.
    print("Loaded model\n")
    model = torch.load(args.load)
else:
    model = TrellisNetModel(ntoken=ntokens,
                            ninp=args.emsize,
                            nhid=args.nhid,
                            nout=args.nout,
                            nlevels=args.nlevels,
                            kernel_size=args.ksize,
                            dilation=args.dilation,
                            dropout=args.dropout,
                            dropouti=args.dropouti,
                            dropouth=args.dropouth,
                            emb_dropout=args.emb_dropout,
                            wdrop=args.wdrop,
                            temporalwdrop=args.temporalwdrop,
                            tie_weights=args.tied,
                            repack=args.repack,
                            wnorm=args.wnorm,
                            aux=(args.aux > 0),
                            aux_frequency=args.aux_freq,
                            n_experts=args.n_experts)
if args.cuda:
    model.cuda()

# Bucket boundaries for the adaptive softmax on large vocabularies (WT103
# has >75k tokens).
splits = []
if ntokens > 75000:
    splits = [2800, 20000, 76000]  # This can be tuned.

criterion = SplitCrossEntropyLoss(args.emsize, splits=splits, verbose=False)  # Use an adaptive softmax
if args.cuda:
    criterion = criterion.cuda()

# The adaptive softmax has its own parameters; optimize them jointly with
# the model's.
params = list(model.parameters()) + list(criterion.parameters())

lr = args.lr
optimizer = getattr(optim, args.optim)(params, lr=lr, weight_decay=args.wdecay)
def evaluate(data_source):
    """Return the average per-token loss of the global `model` on `data_source`.

    Uses module-level `model`, `criterion` and `args`. When `--seq_len` exceeds
    `--horizon` and repackaging is off, the first `args.horizon` positions of
    each window are treated as warm-up "effective history" and excluded from
    the loss.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    batch_size = data_source.size(1)
    hidden = model.init_hidden(batch_size)
    eff_history_mode = (args.seq_len > args.horizon and not args.repack)

    if eff_history_mode:
        validseqlen = args.seq_len - args.horizon
        seq_len = args.seq_len
    else:
        validseqlen = args.horizon
        seq_len = args.horizon

    processed_data_size = 0
    # Step through the corpus in strides of `validseqlen` tokens.
    for i in range(0, data_source.size(0) - 1, validseqlen):
        eff_history = args.horizon if eff_history_mode else 0
        # Skip windows whose scored region would be empty.
        if i + eff_history >= data_source.size(0) - 1: continue
        data, targets = get_batch(data_source, i, seq_len, evaluation=True)
        if args.repack:
            # Detach hidden state from the graph but keep its values.
            hidden = repackage_hidden(hidden)
        else:
            hidden = model.init_hidden(data.size(1))

        data = data.t()
        # NOTE(review): DataParallel is engaged only for batch sizes > 10;
        # eval batch size here is 1, so this is normally the bare model.
        net = nn.DataParallel(model) if batch_size > 10 else model
        (_, _, output), hidden, _ = net(data, hidden, decode=False)
        output = output.transpose(0, 1)
        targets = targets[eff_history:].contiguous().view(-1)
        final_output = output[eff_history:].contiguous().view(-1, output.size(2))

        # Decode via the adaptive softmax criterion using the tied decoder.
        loss = criterion(model.decoder.weight, model.decoder.bias, final_output, targets)
        loss = loss.data

        # Weight each window's mean loss by the number of scored positions.
        total_loss += (data.size(1) - eff_history) * loss
        processed_data_size += data.size(1) - eff_history

    # Drop references to large tensors before returning.
    data = None
    output = None
    targets = None
    final_output = None
    return total_loss.item() / processed_data_size
def train(epoch):
    """Run one training epoch over the global `train_data`.

    Relies on module-level `model`, `criterion`, `optimizer`, `params`,
    `train_data`, `args` and `lr`. Logs running CE and auxiliary losses every
    `args.log_interval` batches.

    BUG FIX: the auxiliary-loss accumulation used `if args.aux:` (truthiness).
    With the documented `--aux -1` ("no auxiliary loss"), `args.aux` is truthy
    while `aux_losses` remains the int 0, so `aux_losses.data` raised
    AttributeError. The guard now matches the `args.aux > 0` condition used
    when the loss is computed.
    """
    model.train()
    total_loss = 0
    total_aux_losses = 0
    start_time = time.time()
    hidden = model.init_hidden(args.batch_size)
    eff_history_mode = (args.seq_len > args.horizon and not args.repack)

    if eff_history_mode:
        validseqlen = args.seq_len - args.horizon
        seq_len = args.seq_len
    else:
        validseqlen = args.horizon
        seq_len = args.horizon

    for batch, i in enumerate(range(0, train_data.size(0) - 1, validseqlen)):
        # When not using repackaging mode, we DISCARD the first arg.horizon outputs in backprop (which are
        # the "effective history".
        eff_history = args.horizon if eff_history_mode else 0
        if i + eff_history >= train_data.size(0) - 1: continue
        data, targets = get_batch(train_data, i, seq_len)
        if args.repack:
            # Carry hidden state across windows, detached from the graph.
            hidden = repackage_hidden(hidden)
        else:
            hidden = model.init_hidden(args.batch_size)

        optimizer.zero_grad()
        data = data.t()
        net = nn.DataParallel(model) if data.size(0) > 10 else model
        (raw_output, _, output), hidden, all_outputs = net(data, hidden, decode=False)
        raw_output = raw_output.transpose(0, 1)
        output = output.transpose(0, 1)
        targets = targets[eff_history:].contiguous().view(-1)
        final_output = output[eff_history:].contiguous().view(-1, output.size(2))

        dec_weight, dec_bias = model.decoder.weight, model.decoder.bias

        # Loss 1: CE loss
        raw_loss = criterion(dec_weight, dec_bias, final_output, targets)

        # Loss 2: Aux loss (deep supervision on intermediate outputs)
        aux_losses = 0
        if args.aux > 0:
            all_outputs = all_outputs[:, :, eff_history:].permute(1, 2, 0, 3).contiguous()
            aux_size = all_outputs.size(0)  # The number of auxiliary losses
            all_outputs = all_outputs.view(aux_size, -1, all_outputs.size(3))
            aux_losses = args.aux * sum([criterion(dec_weight, dec_bias, all_outputs[i], targets) for i in range(aux_size)])

        # Combine losses
        loss = raw_loss + aux_losses
        loss.backward()
        torch.nn.utils.clip_grad_norm_(params, args.clip)
        optimizer.step()

        total_loss += raw_loss.data
        if args.aux > 0:        # was `if args.aux:` — see docstring
            total_aux_losses += aux_losses.data

        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss.item() / args.log_interval
            cur_aux_loss = total_aux_losses.item() / args.log_interval if args.aux > 0 else 0
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | '
                  'raw_loss {:5.2f} | aux_loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // validseqlen, lr,
                elapsed * 1000 / args.log_interval, cur_loss, cur_aux_loss, math.exp(cur_loss)))
            total_loss = 0
            total_aux_losses = 0
            start_time = time.time()
            sys.stdout.flush()

    # Drop references to large tensors before returning.
    data = None
    raw_output = None
    output = None
    targets = None
    final_output = None
    all_outputs = None
def inference(epoch, epoch_start_time):
    """Evaluate the current model on the validation and test splits.

    Prints a summary banner for both and returns ``(val_loss, test_loss)``.
    """
    loss_val = evaluate(val_data)
    loss_test = evaluate(test_data)
    separator = '-' * 89
    print(separator)
    print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
          'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                     loss_val, math.exp(loss_val)))
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
          'test ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                    loss_test, math.exp(loss_test)))
    print(separator)
    return loss_val, loss_test
# Loop over epochs
lr = args.lr
best_val_loss = None
all_val_losses = []
all_test_losses = []
try:
    for epoch in range(1, args.epochs + 1):
        epoch_start_time = time.time()
        train(epoch)
        val_loss, test_loss = inference(epoch, epoch_start_time)

        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            with open(args.save, 'wb') as f:
                torch.save(model, f)
            print('Saving model (new best validation) in ' + args.save)
            best_val_loss = val_loss

        # Anneal when validation hasn't improved over the last `args.anneal`
        # epochs, or at the fixed epochs listed in `args.when`.
        if (len(all_val_losses) > args.anneal and val_loss > min(all_val_losses[:-args.anneal])) \
                or epoch in args.when:
            print("\n" + "*" * 89)
            if lr > 1e-5:
                print('Annealing learning rate')
                lr /= 10.0
                optimizer.param_groups[0]['lr'] = lr
            print("*" * 89 + "\n")

        all_val_losses.append(val_loss)
        all_test_losses.append(test_loss)
        sys.stdout.flush()

except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
    delete_cmd = input('DO YOU WANT TO DELETE THIS RUN [YES/NO]:')
    if delete_cmd == "YES":
        import os
        os.remove('logs/' + args.name + ".log")
        print("Removed log file")
        # NOTE(review): the checkpoint is written to `args.save` in the current
        # directory (see the save block above), not under logs/ — this remove
        # likely targets the wrong path; confirm.
        os.remove('logs/' + args.name + ".pt")
        print("Removed pt file")

# Load the best saved model
with open(args.save, 'rb') as f:
    model = torch.load(f)

# Run on test data
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
print('=' * 89)
|
"""
File: preprocessing.py
Author: Team ohia.ai
Description: Preprocess data for the HAAC 2018 Challenge.
"""
import os, re, glob, multiprocessing, click
import numpy as np
from PIL import Image
from collections import Counter
from ohia.utils import resize_smaller_dim, crop_square, make_dir
from functools import partial
# create wrapper to parallelize
def resize_crop_and_save(f, input_dir, output_dir, crop):
    """Resize the image file `f`, optionally crop it square, and save it.

    The output path is derived by substituting `input_dir` with `output_dir`
    in `f`. Only RGB images are processed; anything else (or any error while
    reading/writing) is skipped with a message.

    Returns 1 on success, 0 if the file was skipped — so the results can be
    summed/averaged by the caller.

    BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit, making the parallel run impossible to interrupt cleanly; it is
    narrowed to `except Exception`.
    """
    try:
        img = Image.open(f)
        if img.mode == 'RGB':
            img = resize_smaller_dim(img)
            if crop:
                img = crop_square(img, 'center')
            img.save(re.sub(input_dir, output_dir, f))
            return 1
        else:
            print(f'Skipping {f}')
            return 0
    except Exception:
        # Unreadable/corrupt image or failed save: best-effort skip.
        print(f'Skipping {f}')
        return 0
@click.command()
@click.option('--input_dir', help='Input directory of images.', required=True)
@click.option('--output_dir', help='Output directory of images.', required=True)
@click.option('--file_path', default='/home/matt/repos/ohia.ai/data', help='Absolute path to data directories.')
@click.option('--min_count', default=1, help='Minimum numbers of images needed to create a class.')
@click.option('--n_thread', default=1, help='Number of threads to use.')
@click.option('--crop', default=False, help='Either: "center", "triangular" or "uniform".')
def main(input_dir, output_dir, file_path, min_count, n_thread, crop):
    """Filter, resize and save a directory tree of JPEG images.

    Keeps only classes (parent directory names) with more than `min_count`
    images, then resizes/crops each kept image in parallel.

    BUG FIX: a `--file_path` option is declared but `file_path` was missing
    from the callback signature, so click raised a TypeError (unexpected
    keyword argument) and the body referenced an undefined name.
    """
    # get list of images
    file_list = glob.glob(f'{file_path}/{input_dir}/**/*.jpg', recursive=True)

    # filter images: drop classes with too few examples
    label_list = [re.split('/', f)[-2] for f in file_list]
    label_counts = Counter(label_list)
    filtered_labels = [k for k, v in label_counts.items() if v > min_count]
    # Use a set for O(1) membership instead of scanning the label list.
    kept = set(filtered_labels)
    filtered_file_list = [f for f, n in zip(file_list, label_list) if n in kept]

    # create subdirectories, one per kept class
    make_dir(f'{file_path}/{output_dir}')
    for dir_name in filtered_labels:
        make_dir(f'{file_path}/{output_dir}/{dir_name}')

    # resize and save (in parallel)
    pool = multiprocessing.Pool(n_thread)
    f = partial(resize_crop_and_save, input_dir=input_dir, output_dir=output_dir, crop=crop)
    successes = pool.map(f, filtered_file_list)
    pool.close()
    print(f'{np.sum(successes)} resized ({100*np.mean(successes)}%)')


if __name__ == '__main__':
    """
    python preprocess.py \
        --input_dir raw_images \
        --output_dir filtered_images
    """
    main()
import tensorflow as tf
import numpy as np
import collections
from malaya_speech.utils import featurization
from malaya_speech.model.frame import Frame
from malaya_speech.utils.padding import (
sequence_nd as padding_sequence_nd,
sequence_1d,
)
from malaya_speech.utils.char import decode as char_decode
from malaya_speech.utils.subword import (
decode as subword_decode,
encode as subword_encode,
decode_multilanguage,
get_index_multilanguage,
align_multilanguage,
)
from malaya_speech.utils.execute import execute_graph
from malaya_speech.utils.activation import softmax
from malaya_speech.utils.featurization import universal_mel
from malaya_speech.utils.read import resample
from malaya_speech.utils.speechsplit import (
quantize_f0_numpy,
get_f0_sptk,
get_fo_pyworld,
)
from malaya_speech.utils.constant import MEL_MEAN, MEL_STD
# (score, prediction, states) triple carried through the transducer beam
# search (see Transducer._beam_decoder).
BeamHypothesis = collections.namedtuple(
    'BeamHypothesis', ('score', 'prediction', 'states')
)
class Abstract:
    """Shared base for frozen-graph model wrappers.

    Subclasses set ``__name__``/``__model__`` plus the session and node maps;
    `_execute` runs the underlying graph.
    """

    def __str__(self):
        return f'<{self.__name__}: {self.__model__}>'

    def _execute(self, inputs, input_labels, output_labels):
        # Feed `inputs` into the graph nodes named by `input_labels` and
        # fetch the nodes named by `output_labels`.
        return execute_graph(
            input_nodes=self._input_nodes,
            output_nodes=self._output_nodes,
            inputs=inputs,
            input_labels=input_labels,
            output_labels=output_labels,
            sess=self._sess,
        )
class Speakernet(Abstract):
    """Speaker-embedding model that pads inputs with explicit lengths."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        vectorizer,
        sess,
        model,
        extra,
        label,
        name,
    ):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._vectorizer = vectorizer
        self._sess = sess
        self._extra = extra
        self.labels = label

    def vectorize(self, inputs):
        """
        Vectorize inputs.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: np.array
            returned [B, D].
        """
        arrays = [i.array if isinstance(i, Frame) else i for i in inputs]
        features = [self._vectorizer(a) for a in arrays]
        padded, lengths = padding_sequence_nd(features, dim=0, return_len=True)
        r = self._execute(
            inputs=[padded, lengths],
            input_labels=['Placeholder', 'Placeholder_1'],
            output_labels=['logits'],
        )
        return r['logits']

    def __call__(self, inputs):
        return self.vectorize(inputs)
class Speaker2Vec(Abstract):
    """Speaker-embedding model fed with padded 2D features (no lengths)."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        vectorizer,
        sess,
        model,
        extra,
        label,
        name,
    ):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._vectorizer = vectorizer
        self._sess = sess
        self._extra = extra
        self.labels = label

    def vectorize(self, inputs):
        """
        Vectorize inputs.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: np.array
            returned [B, D].
        """
        arrays = [i.array if isinstance(i, Frame) else i for i in inputs]
        features = [self._vectorizer(a, **self._extra) for a in arrays]
        # deep-speaker features are padded along a different axis.
        dim = 0 if self.__model__ == 'deep-speaker' else 1
        padded = padding_sequence_nd(features, dim=dim)
        padded = np.expand_dims(padded, -1)
        r = self._execute(
            inputs=[padded],
            input_labels=['Placeholder'],
            output_labels=['logits'],
        )
        return r['logits']

    def __call__(self, inputs):
        return self.vectorize(inputs)
class SpeakernetClassification(Abstract):
    """Classification head over Speakernet-style features."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        vectorizer,
        sess,
        model,
        extra,
        label,
        name,
    ):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._vectorizer = vectorizer
        self._sess = sess
        self._extra = extra
        self.labels = label

    def predict_proba(self, inputs):
        """
        Predict inputs, will return probability.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: np.array
            returned [B, D].
        """
        arrays = [i.array if isinstance(i, Frame) else i for i in inputs]
        features = [self._vectorizer(a) for a in arrays]
        padded, lengths = padding_sequence_nd(features, dim=0, return_len=True)
        r = self._execute(
            inputs=[padded, lengths],
            input_labels=['Placeholder', 'Placeholder_1'],
            output_labels=['logits'],
        )
        return softmax(r['logits'], axis=-1)

    def predict(self, inputs):
        """
        Predict inputs, will return labels.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: List[str]
            returned [B].
        """
        best = np.argmax(self.predict_proba(inputs), axis=1)
        return [self.labels[i] for i in best]

    def __call__(self, input):
        """
        Predict input, will return label.

        Parameters
        ----------
        inputs: np.array
            np.array or malaya_speech.model.frame.Frame.

        Returns
        -------
        result: str
        """
        return self.predict([input])[0]
class Classification(Abstract):
    """Classification head over Speaker2Vec-style padded features."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        vectorizer,
        sess,
        model,
        extra,
        label,
        name,
    ):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._vectorizer = vectorizer
        self._sess = sess
        self._extra = extra
        self.labels = label

    def predict_proba(self, inputs):
        """
        Predict inputs, will return probability.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: np.array
            returned [B, D].
        """
        arrays = [i.array if isinstance(i, Frame) else i for i in inputs]
        features = [self._vectorizer(a, **self._extra) for a in arrays]
        # deep-speaker features are padded along a different axis.
        dim = 0 if self.__model__ == 'deep-speaker' else 1
        padded = padding_sequence_nd(features, dim=dim)
        padded = np.expand_dims(padded, -1)
        r = self._execute(
            inputs=[padded],
            input_labels=['Placeholder'],
            output_labels=['logits'],
        )
        return softmax(r['logits'], axis=-1)

    def predict(self, inputs):
        """
        Predict inputs, will return labels.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: List[str]
            returned [B].
        """
        best = np.argmax(self.predict_proba(inputs), axis=1)
        return [self.labels[i] for i in best]

    def __call__(self, input):
        """
        Predict input, will return label.

        Parameters
        ----------
        inputs: np.array
            np.array or malaya_speech.model.frame.Frame.

        Returns
        -------
        result: str
        """
        return self.predict([input])[0]
class UNET(Abstract):
    """Mel-spectrogram enhancement model (residual UNET over scaled mels)."""

    def __init__(self, input_nodes, output_nodes, sess, model, name):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._sess = sess

    def predict(self, inputs):
        """
        Enhance inputs, will return melspectrogram.

        Parameters
        ----------
        inputs: List[np.array]

        Returns
        -------
        result: List
        """
        arrays = [i.array if isinstance(i, Frame) else i for i in inputs]
        mels = [featurization.scale_mel(a).T for a in arrays]
        x, lens = padding_sequence_nd(mels, maxlen=256, dim=0, return_len=True)
        r = self._execute(
            inputs=[x],
            input_labels=['Placeholder'],
            output_labels=['logits'],
        )
        logits = r['logits']
        # The network predicts a residual; add it back and unscale, trimming
        # each result to its original (unpadded) length.
        return [
            featurization.unscale_mel(
                x[i, : lens[i]].T + logits[i, : lens[i], :, 0].T
            )
            for i in range(len(x))
        ]

    def __call__(self, inputs):
        return self.predict(inputs)
class UNETSTFT(Abstract):
    """Source-separation model returning one waveform per instrument."""

    def __init__(
        self, input_nodes, output_nodes, instruments, sess, model, name
    ):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._instruments = instruments
        self._sess = sess

    def predict(self, input):
        """
        Enhance inputs, will return waveform.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.

        Returns
        -------
        result: Dict
        """
        if isinstance(input, Frame):
            input = input.array
        r = self._execute(
            inputs=[input],
            input_labels=['Placeholder'],
            output_labels=list(self._output_nodes.keys()),
        )
        # One output head per instrument, named logits_0, logits_1, ...
        return {
            instrument: r[f'logits_{no}']
            for no, instrument in enumerate(self._instruments)
        }

    def __call__(self, input):
        """
        Enhance inputs, will return waveform.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.

        Returns
        -------
        result: Dict
        """
        return self.predict(input)
class UNET1D(Abstract):
    """Waveform-to-waveform enhancement model operating on raw samples."""

    def __init__(self, input_nodes, output_nodes, sess, model, name):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._sess = sess

    def predict(self, input):
        """
        Enhance inputs, will return waveform.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.

        Returns
        -------
        result: np.array
        """
        if isinstance(input, Frame):
            input = input.array
        out = self._execute(
            inputs=[input],
            input_labels=['Placeholder'],
            output_labels=['logits'],
        )
        return out['logits']

    def __call__(self, input):
        """
        Enhance inputs, will return waveform.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.

        Returns
        -------
        result: np.array
        """
        return self.predict(input)
class Transducer(Abstract):
    """RNN-Transducer speech-to-text wrapper with greedy and beam decoding,
    optionally over stacked (multilanguage) vocabularies."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        featurizer,
        vocab,
        time_reduction_factor,
        sess,
        model,
        name,
        wavs,
        stack=False,
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        # Silence padding prepended/appended to every utterance (_get_inputs).
        self._back_pad = np.zeros(shape=(2000,))
        self._front_pad = np.zeros(shape=(200,))
        self._featurizer = featurizer
        self._vocab = vocab
        self._time_reduction_factor = time_reduction_factor
        self._sess = sess
        self.__model__ = model
        self.__name__ = name
        # Reference wavs appended to small batches (see _get_inputs).
        self._wavs = wavs
        # `stack` True means `vocab` is a list of vocabularies (multilanguage).
        self._stack = stack
        if self._stack:
            self._len_vocab = [l.vocab_size for l in self._vocab]

    def _check_decoder(self, decoder, beam_size):
        # Normalize and validate the decoder mode; raises ValueError otherwise.
        decoder = decoder.lower()
        if decoder not in ['greedy', 'beam']:
            raise ValueError('mode only supports [`greedy`, `beam`]')
        if beam_size < 1:
            raise ValueError('beam_size must bigger than 0')
        return decoder

    def _get_inputs(self, inputs):
        # Unwrap Frame objects into raw arrays.
        inputs = [
            input.array if isinstance(input, Frame) else input
            for input in inputs
        ]
        # `index` marks how many rows of the batch belong to the caller.
        index = len(inputs)

        # pretty hacky, result from single batch is not good caused by batchnorm.
        # have to append extra random wavs
        if len(inputs) < len(self._wavs) + 1:
            inputs = inputs + self._wavs[:(len(self._wavs) + 1) - len(inputs)]

        # padded, lens = sequence_1d(inputs, return_len=True)
        # padded = np.concatenate([self._front_pad, padded, self._back_pad], axis=-1)
        # lens = [l + len(self._back_pad) + len(self._front_pad) for l in lens]

        # Pad each wav with leading/trailing silence, then pad the batch to
        # the longest utterance.
        inputs = [np.concatenate([self._front_pad, wav, self._back_pad], axis=-1) for wav in inputs]
        padded, lens = sequence_1d(inputs, return_len=True)
        return padded, lens, index

    def _combined_indices(
        self, subwords, ids, l, reduction_factor=160, sample_rate=16000
    ):
        # Merge runs of subwords (delimited by None sentinels in `ids`) into
        # words with start/end timestamps taken from the frame times `l`.
        # NOTE(review): a leading None in `ids` with `temp_r` still empty
        # would fall into the else-branch and index `l[None]` — presumably
        # the decoders never emit a leading None; confirm.
        result, temp_l, temp_r = [], [], []
        for i in range(len(subwords)):
            if ids[i] is None and len(temp_r):
                data = {
                    'text': ''.join(temp_l),
                    'start': round(temp_r[0], 4),
                    'end': round(
                        temp_r[-1] + (reduction_factor / sample_rate), 4
                    ),
                }
                result.append(data)
                temp_l, temp_r = [], []
            else:
                temp_l.append(subwords[i])
                temp_r.append(l[ids[i]])
        # Flush the trailing word, if any.
        if len(temp_l):
            data = {
                'text': ''.join(temp_l),
                'start': round(temp_r[0], 4),
                'end': round(temp_r[-1] + (reduction_factor / sample_rate), 4),
            }
            result.append(data)
        return result

    def predict_alignment(self, input, combined=True):
        """
        Transcribe input and get timestamp, only support greedy decoder.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.
        combined: bool, optional (default=True)
            If True, will combined subwords to become a word.

        Returns
        -------
        result: List[Dict[text, start, end]]
        """
        padded, lens, index = self._get_inputs([input])
        r = self._execute(
            inputs=[padded, lens],
            input_labels=['X_placeholder', 'X_len_placeholder'],
            output_labels=['non_blank_transcript', 'non_blank_stime'],
        )
        non_blank_transcript = r['non_blank_transcript']
        non_blank_stime = r['non_blank_stime']
        if combined:
            # Let the vocabulary join subwords into words and report indices.
            if self._stack:
                words, indices = align_multilanguage(
                    self._vocab, non_blank_transcript, get_index=True
                )
            else:
                words, indices = self._vocab.decode(
                    non_blank_transcript, get_index=True
                )
        else:
            # Emit each subword separately; the interleaved None sentinels
            # delimit tokens for _combined_indices.
            words, indices = [], []
            for no, ids in enumerate(non_blank_transcript):
                if self._stack:
                    last_index, v = get_index_multilanguage(ids, self._vocab, self._len_vocab)
                    w = self._vocab[last_index]._id_to_subword(v - 1)
                else:
                    w = self._vocab._id_to_subword(ids - 1)
                if isinstance(w, bytes):
                    w = w.decode()
                words.extend([w, None])
                indices.extend([no, None])
        return self._combined_indices(words, indices, non_blank_stime)

    def greedy_decoder(self, inputs):
        """
        Transcribe inputs, will return list of strings.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: List[str]
        """
        padded, lens, index = self._get_inputs(inputs)
        results = []
        r = self._execute(
            inputs=[padded, lens],
            input_labels=['X_placeholder', 'X_len_placeholder'],
            output_labels=['greedy_decoder'],
        )['greedy_decoder']
        # Only the first `index` rows belong to the caller (the rest are the
        # batchnorm filler wavs); keep only positive token ids.
        for row in r[:index]:
            if self._stack:
                d = decode_multilanguage(self._vocab, row[row > 0])
            else:
                d = subword_decode(self._vocab, row[row > 0])
            results.append(d)
        return results

    def _beam_decoder(
        self, enc, total, initial_states, beam_width=10, norm_score=True
    ):
        # Beam search over `total` encoder frames; each step expands the
        # current best hypothesis via the prediction/joint network in the
        # graph ('ytu' = per-token log-probs, 'new_states' = RNN state).
        kept_hyps = [
            BeamHypothesis(
                score=0.0, prediction=[0], states=initial_states
            )
        ]
        B = kept_hyps
        for i in range(total):
            A = B
            B = []
            while True:
                y_hat = max(A, key=lambda x: x.score)
                A.remove(y_hat)
                r = self._execute(
                    inputs=[enc[i], y_hat.prediction[-1], y_hat.states],
                    input_labels=[
                        'encoded_placeholder',
                        'predicted_placeholder',
                        'states_placeholder',
                    ],
                    output_labels=['ytu', 'new_states'],
                )
                ytu_, new_states_ = r['ytu'], r['new_states']
                for k in range(ytu_.shape[0]):
                    beam_hyp = BeamHypothesis(
                        score=(y_hat.score + float(ytu_[k])),
                        prediction=y_hat.prediction,
                        states=y_hat.states,
                    )
                    # k == 0 keeps the hypothesis unchanged (presumably the
                    # blank token — confirm); other k extend the prediction
                    # and advance the decoder state.
                    if k == 0:
                        B.append(beam_hyp)
                    else:
                        beam_hyp = BeamHypothesis(
                            score=beam_hyp.score,
                            prediction=(beam_hyp.prediction + [int(k)]),
                            states=new_states_,
                        )
                        A.append(beam_hyp)
                if len(B) > beam_width:
                    break
            if norm_score:
                # Length-normalized scores avoid favoring short hypotheses.
                kept_hyps = sorted(
                    B, key=lambda x: x.score / len(x.prediction), reverse=True
                )[:beam_width]
            else:
                kept_hyps = sorted(B, key=lambda x: x.score, reverse=True)[
                    :beam_width
                ]
        return kept_hyps[0].prediction

    def beam_decoder(self, inputs, beam_size: int = 5):
        """
        Transcribe inputs, will return list of strings.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].
        beam_size: int, optional (default=5)
            beam size for beam decoder.

        Returns
        -------
        result: List[str]
        """
        padded, lens, index = self._get_inputs(inputs)
        results = []
        # Encode once; the beam search then runs frame-by-frame per utterance.
        r = self._execute(
            inputs=[padded, lens],
            input_labels=['X_placeholder', 'X_len_placeholder'],
            output_labels=['encoded', 'padded_lens', 'initial_states'],
        )
        encoded_, padded_lens_, s = (
            r['encoded'],
            r['padded_lens'],
            r['initial_states'],
        )
        # The encoder downsamples time; convert sample lengths to frames.
        padded_lens_ = padded_lens_ // self._time_reduction_factor
        for i in range(index):
            r = self._beam_decoder(
                enc=encoded_[i],
                total=padded_lens_[i],
                initial_states=s,
                beam_width=beam_size,
            )
            if self._stack:
                d = decode_multilanguage(self._vocab, r)
            else:
                d = subword_decode(self._vocab, r)
            results.append(d)
        return results

    def predict(
        self, inputs, decoder: str = 'greedy', beam_size: int = 5, **kwargs
    ):
        """
        Transcribe inputs, will return list of strings.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].
        decoder: str, optional (default='greedy')
            decoder mode, allowed values:

            * ``'greedy'`` - will call self.greedy_decoder
            * ``'beam'`` - will call self.beam_decoder
        beam_size: int, optional (default=5)
            beam size for beam decoder.

        Returns
        -------
        result: List[str]
        """
        decoder = self._check_decoder(decoder, beam_size)
        if decoder == 'greedy':
            return self.greedy_decoder(inputs)
        else:
            return self.beam_decoder(inputs, beam_size=beam_size)

    def __call__(self, input, decoder: str = 'greedy', **kwargs):
        """
        Transcribe input, will return a string.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.
        decoder: str, optional (default='greedy')
            decoder mode, allowed values:

            * ``'greedy'`` - greedy decoder.
            * ``'beam'`` - beam decoder.
        **kwargs: keyword arguments passed to `predict`.

        Returns
        -------
        result: str
        """
        return self.predict([input], decoder=decoder, **kwargs)[0]
class Vocoder(Abstract):
    """Mel-to-waveform vocoder wrapper."""

    def __init__(self, input_nodes, output_nodes, sess, model, name):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._sess = sess

    def predict(self, inputs):
        """
        Change Mel to Waveform.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].

        Returns
        -------
        result: List
        """
        arrays = [i.array if isinstance(i, Frame) else i for i in inputs]
        padded, _ = sequence_1d(arrays, return_len=True)
        out = self._execute(
            inputs=[padded],
            input_labels=['Placeholder'],
            output_labels=['logits'],
        )
        # Drop the trailing channel dimension.
        return out['logits'][:, :, 0]

    def __call__(self, input):
        return self.predict([input])[0]
class Tacotron(Abstract):
    """Tacotron text-to-mel model with attention alignments."""

    def __init__(
        self, input_nodes, output_nodes, normalizer, stats, sess, model, name
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._normalizer = normalizer
        self._stats = stats
        self._sess = sess
        self.__model__ = model
        self.__name__ = name

    def predict(self, string, **kwargs):
        """
        Change string to Mel.

        Parameters
        ----------
        string: str

        Returns
        -------
        result: Dict[string, decoder-output, postnet-output, universal-output, alignment]
        """
        normalized, ids = self._normalizer.normalize(string, **kwargs)
        outputs = self._execute(
            inputs=[[ids], [len(ids)]],
            input_labels=['Placeholder', 'Placeholder_1'],
            output_labels=[
                'decoder_output',
                'post_mel_outputs',
                'alignment_histories',
            ],
        )
        mel = outputs['post_mel_outputs'][0]
        # De-normalize with the model's stats, then re-normalize to the
        # universal mel scale.
        universal = (mel * self._stats[1] + self._stats[0] - MEL_MEAN) / MEL_STD
        return {
            'string': normalized,
            'ids': ids,
            'decoder-output': outputs['decoder_output'][0],
            'postnet-output': mel,
            'universal-output': universal,
            'alignment': outputs['alignment_histories'][0],
        }

    def __call__(self, input):
        return self.predict(input)
class Fastspeech(Abstract):
    """FastSpeech text-to-mel model with speed / F0 / energy controls."""

    def __init__(
        self, input_nodes, output_nodes, normalizer, stats, sess, model, name
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._normalizer = normalizer
        self._stats = stats
        self._sess = sess
        self.__model__ = model
        self.__name__ = name

    def predict(
        self,
        string,
        speed_ratio: float = 1.0,
        f0_ratio: float = 1.0,
        energy_ratio: float = 1.0,
        **kwargs,
    ):
        """
        Change string to Mel.

        Parameters
        ----------
        string: str
        speed_ratio: float, optional (default=1.0)
            Increase this variable will increase time voice generated.
        f0_ratio: float, optional (default=1.0)
            Increase this variable will increase frequency, low frequency will generate more deeper voice.
        energy_ratio: float, optional (default=1.0)
            Increase this variable will increase loudness.

        Returns
        -------
        result: Dict[string, decoder-output, postnet-output, universal-output]
        """
        normalized, ids = self._normalizer.normalize(string, **kwargs)
        outputs = self._execute(
            inputs=[[ids], [speed_ratio], [f0_ratio], [energy_ratio]],
            input_labels=[
                'Placeholder',
                'speed_ratios',
                'f0_ratios',
                'energy_ratios',
            ],
            output_labels=['decoder_output', 'post_mel_outputs'],
        )
        mel = outputs['post_mel_outputs'][0]
        # De-normalize with the model's stats, then re-normalize to the
        # universal mel scale.
        universal = (mel * self._stats[1] + self._stats[0] - MEL_MEAN) / MEL_STD
        return {
            'string': normalized,
            'ids': ids,
            'decoder-output': outputs['decoder_output'][0],
            'postnet-output': mel,
            'universal-output': universal,
        }

    def __call__(self, input, **kwargs):
        return self.predict(input, **kwargs)
class FastVC(Abstract):
    """Voice conversion model: make `original_audio` sound like `target_audio`."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        speaker_vector,
        magnitude,
        sess,
        model,
        name,
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._speaker_vector = speaker_vector
        self._magnitude = magnitude
        self._sess = sess
        self.__model__ = model
        self.__name__ = name

    def predict(self, original_audio, target_audio):
        """
        Change original voice audio to follow targeted voice.

        Parameters
        ----------
        original_audio: np.array or malaya_speech.model.frame.Frame
        target_audio: np.array or malaya_speech.model.frame.Frame

        Returns
        -------
        result: Dict[decoder-output, postnet-output]
        """
        # Bug fix: the original unwrapped `input.array` (`input` is the Python
        # builtin here), which raised AttributeError whenever a Frame was
        # passed in.  Unwrap the actual arguments instead.
        original_audio = (
            original_audio.array
            if isinstance(original_audio, Frame)
            else original_audio
        )
        target_audio = (
            target_audio.array if isinstance(target_audio, Frame) else target_audio
        )
        original_mel = universal_mel(original_audio)
        # Speaker embeddings for both sides, post-processed by the magnitude
        # function supplied at construction time.
        original_v = self._magnitude(self._speaker_vector([original_audio])[0])
        target_v = self._magnitude(self._speaker_vector([target_audio])[0])
        r = self._execute(
            inputs=[
                [original_mel],
                [original_v],
                [target_v],
                [len(original_mel)],
            ],
            input_labels=[
                'mel',
                'ori_vector',
                'target_vector',
                'mel_lengths',
            ],
            output_labels=['mel_before', 'mel_after'],
        )
        return {
            'decoder-output': r['mel_before'][0],
            'postnet-output': r['mel_after'][0],
        }

    def __call__(self, original_audio, target_audio):
        return self.predict(original_audio, target_audio)
class Split_Wav(Abstract):
    """Source-separation model that splits a waveform into 4 speakers."""

    def __init__(self, input_nodes, output_nodes, sess, model, name):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._sess = sess
        self.__model__ = model
        self.__name__ = name

    def predict(self, input):
        """
        Split an audio into 4 different speakers.

        Parameters
        ----------
        input: np.array or malaya_speech.model.frame.Frame

        Returns
        -------
        result: np.array
        """
        wav = input.array if isinstance(input, Frame) else input
        # Model expects shape (batch, samples, 1).
        out = self._execute(
            inputs=[np.expand_dims([wav], axis=-1)],
            input_labels=['Placeholder'],
            output_labels=['logits'],
        )
        # (speakers, batch, samples, channel) -> (speakers, samples).
        return out['logits'][:, 0, :, 0]

    def __call__(self, input):
        return self.predict(input)
class Split_Mel(Abstract):
    """Source-separation model that splits audio (via mel features) into 4 speakers."""

    def __init__(self, input_nodes, output_nodes, sess, model, name):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._sess = sess
        self.__model__ = model
        self.__name__ = name

    def _to_mel(self, y):
        # Convert waveform to mel and clamp values at the log(1e-2) floor.
        mel = universal_mel(y)
        mel[mel <= np.log(1e-2)] = np.log(1e-2)
        return mel

    def predict(self, input):
        """
        Split an audio into 4 different speakers.

        Parameters
        ----------
        input: np.array or malaya_speech.model.frame.Frame

        Returns
        -------
        result: np.array
        """
        if isinstance(input, Frame):
            input = input.array
        input = self._to_mel(input)
        # NOTE(review): a single input tensor is paired with TWO input labels
        # ('Placeholder', 'Placeholder_1') — confirm `_execute` tolerates the
        # mismatch or whether a second (length) input is missing here.
        r = self._execute(
            inputs=[input],
            input_labels=['Placeholder', 'Placeholder_1'],
            output_labels=['logits'],
        )
        r = r['logits']
        return r[:, 0]

    def __call__(self, input):
        return self.predict(input)
class Wav2Vec2_CTC(Abstract):
    """Wav2Vec2 speech-to-text model decoded with CTC (greedy / beam / beam + LM)."""

    def __init__(self, input_nodes, output_nodes, vocab, sess, mode, model, name):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._vocab = vocab
        self._sess = sess
        self._mode = mode  # 'char' for character vocab, otherwise subword
        self.__model__ = model
        self.__name__ = name
        # Beam width the graph-mode decoder op was last built with (see _tf_ctc).
        self._beam_size = 1

    def _check_decoder(self, decoder, beam_size):
        # Normalize and validate the decoding mode / beam width.
        decoder = decoder.lower()
        if decoder not in ['greedy', 'beam']:
            raise ValueError('mode only supports [`greedy`, `beam`]')
        if beam_size < 1:
            raise ValueError('beam_size must bigger than 0')
        return decoder

    def _get_logits(self, padded, lens):
        # Run the acoustic model; returns CTC logits and per-utterance lengths.
        r = self._execute(
            inputs=[padded, lens],
            input_labels=['X_placeholder', 'X_len_placeholder'],
            output_labels=['logits', 'seq_lens'],
        )
        return r['logits'], r['seq_lens']

    def _tf_ctc(self, padded, lens, beam_size, **kwargs):
        """Beam-search decode (greedy is beam_size == 1); returns dense int32 ids."""
        if tf.executing_eagerly():
            logits, seq_lens = self._get_logits(padded, lens)
            decoded = tf.compat.v1.nn.ctc_beam_search_decoder(
                logits,
                seq_lens,
                beam_width=beam_size,
                top_paths=1,
                merge_repeated=True,
                **kwargs,
            )
            preds = tf.sparse.to_dense(tf.compat.v1.to_int32(decoded[0][0]))
        else:
            # Graph mode: build the decoder op lazily and rebuild when the
            # beam width changes.  Bug fix: the op was previously only built
            # when `beam_size != self._beam_size`, so the very first call with
            # beam_size == 1 (greedy) hit a missing `self._decoded` attribute.
            if beam_size != self._beam_size or not hasattr(self, '_decoded'):
                self._beam_size = beam_size
                self._decoded = tf.compat.v1.nn.ctc_beam_search_decoder(
                    self._output_nodes['logits'],
                    self._output_nodes['seq_lens'],
                    beam_width=self._beam_size,
                    top_paths=1,
                    merge_repeated=True,
                    **kwargs,
                )[0][0]
            r = self._sess.run(
                self._decoded,
                feed_dict={
                    self._input_nodes['X_placeholder']: padded,
                    self._input_nodes['X_len_placeholder']: lens,
                },
            )
            # Densify the sparse decode result.
            preds = np.zeros(r.dense_shape, dtype=np.int32)
            for i in range(r.values.shape[0]):
                preds[r.indices[i][0], r.indices[i][1]] = r.values[i]
        return preds

    def predict(
        self, inputs, decoder: str = 'beam', beam_size: int = 100, **kwargs
    ):
        """
        Transcribe inputs, will return list of strings.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].
        decoder: str, optional (default='beam')
            decoder mode, allowed values:

            * ``'greedy'`` - greedy decoder.
            * ``'beam'`` - beam decoder.
        beam_size: int, optional (default=100)
            beam size for beam decoder.

        Returns
        -------
        result: List[str]
        """
        decoder = self._check_decoder(decoder, beam_size)
        inputs = [
            input.array if isinstance(input, Frame) else input
            for input in inputs
        ]
        padded, lens = sequence_1d(inputs, return_len=True)
        if decoder == 'greedy':
            beam_size = 1
        decoded = self._tf_ctc(padded, lens, beam_size, **kwargs)
        results = []
        for i in range(len(decoded)):
            if self._mode == 'char':
                # Character models pad with '<PAD>' tokens; strip them.
                r = char_decode(decoded[i], lookup=self._vocab).replace(
                    '<PAD>', ''
                )
            else:
                # Subword models: drop padding / blank ids (<= 0) before decode.
                r = subword_decode(self._vocab, decoded[i][decoded[i] > 0])
            results.append(r)
        return results

    def predict_lm(self, inputs, lm, beam_size: int = 100, **kwargs):
        """
        Transcribe inputs using Beam Search + LM, will return list of strings.
        This method will not able to utilise batch decoding, instead will do loop to decode for each elements.

        Parameters
        ----------
        inputs: List[np.array]
            List[np.array] or List[malaya_speech.model.frame.Frame].
        lm: ctc_decoders.Scorer
            Returned from `malaya_speech.stt.language_model()`.
        beam_size: int, optional (default=100)
            beam size for beam decoder.

        Returns
        -------
        result: List[str]
        """
        if self._mode != 'char':
            raise ValueError('Model is not character based, not able to use `predict_lm`.')
        try:
            from ctc_decoders import ctc_beam_search_decoder
        except BaseException:
            raise ModuleNotFoundError(
                'ctc_decoders not installed. Please install it by `pip install ctc-decoders` and try again.'
            )
        inputs = [
            input.array if isinstance(input, Frame) else input
            for input in inputs
        ]
        padded, lens = sequence_1d(inputs, return_len=True)
        logits, seq_lens = self._get_logits(padded, lens)
        # ctc_beam_search_decoder expects batch-major probabilities.
        logits = np.transpose(logits, axes=(1, 0, 2))
        logits = softmax(logits)
        results = []
        for i in range(len(logits)):
            d = ctc_beam_search_decoder(
                logits[i][: seq_lens[i]],
                self._vocab,
                beam_size,
                ext_scoring_func=lm,
                **kwargs,
            )
            # Best hypothesis: (score, transcript) pairs sorted by score.
            results.append(d[0][1])
        return results

    def __call__(
        self, input, decoder: str = 'greedy', lm: bool = False, **kwargs
    ):
        """
        Transcribe input, will return a string.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.
        decoder: str, optional (default='greedy')
            decoder mode, allowed values:

            * ``'greedy'`` - greedy decoder.
            * ``'beam'`` - beam decoder.
        lm: bool, optional (default=False)
        **kwargs: keyword arguments passed to `predict` or `predict_lm`.

        Returns
        -------
        result: str
        """
        # NOTE(review): when lm=True, `predict_lm` requires the LM scorer as
        # its `lm` argument, which this dispatch does not supply — confirm how
        # callers are expected to pass the scorer through kwargs.
        if lm:
            method = self.predict_lm
        else:
            method = self.predict
        return method([input], decoder=decoder, **kwargs)[0]
class CTC(Abstract):
    """Thin container binding a frozen CTC graph's nodes to a TF session."""

    def __init__(self, input_nodes, output_nodes, sess, model, name):
        self.__model__ = model
        self.__name__ = name
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._sess = sess
class FastSpeechSplit(Abstract):
    """SpeechSplit-style voice conversion with separable rhythm (R), pitch (F)
    and speaker identity (U) transfer."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        speaker_vector,
        gender_model,
        sess,
        model,
        name,
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._speaker_vector = speaker_vector
        self._gender_model = gender_model
        self._sess = sess
        self.__model__ = model
        self.__name__ = name
        self._modes = {'R', 'F', 'U', 'RF', 'RU', 'FU', 'RFU'}
        # F0 search ranges (Hz) per predicted gender.
        self._freqs = {'female': [100, 600], 'male': [50, 250]}

    def _get_data(self, x, sr=22050, target_sr=16000):
        # Speaker vector / gender model operate on 16 kHz audio.
        x_16k = resample(x, sr, target_sr)
        if self._gender_model is not None:
            gender = self._gender_model(x_16k)
            lo, hi = self._freqs.get(gender, [50, 250])
            f0 = get_f0_sptk(x, lo, hi)
        else:
            f0 = get_fo_pyworld(x)
        f0 = np.expand_dims(f0, -1)
        mel = universal_mel(x)
        # Normalize the speaker embedding by its max component.
        v = self._speaker_vector([x_16k])[0]
        v = v / v.max()
        # Keep the mel and F0 tracks the same length.
        if len(mel) > len(f0):
            mel = mel[: len(f0)]
        return x, mel, f0, v

    def predict(
        self,
        original_audio,
        target_audio,
        modes=['R', 'F', 'U', 'RF', 'RU', 'FU', 'RFU'],
    ):
        """
        Change original voice audio to follow targeted voice.

        Parameters
        ----------
        original_audio: np.array or malaya_speech.model.frame.Frame
        target_audio: np.array or malaya_speech.model.frame.Frame
        modes: List[str], optional (default = ['R', 'F', 'U', 'RF', 'RU', 'FU', 'RFU'])
            R denotes rhythm, F denotes pitch target, U denotes speaker target (vector).

            * ``'R'`` - maintain `original_audio` F and U on `target_audio` R.
            * ``'F'`` - maintain `original_audio` R and U on `target_audio` F.
            * ``'U'`` - maintain `original_audio` R and F on `target_audio` U.
            * ``'RF'`` - maintain `original_audio` U on `target_audio` R and F.
            * ``'RU'`` - maintain `original_audio` F on `target_audio` R and U.
            * ``'FU'`` - maintain `original_audio` R on `target_audio` F and U.
            * ``'RFU'`` - no conversion happened, just do encoder-decoder on `target_audio`

        Returns
        -------
        result: Dict[modes]
        """
        s = set(modes) - self._modes
        if len(s):
            raise ValueError(
                f"{list(s)} not an element of ['R', 'F', 'U', 'RF', 'RU', 'FU', 'RFU']"
            )
        # Bug fix: the original unwrapped `input.array` (`input` is the Python
        # builtin here), which raised AttributeError whenever a Frame was
        # passed in.  Unwrap the actual arguments instead.
        original_audio = (
            original_audio.array
            if isinstance(original_audio, Frame)
            else original_audio
        )
        target_audio = (
            target_audio.array if isinstance(target_audio, Frame) else target_audio
        )
        wav, mel, f0, v = self._get_data(original_audio)
        wav_1, mel_1, f0_1, v_1 = self._get_data(target_audio)
        mels, mel_lens = padding_sequence_nd(
            [mel, mel_1], dim=0, return_len=True
        )
        f0s, f0_lens = padding_sequence_nd(
            [f0, f0_1], dim=0, return_len=True
        )
        # One-hot quantized F0 contours for source (org) and target (trg).
        f0_org_quantized = quantize_f0_numpy(f0s[0, :, 0])[0]
        f0_org_onehot = f0_org_quantized[np.newaxis, :, :]
        uttr_f0_org = np.concatenate([mels[:1], f0_org_onehot], axis=-1)
        f0_trg_quantized = quantize_f0_numpy(f0s[1, :, 0])[0]
        f0_trg_onehot = f0_trg_quantized[np.newaxis, :, :]
        # Predict the converted F0 contour conditioned on the target F0.
        r = self._execute(
            inputs=[mels[:1], f0_trg_onehot, [len(f0s[0])]],
            input_labels=['X', 'f0_onehot', 'len_X'],
            output_labels=['f0_target'],
        )
        f0_pred = r['f0_target']
        f0_pred_quantized = f0_pred.argmax(axis=-1).squeeze(0)
        f0_con_onehot = np.zeros_like(f0_pred)
        f0_con_onehot[0, np.arange(f0_pred.shape[1]), f0_pred_quantized] = 1
        uttr_f0_trg = np.concatenate([mels[:1], f0_con_onehot], axis=-1)
        results = {}
        for condition in modes:
            # Select (content+F0 input, speaker vector, rhythm source) per mode.
            if condition == 'R':
                uttr_f0_, v_, x_ = uttr_f0_org, v, mels[1:]
            elif condition == 'F':
                uttr_f0_, v_, x_ = uttr_f0_trg, v, mels[:1]
            elif condition == 'U':
                uttr_f0_, v_, x_ = uttr_f0_org, v_1, mels[:1]
            elif condition == 'RF':
                uttr_f0_, v_, x_ = uttr_f0_trg, v, mels[1:]
            elif condition == 'RU':
                uttr_f0_, v_, x_ = uttr_f0_org, v_1, mels[:1]
            elif condition == 'FU':
                uttr_f0_, v_, x_ = uttr_f0_trg, v_1, mels[:1]
            else:  # 'RFU' — validated above, so this is the only remaining mode
                uttr_f0_, v_, x_ = uttr_f0_trg, v_1, mels[:1]
            r = self._execute(
                inputs=[uttr_f0_, x_, [v_], [len(f0s[0])]],
                input_labels=['uttr_f0', 'X', 'V', 'len_X'],
                output_labels=['mel_outputs'],
            )
            mel_outputs = r['mel_outputs'][0]
            # Rhythm transfer ('R' in mode) follows the target's length.
            if 'R' in condition:
                length = mel_lens[1]
            else:
                length = mel_lens[0]
            results[condition] = mel_outputs[:length]
        return results
class Fastpitch(Abstract):
    """FastPitch text-to-mel model with speed and pitch controls."""

    def __init__(
        self, input_nodes, output_nodes, normalizer, stats, sess, model, name
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._normalizer = normalizer
        self._stats = stats
        self._sess = sess
        self.__model__ = model
        self.__name__ = name

    def predict(
        self,
        string,
        speed_ratio: float = 1.0,
        pitch_ratio: float = 1.0,
        pitch_addition: float = 0.0,
        **kwargs,
    ):
        """
        Change string to Mel.

        Parameters
        ----------
        string: str
        speed_ratio: float, optional (default=1.0)
            Increase this variable will increase time voice generated.
        pitch_ratio: float, optional (default=1.0)
            pitch = pitch * pitch_ratio, amplify existing pitch contour.
        pitch_addition: float, optional (default=0.0)
            pitch = pitch + pitch_addition, change pitch contour.

        Returns
        -------
        result: Dict[string, decoder-output, postnet-output, pitch-output, universal-output]
        """
        normalized, ids = self._normalizer.normalize(string, **kwargs)
        outputs = self._execute(
            inputs=[[ids], [speed_ratio], [pitch_ratio], [pitch_addition]],
            input_labels=[
                'Placeholder',
                'speed_ratios',
                'pitch_ratios',
                'pitch_addition',
            ],
            output_labels=['decoder_output', 'post_mel_outputs', 'pitch_outputs'],
        )
        mel = outputs['post_mel_outputs'][0]
        # De-normalize with the model's stats, then re-normalize to the
        # universal mel scale.
        universal = (mel * self._stats[1] + self._stats[0] - MEL_MEAN) / MEL_STD
        return {
            'string': normalized,
            'ids': ids,
            'decoder-output': outputs['decoder_output'][0],
            'postnet-output': mel,
            'pitch-output': outputs['pitch_outputs'][0],
            'universal-output': universal,
        }

    def __call__(self, input, **kwargs):
        return self.predict(input, **kwargs)
class TransducerAligner(Abstract):
    """Forced aligner: aligns a known transcription against audio using an
    RNN-T (transducer) model, returning word/subword time boundaries."""

    def __init__(
        self,
        input_nodes,
        output_nodes,
        featurizer,
        vocab,
        time_reduction_factor,
        sess,
        model,
        name,
        wavs,
        dummy_sentences,
    ):
        self._input_nodes = input_nodes
        self._output_nodes = output_nodes
        self._featurizer = featurizer
        self._vocab = vocab
        self._time_reduction_factor = time_reduction_factor
        self._sess = sess
        self.__model__ = model
        self.__name__ = name
        # Extra (audio, sentence) filler pairs used to pad tiny batches.
        self._wavs = wavs
        self._dummy_sentences = dummy_sentences

    def _get_inputs(self, inputs, texts):
        # Unwrap Frame objects into raw arrays.
        inputs = [
            input.array if isinstance(input, Frame) else input
            for input in inputs
        ]
        # Remember how many entries are real; the rest are filler.
        index = len(inputs)
        # pretty hacky, result from single batch is not good caused by batchnorm.
        # have to append extra random wavs
        if len(inputs) < len(self._wavs) + 1:
            inputs = inputs + self._wavs[:(len(self._wavs) + 1) - len(inputs)]
            texts = texts + self._dummy_sentences
        padded, lens = sequence_1d(inputs, return_len=True)
        targets = [subword_encode(self._vocab, t) for t in texts]
        targets_padded, targets_lens = sequence_1d(targets, return_len=True)
        return padded, lens, targets_padded, targets_lens, index

    def _combined_indices(
        self, subwords, ids, l, reduction_factor=160, sample_rate=16000
    ):
        # Merge consecutive subwords into segments; a None id marks a segment
        # boundary.  `l` maps token index -> start time (seconds).
        # NOTE(review): if ids[0] is None this indexes l[None] — presumably
        # the first id is always a real index; confirm with callers.
        result, temp_l, temp_r = [], [], []
        for i in range(len(subwords)):
            if ids[i] is None and len(temp_r):
                data = {
                    'text': ''.join(temp_l),
                    'start': round(temp_r[0], 4),
                    # End extends one reduced frame past the last start time.
                    'end': round(
                        temp_r[-1] + (reduction_factor / sample_rate), 4
                    ),
                }
                result.append(data)
                temp_l, temp_r = [], []
            else:
                temp_l.append(subwords[i])
                temp_r.append(l[ids[i]])

        # Flush the trailing segment, if any.
        if len(temp_l):
            data = {
                'text': ''.join(temp_l),
                'start': round(temp_r[0], 4),
                'end': round(temp_r[-1] + (reduction_factor / sample_rate), 4),
            }
            result.append(data)
        return result

    def predict(self, input, transcription: str):
        """
        Transcribe input, will return a string.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.
        transcription: str
            transcription of input audio

        Returns
        -------
        result: Dict[words_alignment, subwords_alignment, subwords, alignment]
        """
        padded, lens, targets_padded, targets_lens, index = self._get_inputs([input],
                                                                             [transcription])
        r = self._execute(
            inputs=[padded, lens, targets_padded, targets_lens],
            input_labels=['X_placeholder', 'X_len_placeholder', 'subwords', 'subwords_lens'],
            output_labels=['non_blank_transcript', 'non_blank_stime', 'decoded', 'alignment'],
        )
        non_blank_transcript = r['non_blank_transcript']
        non_blank_stime = r['non_blank_stime']
        decoded = r['decoded']
        alignment = r['alignment']
        # Word-level segments: vocab decode with token indices preserved.
        words, indices = self._vocab.decode(
            non_blank_transcript, get_index=True
        )
        words_alignment = self._combined_indices(words, indices, non_blank_stime)
        # Subword-level segments: interleave None so every subword is its own
        # segment in _combined_indices.
        words, indices = [], []
        for no, ids in enumerate(non_blank_transcript):
            w = self._vocab._id_to_subword(ids - 1)
            if isinstance(w, bytes):
                w = w.decode()
            words.extend([w, None])
            indices.extend([no, None])
        subwords_alignment = self._combined_indices(words, indices, non_blank_stime)
        subwords_ = [self._vocab._id_to_subword(ids - 1) for ids in decoded[decoded > 0]]
        subwords_ = [s.decode() if isinstance(s, bytes) else s for s in subwords_]
        # Keep only the alignment columns for this utterance's target tokens.
        alignment = alignment[:, targets_padded[0, :targets_lens[0]]].T
        return {'words_alignment': words_alignment,
                'subwords_alignment': subwords_alignment,
                'subwords': subwords_,
                'alignment': alignment}

    def __call__(self, input, transcription: str):
        """
        Transcribe input, will return a string.

        Parameters
        ----------
        input: np.array
            np.array or malaya_speech.model.frame.Frame.
        transcription: str
            transcription of input audio

        Returns
        -------
        result: Dict[words_alignment, subwords_alignment, subwords, alignment]
        """
        return self.predict(input, transcription)
|
from model.unet import UNet
from tools.data import train_generator, test_generator, save_results, is_file, prepare_dataset
# Bug fix: `DataGenerator` was imported twice; keep a single import.
from tools.generator3 import DataGenerator

import os

import cv2
import numpy as np

# TODO: move to config .json files
img_height = 576
img_width = 729
img_size = (img_height, img_width)

train_path = 'S:/studenten/Rausch/06_Studienarbeit/03_CNN/generate_data/data/train_Graph_without_helpernodes'
test_path = 'S:/studenten/Rausch/06_Studienarbeit/03_CNN/generate_data/data/train_Graph_without_helpernodes/test'
save_path = '/Users/vsevolod.konyahin/Desktop/DataSet/results'

model_name = 'unet_model.hdf5'
model_weights_name = 'graphnet_weight_model.hdf5'
image_folder = 'image'
mask_folder = 'label'

max_node_dim = 128

if __name__ == "__main__":
    from natsort import natsorted
    import glob
    from tools.utilz_graph import get_sorted_data_names_from_paths

    path_to_image = os.path.join(train_path, image_folder)
    path_to_mask = os.path.join(train_path, mask_folder)
    image_names, mask_names = get_sorted_data_names_from_paths(path_to_image, path_to_mask)

    # Hold out the last `val_fraction` of the naturally-sorted masks for validation.
    val_fraction = 0.1
    masks = glob.glob(path_to_mask + '/*')
    masks = natsorted(masks)
    print('length mask', len(masks))
    val_frac = int(val_fraction * len(masks))
    train_idx = list(range(len(masks) - val_frac))
    val_idx = list(range(len(masks) - val_frac, len(masks)))

    training_generator = DataGenerator(list_IDs=train_idx, path_to_image=path_to_image, path_to_mask=path_to_mask,
                                       image_names=image_names, mask_names=mask_names, max_node_dim=max_node_dim,
                                       batch_size=30)
    validation_generator = DataGenerator(list_IDs=val_idx, path_to_image=path_to_image, path_to_mask=path_to_mask,
                                         image_names=image_names, mask_names=mask_names, max_node_dim=max_node_dim,
                                         batch_size=30)

    # Sanity-check the generators by plotting a couple of sample batches.
    from tools.utilz_analysis import plot_sample_from_train_generator
    for i in range(2):
        plot_sample_from_train_generator(training_generator, batch_nr=i)
|
class ExtraAttributesMixin:
    """Adds extra attributes to a form's fields.

    Each value in the ``extra_attributes`` mapping is attached as
    ``field.extra_attributes`` on the form field stored under the
    corresponding key in the form's ``fields`` dictionary.
    """

    # Mapping of field name -> extra attribute payload; override in subclasses.
    extra_attributes = {}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for key, value in self.extra_attributes.items():
            field = self.fields.get(key)
            # Bug fix: `fields.get(key)` returns None for unknown names, and
            # the original then crashed with AttributeError; skip them instead.
            if field is not None:
                field.extra_attributes = value
import movelist
import NotationParser
from NotationParser import ParseMoveList
from movelist import MoveList

# Build a MoveList and print the punishers it reports.
move_list = MoveList(28)
punishers = move_list.GetPunishers(10, True)
print(punishers)

# moves = move_list.getGameplan(1)
# for move in move_list.gameplan:
#     try:
#         command = ParseMoveList(move_list.getMoveCommand(move))
#     except:
#         print(move[0].text)
from dgllife.data import JTVAEDataset, JTVAECollator
from torch.utils.data import DataLoader
from dgllife.model import load_pretrained
import torch
import numpy as np
#from tqdm import tqdm
from tqdm.notebook import tqdm
class JTNNWapper:
    """Wrapper around a (pretrained) junction-tree VAE (JTNN) that encodes
    SMILES strings to latent vectors and decodes latent vectors back."""

    def __init__(self,
                 model=None):
        if model is None:
            self.model = load_pretrained('JTNN_ZINC')  # Pretrained model loaded
        else:
            self.model = model
        self.model.eval()
        # Temporary file used to feed SMILES into JTVAEDataset.
        self.path_smiles = 'smiles.tmp'

    def _write_smiles(self, smiles_list):
        # JTVAEDataset reads SMILES from a file, one molecule per line.
        with open(self.path_smiles, mode='w') as f:
            f.write("\n".join(smiles_list))

    def _init_loader(self):
        dataset = JTVAEDataset(data=self.path_smiles, vocab=self.model.vocab, training=False)
        self.dataloader = DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            num_workers=0,
            collate_fn=JTVAECollator(False),
        )

    def encode(self, smiles_list):
        """Encode SMILES into latent vectors (tree half + graph half).

        Molecules that fail to encode yield a zero vector, keeping output rows
        aligned with the input list.
        """
        self._write_smiles(smiles_list)
        self._init_loader()
        model = self.model
        v_list = []
        ite = iter(self.dataloader)
        for it in tqdm(range(len(self.dataloader))):
            try:
                batch = next(ite)
                gt_smiles = batch['mol_trees'][0].smiles
                _, tree_vec, mol_vec = model.encode(batch)
                mol_mean = model.G_mean(mol_vec)
                tree_mean = model.T_mean(tree_vec)
                # Following Mueller et al.
                tree_log_var = -torch.abs(model.T_var(tree_vec))
                epsilon = torch.randn(1, model.latent_size // 2)
                # Bug fix: the reparameterization trick is exp(logvar / 2);
                # the original floor-divided the tensor (`logvar // 2`).
                tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
                mol_log_var = -torch.abs(model.G_var(mol_vec))
                epsilon = torch.randn(1, model.latent_size // 2)
                mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
                v1 = tree_vec.detach().numpy().copy()
                v2 = mol_vec.detach().numpy().copy()
                v = np.concatenate([v1, v2], axis=-1)
                v_list.append(v.reshape(-1))
            except Exception:
                # Keep row alignment on failure; previously a bare `except:`.
                print("error")
                v_list.append(np.zeros(model.latent_size, dtype=np.float32))
        return np.array(v_list)

    def decode(self, vecs):
        """Decode latent vectors back into SMILES ('Error' on failure)."""
        half_size = int(self.model.latent_size / 2)
        sm_list = []
        for i, v in enumerate(vecs):
            # Split back into the tree half and the molecular-graph half.
            tree_vec = v[:half_size].reshape(-1, half_size)
            mol_vec = v[half_size:].reshape(-1, half_size)
            tree_vec = torch.from_numpy(tree_vec.astype(np.float32)).clone()
            mol_vec = torch.from_numpy(mol_vec.astype(np.float32)).clone()
            try:
                dec_smiles = self.model.decode(tree_vec, mol_vec)
            except Exception:
                print("error!", i)
                dec_smiles = "Error"
            sm_list.append(dec_smiles)
        return sm_list
"""
read_row_by_row.py
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
This shows a pythonic way to use **Reader** class to go through a single
page spreadsheet row by row. The output is::
[1.0, 2.0, 3.0]
[4.0, 5.0, 6.0]
[7.0, 8.0, 9.0]
Please install pyexcel-xls
"""
import os
import pyexcel
def main(base_dir):
    """Open the example workbook in `base_dir` and print it row by row."""
    # Any supported format works here, e.g. "example.csv", "example.xlsx",
    # "example.ods", "example.xlsm".
    sheet = pyexcel.get_sheet(
        file_name=os.path.join(base_dir, "example.xls")
    )
    # rows() yields the sheet one row at a time; iterating the sheet
    # directly does the same because **Reader** is row-based by default.
    for row in sheet.rows():
        print(row)


if __name__ == '__main__':
    main(os.getcwd())
|
from output.models.nist_data.list_pkg.language.schema_instance.nistschema_sv_iv_list_language_max_length_1_xsd.nistschema_sv_iv_list_language_max_length_1 import NistschemaSvIvListLanguageMaxLength1
__all__ = [
"NistschemaSvIvListLanguageMaxLength1",
]
|
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot-methods/blob/master/LICENSE
import numpy
import uproot_methods.base
class Methods(uproot_methods.base.ROOTMethods):
    """Mixin carrying a trivial demo method on top of ROOTMethods."""

    def hello(self):
        # Same tuple as before: a greeting plus this instance's attribute count.
        attribute_count = len(dir(self))
        return "world", attribute_count
|
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the Plugin object"""
import itertools
import os
import sys
import textwrap
from collections import OrderedDict
import six
import CommonEnvironment
from CommonEnvironment.CallOnExit import CallOnExit
from CommonEnvironment import StringHelpers
from CommonEnvironment import Interface
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
sys.path.insert(0, os.path.join(_script_dir, ".."))
with CallOnExit(lambda: sys.path.pop(0)):
from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase
# ----------------------------------------------------------------------
@Interface.staticderived
class Plugin(PluginBase):
    """Plugin that generates the Shared Library test sources."""

    # ----------------------------------------------------------------------
    # |  Properties
    Name = Interface.DerivedProperty("SharedLibraryTests")
    Description = Interface.DerivedProperty(
        "Generates code used when testing the Shared Library import/export layer",
    )

    # ----------------------------------------------------------------------
    # |  Methods
    @staticmethod
    @Interface.override
    def Generate(
        open_file_func,
        global_custom_structs,
        global_custom_enums,
        data,
        output_dir,
        status_stream,
    ):
        # Drives the whole generation pipeline: preprocess -> common files ->
        # one header per item group.  Returns 0 on success, a non-zero result
        # code otherwise.
        result_code = 0

        status_stream.write("Preprocessing data...")
        with status_stream.DoneManager():
            # Mirror `data`'s nested structure with per-item TypeInfoData.
            type_info_data = []
            for items in data:
                type_info_data.append([TypeInfoData(item, global_custom_structs, global_custom_enums) for item in items])

        status_stream.write("Generating Common Files...")
        with status_stream.DoneManager() as this_dm:
            this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)
            if this_dm.result != 0:
                # Common files are prerequisites; abort on failure.
                return this_dm.result

        for desc, func in [("Generating .h files...", _GenerateHeaderFile)]:
            status_stream.write(desc)
            with status_stream.DoneManager(
                suffix="\n",
            ) as dm:
                for index, (items, items_type_info_data) in enumerate(
                    zip(data, type_info_data),
                ):
                    dm.stream.write(
                        "Processing '{}' ({} of {})...".format(
                            items[0].name,
                            index + 1,
                            len(data),
                        ),
                    )
                    with dm.stream.DoneManager() as this_dm:
                        this_dm.result = func(
                            open_file_func,
                            output_dir,
                            items,
                            items_type_info_data,
                            this_dm.stream,
                        )

                # Negative results are fatal; positive ones are warnings that
                # still allow generation to continue.
                if dm.result < 0:
                    return dm.result

                result_code = result_code or dm.result

        return result_code
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _GenerateHeaderFile(open_file_func, output_dir, items, all_type_info_data, output_stream):
with open_file_func(
os.path.join(output_dir, "SharedLibraryTests_{}.h".format(items[0].name)),
"w",
) as f:
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* Licensed under the MIT License */
/* ---------------------------------------------------------------------- */
#pragma once
#include "SharedLibrary_{name}.h"
#include "Traits.h"
#include "Featurizers/Structs.h"
#include "SharedLibraryTests_Common.hpp"
#if (defined _MSC_VER)
# pragma warning(push)
// I don't know why MSVC thinks that there is unreachable
// code in these methods during release builds.
# pragma warning(disable: 4702) // Unreachable code
# pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
# pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
#endif
""",
).format(
name=items[0].name,
),
)
for item, type_info_data in zip(items, all_type_info_data):
template = getattr(item, "template", None)
if template:
suffix = "_{}_".format(template)
type_desc = " <{}>".format(template)
cpp_template_suffix = "<{}>".format(
type_info_data.InputTypeInfo.CppType,
)
else:
suffix = "_"
type_desc = ""
cpp_template_suffix = ""
if type_info_data.ConfigurationParamTypeInfos:
constructor_template_params = ", typename... ConstructorArgTs"
constructor_params = ",\n ConstructorArgTs &&... constructor_args"
constructor_args = "std::forward<ConstructorArgTs>(constructor_args)..., "
else:
constructor_template_params = ""
constructor_params = ""
constructor_args = ""
fit_prefix_statements = ""
transform_input_args = type_info_data.InputTypeInfo.GetTransformInputArgs()
if isinstance(transform_input_args, tuple):
transform_input_args, fit_prefix_statements = transform_input_args
# Special processing for vector<bool>
if type_info_data.InputTypeInfo.TypeName == "bool":
# vector<bool> isn't actually a bool, so we can't take a direct reference to it
for_loop = "for(bool input : inference_input)"
else:
for_loop = "for(auto const & input : inference_input)"
if type_info_data.OutputTypeInfo.TypeName == "bool":
# vector<bool> doesn't support emplace_back on some platforms
invocation_template = "results.push_back({});"
else:
invocation_template = "results.emplace_back({});"
# Get the output statement information
if item.has_dynamic_output:
output_statement_info = type_info_data.DynamicOutputTypeInfo.GetOutputInfo(
invocation_template=invocation_template,
result_name="results",
)
else:
output_statement_info = type_info_data.OutputTypeInfo.GetOutputInfo(
invocation_template=invocation_template,
result_name="results",
)
# Write the training statements
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* | {name}{type_desc} */
template <typename VectorInputT{constructor_template_params}>
void {name}{suffix}Test(
std::vector<VectorInputT> const &training_input,
std::vector<VectorInputT> const &inference_input,
std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}
) {{
ErrorInfoHandle * pErrorInfo(nullptr);
// Create the estimator
{name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);
REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));
REQUIRE(pEstimatorHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Train
if(training_input.empty() == false) {{
typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());
while(true) {{
TrainingState trainingState(0);
REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(trainingState != Training)
break;
FitResult result(0);
auto const & input(*iter);
{fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(result == ResetAndContinue) {{
iter = training_input.begin();
continue;
}}
++iter;
if(iter == training_input.end()) {{
REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
iter = training_input.begin();
}}
}}
}}
{name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);
REQUIRE(pErrorInfo == nullptr);
// Once here, training should be complete
{{
bool is_complete(false);
REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
REQUIRE(is_complete);
}}
// Create the Transformer
{name}{suffix}TransformerHandle * pTransformerHandle(nullptr);
REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));
REQUIRE(pTransformerHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Destroy the estimator
REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
""",
).format(
name=item.name,
type_desc=type_desc,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
constructor_template_params=constructor_template_params,
constructor_params=constructor_params,
constructor_args=constructor_args,
fit_input_args=transform_input_args,
fit_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements.rstrip(),
12,
),
),
),
)
# Write the inferencing statements
inline_destroy_statement = "// No inline destroy statement"
trailing_destroy_statement = "// No trailing destroy statement"
if output_statement_info.DestroyArgs:
if output_statement_info.DestroyInline:
inline_destroy_statement = textwrap.dedent(
"""\
// Destroy the contents
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
""",
).format(
name=item.name,
suffix=suffix,
args=output_statement_info.DestroyArgs,
)
else:
trailing_destroy_statement = textwrap.dedent(
"""\
for(auto & {var_name}: results) {{
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
""",
).format(
name=item.name,
suffix=suffix,
args=output_statement_info.DestroyArgs,
var_name=output_statement_info.DestroyVarName or "result",
)
if item.has_dynamic_output:
f.write(
StringHelpers.LeftJustify(
textwrap.dedent(
"""\
// Inference
std::vector<{vector_result_type}> results;
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
if(true) {{
{transform_vars}
REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
""",
).format(
name=item.name,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
for_loop=for_loop,
transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements,
4,
).rstrip(),
),
transform_vars=StringHelpers.LeftJustify(
"\n".join(
[
"{} {};".format(var.Type, var.Name)
for var in output_statement_info.TransformVars
]
),
4,
),
transform_input_args=transform_input_args,
transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(),
4,
),
inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(),
4,
),
),
4,
skip_first_line=False,
),
)
else:
f.write(
StringHelpers.LeftJustify(
textwrap.dedent(
"""\
// Inference
std::vector<{vector_result_type}> results;
results.reserve(inference_input.size());
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
""",
).format(
name=item.name,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
for_loop=for_loop,
transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements,
4,
).rstrip(),
),
transform_vars=StringHelpers.LeftJustify(
"\n".join(
[
"{} {};".format(var.Type, var.Name)
for var in output_statement_info.TransformVars
]
),
4,
),
transform_input_args=transform_input_args,
transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(),
4,
),
inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(),
4,
),
),
4,
skip_first_line=False,
),
)
f.write(
textwrap.dedent(
"""\
REQUIRE(verify_func(results));
{trailing_destroy_statement}
// Destroy the transformer
REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
""",
).format(
name=item.name,
suffix=suffix,
trailing_destroy_statement=StringHelpers.LeftJustify(
trailing_destroy_statement.rstrip(),
4,
),
),
)
f.write(
textwrap.dedent(
"""\
#if (defined _MSC_VER)
# pragma warning(pop)
#endif
""",
),
)
# ----------------------------------------------------------------------
def _GenerateCommonFiles(open_file_func, output_dir, output_stream):
    """Write "SharedLibraryTests_Common.hpp" into `output_dir`.

    The generated header includes SharedLibrary_Common.hpp, suppresses a set
    of MSVC false-positive warnings, and emits the helper methods produced by
    each registered TypeInfo class.

    `output_stream` is accepted for interface symmetry with the other
    generators but is not used here. Returns 0 (process-style success code).
    """
    common_header_path = os.path.join(output_dir, "SharedLibraryTests_Common.hpp")

    prologue = textwrap.dedent(
        """\
        /* ---------------------------------------------------------------------- */
        /* Copyright (c) Microsoft Corporation. All rights reserved. */
        /* Licensed under the MIT License */
        /* ---------------------------------------------------------------------- */
        #pragma once
        #include "SharedLibrary_Common.hpp"
        #if (defined _MSC_VER)
        # pragma warning(push)
        // I don't know why MSVC thinks that there is unreachable
        // code in these methods during release builds.
        # pragma warning(disable: 4702) // Unreachable code
        # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
        # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
        #endif
        """,
    )

    epilogue = textwrap.dedent(
        """\
        #if (defined _MSC_VER)
        # pragma warning(pop)
        #endif
        """,
    )

    with open_file_func(common_header_path, "w") as f:
        f.write(prologue)

        # Each TypeInfo class contributes its own helper methods.
        for type_info_class in TypeInfoData.EnumTypeInfoClasses():
            type_info_class.CreateHelperMethods(f)

        f.write(epilogue)

    return 0
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class TypeInfoData(object):
    """Resolved type information for a single featurizer item.

    Gathers the custom enums and structs visible to `item` and resolves its
    input, output, and configuration-parameter type strings into TypeInfo
    instances that the code generators consume.
    """

    # ----------------------------------------------------------------------
    # |
    # | Public Methods
    # |
    # ----------------------------------------------------------------------
    def __init__(self, item, global_custom_structs, global_custom_enums):
        """Resolve all type info for `item`.

        item: featurizer description object (provides input_type, output_type,
            and optional custom_enums/custom_structs/configuration_params).
        global_custom_structs / global_custom_enums: definitions shared by all
            items; item-local definitions are appended after them.
        """
        # Create the custom enums
        custom_enums = OrderedDict()
        for custom_enum in itertools.chain(global_custom_enums, getattr(item, "custom_enums", [])):
            # Resolve the enum's underlying type once so later lookups can use
            # `underlying_type_info` directly.
            if isinstance(custom_enum.underlying_type, six.string_types):
                type_info = self._CreateTypeInfo(custom_enum.underlying_type)
                assert type_info, custom_enum.underlying_type

                custom_enum.underlying_type_info = type_info

            custom_enums[custom_enum.name] = custom_enum

        # Create the custom structs
        custom_structs = OrderedDict()
        for custom_struct in itertools.chain(global_custom_structs, getattr(item, "custom_structs", [])):
            members = OrderedDict()

            for member in custom_struct.members:
                type_info = self._CreateTypeInfo(member.type)
                assert type_info, member.type

                # Member names must be unique within a struct
                assert member.name not in members, member.name
                members[member.name] = type_info

            custom_structs[custom_struct.name] = members

        # Create the configuration param type infos
        configuration_param_type_infos = []
        for configuration_param in getattr(item, "configuration_params", []):
            if configuration_param.type in custom_enums:
                # Enum-typed params reuse the enum's underlying type info.
                type_info = custom_enums[configuration_param.type].underlying_type_info
                configuration_param.is_enum = True
            else:
                type_info = self._CreateTypeInfo(
                    configuration_param.type,
                    custom_structs=custom_structs,
                    custom_enums=custom_enums,
                )
                assert type_info, configuration_param.type

            configuration_param_type_infos.append(type_info)

        input_type_info = self._CreateTypeInfo(
            item.input_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert input_type_info, item.input_type

        output_type_info = self._CreateTypeInfo(
            item.output_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert output_type_info, item.output_type

        # Output wrapped in a vector, used for items with dynamic output
        # (multiple results per input); no assert — presumably vector<...>
        # always resolves. TODO confirm.
        dynamic_output_info = self._CreateTypeInfo(
            "vector<{}>".format(item.output_type),
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )

        # Commit the results
        self.CustomStructs = custom_structs
        self.ConfigurationParamTypeInfos = configuration_param_type_infos
        self.InputTypeInfo = input_type_info
        self.OutputTypeInfo = output_type_info
        self.DynamicOutputTypeInfo = dynamic_output_info

    # ----------------------------------------------------------------------
    @classmethod
    def EnumTypeInfoClasses(cls):
        """Yield every known TypeInfo class, initializing the list lazily."""
        cls._InitTypeInfoClasses()
        yield from cls._type_info_classes

    # ----------------------------------------------------------------------
    # |
    # | Private Data
    # |
    # ----------------------------------------------------------------------
    # Populated once by _InitTypeInfoClasses; shared by all instances.
    _type_info_classes = None

    # ----------------------------------------------------------------------
    # |
    # | Private Methods
    # |
    # ----------------------------------------------------------------------
    @classmethod
    def _InitTypeInfoClasses(cls):
        """Populate `cls._type_info_classes` exactly once (idempotent)."""
        if cls._type_info_classes is not None:
            return

        # Imported here rather than at module scope — presumably to defer the
        # plugin imports until they are actually needed; confirm if moving.
        from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo

        type_info_classes = [
            DatetimeTypeInfo,
            MatrixTypeInfo,
            SingleValueSparseVectorTypeInfo,
            SparseVectorTypeInfo,
            StringTypeInfo,
            TupleTypeInfo,
            UniqueIdTypeInfo,
            VectorTypeInfo,
        ]

        # Scalar and struct type infos live one-per-type in their modules;
        # pick up every concrete "<X>TypeInfo" attribute (skip private names
        # and the "TypeInfo" base itself).
        for compound_module in [ScalarTypeInfos, StructTypeInfos]:
            for obj_name in dir(compound_module):
                if (
                    obj_name.startswith("_")
                    or not obj_name.endswith("TypeInfo")
                    or obj_name == "TypeInfo"
                ):
                    continue

                type_info_classes.append(getattr(compound_module, obj_name))

        # Associate the type infos with the class rather than the instance
        # so that we only need to perform this initialization once.
        cls._type_info_classes = type_info_classes

    # ----------------------------------------------------------------------
    @classmethod
    def _CreateTypeInfo(cls, the_type, *args, **kwargs):
        """Instantiate the TypeInfo class matching `the_type`, or return None.

        A trailing '?' marks the type as optional. Each candidate class's
        TypeName is either an exact string or an object with a `match` method
        (regex-style).
        """
        cls._InitTypeInfoClasses()

        is_optional = False
        if the_type.endswith("?"):
            the_type = the_type[:-1]
            is_optional = True

        type_info_class = None

        for this_type_info_class in cls._type_info_classes:
            if isinstance(this_type_info_class.TypeName, six.string_types):
                if this_type_info_class.TypeName == the_type:
                    type_info_class = this_type_info_class
                    break

            elif hasattr(this_type_info_class.TypeName, "match"):
                if this_type_info_class.TypeName.match(the_type):
                    type_info_class = this_type_info_class
                    break

        if type_info_class is None:
            return None

        return type_info_class(
            *args,
            member_type=the_type,
            is_optional=is_optional,
            create_type_info_func=cls._CreateTypeInfo,
            **kwargs
        )
|
# ----------------------------------------------------------------------------
# GS Widget Kit Copyright 2021 by Noah Rahm and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import wx
from wx import stc
class TextCtrl(stc.StyledTextCtrl):
    """Dark-themed styled text control for the widget kit.

    :param parent: parent window.
    :param value: initial text.
    :param placeholder: accepted for API compatibility; currently unused.
    :param scrollbar: if False (default), both scrollbars are hidden.
    :param style: extra window style flags, OR'd with ``wx.TRANSPARENT_WINDOW``.
    """

    def __init__(self, parent, value="", placeholder="", scrollbar=False, style=0, *args, **kwargs):
        stc.StyledTextCtrl.__init__(self, parent, style=style | wx.TRANSPARENT_WINDOW, *args, **kwargs)

        if scrollbar is False:
            self.SetUseVerticalScrollBar(False)
            self.SetUseHorizontalScrollBar(False)

        # Caret and margins
        self.SetCaretWidth(2)
        self.SetCaretForeground("#5680c2")
        self.SetMarginLeft(8)
        self.SetMarginRight(8)
        self.SetMarginWidth(1, 0)  # collapse margin 1 (no line numbers)

        # Plain-text behavior: no lexer, LF line endings, 4-space soft tabs
        self.SetEOLMode(stc.STC_EOL_LF)
        self.SetLexer(stc.STC_LEX_NULL)
        self.SetIndent(4)
        self.SetUseTabs(False)
        self.SetTabWidth(4)

        # Horizontal scroll range follows the widget width and then tracks
        # the widest line.
        self.SetScrollWidth(self.GetSize()[0])
        self.SetScrollWidthTracking(True)

        # Dark theme colors
        self.SetSelBackground(True, "#242424")
        self.StyleSetBackground(stc.STC_STYLE_DEFAULT, wx.Colour("#333333"))
        self.StyleSetForeground(stc.STC_STYLE_DEFAULT, wx.Colour("#ffffff"))
        self.StyleSetFont(stc.STC_STYLE_DEFAULT,
                          wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT))
        self.StyleClearAll()

        # Set the initial text once, after styling. (The original called
        # SetValue(value) twice — before and after StyleClearAll — the first
        # call was redundant.)
        self.SetValue(value)
class NativeTextCtrl(wx.TextCtrl):
    """Native single-line text control restyled with the kit's dark colors."""

    def __init__(self, parent, value="", style=wx.BORDER_SIMPLE, *args, **kwargs):
        wx.TextCtrl.__init__(self, parent, value=value, style=style, *args, **kwargs)

        # Dark background, light foreground to match the rest of the kit.
        dark_background = wx.Colour("#333333")
        light_foreground = wx.Colour("#fff")
        self.SetBackgroundColour(dark_background)
        self.SetForegroundColour(light_foreground)
|
#!/usr/bin/env python
# Wenchang Yang (wenchang@princeton.edu)
# Wed Jun 30 17:02:45 EDT 2021
if __name__ == '__main__':
from misc.timer import Timer
tt = Timer(f'start {__file__}')
import sys, os.path, os, glob, datetime
import xarray as xr, numpy as np, pandas as pd, matplotlib.pyplot as plt
#more imports
from scipy.optimize import minimize
from scipy.stats import genextreme
from numpy import exp, log
from tqdm import tqdm
from .shared import plot_smp_return_period, plot_return_period, gev_return_period, gev_return_period_inverse
#
if __name__ == '__main__':
tt.check('end import')
#
#start from here
#negative log likelihood
def _negLogLikelihood(params, data, datacv, xi_bounds=None):
"""GEV shift negative log likelihood.
params: (mu0, sigma, xi, alpha)
data: sample(s)
datacv: co-variate
Ref: https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution
https://ascmo.copernicus.org/articles/6/177/2020/
"""
if xi_bounds is None or xi_bounds==(None, None):
xi_bounds = (-np.inf, np.inf)
#xi_bounds = (-0.4, 0.4)
mu0, sigma, xi, alpha = params
mu = mu0 + alpha*datacv
s = (data - mu)/sigma
if xi < xi_bounds[0] or xi > xi_bounds[1]:#effectively set xi bounds
return np.inf
elif xi == 0:
return data.size*log(sigma) + np.sum(s + exp(-s))
else:
return -np.sum( log(xi*s>-1) ) + data.size*log(sigma) + np.sum( (1+1/xi)*log(1+xi*s) ) + np.sum( (1+xi*s)**(-1/xi) )
def fit(data, datacv, **kws):
    """GEV fit using maximum likelihood.

    Extra keyword arguments: 'method' (default 'Nelder-Mead'), 'xi_bounds',
    'x0' (initial parameter guess), plus anything scipy's `minimize` accepts.
    Returns the `scipy.optimize.OptimizeResult`.
    """
    method = kws.pop('method', 'Nelder-Mead')
    xi_bounds = kws.pop('xi_bounds', (None, None))
    # Parameter order is (mu0, sigma, xi, alpha); sigma must stay positive.
    bounds = ((None, None), (0, None), xi_bounds, (None, None))

    # Seed the optimizer: regress data on the co-variate for alpha and mu0,
    # use the de-trended spread for sigma, and a mildly negative shape.
    guess_alpha = (np.corrcoef(data, datacv)[0, 1]
                   * data.std().item() / datacv.std().item())
    guess_mu0 = data.mean().item() - guess_alpha*datacv.mean().item()
    guess_sigma = (data - guess_alpha*datacv).std().item()
    x0 = kws.pop('x0', (guess_mu0, guess_sigma, -0.1, guess_alpha))

    r = minimize(_negLogLikelihood, x0, args=(data, datacv, xi_bounds),
                 method=method, bounds=bounds, **kws)
    if not r.success:
        print(f'{r = }')
        print('[failed]:', r.message)
    return r
def plot_fit(data, datacv, cv_level=None, fit_result=None, ax=None, fit_kws=None, **kws):
    """Plot empirical and fitted GEV return periods at one co-variate level.

    cv_level: (label, value) tuple giving the co-variate level at which the
        samples are shifted before ranking; defaults to the co-variate max.
    fit_result: reuse an existing `fit` result instead of refitting.
    Returns the `fit` result.
    """
    if cv_level is None:#co-variate level, e.g. value in some specific year
        cv_level = ('max co-variate', datacv.max().item())
    if ax is None:
        fig, ax = plt.subplots()
    if fit_kws is None:
        fit_kws = {}
    upper = kws.pop('upper', data.size*100)  # upper bound of plotted return periods
    label = kws.pop('label', cv_level[0])
    #fit by user defined likelihood function
    if fit_result is None: #do the fit
        r = fit(data, datacv, **fit_kws)
    else: #already done the fit: use the result directly
        r = fit_result
    if r.success:
        mu0, sigma, xi, alpha = r.x
        print('wy fit params:'.ljust(16), f'{mu0=:.4g}; {sigma=:.4g}; {xi=:.4g}; {alpha=:.4g}')
        #empirical/fit return periods
        # Shift every sample to the common co-variate level before ranking.
        plot_smp_return_period(data - alpha*datacv + alpha*cv_level[1], ax=ax, **kws)
        plot_return_period(mu0 + alpha*cv_level[1], sigma, xi, upper=upper, ax=ax, label=label, **kws)
    ax.set_xlabel('return period')
    try:
        # `data` may be a plain ndarray without a .name attribute
        ax.set_ylabel(data.name)
    except:
        pass
    print()
    return r
def fit_bootstrap(data, datacv, nmc=100, mc_seed=0, **kws):
    """GEV shift fit bootstrap.

    data: input array-like data to fit
    datacv: covariate
    nmc: size of Monte Carlo samples
    mc_seed: np.random seed (default is 0)

    Returns an xarray.Dataset with the bootstrap parameter samples along a
    'mc' dimension plus the best-fit values as scalar '<name>_best' entries.
    """
    data = data.values if isinstance(data, xr.DataArray) else data
    datacv = datacv.values if isinstance(datacv, xr.DataArray) else datacv

    # Resample (data, covariate) pairs jointly with a reproducible generator.
    indices = np.random.default_rng(mc_seed).choice(data.size, size=(nmc, data.size))
    boot_data = data[indices]
    boot_datacv = datacv[indices]

    # One row of (mu0, sigma, xi, alpha) per bootstrap replicate; NaN marks
    # replicates whose fit failed.
    params = np.full((nmc, 4), np.nan)
    for imc in tqdm(range(nmc)):
        r = fit(boot_data[imc, :], boot_datacv[imc, :], **kws)
        if r.success:
            params[imc, :] = r.x
        else:
            print(f'mc = {imc};', r.message)

    names = ('mu0', 'sigma', 'xi', 'alpha')
    ds = xr.Dataset(
        {name: xr.DataArray(params[:, col], dims='mc')
         for col, name in enumerate(names)}
    )

    # Best fit on the original (non-resampled) data.
    r = fit(data, datacv, **kws)
    for name, best in zip(names, r.x):
        ds[name + '_best'] = xr.DataArray(best)
    return ds
def plot_fit_bootstrap(data, datacv, cv_level=None, bsfit=None, nmc=100, mc_seed=0, ci=95, upper_rp=None, ax=None, fit_kws=None, **kws):
    """Plot the fitted return-period curve at `cv_level` plus a bootstrap CI band.

    bsfit: reuse an existing `fit_bootstrap` Dataset instead of re-running it.
    ci: confidence level in percent (default 95).
    upper_rp: largest plotted return period; defaults to 100x the sample size.
    Returns the bootstrap Dataset so later calls can pass it back as `bsfit`.
    """
    if cv_level is None:
        cv_level = ('max co-variate', datacv.max().item())
    if upper_rp is None:
        upper_rp = data.size*100
    if ax is None:
        fig, ax = plt.subplots()
    if fit_kws is None:
        fit_kws = {}
    #direct fit plot
    r = plot_fit(data, datacv, cv_level=cv_level, ax=ax, fit_kws=fit_kws, upper=upper_rp, **kws)
    mu0, sigma, xi, alpha = r.x
    #bootstrap
    if bsfit is None: #do the bootstrap fit
        ds = fit_bootstrap(data, datacv, nmc=nmc, mc_seed=mc_seed, **fit_kws)
    else: #already done the bootstrap fit: use the result directly
        ds = bsfit
    ci_bounds = [(1-ci/100)/2, (1+ci/100)/2]
    # Print the best-fit value and bootstrap CI for each parameter
    for ii, daname in enumerate(('mu0', 'sigma', 'xi', 'alpha')):
        q = ds[daname].quantile(ci_bounds, dim='mc')
        print(f'{daname} and {ci}% CI:'.rjust(20), f'{r.x[ii]:.4g}({q[0].item():.4g}, {q[1].item():.4g})')
    print()
    #confidence interval of the return value
    # NOTE(review): mu_shift is unused — the comprehension below recomputes
    # the shifted location per bootstrap sample (its mu0/alpha shadow the
    # best-fit values unpacked above).
    mu_shift = mu0 + alpha*cv_level[1]
    lower, upper = 1, np.log10(upper_rp) #return period bounds
    rp = np.logspace(lower, upper, 100)
    yy = [gev_return_period_inverse(rp, mu0+alpha*cv_level[1], sigma, xi)
          for mu0, sigma, xi, alpha in zip(ds.mu0.values, ds.sigma.values, ds.xi.values, ds.alpha.values)]
    yy = xr.DataArray(yy, dims=('mc', 'rp')).assign_coords(rp=rp)
    # Dashed lines: the lower/upper CI quantiles of the return-value curve
    yy.quantile(ci_bounds, dim='mc').plot(x='rp', ls='--', lw=1, hue='quantile', add_legend=False, **kws)
    ax.set_xlabel('return period')
    ax.set_ylabel('return value')
    return ds
def plot_covariate(data, datacv, fit_result=None, ax=None, fit_kws=None, **kws):
    """Scatter `data` against the co-variate and overlay the fitted location line.

    fit_result: reuse an existing `fit` result instead of refitting.
    """
    if ax is None:
        fig, ax = plt.subplots()
    if fit_kws is None:
        fit_kws = {}
    ax.plot(datacv, data, ls='none', marker='o', fillstyle='none', alpha=0.5, **kws)
    if fit_result is None:
        r = fit(data, datacv, **fit_kws)
    else:
        r = fit_result
    if r.success:
        mu0, sigma, xi, alpha = r.x
        print('wy fit params:'.ljust(16), f'{mu0=:.4g}; {sigma=:.4g}; {xi=:.4g}; {alpha=:.4g}')
        # Fitted location mu(cv) drawn through the first co-variate point
        ax.axline((datacv[0].item(), mu0+alpha*datacv[0].item()), slope=alpha, **kws)
        # Dashed guides offset by the 6- and 40-period return levels of the
        # fitted distribution (location removed) — presumably chosen as
        # reference bands; TODO confirm the intended return periods.
        ax.axline((datacv[0].item(), mu0+alpha*datacv[0].item()+gev_return_period_inverse(6, 0, sigma, xi)), slope=alpha, lw=1, ls='--', **kws)
        ax.axline((datacv[0].item(), mu0+alpha*datacv[0].item()+gev_return_period_inverse(40, 0, sigma, xi)), slope=alpha, lw=1, ls='--', **kws)
    ax.set_xlabel('co-variate')
    ax.set_ylabel('return value')
def plot_mu_ci(data, datacv, cv0=None, bsfit=None, nmc=100, mc_seed=0, ci=95, fit_kws=None, ax=None, **kws):
    """Add confidence interval information of mu to the figure generated by the plot_covariate.

    cv0: co-variate value at which the confidence interval info will be added
        (defaults to the last co-variate value).
    bsfit: reuse an existing `fit_bootstrap` Dataset instead of re-running it.
    """
    if cv0 is None:
        cv0 = np.array(datacv)[-1]
    if fit_kws is None:
        fit_kws = {}
    if bsfit is None:
        ds = fit_bootstrap(data, datacv, nmc=nmc, mc_seed=mc_seed, **fit_kws)
    else:
        ds = bsfit
    if ax is None:
        ax = plt.gca()
    capsize = kws.pop('capsize', 3)
    # Best-fit location at cv0, and its bootstrap CI
    mu_cv0 = ds['mu0_best'] + ds['alpha_best'] * cv0
    ci_bound = (1 - ci/100)/2, (1 + ci/100)/2
    mu_cv0_ci = ( ds['mu0'] + ds['alpha'] * cv0 ).quantile(ci_bound, dim='mc')
    x = cv0
    y = mu_cv0
    # errorbar expects yerr as (2, N) offsets below/above the point
    yerr = np.abs(mu_cv0_ci - mu_cv0)
    yerr = np.array(yerr).reshape(2, 1)
    ax.errorbar(x, y, yerr, capsize=capsize, **kws)
    print(f'cv0 = {np.array(cv0)}; mu_cv0 = {np.array(mu_cv0)}; mu_cv0_ci = {np.array(mu_cv0_ci)}')
def fit_all(data, datacv, cv_levels=None, nmc=100, mc_seed=0, ci=95, upper_rp=None, fit_kws=None):
    """Run the full analysis: co-variate plot, bootstrap fit, and return-period
    plots at each requested co-variate level.

    cv_levels: list of (label, value) tuples; defaults to the co-variate
        min and max.
    Returns the bootstrap Dataset from `fit_bootstrap`.
    """
    if cv_levels is None:
        cv_levels = [('min co-variate', datacv.min().item()), ('max co-variate', datacv.max().item())]
    if fit_kws is None:
        fit_kws = {}
    plot_covariate(data, datacv, color='k', fit_kws=fit_kws)
    ds = fit_bootstrap(data, datacv, nmc=nmc, mc_seed=mc_seed, **fit_kws)
    # mu CI markers for (at most) the first two levels, matching the C0/C1
    # colors used below. (Previously indexed cv_levels[1] unconditionally,
    # which crashed on a single-level list.)
    for ii, cv_level in enumerate(cv_levels[:2]):
        plot_mu_ci(data, datacv, cv0=cv_level[1], bsfit=ds, color=f'C{ii}')
    fig, ax = plt.subplots()
    # One return-period curve per level; reuse `cv_level` directly instead of
    # re-indexing cv_levels[ii], and drop the redundant len(cv_levels) guard.
    for ii, cv_level in enumerate(cv_levels):
        plot_fit_bootstrap(data, datacv, cv_level, bsfit=ds, ci=ci, upper_rp=upper_rp, ax=ax, fit_kws=fit_kws, color=f'C{ii}')
    ax.legend()
    return ds
def makedata(mu0=None, sigma=None, xi=None, alpha=None, datacv=None, nsmp=100, seed=1, ofile=None):
    """Generate synthetic GEV-shift samples and optionally save them to netCDF.

    Any of mu0/sigma/xi/alpha left as None is drawn at random from an
    UNSEEDED generator (so repeated calls explore different "true" parameter
    sets); `seed` controls only the extreme-value sampling itself.
    ofile: basename suffix; data are written to 'data_<ofile>' and the
        co-variate to 'datacv_<ofile>'.

    Returns (data, datacv).
    """
    #specify params
    rng = np.random.default_rng()  # deliberately unseeded: random true params
    if mu0 is None:
        mu0 = rng.uniform(10, 20)
    if sigma is None:
        sigma = rng.uniform(0, 10)
    if xi is None:
        xi = rng.uniform(-1, 1)
    if alpha is None:
        alpha = rng.uniform(-4, 4)
    # (removed unused local `true_values`; the note below records the truth)
    note = 'true params:'.ljust(16) + f'{mu0=:.4g}; {sigma=:.4g}; {xi=:.4g}; {alpha=:.4g}'
    print(note)
    #specify co-variate
    if datacv is None:
        datacv = np.linspace(0, 2, nsmp)
    #generate data
    rng = np.random.default_rng(seed)#seed to generate genextreme random variables
    genextreme.random_state = rng
    # scipy's genextreme shape argument is c = -xi in this module's convention
    data = genextreme.rvs(-xi, loc=mu0+alpha*datacv, scale=sigma)
    #show data
    plt.plot(datacv, data, ls='none', marker='o', fillstyle='none')
    if ofile is not None:# save data
        ofile_data = 'data_' + ofile
        xr.DataArray(data, dims='year').assign_coords(year=range(1900, 1900+data.size)).assign_attrs(note=note).to_dataset(name='mydata').to_netcdf(ofile_data)
        print('[saved]:', ofile_data)
        ofile_datacv = 'datacv_' + ofile
        xr.DataArray(datacv, dims='year').assign_coords(year=range(1900, 1900+datacv.size)).to_dataset(name='mycovariate').to_netcdf(ofile_datacv)
        print('[saved]:', ofile_datacv)
    return data, datacv
def test(mu0=None, sigma=None, xi=None, alpha=None, datacv=None, seed=1, nmc=100, mc_seed=0, ci=95, nsmp=100):
    """End-to-end self test: simulate GEV-shift data with known parameters,
    fit them (with bootstrap) and plot recovered vs true values.

    Parameters left as None are drawn at random (unseeded); `seed` controls
    only the extreme-value sampling.
    """
    #specify params
    rng = np.random.default_rng()
    if mu0 is None:
        mu0 = rng.uniform(10, 20)
    if sigma is None:
        sigma = rng.uniform(0, 10)
    if xi is None:
        xi = rng.uniform(-1, 1)
    if alpha is None:
        alpha = rng.uniform(-4, 4)
    # Keep the truth before `mu0` etc. get rebound to the fitted values below
    true_values = mu0, sigma, xi, alpha
    print('true params:'.ljust(16), f'{mu0=:.4g}; {sigma=:.4g}; {xi=:.4g}; {alpha=:.4g}')
    #specify co-variate
    if datacv is None:
        datacv = np.linspace(0, 2, nsmp)
    #generate data
    rng = np.random.default_rng(seed)#seed to generate genextreme random variables
    genextreme.random_state = rng
    # scipy's genextreme shape argument is c = -xi in this module's convention
    data = genextreme.rvs(-xi, loc=mu0+alpha*datacv, scale=sigma)
    #validate
    #co-variate plot
    fig, ax = plt.subplots()
    plot_covariate(data, datacv, ax=ax, color='k')
    #fit_bootstrap plot
    fig, ax = plt.subplots()
    ds = plot_fit_bootstrap(data, datacv, ('initial co-variate', datacv[0]), nmc=nmc, mc_seed=mc_seed, ci=ci, ax=ax, color='C0')#, upper_rp=data.size*40)
    plot_fit_bootstrap(data, datacv, ('final co-variate', datacv[-1]), bsfit=ds, ax=ax, color='C1')#, upper_rp=data.size*40)
    ax.legend()
    #fit summary
    r = fit(data, datacv)
    mu0, sigma, xi, alpha = r.x
    ci_bounds = [(1-ci/100)/2, (1+ci/100)/2]
    s = ''
    danames = ('mu0', 'sigma', 'xi', 'alpha')
    pnames = ('$\\mu_0$:', '$\\sigma$:', '$\\xi$:', '$\\alpha$:')
    # One summary line per parameter: fitted value, true value, bootstrap CI
    for ii, (daname, pname, tv) in enumerate(zip(danames, pnames, true_values)):
        q = ds[daname].quantile(ci_bounds, dim='mc')
        s += pname + f' {r.x[ii]:.4g}(true:{tv:.4g}); {ci}% CI: ({q[0].item():.4g}, {q[1].item():.4g})\n'
    #ax.text(1, 1, s, transform=ax.transAxes, ha='left', va='top', fontsize='small')
    ax.text(1, 0, s, transform=ax.transAxes, ha='right', va='bottom', fontsize='small', alpha=0.5)
    print(s)
if __name__ == '__main__':
    from wyconfig import * #my plot settings
    plt.close('all')
    if len(sys.argv) <= 1:
        test()
    elif len(sys.argv) > 1 and sys.argv[1] == 'test': #e.g. python -m wyextreme.gev_shift test xi=-0.1
        kws = dict(mu0=None, sigma=None, xi=None, alpha=None, seed=1, nmc=100, mc_seed=0, ci=95, nsmp=100)
        if len(sys.argv) > 2:
            # key=value overrides from the command line; integer-valued keys
            # are cast to int, everything else to float
            for s in sys.argv[2:]:
                key, v = s.split('=')
                v = int(v) if key in ('seed', 'nmc', 'mc_seed', 'nsmp') else float(v)
                if key in kws: kws[key] = v
        test(**kws)
    elif len(sys.argv) > 1 and sys.argv[1] == 'makedata': #e.g. python -m wyextreme.gev_shift makedata
        kws = dict(mu0=None, sigma=None, xi=None, alpha=None, nsmp=100, seed=1, ofile=None)
        if len(sys.argv) > 2:
            for s in sys.argv[2:]:
                key, v = s.split('=')
                if key in ('seed', 'nsmp'):  # BUGFIX: was ('seed', 'nsm'), so nsmp=N became a float
                    v = int(v)
                elif key in ('ofile',):
                    pass  # ofile stays a string
                else:
                    v = float(v)
                if key in kws: kws[key] = v
        makedata(**kws)
    elif len(sys.argv) > 2: # two input data files to compare
        kws = dict(cv_levels=None, nmc=100, mc_seed=0, ci=95, upper_rp=None)
        da0 = xr.open_dataarray(sys.argv[1])
        da1 = xr.open_dataarray(sys.argv[2])
        # Flatten an ensemble dimension, if present, into the sample axis
        if 'en' in da0.dims:
            da0 = da0.stack(s=['en', 'year'])
        if 'en' in da1.dims:
            da1 = da1.stack(s=['en', 'year'])
        fit_all(da0, da1, **kws)
        """
        fig, ax = plt.subplots()
        plot_covariate(da0, da1, ax=ax, color='k')
        fig,ax = plt.subplots()
        ds = plot_fit_bootstrap(da0, da1, ('min co-variate', da1.min().item()), ax=ax, color='C0')#, upper_rp=da0.size*40)
        plot_fit_bootstrap(da0, da1, ('max co-variate', da1.max().item()), bsfit=ds, ax=ax, color='C1')#, upper_rp=da0.size*40)
        """
    #savefig
    if 'savefig' in sys.argv:
        figname = __file__.replace('.py', '.png')  # dropped pointless f-string
        wysavefig(figname)
    tt.check('**Done**')
    plt.show()
|
# ----------------------------------------------------------------------------
# Cloud-instance configuration consumed by `distribute_multi.py`.
# NOTE(review): `ZONE` and `GPU_TYPE` below are 8-element tuples matching
# NUM_INSTANCES = 8 — keep their lengths in sync when changing either.
# ----------------------------------------------------------------------------
# If you use more than one instance, pass tuples to assign different
# values. For example, for two instances use zones
# `('europe-west4-a', 'europe-west4-c')`.
NUM_INSTANCES = 8
# These paths are relative to the file `distribute_multi.py` as a tuple
# that will be `os.path.join`ed or single string. Don't forget to wrap
# the tuple or string in another tuple if you only specify one path!
CLOUD_API_PATH = ('gcloud',)
STARTUP_SCRIPT_PATH = ('GANerator_GCP_startup.sh',)
# Change these as needed and correct command strings below for your
# cloud service provider. This example is for the Google Cloud Platform.
#
# Do not change variable names unless you want to look into
# `distribute_multi.py`.
# If you do not need a variable, simply set it to an empty string.
# To do more editing (in case you need more variables), look into
# `distribute_multi.py`.
IMAGE_FAMILY = 'pytorch-latest-cu100' # -cu100, maybe -gpu
# Use 'europe-west4-c' for P4, -a for TPUs.
# V100 is available in both.
#
# Or 'europe-west1-b' or -d for highmem but not as
# many GPUs (only P100 and K80).
ZONE = ('europe-west4-a', 'europe-west4-b', 'europe-west4-c', 'europe-west4-c',
        'europe-west1-a', 'europe-west1-b', 'europe-west4-a', 'europe-west4-a')
# The number at the end is the amount of CPUs.
# 'n1-standard-2' or maybe -4
# or 'n1-highmem-4' or -8.
MACHINE_TYPE = 'n1-standard-4'
# -t4, -p4, -v100, -p100 or -k80
GPU_TYPE = ('nvidia-tesla-p4',) * 4 + ('nvidia-tesla-t4',) * 4
# 1, 2, 4, 8.
# Make sure you have enough quota available!
GPU_COUNT = 1
# As we will start more than one machine, this is only a prefix.
INSTANCE_NAME_PREFIX = 'ganerator'
# Must contain the dataset you want to use.
RO_DISK_NAME = 'ganerator-ssd'
# Service account you want to use.
SERVICE_ACCOUNT = 'ganerator-service-account@ganerator.iam.gserviceaccount.com'
# All the following commands must be a string and will be split at
# spaces (`' '`).
# To properly manage the path for your cloud provider API binary, write
# the string 'GANERATOR_CLOUD_BIN' literally in its place (or just write
# the binary if it is in your PATH).
#
# Leave keyword format string indicators for the arguments (using double
# curly braces if you want to format the string before).
# TODO make interpolation values choosable via another tuple and **kwargs in format function
# How to start your cloud instance.
# To use a startup script, write 'GANERATOR_STARTUP' literally into the
# string to always get the correct path.
# This is so the path can be replaced later due to spaces in paths.
# The following arguments will be interpolated into the same lower-case
# keyword format string indicator:
# INSTANCE_NAME_PREFIX, ZONE, MACHINE_TYPE, IMAGE_FAMILY, GPU_TYPE,
# GPU_COUNT, RO_DISK_NAME, SERVICE_ACCOUNT
#
# Also make sure there is _another_ format string indicator `{suffix}`
# for the instance name suffix after the prefix.
START_COMMAND = (
    'gcloud compute instances create {instance_name_prefix}-{suffix} '
    '--zone={zone} '
    '--machine-type={machine_type} '
    '--image-family={image_family} '
    '--image-project=deeplearning-platform-release '
    '--maintenance-policy=TERMINATE '
    '--accelerator="type={gpu_type},count={gpu_count}" '
    '--metadata="install-nvidia-driver=True" '
    '--metadata-from-file startup-script="GANERATOR_STARTUP" '
    '--create-disk="size=14GB,auto-delete=yes" '
    '--disk="name={ro_disk_name},mode=ro" '
    '--service-account={service_account} '
    '--scopes=storage-full '
    '--preemptible'
)
# You must leave one string literal and two format string indicators
# here. The literal is for the command that will be executed and the
# format string indicator for the instance name prefix and suffix.
# Designate the command by the string literal 'GANERATOR_COMMAND' and
# the instance name prefix and suffix by the format string indicators
# `{instance_name_prefix}` and `{suffix}` respectively (using double
# curly braces if you want to format the string before).
REMOTE_PROCESS_COMMAND = (
    'gcloud compute ssh {instance_name_prefix}-{suffix} '
    '--command "GANERATOR_COMMAND"'
)
# This command will be interpolated in the REMOTE_PROCESS_COMMAND to do
# some final initialization in your machine such as cloning and
# navigating to the GANerator directory. This will be executed as a
# direct shell command to allow for more freedom, so escape the correct
# symbols depending on `REMOTE_PROCESS_COMMAND`.
# If empty or None, this is skipped.
# You can also interpolate the instance name suffix into the command
# via the format string indicator `{suffix}`.
INIT_COMMAND = (
    'cd /mnt/disks/rwdisk && '
    'mkdir GANerator_experiments && '
    'git clone -q https://github.com/janEbert/GANerator.git && '
    'cd GANerator && '
    'python3 src/ipynb_to_py.py && '
    'echo \\"cd \\$PWD\\" > ~/.bashrc && '
    'conda init > /dev/null'
)
# This command will be interpolated in the REMOTE_PROCESS_COMMAND to do
# some final work in your machine such as saving your experimental
# results. This will be executed as a direct shell command to allow for
# more freedom, so escape the correct symbols depending on
# `REMOTE_PROCESS_COMMAND`.
# If empty or None, this is skipped.
# You can also interpolate the instance name suffix into the command
# via the format string indicator `{suffix}`.
FINISH_COMMAND = (
    "echo 'Compressing results...' && "
    'export ANAME=\\$(date +%s) && '
    'tar -czvf exp-\\$ANAME.tar.gz --remove-files -C .. GANerator_experiments && '
    'gsutil cp exp-\\$ANAME.tar.gz gs://ganerator/ganerator-{suffix}/'
)
# How to end or delete your instance.
# Leave format string indicators `{instance_name_prefix}` and `{suffix}`
# for the `INSTANCE_NAME_PREFIX` and the corresponding suffix that will
# both be interpolated later.
END_COMMAND = (
    'gcloud compute instances delete {instance_name_prefix}-{suffix} -q'
)
|
"""
https://github.com/onnx/onnx/blob/09ada0f107f1cc1877f9194475c98d2d8512e188/onnx/defs/nn/defs.cc
"""
from webdnn.graph.axis import Axis
from webdnn.frontend.onnx.converter import ONNXConverter, attribute_dict
from webdnn.frontend.onnx.type_hint import INodeProto
from webdnn.graph.operators.average_pooling_2d import AveragePooling2D
from webdnn.graph.operators.convolution2d import Convolution2D
from webdnn.graph.operators.max_pooling_2d import MaxPooling2D
from webdnn.graph.operators.max import Max
from webdnn.graph.operators.sum import Sum
from webdnn.graph.order import OrderC, OrderNCHW, Order
from webdnn.util import console
from webdnn.util.misc import mul
@ONNXConverter.register_handler("AveragePool")
def _convert_average_pool(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert an ONNX AveragePool node into a webdnn AveragePooling2D."""
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    window = list(attrs["kernel_shape"].ints)
    steps = list(attrs["strides"].ints)
    pads = list(attrs["pads"].ints)
    if len(pads) != 2:
        # ONNX "pads" carries begin/end values per spatial axis; webdnn only
        # supports symmetric padding, so begin must equal end on each axis.
        if any(pads[2 * i] != pads[2 * i + 1] for i in range(len(pads) // 2)):
            raise NotImplementedError("[ONNXConverter] odd-size padding is not supported.")
        pads = [pads[0], pads[2]]
    # else: PyTorch sometimes emits just two pad values (one per spatial axis);
    # in that case they are used as-is.

    y, = AveragePooling2D(None, ksize=window, stride=steps, padding=pads, cover_all=False)(x)
    converter.set_variable(onnx_op.output[0], y)
@ONNXConverter.register_handler("MaxPool")
def _convert_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert an ONNX MaxPool node into a webdnn MaxPooling2D.

    Per the ONNX operator spec, the output size uses floor division, which
    matches webdnn's cover_all=False behavior (e.g. PyTorch nn.MaxPool2d(2)
    on size 11 yields 5).
    """
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    attrs = attribute_dict(onnx_op)
    window = list(attrs["kernel_shape"].ints)
    steps = list(attrs["strides"].ints)
    pads = list(attrs["pads"].ints)
    if len(pads) != 2:
        # ONNX "pads" carries begin/end values per spatial axis; webdnn only
        # supports symmetric padding, so begin must equal end on each axis.
        if any(pads[2 * i] != pads[2 * i + 1] for i in range(len(pads) // 2)):
            raise NotImplementedError("[ONNXConverter] odd-size padding is not supported.")
        pads = [pads[0], pads[2]]
    # else: PyTorch sometimes emits just two pad values (one per spatial axis);
    # in that case they are used as-is.

    y, = MaxPooling2D(None, ksize=window, stride=steps, padding=pads, cover_all=False)(x)
    converter.set_variable(onnx_op.output[0], y)
@ONNXConverter.register_handler("Conv")
def _convert_conv(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert an ONNX Conv node into a webdnn Convolution2D (+ bias add)."""
    x = converter.get_variable(onnx_op.input[0])
    x.order.unify(OrderNCHW)

    w = converter.get_variable(onnx_op.input[1])
    w.order.unify(Order([Axis.N, Axis.C, Axis.KH, Axis.KW]))

    attrs = attribute_dict(onnx_op)
    window = list(attrs["kernel_shape"].ints)
    dilation = list(attrs["dilations"].ints)
    steps = list(attrs["strides"].ints)
    pads = list(attrs["pads"].ints)
    # webdnn only supports symmetric padding: begin must equal end per axis.
    if any(pads[2 * i] != pads[2 * i + 1] for i in range(len(pads) // 2)):
        raise NotImplementedError("[ONNXConverter] odd-size padding is not supported.")
    pads = [pads[0], pads[2]]

    y, = Convolution2D(None, ksize=window, stride=steps, padding=pads, dilation_rate=dilation)(x, w)
    y.change_order(OrderNCHW)

    if len(onnx_op.input) == 3:
        # Optional third input is the per-channel bias vector.
        b = converter.get_variable(onnx_op.input[2])
        b.order.unify(OrderC)
        y = y + b

    converter.set_variable(onnx_op.output[0], y)
@ONNXConverter.register_handler("ConvTranspose")
def _convert_conv_transpose(converter: ONNXConverter, onnx_op: INodeProto):
    """Stub handler: transposed convolution is not implemented yet."""
    # FIXME: It's possible to support in current version of webdnn
    raise NotImplementedError("[ONNXConverter] Operator \"ConvTranspose\" is not supported yet.")
@ONNXConverter.register_handler("GlobalAveragePool")
def _convert_global_average_pool(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert GlobalAveragePool: mean over all trailing (spatial) axes per channel."""
    x = converter.get_variable(onnx_op.input[0])
    if x.ndim == 4:
        x.order.unify(OrderNCHW)

    # Collapse every axis after (batch, channel) into one reduction axis,
    # then sum over it and divide by the number of reduced elements.
    spatial_size = mul(x.shape[2:])
    pool_axis = Axis()
    flat = x.reshape([x.shape[0], x.shape[1], spatial_size],
                     Order([x.order.axes[0], x.order.axes[1], pool_axis]))

    y, = Sum(None, axis=pool_axis)(flat)
    y /= spatial_size
    converter.set_variable(onnx_op.output[0], y)
@ONNXConverter.register_handler("GlobalMaxPool")
def _convert_global_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert GlobalMaxPool: maximum over all trailing (spatial) axes per channel."""
    x = converter.get_variable(onnx_op.input[0])
    if x.ndim == 4:
        x.order.unify(OrderNCHW)

    # Collapse every axis after (batch, channel) into one reduction axis.
    spatial_size = mul(x.shape[2:])
    pool_axis = Axis()
    flat = x.reshape([x.shape[0], x.shape[1], spatial_size],
                     Order([x.order.axes[0], x.order.axes[1], pool_axis]))

    y, = Max(None, axis=pool_axis)(flat)
    converter.set_variable(onnx_op.output[0], y)
@ONNXConverter.register_handler("BatchNormalization")
def _convert_batch_normalization(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert BatchNormalization (inference form) into elementwise arithmetic."""
    x = converter.get_variable(onnx_op.input[0])
    x.order.axes[0].unify(Axis.N)
    x.order.axes[1].unify(Axis.C)

    # Inputs 1..4 are the per-channel parameters: scale, bias, mean, variance.
    params = []
    for input_index in range(1, 5):
        param = converter.get_variable(onnx_op.input[input_index])
        param.order.unify(OrderC)
        params.append(param)
    scale, B, mean, var = params

    attrs = attribute_dict(onnx_op)
    assert "spatial" not in attrs or attrs["spatial"].i == 1, \
        "[ONNXConverter] Operator \"BatchNormalization\" spatial==0 is not implemented."
    epsilon = attrs["epsilon"].f if "epsilon" in attrs else 1e-5

    y = (x - mean) / ((var + epsilon) ** 0.5) * scale + B
    converter.set_variable(onnx_op.output[0], y)
@ONNXConverter.register_handler("Dropout")
def _convert_dropout(converter: ONNXConverter, onnx_op: INodeProto):
    """Handle ONNX "Dropout" as identity (inference): pass the input through.

    Fix: this handler was named `_convert_max_pool`, duplicating and shadowing
    the real MaxPool handler defined earlier in this module; it is registered
    only through the decorator, so renaming it is safe.
    """
    console.warning("[ONNXConverter] Operator \"Dropout\" is ignored")
    x = converter.get_variable(onnx_op.input[0])
    converter.set_variable(onnx_op.output[0], x)
@ONNXConverter.register_handler("Flatten")
def _convert_flatten(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert Flatten: fold dims before `axis` and from `axis` onward into 2D."""
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)
    split_at = attrs["axis"].i if "axis" in attrs else 1
    flattened = x.reshape(shape=[mul(x.shape[:split_at]), mul(x.shape[split_at:])],
                          order=Order([None, None]))
    converter.set_variable(onnx_op.output[0], flattened)
@ONNXConverter.register_handler("LRN")
def _convert_lrn(converter: ONNXConverter, onnx_op: INodeProto):
    """Stub handler: local response normalization is not implemented yet."""
    # FIXME: It's possible to support in current version of webdnn
    raise NotImplementedError("[ONNXConverter] Operator \"LRN\" is not supported yet.")
|
from app.api.now_applications.models.activity_detail.activity_detail_base import ActivityDetailBase
class StagingAreaDetail(ActivityDetailBase):
    # SQLAlchemy polymorphic mapping: rows with identity 'camp' materialize
    # as StagingAreaDetail. NOTE(review): identity 'camp' vs the class name
    # 'StagingAreaDetail' — confirm this mapping is intentional.
    __mapper_args__ = {'polymorphic_identity': 'camp'}

    def __repr__(self):
        """Debug representation: class name plus the detail's primary key."""
        return f'<{self.__class__.__name__} {self.activity_detail_id}>'
|
from django.urls import path
from . import views
from django.contrib import admin
# Customize the title shown on the Django admin index page.
admin.site.index_title = 'Chatops administration'

urlpatterns = [
    # Root URL: delegate to the app's index view.
    path('', views.index, name='index')
]
|
# `engine` dependencies span across the entire project, so it's better to
# leave this __init__.py empty, and use `from matchzoo.engine.package import
# x` or `from matchzoo.engine import package` instead of `from matchzoo
# import engine`.
|
'''
Poker.py 는
포커 작동관련 모든 함수를 정의 해 놓은 곳이다.
'''
import Tools
# 가지고있는 핸드중 가장 높은 숫자의 카드를 돌려주는 함수
def findTopCard(hands:list):
    """Return the highest rank among the five cards (royals mapped to 11-14).

    Ranks below 10 come back zero-padded as a string ('02'..'09'); ranks
    10-14 come back as plain ints — callers are expected to str() the result.
    """
    cards = hands.copy()
    ranks = removeShape(changeRoyal(cards))
    ranks.sort()  # removeShape already returns sorted ranks; kept for safety
    top = int(ranks[4])
    if top < 10:
        return '0' + str(top)
    return top
# 풀하우스, 포카드, 트리플, 투페어, 원페어일때 페어인 카드를 출력해주는 함수
def findMatchCard(hands:list):
    """Return the rank of the paired cards for pair-type hands.

    Tiers (from checkPair): 1 pair / 3 triple / 4 four-of-a-kind return the
    repeated rank; 2 (two pair) returns the higher pair; 6 (full house)
    returns the triple's rank. Ranks below 10 come back zero-padded as a
    string; 10+ as ints (callers str() the result).
    """
    match_card = 0
    cards = hands.copy()
    tier = checkPair(cards)
    cards = changeRoyal(cards)
    cards = removeShape(cards)
    cards_set = Tools.deleteSame(cards)
    if tier == 1 or tier == 3 or tier == 4:
        # Removing one copy of each distinct rank leaves only repeated ranks.
        for i in range(len(cards_set)):
            if cards_set[i] in cards:
                cards.remove(cards_set[i])
        match_card = cards[0]
    elif tier == 2:
        for i in range(len(cards_set)):
            if cards_set[i] in cards:
                cards.remove(cards_set[i])
        cards.sort()
        # Two pair leaves two ranks after the strip; index 1 is the higher pair.
        match_card = cards[1]
    elif tier == 6:
        # Full house: strip one copy of each distinct rank twice; only the
        # rank that appeared three times survives.
        for i in range(len(cards_set)):
            if cards_set[i] in cards:
                cards.remove(cards_set[i])
        for i in range(len(cards_set)):
            if cards_set[i] in cards:
                cards.remove(cards_set[i])
        match_card = cards[0]
    if match_card < 10:
        match_card = ('0' + str(match_card))
    return match_card
# 제시한 7개의 카드로 가능한 모든 핸드의 리스트를 반환해주는 함수
def makeHand(cards:list):
    """Enumerate every 5-card hand obtainable by dropping two of the 7 cards,
    with duplicates removed via Tools.deleteSame."""
    hand_list = []
    for first in range(len(cards)):
        remainder = cards[:first] + cards[first + 1:]
        for second in range(len(remainder)):
            hand_list.append(remainder[:second] + remainder[second + 1:])
    return Tools.deleteSame(hand_list)
# 리스트에서 replacy를 찾아 replacer로 바꿔주는 함수
def replaceHand(hands:list, replacy:str, replacer:str):
    """Replace a card's rank in place: pop the LAST card equal to *replacy*
    (or the first card when no card matches), keep its suit character, and
    append suit + *replacer* at the end. Mutates and returns *hands*."""
    target = 0
    for index, card in enumerate(hands):
        if card == replacy:
            target = index
    removed = hands.pop(target)
    hands.append(removed[0] + replacer)
    return hands
# hand 에서 A,J,Q,K를 숫자로 바꿔주는 함수
def changeRoyal(hands:list):
    """Return a copy of *hands* with royal ranks replaced by numbers
    (A->14, J->11, Q->12, K->13); non-royal cards pass through unchanged.

    Fix: the body was guarded by `if hands[i][1] == 'A' or 'J' or 'Q' or 'K':`
    — a condition that is always true because the bare string literals are
    truthy — so it was dead weight and has been removed. The if/elif/else
    chain below already covers every rank, including the non-royal case.
    Note that replaceHand() pops a card and re-appends it on every call, so
    the loop also rewrites the list order; each card keeps its own suit.
    """
    cards = hands.copy()
    for i in range(len(hands)):
        if hands[i][1] == 'A':
            replaceHand(cards, 'A', '14')
        elif hands[i][1] == 'J':
            replaceHand(cards, 'J', '11')
        elif hands[i][1] == 'Q':
            replaceHand(cards, 'Q', '12')
        elif hands[i][1] == 'K':
            replaceHand(cards, 'K', '13')
        else:
            replaceHand(cards, hands[i][1:], hands[i][1:])
    return cards
# hand에서 앞에 문양을 없애고 숫자만돌려주는 함수
def removeShape(hands:list):
    """Strip the leading suit character from each card and return the numeric
    ranks sorted ascending."""
    return sorted(int(card[1:]) for card in hands)
# Straight인지 판별해주는 함수 Retrun type : bool
def checkStraight(hands:list):
    """Return True for five consecutive ranks; the wheel (A-2-3-4-5, where the
    ace was converted to 14) also counts."""
    cards = hands.copy()
    ranks = removeShape(changeRoyal(cards))
    consecutive = all(int(ranks[i]) + 1 == int(ranks[i + 1]) for i in range(4))
    if not consecutive and int(ranks[4]) == 14:
        # Ace-low straight: the remaining four cards must be exactly 2,3,4,5.
        if [int(r) for r in ranks[:4]] == [2, 3, 4, 5]:
            return True
    return consecutive
# Flush인지 판단해주는 함수 Retrun type : bool
def checkFlush(hands:list):
    """Return True when the first five cards all carry the same suit character."""
    suits = [card[:1] for card in hands]
    return suits[0] == suits[1] == suits[2] == suits[3] == suits[4]
# Pair 와 나머지를 판별해주는 함수 리턴값 0 : 아님 / 1 : 원페어 / 2 : 투페어
# 3 : 트리플 / 4 : 포카드 / 6 : 풀하우스 <-왜 5를 건너 뛰었지..?
def checkPair(hands:list):
    """Classify pair-type hands by counting distinct ranks.

    Returns 0 no pair / 1 one pair / 2 two pair / 3 triple /
    4 four-of-a-kind / 6 full house / -1 on unexpected input.
    (6 for full house preserves the original author's numbering gap.)
    """
    ranks = removeShape(changeRoyal(hands))
    unique = Tools.deleteSame(ranks)
    counts = [ranks.count(rank) for rank in unique]
    if len(unique) == 5:
        return 0
    if len(unique) == 4:
        return 1
    if len(unique) == 3:
        # 3+1+1 is a triple; 2+2+1 is two pair.
        return 3 if 3 in counts else 2
    if len(unique) == 2:
        # 4+1 is four-of-a-kind; 3+2 is a full house.
        return 4 if 4 in counts else 6
    return -1
# 자신이 될 수 있는 모든 패중 가장 높은 핸드를 돌려주는 함수
# 7개의 핸드로 될수 있는 모든 핸드를 만든 후, calcrank해서 가장 높은 점수를 가진 핸드를 반환한다.
# 만약 같은 족보로 점수가 겹칠경우
def MakeStrongestHand(handslist:list):
    """From seven cards, select the strongest 5-card hand.

    Scores every 5-card combination with calcRank and keeps those with the
    maximal score. On a unique winner it is returned as-is; on ties the hand
    is assembled from the cards all tied hands share, topped up with the
    highest distinguishing cards.
    NOTE(review): the tie-break path relies on findHigestHand mutating the
    nested card lists it receives — confirm the assembled hybrid hand is the
    intended behavior.
    """
    allhands = makeHand(handslist)
    handsranking = []
    toprank = 0
    alltophands = []
    tophands = []
    distinguish_cards = []
    sharedcards = []
    # Pair every candidate hand with its score.
    for i in range(len(allhands)):
        handsranking.append([calcRank(allhands[i]),allhands[i]])
    toprank = handsranking[0][0]
    for i in range(len(handsranking)):
        if toprank < handsranking[i][0]:
            toprank = handsranking[i][0]
    # Collect every hand that achieves the top score.
    for i in range(len(handsranking)):
        if toprank == handsranking[i][0]:
            alltophands.append(handsranking[i][1])
    if len(alltophands) == 1:
        tophands = alltophands[0]
    else:# 가장높은 핸드가 여러개라면? (several hands tie for the top score)
        # Start from the cards shared by all tied hands, then repeatedly add
        # the highest cards that distinguish the remaining candidates.
        sharedcards,distinguish_cards = findHigestHand(alltophands)
        for i in range(len(sharedcards[0])):
            tophands.append(sharedcards[0][i])
        while(len(tophands) != 5):
            distinguish_cards = findHighestElement(distinguish_cards)
            sharedcards,distinguish_cards = findHigestHand(distinguish_cards)
            for i in range(len(sharedcards[0])):
                tophands.append(sharedcards[0][i])
    return tophands
# 가장 높은족보를 찾는 함수
def findHigestHand(cards:list):
    """Split a list of hands into their shared cards and their differences.

    Returns (tophands, distinguish_cards) where tophands is a one-element
    list containing the cards present in every hand, and distinguish_cards
    holds each hand minus those shared cards.
    NOTE(review): `cards.copy()` is a shallow copy — the `.remove()` calls
    below mutate the caller's inner hand lists; confirm callers expect that.
    """
    tophands = []
    allcards = []
    allcards_set = []
    sharedcards = []
    distinguish_cards = cards.copy()
    distinguish_nums = []
    # Flatten all hands, then keep cards that occur once per hand.
    for i in range(len(cards)):
        for j in range(len(cards[0])):
            allcards.append(cards[i][j])
    allcards.sort()
    allcards_set = Tools.deleteSame(allcards)
    for i in range(len(allcards_set)):
        if allcards.count(allcards_set[i]) == len(cards):
            sharedcards.append(allcards_set[i])
    tophands.append(sharedcards)
    # Strip the shared cards from each hand (mutates the nested lists).
    for i in range (len(cards)):
        for j in range(len(sharedcards)):
            distinguish_cards[i].remove(sharedcards[j])
    return tophands, distinguish_cards
# 가장 높은 탑카드를 찾는 함수
def findHighestElement(cards:list):
    """Keep only the hands that contain the overall highest-ranked card.

    Converts royals to numbers to find the maximum rank, maps it back to a
    letter for 11-14, then filters hands whose cards include it.
    NOTE(review): the filter compares `cards[i][j][1]` (a single character)
    against str(highest) — for two-digit ranks like '10' this can never
    match; confirm how multi-digit ranks are expected to behave here.
    """
    have_highest_el = []
    hands = []
    for i in range(len(cards)):
        for j in range(len(cards[0])):
            hands.append(cards[i][j])
    hands = changeRoyal(hands)
    nums = removeShape(hands)
    nums_set = Tools.deleteSame(nums)
    highest = max(nums_set)
    # Map numeric royals back to their letter form for the comparison below.
    if highest > 10 :
        if highest == 11:
            highest = 'J'
        elif highest == 12:
            highest = 'Q'
        elif highest == 13:
            highest = 'K'
        elif highest == 14:
            highest = 'A'
    for i in range(len(cards)):
        for j in range(len(cards[0])):
            if cards[i][j][1] == str(highest):
                have_highest_el.append(cards[i])
    have_highest_el.sort()
    return have_highest_el
# 핸드를 제공하면 카드 점수를 돌려주는 함수 (점수는 세자리 숫자의 문자열로 반환한다.)
# 첫번째 숫자는 족보를 의미하며 나머지 두자리숫자는 그에 맞는 카드의 숫자이다.
def calcRank(hands:list):
    """Score a 5-card hand as a 3-character string.

    The first character is the hand tier (8 straight flush, 7 four-of-a-kind,
    6 full house, 5 flush, 4 straight, 3 triple, 2 two pair, 1 one pair,
    0 high card); the last two characters are the deciding card's rank,
    zero-padded. Returns the int 0 when the hand cannot be classified.
    """
    straight = bool(checkStraight(hands))
    if bool(checkFlush(hands)) == True:
        # 8 = straight flush, 5 = plain flush.
        return ('8' if straight else '5') + str(findTopCard(hands))
    if straight:
        ranks = removeShape(changeRoyal(hands))
        ranks.sort()
        if [int(r) for r in ranks[:4]] == [2, 3, 4, 5]:
            return '4' + '05'  # wheel: the five plays as the top card
        return '4' + str(findTopCard(hands))
    tier_to_head = {6: '6', 4: '7', 3: '3', 2: '2', 1: '1'}
    tier = checkPair(hands)
    if tier in tier_to_head:
        return tier_to_head[tier] + str(findMatchCard(hands))
    if tier == 0:
        return '0' + str(findTopCard(hands))
    return 0  # tier == -1: preserve the original fall-through value
# calcrank 된 세자리 숫자를 주면 족보를 글로 반환해주는 함수.
def showrank(rank:int):
    """Translate a calcRank() score into English words.

    `rank` is a 3-character score (tier digit + 2-digit card code), given as
    a string or int. Returns just the tier name for two pair and above, or
    '<card> <tier name>' for one pair / high card, and -1 when the score
    cannot be decoded.

    Fix: calcRank zero-pads card codes below ten ('02'..'09'), but the
    original comparisons only knew single-digit forms ('2'..'9'), so every
    low-card score decoded to -1; a single leading zero is now stripped.
    """
    rankstr = str(rank)
    rankmatch = rankstr[1] + rankstr[2]
    if rankmatch.startswith('0'):
        rankmatch = rankmatch[1:]
    heads = {
        '8': 'straight flush', '7': 'four card', '6': 'fullhouse',
        '5': 'flush', '4': 'straight', '3': 'triple',
        '2': 'two pair', '1': 'one pair', '0': 'top',
    }
    tails = {
        '14': 'ace', '13': 'king', '12': 'queen', '11': 'jack', '10': 'ten',
        '9': 'nine', '8': 'eight', '7': 'seven', '6': 'six', '5': 'five',
        '4': 'four', '3': 'three', '2': 'two',
    }
    if rankstr[0] not in heads:
        return -1
    if rankmatch not in tails:
        return -1
    rankhead = heads[rankstr[0]]
    ranktail = tails[rankmatch]
    # For two pair and above the tier alone is announced.
    if int(rankstr[0]) > 1:
        return rankhead
    return ranktail + " " + rankhead
#여러개의 핸드를 주면 가장 높은 패를 가지고 있는 핸드의 index + 1를 돌려준다.
#만약 승자가 여려명이라면 무승부인 모든 플레이어를 리턴한다.
def checkWhoWin(hands:list):
    """Return the 1-based index of the hand with the highest calcRank score.

    On a tie, return a string concatenating the 1-based numbers of every
    tied player (e.g. '13' when players 1 and 3 draw), per the comment above
    the original function.

    Fix: the tie branch computed `str(winner) + str(i+1)` and discarded the
    result, so ties always reported 0; the concatenation is now accumulated.
    NOTE(review): ties return a string while unique wins return an int —
    confirm callers handle both.
    """
    playernum = len(hands)
    playerranks = []
    for i in range(playernum):
        playerranks.append(calcRank(hands[i]))
    winrank = max(playerranks)
    if playerranks.count(winrank) != 1:
        winner = ''
        for i in range(playernum):
            if playerranks[i] == winrank:
                winner = winner + str(i + 1)
    else:
        winner = playerranks.index(winrank) + 1
    return winner
|
# Imports
import keras_ocr
import helpers
# Prepare OCR recognizer (keras_ocr default pretrained recognition model)
recognizer = keras_ocr.recognition.Recognizer()

# Load images and their labels from the local 'Dataset' folder
dataset_folder = 'Dataset'
image_file_filter = '*.jpg'
images_with_labels = helpers.load_images_from_folder(
    dataset_folder, image_file_filter)

# Perform OCR recognition on the input images
# (each entry is indexable; only element 0 — the image — is recognized here)
predicted_labels = []
for image_with_label in images_with_labels:
    predicted_labels.append(recognizer.recognize(image_with_label[0]))

# Display results in a rows x cols grid via the project's helpers module
rows = 4
cols = 2
font_size = 14
helpers.plot_results(images_with_labels, predicted_labels, rows, cols, font_size)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import * # noqa
# Locale identifier (kept as a plain byte/str literal, not a unicode literal).
localeID = 'de_DE'
dateSep = ['.']
timeSep = [':']
# German uses the 24-hour clock, so there are no meridian (AM/PM) markers.
meridian = []
usesMeridian = False
uses24 = True
decimal_mark = ','

# Weekday and month names, lowercase for case-insensitive matching.
Weekdays = [
    'montag', 'dienstag', 'mittwoch',
    'donnerstag', 'freitag', 'samstag', 'sonntag',
]
shortWeekdays = ['mo', 'di', 'mi', 'do', 'fr', 'sa', 'so']
Months = [
    'januar', 'februar', 'märz',
    'april', 'mai', 'juni',
    'juli', 'august', 'september',
    'oktober', 'november', 'dezember',
]
shortMonths = [
    'jan', 'feb', 'mrz', 'apr', 'mai', 'jun',
    'jul', 'aug', 'sep', 'okt', 'nov', 'dez',
]
# CLDR-style date/time format patterns.
dateFormats = {
    'full': 'EEEE, d. MMMM yyyy',
    'long': 'd. MMMM yyyy',
    'medium': 'dd.MM.yyyy',
    'short': 'dd.MM.yy',
}
timeFormats = {
    'full': 'HH:mm:ss v',
    'long': 'HH:mm:ss z',
    'medium': 'HH:mm:ss',
    'short': 'HH:mm',
}
# Day-month-year ordering for numeric dates like 31.12.99.
dp_order = ['d', 'm', 'y']
# the short version would be a capital M,
# as I understand it we can't distinguish
# between m for minutes and M for months.
units = {
    'seconds': ['sekunden', 'sek', 's'],
    'minutes': ['minuten', 'min'],
    'hours': ['stunden', 'std', 'h'],
    'days': ['tag', 'tage', 'tagen', 't'],
    'weeks': ['wochen', 'woche', 'w'],
    'months': ['monat', 'monate'],
    'years': ['jahren', 'jahr', 'jahre', 'j'],
}
# Spelled-out numbers; entries without umlauts ('funf', 'zwolf', 'dreibig',
# 'funfzig') accept ASCII-only input. 'ignore' marks the conjunction word.
numbers = {
    'null': 0,
    'eins': 1,
    'ein': 1,
    'zwei': 2,
    'drei': 3,
    'vier': 4,
    'fünf': 5,
    'funf': 5,
    'sechs': 6,
    'sieben': 7,
    'acht': 8,
    'neun': 9,
    'zehn': 10,
    'elf': 11,
    'zwölf':12,
    'zwolf': 12,
    'zwanzig': 20,
    'dreißig': 30,
    'dreibig': 30,
    'vierzig': 40,
    'fünfzig': 50,
    'funfzig': 50,
    'sechzig': 60,
    'siebzig': 70,
    'achtzig': 80,
    'neunzig': 90,
    'hundert': 100,
    'ignore': 'und'
}
# Start from the base locale's regex fragments and override German specifics.
re_values = re_values.copy()
re_values.update({
    'specials': 'am|dem|der|im|in|den|zum',
    'timeseparator': ':',
    'of': '.', # "eg. 3rd of march"
    'rangeseparator': '-|bis',
    'daysuffix': '',
    # NOTE(review): 'm' appears twice (minutes vs months) — likely unintended
    # duplication; confirm upstream which unit 'm' should resolve to.
    'qunits': 'h|m|s|t|w|m|j',
    'now': ['jetzt'],
    # NOTE(review): this pattern mixes 'nach' (after) with 'vor' (before) —
    # verify against the base locale's expected semantics for 'after'.
    'after': 'nach|vor|später|spater',
    'from': 'von', # num unit from rel
    # NOTE(review): 'deises' looks like a typo for 'dieses'; the string is
    # runtime data, so it is left unchanged here — confirm upstream.
    'this': 'deises|diesen|kommenden',
    'next': 'nächsten|nächster|nächste|nachsten|nachster|nachste',
    'last': 'letzter|letzten|letzte',
    'in': r'in', # "in 5 days" #done (german same eng)
    'since': 'seit', # since time, since date, since num unit
})
# Used to adjust the returned date before/after the source
# still looking for insight on how to translate all of them to german.
Modifiers = {
    'from': 1,
    'before': -1,
    'nach': 1,
    'vor': -1,
    # NOTE(review): 'später' means "later"; -1 looks inverted relative to
    # 'nach': 1 above — confirm the intended direction.
    'später': -1,
    'spater': -1,
    'vergangener': -1,
    'vorheriger': -1,
    'prev': -1,
    'seit': -1,
    'letzter': -1,
    'letzten': -1,
    'letzte': -1,
    'nächster': 1,
    'nächsten': 1,
    'nächste': 1,
    'nachster': 1,
    'nachsten': 1,
    'nachste': 1,
    'dieser': 0,
    'diesen': 0,
    'kommenden': 0,
    'previous': -1,
    'in a': 2,
    'end of': 0,
    'eod': 0,
    'eo': 0,
}
# morgen/abermorgen does not work, see
# http://code.google.com/p/parsedatetime/issues/detail?id=19
dayOffsets = {
    'morgen': 1,
    'heute': 0,
    'gestern': -1,
    'vorgestern': -2,
    'übermorgen': 2,
    'ubermorgen': 2,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = {
    'mittag': {'hr': 12, 'mn': 0, 'sec': 0},
    'mittags': {'hr': 12, 'mn': 0, 'sec': 0},
    'mittagessen': {'hr': 12, 'mn': 0, 'sec': 0},
    'morgen': {'hr': 6, 'mn': 0, 'sec': 0},
    'morgens': {'hr': 6, 'mn': 0, 'sec': 0},
    'frühstück': {'hr': 8, 'mn': 0, 'sec': 0},
    'abendessen': {'hr': 19, 'mn': 0, 'sec': 0},
    'abend': {'hr': 18, 'mn': 0, 'sec': 0},
    'abends': {'hr': 18, 'mn': 0, 'sec': 0},
    'mitternacht': {'hr': 0, 'mn': 0, 'sec': 0},
    'nacht': {'hr': 21, 'mn': 0, 'sec': 0},
    'nachts': {'hr': 21, 'mn': 0, 'sec': 0},
    'heute abend': {'hr': 21, 'mn': 0, 'sec': 0},
    'heute nacht': {'hr': 21, 'mn': 0, 'sec': 0},
    'feierabend': {'hr': 17, 'mn': 0, 'sec': 0},
}
|
def leiaint(a):
    """Prompt with message *a* until a numeric string is typed, then echo and
    return the integer.

    Fixes: the original called int(n) and discarded the result, returned
    nothing, and re-tested n.isnumeric() in a branch that is always true
    after the loop. NOTE(review): str.isnumeric() also accepts characters
    like '²' that int() rejects — that latent crash existed in the original
    as well; switch to a try/except int() loop if it matters.
    """
    n = str(input(a))
    while not n.isnumeric():
        print('\033[1;31mERRO! Digite um valor válido!\033[0;0m')
        n = str(input(a))
    n = int(n)
    print(f'Ok, o número {n} é inteiro.')
    return n
leiaint('Digite um número: ')
|
import pytest
from .dict_serialization_helpers import *
class TestTupleKeysSerializers(object):
    """Tests for the tuple_keys_to_dict / tuple_keys_from_dict decorators
    (star-imported from dict_serialization_helpers above)."""
    class ExampleObj(object):
        # Minimal serializable object with one tuple-keyed dict attribute.
        def __init__(self, foo, bar):
            self.foo = foo
            self.bar = bar

        def __eq__(self, other):
            return self.foo == other.foo and self.bar == other.bar

        def to_dict(self):
            return {'foo': self.foo, 'bar': self.bar}

        @classmethod
        def from_dict(cls, dct):
            return cls(**dct)

    def setup(self):
        # pytest nose-style per-test setup; runs before each test method.
        self.foo = {('a', 'b'): [1, 2], ('c', 'd'): [3, 4]}
        self.bar = 3
        self.obj = self.ExampleObj(self.foo, self.bar)
        # Expected serialized form: tuple keys split into parallel lists.
        self.dct = {'foo': {'foo_tuple_keys': [('a', 'b'), ('c', 'd')],
                            'foo_values': [[1, 2], [3, 4]]},
                    'bar': 3}

    def test_tuple_keys_to_dict(self):
        decorated = tuple_keys_to_dict(self.ExampleObj.to_dict, 'foo')
        assert decorated(self.obj) == self.dct

    def test_tuple_keys_from_dict(self):
        decorated = tuple_keys_from_dict(self.ExampleObj.from_dict, 'foo')
        # requires explicit cls because we're not binding to the class!
        assert decorated(self.ExampleObj, self.dct) == self.obj
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import threading
import time
import random
import re
import ufora.FORA.python.Evaluator.Evaluator as Evaluator
import ufora.FORA.python.ExecutionContext as ExecutionContext
import ufora.FORA.python.FORA as FORA
import ufora.FORA.python.Runtime as Runtime
import ufora.FORA.python.ForaValue as ForaValue
import ufora.native.FORA as FORANative
import ufora.config.Setup as Setup
import ufora.native.CallbackScheduler as CallbackScheduler
callbackScheduler = CallbackScheduler.singletonForTesting()
def triggerAfter(f, timeout):
    """Call 'f' after timeout seconds on a background thread.

    Returns the started Thread so callers can join it; the original returned
    None, so returning the thread is backward compatible.
    """
    def threadFun():
        time.sleep(timeout)
        f()
    thread = threading.Thread(target = threadFun)
    thread.start()
    return thread
def enum(**enums):
    """Build a lightweight enum: a throwaway class whose attributes are the
    given keyword constants, e.g. enum(A=0, B=1).B == 1."""
    return type('Enum', (), dict(enums))
def evaluate(context, *args):
    """Place `args` into `context` as a FORA application tuple and compute it."""
    context.placeInEvaluationState(FORANative.ImplValContainer(args))
    context.compute()
#ExecutionMode = enum(Interpreted=0, SampledSpecializations=1, Compiled=2)
#InterruptAction = enum(Noop=0, SpecializeOnCurrent=1, DrainAllPendingCompilations=2)
def dumpToFile(toDump, fileName):
    """Write str(item) for each item in `toDump` to `fileName`, one per line.

    Fix: replaces the Python-2-only `print >> f` statement with an explicit
    write so the helper also runs under Python 3; output is byte-identical.
    """
    with open(fileName, "w") as f:
        for item in toDump:
            f.write(str(item) + "\n")
class TestExecutionContext(unittest.TestCase):
    def setUp(self):
        """Prepare per-test state: RNG seed, interrupt rate, and three FORA
        test functions (infinite loop, recursive sum, bounded counting loop)."""
        self.executionContextSeed = random.randint(0, 4294967295)
        self.interruptRate = 1000
        #Runtime.getMainRuntime().dynamicOptimizer.clearInstructionCache()
        #Runtime.getMainRuntime().dynamicOptimizer.resume()
        # Never-terminating counter; useful for interrupt tests.
        self.loopFun = FORA.extractImplValContainer(
            FORA.eval(
                """fun()
                    {
                    let c = 0;
                    while (true)
                        c = c + 1;
                    }
                    """))
        # Divide-and-conquer sum over [0, 2000).
        self.simpleSum = FORA.extractImplValContainer(
            FORA.eval(
                """fun ()
                    {
                    let sum = fun(a, b)
                        {
                        if (a >= b)
                            return nothing
                        if (a+1 >= b)
                            return a
                        let mid = Int64((a+b)/2);
                        return sum(a, mid) + sum(mid, b)
                        }
                    return sum(0, 2000)
                    }
                    """))
        # Bounded loop that terminates at 10000.
        self.simpleLoop = FORA.extractImplValContainer(
            FORA.eval("""fun() { let x = 0; while (x < 10000) x = x + 1; return x }""")
            )
# TODO: reenable these tests with the new compiler model
# def test_deterministicInterrupt(self):
# self.verifyDeterministicExecutionWithSpecializations(self.simpleLoop)
# def test_deterministicInterrupt_in_loop(self):
# self.verifyDeterministicExecutionInInterpreter(self.simpleLoop)
# def test_deterministicExecution_of_sum_interpreted(self):
# self.verifyDeterministicExecutionInInterpreter(self.simpleSum)
# def test_deterministicExecution_of_sum_with_specializations(self):
# self.verifyDeterministicExecutionWithSpecializations(self.simpleSum)
# def test_deterministicExecution_of_sum_compiled(self):
# self.verifyDeterministicExecutionInCompiler(self.simpleSum)
# def verifyDeterministicExecutionInInterpreter(self, fun):
# return self.verifyDeterministicExecution(fun, ExecutionMode.Interpreted)
# def verifyDeterministicExecutionWithSpecializations(self, fun):
# return self.verifyDeterministicExecution(fun, ExecutionMode.SampledSpecializations)
# def verifyDeterministicExecutionInCompiler(self, fun):
# return self.verifyDeterministicExecution(fun, ExecutionMode.Compiled)
# def verifyDeterministicExecution(self, fun, executionMode=ExecutionMode.Compiled):
# firstPassResults = None
# runFirst = None
# runRest = None
# if executionMode == ExecutionMode.SampledSpecializations:
# runFirst = runRest = self.runWithSpecializations
# runFirst(fun)
# elif executionMode == ExecutionMode.Interpreted:
# runFirst = runRest = self.runInterpreted
# elif executionMode == ExecutionMode.Compiled:
# #self.runCompiled(fun)
# runFirst = self.runCompiled
# runRest = self.runInterpreted
# else:
# self.assertEqual(executionMode, ExecutionMode.Compiled)
# for passIndex in range(4):
# Runtime.getMainRuntime().dynamicOptimizer.clearInstructionCache()
# print "Starting pass", passIndex
# rawTrace, steps1 = runFirst(fun)
# newTrace = "\n".join(rawTrace)
# self.assertTrue(len(steps1) > 0)
# rawTrace, steps2 = runRest(fun)
# newTrace2 = "\n".join(rawTrace)
# self.assertTrue(len(steps2) > 0)
# rawTrace, steps3 = runRest(fun, steps2)
# newTrace3 = "\n".join(rawTrace)
# self.assertTrue(len(steps3) > 0)
# if executionMode == ExecutionMode.Interpreted:
# self.compareLists(steps1, steps2, 'inner1')
# self.compareLists(steps2, steps3, 'inner2')
# if passIndex == 0:
# # we compare traces to the ones from the second iteration because
# # the first iteration can have interpreter frames that don't appear
# # in subsequent runs once code has been compiled.
# firstPassResults = {}
# firstPassResults["steps1"] = steps1
# firstPassResults["steps2"] = steps2
# firstPassResults["steps3"] = steps3
# elif passIndex > 0:
# self.compareLists(firstPassResults["steps1"], steps1, 'diff1')
# self.compareLists(firstPassResults["steps2"], steps2, 'diff2')
# self.compareLists(firstPassResults["steps3"], steps3, 'diff3')
# def runWithInterrupts(self, func, interruptAction, previousTrace):
# vdm = FORANative.VectorDataManager(Setup.config().maxPageSizeInBytes)
# if interruptAction == InterruptAction.DrainAllPendingCompilations:
# context = ExecutionContext.ExecutionContext(dataManager = vdm)
# Runtime.getMainRuntime().dynamicOptimizer.pause()
# else:
# context = ExecutionContext.ExecutionContext(
# dataManager = vdm,
# allowInterpreterTracing = False
# )
# context.enalbeExecutionStepsRecording()
# if previousTrace != None:
# context.setExpectedSteps(previousTrace)
# context.interruptAfterCycleCount(self.interruptRate)
# evaluate(context, func, FORANative.symbol_Call)
# traces = []
# while context.isInterrupted():
# Runtime.getMainRuntime().dynamicOptimizer.resume()
# Runtime.getMainRuntime().dynamicOptimizer.drainCompilationQueue()
# Runtime.getMainRuntime().dynamicOptimizer.pause()
# traces.append(context.extractCurrentTextStacktrace())
# if interruptAction == InterruptAction.SpecializeOnCurrent:
# context.specializeOnCurrentInterpreterInstruction()
# context.resetInterruptState()
# context.interruptAfterCycleCount(self.interruptRate)
# context.resume()
# self.assertTrue(context.isFinished())
# return ([re.sub("CIG_[0-9]+", "CIG", x) for x in traces], context.getRecordedSteps())
# def runWithSpecializations(self, func, previousTrace=None):
# return self.runWithInterrupts(func, InterruptAction.SpecializeOnCurrent, previousTrace)
# def runInterpreted(self, func, previousTrace=None):
# return self.runWithInterrupts(func, InterruptAction.Noop, previousTrace)
# def runCompiled(self, func, previousTrace=None):
# return self.runWithInterrupts(func, InterruptAction.DrainAllPendingCompilations, previousTrace)
def compareLists(self, list1, list2, prefix):
l1 = [(i[0], i[1]) for i in list1]
l2 = [(i[0], i[1]) for i in list2]
if len(l1) != len(l2) or l1 != l2:
dumpToFile(list1, prefix + ".1")
dumpToFile(list2, prefix + ".2")
self.assertTrue(False, "lists differ. prefix= " + prefix)
    def test_refcountsInCompiledCode(self):
        """Deep recursion ending in a vector load must not leave the paused
        stacktrace littered with Vector refcounts."""
        vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
        context = ExecutionContext.ExecutionContext(
            dataManager = vdm,
            allowInterpreterTracing = True,
            blockUntilTracesAreCompiled = True,
            allowInternalSplitting = False
            )
        text = """fun(){
            let f = fun(v, depth) {
                if (depth > 100)
                    //this will trigger an interrupt since the data cannot exist in the VDM
                    datasets.s3('','')
                else
                    f(v, depth+1)
                }
            f([1,2,3,4,5], 0)
            }"""
        evaluate(context,
            FORA.extractImplValContainer(FORA.eval(text)),
            FORANative.symbol_Call
            )
        stacktraceText = context.extractCurrentTextStacktrace()
        # Arbitrary small bound: the trace should not mention Vector per frame.
        self.assertTrue(stacktraceText.count("Vector") < 10)
    def pageLargeVectorHandlesTest(self, text, cycleCount, expectsToHavePages):
        """Helper: run FORA `text` for `cycleCount` cycles, then check whether
        pageLargeVectorHandles(0) finds (and pages out) large vector handles.

        Returns the context so callers can resume the computation.
        """
        vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
        context = ExecutionContext.ExecutionContext(
            dataManager = vdm,
            allowInterpreterTracing = False,
            allowInternalSplitting = False
            )
        context.configuration.agressivelyValidateRefcountsAndPageReachability = True
        context.placeInEvaluationState(
            FORANative.ImplValContainer(
                (
                    FORA.extractImplValContainer(FORA.eval(text)),
                    FORANative.symbol_Call
                    )
                )
            )
        context.interruptAfterCycleCount(cycleCount)
        context.compute()
        if expectsToHavePages:
            # Second call must be a no-op: everything pageable was paged out.
            self.assertTrue(context.pageLargeVectorHandles(0))
            self.assertFalse(context.pageLargeVectorHandles(0))
        else:
            self.assertFalse(context.pageLargeVectorHandles(0))
        return context
    def test_pageLargeVectorHandleSlicesWorks(self):
        """Paging out a sliced vector must not corrupt it: resuming afterwards
        still computes size(v2) == 1580."""
        context = self.pageLargeVectorHandlesTest("""fun() {
            let v = Vector.range(100);
            v = v + v + v + v
            v = v + v + v + v
            let v2 = v[10,-10];
            let res = 0;
            for ix in sequence(10000) {
                res = res + v[0]
            }
            size(v2)
            }""",
            5000,
            True
        )
        context.resetInterruptState()
        context.compute()
        # 100 * 16 = 1600 elements, minus 10 from each end of the slice.
        self.assertEqual(context.getFinishedResult().asResult.result.pyval, 1580)
    def test_pageLargeVectorHandles(self):
        """A frame holding one large concatenated vector should be pageable."""
        #check that walking a frame with a few VectorHandles works
        self.pageLargeVectorHandlesTest(
            """fun() {
            let res = 0
            let v = [1,2,3];
            v = v + v + v + v
            v = v + v + v + v
            v = v + v + v + v
            v = v + v + v + v
            v = v + v + v + v
            for ix in sequence(10000)
                res = res + ix
            res + v[0]
            }""",
            5000,
            True
            )
    def test_pageLargeVectorHandles_2(self):
        """A vector of vectors held in a frame should be pageable."""
        #check that walking a frame with a few VectorHandles works
        self.pageLargeVectorHandlesTest(
            """fun() {
            let res = 0
            let v = [1,2,3];
            let v2 = [v,v,v,v,v,v]
            for ix in sequence(10000)
                res = res + ix
            res + v2[0][0]
            }""",
            5000,
            True
            )
    def test_pageLargeVectorHandles_3(self):
        """Nested vector comprehensions mid-computation should be pageable."""
        #check that walking a frame with a few VectorHandles works
        self.pageLargeVectorHandlesTest(
            """fun() {
            let res = 0
            let v = [[x for x in sequence(ix)] for ix in sequence(1000)]
            v.sum(fun(x){x.sum()})
            }""",
            300000,
            True
            )
def test_pageLargeVectorHandles_4(self):
    #check that walking a frame with a few VectorHandles works
    # (the handle is held across a deep recursion)
    self.pageLargeVectorHandlesTest(
        """fun() {
let res = 0
let v = [1,2,3,4]
v = v + v + v + v
v = v + v + v + v
v = v + v + v + v
v = v + v + v + v
let f = fun(vec, x) {
if (x > 0)
return f(vec, x - 1) + f(vec, x - 1)
else
return 0
}
f(v, 10)
}""",
        1000,
        True
        )
def test_pageLargeVectorHandles_5(self):
    #check that walking a frame with a few VectorHandles works
    # (recursion over slices creates many distinct handles)
    self.pageLargeVectorHandlesTest(
        """fun() {
let res = 0
let v = [1,2,3,4]
v = v + v + v + v
v = v + v + v + v
v = v + v + v + v
v = v + v + v + v
let f = fun(vec) {
let res = 0;
for ix in sequence(1, size(vec) - 1)
res = res + f(vec[,ix]) + f(vec[ix,])
res
}
f(v)
}""",
        10000,
        True
        )
def test_resumingAfterCopyDataOutOfPages(self):
    # Interrupt an infinite loop over a paged vector, then repeatedly copy
    # values out of vector pages and unload them until the context blocks
    # on a vector load. The paused stack must have the same number of
    # frames before and after the copy/unload cycles.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInternalSplitting = False
        )
    text = """
fun() {
let v = Vector.range(1000).paged;
let ix1 = 0
let res = 0
while (true) {
res = res + v[ix1]
ix1 = (ix1 + 1) % size(v)
}
res
}"""
    context.placeInEvaluationState(FORANative.ImplValContainer((
        FORA.extractImplValContainer(FORA.eval(text)),
        FORANative.symbol_Call
        )))
    context.interruptAfterCycleCount(100000)
    context.compute()
    paused1 = context.extractPausedComputation()
    while not context.isVectorLoad():
        context.copyValuesOutOfVectorPages()
        vdm.unloadAllPossible()
        context.resetInterruptState()
        context.interruptAfterCycleCount(100000)
        context.compute()
    paused2 = context.extractPausedComputation()
    self.assertTrue(len(paused1.asThread.computation.frames) == len(paused2.asThread.computation.frames))
def copyDataOutOfPagesTest(self, text, cycleCount, expectsToHaveCopies):
    """Run FORA source `text` for `cycleCount` cycles, then check copyValuesOutOfVectorPages.

    If `expectsToHaveCopies` is True, the first call must copy something
    out of the pages and a second call must find nothing left; otherwise
    the first call must already find nothing to copy.
    """
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    # NOTE: 'agressively' is the spelling of the native config attribute.
    context.configuration.agressivelyValidateRefcountsAndPageReachability = True
    # Keep handles alive after execution so the copy pass has work to find.
    context.configuration.releaseVectorHandlesImmediatelyAfterExecution = False
    context.placeInEvaluationState(FORANative.ImplValContainer((
        FORA.extractImplValContainer(FORA.eval(text)),
        FORANative.symbol_Call
        )))
    context.interruptAfterCycleCount(cycleCount)
    context.compute()
    if expectsToHaveCopies:
        self.assertTrue(context.copyValuesOutOfVectorPages())
        self.assertFalse(context.copyValuesOutOfVectorPages())
    else:
        self.assertFalse(context.copyValuesOutOfVectorPages())
def test_copyDataOutOfPages_1(self):
    #verify that just walking the stackframes doesn't segfault us
    # (no vectors at all, so nothing should need copying)
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
for ix in sequence(10000)
res = res + ix
res
}""",
        5000,
        False
        )
def test_copyDataOutOfPages_2(self):
    #check that walking a frame with a VectorHandle appears to work
    # (a directly-paged vector has no interior references to copy out)
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
let v = [1,2,3].paged;
for ix in sequence(10000)
res = res + ix
res + v[0]
}""",
        5000,
        False
        )
def test_copyDataOutOfPages_3(self):
    #walk a frame where we are holding a VectorHandle from within a paged vector
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
//allocate a vector that's a reference into a paged Vector
let v = [[1,2,3]].paged[0];
for ix in sequence(10000)
res = res + ix
res + v[0]
}""",
        5000,
        True
        )
def test_copyDataOutOfPages_4(self):
    #walk a frame where we are holding a VectorHandle from within a paged vector
    # (the same interior vector extracted twice must both be handled)
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
//allocate one vector, but put it in twice, and pull it out twice
let (v1, v2) = (
let v0 = [1,2,3]
let vPaged = [v0,v0].paged;
(vPaged[0],vPaged[1])
);
for ix in sequence(10000)
res = res + ix
res + v1[0] + v2[0]
}""",
        5000,
        True
        )
def test_copyDataOutOfPages_5(self):
    #walk a frame where we are holding a VectorHandle from within a paged vector
    # (doubly-nested paging: an interior vector of a paged vector of paged vectors)
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
let v = [ [ [1.0].paged ] ].paged;
//now grab interior vector
let v2 = v[0]
for ix in sequence(10000)
res = res + ix
res + size(v2)
}""",
        5000,
        True
        )
def test_copyDataOutOfPages_Strings(self):
    #walk a frame where we are holding a VectorHandle from within a paged vector
    # (string payloads exercise non-POD copying out of the pages)
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
//allocate one vector, but put it in twice, and pull it out twice
let (a, b) = (
let v = ["asdfasdfasdfasdfasdfasdfasdfasdfasdfasdf",
"bsdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf",
"casasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfdfasdfasdf"].paged;
(v[0],v[1])
);
for ix in sequence(10000)
res = res + ix
res + a[0] + b[0]
}""",
        5000,
        True
        )
def test_copyDataOutOfPages_VectorTrees(self):
    #walk a frame where we are holding a VectorHandle from within a paged vector
    # (the interior value is itself a concatenation tree of paged/unpaged parts)
    self.copyDataOutOfPagesTest(
        """fun() {
let res = 0
//allocate one vector, but put it in twice, and pull it out twice
let (v1, v2) = (
let v0 = ["a"].paged + ["b"] + ["c"].paged + ["d"] + ["e"].paged;
let vPaged = [v0,v0].paged;
(vPaged[0],vPaged[1])
);
for ix in sequence(10000)
res = res + ix
res + v1[0] + v2[0]
}""",
        5000,
        True
        )
def test_verifyThatExtractingPausedComputationsDoesntDuplicateLargeStrings(self):
    # A paused computation holds a ~1MB string across ~20 recursive frames.
    # Resuming it in a second context must not duplicate the string per
    # frame, so the new context should use less than twice the memory.
    text = """fun() {
let s = ' '
while (size(s) < 1000000)
s = s + s
let f = fun(x) { if (x > 0) return f(x-1) + s[x]; `TriggerInterruptForTesting() }
f(20)
}"""
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    evaluate(context,
        FORA.extractImplValContainer(FORA.eval(text)),
        FORANative.symbol_Call
        )
    computation = context.extractPausedComputation()
    context2 = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    context2.resumePausedComputation(computation)
    self.assertTrue(
        context2.totalBytesUsed < 2 * context.totalBytesUsed
        )
def test_extractPausedComputation(self):
    # Pause a simple counting loop after ~1010 cycles, move the paused
    # computation into a fresh context, and verify it completes there
    # with the correct final value.
    text = """fun() {
let x = 0;
while (x < 100000)
x = x + 1
x
}"""
    self.runtime = Runtime.getMainRuntime()
    #self.dynamicOptimizer = self.runtime.dynamicOptimizer
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    context.interruptAfterCycleCount(1010)
    evaluate(context,
        FORA.extractImplValContainer(FORA.eval(text)),
        FORANative.symbol_Call
        )
    computation = context.extractPausedComputation()
    context2 = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    context2.resumePausedComputation(computation)
    context2.compute()
    self.assertEqual(context2.getFinishedResult().asResult.result.pyval, 100000)
    context.teardown()
    context2.teardown()
def test_extractPausedComputationDuringVectorLoad(self):
    # Create a paged vector, unload its pages, then index into it: the
    # context should block on a vector load, and the paused computation
    # extracted at that point should consist of a single frame.
    self.runtime = Runtime.getMainRuntime()
    #self.dynamicOptimizer = self.runtime.dynamicOptimizer
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    evaluate(context,
        FORA.extractImplValContainer(FORA.eval("fun() { [1,2,3].paged }")),
        FORANative.ImplValContainer(FORANative.makeSymbol("Call"))
        )
    pagedVec = context.getFinishedResult().asResult.result
    vdm.unloadAllPossible()
    context.placeInEvaluationState(
        FORANative.ImplValContainer(
            (pagedVec,
            FORANative.ImplValContainer(FORANative.makeSymbol("GetItem")),
            FORANative.ImplValContainer(0))
            )
        )
    context.compute()
    self.assertTrue(context.isVectorLoad(), context.extractCurrentTextStacktrace())
    computation = context.extractPausedComputation()
    self.assertEqual(len(computation.asThread.computation.frames),1)
def test_resumePausedComputationWithResult(self):
    # Truncate a paused recursive computation to its top five frames,
    # splice in a replacement result value of ([2], 0), resume, and
    # check that the final sum reflects the injected vector (3 frames
    # each adding v2[0] == 2 gives 6).
    self.runtime = Runtime.getMainRuntime()
    #self.dynamicOptimizer = self.runtime.dynamicOptimizer
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInterpreterTracing = False,
        allowInternalSplitting = False
        )
    text = """
let f = fun(v, ix) {
if (ix > 0)
{
let (v2,res) = f(v,ix-1);
return (v2, res + v2[0])
}
`TriggerInterruptForTesting()
return (v, 0)
};
f([1], 10)
"""
    evaluate(context,
        FORA.extractImplValContainer(FORA.eval("fun() { " + text + " }")),
        FORANative.ImplValContainer(FORANative.makeSymbol("Call"))
        )
    assert context.isInterrupted()
    pausedComp = context.extractPausedComputation()
    framesToUse = pausedComp.asThread.computation.frames[0:5]
    pausedComp2 = FORANative.PausedComputationTree(
        FORANative.PausedComputation(
            framesToUse,
            FORA.extractImplValContainer(FORA.eval("([2], 0)", keepAsForaValue=True)),
            False
            )
        )
    context.resumePausedComputation(pausedComp2)
    context.copyValuesOutOfVectorPages()
    context.pageLargeVectorHandles(0)
    context.resetInterruptState()
    context.compute()
    self.assertTrue( context.isFinished() )
    result = context.getFinishedResult()
    self.assertTrue(result.asResult.result[1].pyval == 6)
def test_interrupt_works(self):
    # Fire an interrupt ~30ms into an infinite loop; the loop must have
    # actually run (>20ms elapsed) and the context must end up in exactly
    # the "interrupted" state, with every other state flag false.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(dataManager = vdm, allowInternalSplitting = False)
    triggerAfter(context.interrupt, .03)
    t0 = time.time()
    evaluate(context, self.loopFun, FORANative.symbol_Call)
    #make sure we actually looped!
    self.assertTrue(time.time() - t0 > .02)
    self.assertFalse(context.isEmpty())
    self.assertFalse(context.isCacheRequest())
    self.assertFalse(context.isVectorLoad())
    self.assertFalse(context.isFinished())
    self.assertTrue(context.isInterrupted())
def test_serialize_while_holding_interior_vector(self):
    # Serializing a context that was interrupted while holding a handle
    # into the interior of a paged vector must succeed without crashing.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(dataManager = vdm, allowInterpreterTracing=False, allowInternalSplitting=False)
    evaluate(context,
        FORA.extractImplValContainer(
            FORA.eval("""
fun() {
let v = [[1].paged].paged;
let v2 = v[0]
`TriggerInterruptForTesting()
1+2+3+v+v2
}"""
                )
            ),
        FORANative.symbol_Call
        )
    self.assertTrue(context.isInterrupted())
    serialized = context.serialize()
    # Drop the context while the serialized form is still alive.
    context = None
def test_serialize_during_vector_load(self):
    # A context blocked on a vector load must round-trip through
    # serialize/deserialize and still report the pending load.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(dataManager = vdm, allowInternalSplitting=False)
    evaluate(context,
        FORA.extractImplValContainer(
            FORA.eval("fun(){ datasets.s3('a','b')[0] }")
            ),
        FORANative.symbol_Call
        )
    self.assertTrue(context.isVectorLoad())
    serialized = context.serialize()
    context2 = ExecutionContext.ExecutionContext(dataManager = vdm, allowInternalSplitting=False)
    context2.deserialize(serialized)
    self.assertTrue(context2.isVectorLoad())
def test_teardown_during_vector_load(self):
    # Reuse a context after teardown: evaluating a closure over an
    # unloaded paged vector must block on a vector load (not report an
    # interrupt), and a second teardown while blocked must be safe.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInternalSplitting = False
        )
    evaluate(context,
        FORA.extractImplValContainer(
            FORA.eval("fun() { let v = [1,2,3].paged; fun() { v[1] } }")
            ),
        FORANative.symbol_Call
        )
    vdm.unloadAllPossible()
    pagedVecAccessFun = context.getFinishedResult().asResult.result
    context.teardown()
    evaluate(context,
        pagedVecAccessFun,
        FORANative.symbol_Call
        )
    self.assertFalse(context.isInterrupted())
    self.assertTrue(context.isVectorLoad())
    context.teardown()
def extractPagedUnloadedVector(self, vdm, count):
    """Build a paged Vector.range(count), unload its pages, and return the vector."""
    ctx = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInternalSplitting = False
        )
    expr = "fun() { Vector.range(%s).paged }" % count
    evaluate(
        ctx,
        FORA.extractImplValContainer(FORA.eval(expr)),
        FORANative.ImplValContainer(FORANative.makeSymbol("Call"))
        )
    vec = ctx.getFinishedResult().asResult.result
    vdm.unloadAllPossible()
    return vec
def test_teardown_simple(self):
    # Evaluate something trivial, then drive a computation up to a cache
    # request and tear the context down while the request is outstanding.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(dataManager = vdm, allowInternalSplitting=False)
    evaluate(context,
        FORA.extractImplValContainer(
            FORA.eval("fun(){nothing}")
            ),
        FORANative.symbol_Call
        )
    context.getFinishedResult()
    toEval = FORA.extractImplValContainer(
        FORA.eval(
            """fun() {
let f = fun() { };
let v = [1, [3]];
cached(f())
}"""
            )
        )
    evaluate(context, toEval, FORANative.symbol_Call)
    while not context.isCacheRequest():
        context.compute()
    context.teardown(True)
def test_teardown_simple_2(self):
    # An exception rethrown from a catch block must surface as an
    # exception result on the context.
    vdm = FORANative.VectorDataManager(callbackScheduler, Setup.config().maxPageSizeInBytes)
    context = ExecutionContext.ExecutionContext(
        dataManager = vdm,
        allowInternalSplitting = False
        )
    context.placeInEvaluationState(FORANative.ImplValContainer((
        FORA.extractImplValContainer(
            FORA.eval("fun(){ let f = fun() { throw 1 }; try { f() } catch(...) { throw 2 } }")
            ),
        FORANative.symbol_Call
        )))
    context.compute()
    self.assertTrue(context.getFinishedResult().isException())
def stringAllocShouldFailFun(self, ct):
    """Return a FORA function whose evaluation should exhaust string allocation.

    The function repeatedly doubles a string 100000 times; `ct` sets the
    length of the second padding literal spliced into the source.
    """
    template = """fun()
{
let s = "*";
let i = 0;
while (i < 100000)
{
s = s + "%s" + s + "%s";
i = i + 1;
}
}
"""
    source = template % (" ", " " * ct)
    return FORA.extractImplValContainer(FORA.eval(source))
def test_large_string_alloc_fails_and_raises_foravalue_error(self):
    """Each runaway string-builder must raise FORAFailure when invoked."""
    for padLen in range(10):
        runaway = ForaValue.FORAValue(self.stringAllocShouldFailFun(padLen))
        self.assertRaises(ForaValue.FORAFailure, runaway)
if __name__ == "__main__":
    import ufora.config.Mainline as Mainline
    # Run the ExecutionContext and Evaluator test suites when invoked directly.
    Mainline.UnitTestMainline([ExecutionContext, Evaluator])
|
# -*- encoding: utf-8 -*-
from contextlib import contextmanager
from datetime import datetime
import sys
from traceback import format_exception
import colorama
def warn(title):
    """Write a highlighted ``[WARN] <title>`` line to stderr.

    BUG FIX: the format string previously referenced ``{rest}`` while the
    keyword argument was named ``reset``, so every call raised
    ``KeyError: 'rest'`` inside ``str.format``.
    """
    sys.stderr.write('{warn} [WARN] {title}{reset}\n'.format(
        warn=colorama.Back.RED + colorama.Fore.WHITE + colorama.Style.BRIGHT,
        title=title,
        reset=colorama.Style.RESET_ALL
    ))
def exception(title, exc_info):
    """Write *title* plus the traceback formatted from *exc_info* to stderr."""
    highlight = colorama.Back.RED + colorama.Fore.WHITE + colorama.Style.BRIGHT
    trace_text = ''.join(format_exception(*exc_info))
    message = '{warn}[WARN] {title}:{reset}\n{trace}\n{warn}----------------------------{reset}\n\n'.format(
        warn=highlight,
        title=title,
        reset=colorama.Style.RESET_ALL,
        trace=trace_text
    )
    sys.stderr.write(message)
def failed(msg):
    """Write *msg* to stderr on a red background."""
    line = '{red}{msg}{reset}\n'.format(
        red=colorama.Back.RED,
        msg=msg,
        reset=colorama.Style.RESET_ALL
    )
    sys.stderr.write(line)
def debug(msg):
    """Write *msg* to stderr prefixed with a bold blue ``DEBUG:`` tag."""
    prefix = colorama.Fore.BLUE + colorama.Style.BRIGHT
    line = '{blue}{bold}DEBUG:{reset} {msg}\n'.format(
        blue=colorama.Fore.BLUE,
        bold=colorama.Style.BRIGHT,
        reset=colorama.Style.RESET_ALL,
        msg=msg,
    )
    sys.stderr.write(line)
@contextmanager
def debug_time(msg):
    """Context manager reporting, via debug(), how long its body took."""
    begin = datetime.now()
    try:
        yield
    finally:
        elapsed = datetime.now() - begin
        debug('{} took: {}'.format(msg, elapsed))
def version(cli_version, python_version, os_info):
    """Report the CLI, Python interpreter, and OS versions on stderr."""
    line = 'jandan-py {} using Python {} on {}\n'.format(
        cli_version, python_version, os_info)
    sys.stderr.write(line)
#from pydub import AudioSegment
import numpy, scipy, matplotlib.pyplot as plt, sklearn, librosa, mir_eval, urllib
from scipy.io.wavfile import write
import os,sys
#feature extractor
#feature extractor
def extract_features(x, fs):
    """Return a [zero-crossing count, L2 energy] feature pair for signal x.

    The sample rate `fs` is accepted for interface compatibility but is
    not used by either feature.
    """
    crossings = librosa.zero_crossings(x).sum()
    magnitude = scipy.linalg.norm(x)
    return [crossings, magnitude]
#These parameters are for testing.
#inputpath should be audio/output/self_recorded_files
#inputpathname should be eechunk
#outputpath should be audio/clustered/self_recorded_files
#outputpathname would be eecluster
#def getInputFiles(inputPath, inputPathName):
def clusterAudioSegments(inputPath, inputPathName, outputPath, outputFileName, k):
    """Cluster audio segment files into k groups and write one wav per cluster.

    Segments are the files in `inputPath` whose names start with
    `inputPathName`. Each segment contributes a [zero-crossing count,
    energy] feature row; the rows are min-max scaled, plotted, clustered
    with k-means, and each cluster's segments are concatenated (with a
    short silence before each) into `outputPath/outputFileName<i>.wav`.
    """
    features = numpy.empty((0, 2))
    segments = list()
    fs = None
    #looping through each segmented file
    for file in os.listdir(inputPath):
        if file.startswith(inputPathName):
            #for each segmented file
            x, fs = librosa.load(inputPath + "/" + file)
            feature = extract_features(x, fs)
            features = numpy.vstack((features, feature))
            segments.append(x)
    if fs is None:
        # No matching input files: nothing to cluster or write.
        # (Previously `fs` would have been unbound at the write() below.)
        return
    #scale features from -1 to 1 so neither feature dominates the clustering
    min_max_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1, 1))
    features_scaled = min_max_scaler.fit_transform(features)
    #PyPlot this (second feature is the signal energy, not spectral centroid)
    plt.scatter(features_scaled[:,0], features_scaled[:,1])
    plt.xlabel('Zero Crossing Rate (scaled)')
    plt.ylabel('Energy (scaled)')
    plt.show()
    #kmeans with the requested cluster count. The previous version ran
    #AffinityPropagation here (ignoring k), which can yield more than k
    #labels and then crashed when indexing the k result buckets below;
    #it also clustered throwaway random data and printed it as "inertia".
    kmeansLabels = sklearn.cluster.KMeans(n_clusters = k).fit_predict(features_scaled)
    print ("Kmeans with k = ", k, " result: ", kmeansLabels)
    #combine files in cluster
    results = [list() for _ in range(k)]
    padding = 1000  #samples of silence inserted before each segment
    for i in range(features.shape[0]):
        segment_to_attach = numpy.hstack(([0 for _ in range(padding)], segments[i]))
        results[kmeansLabels[i]] = numpy.hstack((results[kmeansLabels[i]], segment_to_attach))
    for i in range(k):
        out_file = outputPath + "/" + outputFileName + str(i) + ".wav"
        write(out_file, fs, results[i])
####################
#SCRIPT STARTS HERE#
####################
# Cluster the "jh"-prefixed syllable recordings into 3 output wav files.
clusterAudioSegments("audio/input/self_recorded_syllables", "jh", "audio/clustered/self_recorded_syllables", "jhcluster", 3)
|
class Treinamento:
|
r"""
Hall Algebras
AUTHORS:
- Travis Scrimshaw (2013-10-17): Initial version
"""
#*****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
# python3
from __future__ import division
from sage.misc.misc_c import prod
from sage.misc.cachefunc import cached_method
from sage.categories.algebras_with_basis import AlgebrasWithBasis
from sage.categories.hopf_algebras_with_basis import HopfAlgebrasWithBasis
from sage.combinat.partition import Partition, Partitions
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.hall_polynomial import hall_polynomial
from sage.combinat.sf.sf import SymmetricFunctions
from sage.rings.all import ZZ
from functools import reduce
def transpose_cmp(x, y):
    r"""
    Compare partitions ``x`` and ``y`` in transpose dominance order.

    We say partitions `\mu` and `\lambda` satisfy `\mu \prec \lambda`
    in transpose dominance order if for all `i \geq 1` we have:

    .. MATH::

        l_1 + 2 l_2 + \cdots + (i-1) l_{i-1} + i(l_i + l_{i+1} + \cdots) \leq
        m_1 + 2 m_2 + \cdots + (i-1) m_{i-1} + i(m_i + m_{i+1} + \cdots),

    where `l_k` denotes the number of appearances of `k` in
    `\lambda`, and `m_k` denotes the number of appearances of `k`
    in `\mu`.

    Equivalently, `\mu \prec \lambda` if the conjugate of the
    partition `\mu` dominates the conjugate of the partition
    `\lambda`.

    Since this is a partial ordering, we fallback to lex ordering
    `\mu <_L \lambda` if we cannot compare in the transpose order.

    EXAMPLES::

        sage: from sage.algebras.hall_algebra import transpose_cmp
        sage: transpose_cmp(Partition([4,3,1]), Partition([3,2,2,1]))
        -1
        sage: transpose_cmp(Partition([2,2,1]), Partition([3,2]))
        1
        sage: transpose_cmp(Partition([4,1,1]), Partition([4,1,1]))
        0
    """
    if x == y:
        return 0
    # Exponential notation: xexp[j] is the number of parts equal to j+1.
    xexp = x.to_exp()
    yexp = y.to_exp()
    n = min(len(xexp), len(yexp))
    def check(m, l):
        # Accumulating suffix sums of the exponent vectors reproduces the
        # weighted sums in the definition above; fail fast on the first
        # violated inequality.
        s1 = 0
        s2 = 0
        for i in range(n):
            s1 += sum(l[i:])
            s2 += sum(m[i:])
            if s1 > s2:
                return False
        return sum(l) <= sum(m)
    if check(xexp, yexp):
        return 1
    if check(yexp, xexp):
        return -1
    # Incomparable in transpose order: fall back to lex ordering via the
    # Python 2 builtin ``cmp`` (this module predates Python 3).
    return cmp(x, y)
class HallAlgebra(CombinatorialFreeModule):
    r"""
    The (classical) Hall algebra.

    The *(classical) Hall algebra* over a commutative ring `R` with a
    parameter `q \in R` is defined to be the free `R`-module with
    basis `(I_\lambda)`, where `\lambda` runs over all integer
    partitions. The algebra structure is given by a product defined by

    .. MATH::

        I_\mu \cdot I_\lambda = \sum_\nu P^{\nu}_{\mu, \lambda}(q) I_\nu,

    where `P^{\nu}_{\mu, \lambda}` is a Hall polynomial (see
    :meth:`~sage.combinat.hall_polynomial.hall_polynomial`). The
    unity of this algebra is `I_{\emptyset}`.

    The (classical) Hall algebra is also known as the Hall-Steinitz
    algebra.

    We can define an `R`-algebra isomorphism `\Phi` from the
    `R`-algebra of symmetric functions (see
    :class:`~sage.combinat.sf.sf.SymmetricFunctions`) to the
    (classical) Hall algebra by sending the `r`-th elementary
    symmetric function `e_r` to `q^{r(r-1)/2} I_{(1^r)}` for every
    positive integer `r`. This isomorphism used to transport the
    Hopf algebra structure from the `R`-algebra of symmetric functions
    to the Hall algebra, thus making the latter a connected graded
    Hopf algebra. If `\lambda` is a partition, then the preimage
    of the basis element `I_{\lambda}` under this isomorphism is
    `q^{n(\lambda)} P_{\lambda}(x; q^{-1})`, where `P_{\lambda}` denotes
    the `\lambda`-th Hall-Littlewood `P`-function, and where
    `n(\lambda) = \sum_i (i - 1) \lambda_i`.

    See section 2.3 in [Schiffmann]_, and sections II.2 and III.3
    in [Macdonald1995]_ (where our `I_{\lambda}` is called `u_{\lambda}`).

    EXAMPLES::

        sage: R.<q> = ZZ[]
        sage: H = HallAlgebra(R, q)
        sage: H[2,1]*H[1,1]
        H[3, 2] + (q+1)*H[3, 1, 1] + (q^2+q)*H[2, 2, 1] + (q^4+q^3+q^2)*H[2, 1, 1, 1]
        sage: H[2]*H[2,1]
        H[4, 1] + q*H[3, 2] + (q^2-1)*H[3, 1, 1] + (q^3+q^2)*H[2, 2, 1]
        sage: H[3]*H[1,1]
        H[4, 1] + q^2*H[3, 1, 1]
        sage: H[3]*H[2,1]
        H[5, 1] + q*H[4, 2] + (q^2-1)*H[4, 1, 1] + q^3*H[3, 2, 1]

    We can rewrite the Hall algebra in terms of monomials of
    the elements `I_{(1^r)}`::

        sage: I = H.monomial_basis()
        sage: H(I[2,1,1])
        H[3, 1] + (q+1)*H[2, 2] + (2*q^2+2*q+1)*H[2, 1, 1]
         + (q^5+2*q^4+3*q^3+3*q^2+2*q+1)*H[1, 1, 1, 1]
        sage: I(H[2,1,1])
        I[3, 1] + (-q^3-q^2-q-1)*I[4]

    The isomorphism between the Hall algebra and the symmetric
    functions described above is implemented as a coercion::

        sage: R = PolynomialRing(ZZ, 'q').fraction_field()
        sage: q = R.gen()
        sage: H = HallAlgebra(R, q)
        sage: e = SymmetricFunctions(R).e()
        sage: e(H[1,1,1])
        1/q^3*e[3]

    We can also do computations with any special value of ``q``,
    such as `0` or `1` or (most commonly) a prime power. Here
    is an example using a prime::

        sage: H = HallAlgebra(ZZ, 2)
        sage: H[2,1]*H[1,1]
        H[3, 2] + 3*H[3, 1, 1] + 6*H[2, 2, 1] + 28*H[2, 1, 1, 1]
        sage: H[3,1]*H[2]
        H[5, 1] + H[4, 2] + 6*H[3, 3] + 3*H[4, 1, 1] + 8*H[3, 2, 1]
        sage: H[2,1,1]*H[3,1]
        H[5, 2, 1] + 2*H[4, 3, 1] + 6*H[4, 2, 2] + 7*H[5, 1, 1, 1]
         + 19*H[4, 2, 1, 1] + 24*H[3, 3, 1, 1] + 48*H[3, 2, 2, 1]
         + 105*H[4, 1, 1, 1, 1] + 224*H[3, 2, 1, 1, 1]
        sage: I = H.monomial_basis()
        sage: H(I[2,1,1])
        H[3, 1] + 3*H[2, 2] + 13*H[2, 1, 1] + 105*H[1, 1, 1, 1]
        sage: I(H[2,1,1])
        I[3, 1] - 15*I[4]

    If `q` is set to `1`, the coercion to the symmetric functions
    sends `I_{\lambda}` to `m_{\lambda}`::

        sage: H = HallAlgebra(QQ, 1)
        sage: H[2,1] * H[2,1]
        H[4, 2] + 2*H[3, 3] + 2*H[4, 1, 1] + 2*H[3, 2, 1] + 6*H[2, 2, 2] + 4*H[2, 2, 1, 1]
        sage: m = SymmetricFunctions(QQ).m()
        sage: m[2,1] * m[2,1]
        4*m[2, 2, 1, 1] + 6*m[2, 2, 2] + 2*m[3, 2, 1] + 2*m[3, 3] + 2*m[4, 1, 1] + m[4, 2]
        sage: m(H[3,1])
        m[3, 1]

    We can set `q` to `0` (but should keep in mind that we don't get
    the Schur functions this way)::

        sage: H = HallAlgebra(QQ, 0)
        sage: H[2,1] * H[2,1]
        H[4, 2] + H[3, 3] + H[4, 1, 1] - H[3, 2, 1] - H[3, 1, 1, 1]

    TESTS:

    The coefficients are actually Laurent polynomials in general, so we don't
    have to work over the fraction field of `\ZZ[q]`. This didn't work before
    :trac:`15345`::

        sage: R.<q> = LaurentPolynomialRing(ZZ)
        sage: H = HallAlgebra(R, q)
        sage: I = H.monomial_basis()
        sage: hi = H(I[2,1]); hi
        H[2, 1] + (1+q+q^2)*H[1, 1, 1]
        sage: hi.parent() is H
        True
        sage: h22 = H[2]*H[2]; h22
        H[4] - (1-q)*H[3, 1] + (q+q^2)*H[2, 2]
        sage: h22.parent() is H
        True
        sage: e = SymmetricFunctions(R).e()
        sage: e(H[1,1,1])
        (q^-3)*e[3]

    REFERENCES:

    .. [Schiffmann] Oliver Schiffmann. *Lectures on Hall algebras*.
       :arxiv:`0611617v2`.
    """
    def __init__(self, base_ring, q, prefix='H'):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: H = HallAlgebra(R, q)
            sage: TestSuite(H).run()
            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: H = HallAlgebra(R, q)
            sage: TestSuite(H).run()  # long time
            sage: R.<q> = LaurentPolynomialRing(ZZ)
            sage: H = HallAlgebra(R, q)
            sage: TestSuite(H).run()  # long time
        """
        self._q = q
        # The Hopf algebra structure requires q to be invertible in the
        # base ring; fall back to a plain algebra-with-basis otherwise.
        try:
            q_inverse = q**-1
            if not q_inverse in base_ring:
                hopf_structure = False
            else:
                hopf_structure = True
        except Exception:
            hopf_structure = False
        if hopf_structure:
            category = HopfAlgebrasWithBasis(base_ring)
        else:
            category = AlgebrasWithBasis(base_ring)
        CombinatorialFreeModule.__init__(self, base_ring, Partitions(),
                                         prefix=prefix, bracket=False,
                                         monomial_cmp=transpose_cmp,
                                         category=category)

        # Coercions: register the change-of-basis morphism to/from the
        # monomial basis (triangular w.r.t. conjugate partitions).
        I = self.monomial_basis()
        M = I.module_morphism(I._to_natural_on_basis, codomain=self,
                              triangular='upper', unitriangular=True,
                              inverse_on_support=lambda x: x.conjugate(),
                              invertible=True)
        M.register_as_coercion()
        (~M).register_as_coercion()

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: HallAlgebra(R, q)
            Hall algebra with q=q over Univariate Polynomial Ring in q over Integer Ring
        """
        return "Hall algebra with q={} over {}".format(self._q, self.base_ring())

    def one_basis(self):
        """
        Return the index of the basis element `1`.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: H = HallAlgebra(R, q)
            sage: H.one_basis()
            []
        """
        return Partition([])

    def product_on_basis(self, mu, la):
        """
        Return the product of the two basis elements indexed by ``mu``
        and ``la``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: H = HallAlgebra(R, q)
            sage: H.product_on_basis(Partition([1,1]), Partition([1]))
            H[2, 1] + (q^2+q+1)*H[1, 1, 1]
            sage: H.product_on_basis(Partition([2,1]), Partition([1,1]))
            H[3, 2] + (q+1)*H[3, 1, 1] + (q^2+q)*H[2, 2, 1] + (q^4+q^3+q^2)*H[2, 1, 1, 1]
            sage: H.product_on_basis(Partition([3,2]), Partition([2,1]))
            H[5, 3] + (q+1)*H[4, 4] + q*H[5, 2, 1] + (2*q^2-1)*H[4, 3, 1]
             + (q^3+q^2)*H[4, 2, 2] + (q^4+q^3)*H[3, 3, 2]
             + (q^4-q^2)*H[4, 2, 1, 1] + (q^5+q^4-q^3-q^2)*H[3, 3, 1, 1]
             + (q^6+q^5)*H[3, 2, 2, 1]
            sage: H.product_on_basis(Partition([3,1,1]), Partition([2,1]))
            H[5, 2, 1] + q*H[4, 3, 1] + (q^2-1)*H[4, 2, 2]
             + (q^3+q^2)*H[3, 3, 2] + (q^2+q+1)*H[5, 1, 1, 1]
             + (2*q^3+q^2-q-1)*H[4, 2, 1, 1] + (q^4+2*q^3+q^2)*H[3, 3, 1, 1]
             + (q^5+q^4)*H[3, 2, 2, 1] + (q^6+q^5+q^4-q^2-q-1)*H[4, 1, 1, 1, 1]
             + (q^7+q^6+q^5)*H[3, 2, 1, 1, 1]
        """
        # Check conditions for multiplying by 1
        if len(mu) == 0:
            return self.monomial(la)
        if len(la) == 0:
            return self.monomial(mu)

        # When la = (1^r), the structure constants are Hall polynomials
        # and can be computed directly.
        if all(x == 1 for x in la):
            return self.sum_of_terms([(p, hall_polynomial(p, mu, la, self._q))
                                      for p in Partitions(sum(mu) + len(la))],
                                     distinct=True)

        # General case: route the product through the monomial basis.
        I = HallAlgebraMonomials(self.base_ring(), self._q)
        mu = self.monomial(mu)
        la = self.monomial(la)
        return self(I(mu) * I(la))

    def coproduct_on_basis(self, la):
        """
        Return the coproduct of the basis element indexed by ``la``.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: H = HallAlgebra(R, q)
            sage: H.coproduct_on_basis(Partition([1,1]))
            H[] # H[1, 1] + 1/q*H[1] # H[1] + H[1, 1] # H[]
            sage: H.coproduct_on_basis(Partition([2]))
            H[] # H[2] + ((q-1)/q)*H[1] # H[1] + H[2] # H[]
            sage: H.coproduct_on_basis(Partition([2,1]))
            H[] # H[2, 1] + ((q^2-1)/q^2)*H[1] # H[1, 1] + 1/q*H[1] # H[2]
             + ((q^2-1)/q^2)*H[1, 1] # H[1] + 1/q*H[2] # H[1] + H[2, 1] # H[]
            sage: R.<q> = LaurentPolynomialRing(ZZ)
            sage: H = HallAlgebra(R, q)
            sage: H.coproduct_on_basis(Partition([2]))
            H[] # H[2] - (q^-1-1)*H[1] # H[1] + H[2] # H[]
            sage: H.coproduct_on_basis(Partition([2,1]))
            H[] # H[2, 1] - (q^-2-1)*H[1] # H[1, 1] + (q^-1)*H[1] # H[2]
             - (q^-2-1)*H[1, 1] # H[1] + (q^-1)*H[2] # H[1] + H[2, 1] # H[]
        """
        S = self.tensor_square()
        # For la = (1^n) the coproduct has an explicit closed form.
        if all(x == 1 for x in la):
            n = len(la)
            return S.sum_of_terms([( (Partition([1]*r), Partition([1]*(n-r))), self._q**(-r*(n-r)) )
                                   for r in range(n+1)], distinct=True)

        # General case: compute in the monomial basis and convert back.
        I = HallAlgebraMonomials(self.base_ring(), self._q)
        la = self.monomial(la)
        return S(I(la).coproduct())

    def antipode_on_basis(self, la):
        """
        Return the antipode of the basis element indexed by ``la``.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: H = HallAlgebra(R, q)
            sage: H.antipode_on_basis(Partition([1,1]))
            1/q*H[2] + 1/q*H[1, 1]
            sage: H.antipode_on_basis(Partition([2]))
            -1/q*H[2] + ((q^2-1)/q)*H[1, 1]
            sage: R.<q> = LaurentPolynomialRing(ZZ)
            sage: H = HallAlgebra(R, q)
            sage: H.antipode_on_basis(Partition([1,1]))
            (q^-1)*H[2] + (q^-1)*H[1, 1]
            sage: H.antipode_on_basis(Partition([2]))
            -(q^-1)*H[2] - (q^-1-q)*H[1, 1]
        """
        # For la = (1^r), the antipode is a uniform scalar over all
        # partitions of r.
        if all(x == 1 for x in la):
            r = len(la)
            q = (-1) ** r * self._q ** (-(r * (r - 1)) // 2)
            return self._from_dict({p: q for p in Partitions(r)})

        # General case: compute in the monomial basis and convert back.
        I = HallAlgebraMonomials(self.base_ring(), self._q)
        return self(I(self.monomial(la)).antipode())

    def counit(self, x):
        """
        Return the counit of the element ``x``.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: H = HallAlgebra(R, q)
            sage: H.counit(H.an_element())
            2
        """
        return x.coefficient(self.one_basis())

    def monomial_basis(self):
        """
        Return the basis of the Hall algebra given by monomials in the
        `I_{(1^r)}`.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: H = HallAlgebra(R, q)
            sage: H.monomial_basis()
            Hall algebra with q=q over Univariate Polynomial Ring in q over
             Integer Ring in the monomial basis
        """
        return HallAlgebraMonomials(self.base_ring(), self._q)

    def __getitem__(self, la):
        """
        Return the basis element indexed by ``la``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: H = HallAlgebra(R, q)
            sage: H[[]]
            H[]
            sage: H[2]
            H[2]
            sage: H[[2]]
            H[2]
            sage: H[2,1]
            H[2, 1]
            sage: H[Partition([2,1])]
            H[2, 1]
            sage: H[(2,1)]
            H[2, 1]
        """
        # A bare integer n is shorthand for the one-part partition [n].
        if la in ZZ:
            return self.monomial(Partition([la]))
        return self.monomial(Partition(la))

    class Element(CombinatorialFreeModule.Element):
        def scalar(self, y):
            r"""
            Return the scalar product of ``self`` and ``y``.

            The scalar product is given by

            .. MATH::

                (I_{\lambda}, I_{\mu}) = \delta_{\lambda,\mu}
                \frac{1}{a_{\lambda}},

            where `a_{\lambda}` is given by

            .. MATH::

                a_{\lambda} = q^{|\lambda| + 2 n(\lambda)} \prod_k
                \prod_{i=1}^{l_k} (1 - q^{-i})

            where `n(\lambda) = \sum_i (i - 1) \lambda_i` and
            `\lambda = (1^{l_1}, 2^{l_2}, \ldots, m^{l_m})`.

            Note that `a_{\lambda}` can be interpreted as the number
            of automorphisms of a certain object in a category
            corresponding to `\lambda`. See Lemma 2.8 in [Schiffmann]_
            for details.

            EXAMPLES::

                sage: R.<q> = ZZ[]
                sage: H = HallAlgebra(R, q)
                sage: H[1].scalar(H[1])
                1/(q - 1)
                sage: H[2].scalar(H[2])
                1/(q^2 - q)
                sage: H[2,1].scalar(H[2,1])
                1/(q^5 - 2*q^4 + q^3)
                sage: H[1,1,1,1].scalar(H[1,1,1,1])
                1/(q^16 - q^15 - q^14 + 2*q^11 - q^8 - q^7 + q^6)
                sage: H.an_element().scalar(H.an_element())
                (4*q^2 + 9)/(q^2 - q)
            """
            q = self.parent()._q
            # f(la) = 1 / a_la, with a_la as in the docstring above.
            f = lambda la: ~( q**(sum(la) + 2*la.weighted_size())
                              * prod(prod((1 - q**-i) for i in range(1,k+1))
                                     for k in la.to_exp()) )
            y = self.parent()(y)
            ret = q.parent().zero()
            # Only diagonal terms contribute (the basis is orthogonal).
            for mx, cx in self:
                cy = y.coefficient(mx)
                if cy != 0:
                    ret += cx * cy * f(mx)
            return ret
class HallAlgebraMonomials(CombinatorialFreeModule):
    r"""
    The classical Hall algebra given in terms of monomials in the
    `I_{(1^r)}`.

    We first associate a monomial `I_{(1^{r_1})} I_{(1^{r_2})} \cdots
    I_{(1^{r_k})}` with the composition `(r_1, r_2, \ldots, r_k)`. However
    since `I_{(1^r)}` commutes with `I_{(1^s)}`, the basis is indexed
    by partitions.

    EXAMPLES:

    We use the fraction field of `\ZZ[q]` for our initial example::

        sage: R = PolynomialRing(ZZ, 'q').fraction_field()
        sage: q = R.gen()
        sage: H = HallAlgebra(R, q)
        sage: I = H.monomial_basis()

    We check that the basis conversions are mutually inverse::

        sage: all(H(I(H[p])) == H[p] for i in range(7) for p in Partitions(i))
        True
        sage: all(I(H(I[p])) == I[p] for i in range(7) for p in Partitions(i))
        True

    Since Laurent polynomials are sufficient, we run the same check with
    the Laurent polynomial ring `\ZZ[q, q^{-1}]`::

        sage: R.<q> = LaurentPolynomialRing(ZZ)
        sage: H = HallAlgebra(R, q)
        sage: I = H.monomial_basis()
        sage: all(H(I(H[p])) == H[p] for i in range(6) for p in Partitions(i)) # long time
        True
        sage: all(I(H(I[p])) == I[p] for i in range(6) for p in Partitions(i)) # long time
        True

    We can also convert to the symmetric functions. The natural basis
    corresponds to the Hall-Littlewood basis (up to a renormalization and
    an inversion of the `q` parameter), and this basis corresponds
    to the elementary basis (up to a renormalization)::

        sage: Sym = SymmetricFunctions(R)
        sage: e = Sym.e()
        sage: e(I[2,1])
        (q^-1)*e[2, 1]
        sage: e(I[4,2,2,1])
        (q^-8)*e[4, 2, 2, 1]
        sage: HLP = Sym.hall_littlewood(q).P()
        sage: H(I[2,1])
        H[2, 1] + (1+q+q^2)*H[1, 1, 1]
        sage: HLP(e[2,1])
        (1+q+q^2)*HLP[1, 1, 1] + HLP[2, 1]
        sage: all( e(H[lam]) == q**-sum([i * x for i, x in enumerate(lam)])
        ....:      * e(HLP[lam]).map_coefficients(lambda p: p(q**(-1)))
        ....:      for lam in Partitions(4) )
        True

    We can also do computations using a prime power::

        sage: H = HallAlgebra(ZZ, 3)
        sage: I = H.monomial_basis()
        sage: i_elt = I[2,1]*I[1,1]; i_elt
        I[2, 1, 1, 1]
        sage: H(i_elt)
        H[4, 1] + 7*H[3, 2] + 37*H[3, 1, 1] + 136*H[2, 2, 1]
         + 1495*H[2, 1, 1, 1] + 62920*H[1, 1, 1, 1, 1]
    """
    def __init__(self, base_ring, q, prefix='I'):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: TestSuite(I).run()

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: TestSuite(I).run()

            sage: R.<q> = LaurentPolynomialRing(ZZ)
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: TestSuite(I).run()
        """
        self._q = q
        # The Hopf structure (coproduct/antipode) requires q to be
        # invertible inside the base ring; otherwise fall back to a plain
        # algebra with basis.
        try:
            hopf_structure = q ** -1 in base_ring
        except Exception:
            # Best effort: q may not be invertible at all (e.g. q in ZZ[q]).
            hopf_structure = False
        if hopf_structure:
            category = HopfAlgebrasWithBasis(base_ring)
        else:
            category = AlgebrasWithBasis(base_ring)
        CombinatorialFreeModule.__init__(self, base_ring, Partitions(),
                                         prefix=prefix, bracket=False,
                                         category=category)

        # Coercions: when the Hopf structure exists, this basis is a
        # diagonal rescaling of the elementary symmetric functions.
        if hopf_structure:
            e = SymmetricFunctions(base_ring).e()
            f = lambda la: q ** sum(-((r * (r - 1)) // 2) for r in la)
            M = self.module_morphism(diagonal=f, codomain=e)
            M.register_as_coercion()
            (~M).register_as_coercion()

    @cached_method
    def _to_natural_on_basis(self, a):
        """
        Return the basis element indexed by ``a`` converted into
        the partition basis.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I._to_natural_on_basis(Partition([3]))
            H[1, 1, 1]
            sage: I._to_natural_on_basis(Partition([2,1,1]))
            H[3, 1] + (q+1)*H[2, 2] + (2*q^2+2*q+1)*H[2, 1, 1]
             + (q^5+2*q^4+3*q^3+3*q^2+2*q+1)*H[1, 1, 1, 1]
        """
        # Expand the monomial I_{(1^{r_1})} ... I_{(1^{r_k})} by multiplying
        # out in the natural (partition) basis.
        H = HallAlgebra(self.base_ring(), self._q)
        return reduce(lambda cur, r: cur * H.monomial(Partition([1] * r)), a, H.one())

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: HallAlgebra(R, q).monomial_basis()
            Hall algebra with q=q over Univariate Polynomial Ring in q over
            Integer Ring in the monomial basis
        """
        return "Hall algebra with q={} over {} in the monomial basis".format(self._q, self.base_ring())

    def one_basis(self):
        """
        Return the index of the basis element `1`.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.one_basis()
            []
        """
        return Partition([])

    def product_on_basis(self, a, b):
        """
        Return the product of the two basis elements indexed by ``a``
        and ``b``.

        Since the generators `I_{(1^r)}` commute, the product is simply
        the merge of the two partitions.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.product_on_basis(Partition([4,2,1]), Partition([3,2,1]))
            I[4, 3, 2, 2, 1, 1]
        """
        return self.monomial(Partition(sorted(list(a) + list(b), reverse=True)))

    def coproduct_on_basis(self, a):
        """
        Return the coproduct of the basis element indexed by ``a``.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.coproduct_on_basis(Partition([1]))
            I[] # I[1] + I[1] # I[]
            sage: I.coproduct_on_basis(Partition([2]))
            I[] # I[2] + 1/q*I[1] # I[1] + I[2] # I[]
            sage: I.coproduct_on_basis(Partition([2,1]))
            I[] # I[2, 1] + 1/q*I[1] # I[1, 1] + I[1] # I[2]
             + 1/q*I[1, 1] # I[1] + I[2] # I[1] + I[2, 1] # I[]

            sage: R.<q> = LaurentPolynomialRing(ZZ)
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.coproduct_on_basis(Partition([2,1]))
            I[] # I[2, 1] + (q^-1)*I[1] # I[1, 1] + I[1] # I[2]
             + (q^-1)*I[1, 1] # I[1] + I[2] # I[1] + I[2, 1] # I[]
        """
        # The coproduct is multiplicative: apply it to each generator
        # I_{(1^n)} (the sum over r below) and multiply in the tensor square.
        S = self.tensor_square()
        return S.prod(S.sum_of_terms([( (Partition([r]), Partition([n-r]) ), self._q**(-r*(n-r)) )
                                      for r in range(n+1)], distinct=True) for n in a)

    def antipode_on_basis(self, a):
        """
        Return the antipode of the basis element indexed by ``a``.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.antipode_on_basis(Partition([1]))
            -I[1]
            sage: I.antipode_on_basis(Partition([2]))
            1/q*I[1, 1] - I[2]
            sage: I.antipode_on_basis(Partition([2,1]))
            -1/q*I[1, 1, 1] + I[2, 1]

            sage: R.<q> = LaurentPolynomialRing(ZZ)
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.antipode_on_basis(Partition([2,1]))
            -(q^-1)*I[1, 1, 1] + I[2, 1]
        """
        # The antipode is an algebra anti-morphism; apply it generator by
        # generator, expanding each image in the natural basis.
        H = HallAlgebra(self.base_ring(), self._q)
        cur = self.one()
        for r in a:
            q = (-1) ** r * self._q ** (-(r * (r - 1)) // 2)
            cur *= self(H._from_dict({p: q for p in Partitions(r)}))
        return cur

    def counit(self, x):
        """
        Return the counit of the element ``x``.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ, 'q').fraction_field()
            sage: q = R.gen()
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I.counit(I.an_element())
            2
        """
        # The counit projects onto the coefficient of the identity.
        return x.coefficient(self.one_basis())

    def __getitem__(self, a):
        """
        Return the basis element indexed by ``a``.

        EXAMPLES::

            sage: R.<q> = ZZ[]
            sage: I = HallAlgebra(R, q).monomial_basis()
            sage: I[3,1,1] + 3*I[1,1]
            3*I[1, 1] + I[3, 1, 1]
            sage: I[Partition([3,2,2])]
            I[3, 2, 2]
            sage: I[2]
            I[2]
            sage: I[[2]]
            I[2]
            sage: I[[]]
            I[]
        """
        if a in ZZ:
            return self.monomial(Partition([a]))
        return self.monomial(Partition(a))

    class Element(CombinatorialFreeModule.Element):
        def scalar(self, y):
            r"""
            Return the scalar product of ``self`` and ``y``.

            The scalar product is computed by converting into the
            natural basis.

            EXAMPLES::

                sage: R.<q> = ZZ[]
                sage: I = HallAlgebra(R, q).monomial_basis()
                sage: I[1].scalar(I[1])
                1/(q - 1)
                sage: I[2].scalar(I[2])
                1/(q^4 - q^3 - q^2 + q)
                sage: I[2,1].scalar(I[2,1])
                (2*q + 1)/(q^6 - 2*q^5 + 2*q^3 - q^2)
                sage: I[1,1,1,1].scalar(I[1,1,1,1])
                24/(q^4 - 4*q^3 + 6*q^2 - 4*q + 1)
                sage: I.an_element().scalar(I.an_element())
                (4*q^4 - 4*q^2 + 9)/(q^4 - q^3 - q^2 + q)
            """
            # Delegate to HallAlgebra.Element.scalar via the basis coercion.
            H = HallAlgebra(self.parent().base_ring(), self.parent()._q)
            return H(self).scalar(H(y))
|
from typing import *
class Solution:
    """LeetCode 905 -- Sort Array By Parity.

    Rearrange ``nums`` so every even number appears before every odd one.
    Relative order within each parity group is not required.
    """

    # 80 ms, faster than 58.31% of Python3 online submissions for Sort Array By Parity.
    # 15.2 MB, less than 22.91% of Python3 online submissions for Sort Array By Parity.
    # NOTE: this variant was previously also named ``sortArrayByParity`` and
    # was silently shadowed (dead code) by the definition below; it is kept
    # under a distinct name so both implementations remain usable.
    def sortArrayByParityTwoLists(self, nums: List[int]) -> List[int]:
        """Return a new list: evens first, odds after, each group in input order."""
        even = []
        odd = []
        for num in nums:
            if num % 2 == 0:
                even.append(num)
            else:  # num is odd
                odd.append(num)
        even.extend(odd)
        return even

    # 68 ms, faster than 98.29% of Python3 online submissions for Sort Array By Parity.
    # 14.8 MB, less than 99.01% of Python3 online submissions for Sort Array By Parity.
    def sortArrayByParity(self, nums: List[int]) -> List[int]:
        """Partition ``nums`` in place with two pointers and return it.

        ``i`` marks the boundary of the even prefix; every even element
        found at ``j`` is swapped into position ``i`` regardless of what
        currently sits there.
        """
        i = 0
        for j in range(len(nums)):
            if nums[j] % 2 == 0:
                nums[i], nums[j] = nums[j], nums[i]
                i += 1
        return nums
if __name__ == "__main__":
    # Quick manual sanity check with a single-element array.
    solver = Solution()
    print(solver.sortArrayByParity([0]))
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import TypeMismatchException
fail_list = [
"""
@public
def foo():
y = min(7, 0x1234567890123456789012345678901234567890)
"""
]
@pytest.mark.parametrize('bad_code', fail_list)
def test_block_fail(bad_code):
    """Every snippet in ``fail_list`` must raise ``TypeMismatchException``."""
    with pytest.raises(TypeMismatchException):
        compiler.compile_code(bad_code)
|
"""
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..adapters import Adapter
from ..config import ListField
from ..postprocessor import NMS
from ..representation import (
DetectionPrediction,
FacialLandmarksPrediction,
ContainerPrediction,
AttributeDetectionPrediction
)
class RetinaFaceAdapter(Adapter):
    """Decode RetinaFace FPN outputs (boxes, scores, optional landmarks and
    mask/type scores) into detection / landmark / attribute representations.
    """
    __provider__ = 'retinaface'

    @classmethod
    def parameters(cls):
        """Extend the base adapter config with per-FPN-level output names."""
        params = super().parameters()
        params.update(
            {
                'bboxes_outputs': ListField(),
                'scores_outputs': ListField(),
                'landmarks_outputs': ListField(optional=True),
                'type_scores_outputs': ListField(optional=True)
            }
        )
        return params

    def configure(self):
        """Read configured output names and precompute the FPN anchors."""
        self.bboxes_output = self.get_value_from_config('bboxes_outputs')
        self.scores_output = self.get_value_from_config('scores_outputs')
        self.landmarks_output = self.get_value_from_config('landmarks_outputs') or []
        self.type_scores_output = self.get_value_from_config('type_scores_outputs') or []
        _ratio = (1.,)
        # Anchor configuration per feature stride (coarse to fine).
        self.anchor_cfg = {
            32: {'SCALES': (32, 16), 'BASE_SIZE': 16, 'RATIOS': _ratio},
            16: {'SCALES': (8, 4), 'BASE_SIZE': 16, 'RATIOS': _ratio},
            8: {'SCALES': (2, 1), 'BASE_SIZE': 16, 'RATIOS': _ratio}
        }
        self._features_stride_fpn = [32, 16, 8]
        self._anchors_fpn = dict(zip(self._features_stride_fpn, self.generate_anchors_fpn(cfg=self.anchor_cfg)))
        self._num_anchors = dict(zip(
            self._features_stride_fpn, [anchors.shape[0] for anchors in self._anchors_fpn.values()]
        ))
        # Models that also predict type/mask scores use a smaller landmark std.
        if self.type_scores_output:
            self.landmark_std = 0.2
        else:
            self.landmark_std = 1.0

    def process(self, raw, identifiers, frame_meta):
        """Decode raw network outputs for a batch into prediction objects.

        For each FPN level: decode box deltas against the anchor grid,
        apply NMS, and collect scores (plus optional landmarks and mask
        scores); finally rescale coordinates back to the original image.
        """
        raw_predictions = self._extract_predictions(raw, frame_meta)
        raw_predictions = self._repack_data_according_layout(raw_predictions, frame_meta[0])
        results = []
        for batch_id, (identifier, meta) in enumerate(zip(identifiers, frame_meta)):
            proposals_list = []
            scores_list = []
            landmarks_list = []
            mask_scores_list = []
            for _idx, s in enumerate(self._features_stride_fpn):
                anchor_num = self._num_anchors[s]
                scores = self._get_scores(raw_predictions[self.scores_output[_idx]][batch_id], anchor_num)
                bbox_deltas = raw_predictions[self.bboxes_output[_idx]][batch_id]
                height, width = bbox_deltas.shape[1], bbox_deltas.shape[2]
                anchors_fpn = self._anchors_fpn[s]
                anchors = self.anchors_plane(height, width, int(s), anchors_fpn)
                anchors = anchors.reshape((height * width * anchor_num, 4))
                proposals = self._get_proposals(bbox_deltas, anchor_num, anchors)
                x_mins, y_mins, x_maxs, y_maxs = proposals.T
                keep = NMS.nms(x_mins, y_mins, x_maxs, y_maxs, scores, 0.5, False)
                proposals_list.extend(proposals[keep])
                scores_list.extend(scores[keep])
                if self.type_scores_output:
                    mask_scores_list.extend(self._get_mask_scores(
                        raw_predictions[self.type_scores_output[_idx]][batch_id], anchor_num)[keep])
                if self.landmarks_output:
                    landmarks = self._get_landmarks(raw_predictions[self.landmarks_output[_idx]][batch_id],
                                                    anchor_num, anchors)[keep, :]
                    landmarks_list.extend(landmarks)
            scores = np.reshape(scores_list, -1)
            mask_scores = np.reshape(mask_scores_list, -1)
            # RetinaFace detects a single class (face) -> label 1 everywhere.
            labels = np.full_like(scores, 1, dtype=int)
            x_mins, y_mins, x_maxs, y_maxs = np.array(proposals_list).T  # pylint: disable=E0633
            x_scale, y_scale = self.get_scale(meta)
            detection_representation = DetectionPrediction(
                identifier, labels, scores, x_mins / x_scale, y_mins / y_scale, x_maxs / x_scale, y_maxs / y_scale
            )
            representations = {'face_detection': detection_representation}
            if self.type_scores_output:
                representations['mask_detection'] = AttributeDetectionPrediction(
                    identifier, labels, scores, mask_scores, x_mins / x_scale,
                    y_mins / y_scale, x_maxs / x_scale, y_maxs / y_scale
                )
            if self.landmarks_output:
                landmarks_x_coords = np.array(landmarks_list)[:, :, ::2].reshape(len(landmarks_list), -1) / x_scale
                landmarks_y_coords = np.array(landmarks_list)[:, :, 1::2].reshape(len(landmarks_list), -1) / y_scale
                representations['landmarks_regression'] = FacialLandmarksPrediction(identifier, landmarks_x_coords,
                                                                                    landmarks_y_coords)
            results.append(
                ContainerPrediction(representations) if len(representations) > 1 else detection_representation
            )
        return results

    def _get_proposals(self, bbox_deltas, anchor_num, anchors):
        """Turn per-anchor box deltas (CHW) into absolute box proposals."""
        bbox_deltas = bbox_deltas.transpose((1, 2, 0))
        bbox_pred_len = bbox_deltas.shape[2] // anchor_num
        bbox_deltas = bbox_deltas.reshape((-1, bbox_pred_len))
        proposals = self.bbox_pred(anchors, bbox_deltas)
        return proposals

    @staticmethod
    def _get_scores(scores, anchor_num):
        """Extract the foreground half of the score map and flatten it."""
        scores = scores[anchor_num:, :, :]
        scores = scores.transpose((1, 2, 0)).reshape(-1)
        return scores

    @staticmethod
    def _get_mask_scores(type_scores, anchor_num):
        """Extract the mask/type channel of the score map and flatten it."""
        mask_scores = type_scores[anchor_num * 2:, :, :]
        mask_scores = mask_scores.transpose((1, 2, 0)).reshape(-1)
        return mask_scores

    def _get_landmarks(self, landmark_deltas, anchor_num, anchors):
        """Decode 5-point landmark deltas relative to the anchors."""
        landmark_pred_len = landmark_deltas.shape[0] // anchor_num
        landmark_deltas = landmark_deltas.transpose((1, 2, 0)).reshape((-1, 5, landmark_pred_len // 5))
        landmark_deltas *= self.landmark_std
        landmarks = self.landmark_pred(anchors, landmark_deltas)
        return landmarks

    @staticmethod
    def bbox_pred(boxes, box_deltas):
        """Apply (dx, dy, dw, dh) regression deltas to anchor boxes."""
        if boxes.shape[0] == 0:
            return np.zeros((0, box_deltas.shape[1]))
        # np.float was removed in NumPy >= 1.24; float64 is the same type.
        boxes = boxes.astype(np.float64, copy=False)
        widths = boxes[:, 2] - boxes[:, 0] + 1.0
        heights = boxes[:, 3] - boxes[:, 1] + 1.0
        ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
        ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
        dx = box_deltas[:, 0:1]
        dy = box_deltas[:, 1:2]
        dw = box_deltas[:, 2:3]
        dh = box_deltas[:, 3:4]
        pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
        pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
        pred_w = np.exp(dw) * widths[:, np.newaxis]
        pred_h = np.exp(dh) * heights[:, np.newaxis]
        pred_boxes = np.zeros(box_deltas.shape)
        pred_boxes[:, 0:1] = pred_ctr_x - 0.5 * (pred_w - 1.0)
        pred_boxes[:, 1:2] = pred_ctr_y - 0.5 * (pred_h - 1.0)
        pred_boxes[:, 2:3] = pred_ctr_x + 0.5 * (pred_w - 1.0)
        pred_boxes[:, 3:4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
        # Any extra regression channels are passed through unchanged.
        if box_deltas.shape[1] > 4:
            pred_boxes[:, 4:] = box_deltas[:, 4:]
        return pred_boxes

    @staticmethod
    def anchors_plane(height, width, stride, base_anchors):
        """Tile ``base_anchors`` over an (height x width) grid at ``stride``."""
        num_anchors = base_anchors.shape[0]
        all_anchors = np.zeros((height, width, num_anchors, 4))
        for iw in range(width):
            sw = iw * stride
            for ih in range(height):
                sh = ih * stride
                for k in range(num_anchors):
                    all_anchors[ih, iw, k, 0] = base_anchors[k, 0] + sw
                    all_anchors[ih, iw, k, 1] = base_anchors[k, 1] + sh
                    all_anchors[ih, iw, k, 2] = base_anchors[k, 2] + sw
                    all_anchors[ih, iw, k, 3] = base_anchors[k, 3] + sh
        return all_anchors

    @staticmethod
    def generate_anchors_fpn(cfg):
        """Generate the base anchor set for every FPN stride in ``cfg``."""
        def generate_anchors(base_size=16, ratios=(0.5, 1, 2), scales=2 ** np.arange(3, 6)):
            # Anchors are enumerated first by aspect ratio, then by scale.
            base_anchor = np.array([1, 1, base_size, base_size]) - 1
            ratio_anchors = _ratio_enum(base_anchor, ratios)
            anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in range(ratio_anchors.shape[0])])
            return anchors

        def _ratio_enum(anchor, ratios):
            w, h, x_ctr, y_ctr = _generate_wh_ctrs(anchor)
            size = w * h
            size_ratios = size / ratios
            ws = np.round(np.sqrt(size_ratios))
            hs = np.round(ws * ratios)
            anchors = _make_anchors(ws, hs, x_ctr, y_ctr)
            return anchors

        def _scale_enum(anchor, scales):
            w, h, x_ctr, y_ctr = _generate_wh_ctrs(anchor)
            ws = w * scales
            hs = h * scales
            anchors = _make_anchors(ws, hs, x_ctr, y_ctr)
            return anchors

        def _generate_wh_ctrs(anchor):
            # Width/height/center of an [x1, y1, x2, y2] anchor (inclusive).
            w = anchor[2] - anchor[0] + 1
            h = anchor[3] - anchor[1] + 1
            x_ctr = anchor[0] + 0.5 * (w - 1)
            y_ctr = anchor[1] + 0.5 * (h - 1)
            return w, h, x_ctr, y_ctr

        def _make_anchors(ws, hs, x_ctr, y_ctr):
            ws = ws[:, np.newaxis]
            hs = hs[:, np.newaxis]
            anchors = np.hstack((
                x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
            ))
            return anchors

        rpn_feat_stride = [int(k) for k in cfg]
        rpn_feat_stride.sort(reverse=True)
        anchors = []
        for stride in rpn_feat_stride:
            feature_info = cfg[stride]
            bs = feature_info['BASE_SIZE']
            __ratios = np.array(feature_info['RATIOS'])
            __scales = np.array(feature_info['SCALES'])
            anchors.append(generate_anchors(bs, __ratios, __scales))
        return anchors

    @staticmethod
    def landmark_pred(boxes, landmark_deltas):
        """Apply 5-point landmark deltas (fractions of box size) to boxes."""
        if boxes.shape[0] == 0:
            return np.zeros((0, landmark_deltas.shape[1]))
        # np.float was removed in NumPy >= 1.24; float64 is the same type.
        boxes = boxes.astype(np.float64, copy=False)
        widths = boxes[:, 2] - boxes[:, 0] + 1.0
        heights = boxes[:, 3] - boxes[:, 1] + 1.0
        ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
        ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
        pred = landmark_deltas.copy()
        for i in range(5):
            pred[:, i, 0] = landmark_deltas[:, i, 0] * widths + ctr_x
            pred[:, i, 1] = landmark_deltas[:, i, 1] * heights + ctr_y
        return pred

    @staticmethod
    def get_scale(meta):
        """Return (x_scale, y_scale) mapping network coords to image coords."""
        if 'scale_x' in meta:
            return meta['scale_x'], meta['scale_y']
        original_image_size = meta['image_size'][:2]
        image_input = [shape for shape in meta['input_shape'].values() if len(shape) == 4]
        assert image_input, "image input not found"
        assert len(image_input) == 1, 'model should have only one image input'
        image_input = image_input[0]
        # Detect NCHW vs NHWC input layout by the channel-dimension position.
        if image_input[1] == 3:
            processed_image_size = image_input[2:]
        else:
            processed_image_size = image_input[1:3]
        y_scale = processed_image_size[0] / original_image_size[0]
        x_scale = processed_image_size[1] / original_image_size[1]
        return x_scale, y_scale

    def _repack_data_according_layout(self, raw_predictions, meta):
        """Transpose NHWC outputs to NCHW so decoding (channel-first
        indexing) works regardless of runtime layout."""
        if 'output_layouts' not in meta:
            return raw_predictions
        output_layouts = meta['output_layouts']
        target_outputs = self.bboxes_output + self.scores_output + self.landmarks_output + self.type_scores_output
        for target_out in target_outputs:
            layout = output_layouts[target_out]
            if layout != 'NHWC':
                continue
            shape = raw_predictions[target_out].shape
            # BUG FIX: transpose the specific output tensor, not the whole
            # raw_predictions dict (np.transpose on the dict is meaningless).
            transposed_output = np.transpose(raw_predictions[target_out], (0, 3, 1, 2))
            if shape[1] <= shape[3]:
                # NOTE(review): appears to guard against already
                # channel-first data mislabelled as NHWC by restoring the
                # reported shape -- confirm against the runtime behavior.
                transposed_output = transposed_output.reshape(shape)
            raw_predictions[target_out] = transposed_output
        return raw_predictions
|
#!/usr/bin/env python
# IRC Cryptocurrency Exchange (IRCCEX) - Developed by acidvegas in Python (https://acid.vegas/irccex)
# constants.py
# Control Characters (mIRC formatting control bytes)
bold = '\x02'
color = '\x03'
italic = '\x1D'
underline = '\x1F'
reverse = '\x16'
reset = '\x0f'

# Color Codes (two-digit mIRC palette indices, used after the color byte)
white = '00'
black = '01'
blue = '02'
green = '03'
red = '04'
brown = '05'
purple = '06'
orange = '07'
yellow = '08'
light_green = '09'
cyan = '10'
light_cyan = '11'
light_blue = '12'
pink = '13'
grey = '14'
light_grey = '15'

# Events (IRC command verbs, RFC 2812)
PASS = 'PASS'
NICK = 'NICK'
USER = 'USER'
OPER = 'OPER'
MODE = 'MODE'
SERVICE = 'SERVICE'
QUIT = 'QUIT'
SQUIT = 'SQUIT'
JOIN = 'JOIN'
PART = 'PART'
TOPIC = 'TOPIC'
NAMES = 'NAMES'
LIST = 'LIST'
INVITE = 'INVITE'
KICK = 'KICK'
PRIVMSG = 'PRIVMSG'
NOTICE = 'NOTICE'
MOTD = 'MOTD'
LUSERS = 'LUSERS'
VERSION = 'VERSION'
STATS = 'STATS'
LINKS = 'LINKS'
TIME = 'TIME'
CONNECT = 'CONNECT'
TRACE = 'TRACE'
ADMIN = 'ADMIN'
INFO = 'INFO'
SERVLIST = 'SERVLIST'
SQUERY = 'SQUERY'
WHO = 'WHO'
WHOIS = 'WHOIS'
WHOWAS = 'WHOWAS'
KILL = 'KILL'
PING = 'PING'
PONG = 'PONG'
ERROR = 'ERROR'
AWAY = 'AWAY'
REHASH = 'REHASH'
DIE = 'DIE'
RESTART = 'RESTART'
SUMMON = 'SUMMON'
USERS = 'USERS'
WALLOPS = 'WALLOPS'
USERHOST = 'USERHOST'
ISON = 'ISON'

# Event Numerics (three-digit server reply/error codes, kept as strings)
RPL_WELCOME = '001'
RPL_YOURHOST = '002'
RPL_CREATED = '003'
RPL_MYINFO = '004'
RPL_ISUPPORT = '005'
RPL_TRACELINK = '200'
RPL_TRACECONNECTING = '201'
RPL_TRACEHANDSHAKE = '202'
RPL_TRACEUNKNOWN = '203'
RPL_TRACEOPERATOR = '204'
RPL_TRACEUSER = '205'
RPL_TRACESERVER = '206'
RPL_TRACESERVICE = '207'
RPL_TRACENEWTYPE = '208'
RPL_TRACECLASS = '209'
RPL_STATSLINKINFO = '211'
RPL_STATSCOMMANDS = '212'
RPL_STATSCLINE = '213'
RPL_STATSILINE = '215'
RPL_STATSKLINE = '216'
RPL_STATSYLINE = '218'
RPL_ENDOFSTATS = '219'
RPL_UMODEIS = '221'
RPL_SERVLIST = '234'
RPL_SERVLISTEND = '235'
RPL_STATSLLINE = '241'
RPL_STATSUPTIME = '242'
RPL_STATSOLINE = '243'
RPL_STATSHLINE = '244'
RPL_LUSERCLIENT = '251'
RPL_LUSEROP = '252'
RPL_LUSERUNKNOWN = '253'
RPL_LUSERCHANNELS = '254'
RPL_LUSERME = '255'
RPL_ADMINME = '256'
RPL_ADMINLOC1 = '257'
RPL_ADMINLOC2 = '258'
RPL_ADMINEMAIL = '259'
RPL_TRACELOG = '261'
RPL_TRYAGAIN = '263'
RPL_NONE = '300'
RPL_AWAY = '301'
RPL_USERHOST = '302'
RPL_ISON = '303'
RPL_UNAWAY = '305'
RPL_NOWAWAY = '306'
RPL_WHOISUSER = '311'
RPL_WHOISSERVER = '312'
RPL_WHOISOPERATOR = '313'
RPL_WHOWASUSER = '314'
RPL_ENDOFWHO = '315'
RPL_WHOISIDLE = '317'
RPL_ENDOFWHOIS = '318'
RPL_WHOISCHANNELS = '319'
RPL_LIST = '322'
RPL_LISTEND = '323'
RPL_CHANNELMODEIS = '324'
RPL_NOTOPIC = '331'
RPL_TOPIC = '332'
RPL_INVITING = '341'
RPL_INVITELIST = '346'
RPL_ENDOFINVITELIST = '347'
RPL_EXCEPTLIST = '348'
RPL_ENDOFEXCEPTLIST = '349'
RPL_VERSION = '351'
RPL_WHOREPLY = '352'
RPL_NAMREPLY = '353'
RPL_LINKS = '364'
RPL_ENDOFLINKS = '365'
RPL_ENDOFNAMES = '366'
RPL_BANLIST = '367'
RPL_ENDOFBANLIST = '368'
RPL_ENDOFWHOWAS = '369'
RPL_INFO = '371'
RPL_MOTD = '372'
RPL_ENDOFINFO = '374'
RPL_MOTDSTART = '375'
RPL_ENDOFMOTD = '376'
RPL_YOUREOPER = '381'
RPL_REHASHING = '382'
RPL_YOURESERVICE = '383'
RPL_TIME = '391'
RPL_USERSSTART = '392'
RPL_USERS = '393'
RPL_ENDOFUSERS = '394'
RPL_NOUSERS = '395'
ERR_NOSUCHNICK = '401'
ERR_NOSUCHSERVER = '402'
ERR_NOSUCHCHANNEL = '403'
ERR_CANNOTSENDTOCHAN = '404'
ERR_TOOMANYCHANNELS = '405'
ERR_WASNOSUCHNICK = '406'
ERR_TOOMANYTARGETS = '407'
ERR_NOSUCHSERVICE = '408'
ERR_NOORIGIN = '409'
ERR_NORECIPIENT = '411'
ERR_NOTEXTTOSEND = '412'
ERR_NOTOPLEVEL = '413'
ERR_WILDTOPLEVEL = '414'
ERR_BADMASK = '415'
ERR_UNKNOWNCOMMAND = '421'
ERR_NOMOTD = '422'
ERR_NOADMININFO = '423'
ERR_FILEERROR = '424'
ERR_NONICKNAMEGIVEN = '431'
ERR_ERRONEUSNICKNAME = '432'
ERR_NICKNAMEINUSE = '433'
ERR_NICKCOLLISION = '436'
ERR_USERNOTINCHANNEL = '441'
ERR_NOTONCHANNEL = '442'
ERR_USERONCHANNEL = '443'
ERR_NOLOGIN = '444'
ERR_SUMMONDISABLED = '445'
ERR_USERSDISABLED = '446'
ERR_NOTREGISTERED = '451'
ERR_NEEDMOREPARAMS = '461'
ERR_ALREADYREGISTRED = '462'
ERR_NOPERMFORHOST = '463'
ERR_PASSWDMISMATCH = '464'
ERR_YOUREBANNEDCREEP = '465'
ERR_KEYSET = '467'
ERR_CHANNELISFULL = '471'
ERR_UNKNOWNMODE = '472'
ERR_INVITEONLYCHAN = '473'
ERR_BANNEDFROMCHAN = '474'
ERR_BADCHANNELKEY = '475'
ERR_BADCHANMASK = '476'
ERR_BANLISTFULL = '478'
ERR_NOPRIVILEGES = '481'
ERR_CHANOPRIVSNEEDED = '482'
ERR_CANTKILLSERVER = '483'
ERR_UNIQOPRIVSNEEDED = '485'
ERR_NOOPERHOST = '491'
ERR_UMODEUNKNOWNFLAG = '501'
ERR_USERSDONTMATCH = '502'
# Generated by Django 2.2.3 on 2019-07-26 08:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Resource.state`` choices (Scheduled/Downloading/Completed/Failed)
    and set the ``Resource.ttl`` default to 86400 seconds (one day)."""

    dependencies = [("kiss_cache", "0002_alter_resources")]

    operations = [
        migrations.AlterField(
            model_name="resource",
            name="state",
            field=models.IntegerField(
                choices=[
                    (0, "Scheduled"),
                    (1, "Downloading"),
                    (2, "Completed"),
                    (3, "Failed"),
                ],
                default=0,
            ),
        ),
        migrations.AlterField(
            model_name="resource", name="ttl", field=models.IntegerField(default=86400)
        ),
    ]
|
# Nishaant Goswamy
# ElGamal signature demo: sign each encoded letter of a message, then verify.
import collections
from CompressDecompress import Text_Encoding, Letter_Decoding
import cryptAlg as crypt

print("This is ElGamal Signature")

# --- Key / nonce setup ---
p = int(input("Enter a prime value p: "))
if not crypt.primeCheck(p):
    quit()
g = crypt.Generator((p - 1), p)
print("Generator g:", g)
r = int(input("Select secret key int r: "))
K = crypt.Square_And_Multiply(g, r, p)  # public key K = g^r mod p
print("K value K=(g**r)%p:", K)
R = crypt.Coprime(p - 1)                # signing nonce, gcd(R, p-1) = 1
print("R value GCD(R,p-1)=1:", R)
X = crypt.Square_And_Multiply(g, R, p)  # commitment X = g^R mod p
print("X value X=(g**R)%p:", X)

M = (input("Enter the message string: ")).lower()
MsgNumList = Text_Encoding(M)
A1_List = []
A2_List = []
for M in MsgNumList:
    print("\n***Signature***")
    M = int(M)
    print("Msg Num (M):", M)
    # Exponent arithmetic is mod p-1 (Fermat); R^-1 is invariant over the loop.
    inv_R = crypt.Inverse_Mod(R, p - 1)
    # FIX: label previously said "(mod p)" although the inverse is taken mod p-1.
    print("inverse mod (R^-1(mod p-1)):", inv_R)
    Y = ((M - r * X) * inv_R) % (p - 1)
    print("Signature Y = ((M - r*X)*inv_R)% (p-1)")
    print("Y value:", Y)
    # FIX: corrected "Verfification" typo in the banner below.
    print("***Verification***")
    # Verification: K^X * X^Y must equal g^M (mod p).
    A1 = (crypt.Square_And_Multiply(K, X, p) * crypt.Square_And_Multiply(X, Y, p)) % p
    # A1 = ((K**X)*(X**Y))%p
    print("A1 value is:", A1)
    A1_List.append(A1)
    A2 = crypt.Square_And_Multiply(g, M, p)
    print("A2 value is:", A2, "\n")
    A2_List.append(A2)
    if A1 == A2:
        print("Success!! Signature a element of A1 and A2 match")

# Aggregate check over all per-letter verifications.
if collections.Counter(A1_List) == collections.Counter(A2_List):
    print("Success!! Signature A1 and A2 match for all elements")
print("A1's List", A1_List)
print("A2's List", A2_List)
print("Encoded Message:", MsgNumList)
verifyEncodedMsg = int(''.join([str(x) for x in MsgNumList]))
print("Encoded Message:", verifyEncodedMsg)
decoded_msg = ''
print("Decode Message: ", end="")
for i in MsgNumList:
    print(i + ":" + Letter_Decoding(int(i)), end=", ")
    decoded_msg += Letter_Decoding(int(i))
print("\nDecoded Message:", decoded_msg)
|
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2016 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from ..types import TypedList
from ..util import prep
from datetime import datetime
import json
import logging
import six
class ContainerDestination(object):
    """Smartsheet ContainerDestination data model."""

    # Keys retained per operation by _apply_pre_request_filter; any
    # filter name not listed here leaves the dict untouched.
    # NOTE: consolidates three previously duplicated filter branches.
    _FILTER_PERMITTED = {
        'copy_workspace': ('newName',),
        'move_folder': ('destinationId', 'destinationType'),
        'move_sheet': ('destinationId', 'destinationType')}

    def __init__(self, props=None, base_obj=None):
        """Initialize the ContainerDestination model.

        :param props: optional dict of initial values; both camelCase
            (raw API) and snake_case key spellings are accepted.
        :param base_obj: optional parent object kept for API context.
        """
        self._base = None
        if base_obj is not None:
            self._base = base_obj
        self._pre_request_filter = None
        self._log = logging.getLogger(__name__)
        self.allowed_values = {
            'destination_type': [
                'home',
                'workspace',
                'folder']}

        self._destination_id = None
        self._destination_type = None
        self._new_name = None

        if props:
            # account for alternate variable names from raw API response
            if 'destinationId' in props:
                self.destination_id = props['destinationId']
            if 'destination_id' in props:
                self.destination_id = props['destination_id']
            if 'destinationType' in props:
                self.destination_type = props['destinationType']
            if 'destination_type' in props:
                self.destination_type = props[
                    'destination_type']
            if 'newName' in props:
                self.new_name = props['newName']
            if 'new_name' in props:
                self.new_name = props['new_name']

    @property
    def destination_id(self):
        """ID of the destination container (int); silently ignores non-ints."""
        return self._destination_id

    @destination_id.setter
    def destination_id(self, value):
        if isinstance(value, six.integer_types):
            self._destination_id = value

    @property
    def destination_type(self):
        """Type of the destination container; one of home/workspace/folder."""
        return self._destination_type

    @destination_type.setter
    def destination_type(self, value):
        if isinstance(value, six.string_types):
            if value not in self.allowed_values['destination_type']:
                raise ValueError(
                    ("`{0}` is an invalid value for ContainerDestination`destination_type`,"
                     " must be one of {1}").format(
                         value, self.allowed_values['destination_type']))
            self._destination_type = value

    @property
    def new_name(self):
        """New name for the copied/moved container (str)."""
        return self._new_name

    @new_name.setter
    def new_name(self, value):
        if isinstance(value, six.string_types):
            self._new_name = value

    @property
    def pre_request_filter(self):
        """Name of the operation whose serialization filter should apply."""
        return self._pre_request_filter

    @pre_request_filter.setter
    def pre_request_filter(self, value):
        self._pre_request_filter = value

    def to_dict(self, op_id=None, method=None):
        """Serialize to an API-shaped dict, filtered for the pending operation."""
        obj = {
            'destinationId': prep(self._destination_id),
            'destinationType': prep(self._destination_type),
            'newName': prep(self._new_name)}
        return self._apply_pre_request_filter(obj)

    def _apply_pre_request_filter(self, obj):
        """Drop keys not permitted for the active pre-request filter."""
        permitted = self._FILTER_PERMITTED.get(self.pre_request_filter)
        if permitted is not None:
            for key in list(obj.keys()):
                if key not in permitted:
                    self._log.debug(
                        'deleting %s from obj (filter: %s)',
                        key, self.pre_request_filter)
                    del obj[key]
        return obj

    def to_json(self):
        """Serialize to a pretty-printed JSON string."""
        return json.dumps(self.to_dict(), indent=2)

    def __str__(self):
        return json.dumps(self.to_dict())
|
"""
A PyQt5 custom widget used by Morse Trainer.
Used to select overall word speed for the Send tab only.
speed = SendSpeeds()
speed.setState(wpm) # sets the speed display
The widget generates a signal '.changed' when some value changes.
"""
import platform
from PyQt5.QtWidgets import QWidget, QPushButton, QHBoxLayout, QVBoxLayout
from PyQt5.QtWidgets import QLabel, QSpinBox, QGroupBox, QCheckBox, QSpacerItem
from PyQt5.QtCore import pyqtSignal
import utils
import logger
# Module-level log sink; only CRITICAL-and-above messages reach debug.log.
log = logger.Log('debug.log', logger.Log.CRITICAL)
class SendSpeeds(QWidget):
    """Widget selecting the overall word speed for the Send tab.

    Emits ``changed(int)`` whenever the user picks a new speed.
    """

    # signal raised when user changes cwpm
    changed = pyqtSignal(int)

    # minimum, maximum speeds and spinbox increment (wpm)
    MinSpeed = 5
    MaxSpeed = 40
    StepSpeed = 5

    def __init__(self, speed=MinSpeed):
        QWidget.__init__(self)

        # define state variables
        self.inhibit = True     # suppress 'changed' while programmatically updating
        self.speed = speed

        # define the UI
        self.initUI()
        self.setFixedHeight(80)
        self.show()
        self.inhibit = False

    def initUI(self):
        """Build the widget layout: a speed spinbox plus an apparent-speed label."""
        # define the widgets we are going to use
        lbl_set_speed = QLabel('Set to:')
        self.spb_speed = QSpinBox(self)
        self.spb_speed.setMinimum(SendSpeeds.MinSpeed)
        self.spb_speed.setMaximum(SendSpeeds.MaxSpeed)
        self.spb_speed.setSingleStep(SendSpeeds.StepSpeed)
        self.spb_speed.setSuffix(' wpm')
        self.spb_speed.setValue(self.speed)
        self.lbl_apparent_speed = QLabel('Apparent speed:')

        # start the layout
        layout = QVBoxLayout()
        groupbox = QGroupBox("Speed")
        groupbox.setStyleSheet(utils.StyleCSS)
        layout.addWidget(groupbox)
        hbox = QHBoxLayout()
        hbox.addWidget(lbl_set_speed)
        hbox.addWidget(self.spb_speed)
        hbox.addItem(QSpacerItem(20, 20))
        hbox.addWidget(self.lbl_apparent_speed)
        hbox.addStretch()
        groupbox.setLayout(hbox)
        self.setLayout(layout)

        # helpful (?) tooltip
        self.setToolTip('<font size=4>'
                        'This provides a rough control over the speed Morse '
                        'Trainer will attempt to recognize. Setting the '
                        'speed in the spinbox will configure the program to '
                        'recognize that speed. Once the program recognizes '
                        'your code it will adapt to any speed variation.<p>'
                        'The "apparent speed" display is a rough attempt to '
                        'show the speed you are sending.'
                        '</font>'
                        )

        # connect spinbox events to handlers
        self.spb_speed.valueChanged.connect(self.handle_speed_change)

    def handle_speed_change(self, word_speed):
        """The widget speed changed.

        word_speed  the new speed

        Raise self.changed event with params.
        """
        # save changed speed
        self.speed = word_speed

        # tell the world there was a change, if allowed
        if not self.inhibit:
            self.changed.emit(self.speed)

    def setState(self, wpm):
        """Set the overall widget state.

        wpm  the speed in words per minute (integer)
        """
        # CONSISTENCY FIX: round to the class step constant rather than a
        # hard-coded 5, so changing StepSpeed keeps this method in sync.
        canon_wpm = utils.make_multiple(wpm, SendSpeeds.StepSpeed)
        self.inhibit = True
        self.speed = canon_wpm
        self.spb_speed.setValue(canon_wpm)
        self.inhibit = False
        self.update()

    def getState(self):
        """Return the widget state.

        Returns the speed in wpm.
        """
        return self.speed

    def setApparentSpeed(self, wpm):
        """Set the apparent speed to 'wpm'."""
        new_text = 'Apparent speed: %d wpm' % wpm
        self.lbl_apparent_speed.setText(new_text)
        self.lbl_apparent_speed.update()
|
# pylint: disable=W0212
import logging
import os
import sys
from loguru import logger as _logger
logger = _logger
# loguru format for the console sink (level icon, short timestamp,
# level-colored message)
CONSOLE_LOG_FORMAT = "{level.icon} {time:MM-DD HH:mm:ss} <lvl>{level}\t{message}</lvl>"
# loguru format for the file sink (no color markup)
FILE_LOG_FORMAT = "{time:YYYY-MM-DD HH:mm} {level}\t{message}"
# log file path template; loguru substitutes {time} at sink creation
FILE_LOG_PATH_NAMING = "./logs/{time}.log"
def logger_init(console_log=True, file_log=False):
    """Configure the shared loguru logger.

    :param console_log: when True the console sink logs INFO and above,
        when False only ERROR and above.
    :param file_log: when True an INFO-level file sink is added (daily
        rotation under ./logs); when False no log file is written.

    The ``BOTOY_LOG_LEVEL`` environment variable, when set, overrides the
    level of every sink.
    """
    logger.remove()
    BOTOY_LOG_LEVEL = os.getenv("BOTOY_LOG_LEVEL")
    # Single console sink; only the level differs between the two modes
    # (the two former branches duplicated every other argument).
    logger.add(
        sys.stdout,
        format=CONSOLE_LOG_FORMAT,
        colorize=True,
        level=BOTOY_LOG_LEVEL or ("INFO" if console_log else "ERROR"),
    )
    if file_log:
        logger.add(
            FILE_LOG_PATH_NAMING,
            format=FILE_LOG_FORMAT,
            rotation="1 day",
            encoding="utf-8",
            level=BOTOY_LOG_LEVEL or "INFO",
        )
class LoguruHandler(logging.Handler):
    """Bridge stdlib ``logging`` records into the module-level loguru logger.

    Attach an instance to a stdlib logger so its records are re-emitted
    through loguru with the correct level and call site (the standard
    loguru interop recipe).
    """

    def emit(self, record):
        # Get corresponding Loguru level if it exists
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            # unknown/custom level name: fall back to the numeric level
            level = record.levelno

        # Find caller from where originated the logged message:
        # walk out of the stdlib logging machinery so loguru reports
        # the real call site, tracking the stack depth as we go
        frame, depth = logging.currentframe(), 2
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1

        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
logger_init()
|
# @time: 2022/1/12 10:07 AM
# Author: pan
# @File: src.py
# @Software: PyCharm
"""
核心代码文件
"""
from db import db_handle
from lib import common
# logged-in user's database record (set by login(); read by recharge()
# and reader()); empty until a user has logged in
user_data = []
# registration feature
def register():
    """Interactively register a new user.

    The user name must not already exist in the database and the
    password must be typed identically twice.  Entering ``q``/``Q`` at
    the name or password prompt aborts registration.
    """
    while True:
        inp_name = input("请输入用户名:(输入q退出注册)").strip()
        if inp_name in ("q", "Q"):
            break
        # refuse names that are already registered
        # (local variable renamed so it no longer shadows the module-level
        # user_data holding the logged-in user)
        existing = db_handle.select_user(inp_name)
        if existing:
            print("该用户名已经注册,请重新输入用户名:")
            continue
        inp_pwd = input("请输入密码:(输入q退出注册)").strip()
        # check the quit command *before* asking for the confirmation
        # password (previously the user had to answer both prompts first)
        if inp_pwd in ("q", "Q"):
            break
        inp_re_pwd = input("请再次输入密码:(输入q退出注册)").strip()
        if inp_pwd == inp_re_pwd:
            # passwords match: create the account
            db_handle.add_user(inp_name, inp_pwd)
            print("恭喜您! 注册成功")
            break
        else:
            print("两次输入的密码不一致,请重新输入!!!")
            continue
# login feature
def login():
    """Prompt for credentials until a registered user logs in.

    Unregistered names are rejected immediately.  On success the matching
    database record is stored in the module-level ``user_data`` so other
    features can identify the current user.
    """
    global user_data
    while True:
        name = input("请输入用户名:").strip()
        record = db_handle.select_user(name)
        # the name must exist in the database before we ask for a password
        if not record:
            print("用户名未注册!")
            continue
        pwd = input("请输入用户密码:").strip()
        # both the name and the password must match the stored record
        if (name, pwd) == (record[0], record[1]):
            print("登录成功!")
            # remember who is logged in
            user_data = record
            break
        print("账号或密码有误!!!")
# recharge feature
@common.login_auth
def recharge():
    """Top up the logged-in user's balance (login required).

    Reads a whole-number amount, rewrites the user's database record with
    the increased balance, and appends an audit-log line.
    """
    import time  # hoisted out of the loop body (was re-imported per pass)

    while True:
        inp_money = input("请输入充值金额:").strip()
        if not inp_money.isdigit():
            print("输入的金额必须是数字!!!")
            continue
        # 1 old record, exactly as stored in the database
        name, pwd, balance = db_handle.select_user(user_data[0])
        old_data = f'{name}:{pwd}:{balance}\n'
        # 2 new record with the topped-up balance
        balance = int(balance)
        balance += int(inp_money)
        new_data = f'{name}:{pwd}:{balance}\n'
        # 3 write the updated record back to the database
        db_handle.update_user(old_data, new_data)
        # 4 append an audit-log entry
        now_time = time.strftime('%Y-%m-%d %H:%M:%S')
        log_msg = f'用户:{user_data[0]}, 在:{now_time}, 进行了充值! 充值金额:{inp_money}\n'
        print(log_msg)
        common.logging(log_msg)
        break
# novel-reading feature
@common.login_auth
def reader():
    """Interactive novel reading (login required).

    Shows the category menu, lets the user pick a category and a novel,
    deducts the novel's price from the user's balance, then prints the
    novel's content.
    """
    print("""
    ================ 小说类别 ===============
    0 武侠小说
    1 玄幻小说
    2 都市言情
    ================== end =================
    """)
    while True:
        # 1) choose the novel category
        type_cmd = input("请输入要看的小说类别:").strip()
        story_dic = db_handle.select_story_list()
        if type_cmd not in story_dic:
            print("请输入正确的小说类型!!!")
            continue
        # 2) fetch the dict of novels for that category
        sub_story_dic = story_dic[type_cmd]
        for key in sub_story_dic:
            print(f'小说编号:{key},'
                  f'小说名字:{sub_story_dic[key][0]},'
                  f'小说价格:{sub_story_dic[key][1]}')
        # 3) enter the number of the novel to read
        fiction_no = input("请输入要看的小说编号:").strip()
        if fiction_no not in sub_story_dic:
            print("请输入正确的小说编号!!!")
            continue
        fiction_name, fiction_price = sub_story_dic[fiction_no]
        # 4) confirm paid viewing before charging
        pay_cmd = input("是否进行付费观看(输入y或Y进行查看):").strip()
        if pay_cmd == "y" or pay_cmd == "Y":
            user_money = int(user_data[2])
            if user_money >= int(fiction_price):
                # balance is sufficient: charge the user
                # 1 old record as stored in the database
                name, pwd, balance = db_handle.select_user(user_data[0])
                old_data = f'{name}:{pwd}:{balance}\n'
                # 2 new record with the price deducted
                balance = int(balance)
                balance -= int(fiction_price)
                new_data = f'{name}:{pwd}:{balance}\n'
                # 3 write the updated record back to the database
                db_handle.update_user(old_data, new_data)
                # 4 append an audit-log entry
                import time
                now_time = time.strftime('%Y-%m-%d %H:%M:%S')
                log_msg = f'用户:{user_data[0]}, 在:{now_time}, 进行了消费! 消费金额:{fiction_price}\n'
                print(log_msg)
                common.logging(log_msg)
                # 5 after charging, show the novel content
                fiction_content = db_handle.show_fiction_content(fiction_name)
                print(fiction_content)
                break
            else:
                print("用户余额不足,请先进行充值!!!")
                break
# dispatch table: menu key -> [feature label, handler function]
func_dic = {
    "0": ["退出功能", exit],
    "1": ["账号注册", register],
    "2": ["账号登录", login],
    "3": ["充值功能", recharge],
    "4": ["阅读小说", reader]
}
def run():
    """Main menu loop: print the feature menu and dispatch user commands."""
    print("项目开始运行...")
    while True:
        # show the menu built from the dispatch table
        print("============= 请选择项目功能 ===============")
        for option in func_dic:
            print(f' {option} {func_dic[option][0]}')
        print("================== end ====================")
        # read the command
        choice = input("请输入指令:").strip()
        if choice not in func_dic:
            print("请输入正确的指令!!!")
            continue
        # valid command: invoke the matching handler
        func_dic[choice][1]()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.shortcuts import render
from rest_framework import generics
from rest_framework.views import APIView, Response
from celery import task
@task
def test(*args, **kwargs):
    """Demo Celery task: log its arguments, then print a 0-99 squares table."""
    print('args: ', args)
    print('kwargs: ', kwargs)
    print('++++Task started++++')
    for i in range(100):
        # use the print() function for Python 3 compatibility — the rest of
        # this function already calls print() (the old line used the
        # Python 2 print statement and produced identical output)
        print('%s * %s = %s' % (i, i, i * i))
    print('====Task ended====')
class TestAPI(APIView):
    """Demo endpoint that fires the ``test`` Celery task asynchronously."""

    def get(self, request, *args, **kwargs):
        """Queue the demo task and return the current server time."""
        # print() function instead of the Python 2 print statement,
        # consistent with the rest of this file
        print('got it')
        test.delay('a')
        return Response(data={'info': 'resp: ' + str(datetime.datetime.now())})
"""Configuration module.
This module provides functions for finding, reading, and creating user
configuration files. It also provides an object, CONFIG, for easy access
to the loaded config.
"""
import os
import shutil
from configparser import ConfigParser
DEFAULT_CONFIG_FILE = "~/.config/reddit-wall.conf"
# FIXME: Add support for specifying config path as a command line argument
def create_config(config_file=None):
    """Read the program config file and create one if it doesn't exist.

    Parameters
    ----------
    config_file : string
        Path to the config file if not using the default.

    Returns
    -------
    parser : ConfigParser
        An instance of the program configuration.
    """
    default_config_path = os.path.expanduser(DEFAULT_CONFIG_FILE)
    if not config_file and not os.path.exists(default_config_path):
        shutil.copyfile("conf/reddit-wall.conf", default_config_path)
    parser = ConfigParser(allow_no_value=True)
    # Only read the default file when it exists: with an explicit
    # config_file the default may never have been created, and the old
    # unconditional open() raised FileNotFoundError.  The context manager
    # also closes the handle instead of leaking it.
    if os.path.exists(default_config_path):
        with open(default_config_path) as config_fp:
            parser.read_file(config_fp)
    # values from an explicit config file override the defaults
    if config_file:
        parser.read(config_file)
    return parser
CONFIG = create_config()
|
# coding: utf-8
import os
from setuptools import setup
import lspy
# absolute path of the directory containing this setup script
this_dir = os.path.dirname(os.path.abspath(__file__))

# PyPI search keywords
keywords = [
    "rpc", "json", "json-rpc", "2.0", "lsp"
]

# trove classifiers: supported interpreters, maturity, license, audience
classifiers = [
    "Programming Language :: Python",
    "Programming Language :: Python :: 2",
    "Programming Language :: Python :: 3",
    "Development Status :: 4 - Beta",
    "Operating System :: OS Independent",
    "License :: OSI Approved :: BSD License",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "Intended Audience :: Information Technology"
]

# read the readme file
with open(os.path.join(this_dir, "README.md"), "r") as f:
    long_description = f.read()

# package metadata (name, version, author, ...) is taken from dunder
# attributes of the lspy module imported above
setup(
    name=lspy.__name__,
    version=lspy.__version__,
    author=lspy.__author__,
    author_email=lspy.__email__,
    description=lspy.__doc__.strip().split("\n")[0].strip(),
    license=lspy.__license__,
    url=lspy.__contact__,
    keywords=keywords,
    classifiers=classifiers,
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">=2.7",
    zip_safe=False,
    py_modules=[lspy.__name__],
)
|
from dateutil import parser
from time import mktime
from bamboo.lib.utils import combine_dicts, replace_keys
def parse_order_by(order_by):
    """Normalize an order-by string into a ``[(field, direction)]`` list.

    A leading ``-`` sorts descending (direction -1); a leading ``+`` or no
    prefix sorts ascending (direction 1).  Falsy input (None, empty string)
    is returned unchanged.
    """
    if not order_by:
        return order_by
    direction = 1
    field = order_by
    if order_by[0] in ('-', '+'):
        field = order_by[1:]
        if order_by[0] == '-':
            direction = -1
    return [(field, direction)]
def parse_dates_from_query(query, dataset):
    """Convert datetime-typed values in *query* into epoch timestamps.

    Columns are identified via the dataset's schema; each matching value
    is run through ``maybe_parse_date``.  Always returns a dict: a falsy
    query becomes ``{}``.
    """
    if query and dataset:
        for column in dataset.schema.datetimes(query.keys()):
            query[column] = maybe_parse_date(query[column])
    return query if query else {}
# ``basestring`` exists only on Python 2; fall back to ``str`` on
# Python 3 so the string check below works on both versions (the old
# code raised NameError on Python 3 for any non-dict/non-list input).
try:
    _string_types = basestring  # noqa: F821
except NameError:
    _string_types = str


def maybe_parse_date(o):
    """Recursively convert date strings into epoch timestamps.

    Dicts and lists are walked recursively; strings are parsed with
    ``dateutil`` and converted via ``mktime``; anything else is returned
    unchanged.
    """
    if isinstance(o, dict):
        return {k: maybe_parse_date(v) for k, v in o.items()}
    elif isinstance(o, list):
        return [maybe_parse_date(e) for e in o]
    elif isinstance(o, _string_types):
        return mktime(parser.parse(o).timetuple())
    else:
        return o
class QueryArgs(object):
    """Bundle of query arguments (query, select, distinct, limit, order_by)
    used when fetching rows for a dataset."""

    def __init__(self, query=None, select=None, distinct=None, limit=0,
                 order_by=None, dataset=None):
        """A holder for query arguments.

        :param query: An optional query.
        :param select: An optional select to limit the fields in the dframe.
        :param distinct: Return distinct entries for this field.
        :param limit: Limit on the number of rows in the returned dframe.
        :param order_by: Sort resulting rows according to a column value and
            sign indicating ascending or descending.

        Example of `order_by`:

          - ``order_by='mycolumn'``
          - ``order_by='-mycolumn'``
        """
        # datetime values inside the query are converted to epoch seconds
        self.query = parse_dates_from_query(query, dataset)
        self.select = select
        self.distinct = distinct
        self.limit = limit
        # normalized to [(field, direction)] (or left falsy)
        self.order_by = parse_order_by(order_by)

    def encode(self, encoding, query):
        """Encode query, order_by, and select given an encoding.

        The query will be combined with the existing query.

        :param encoding: A dict to encode the QueryArgs fields with.
        :param query: An additional dict to combine with the existing query.
        """
        self.query = replace_keys(combine_dicts(self.query, query), encoding)
        # order_by holds (field, direction) pairs: re-key the field names
        # through the encoding, preserving directions
        self.order_by = self.order_by and replace_keys(dict(self.order_by),
                                                       encoding).items()
        self.select = self.select and replace_keys(self.select, encoding)

    def __nonzero__(self):
        # Python 2 truthiness hook: a QueryArgs is truthy when any argument
        # is set.  NOTE(review): Python 3 would need __bool__ instead.
        return bool(self.query or self.select or self.distinct or self.limit
                    or self.order_by)
|
"""teset."""
from homeassistant import config_entries
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import *
from .deps.const import (
DOMAIN,
CONF_UPDATE_INSTANT,
CONF_MAPPING,
CONF_CONTROL_PARAMS,
CONF_CLOUD,
CONF_MODEL,
ATTR_STATE_VALUE,
ATTR_MODEL,
ATTR_FIRMWARE_VERSION,
ATTR_HARDWARE_VERSION,
SUPPORTED_DOMAINS,
)
import json
from homeassistant.helpers.device_registry import format_mac
from miio import (
Device as MiioDevice,
DeviceException,
)
from miio.miot_device import MiotDevice
import async_timeout
from aiohttp import ClientSession
from homeassistant.helpers import aiohttp_client, discovery
import requests
from .deps.miot_device_adapter import MiotAdapter
from homeassistant.components import persistent_notification
# Per-domain required keys: [required mapping keys, required control-param
# keys].  Used by validate_devinfo to check a device configuration.
VALIDATE = {'fan': [{"switch_status"}, {"switch_status"}],
            'switch': [{"switch_status"}, {"switch_status"}],
            'light': [{"switch_status"}, {"switch_status"}],
            'cover': [{"motor_control"}, {"motor_control"}],
            'humidifier': [{"switch_status","target_humidity"}, {"switch_status","target_humidity"}]
            }
async def validate_devinfo(hass, data):
    """Check the mapping/params config for missing required keys.

    Returns ``[[], []]`` when everything required by the device type is
    present; otherwise the first list holds the mapping keys that are
    missing and the second list the control-param keys that are missing.
    """
    missing = [[], []]
    required = VALIDATE.get(data['devtype'])
    # device types without an entry in VALIDATE need no validation
    if not required:
        return missing
    mapping_keys = json.loads(data[CONF_MAPPING])
    params_keys = json.loads(data[CONF_CONTROL_PARAMS])
    missing[0] = [key for key in required[0] if key not in mapping_keys]
    missing[1] = [key for key in required[1] if key not in params_keys]
    return missing
async def async_get_mp_from_net(hass, model):
    """Fetch a ready-made config for *model* from the ha0y/miot-params repo.

    Returns the matching item dict (with device_model etc.), or None when
    the download fails or the model is not listed.
    """
    cs = aiohttp_client.async_get_clientsession(hass)
    url = "https://raw.githubusercontent.com/ha0y/miot-params/master/main.json"
    with async_timeout.timeout(10):
        try:
            a = await cs.get(url)
        except Exception:
            # network failure is treated the same as "not found"
            a = None
    if a:
        data = await a.json(content_type=None)
        for item in data:
            if item['device_model'] == model:
                return item
    return None
async def guess_mp_from_model(hass, model):
    """Derive device_type/mapping/params for *model* from miot-spec.org.

    Downloads the instance list, finds the entry for *model*, fetches its
    spec and lets MiotAdapter build a mapping and control params.  Returns
    a dict with keys ``device_type``, ``mapping``, ``params`` (the latter
    two JSON-encoded), or None when anything is unavailable.
    """
    cs = aiohttp_client.async_get_clientsession(hass)
    url_all = 'http://miot-spec.org/miot-spec-v2/instances?status=all'
    url_spec = 'http://miot-spec.org/miot-spec-v2/instance'
    with async_timeout.timeout(10):
        try:
            a = await cs.get(url_all)
        except Exception:
            a = None
    if a:
        dev_list = await a.json(content_type=None)
        dev_list = dev_list.get('instances')
    else:
        dev_list = None
    result = None
    if dev_list:
        for item in dev_list:
            if model == item['model']:
                # keep scanning: with status=all a model can appear more
                # than once and the last entry wins (original behavior)
                result = item
    # BUG FIX: the original fell through to result['type'] and raised
    # TypeError when the model was not in the list
    if result is None:
        return None
    urn = result['type']
    params = {'type': urn}
    with async_timeout.timeout(10):
        try:
            s = await cs.get(url_spec, params=params)
        except Exception:
            s = None
    if s:
        spec = await s.json()
        ad = MiotAdapter(spec)
        mt = ad.mitype
        dt = ad.devtype
        # prefer the mapping/params keyed by the instance type, fall back
        # to the device type
        mp = ad.get_mapping_by_snewid(mt) or ad.get_mapping_by_snewid(dt)
        prm = ad.get_params_by_snewid(mt) or ad.get_params_by_snewid(dt)
        return {
            'device_type': dt or 'switch',
            'mapping': json.dumps(mp, separators=(',', ':')),
            'params': json.dumps(prm, separators=(',', ':'))
        }
    else:
        return None
# TODO
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Config flow for adding a MIoT device by host and token.

    Steps: ``user`` (name/host/token) -> ``devinfo`` (device type,
    mapping, control params; auto-filled when they can be discovered
    online) -> optional ``cloudinfo`` (Xiaomi cloud credentials when
    cloud read/write is requested).
    """

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Initialize flow"""
        self._name = vol.UNDEFINED
        self._host = vol.UNDEFINED
        self._token = vol.UNDEFINED
        self._mapping = vol.UNDEFINED
        self._params = vol.UNDEFINED
        self._devtype = vol.UNDEFINED
        self._info = None
        self._model = None

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        errors = {}
        # Check if already configured
        # await self.async_set_unique_id(DOMAIN)
        # self._abort_if_unique_id_configured()

        if user_input is not None:
            self._name = user_input[CONF_NAME]
            self._host = user_input[CONF_HOST]
            self._token = user_input[CONF_TOKEN]
            # self._mapping = user_input[CONF_MAPPING]
            # self._params = user_input[CONF_CONTROL_PARAMS]

            # probe the device locally to read its model/firmware/MAC
            device = MiioDevice(self._host, self._token)
            try:
                self._info = device.info()
            except DeviceException:
                # print("DeviceException!!!!!!")
                errors['base'] = 'cannot_connect'
            # except ValueError:
            #     errors['base'] = 'value_error'

            if self._info is not None:
                unique_id = format_mac(self._info.mac_address)
                # await self.async_set_unique_id(unique_id)
                # notify (without aborting) when a device with the same
                # MAC already has an entry
                for entry in self._async_current_entries():
                    if entry.unique_id == unique_id:
                        persistent_notification.async_create(
                            self.hass,
                            f"您新添加的设备: **{self._name}** ,\n"
                            f"其 MAC 地址与现有的某个设备相同。\n"
                            f"只是通知,不会造成任何影响。",
                            "设备可能重复")
                        break

                self._abort_if_unique_id_configured()
                d = self._info.raw
                self._model = d['model']
                device_info = (
                    f"Model: {d['model']}\n"
                    f"Firmware: {d['fw_ver']}\n"
                    f"MAC: {d['mac']}\n"
                )
                # look up a ready-made config online, falling back to
                # guessing one from the public miot-spec database
                # self._info = self.get_devconfg_by_model(self._model)
                self._info = await async_get_mp_from_net(self.hass, self._model) \
                    or await guess_mp_from_model(self.hass, self._model)

                if self._info:
                    device_info += "\n已经自动发现配置参数。\n如无特殊需要,无需修改下列内容。\n"
                    devtype_default = self._info.get('device_type')
                    mapping_default = self._info.get('mapping')
                    params_default = self._info.get('params')
                else:
                    # nothing discovered: offer a minimal switch template
                    device_info += "请手动进行配置。\n"
                    devtype_default = ''
                    mapping_default = '{"switch_status":{"siid":2,"piid":1}}'
                    params_default = '{"switch_status":{"power_on":true,"power_off":false}}'

                # stash the first-step input; later steps add to it
                self._input2 = user_input
                return self.async_show_form(
                    step_id="devinfo",
                    data_schema=vol.Schema({
                        vol.Required('devtype', default=devtype_default): vol.In(SUPPORTED_DOMAINS),
                        vol.Required(CONF_MAPPING, default=mapping_default): str,
                        vol.Required(CONF_CONTROL_PARAMS, default=params_default): str,
                        vol.Optional('cloud_read'): bool,
                        vol.Optional('cloud_write'): bool,
                    }),
                    description_placeholders={"device_info": device_info},
                    errors=errors,
                )

        # first visit (or connection error): show the name/host/token form
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_NAME): str,
                    vol.Required(CONF_HOST, default='192.168.'): str,
                    vol.Required(CONF_TOKEN): str,
                    # vol.Required(CONF_MAPPING, default='{"switch_status":{"siid":2,"piid":1}}'): str,
                    # vol.Required(CONF_CONTROL_PARAMS, default='{"switch_status":{"power_on":true,"power_off":false}}'): str,
                }
            ),
            # description_placeholders={"device_info": "device_info"},
            errors=errors,
        )

    async def async_step_devinfo(self, user_input=None):
        """Second step: validate device type, mapping and control params."""
        errors = {}
        hint = ""
        if user_input is not None:
            self._devtype = user_input['devtype']
            self._input2['devtype'] = self._devtype
            self._input2[CONF_MAPPING] = user_input[CONF_MAPPING]
            self._input2[CONF_CONTROL_PARAMS] = user_input[CONF_CONTROL_PARAMS]
            # self._input2['cloud_read'] = user_input['cloud_read']
            self._input2['cloud_write'] = user_input.get('cloud_write')
            v = await validate_devinfo(self.hass, self._input2)
            if v == [[],[]] :
                try:
                    # print(result)
                    if not user_input.get('cloud_read') and not user_input.get('cloud_write'):
                        # local mode: verify we can actually poll the
                        # device before creating the entry
                        device = MiotDevice(ip=self._input2[CONF_HOST], token=self._input2[CONF_TOKEN], mapping=json.loads(self._input2[CONF_MAPPING]))
                        result = device.get_properties_for_mapping()
                        return self.async_create_entry(
                            title=self._input2[CONF_NAME],
                            data=self._input2,
                        )
                    else:
                        # cloud mode: collect Xiaomi cloud credentials next
                        return self.async_show_form(
                            step_id="cloudinfo",
                            data_schema=vol.Schema({
                                vol.Required('did'): str,
                                vol.Required('userId'): str,
                                vol.Required('serviceToken'): str,
                                vol.Required('ssecurity'): str,
                            }),
                            # description_placeholders={"device_info": hint},
                            errors=errors,
                        )
                except DeviceException as ex:
                    errors["base"] = "no_local_access"
                    hint = f"错误信息: {ex}"
                except Exception as exe:
                    hint = f"错误信息: {exe}"
            else:
                # required keys missing: list them in the hint
                errors["base"] = "bad_params"
                hint = ""
                if v[0]:
                    hint += "\nmapping 缺少必须配置的项目:"
                    for item in v[0]:
                        hint += (item + ', ')
                if v[1]:
                    hint += "\nparams 缺少必须配置的项目:"
                    for item in v[1]:
                        hint += (item + ', ')

        # if info:
        # re-show the form pre-filled with the rejected input
        return self.async_show_form(
            step_id="devinfo",
            data_schema=vol.Schema({
                vol.Required('devtype', default=user_input['devtype']): vol.In(SUPPORTED_DOMAINS),
                vol.Required(CONF_MAPPING, default=user_input[CONF_MAPPING]): str,
                vol.Required(CONF_CONTROL_PARAMS, default=user_input[CONF_CONTROL_PARAMS]): str,
                vol.Optional('cloud_read'): bool,
                vol.Optional('cloud_write'): bool,
            }),
            description_placeholders={"device_info": hint},
            errors=errors,
        )

    async def async_step_cloudinfo(self, user_input=None):
        """Final step for cloud mode: store the Xiaomi cloud credentials."""
        errors = {}
        if user_input is not None:
            self._input2['update_from_cloud'] = {}
            self._input2['update_from_cloud']['did'] = user_input['did']
            self._input2['update_from_cloud']['userId'] = user_input['userId']
            self._input2['update_from_cloud']['serviceToken'] = user_input['serviceToken']
            self._input2['update_from_cloud']['ssecurity'] = user_input['ssecurity']
            return self.async_create_entry(
                title=self._input2[CONF_NAME],
                data=self._input2,
            )

    # def get_devconfg_by_model(self, model):
    #     print(model)
    #     dev = json.loads(TEST)
    #     for item in dev:
    #         if item['device_model'] == model:
    #             return item
    #     return None

    async def async_step_import(self, user_input):
        """Import a config flow from configuration."""
        # NOTE(review): returns a bare True rather than a flow result —
        # looks unfinished; confirm against the config_entries API.
        return True
|
from datetime import datetime, timezone
def parse_youtube_datetime(datetime_str: str) -> datetime:
    """Parse an ISO-8601 timestamp string into a UTC-aware datetime.

    A trailing ``Z`` (as used by the YouTube API) is rewritten as an
    explicit ``+00:00`` offset so ``datetime.fromisoformat`` accepts it,
    and the result is normalized to UTC.
    """
    normalized = datetime_str.replace("Z", "+00:00")
    parsed = datetime.fromisoformat(normalized)
    return parsed.astimezone(timezone.utc)
|
"""
:copyright: © 2019 by the Lin team.
:license: MIT, see LICENSE for more details.
"""
from datetime import timedelta
class BaseConfig(object):
    """
    Base configuration shared by every environment.
    """
    # pagination defaults: items per page and first page index
    COUNT_DEFAULT = 10
    PAGE_DEFAULT = 0
    # silence SQLAlchemy's FSADeprecationWarning
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(BaseConfig):
    """
    Development-environment configuration.
    """
    DEBUG = True
    # access-token lifetime
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)
    # WeChat login settings — empty placeholders, presumably filled per
    # deployment (TODO confirm against the login code)
    APP_ID = ""
    APP_SECRET = ""
    WECHAT_LOGIN_URL = ""
    # request timeout in seconds — assumed to apply to the WeChat login
    # request; confirm at the call site
    TIMEOUT = 5
    # The plugin module is not enabled yet; the settings below can be ignored.
    # Plugin config is written in a dict:
    # PLUGIN_PATH = {
    #     'poem': {'path': 'app.plugins.poem', 'enable': True, 'version': '0.0.1', 'limit': 20},
    #     'oss': {'path': 'app.plugins.oss', 'enable': True, 'version': '0.0.1', 'access_key_id': 'not complete',
    #             'access_key_secret': 'not complete', 'endpoint': 'http://oss-cn-shenzhen.aliyuncs.com',
    #             'bucket_name': 'not complete', 'upload_folder': 'app',
    #             'allowed_extensions': ['jpg', 'gif', 'png', 'bmp']}
    # }
class ProductionConfig(BaseConfig):
    """
    Production-environment configuration.
    """
    DEBUG = False
    # access-token lifetime
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)
    # The plugin module is not enabled yet; the settings below can be ignored.
    # Plugin config is written in a dict:
    # PLUGIN_PATH = {
    #     'poem': {'path': 'app.plugins.poem', 'enable': True, 'version': '0.0.1', 'limit': 20},
    #     'oss': {'path': 'app.plugins.oss', 'enable': True, 'version': '0.0.1', 'access_key_id': 'not complete',
    #             'access_key_secret': 'not complete', 'endpoint': 'http://oss-cn-shenzhen.aliyuncs.com',
    #             'bucket_name': 'not complete', 'upload_folder': 'app',
    #             'allowed_extensions': ['jpg', 'gif', 'png', 'bmp']}
    # }
|
import pytest
import os
import re
import ssg.build_cpe
import ssg.xml
ET = ssg.xml.ElementTree
def test_extract_element():
    """extract_subelement returns the value of the named attribute found
    anywhere in the tree, and None for attributes/tags that don't match."""
    obj = """<?xml version="1.0"?>
    <variables>
        <var>
            <subelement>
                <random id="test">This</random>
            </subelement>
        </var>
        <var>
            <subelement>
                <random random="not-me">That</random>
            </subelement>
        </var>
    </variables>
    """
    tree = ET.fromstring(obj)
    assert ssg.build_cpe.extract_subelement(tree, 'id') == 'test'
    assert ssg.build_cpe.extract_subelement(tree, 'random') == 'not-me'
    # missing attribute names and element tag names both yield None
    assert ssg.build_cpe.extract_subelement(tree, 'missing') is None
    assert ssg.build_cpe.extract_subelement(tree, 'subelement') is None
def test_extract_env_obj():
    """extract_env_obj resolves a local variable's object_ref against the
    objects tree; unresolvable references yield None."""
    local_var_text = """
    <var>
        <subelement>
            <random object_ref="magical">elements</random>
        </subelement>
    </var>
    """
    local_var = ET.fromstring(local_var_text)

    local_var_missing_text = """
    <var>
        <subelement>
            <random object_ref="nothing">here</random>
        </subelement>
    </var>
    """
    local_var_missing = ET.fromstring(local_var_missing_text)

    objects_text = """
    <objects>
        <object id="something">something</object>
        <object id="magical">magical</object>
        <object id="here">here</object>
    </objects>
    """
    objects = ET.fromstring(objects_text)

    # object_ref="magical" matches the object with id="magical"
    present = ssg.build_cpe.extract_env_obj(objects, local_var)
    assert present is not None
    assert present.text == 'magical'

    # object_ref="nothing" has no matching object id
    missing = ssg.build_cpe.extract_env_obj(objects, local_var_missing)
    assert missing is None
def test_extract_referred_nodes():
    """extract_referred_nodes returns only the nodes whose id is actually
    referenced via the given attribute (unmatched references are dropped)."""
    tree_with_refs_text = """
    <references>
        <reference object_ref="something_borrowed" />
        <reference object_ref="something_missing" />
    </references>
    """
    tree_with_refs = ET.fromstring(tree_with_refs_text)

    tree_with_ids_text = """
    <objects>
        <object id="something_old">Brno</object>
        <object id="something_new">Boston</object>
        <object id="something_borrowed">Source Code</object>
        <object id="something_blue">Fedora</object>
    </objects>
    """
    tree_with_ids = ET.fromstring(tree_with_ids_text)

    # only "something_borrowed" exists on both sides
    results = ssg.build_cpe.extract_referred_nodes(tree_with_refs, tree_with_ids, 'object_ref')
    assert len(results) == 1
    assert results[0].text == 'Source Code'
|
import sys
import os
import click
from .. import shared
@shared.cli.command()
@click.argument("path", required=True)
@click.option("--kind", "-k", required=False)
@click.option("--force", "-f", is_flag=True, required=False)
@click.option("--symlink", "-s", is_flag=True, required=False)
@click.pass_context
def take(ctx, path, kind, force, symlink):
    """Import a file as a document.

    The base filename becomes the document title.
    Should be a text type, but we leave that to user.

    --force will cause a similarly titled document to be overwritten
    in the case of a name conflict.
    """
    yew = ctx.obj["YEW"]
    # import ipdb; ipdb.set_trace()
    # validate the path before touching the store
    if not os.path.exists(path):
        click.echo(f"path does not exist: {path}")
        sys.exit(1)
    if not os.path.isfile(path):
        click.echo(f"path is not a file: {path}")
        sys.exit(1)

    content = None
    # slurp file (a symlinked document reads from the source on access,
    # so no content is read here in that case)
    if not symlink:
        with click.open_file(path, "r", "utf-8") as f:
            content = f.read()

    if not kind:
        kind = "txt"
    # title is the path without its extension, with path separators
    # flattened to dashes
    # (removed fn/filename/file_extension locals: they were computed from
    # the basename but never used)
    title = os.path.splitext(path)[0]
    title = title.replace(os.sep, "-")

    # check if we have one with this title
    # the behaviour we want is for the user to continuously
    # ingest the same file that might be updated out-of-band
    # TODO: handle multiple titles of same name
    docs = yew.store.get_docs(name_frag=title, exact=True)
    if docs and not symlink:
        if len(docs) >= 1:
            if not force:
                click.echo("A document with this title exists already")
            if force or click.confirm(
                f"Overwrite existing document: {docs[0].name} ?", abort=True
            ):
                docs[0].put_content(content)
                sys.exit(0)

    if symlink:
        doc = yew.store.create_document(title, kind, symlink_source_path=path)
        click.echo(f"Symlinked: {doc.uid}")
    else:
        doc = yew.store.create_document(title, kind)
        doc.put_content(content)
|
import re
import core.functions
from cgi import escape
class MarkdownParser:
    """Small regex-driven Markdown-to-HTML renderer.

    Code blocks (``` ... ```) and table blocks (```table ... ```) are cut
    out first and re-inserted after the inline rules have run, so their
    contents are never mangled by the inline regexes.
    """

    def __init__(self, resource_path, **kwargs):
        self.resource_path = resource_path
        self.options = kwargs
        # raw contents captured from fenced blocks, re-inserted in render()
        self.codeblocks = []
        self.table_blocks = []
        self.tables = []

    def _render_italics(self, match):
        contents = match.group(1)
        return "<em>%s</em>" % (contents)

    def _render_bold(self, match):
        contents = match.group(1)
        return "<strong>%s</strong>" % (contents)

    def _render_link(self, match):
        label = match.group(1)
        url = match.group(2)
        return "<a target='_blank' href='%s'>%s</a>" % (url, label)

    def _render_header(self, match):
        return "<h2>%s</h2>" % (match.group(1))

    def _render_paragraph(self, match):
        text = match.group(1).strip()
        # drop whitespace-only paragraphs entirely
        if not text:
            return ""
        return "<p>%s</p>" % (text)

    def _render_list_item(self, match):
        return "<li class='blog'>%s</li>" % (match.group(1))

    def _render_list(self, match):
        return "<ul>%s</ul>" % (match.group(1))

    def _render_image(self, path, position):
        """Render an image resource; *position* becomes the CSS class."""
        url = core.functions.static_to_url(self.resource_path + path)
        return "<div class='img-wrapper'><img class='%s' src='%s'></img></div>" % (position.lower(), url)

    def _render_codeblock(self, code):
        # NOTE(review): 'escape' comes from 'from cgi import escape' at
        # module level; cgi.escape was removed in Python 3.8 — migrate to
        # html.escape(..., quote=False) when porting.
        return "<code>%s</code>" % escape(code.strip())

    def _replace_codeblock(self, match):
        """Stash a fenced code block and leave a placeholder behind."""
        self.codeblocks.append(match.group(1))
        return "%CODE_BLOCK_PLACEHOLDER%"

    def _replace_table(self, match):
        """Stash a table block and leave a placeholder behind."""
        self.table_blocks.append(match.group(1))
        return "%TABLE_PLACEHOLDER%"

    def _render_table_block(self, raw_table):
        """Render a CSV-like table block; the first row is the header."""
        raw_rows = [row.strip() for row in raw_table.strip().split("\n")]
        rows = []
        for raw_row in raw_rows:
            rows.append([col.strip() for col in raw_row.split(',')])
        headerContent = ''.join([('<th>%s</th>' % col) for col in rows[0]])
        html = '<thead><tr>%s</tr></thead><tbody>' % headerContent
        for row in rows[1:]:
            rowContent = ''.join([('<td>%s</td>' % col) for col in row])
            # BUG FIX: '</tbody>' used to be emitted inside every row
            # ('<tr>%s</tbody></tr>'), producing malformed HTML
            html += '<tr>%s</tr>' % rowContent
        html += '</tbody>'
        return '<table class="article-table">%s</table>' % (html)

    def _render_code(self, path, language):
        """Render an external code file (the *language* arg is unused)."""
        # context manager so the file handle is closed, not leaked
        with open("static/%s/%s" % (self.resource_path, path), "r") as f:
            code = f.read()
        return "<code>%s</code>" % escape(code)

    def _render_code_snippet(self, match):
        return "<span class='code-snippet'>%s</span>" % (match.group(1))

    def _get_resource_handlers(self):
        """Map resource-type names to their render methods."""
        return {
            'IMAGE': self._render_image,
            'CODE': self._render_code,
        }

    def _render_resource(self, match):
        """Dispatch a %% TYPE, arg1, arg2 %% resource to its handler."""
        type = match.group(1)
        arg1 = match.group(2)
        arg2 = match.group(3)
        handler = self._get_resource_handlers().get(type, False)
        if handler:
            return handler(arg1, arg2)
        else:
            return "<em style='color: red'>Unknown Resource: %s</em>" % (type)

    def apply_rules(self, markdown, rules):
        """Apply each regex -> replacement-function rule in turn."""
        new_markdown = markdown
        for (rule, replace_func) in rules.items():
            new_markdown = re.sub(
                rule,
                replace_func,
                new_markdown,
                flags=re.S
            )
        return new_markdown

    def clean_text_whitespace(self, text):
        """Strip leading/trailing whitespace from every line."""
        lines = text.split("\n")
        return "\n".join([line.strip() for line in lines])

    def render(self, markdown):
        """Render *markdown* to HTML.

        Rule passes run in three stages: structure (headers/list items),
        inline (paragraphs/links/bold/italics), then wrappers (lists and
        resources); fenced blocks are protected by placeholders throughout.
        """
        rules_1 = {
            r"\n([^\n]*?)\n(\-+)": self._render_header,
            r"[\s^\n]*?[\#][\#]([^\n]*?)\n": self._render_list_item,
            # r"[^`]`(.*?)`[^`]": self._render_code_snippet
        }
        rules_2 = {
            r"([^\n^<^>]+)\n": self._render_paragraph,
            r"\{(.*)\}\[(.*?)\]": self._render_link,
            r"([^\s]*?)\[(.*?)\]": self._render_link,
            r"\*\*([^\n]*?)\*\*": self._render_bold,
            r"\*([^\n]*?)\*": self._render_italics
        }
        rules_3 = {
            r"(<li((?!<p>).)*</li>)": self._render_list,
            r"\%\% (.+?), (.+?), (.+?) \%\%": self._render_resource,
        }
        # Remove table blocks (must run before the generic code-block rule,
        # since ```table would also match ```)
        markdown = re.sub(
            r"```table(.*?)```",
            self._replace_table,
            markdown,
            flags=re.S
        )
        # Remove codeblocks
        markdown = re.sub(
            r"```(.*?)```",
            self._replace_codeblock,
            markdown,
            flags=re.S
        )
        markdown = self.apply_rules(markdown, rules_1)
        markdown = self.apply_rules(markdown, rules_2)
        markdown = self.apply_rules(markdown, rules_3)
        # re-insert the protected blocks, one placeholder at a time
        for code in self.codeblocks:
            markdown = markdown.replace('%CODE_BLOCK_PLACEHOLDER%', self._render_codeblock(code), 1)
        for table in self.table_blocks:
            markdown = markdown.replace('%TABLE_PLACEHOLDER%', self._render_table_block(table), 1)
        return markdown
class MarkdownBlogPreviewParser(MarkdownParser):
    """MarkdownParser variant for blog previews: paragraphs become plain
    <span>s while images and headers are dropped entirely."""

    def _render_paragraph(self, match):
        text = match.group(1)
        return "<span>%s</span>" % text

    def _render_image(self, path, position):
        # previews carry no images
        return ""

    def _render_header(self, match):
        # previews carry no headers
        return ""
|
# -*- coding: utf8 -*-
__author__ = 'wangqiang'
'''
多进程方式的生成者-消费者模型
多进程的实现,借助外部存储(如redis)实现数据共享
pip install redis
'''
import redis
import multiprocessing
import time
import uuid
import random
def producer(name, share_cache, cache_key):
    '''
    Producer: runs the business logic and pushes messages onto the
    shared cache.

    :param name: producer name/identifier
    :param share_cache: shared cache (a redis client)
    :param cache_key: key of the shared cache queue
    :return:
    '''
    while True:
        # back-pressure: stop producing once the queue holds > 10000 items
        if share_cache.llen(cache_key) > 10000:
            time.sleep(random.randint(0, 1))
            break
        # the first part of the message marks its type; the second part
        # uses a uuid to fake the message payload
        mt = ['a', 'b', 'c']
        random.shuffle(mt)
        try:
            # todo: fill in real business logic
            msg = "{}#{}".format(mt[0], uuid.uuid1())
            # produce a message
            share_cache.lpush(cache_key, msg)
            print("{} produce {}".format(name, msg))
        except Exception as exp:
            # best-effort: a failed push is simply skipped
            pass
        # simulate communication latency
        time.sleep(random.randint(0, 1))
def consumer(name, share_cache, cache_key):
    '''
    Consumer: pops messages from the shared cache and runs the
    business logic on them.

    :param name: consumer name/identifier
    :param share_cache: shared cache (a redis client)
    :param cache_key: key of the shared cache queue
    :return:
    '''
    while True:
        # map for de-duplicating tasks by type
        # NOTE(review): task_map and get_times are re-initialized on every
        # iteration, so at most one task is ever held and the
        # "get_times >= 10" batch break below can never fire — they look
        # like they belong *before* the while loop; confirm the intent.
        task_map = {}
        get_times = 0
        try:
            if share_cache.llen(cache_key) > 0:
                task = share_cache.rpop(cache_key)
                if task:
                    task = task.decode()
                    # keep only the newest task per message type
                    parts = task.split("#")
                    task_map[parts[0]] = parts[1]
        except Exception as exp:
            # cache failure: stop this consumer
            break
        get_times += 1
        if get_times >= 10:
            break
        # execute the tasks; printing stands in for real work here
        if len(task_map) > 0:
            for mt, msg in task_map.items():
                # todo: print() stands in for the real business logic
                print("{} consume {}-{}".format(name, mt, msg))
        else:
            # queue empty: back off briefly
            time.sleep(random.randint(0, 1))
if __name__ == "__main__":
    # NOTE: non-default port and empty password — adjust for your redis setup
    redis_share = redis.Redis(host='127.0.0.1', port=6666, db=15, password="")
    cache_key = "price_message"
    # start several consumer processes
    for i in range(3):
        c = multiprocessing.Process(target=consumer, args=("c{}".format(i), redis_share, cache_key))
        c.start()
        print("consumer-{} started".format(i))
    # start several producer processes
    p1 = multiprocessing.Process(target=producer, args=("p01", redis_share, cache_key))
    p1.start()
    p2 = multiprocessing.Process(target=producer, args=("p02", redis_share, cache_key))
    p2.start()
print("producer started") |
from time import sleep
from data_sources.aa_residues import aa_residues
from data_sources.coguk_me import coguk_me
from data_sources.our_sequence_annotations import our_sequence_annotations
from data_sources.ruba_aa_change_effects import effects_of_aa_changes
from data_sources.ruba_variant_effects import effects_of_variants
from data_sources import covariants
from data_sources import phe_variants
from data_sources import uniprot
if __name__ == '__main__':
    # Refresh every data source in order, pausing 3 seconds between
    # consecutive runs (same order and same sleeps as before).
    steps = (
        covariants.run,
        phe_variants.run,
        coguk_me.run,
        our_sequence_annotations.run,
        uniprot.run,
        aa_residues.run,
        effects_of_aa_changes.run,
        effects_of_variants.run,
    )
    for index, step in enumerate(steps):
        if index:
            sleep(3)
        step()
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
# 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
# self.conv1 = nn.Conv2d( 3, 64 / split_count, kernel_size=3, padding=1)
# self.conv2 = nn.Conv2d( 64, 64 / split_count, kernel_size=3, padding=1)
# self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.conv3 = nn.Conv2d( 64, 128 / split_count, kernel_size=3, padding=1)
# self.conv4 = nn.Conv2d(128, 128 / split_count, kernel_size=3, padding=1)
# self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
# layersToSplit = [True, True, 'M', 128, True, 'M', 256, 256, 256, 'M', 512, 512, True, 'M', 512, 512, True, 'M'],
# layersToSplit = [True, True, False, False, True, False, False, False, False, False, False, False, True, False, False, False, True, False]
# Per-layer flags aligned index-for-index with cfg['D'] (18 entries):
# True marks a conv layer whose output channel count is divided by
# split_count; pooling ('M') entries are False.
layersToSplit = [True, True, False, True, True, False, True, True, True, False, True, True, True, False, True, True, True, False]
class VGG16(nn.Module):
    """VGG-16 variant whose conv/linear widths can be divided by
    ``split_count`` on the layers flagged in ``layersToSplit``.

    After each split conv layer (except the last conv block, see
    ``forward``) the output is tiled back to the full channel count with
    ``repeat`` so the next layer still receives its expected number of
    input channels.
    """

    def __init__(self, split_count=1, num_classes=1000, init_weights=True):
        super(VGG16, self).__init__()
        self.split_count = split_count
        self.features = nn.ModuleList([])
        in_channels = 3
        # for v in cfg['D']:
        # print(len(cfg['D']))
        # print(len(layersToSplit))
        # Build the feature extractor from cfg['D']: 'M' entries become
        # max-pool layers, numbers become 3x3 conv layers whose output
        # width is divided by split_count when flagged.
        for i in range(len(cfg['D'])):
            v = cfg['D'][i]
            if v == 'M':
                self.features.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                self.features.append(nn.Conv2d(in_channels, int(v / split_count) if layersToSplit[i] else v, kernel_size=3, padding=1))
                # in_channels stays the full width: forward() repeats the
                # split output back to v channels before the next conv.
                in_channels = v
        # split1side = int(split_count**0.5)
        # inChannels = int(512 / split1side)
        # print("linear intake features: %d"%int(512 * 7 * 7 / split1side))
        # The final conv block is NOT repeated in forward(), so the first
        # linear layer takes the split channel count (512*7*7/split_count).
        self.classifier = nn.Sequential(
            nn.Linear(int(512 * 7 * 7 / split_count), int(4096)),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(int(4096), int(4096 / split_count)),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(int(4096 / split_count), int(num_classes)),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        for i in range(len(self.features)):
            x = self.features[i](x)
            if cfg['D'][i] != 'M':
                # Conv layers are built without activation; apply ReLU here.
                x = torch.nn.functional.relu(x, inplace=True)
                if layersToSplit[i] and i < len(cfg['D']) - 2 and self.split_count > 1:
                    # x = torch.repeat_interleave(x, self.split_count, dim=1)
                    # Tile the split channels back to full width so the next
                    # layer's in_channels match; skipped for the final conv
                    # (index len-2) so the classifier sees the split width.
                    x = x.repeat(1, self.split_count, 1, 1)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Standard VGG initialisation: He init for convs, N(0, 0.01) for
        # linear layers, unit weight / zero bias for batch norm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
# Layer configurations from the VGG paper: 'A'=VGG11, 'B'=VGG13, 'D'=VGG16,
# 'E'=VGG19. Numbers are conv output channel counts, 'M' is a 2x2 max-pool.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg16(splitCount=1, pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        splitCount (int): divisor applied to the widths of the layers
            flagged in ``layersToSplit`` (1 = standard VGG16).
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    # model = VGG(make_layers(cfg['D'], splitCount), split_count=splitCount, **kwargs)
    model = VGG16(split_count=splitCount, **kwargs)
    if pretrained:
        # NOTE(review): the torchvision checkpoint is keyed for a
        # Sequential-based VGG; VGG16 here uses a ModuleList and a custom
        # classifier, and split widths change tensor shapes — confirm this
        # strict load actually succeeds, especially for splitCount > 1.
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
# Fibonacci sequence printer: shows the first n terms.
print('-=-'*8)
print('Sequência de Fibonacci')
print('-=-'*8)
# Each term is the sum of the previous two: f(n) = f(n-1) + f(n-2).
n = int(input('Quantos termos você quer ver? '))
prev, curr = 0, 1
print('~'*30)
# The first two terms are always printed, so the loop continues from term 3.
print(f'{prev} -> {curr}', end='')
for _ in range(3, n + 1):
    # Slide the pair one step along the sequence.
    prev, curr = curr, prev + curr
    print(f' -> {curr}', end='')
print(' -> Fim')
import ctypes
import epicscorelibs.path
# Necessary unless [DY]LD_LIBRARY_PATH is set for epicscorelibs
# Load order presumably matters (dbCore builds on Com) — confirm before
# reordering.
ctypes.CDLL(epicscorelibs.path.get_lib("Com"))
ctypes.CDLL(epicscorelibs.path.get_lib("dbCore"))
# Remove the helper names so they do not leak into this module's namespace.
del epicscorelibs.path
del ctypes
# from . import iocsh, macro
# __all__ = ["iocsh", "macro"]
|
import unittest
from unittest.mock import MagicMock, call
import kleat.misc.settings as S
from kleat.hexamer.hexamer import extract_seq
def test_with_skipped_region_for_plus_strand_clv():
    """
    AAA <-tail of suffix contig
    ACGG--GC┘|| <-suffix contig with skip
    0123 456789 <-contig coord
    | 1
    ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACGGTTGCGGT... <-genome
    789012345678 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr1'
    ctg.query_sequence = 'ACGGGCAAA'
    # Cigar 4M 2N 2M 3S: a 2-base reference skip splits the match in two.
    ctg.cigartuples = ((S.BAM_CMATCH, 4), (S.BAM_CREF_SKIP, 2), (S.BAM_CMATCH, 2), (S.BAM_CSOFT_CLIP, 3))
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    # The skipped reference bases ('TT') are filled in from the genome.
    ref_fa.fetch = MagicMock(return_value='TT')
    kw = dict(contig=ctg, strand='+', ref_clv=14, ref_fa=ref_fa, ctg_clv=5)
    assert extract_seq(**kw) == 'ACGGTTGC'
    assert extract_seq(window=3, **kw) == 'TGC'
    # The skip spans reference coordinates [11, 13).
    ref_fa.fetch.assert_called_with('chr1', 11, 13)
def test_with_1_base_insertion_for_plus_strand_clv():
    """
    T
    ┬ AAA <-tail of suffix contig
    ACGG GC┘|| <-suffix contig with skip
    0123 56789 <-contig coord
    | || |
    ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACGG GCXXX... <-genome
    7890 1234567 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr1'
    ctg.query_sequence = 'ACGGTGCAAA'
    # Fixed: the insertion length must be 1, not 5 — query-consuming cigar
    # operations must sum to len('ACGGTGCAAA') == 10 (4M + 1I + 2M + 3S),
    # and the diagram shows a single inserted 'T'.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 4),
        (S.BAM_CINS, 1),
        (S.BAM_CMATCH, 2),
        (S.BAM_CSOFT_CLIP, 3),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    kw = dict(contig=ctg, strand='+', ref_clv=12, ref_fa=ref_fa, ctg_clv=6)
    assert extract_seq(**kw) == 'ACGGTGC'
    assert extract_seq(window=4, **kw) == 'GTGC'
def test_with_5_base_inserted_region_for_plus_strand_clv():
    """
    AATCC
    ┬ AA <-tail of suffix contig
    ACGG GCG┘| <-suffix contig with skip
    0123 9012 <-contig coord
    | |1| |
    ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACGG GCGXXX... <-genome
    7890 1234567 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr1'
    ctg.query_sequence = 'ACGGAATCCGCGAA'
    # Cigar 4M 5I 3M 2S: query-consuming lengths sum to len(query) == 14;
    # the 5-base insertion 'AATCC' is absent from the reference.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 4),
        (S.BAM_CINS, 5),
        (S.BAM_CMATCH, 3),
        (S.BAM_CSOFT_CLIP, 2),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    kw = dict(contig=ctg, strand='+', ref_clv=13, ref_fa=ref_fa, ctg_clv=11)
    # Inserted bases are kept in the extracted sequence.
    assert extract_seq(**kw) == 'ACGGAATCCGCG'
    assert extract_seq(window=5, **kw) == 'CCGCG'
def test_with_deleted_region_for_plus_strand_clv():
    """
    AAA <-tail of suffix contig
    ACGG__GC┘|| <-suffix contig with skip
    0123 456789 <-contig coord
    | 1
    ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACGGTTGCGGT... <-genome
    789012345678 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr1'
    ctg.query_sequence = 'ACGGGCAAA'  # len 9
    # Cigar 4M 2D 2M 3S: the deletion consumes reference but not query.
    ctg.cigartuples = ((S.BAM_CMATCH, 4), (S.BAM_CDEL, 2), (S.BAM_CMATCH, 2), (S.BAM_CSOFT_CLIP, 3))
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    kw = dict(contig=ctg, strand='+', ref_clv=14, ref_fa=ref_fa, ctg_clv=5)
    # Unlike a skip (N), deleted bases are NOT filled back in from the
    # reference — the extracted sequence simply omits them.
    assert extract_seq(**kw) == 'ACGGGC'
    assert extract_seq(window=3, **kw) == 'GGC'
def test_with_two_skipped_region_for_plus_strand_clv():
    """
    AAA <-tail of suffix contig
    A-TT--GC┘|| <-suffix contig with skip
    0 12 345678 <-contig coord
    |
    ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACTTAAGCGGT... <-genome
    789012345678 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr3'
    ctg.query_sequence = 'ATTGCAAA'
    # Cigar 1M 1N 2M 2N 2M 3S: two separate reference skips.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 1),
        (S.BAM_CREF_SKIP, 1),
        (S.BAM_CMATCH, 2),
        (S.BAM_CREF_SKIP, 2),
        (S.BAM_CMATCH, 2),
        (S.BAM_CSOFT_CLIP, 3),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    # extract_seq walks the cigar right-to-left, so the 3'-most skip
    # ('AA') is fetched before the 5'-most one ('C').
    ref_fa.fetch.side_effect = ['AA', 'C']
    kw = dict(contig=ctg, strand='+', ref_clv=14, ref_fa=ref_fa, ctg_clv=4)
    assert extract_seq(**kw) == 'ACTTAAGC'
    assert ref_fa.fetch.call_count == 2
    ref_fa.fetch.assert_has_calls([call('chr3', 11, 13), call('chr3', 8, 9)])

    # use a new mock, couldn't make ref_fa.reset_mock() work
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    ref_fa.fetch.return_value = 'AA'
    kw.update(ref_fa=ref_fa)
    # A 3-base window only reaches the first skip, so only one fetch.
    assert extract_seq(window=3, **kw) == 'AGC'
    assert ref_fa.fetch.call_count == 1
def test_with_skipped_region_and_insertions_mismatches_for_plus_strand_clv():
    """
    G
    ┬ AAA <-tail of suffix contig
    A TA--GCG┘|| <-suffix contig with skip
    0 23 456789 <-contig coord
    | |x | |
    ctg_clv ^ ^init_ctg_idx <-contig coord
    ...A TTCCGCGXXX... <-genome
    7 8901234567 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr3'
    ctg.query_sequence = 'AGTAGCGAAA'
    # Cigar 1M 1I 2M 2N 3M 3S: an inserted 'G', a mismatching match block
    # ('TA' vs reference 'TT'), and a 2-base skip.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 1),
        (S.BAM_CINS, 1),
        (S.BAM_CMATCH, 2),
        (S.BAM_CREF_SKIP, 2),
        (S.BAM_CMATCH, 3),
        (S.BAM_CSOFT_CLIP, 3),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    # Reference bases returned for the skipped region.
    ref_fa.fetch.return_value = 'CC'
    kw = dict(contig=ctg, strand='+', ref_clv=14, ref_fa=ref_fa, ctg_clv=6)
    # Match blocks come from the contig (mismatches included), skips from
    # the reference.
    assert extract_seq(**kw) == 'AGTACCGCG'
    ref_fa.fetch.assert_called_once_with('chr3', 10, 12)
    assert extract_seq(window=1, **kw) == 'G'
    assert extract_seq(window=3, **kw) == 'GCG'
    assert extract_seq(window=8, **kw) == 'GTACCGCG'
def test_with_skipped_and_deleted_regions_for_plus_strand_clv():
    """
    AAA <-tail of suffix contig
    A_TT--GC┘|| <-suffix contig with skip
    0 12 345678 <-contig coord
    |
    ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACTTAAGCGGT... <-genome
    789012345678 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr3'
    ctg.query_sequence = 'ATTGCAAA'
    # Cigar 1M 1D 2M 2N 2M 3S: a 1-base deletion followed by a 2-base skip.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 1),
        (S.BAM_CDEL, 1),
        (S.BAM_CMATCH, 2),
        (S.BAM_CREF_SKIP, 2),
        (S.BAM_CMATCH, 2),
        (S.BAM_CSOFT_CLIP, 3),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    # Only the skip is filled from the reference; the deletion is dropped.
    ref_fa.fetch.return_value = 'AA'
    kw = dict(contig=ctg, strand='+', ref_clv=14, ref_fa=ref_fa, ctg_clv=4)
    assert extract_seq(**kw) == 'ATTAAGC'
    ref_fa.fetch.assert_called_once_with('chr3', 11, 13)
    assert extract_seq(window=5, **kw) == 'TAAGC'
def test_with_three_skipped_region_and_mismatches_for_plus_strand_clv():
    """
    AA <-tail of suffix contig
    A---CC-GTA--GC┘| <-suffix contig with skip
    0|||12|345||678 <-contig coord
    |||x | x || |
    |||x ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACTGTCAGAATTGCX... <-genome
    789012345678901 <-genome coord
    1 |2|
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr3'
    ctg.query_sequence = 'ACCGTAGCAA'
    # Cigar 1M 3N 2M 1N 3M 2N 2M 2S: three separate reference skips with
    # mismatches inside the match blocks.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 1),
        (S.BAM_CREF_SKIP, 3),
        (S.BAM_CMATCH, 2),
        (S.BAM_CREF_SKIP, 1),
        (S.BAM_CMATCH, 3),
        (S.BAM_CREF_SKIP, 2),
        (S.BAM_CMATCH, 2),
        (S.BAM_CSOFT_CLIP, 2),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    # Skips are fetched right-to-left: 'TT', then 'A', then 'CTG'.
    ref_fa.fetch.side_effect = ['TT', 'A', 'CTG']
    kw = dict(contig=ctg, strand='+', ref_clv=20, ref_fa=ref_fa, ctg_clv=7)
    assert extract_seq(**kw) == 'ACTGCCAGTATTGC'
    assert ref_fa.fetch.call_count == 3
    ref_fa.fetch.assert_has_calls([call('chr3', 17, 19), call('chr3', 13, 14), call('chr3', 8, 11)])

    # Fresh mock: a 9-base window only reaches the two rightmost skips.
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    ref_fa.fetch.side_effect = ['TT', 'A']
    kw.update(ref_fa=ref_fa)
    assert extract_seq(window=9, **kw) == 'CAGTATTGC'
    assert ref_fa.fetch.call_count == 2
def test_with_indel_and_skipped_regions_and_mismatches_for_plus_strand_clv():
    """
    TC
    ┬ AA <-tail of suffix contig
    A---CC GTA__GC┘| <-suffix contig with skip
    0 12 567 8901 <-contig coord
    x x |1
    x ctg_clv^ ^init_ctg_idx <-contig coord
    ...ACTGTC GAATTGC... <-genome
    789012 345678901 <-genome coord
    1 | |
    ref_clv^ ^init_ref_idx
    """
    # Mocked aligned contig; only the attributes extract_seq uses are set.
    ctg = MagicMock()
    ctg.reference_name = 'chr3'
    ctg.query_sequence = 'ACCTCGTAGCAA'
    # Cigar 1M 3N 2M 2I 3M 2D 2M 2S: a skip, an insertion ('TC') and a
    # deletion all in one alignment; query-consuming ops sum to 12.
    ctg.cigartuples = (
        (S.BAM_CMATCH, 1),
        (S.BAM_CREF_SKIP, 3),
        (S.BAM_CMATCH, 2),
        (S.BAM_CINS, 2),
        (S.BAM_CMATCH, 3),
        (S.BAM_CDEL, 2),
        (S.BAM_CMATCH, 2),
        (S.BAM_CSOFT_CLIP, 2),
    )
    ref_fa = MagicMock()
    ref_fa.get_reference_length.return_value = 100
    # Only the skipped region is filled from the reference ('CTG');
    # the deletion is dropped and the insertion is kept.
    ref_fa.fetch.return_value = 'CTG'
    kw = dict(contig=ctg, strand='+', ref_clv=19, ref_fa=ref_fa, ctg_clv=9)
    assert extract_seq(**kw) == 'ACTGCCTCGTAGC'
    ref_fa.fetch.assert_called_once_with('chr3', 8, 11)
    assert extract_seq(window=10, **kw) == 'GCCTCGTAGC'
|
"""Definition of wrapperobject CPython's structure in ctypes. With this you can
get into wrapperobject internals without going to the C level.
See descrobject.c for reference:
http://svn.python.org/view/python/trunk/Objects/descrobject.c?view=markup
Note that not all fields are defined, only those that I needed.
"""
from ctypes import c_long, py_object, cast, Structure, POINTER
# Alias for CPython's Py_ssize_t. NOTE(review): c_long is 32-bit on 64-bit
# Windows (LLP64) — if that platform matters, ctypes.c_ssize_t would be the
# safe choice; confirm target platforms before relying on this.
ssize_t = c_long
class PyWrapperObject(Structure):
    # Mirrors CPython's wrapperobject layout: PyObject_HEAD (ob_refcnt and
    # ob_type) followed by the descriptor and the instance the wrapper is
    # bound to. Per the module docstring, only the fields this module
    # actually needs are declared.
    _fields_ = [("ob_refcnt", ssize_t),
                ("ob_type", py_object),
                ("descr", py_object),
                ("self", py_object)]
def _wrapper_internals(wrapper):
    """Reinterpret *wrapper*'s memory (via its id/address) as a
    PyWrapperObject structure and return it."""
    struct_ptr = cast(id(wrapper), POINTER(PyWrapperObject))
    return struct_ptr.contents
def get_wrapper_self(wrapper):
    """Return the instance a method-wrapper object is bound to."""
    internals = _wrapper_internals(wrapper)
    return internals.self
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-05-18 09:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``is_users_tastes`` boolean flag to ``FilterField``
    (defaults to False for all existing rows)."""

    dependencies = [
        ('filter', '0022_auto_20180413_1120'),
    ]

    operations = [
        migrations.AddField(
            model_name='filterfield',
            name='is_users_tastes',
            field=models.BooleanField(default=False, help_text='If checked user will see the section named as this filter field with options to choose his own tastes.', verbose_name='Lets user to choose his tastes in profile.'),
        ),
    ]
|
import os
# OAuth Settings
AUTH0_CALLBACK_URL = 'http://localhost:8000/callback'
AUTH0_CLIENT_ID = os.environ.get('AUTH0_CLIENT_ID')
AUTH0_CLIENT_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
# Flask settings
FLASK_SERVER_ADDRESS = '0.0.0.0'
FLASK_SERVER_PORT = 5000
FLASK_SERVER_NAME = '{0}:{1}'.format(FLASK_SERVER_ADDRESS, FLASK_SERVER_PORT)
FLASK_DEBUG = False # Do not use debug mode in production
# Flask-Restplus settings
RESTPLUS_SWAGGER_UI_DOC_EXPANSION = 'list'
RESTPLUS_VALIDATE = True
RESTPLUS_MASK_SWAGGER = False
RESTPLUS_ERROR_404_HELP = True
# SQLAlchemy settings
DB_ADDRESS = os.environ.get('DB_ADDRESS')
DB_NAME = os.environ.get('DB_NAME')
DB_USERNAME = os.environ.get('DB_USERNAME')
DB_PASSWORD = os.environ.get('DB_PASSWORD')
DB_ENGINE = 'mssql'
if DB_ENGINE == 'mssql': #pragma: no cover
SQLALCHEMY_DATABASE_URI = 'mssql+pymssql://{}:{}@{}/{}'.format(DB_USERNAME, DB_PASSWORD, DB_ADDRESS, DB_NAME)
elif DB_ENGINE == 'mysql': #pragma: no cover
SQLALCHEMY_DATABASE_URI = 'mysql+pymssql://{}:{}@{}/{}'.format(DB_USERNAME, DB_PASSWORD, DB_ADDRESS, DB_NAME)
elif DB_ENGINE == 'sqlite': #pragma: no cover
SQLALCHEMY_DATABASE_URI = 'sqlite://{}:{}@{}/{}'.format(DB_USERNAME, DB_PASSWORD, DB_ADDRESS, DB_NAME)
else: #pragma: no cover
raise RuntimeError("Unsupported DB_ENGINE: {}".format(DB_ENGINE))
SQLALCHEMY_POOL_SIZE = 5
SQLALCHEMY_MAX_OVERFLOW = 40
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
# URL Prefix for API routes
URL_PREFIX = '/api'
# File Upload location
UPLOAD_FOLDER = '/tmp'
# Mandatory Environment Variables
MANDATORY_ENVIRONMENT_VARS = []
|
import numpy as np
from ci_framework import FlopyTestSetup, base_test_dir
import flopy
from flopy.utils.util_array import Util2d
# Per-test scratch directory root, derived from this file's name under ./temp.
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
def test_rchload():
    """Round-trip ModflowRch recharge arrays through write_input/load.

    Two models are built with different cnstnt multipliers; each must
    reload with recharge arrays identical to the originals.
    """
    model_ws = f"{base_dir}_test_rchload"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    nlay = 2
    nrow = 3
    ncol = 4
    nper = 2
    # The two original copy-pasted model blocks are factored into a helper.
    _roundtrip_rch("rchload1", model_ws, nlay, nrow, ncol, nper, cnstnt=1.0)
    _roundtrip_rch("rchload2", model_ws, nlay, nrow, ncol, nper, cnstnt=2.0)


def _roundtrip_rch(name, model_ws, nlay, nrow, ncol, nper, cnstnt):
    """Build a model with a random recharge Util2d, write it to disk,
    reload it, and assert every stress period's recharge matches."""
    m = flopy.modflow.Modflow(name, model_ws=model_ws)
    dis = flopy.modflow.ModflowDis(
        m, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper
    )
    a = np.random.random((nrow, ncol))
    rech = Util2d(
        m, (nrow, ncol), np.float32, a, "rech", cnstnt=cnstnt, how="openclose"
    )
    rch = flopy.modflow.ModflowRch(m, rech={0: rech})
    m.write_input()
    # Load the model back; stress periods after 0 reuse period 0's array,
    # so every period must compare equal to the original.
    ml = flopy.modflow.Modflow.load(f"{name}.nam", model_ws=model_ws)
    a1 = rech.array
    for per in range(nper):
        assert np.allclose(a1, ml.rch.rech[per].array)


if __name__ == "__main__":
    test_rchload()
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_gdc(conf):
    # Locate the 'gdc' binary, store it in conf.env.D, and abort configure
    # if the program found does not identify itself as GDC in --version.
    conf.find_program('gdc',var='D')
    out=conf.cmd_and_log([conf.env.D,'--version'])
    if out.find("gdc ")==-1:
        conf.fatal("detected compiler is not gdc")
@conf
def common_flags_gdc(conf):
    # Populate conf.env with the command-line flag patterns used by the
    # waf D tool when driving GDC (compile, link, include, lib flags).
    v=conf.env
    v['DFLAGS']=[]
    v['D_SRC_F']=['-c']
    v['D_TGT_F']='-o%s'
    # GDC links through the compiler driver itself.
    v['D_LINKER']=v['D']
    v['DLNK_SRC_F']=''
    v['DLNK_TGT_F']='-o%s'
    v['DINC_ST']='-I%s'
    v['DSHLIB_MARKER']=v['DSTLIB_MARKER']=''
    v['DSTLIB_ST']=v['DSHLIB_ST']='-l%s'
    v['DSTLIBPATH_ST']=v['DLIBPATH_ST']='-L%s'
    v['LINKFLAGS_dshlib']=['-shared']
    # Interface-file ("header") generation flags.
    v['DHEADER_ext']='.di'
    v.DFLAGS_d_with_header='-fintfc'
    v['D_HDR_F']='-fintfc-file=%s'
def configure(conf):
    # Standard waf tool entry point: find GDC, load the generic ar/d
    # tools, then apply GDC-specific and platform-specific flags.
    conf.find_gdc()
    conf.load('ar')
    conf.load('d')
    conf.common_flags_gdc()
    conf.d_platform_flags()
|
from elasticsearch import Elasticsearch
from elastic_feeder.csv_reader import CsvReader
from elastic_feeder.exception import ElkConnectionError
from elastic_feeder.elastic import Elastic
from elasticsearch.exceptions import AuthenticationException
from elastic_feeder.helper import logger
from http import HTTPStatus
import requests
import sys
class FeedElastic:
    def __init__(self, host: str, port: int, filename: str, index: str, properties: dict=None, http_auth: list=None):
        """
        FeedElastic class is used to create and insert data from csv into Elasticsearch

        param :: host : Elasticsearch instance ip address or hostname
        param :: port : Elasticsearch instance port number
        param :: filename : CSV file full path
        param :: index : Index name to create indice in Elasticsearch
        param :: properties : optional index mapping properties
        param :: http_auth : optional (username, password) credentials
        """
        self.host = host
        self.port = port
        self.filename = filename
        self.index = index
        self.properties = properties
        self.http_auth = http_auth

    def read_csv(self):
        """
        Read csv file and extract headers for creation index mapping properties.
        By default it will create all properties types as `search_as_you_type`
        """
        self.csv_obj = CsvReader(self.filename)
        self.csv_headers = self.csv_obj.csv_header

    def es_init(self):
        """
        Initialize Elasticsearh object with default request_timeout=30
        """
        self.es = Elasticsearch(f"http://{self.host}:{self.port}", request_timeout=30, http_auth=self.http_auth)

    def check_connection(self):
        """
        Checking Elasticsearch connection and authentication.
        Exits the process (non-zero) when the cluster is unreachable or
        the credentials are rejected.
        """
        # Fixed: Elasticsearch.ping() returns False on failure instead of
        # raising, so the original try/except never fired and connection
        # failures passed silently. Check the boolean result instead.
        if not self.es.ping():
            logger.error("Checking connection failed. Host or port is unavailable")
            try:
                # Probe over plain HTTP to distinguish bad credentials
                # from an unreachable host.
                response = requests.get(f"http://{self.host}:{self.port}", auth=self.http_auth)
                if response.status_code == HTTPStatus.UNAUTHORIZED:
                    raise Exception("Authentication failed. Username or password is incorrect")
            except Exception as err:
                logger.error(err, exc_info=True)
                # Fixed: exit status was 0 (success) on a hard failure.
                sys.exit(1)

    def index_create(self):
        """
        Create Elasticsearch index
        """
        self.elastic = Elastic(self.csv_headers, es_instance=self.es, index=self.index, properties=self.properties)

    def generate_data(self):
        """
        Obtain data from csv object. This function will yield rows.
        """
        self.gen_data = self.csv_obj.generate_data()

    def bulk_insert(self):
        """
        Bulk insert data from csv object.
        """
        self.elastic.bulk_insert(self.gen_data)

    def run(self):
        """
        Run method is used to consequently run class method to automate data feeding into Elasticsearch
        """
        self.es_init()
        self.check_connection()
        self.read_csv()
        self.index_create()
        self.generate_data()
        self.bulk_insert()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to create the directory structure of each country and three of the "orig" files.
Typically you would run this file from a command line like this:
ipython3.exe -i -- /deploy/cbmcfs3_runner/scripts/orig/copy_orig_files.py
"""
# Third party modules #
# First party modules #
from autopaths import Path
# Constants #
base_path      = Path("/forbiomod/EFDM/CBM/")
cbm_data_repos = Path("/repos/cbmcfs3_data/")

###############################################################################
# Two-letter country codes of the 26 member states processed.
countries = ['AT', 'BE', 'BG', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GR',
             'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'NL', 'PL', 'PT', 'RO',
             'SE', 'SI', 'SK', 'UK']

# NOTE(review): all_paths is never referenced below — confirm whether it is
# vestigial or consumed by another script.
all_paths = """
/orig/silviculture.sas
/orig/calibration.mdb
/orig/aidb_eu.mdb
"""

for code in countries:
    # Get 4 directories #
    cbm_data_dir   = cbm_data_repos + code + '/'
    orig_data_dir  = cbm_data_dir + 'orig' + '/'
    forbiomod_dir  = base_path + code + '/'
    from_calib_dir = forbiomod_dir + 'from_CBM_calibration/'
    # Get 3 files #
    # NOTE(review): these are glob patterns built as autopaths Path objects;
    # presumably .copy() resolves each pattern to a single match — confirm
    # autopaths semantics and what happens on zero/multiple matches.
    aidb             = from_calib_dir + '/Archive*.mdb'
    calibration_mdb  = from_calib_dir + '/%s*.mdb' % code
    silviculture_sas = from_calib_dir + '/*.sas'
    # Create destination #
    orig_data_dir.create_if_not_exists()
    # Copy #
    aidb.copy( orig_data_dir + 'aidb_eu.mdb')
    calibration_mdb.copy( orig_data_dir + 'calibration.mdb')
    silviculture_sas.copy(orig_data_dir + 'silviculture.sas')
|
import numpy as np
import matplotlib.pyplot as plt
import scprep
import pandas as pd
from TrajectoryNet.dataset import EBData
from EB_dataset_prior import get_prior_EB
from gp_sinkhorn.SDE_solver import solve_sde_RK
from gp_sinkhorn.MLE_drift import *
from gp_sinkhorn.utils import plot_trajectories_2
import torch
from celluloid import Camera
from IPython.display import HTML
import math
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--sigma", type=float, default=1)
parser.add_argument("--decay-sigma", type=float, default=1)
parser.add_argument("--iteration", type=int, default=1)
parser.add_argument("--sparse", type=int, default=0)
parser.add_argument("--start-frame", type=int, default=0)
parser.add_argument("--end-frame", type=int, default=4)
parser.add_argument("--log-dir",type=str,default="./../assets/result_dump")
parser.add_argument("--gp-prior",type=int,default=0)

# Parse arguments
args = vars(parser.parse_args())

# Save config file (records the exact arguments used for this run)
f = open(args["log_dir"]+"/config.txt","w")
f.write( str(args) )
f.close()

# Embryoid-body dataset, first 5 principal components.
ds = EBData('pcs', max_dim=5)
fig, ax = plt.subplots(1,1)
scprep.plot.scatter2d(ds.get_data(), c='Gray', alpha=0.1, ax=ax)

# Index ranges of the samples belonging to the start and end time frames.
frame_0_start, frame_0_end = np.where(ds.labels == args["start_frame"])[0][0], np.where(ds.labels == args["start_frame"])[0][-1]
frame_4_start, frame_4_end = np.where(ds.labels == args["end_frame"])[0][0], np.where(ds.labels == args["end_frame"])[0][-1]
print("Args ",str(args))
X_0_f = ds.get_data()[frame_0_start:frame_0_end]
X_1_f = ds.get_data()[frame_4_start:frame_4_end]

# Subsample terminals
#perm_0 = np.random.permutation(np.arange(len(X_0_f)))
#perm_1 = np.random.permutation(np.arange(len(X_1_f)))
#k = 20
#X_0 = torch.tensor(X_0_f[perm_0][:k])
#X_1 = torch.tensor(X_1_f[perm_1][:k])
X_0 = torch.tensor(X_0_f)
X_1 = torch.tensor(X_1_f)

# SDE Solver config
# NOTE(review): sigma here is unused — MLE_IPFP below receives
# sigma=args["sigma"] instead; confirm this local is vestigial.
sigma = 0.5
dt = 0.05
N = int(math.ceil(1.0/dt))

# IPFP init config
prior_X_0 = None

# Inducing points approximation config
data_inducing_points = 10
time_inducing_points = N # No time reduction
num_data_points_prior = 50
num_time_points_prior = N
if args["gp_prior"]:
    drift_prior = get_prior_EB()
else:
    drift_prior = None

# sparse enables the nystrom method which is just a low rank approximation of the kernel matrix using
# random subsampling, should not affect interpretability much, ive tested it in all our experiments
# works surprisingly well
result = MLE_IPFP(
    X_0,X_1,N=N,sigma=args["sigma"], iteration=args["iteration"], sparse=args["sparse"],
    num_data_points=data_inducing_points, num_time_points=time_inducing_points, prior_X_0=prior_X_0,
    num_data_points_prior=num_data_points_prior, num_time_points_prior=num_time_points_prior,decay_sigma=args["decay_sigma"],
    log_dir=args["log_dir"],prior_drift=drift_prior,gp_mean_prior_flag=args["gp_prior"],verbose=1,
)

# Plot trajectories (first two PCA dimensions of each sampled path)
for i in range(len(result[-1][1])):
    # import pdb; pdb.set_trace()
    plt.plot(result[-1][1][i,:,0].cpu().detach().numpy(), result[-1][1][i,:,1].cpu().detach().numpy())
scprep.plot.scatter2d(X_0_f, c='Blue', alpha=0.1, ax=ax)
scprep.plot.scatter2d(X_1_f, c='Red', alpha=0.1, ax=ax)
scprep.plot.scatter2d(X_1.detach().cpu().numpy(), c='Green', alpha=0.7, ax=ax)
plt.savefig("../assets/trajectories_EB.png")
|
# Demo: the same digits held in three container types.
tup_numbers = (1,2,3,4,5,6,7)
lst_numbers = [1,2,3,4,5,6,7,1,1,1,1,1,1]
str_numbers = '1234567'
# set() accepts any iterable directly, so the intermediate list() copy
# (set(list(str_numbers))) was redundant.
numbers = set(str_numbers)
print(numbers)
# use beautifulsoup to scrap the following cnbc rss feed https://www.cnbc.com/id/100003114/device/rss/rss.html
from typing import List
import requests
from bs4 import BeautifulSoup
def get_feed_data(request_url: str = 'https://www.cnbc.com/id/100003114/device/rss/rss.html'):
    """Fetch an RSS feed and return its <item> elements.

    Defaults to the CNBC investing feed. Raises requests.HTTPError for
    non-success responses.
    """
    response = requests.get(request_url)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "xml")
    return soup.findAll('item')
def parse_cnbc_feed(feed_items: List[dict]):
    """Convert CNBC feed <item> elements into plain dictionaries.

    Items missing an expected child element are reported to stdout and
    skipped rather than aborting the whole parse.
    """
    parsed = []
    for entry in feed_items:
        try:
            record = {
                'title': entry.title.text,
                'link': entry.link.text,
                'description': entry.description.text,
                'pub_date': entry.pubDate.text,
                'guid': entry.guid.text
            }
        except AttributeError as exc:
            print(exc)
        else:
            parsed.append(record)
    return parsed
def rm_dups_from_list(list_of_dicts: List[dict]):
    """Remove duplicate dictionaries, keeping the first occurrence.

    Two dicts count as duplicates when tuple(d.items()) compares equal
    (values must be hashable), matching the previous behaviour. Unlike
    the previous set-based version, the original feed order is preserved
    instead of coming back in arbitrary set-iteration order.
    """
    unique = {}
    for entry in list_of_dicts:
        # setdefault keeps the first dict seen for each key.
        unique.setdefault(tuple(entry.items()), entry)
    return list(unique.values())
def parse_the_guardian_feed(feed_items: List[dict]):
    """Convert Guardian feed <item> elements into plain dictionaries,
    including each item's list of categories (text plus domain attribute).
    """
    parsed = []
    for entry in feed_items:
        # Collect every <category> child as {'category': ..., 'domain': ...}.
        categories = [
            {'category': tag.text, "domain": tag["domain"]}
            for tag in entry.findAll('category')
        ]
        parsed.append({
            'title': entry.title.text,
            'link': entry.link.text,
            'description': entry.description.text,
            'pub_date': entry.pubDate.text,
            'guid': entry.guid.text,
            "categories": categories
        })
    return parsed
def cnbc_article_to_embed(cnbc_article: dict):
    """Map a parsed CNBC article dict onto a Discord embed payload."""
    # NOTE: a "timestamp" field was deliberately left commented out in the
    # original and is therefore omitted here as well.
    return {
        "title": cnbc_article['title'],
        "description": cnbc_article['description'],
        "url": cnbc_article['link'],
    }
# links = get_feed_data()
# data = parse_cnbc_feed(links)
# # print(data)
# guardian_data = get_feed_data('https://www.theguardian.com/environment/rss')
# data = parse_the_guardian_feed(guardian_data)
# print(data)
# print(links) |
#! /usr/bin/python2.7
import os
import sys

# Echo stdin to stdout with the character order reversed (last first).
text = sys.stdin.read()
sys.stdout.write(text[::-1])
|
from Source.task4.Rect import Rect
def task4():
    """Read rectangles and query points from input.txt and write to
    output.txt the summed where_is_point score over all cases.

    Line 1 of input.txt holds the case count n; each of the next n lines
    holds "px py x1 y1 x2 y2 x3 y3 x4 y4".
    """
    # Context managers guarantee the files are closed even if parsing
    # raises (the original open/close pairs leaked on exceptions).
    with open('input.txt', 'r') as f:
        lines = f.readlines()
    n = int(lines[0])
    count = 0
    for i in range(1, n + 1):
        px, py, x1, y1, x2, y2, x3, y3, x4, y4 = [int(s) for s in lines[i].split(' ')]
        count += Rect(x1, y1, x2, y2, x3, y3, x4, y4).where_is_point(px, py)
    with open('output.txt', 'w') as f:
        f.write(str(count))


if __name__ == '__main__':
    task4()
|
#from pyon.ion.endpoint import ProcessRPCClient
from pyon.public import Container, IonObject
from pyon.util.int_test import IonIntegrationTestCase
from pyon.core.exception import BadRequest, NotFound, Conflict
from pyon.public import RT, LCS, PRED
from mock import Mock, patch
from pyon.util.unit_test import PyonTestCase
from nose.plugins.attrib import attr
import unittest
import sys, pprint, time, types, select
from pyon.util.log import log
import gevent
from ion.agents.instrument.direct_access.direct_access_server import DirectAccessServer, DirectAccessTypes
@attr('INT', group='sa')
@unittest.skip("not working; container doesn't start properly")
class Test_DirectAccessServer_Integration(IonIntegrationTestCase):
def setUp(self):
# Start container
#print 'starting container'
self._start_container()
#print 'started container'
setattr(self.container, 'ia_mock_quit', False)
#print 'got CC client'
self.container.start_rel_from_url('res/deploy/examples/ia_mock.yml')
print 'started services'
self.container_client = container_client
def test_direct_access_server(self):
while True:
if self.container.ia_mock_quit == True:
break
gevent.sleep(1)
print("quitting test")
if __name__ == '__main__':
    # For command line testing of telnet DA Server w/o nosetest timeouts
    # use information returned from IA to manually telnet into DA Server.
    # type 'quit' at the DA Server prompt to kill the server after telnet session is closed
    print("starting IA mock test for DA Server")
    container = Container()
    # Flag flipped by the mock IA to signal shutdown (polled below).
    setattr(container, 'ia_mock_quit', False)
    print("CC created")
    container.start()
    print("CC started")
    container.start_rel_from_url('res/deploy/examples/ia_mock.yml')
    # Poll once per second until the mock IA requests shutdown.
    while True:
        if container.ia_mock_quit == True:
            break
        gevent.sleep(1)
    print("stopping IA mock test for DA Server")
|
#!/usr/bin/env python
import re
from binaryninja.log import log_info
from binaryninja.architecture import Architecture
from binaryninja.function import RegisterInfo, InstructionInfo, InstructionTextToken
from binaryninja.enums import InstructionTextTokenType, BranchType, FlagRole, LowLevelILFlagCondition
from breach_dis import Opcode, OperandType, decode
from . import breach_il
from . import consts
class BreachArch(Architecture):
    """Binary Ninja Architecture plugin for the custom "Breach" VM.

    Bridges the standalone disassembler (``breach_dis.decode``) into Binary
    Ninja's hooks: CFG building (:meth:`get_instruction_info`), disassembly
    text rendering (:meth:`get_instruction_text`), and LLIL lifting
    (:meth:`get_instruction_low_level_il`).
    """

    name = 'Breach'
    address_size = 8        # 64-bit VM addresses
    default_int_size = 8
    instr_alignment = 1     # byte-aligned, variable-length encoding
    max_instr_length = 0x100

    # register related stuff
    regs = {
        # main registers
        'R0': RegisterInfo('R0', 8),
        'R1': RegisterInfo('R1', 8),
        'R2': RegisterInfo('R2', 8),
        'R3': RegisterInfo('R3', 8),
        'R4': RegisterInfo('R4', 8),
        'R5': RegisterInfo('R5', 8),
        'R6': RegisterInfo('R6', 8),
        'R7': RegisterInfo('R7', 8),
        'R8': RegisterInfo('R8', 8),
        'R9': RegisterInfo('R9', 8),
        'R10': RegisterInfo('R10', 8),
        'R11': RegisterInfo('R11', 8),
        'R12': RegisterInfo('R12', 8),
        'R13': RegisterInfo('R13', 8),
        'R14': RegisterInfo('R14', 8),
        'STACKP': RegisterInfo('STACKP', 8),
        # NOTE(review): 'SP' is declared as a view backed by STACKP —
        # presumably an alias so `stack_pointer = "SP"` resolves; confirm.
        'SP': RegisterInfo('STACKP', 8),
        # program counter
        'PC': RegisterInfo('PC', 8),
    }
    stack_pointer = "SP"

    #------------------------------------------------------------------------------
    # FLAG fun
    #------------------------------------------------------------------------------
    flags = ['z', 'h', 'n', 'c']

    # remember, class None is default/integer
    semantic_flag_classes = ['class_bitstuff']

    # flag write types and their mappings
    flag_write_types = ['dummy', 'z']
    flags_written_by_flag_write_type = {
        'dummy': [],
        'z': ['z'],
    }

    # roles
    flag_roles = {
        'z': FlagRole.ZeroFlagRole,
    }

    #------------------------------------------------------------------------------
    # CFG building
    #------------------------------------------------------------------------------
    def get_instruction_info(self, data, addr):
        """Decode one instruction and report its length plus branch targets.

        Returns ``None`` when `data` does not decode. Absolute jump targets
        are rebased by ``consts.VM_CODE_START``.
        """
        decoded = decode(data, addr)

        # on error, return nothing
        if not decoded.success():
            return None

        # on non-branching, return length
        result = InstructionInfo()
        result.length = decoded.length

        if decoded.opcode == Opcode.JMP_ABSOLUTE:
            result.add_branch(BranchType.UnconditionalBranch, decoded.operands[0][1] + consts.VM_CODE_START)
        elif decoded.opcode == Opcode.JMP_REG:
            # Target register value unknown statically.
            result.add_branch(BranchType.IndirectBranch)
        elif decoded.opcode == Opcode.JMP_EQ:
            result.add_branch(BranchType.TrueBranch, decoded.operands[2][1] + consts.VM_CODE_START)
            result.add_branch(BranchType.FalseBranch, addr + decoded.length)
        # ret from interrupts
        elif decoded.opcode == Opcode.META_VM_RET:
            result.add_branch(BranchType.FunctionReturn)
        elif decoded.opcode == Opcode.META_VM_CALL:
            # NOTE(review): unlike JMP_ABSOLUTE, the call target is not
            # rebased by consts.VM_CODE_START — confirm this asymmetry.
            result.add_branch(BranchType.CallDestination, decoded.operands[0][1])

        return result

    # from api/python/function.py:
    #
    # TextToken                 Text that doesn't fit into the other tokens
    # InstructionToken          The instruction mnemonic
    # OperandSeparatorToken     The comma or whatever else separates tokens
    # RegisterToken             Registers
    # IntegerToken              Integers
    # PossibleAddressToken      Integers that are likely addresses
    # BeginMemoryOperandToken   The start of memory operand
    # EndMemoryOperandToken     The end of a memory operand
    # FloatingPointToken        Floating point number
    def get_instruction_text(self, data, addr):
        """Render one instruction as display tokens.

        Returns ``(tokens, length)``, or ``None`` if the bytes do not decode.
        """
        decoded = decode(data, addr)
        if not decoded.success():
            return None

        result = []

        # opcode
        result.append(InstructionTextToken( \
            InstructionTextTokenType.InstructionToken, decoded.opcode_name()))

        # space for operand
        if decoded.operands:
            result.append(InstructionTextToken(InstructionTextTokenType.TextToken, ' '))

        # operands
        for i, operand in enumerate(decoded.operands):
            (oper_type, oper_val) = operand
            if oper_type in {OperandType.REG, OperandType.STACK_REG}:
                result.append(InstructionTextToken( \
                    InstructionTextTokenType.RegisterToken, oper_val.name))
            elif oper_type == OperandType.REG_SYSCALL_NUM:
                # Rendered as: syscall[REG]
                toks = [
                    (InstructionTextTokenType.TextToken, 'syscall'),
                    (InstructionTextTokenType.BeginMemoryOperandToken, '['),
                    (InstructionTextTokenType.RegisterToken, oper_val.name),
                    (InstructionTextTokenType.EndMemoryOperandToken, ']'),
                ]
                result.extend([InstructionTextToken(*ts) for ts in toks])
            elif oper_type == OperandType.REG_GLOBAL_ADDRESS:
                # Rendered as: data[REG]
                toks = [
                    (InstructionTextTokenType.TextToken, 'data'),
                    (InstructionTextTokenType.BeginMemoryOperandToken, '['),
                    (InstructionTextTokenType.RegisterToken, oper_val.name),
                    (InstructionTextTokenType.EndMemoryOperandToken, ']'),
                ]
                result.extend([InstructionTextToken(*ts) for ts in toks])
            elif oper_type == OperandType.REG_PROGRAM_ADDRESS:
                # Rendered as: code[REG]
                toks = [
                    (InstructionTextTokenType.TextToken, 'code'),
                    (InstructionTextTokenType.BeginMemoryOperandToken, '['),
                    (InstructionTextTokenType.RegisterToken, oper_val.name),
                    (InstructionTextTokenType.EndMemoryOperandToken, ']'),
                ]
                result.extend([InstructionTextToken(*ts) for ts in toks])
            elif oper_type == OperandType.IMM64:
                result.append(InstructionTextToken( \
                    InstructionTextTokenType.PossibleAddressToken, hex(oper_val), oper_val))
            elif oper_type == OperandType.IMM64_PROGRAM_ADDRESS:
                # Rendered as: code[imm]
                toks = [
                    (InstructionTextTokenType.TextToken, 'code'),
                    (InstructionTextTokenType.BeginMemoryOperandToken, '['),
                    (InstructionTextTokenType.PossibleAddressToken, hex(oper_val), oper_val),
                    (InstructionTextTokenType.EndMemoryOperandToken, ']'),
                ]
                result.extend([InstructionTextToken(*ts) for ts in toks])
            elif oper_type in {OperandType.ALU_OP, OperandType.STACK_OP}:
                toks = [
                    (InstructionTextTokenType.TextToken, oper_val.name),
                ]
                result.extend([InstructionTextToken(*ts) for ts in toks])
            elif oper_type == OperandType.STACK_IMM8:
                result.append(InstructionTextToken(InstructionTextTokenType.IntegerToken, hex(oper_val), oper_val))
            else:
                raise Exception('unknown operand type: ' + str(oper_type))

            # if this isn't the last operand, add comma
            if i < len(decoded.operands)-1:
                result.append(InstructionTextToken( \
                    InstructionTextTokenType.OperandSeparatorToken, ','))

        return result, decoded.length

    #------------------------------------------------------------------------------
    # LIFTING
    #------------------------------------------------------------------------------
    # def get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il):
    #     flag_il = LR35902IL.gen_flag_il(op, size, write_type, flag, operands, il)
    #     if flag_il:
    #         return flag_il
    #     return Architecture.get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il)

    def get_instruction_low_level_il(self, data, addr, il):
        """Lift one instruction into LLIL; return bytes consumed or None."""
        decoded = decode(data, addr)
        if not decoded.success():
            return None

        breach_il.gen_instr_il(addr, decoded, il)
        return decoded.length
        # LR35902IL.gen_instr_il(addr, decoded, il)
        # return decoded.len
def convert_to_nop(data: bytes, addr: int = 0):
    """Return a NOP-filled byte string the same length as *data*.

    The *addr* parameter is accepted for interface compatibility but unused.
    """
    nop_byte = Opcode.NOP.value
    return bytes(nop_byte for _ in data)
|
import imp
from flask import Flask
from config import config_options
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_mail import Mail
from flask_simplemde import SimpleMDE
# Flask extension singletons, created unbound and attached to the app
# later inside the create_app() factory.
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
simple = SimpleMDE()
# The upload set names the collection and restricts it to image file types.
photos = UploadSet('photos', IMAGES)
login_manager = LoginManager()
# 'strong' session protection monitors changes in the user's request headers
# and logs the user out when they change.
login_manager.session_protection = 'strong'
# Login endpoint lives inside the auth blueprint, hence the prefixed name.
login_manager.login_view = 'auth_login'
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    Parameters
    ----------
    config_name : str
        Key into ``config_options`` selecting the configuration class.

    Returns
    -------
    Flask
        The fully wired application instance.
    """
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])

    # Attach the module-level extension singletons to this app.
    configure_uploads(app, photos)
    for extension in (bootstrap, db, mail, login_manager, simple):
        extension.init_app(app)

    # Blueprints are imported here to avoid circular imports at module load.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .request import configure_request
    configure_request(app)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')

    return app
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 16 16:03:42 2022
@author: Admin
"""
#!/usr/bin/env python3
import datetime
import time
import random
import sys
# Generate one (date, random 6-digit number) row per day in [start, end)
# and write them to random_num.csv.
start = datetime.datetime.strptime("31-10-2019", "%d-%m-%Y")
end = datetime.datetime.strptime("17-01-2022", "%d-%m-%Y")

date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days)]

# BUG FIX: the original reset `all_list` INSIDE the loop (losing every row
# but the last) and never reset `a_list` (so it accumulated all dates into
# one ever-growing row). It also redirected sys.stdout to the output file
# without restoring or closing it.
all_list = []
for date in date_generated:
    a_list = [date.strftime("%d-%m-%Y"), str(random.randint(100000, 999999))]
    # Echo the row to the console as the original did.
    print(",".join(a_list))
    all_list.append(a_list)

# Write the CSV explicitly; `with` guarantees the file is flushed and closed.
with open('random_num.csv', 'w') as out_file:
    for a_list in all_list:
        out_file.write(", ".join(a_list) + "\n")
|
import json
import urllib2
import requests
def refresh_data():
    """Fetch static champion data from the Riot API and cache it as JSON.

    Writes two files: ``champdata.json`` (full per-champion data) and
    ``champkeys.json`` (the key map returned by the API).

    NOTE(review): the API key and the absolute output paths are hard-coded;
    they should be moved to configuration / environment variables.
    """
    response = requests.get('https://na.api.pvp.net/api/lol/static-data/na/v1.2/champion?champData=all&api_key=80a03926-6e55-4045-bf6f-692ec7007ca1')
    data = response.json()

    # `with` guarantees the files are closed even if json.dump raises
    # (the original left a file handle open on error).
    with open('/Users/boswald/Sites/gcloudsite/appengine-django-skeleton/league/tools/data/champdata.json', 'w') as f:
        json.dump(data['data'], f, sort_keys = True, indent = 4)

    with open('/Users/boswald/Sites/gcloudsite/appengine-django-skeleton/league/tools/data/champkeys.json', 'w') as f:
        json.dump(data['keys'], f, sort_keys = True, indent = 4)
def update_champs():
    # Populate the Django Champion/AllyTip tables from the cached JSON files
    # produced by refresh_data().  (Python 2 code — note the print statement.)
    with open('/Users/boswald/Sites/gcloudsite/appengine-django-skeleton/league/tools/data/champkeys.json') as f:
        champions = json.load(f)
    champkeys = champions.keys()
    #print champions
    #print champkeys
    with open('/Users/boswald/Sites/gcloudsite/appengine-django-skeleton/league/tools/data/champdata.json') as f:
        data = json.load(f)
    # Django must be configured before the models can be imported.
    import os
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
    import django
    django.setup()
    from league.models import AllyTip, Champion
    for champ in champkeys:
        # `champions` maps key -> champion name; `data` is keyed by name.
        entry = data[ champions[str(champ)] ]
        # Create one AllyTip row per tip and remember the new ids.
        atips = []
        for atip in entry['allytips']:
            atipinstance = AllyTip.objects.create(tip=atip)
            atipinstance.save()
            atips.append(atipinstance.id)
        champion_instance = Champion(
            name=entry['name'],
            title=entry['title'],
            champ_id=entry['id'],
            blurb=entry['blurb'],
            lore=entry['lore'],
        )
        champion_instance.save()
        print atips
        # NOTE(review): assigned after save() and never saved again —
        # presumably this relation is not persisted; verify.
        champion_instance.allytips = (atips)
'''
refresh_data()
update_champs()
'''
# Ad-hoc exploration: find the largest ally-tip list across all champions.
with open('/Users/boswald/Sites/gcloudsite/appengine-django-skeleton/league/tools/data/champkeys.json') as f:
    champions = json.load(f)
champkeys = champions.keys()
#print champions
#print champkeys
with open('/Users/boswald/Sites/gcloudsite/appengine-django-skeleton/league/tools/data/champdata.json') as f:
    data = json.load(f)
lorelengths = []
for key in champkeys:
    # NOTE: despite the variable name, this measures len(allytips), not lore.
    lorelengths.append(len(data[champions[str(key)]]['allytips']))
print max(lorelengths)
|
from bs4 import BeautifulSoup
import showtimes
import datetime
import re
import requests
MONDAY = 0
SUNDAY = 6
SATURDAY = 5
def scrape(site_url="https://afisilver.afi.com/films/calendar.aspx"):
    """Scrape the AFI Silver calendar page and return next week's showtimes.

    The collection window starts on the upcoming Sunday and ends one week
    later; parsing of the fetched HTML is delegated to :func:`parse`.

    Parameters
    ----------
    site_url : str, optional
        URL of the calendar page to fetch.

    Returns
    -------
    list of showtimes.Showtimes
    """
    # scrape from site
    page = requests.get(site_url)
    soup = BeautifulSoup(page.content, "html.parser")

    # collection will start on the upcoming sunday and end 1 week later
    today = datetime.date.today()
    # BUG FIX: the original wrote `today.weekday == SUNDAY`, comparing the
    # bound method object to an int (always False); weekday() must be called.
    if today.weekday() == SUNDAY:
        next_sun = today
    else:
        next_sun = today + datetime.timedelta(6-today.weekday())

    # parse out the content for the specified days
    shows = parse(soup, next_sun)
    return shows
def parse(soup, start_date):
    """
    Extract one week of showtimes from the parsed calendar page.

    Parameters
    ----------
    soup : BeautifulSoup
        Parsed calendar HTML.
    start_date : datetime.date
        The Sunday on which the 7-day collection window begins.

    Returns
    -------
    list of showtimes.Showtimes
    """
    # get a list of the days in order (including days from last and next month)
    days = soup.find_all('td', class_="day")
    dates = [int(x.contents[0]) for x in days]
    # The grid may begin with trailing days of the previous month; locate the
    # 1st of the current month, then the window's Sunday after it.
    month_start_idx = dates.index(1)
    next_sun_idx = dates[month_start_idx:].index(start_date.day)+month_start_idx
    next_sat_idx = next_sun_idx + 6
    shows = []
    # for each day next week
    for i in range(next_sun_idx, next_sat_idx+1):
        day = list(days[i].children)[0]
        movies = str(list(days[i].children)[1])  # 0 index has day of month, 1 index has content
        split_movies = re.findall(r"Movies/Details.*?\">(.*?)</a><br/>(.*?)<br/>", movies)
        # for each movie in the day
        for movie in split_movies:
            try:
                j = re.findall(r"([^a-z()]{3,})(\(.+?\))?", movie[0])[0]  # ("MOVIE NAME", "(YYYY)")
                year = re.sub("[^0-9]", "", j[1])
            except IndexError:  # TODO what causes this?
                print("ERROR: ", movie)
                continue
            movie_name = j[0]
            movie_times = movie[1]
            # format times: strip the dots from "a.m." and default the rest to pm
            times = re.findall(r"(?:\d)?\d:\d\d(?: a\.m\.)?", movie_times)
            times = [x.replace(".", "") for x in times]
            # NOTE(review): times without an explicit "am" are assumed to be pm.
            times = [x + " pm" if "am" not in x else x for x in times]
            shows.append(showtimes.Showtimes(movie_name.strip(), day, times, "AFI Silver", year))
    return shows
|
import ast
import asyncio
import collections
import copy
import html
import logging
import os
from typing import Dict, List, Optional, Tuple
import graphviz as gv
from .common import (FullLoadContext, LoadContext, PVRelations,
ScriptPVRelations, dataclass)
from .db import RecordField, RecordInstance, RecordType
logger = logging.getLogger(__name__)
# TODO: refactor this to not be graphviz-dependent; instead return node/link
# information in terms of dataclasses
# NOTE: the following is borrowed from pygraphviz, reimplemented to allow
# for asyncio compatibility
async def async_render(
    engine, format, filepath, renderer=None, formatter=None, quiet=False
):
    """
    Async Render file with Graphviz ``engine`` into ``format``, return result
    filename.

    Parameters
    ----------
    engine :
        The layout command used for rendering (``'dot'``, ``'neato'``, ...).
    format :
        The output format used for rendering (``'pdf'``, ``'png'``, ...).
    filepath :
        Path to the DOT source file to render.
    renderer :
        The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
    formatter :
        The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
    quiet : bool
        Suppress ``stderr`` output from the layout subprocess.

    Returns
    -------
    The (possibly relative) path of the rendered file.

    Raises
    ------
    ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
    graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
    graphviz.ExecutableNotFound: If the Graphviz executable is not found.
    subprocess.CalledProcessError: If the exit status is non-zero.

    Notes
    -----
    The layout command is started from the directory of ``filepath``, so that
    references to external files (e.g. ``[image=...]``) can be given as paths
    relative to the DOT source file.
    """
    # Adapted from graphviz under the MIT License (MIT) Copyright (c) 2013-2020
    # Sebastian Bank
    dirname, filename = os.path.split(filepath)
    # Let graphviz build the argv and predict the output filename.
    cmd, rendered = gv.backend.command(engine, format, filename, renderer, formatter)
    if dirname:
        # Run from the DOT file's directory so relative references resolve.
        cwd = dirname
        rendered = os.path.join(dirname, rendered)
    else:
        cwd = None

    # NOTE(review): `quiet` is accepted but unused here — stderr is always
    # captured via PIPE regardless; confirm intended behavior.
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        cwd=cwd,
    )
    (stdout, stderr) = await proc.communicate()
    if proc.returncode:
        raise gv.backend.CalledProcessError(
            proc.returncode, cmd, output=stdout, stderr=stderr
        )

    return rendered
class AsyncDigraph(gv.Digraph):
    """A graphviz ``Digraph`` whose render step runs as an asyncio subprocess."""

    async def async_render(
        self,
        filename=None,
        directory=None,
        view=False,
        cleanup=False,
        format=None,
        renderer=None,
        formatter=None,
        quiet=False,
        quiet_view=False,
    ):
        """
        Save the source to file and render with the Graphviz engine.

        Parameters
        ----------
        filename :
            Filename for saving the source (defaults to ``name`` + ``'.gv'``)
        directory :
            (Sub)directory for source saving and rendering.
        view (bool) :
            Open the rendered result with the default application.
        cleanup (bool) :
            Delete the source file after rendering.
        format :
            The output format used for rendering (``'pdf'``, ``'png'``, etc.).
        renderer :
            The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
        formatter :
            The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
        quiet (bool) :
            Suppress ``stderr`` output from the layout subprocess.
        quiet_view (bool) :
            Suppress ``stderr`` output from the viewer process;
            implies ``view=True``, ineffective on Windows.

        Returns:
            The (possibly relative) path of the rendered file.

        Raises
        ------
        ValueError: If ``format``, ``renderer``, or ``formatter`` are not known.
        graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
        graphviz.ExecutableNotFound: If the Graphviz executable is not found.
        subprocess.CalledProcessError: If the exit status is non-zero.
        RuntimeError: If viewer opening is requested but not supported.

        Notes
        -----
        The layout command is started from the directory of ``filepath``, so that
        references to external files (e.g. ``[image=...]``) can be given as paths
        relative to the DOT source file.
        """
        # Adapted from graphviz under the MIT License (MIT) Copyright (c) 2013-2020
        # Sebastian Bank
        filepath = self.save(filename, directory)
        if format is None:
            format = self._format
        # Delegate to the module-level asyncio-compatible renderer.
        rendered = await async_render(
            self._engine,
            format,
            filepath,
            renderer=renderer,
            formatter=formatter,
            quiet=quiet,
        )
        if cleanup:
            logger.debug("delete %r", filepath)
            os.remove(filepath)
        if quiet_view or view:
            self._view(rendered, self._format, quiet_view)
        return rendered
@dataclass
class LinkInfo:
    """A single link between two record fields (record1.field1 <-> record2.field2)."""
    record1: RecordInstance
    field1: RecordField
    record2: RecordInstance
    field2: RecordField
    # info: Tuple[str, ...]
    # Link modifier strings attached to the link (e.g. "PP", "CP", "MS").
    info: List[str]
def is_supported_link(link: str) -> bool:
    """Return True if ``link`` looks like a PV name rather than a constant.

    Rejects hardware/constant link syntax (``#...``, ``0x...``, ``@...``) and
    anything that parses as a Python literal (numbers, quoted strings, ...).
    """
    # Tuple form of startswith replaces the original three chained tests.
    if link.startswith(("#", "0x", "@")):
        return False

    try:
        ast.literal_eval(link)
    except Exception:
        # Not a literal -> plausibly a record/PV name.
        return True
    # Parsed as a literal constant, so not a PV link.
    return False
def build_database_relations(
    database: Dict[str, RecordInstance],
    record_types: Optional[Dict[str, RecordType]] = None,
    aliases: Optional[Dict[str, str]] = None
) -> PVRelations:
    """
    Build a dictionary of PV relationships.

    This should not be called often for large databases, as it makes no attempt
    to be computationally efficient. For repeated usage, cache the result
    of this function and reuse it in future calls to ``graph_links`` and such.

    Parameters
    ----------
    database : dict
        Dictionary of record name to record instance.

    record_types : dict, optional
        The database definitions to use for fields that are not defined in the
        database file. Dictionary of record type name to RecordType.

    aliases : dict, optional
        Mapping of record alias name to actual record name, used to resolve
        link targets.

    Returns
    -------
    info : dict
        Such that: ``info[pv1][pv2] = (field1, field2, info)``
        And in reverse: ``info[pv2][pv1] = (field2, field1, info)``
    """
    aliases = aliases or {}
    warned = set()
    # Placeholder context for fields synthesized below.
    unset_ctx: FullLoadContext = (LoadContext("unknown", 0), )
    by_record = collections.defaultdict(lambda: collections.defaultdict(list))
    # TODO: alias handling?
    for rec1 in database.values():
        for field1, link, info in rec1.get_links():
            # TODO: copied without thinking about implications
            # due to the removal of st.cmd context as an attempt to reduce
            field1 = copy.deepcopy(field1)
            field1.context = rec1.context[:1] + field1.context
            if "." in link:
                # Explicit target field: "RECORD.FIELD"
                link, field2 = link.split(".")
            elif field1.name == "FLNK":
                # Forward links implicitly target the record's PROC field.
                field2 = "PROC"
            else:
                field2 = "VAL"

            rec2 = database.get(aliases.get(link, link), None)
            if rec2 is None:
                # TODO: switch to debug; this will be expensive later
                if not is_supported_link(link):
                    continue

                # Warn only once per unknown link target.
                if link not in warned:
                    warned.add(link)
                    logger.debug(
                        "Linked record from %s.%s not in database: %s",
                        rec1.name, field1.name, link
                    )
                # Synthesize a placeholder field for the unknown record.
                field2 = RecordField(
                    dtype="unknown",
                    name=field2,
                    value="(unknown-record)",
                    context=unset_ctx,
                )
                rec2_name = link
            elif field2 in rec2.fields:
                rec2_name = rec2.name
                # TODO: copied without thinking about implications
                field2 = copy.deepcopy(rec2.fields[field2])
                field2.context = rec2.context[:1] + field2.context
            elif record_types:
                # Field not present in the .db instance; fall back to the
                # .dbd record-type definition when available.
                rec2_name = rec2.name
                dbd_record_type = record_types.get(rec2.record_type, None)
                if dbd_record_type is None:
                    field2 = RecordField(
                        dtype="invalid",
                        name=field2,
                        value="(invalid-record-type)",
                        context=unset_ctx,
                    )
                elif field2 not in dbd_record_type.fields:
                    field2 = RecordField(
                        dtype="invalid",
                        name=field2,
                        value="(invalid-field)",
                        context=unset_ctx,
                    )
                else:
                    dbd_record_field = dbd_record_type.fields[field2]
                    field2 = RecordField(
                        dtype=dbd_record_field.type,
                        name=field2,
                        value="",
                        context=dbd_record_field.context,
                    )
            else:
                rec2_name = rec2.name
                field2 = RecordField(
                    dtype="unknown",
                    name=field2,
                    value="",  # unset or invalid, can't tell yet
                    context=unset_ctx,
                )

            # Record the relation symmetrically (both directions).
            by_record[rec1.name][rec2_name].append((field1, field2, info))
            by_record[rec2_name][rec1.name].append((field2, field1, info))

    # Convert nested defaultdicts into plain dicts for the caller.
    return dict(
        (key, dict(inner_dict))
        for key, inner_dict in by_record.items()
    )
def combine_relations(
    dest_relations: PVRelations,
    dest_db: Dict[str, RecordInstance],
    source_relations: PVRelations,
    source_db: Dict[str, RecordInstance],
    record_types: Optional[Dict[str, RecordType]] = None,
    aliases: Optional[Dict[str, str]] = None,
):
    """Combine multiple script relations into one.

    Merges ``source_relations``/``source_db`` into ``dest_relations``,
    modifying ``dest_relations`` in place.
    """
    aliases = aliases or {}

    def get_relation_by_field() -> Tuple[
        str, str, Dict[Tuple[str, str], Tuple[str, str, List[str]]]
    ]:
        # Yield (rec1, rec2, {(f1.name, f2.name): (f1, f2, link)}) for every
        # pair in the source relations, creating destination entries as needed.
        for rec1_name, rec2_names in source_relations.items():
            dest_rec1_dict = dest_relations.setdefault(rec1_name, {})
            for rec2_name in rec2_names:
                dest_rec2 = dest_rec1_dict.setdefault(rec2_name, [])
                relation_by_field = {
                    (field1.name, field2.name): (field1, field2, link)
                    for field1, field2, link in dest_rec2
                }
                yield rec1_name, rec2_name, relation_by_field

    # Part 1:
    # Rebuild with new aliases, if available
    # Either set of relations could have referred to aliased names, actual
    # names, or even *both*.
    def alias_to_actual(d):
        # This is kinda expensive, imperfect, and confusing; consider reworking
        for alias_from, alias_to in aliases.items():
            # A -> B
            inner_dict = d.pop(alias_from, None)
            if not inner_dict:
                continue
            # Fix up B <- A first, since it's symmetric
            for inner_name, inner_items in inner_dict.items():
                # d[inner_name][alias_to] += d[inner_name][alias_from]
                d[inner_name].setdefault(alias_to, []).extend(
                    d[inner_name].pop(alias_from)
                )
            if alias_to not in d:
                d[alias_to] = inner_dict
            else:
                # The actual record name is already in the relation dict
                for inner_name, inner_items in inner_dict.items():
                    # d[alias_to][inner_name] += inner_items
                    d[alias_to].setdefault(inner_name, []).extend(inner_items)

    alias_to_actual(dest_relations)
    alias_to_actual(source_relations)

    # Part 2 (was mislabeled "Part 1" in the original comments):
    # Merge in new or updated relations from the second set
    for rec1_name, rec2_name, relation_by_field in get_relation_by_field():
        for field1, field2, link in source_relations[rec1_name][rec2_name]:
            key = (field1.name, field2.name)
            existing_link = relation_by_field.get(key, None)
            if not existing_link:
                relation_by_field[key] = (field1, field2, link)
            else:
                # Fill in previously-unknown field info in place.
                existing_field1, existing_field2, _ = existing_link
                existing_field1.update_unknowns(field1)
                existing_field2.update_unknowns(field2)

        dest_relations[rec1_name][rec2_name] = list(relation_by_field.values())

    def get_record(name) -> RecordInstance:
        """Get record from either database."""
        name = aliases.get(name, name)
        try:
            return dest_db.get(name, None) or source_db[name]
        except KeyError:
            raise

    def get_field_info(record, field):
        """Get record definition if available."""
        if field in record.fields:
            return record.fields[field]
        if record_types:
            # NOTE(review): this indexes record_types by *field* name, while
            # elsewhere record_types is keyed by record-type name — confirm.
            field_def = record_types[field]
            return RecordField(
                dtype=field_def.type,
                name=field,
                value="",
                context=field_def.context,
            )
        raise KeyError("Field not in database or database definition")

    # Part 3 (was mislabeled "Part 2" in the original comments):
    # Update any existing relations in the destination relations with
    # information from the source database
    for rec1_name, rec1 in source_db.items():
        if rec1_name in dest_relations:
            for rec2_name, rec2_items in dest_relations[rec1_name].items():
                # We know rec1 is in the source database, but we don't know
                # where rec2 might be, so use `get_record`.
                try:
                    rec2 = get_record(rec2_name)
                except KeyError:
                    # It's not in this IOC...
                    continue

                def get_items_to_update():
                    # Both directions of the symmetric relation.
                    for field1, field2, _ in rec2_items:
                        yield (rec1, field1)
                        yield (rec2, field2)
                    for field1, field2, _ in dest_relations[rec2_name][rec1_name]:
                        yield (rec2, field1)
                        yield (rec1, field2)

                for rec, field in get_items_to_update():
                    try:
                        field_info = get_field_info(rec, field.name)
                    except KeyError:
                        logger.debug("Missing field? %s.%s", rec.name, field.name)
                    else:
                        field.update_unknowns(field_info)
def find_record_links(database, starting_records, check_all=True, relations=None):
    """
    Get all related record links from a set of starting records.

    All starting records will be included, along with any other records that
    are linked to from there.

    Parameters
    ----------
    database : dict
        Dictionary of record name to record instance.

    starting_records : list of str
        Record names

    check_all : bool, optional
        Unused; retained for backward compatibility.

    relations : dict, optional
        Pre-built PV relationship dictionary. Generated from database
        if not provided.

    Yields
    -------
    link_info : LinkInfo
        Link info
    """
    # PERF FIX: `checked` was a list, making each membership test O(n) and
    # the traversal quadratic; a set preserves behavior (order is never used).
    checked = set()

    if relations is None:
        relations = build_database_relations(database)

    # Kept as a list so pop() order (LIFO) — and therefore yield order —
    # matches the original implementation.
    records_to_check = list(starting_records)
    while records_to_check:
        rec1 = database.get(records_to_check.pop(), None)
        if rec1 is None:
            continue

        checked.add(rec1.name)
        logger.debug("--- record %s ---", rec1.name)
        for rec2_name, fields in relations.get(rec1.name, {}).items():
            if rec2_name in checked:
                continue

            rec2 = database.get(rec2_name, None)
            if rec2 is None:
                continue

            for field1, field2, info in fields:
                # Queue newly-discovered records exactly once.
                if rec2_name not in checked and rec2_name not in records_to_check:
                    records_to_check.append(rec2_name)

                li = LinkInfo(
                    record1=rec1,
                    field1=field1,
                    record2=rec2,
                    field2=field2,
                    info=info,
                )
                logger.debug("Link %s", li)
                yield li
def graph_links(
    database,
    starting_records,
    graph=None,
    engine="dot",
    header_format='record({rtype}, "{name}")',
    field_format='{field:>4s}: "{value}"',
    sort_fields=True,
    text_format=None,
    show_empty=False,
    font_name="Courier",
    relations=None,
):
    """
    Create a graphviz digraph of record links.

    All starting records will be included, along with any other records that
    are linked to from there - if available in the database.

    Parameters
    ----------
    database : dict
        Dictionary of record name to record instance.

    starting_records : list of str
        Record names

    graph : graphviz.Graph, optional
        Graph instance to use. New one created if not specified.

    engine : str, optional
        Graphviz engine (dot, fdp, etc)

    field_format : str, optional
        Format string for fields (keys: field, value, attr)

    sort_fields : bool, optional
        Sort list of fields

    text_format : str, optional
        Text format for full node (keys: header, field_lines)

    show_empty : bool, optional
        Show empty fields

    font_name : str, optional
        Font name to use for all nodes and edges

    relations : dict, optional
        Pre-built PV relationship dictionary. Generated from database
        if not provided.

    Returns
    -------
    nodes: dict
    edges: dict
    graph : AsyncDigraph
    """
    node_id = 0
    edges = []
    nodes = {}
    existing_edges = set()

    if graph is None:
        graph = AsyncDigraph(format="pdf")

    if font_name is not None:
        graph.attr("graph", dict(fontname=font_name))
        graph.attr("node", dict(fontname=font_name))
        graph.attr("edge", dict(fontname=font_name))

    if engine is not None:
        graph.engine = engine

    newline = '<br align="left"/>'
    if text_format is None:
        text_format = f"""<b>{{header}}</b>{newline}{newline}{{field_lines}}"""

    # graph.attr("node", {"shape": "record"})

    def new_node(rec, field=""):
        # Allocate a graph node for `rec`, keyed by record name.
        nonlocal node_id
        node_id += 1
        nodes[rec.name] = dict(id=str(node_id), text=[], record=rec)
        # graph.node(nodes[rec.name], label=field)
        logger.debug("Created node %s (field: %r)", rec.name, field)

    # TODO: create node and color when not in database?
    for li in find_record_links(database, starting_records, relations=relations):
        for (rec, field) in ((li.record1, li.field1), (li.record2, li.field2)):
            if rec.name not in nodes:
                new_node(rec, field)

        src, dest = nodes[li.record1.name], nodes[li.record2.name]

        # Accumulate the field text lines shown inside each endpoint node.
        for field, text in [(li.field1, src["text"]), (li.field2, dest["text"])]:
            if field.value or show_empty:
                text_line = field_format.format(field=field.name, value=field.value)
                if text_line not in text:
                    text.append(text_line)

        # Input links flow the other way: flip so the edge follows data flow.
        if li.field1.dtype == "DBF_INLINK":
            src, dest = dest, src
            li.field1, li.field2 = li.field2, li.field1

        logger.debug("New edge %s -> %s", src, dest)

        edge_kw = {}
        # Solid edge for processing links (PP/CPP/CP), dashed otherwise.
        if any(item in li.info for item in {"PP", "CPP", "CP"}):
            edge_kw["style"] = ""
        else:
            edge_kw["style"] = "dashed"

        # Maximize-severity links are highlighted in red.
        if any(item in li.info for item in {"MS", "MSS", "MSI"}):
            edge_kw["color"] = "red"

        src_id, dest_id = src["id"], dest["id"]
        # Only the first link between a node pair gets an edge (and label).
        if (src_id, dest_id) not in existing_edges:
            edge_kw["xlabel"] = f"{li.field1.name}/{li.field2.name}"
            if li.info:
                edge_kw["xlabel"] += f"\n{' '.join(li.info)}"
            edges.append((src_id, dest_id, edge_kw))
            existing_edges.add((src_id, dest_id))

    if not nodes:
        # No relationship found; at least show the records
        for rec_name in starting_records:
            try:
                new_node(database[rec_name])
            except KeyError:
                ...

    for _, node in sorted(nodes.items()):
        field_lines = node["text"]
        if sort_fields:
            field_lines.sort()

        if field_lines:
            field_lines.append("")

        rec = node["record"]
        header = header_format.format(rtype=rec.record_type, name=rec.name)
        if rec.aliases:
            header += f"\nAlias: {', '.join(rec.aliases)}"

        # Build the HTML-like graphviz label; starting records are emphasized.
        text = text_format.format(
            header=html.escape(header, quote=False).replace("\n", newline),
            field_lines=newline.join(
                html.escape(line, quote=False) for line in field_lines
            ),
        )

        graph.node(
            node["id"],
            label="< {} >".format(text),
            shape="box3d" if rec.name in starting_records else "rectangle",
            fillcolor="bisque" if rec.name in starting_records else "white",
            style="filled",
        )

    # add all of the edges between graphs
    for src, dest, options in edges:
        graph.edge(src, dest, **options)

    return nodes, edges, graph
def build_script_relations(
    database: Dict[str, RecordInstance],
    by_record: Dict[str, RecordInstance],
    limit_to_records: Optional[List[str]] = None
) -> ScriptPVRelations:
    """Group inter-record relations by the script (IOC) that owns each record.

    Returns a nested mapping ``result[owner_a][owner_b]`` -> set of owner_a's
    record names, populated for every related record pair whose owners differ.
    """
    if limit_to_records is None:
        record_items = by_record.items()
    else:
        record_items = [
            (name, database[name]) for name in limit_to_records
            if name in database
        ]

    def get_owner(rec):
        # Resolution order: explicit owner -> first load-context name -> "unknown".
        if not rec:
            return "unknown"
        if rec.owner and rec.owner != "unknown":
            return rec.owner
        if rec.context:
            return rec.context[0].name
        return "unknown"

    result = collections.defaultdict(lambda: collections.defaultdict(set))
    for name_a, related_names in record_items:
        owner_a = get_owner(database.get(name_a, None))
        for name_b in related_names:
            owner_b = get_owner(database.get(name_b, None))
            # print(name_a, owner_a, "|", name_b, owner_b)
            if owner_a == owner_b:
                continue
            # Symmetric entry: each side records its own record name.
            result[owner_b][owner_a].add(name_b)
            result[owner_a][owner_b].add(name_a)
    return result
def graph_script_relations(
    database,
    limit_to_records=None,
    graph=None,
    engine="dot",
    header_format='record({rtype}, "{name}")',
    field_format='{field:>4s}: "{value}"',
    text_format=None,
    font_name="Courier",
    relations=None,
    script_relations=None,
):
    """
    Create a graphviz digraph of script links (i.e., inter-IOC record links).

    Parameters
    ----------
    database : dict
        Dictionary of record name to record instance.

    limit_to_records : list of str, optional
        Record names to restrict the graph to.

    graph : graphviz.Graph, optional
        Graph instance to use. New one created if not specified.

    engine : str, optional
        Graphviz engine (dot, fdp, etc)

    text_format : str, optional
        Text format for full node (keys: header, field_lines)

    font_name : str, optional
        Font name to use for all nodes and edges

    relations : dict, optional
        Pre-built PV relationship dictionary. Generated from database
        if not provided.

    script_relations : dict, optional
        Pre-built script relationship dictionary. Generated from database if
        not provided.

    Returns
    -------
    nodes: dict
    edges: dict
    graph : graphviz.Digraph
    """
    node_id = 0
    edges = []
    nodes = {}

    if script_relations is None:
        if relations is None:
            relations = build_database_relations(database)
        script_relations = build_script_relations(
            database, relations,
            limit_to_records=limit_to_records,
        )

    limit_to_records = limit_to_records or []

    if graph is None:
        graph = AsyncDigraph(format="pdf")

    if font_name is not None:
        graph.attr("graph", dict(fontname=font_name))
        graph.attr("node", dict(fontname=font_name))
        graph.attr("edge", dict(fontname=font_name))

    if engine is not None:
        graph.engine = engine

    newline = '<br align="center"/>'

    def new_node(label, text=None):
        # Create (or return the existing) node for `label`.
        nonlocal node_id
        if label in nodes:
            return nodes[label]
        node_id += 1
        nodes[label] = dict(id=str(node_id), text=text or [], label=label)
        logger.debug("Created node %s", label)
        # NOTE(review): returns an int here but a dict above; the return value
        # is unused by callers, so harmless — consider unifying anyway.
        return node_id

    for script_a, script_a_relations in script_relations.items():
        new_node(script_a, text=[script_a])
        for script_b, _ in script_a_relations.items():
            if script_b in nodes:
                continue
            new_node(script_b, text=[script_b])
            # Intermediate node listing the records each side contributes.
            inter_node = f"{script_a}<->{script_b}"
            new_node(
                inter_node,
                text=(
                    [f"<b>{script_a}</b>", ""]
                    + list(sorted(script_relations[script_a][script_b]))
                    + [""]
                    + [f"<b>{script_b}</b>", ""]
                    + list(sorted(script_relations[script_b][script_a]))
                ),
            )
            edges.append((script_a, inter_node, {}))
            edges.append((inter_node, script_b, {}))

    if not nodes:
        # No relationship found; at least show the records
        for rec_name in limit_to_records or []:
            try:
                new_node(rec_name)
            except KeyError:
                ...

    for name, node in sorted(nodes.items()):
        text = newline.join(node["text"])
        graph.node(
            node["id"],
            label="< {} >".format(text),
            shape="box3d" if name in limit_to_records else "rectangle",
            fillcolor="bisque" if name in limit_to_records else "white",
            style="filled",
        )

    # add all of the edges between graphs
    for src, dest, options in edges:
        graph.edge(nodes[src]["id"], nodes[dest]["id"], **options)

    return nodes, edges, graph
|
""" APIs of data """
from .mean_var_data import MeanVarData
from .mean_var_data_rep import MeanVarDataRep
from .ld_transform_data import LDTransformData
from .ld_transform_bayes_data import LDTransformBayesData
from .mlmc_data import MLMCData
from .mlqmc_data import MLQMCData
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from tqdm import tqdm
from definitions import Keyframe, path_data
from training_config import Train_Config
def __frame_to_feature(frame, featureset, config):
    # Convert one raw pose frame into its feature representation.
    # ``config`` is accepted for signature symmetry with the batch variant
    # but is unused here.
    return featureset.extract(Keyframe.from_numpy(frame))
def __frames_to_feature_batch(frames, featureset, config):
    # Batch variant: extract features for a whole track at once, optionally
    # applying random-occlusion augmentation as configured.
    return featureset.extract_batch(frames, random_occlusion=config.random_occlusion)
def to_recurrent_feature(X, y, sequence_length, sequence_distance, sequence_skip):
    """Window a flat feature matrix into overlapping sequences for an RNN.

    X: [n_frames, feature_dim] tensor; y: targets aligned with X's rows.
    Returns (windows, targets) where windows is
    [n_windows, sequence_length, feature_dim] and each target corresponds to
    the last frame of its window.
    """
    n_frames, feature_dim = X.shape[0], X.shape[1]
    span = sequence_length * sequence_distance
    n_windows = (n_frames - span) // sequence_skip + 1
    windows = torch.empty((n_windows, sequence_length, feature_dim))
    for step in range(sequence_length):
        start = step * sequence_distance
        stop = -(span - start - 1)
        if stop == 0:
            # a slice stop of 0 would be empty; take the full remaining length
            stop = n_frames
        windows[:, step] = X[start:stop:sequence_skip]
    first_target = (sequence_length - 1) * sequence_distance
    if sequence_distance == 1:
        targets = y[first_target::sequence_skip]
    else:
        targets = y[first_target:-(sequence_distance - 1):sequence_skip]
    return windows, targets
def to_recurrent_feature_index(X, y, sequence_length, sequence_skip, i0, i1=0, id=1):
    """Gather fixed-length history windows ending at anchors ``i0:i1:id``.

    Indices reaching before the start of X are clamped to 0, so the earliest
    frames repeat rather than indexing out of bounds. ``i1 == 0`` means
    "up to the end of X". Returns (windows, y[i0:i1:id]) with windows of
    shape [n_anchors, sequence_length, feature_dim].
    """
    stop = len(X) if i1 == 0 else i1
    anchors = torch.arange(i0, stop, id)
    windows = torch.empty((len(anchors), sequence_length, X.shape[1]))
    for offset in range(sequence_length):
        back = (sequence_length - 1 - offset) * sequence_skip
        rows = anchors - back
        rows[rows < 0] = 0  # clamp: never read before the first frame
        windows[:, offset] = X[rows]
    return windows, y[i0:stop:id]
def create_training_data_config(parser, c: Train_Config, update, save_all_to_mem=True, shuffle_all=True, dataset_name="", test_set=False):
    """Yield (X, y) tensor batches for the training configuration *c*.

    Features are cached on disk as numbered ``<feature><suffix><n>.dat``
    files under ``path_data``. With ``update`` true the cache is wiped and
    rebuilt from ``parser``'s raw tracks; otherwise the cached tensors are
    loaded (and the cache is built on demand if the first file is missing).

    Args:
        parser: raw-data source exposing ``load_numpy(normalized_bones)``.
        c: Train_Config carrying feature specs, input_size, model type, etc.
        update: force re-extraction of the on-disk feature cache.
        save_all_to_mem: collect all cached batches in memory before
            yielding (forced on when ``shuffle_all`` is set).
        shuffle_all: interleave samples across the entire dataset.
        dataset_name: required; becomes part of the cache-file suffix.
        test_set: when yielding the whole set un-shuffled, keep only the
            final 20% as a held-out split.

    Yields:
        (X, y) pairs; for recurrent models the tensors are windowed via
        ``to_recurrent_feature`` / ``to_recurrent_feature_index``.
    """
    in_features = c.in_features.data_to_features()
    out_features = c.out_features.data_to_features()
    if dataset_name == "":
        raise Exception("ERROR: no dataset_name set")
    if shuffle_all:
        # shuffling across batch files requires everything in memory
        save_all_to_mem = True
    _suffix = "_" + dataset_name + ("_normalized" if c.normalized_bones else "")
    if update:
        data = parser.load_numpy(c.normalized_bones)
        # wipe every existing cache file for this dataset before rebuilding
        batch_nr = 0
        suffix = f"{_suffix}{batch_nr}"
        while os.path.exists(os.path.join(path_data, in_features.name + suffix + ".dat")) or \
                os.path.exists(os.path.join(path_data, out_features.name + suffix + ".dat")):
            if os.path.exists(os.path.join(path_data, in_features.name + suffix + ".dat")):
                os.remove(os.path.join(path_data, in_features.name + suffix + ".dat"))
            if os.path.exists(os.path.join(path_data, out_features.name + suffix + ".dat")):
                os.remove(os.path.join(path_data, out_features.name + suffix + ".dat"))
            batch_nr += 1
            suffix = f"{_suffix}{batch_nr}"
        batch_nr = 0
        X = []
        y = []
        for skeleton, tracks in tqdm(data, desc="extracting features"):
            for _track in tracks:
                # 4-D tracks carry an extra leading axis; use the first entry
                track = _track[0] if len(_track.shape) == 4 else _track
                _x = __frames_to_feature_batch(track, in_features, config=c)
                _y = __frames_to_feature_batch(track, out_features, config=c)
                X.extend(_x.reshape((-1, _x.shape[1])))
                y.extend(_y.reshape((-1, _y.shape[1])))
                # flush complete batches of c.input_size samples to disk
                while len(X) >= c.input_size:
                    next_X = X[c.input_size:]
                    next_y = y[c.input_size:]
                    X = X[:c.input_size]
                    y = y[:c.input_size]
                    X = torch.from_numpy(np.array(X))
                    # NOTE(review): compares the config's feature *spec* with
                    # the converted feature set; likely meant ``c.in_features``
                    # — confirm whether this branch can ever be taken.
                    if c.out_features is in_features:
                        y = X
                    else:
                        y = torch.from_numpy(np.array(y))
                    suffix = f"{_suffix}{batch_nr}"
                    torch.save(X, os.path.join(path_data, in_features.name + suffix + ".dat"))
                    torch.save(y, os.path.join(path_data, out_features.name + suffix + ".dat"))
                    batch_nr += 1
                    X = next_X
                    y = next_y
        if len(X) > 0:
            # persist the final partial batch
            X = torch.from_numpy(np.array(X))
            if c.out_features is in_features:
                y = X
            else:
                y = torch.from_numpy(np.array(y))
            suffix = f"{_suffix}{batch_nr}"
            torch.save(X, os.path.join(path_data, in_features.name + suffix + ".dat"))
            torch.save(y, os.path.join(path_data, out_features.name + suffix + ".dat"))
    suffix = f"{_suffix}0"
    # if data is not available, create it!
    # NOTE(review): the recursive call does not forward ``test_set`` — confirm.
    if not os.path.exists(os.path.join(path_data, in_features.name + suffix + ".dat")) or \
            not os.path.exists(os.path.join(path_data, out_features.name + suffix + ".dat")):
        yield from create_training_data_config(parser, c, True, save_all_to_mem, shuffle_all, dataset_name)
        return
    batch_nr = 0
    batch_samples = 0  # how many rows of X_batch/y_batch are currently filled
    X_batch = None
    y_batch = None
    suffix = f"{_suffix}{batch_nr}"
    X_all = None
    y_all = None
    # walk the numbered cache files in order
    while os.path.exists(os.path.join(path_data, in_features.name + suffix + ".dat")) and os.path.exists(
            os.path.join(path_data, out_features.name + suffix + ".dat")):
        X = torch.load(os.path.join(path_data, in_features.name + suffix + ".dat"))
        y = torch.load(os.path.join(path_data, out_features.name + suffix + ".dat"))
        input_size = c.input_size if c.input_size > 0 else len(X)
        assert (len(X.shape) == 2)
        if save_all_to_mem:
            # just collect here; concatenated and yielded after the loop
            if X_all is None or y_all is None:
                X_all = [X]
                y_all = [y]
            else:
                X_all.append(X)
                y_all.append(y)
        else:
            # streaming mode: refill a fixed-size batch buffer and yield it
            if X_batch is None:
                X_batch = torch.empty((input_size, X.shape[1]), dtype=torch.float32)
                y_batch = torch.empty((input_size, y.shape[1]), dtype=torch.float32)
            for batch in range(max(1, len(X) // input_size)):
                new_samples = min(len(X), input_size - batch_samples)
                X_batch[batch_samples:batch_samples + new_samples] = X[batch * input_size: batch * input_size + new_samples]
                y_batch[batch_samples:batch_samples + new_samples] = y[batch * input_size: batch * input_size + new_samples]
                if batch_samples + new_samples == input_size:
                    # buffer is full -> emit one training batch
                    if c.model.recurrent:
                        yield to_recurrent_feature(X_batch, y_batch, c.sequence_length, c.sequence_distance,
                                                   c.sequence_skip)
                    else:
                        yield X_batch, y_batch
                    if batch == max(1, len(X) // input_size) - 1:
                        # last chunk of this file: carry its tail into the buffer
                        batch_samples = min(input_size, len(X) - new_samples)
                        if batch_samples > 0:
                            X_batch[:batch_samples] = X[-batch_samples:]
                            y_batch[:batch_samples] = y[-batch_samples:]
                    else:
                        batch_samples = 0
                else:
                    batch_samples += new_samples
        batch_nr += 1
        suffix = f"{_suffix}{batch_nr}"
    if save_all_to_mem and X_all is not None:
        X_all = torch.cat(X_all, dim=0)
        y_all = torch.cat(y_all, dim=0)
        if shuffle_all:
            # stride-sampling: yield i, i+batches, i+2*batches, ... per batch
            batches = len(X_all) // input_size
            last_entry = len(X_all) - (len(X_all) % input_size)
            if c.model.recurrent:
                for i in range(0, batches, c.sequence_distance):
                    yield to_recurrent_feature_index(X_all, y_all, c.sequence_length, c.sequence_skip, i, last_entry, batches)
            else:
                for i in range(batches):
                    yield X_all[i:last_entry:batches], y_all[i:last_entry:batches]
        else:
            if test_set:
                # hold out the last 20% of the data
                X_all = X_all[X_all.shape[0] * 8 // 10:]
                y_all = y_all[y_all.shape[0] * 8 // 10:]
            if c.model.recurrent:
                yield to_recurrent_feature(X_all, y_all, c.sequence_length, c.sequence_distance, c.sequence_skip)
            else:
                yield X_all, y_all
|
from flask import Flask, request, jsonify
from decouple import config
from flask_cors import CORS
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk import tokenize
def create_app():
    """Application factory for a small VADER sentiment-analysis API.

    Routes:
        GET /           -- health/landing endpoint.
        GET /sentiment  -- VADER polarity scores for the ``text`` query param.
    """
    app = Flask(__name__)
    CORS(app)
    sid = SentimentIntensityAnalyzer()

    @app.route('/')
    def root():
        # Bug fix: the view previously ended with ``pass`` and returned None,
        # which makes Flask raise "view function did not return a response".
        return jsonify({'status': 'ok'})

    @app.route('/sentiment', methods=["GET"])
    def sentiment():
        # KeyError-style access: a missing 'text' param becomes a 400 response.
        text = request.values['text']
        ss = sid.polarity_scores(text)
        return jsonify(ss)

    return app
|
# Desafio 84: register name/weight pairs, then report who is heaviest and
# lightest. Prompts and output strings are kept exactly as in the original.
print('=' * 12 + 'Desafio 84' + '=' * 12)
info = []      # list of [nome, peso] records
menor = -1     # lightest weight so far (-1 = not initialised yet)
maior = -1     # heaviest weight so far
contador = 0   # number of people registered
resp = 'S'
# Bug fix: the original condition was ``resp in 'sS'``, which is True for the
# empty string (substring test), so just pressing Enter kept the loop going.
while resp in ('s', 'S'):
    nome = input('Digite o nome: ')
    peso = float(input('Digite o peso: '))
    info.append([nome, peso])
    if menor == -1:
        # first entry initialises both extremes
        menor = peso
        maior = peso
    if peso < menor:
        menor = peso
    if peso > maior:
        maior = peso
    contador += 1
    resp = input('Deseja continuar [S/N]? ')
print(f'Foram cadastradas {contador} pessoas.')
print(f'O maior peso encontrado foi {maior} kg, que é o peso de: ', end='')
for c in info:
    if c[1] == maior:
        print(f'{c[0]}, ', end='')
print()
print(f'O menor peso encontrado foi {menor} kg, que é o peso de: ', end='')
for c in info:
    if c[1] == menor:
        print(f'{c[0]}, ', end='')
print()
def celsius_to_fahrenheit(temperature):
    """Convert *temperature* from degrees Celsius to degrees Fahrenheit."""
    ratio = 9 / 5
    return temperature * ratio + 32
|
# NOTE: diluted https://github.com/jic-dtool/dtool-lookup-server/blob/master/tests/__init__.py
import random
import string
import os
import sys
import pytest
# Pytest does not add the working directory to the path so we do it here.
_HERE = os.path.dirname(os.path.abspath(__file__))
_ROOT = os.path.join(_HERE, "..")
sys.path.insert(0, _ROOT)
# Test-only RSA public key the app uses to verify JWT signatures (RS256).
JWT_PUBLIC_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8LrEp0Q6l1WPsY32uOPqEjaisQScnzO/XvlhQTzj5w+hFObjiNgIaHRceYh3hZZwsRsHIkCxOY0JgUPeFP9IVXso0VptIjCPRF5yrV/+dF1rtl4eyYj/XOBvSDzbQQwqdjhHffw0TXW0f/yjGGJCYM+tw/9dmj9VilAMNTx1H76uPKUo4M3vLBQLo2tj7z1jlh4Jlw5hKBRcWQWbpWP95p71Db6gSpqReDYbx57BW19APMVketUYsXfXTztM/HWz35J9HDya3ID0Dl+pE22Wo8SZo2+ULKu/4OYVcD8DjF15WwXrcuFDypX132j+LUWOVWxCs5hdMybSDwF3ZhVBH ec2-user@ip-172-31-41-191.eu-west-1.compute.internal" # NOQA
# Pre-signed test access token for the admin user "snow-white".
snowwhite_token = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmcmVzaCI6ZmFsc2UsImlhdCI6MTYyMTEwMDgzMywianRpIjoiNmE3Yjk5NDYtNzU5My00OGNmLTg2NmUtMWJjZGIzNjYxNTVjIiwidHlwZSI6ImFjY2VzcyIsInN1YiI6InNub3ctd2hpdGUiLCJuYmYiOjE2MjExMDA4MzN9.gXdQpGnHDdOHTMG5OKJwNe8JoJU7JSGYooU5d8AxA_Vs8StKBBRKZJ6C6zS8SovIgcDEYGP12V25ZOF_fa42GuQErKqfwJ_RTLB8nHvfEJule9dl_4z-8-5dZigm3ieiYPpX8MktHq4FQ5vdQ36igWyTO5sK4X4GSvZjG6BRphM52Rb9J2aclO1lxuD_HV_c_rtIXI-SLxH3O6LLts8RdjqLJZBNhAPD4qjAbg_IDi8B0rh_I0R42Ou6J_Sj2s5sL97FEY5Jile0MSvBH7OGmXjlcvYneFpPLnfLwhsYUrzqYB-fdhH9AZVBwzs3jT4HGeL0bO0aBJ9sJ8YRU7sjTg" # NOQA
def random_string(
    size=9,
    prefix="test_",
    chars=string.ascii_uppercase + string.ascii_lowercase + string.digits
):
    """Return *prefix* followed by *size* characters drawn randomly from *chars*."""
    tail = [random.choice(chars) for _ in range(size)]
    return prefix + "".join(tail)
@pytest.fixture
def tmp_app_with_users(request):
    """Flask test client backed by a throwaway Mongo DB and in-memory SQL DB.

    Registers three users (snow-white is admin), one base URI, and
    search/register permissions, mirroring the dtool-lookup-server tests
    this module is diluted from.
    """
    from dtool_lookup_server import create_app, mongo, sql_db
    from dtool_lookup_server.utils import (
        register_users,
        register_base_uri,
        update_permissions,
    )

    # Random Mongo database name so concurrent test runs cannot collide.
    tmp_mongo_db_name = random_string()

    config = {
        "SECRET_KEY": "secret",
        "FLASK_ENV": "development",
        "SQLALCHEMY_DATABASE_URI": "sqlite:///:memory:",
        "MONGO_URI": "mongodb://localhost:27017/{}".format(tmp_mongo_db_name),
        "SQLALCHEMY_TRACK_MODIFICATIONS": False,
        # Tokens are verified against the module-level test public key.
        "JWT_ALGORITHM": "RS256",
        "JWT_PUBLIC_KEY": JWT_PUBLIC_KEY,
        "JWT_TOKEN_LOCATION": "headers",
        "JWT_HEADER_NAME": "Authorization",
        "JWT_HEADER_TYPE": "Bearer",
    }

    app = create_app(config)

    # Ensure the sql database has been put into the context.
    app.app_context().push()

    # Populate the database.
    sql_db.Model.metadata.create_all(sql_db.engine)

    # Register some users.
    register_users([
        dict(username="snow-white", is_admin=True),
        dict(username="grumpy"),
        dict(username="sleepy"),
    ])

    base_uri = "s3://snow-white"
    register_base_uri(base_uri)

    permissions = {
        "base_uri": base_uri,
        "users_with_search_permissions": ["grumpy", "sleepy"],
        "users_with_register_permissions": ["grumpy"]
    }
    update_permissions(permissions)

    @request.addfinalizer
    def teardown():
        # Drop the throwaway Mongo database and detach the SQL session.
        mongo.cx.drop_database(tmp_mongo_db_name)
        sql_db.session.remove()

    return app.test_client()
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
import sys
from ggrc import settings
import ggrc
def get_extension_name(extension_setting, default):
    """Resolve an extension name from ``settings``.

    The setting value may be a plain name or a zero-argument callable that
    produces one; a value of None falls back to *default* (uncalled).
    """
    value = getattr(settings, extension_setting, default)
    if value is None:
        return default
    if callable(value):
        value = value()
    return value
def get_extension_modules(modules=[]):
    """Return the extension modules named by ``settings.EXTENSIONS``.

    The mutable default argument is used deliberately as a cross-call cache:
    the first call populates it (a single ``None`` entry marks "lookup done,
    nothing configured") and later calls reuse it. Callers may also supply
    their own list.

    Bug fix: ``getattr`` is now given a ``None`` default, so a settings
    object without an ``EXTENSIONS`` attribute no longer raises
    AttributeError.
    """
    if len(modules) == 0:
        extension_names = getattr(settings, 'EXTENSIONS', None)
        if extension_names is None:
            modules.append(None)  # sentinel: no extensions configured
        else:
            for name in extension_names:
                modules.append(get_extension_module(name))
    if len(modules) == 0 or modules[0] is None:
        return []
    return modules
def get_extension_module(module_name):
    """Import the module named *module_name* (dotted path) and return it."""
    __import__(module_name)
    module = sys.modules[module_name]
    return module
def get_extension_module_for(extension_setting, default, extension_modules={}):
    """Return (and cache) the module configured under *extension_setting*.

    The mutable default dict is a deliberate cross-call cache; a falsy
    resolved name is cached as-is so the settings lookup runs only once.
    """
    if extension_setting in extension_modules:
        return extension_modules[extension_setting]
    name = get_extension_name(extension_setting, default)
    if name:
        __import__(name)
        extension_modules[extension_setting] = sys.modules[name]
    else:
        extension_modules[extension_setting] = name
    return extension_modules[extension_setting]
def get_extension_instance(extension_setting, default, extensions={}):
    """Return (and cache) an instance of the class named by *extension_setting*.

    The configured value must be a dotted ``package.module.ClassName`` path;
    the class is constructed once with ``settings`` and memoised in the
    mutable-default cache dict.
    """
    if extension_setting not in extensions:
        name = get_extension_name(extension_setting, default)
        dot = name.rfind('.')
        module_name, class_name = name[:dot], name[dot + 1:]
        __import__(module_name)
        cls = getattr(sys.modules[module_name], class_name)
        extensions[extension_setting] = cls(settings)
    return extensions[extension_setting]
def _get_contributions(module, name):
    """Fetch contributions from a single module.

    Args:
        module: Python module that will be checked for a given attribute.
        name: Name of the attribute that we want to collect from a module. The
            attribute must be a list or a callable that returns a list.

    Returns:
        List of contributions found.

    Raises:
        TypeError: If the attribute is not a list or a callable that returns a
            list.
    """
    contributions = getattr(module, name, [])
    if callable(contributions):
        contributions = contributions()
    if isinstance(contributions, dict):
        # Bug fix: under Python 3, dict.items() returns a view, not a list,
        # so the isinstance check below used to reject dict contributions.
        contributions = list(contributions.items())
    if not isinstance(contributions, list):
        raise TypeError("Contributed item must be a list or a callable that "
                        "returns a list")
    return contributions
def get_module_contributions(name):
    """Collect the attribute *name* contributed by ggrc and every extension.

    Each module is checked directly and, when it exposes a ``contributions``
    submodule, that submodule is checked as well. When every collected item
    is a tuple the result is folded into a dict (also for the empty case).

    Args:
        name (string): name of the contributed attribute that will be
            collected.

    Returns:
        A list (or dict, see above) of all collected attributes.
    """
    collected = []
    for module in [ggrc] + get_extension_modules():
        collected.extend(_get_contributions(module, name))
        sub = getattr(module, "contributions", None)
        if sub:
            collected.extend(_get_contributions(sub, name))
    if all(isinstance(item, tuple) for item in collected):
        return dict(collected)
    return collected
|
from datetime import datetime
from unittest import TestCase
from hl7.datatypes import _UTCOffset, parse_datetime
class DatetimeTest(TestCase):
    """Tests for HL7 DTM parsing and the _UTCOffset tzinfo helper."""

    def test_parse_date(self):
        self.assertEqual(datetime(1901, 2, 13), parse_datetime("19010213"))

    def test_parse_datetime(self):
        expected = datetime(2014, 3, 11, 14, 25, 33)
        self.assertEqual(expected, parse_datetime("20140311142533"))

    def test_parse_datetime_frac(self):
        # Fractional seconds scale by digit count: ".1" -> 100000 us, etc.
        cases = [
            ("20140311142533.1", 100000),
            ("20140311142533.01", 10000),
            ("20140311142533.001", 1000),
            ("20140311142533.0001", 100),
        ]
        for text, microseconds in cases:
            expected = datetime(2014, 3, 11, 14, 25, 33, microseconds)
            self.assertEqual(expected, parse_datetime(text))

    def test_parse_tz(self):
        self.assertEqual(
            datetime(2014, 3, 11, 14, 12, tzinfo=_UTCOffset(330)),
            parse_datetime("201403111412+0530"),
        )
        self.assertEqual(
            datetime(2014, 3, 11, 14, 12, 20, tzinfo=_UTCOffset(-300)),
            parse_datetime("20140311141220-0500"),
        )

    def test_tz(self):
        # tzname renders the minute offset as +HHMM / -HHMM.
        now = datetime.utcnow()
        self.assertEqual("+0205", _UTCOffset(125).tzname(now))
        self.assertEqual("-0410", _UTCOffset(-250).tzname(now))
|
# Copyright 2017 OpenStack.org
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Huawei has modified this source file.
# Copyright 2018 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import logging
def list_ptrs(conn):
    """Log up to 10 reverse-DNS (PTR) records visible on the connection."""
    for record in conn.dns.ptrs(limit=10):
        logging.info(record)
def create_ptr(conn):
    """Create a reverse-DNS (PTR) record for a floating IP and return it."""
    attrs = {
        'ptrdname': 'www.turnbig.net',
        'description': 'HaveFun.lee - For Test',
        'ttl': 300,
        'region': 'eu-de',
        'floating_ip_id': '9e9c6d33-51a6-4f84-b504-c13301f1cc8c'
    }
    created = conn.dns.create_ptr(**attrs)
    logging.info(created)
    return created
def get_ptr(conn):
    """Fetch and log one PTR record by region and floating-IP id."""
    record = conn.dns.get_ptr('eu-de', '9e9c6d33-51a6-4f84-b504-c13301f1cc8c')
    logging.info(record)
def restore_ptr(conn):
    # Reset the PTR record of this floating IP (region 'eu-de') via the
    # SDK's restore_ptr call.
    conn.dns.restore_ptr('eu-de', '9e9c6d33-51a6-4f84-b504-c13301f1cc8c')
# list_ptrs(connection)
# get_ptr(connection)
# create_ptr(connection)
# restore_ptr(connection)
|
import aiohttp
import asyncio
import websockets
import json
import logging
import traceback
from collections import defaultdict
from typing import Any, Awaitable, DefaultDict, Dict, Iterable, List, Optional, Type
from ._rules import SlackRule, SlackID
# Module-level logger; INFO level so rule activity and DMs are recorded.
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class SlackBot:
    """Slack RTM bot that fans incoming events out to a set of SlackRule handlers.

    Keeps bidirectional user/channel name<->id maps, reconnects when the
    websocket drops, and can mirror errors to a configured Slack user.
    """

    def __init__(
        self,
        token: str,
        rules: Optional[List[Type[SlackRule]]] = None,
        report_to_user: Optional[str] = None,
    ) -> None:
        self.token: str = token
        # Channel/user that receives error reports; None disables reporting.
        self.report_to_user = report_to_user
        # The bot's own user id, filled in once rtm.connect succeeds.
        self.me: SlackID = SlackID("")
        # Lookup tables populated by load_user_map / load_channel_map.
        self.id_to_user: Dict[SlackID, str] = {}
        self.user_to_id: Dict[str, SlackID] = {}
        self.id_to_channel: Dict[SlackID, str] = {}
        self.channel_to_id: Dict[str, SlackID] = {}
        # Instantiate the given rule classes, or every registered rule.
        rules_to_load = rules if rules is not None else SlackRule.all_rules()
        self.rules: List[SlackRule] = [rule(self) for rule in rules_to_load]
        # NOTE(review): constructing a ClientSession outside a running event
        # loop is deprecated in newer aiohttp releases — confirm.
        self.session: aiohttp.ClientSession = aiohttp.ClientSession()

    def get_readable_event(self, event: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of *event* augmented with human-readable names."""
        event = dict(event)
        event["channel_name"] = self.id_to_channel.get(SlackID(event.get("channel", "")))
        event["user_name"] = self.id_to_user.get(SlackID(event.get("user", "")))
        return event

    async def log_and_report(self, message: str, level: int = logging.INFO) -> None:
        """Logs messages and reports them to the user."""
        log.log(level, message)
        if self.report_to_user:
            await self.api_call(
                method="chat.postMessage", channel=self.report_to_user, text=message
            )

    async def _handle_futures(
        self, futures: Iterable[Awaitable[Any]], event: Optional[Dict[str, Any]] = None
    ) -> None:
        """Awaits futures with timeout and handles any exceptions they may raise."""
        # NOTE(review): asyncio.wait raises ValueError when *futures* is empty
        # (e.g. a bot configured with no rules) — confirm callers guarantee
        # at least one awaitable.
        done, pending = await asyncio.wait(futures, timeout=20)
        if event:
            event = self.get_readable_event(event)
        for fut in pending:
            fut.cancel()
            # One timeout report per future that missed the 20s deadline.
            await self.log_and_report(f"Timeout: On event `{event}`", level=logging.ERROR)
        for fut in done:
            try:
                await fut
            except Exception as e:
                await self.log_and_report(
                    f"Error: {e}\nOn event `{event}`\n```{traceback.format_exc()}```",
                    level=logging.ERROR,
                )

    async def api_call(self, **data: Any) -> Dict[Any, Any]:
        """POST one Slack Web API method; ``method`` is popped from *data*."""
        method = data.pop("method")
        data["token"] = self.token
        async with self.session.post(f"https://slack.com/api/{method}", data=data) as response:
            return await response.json()

    async def paginated_api_call(self, collect_key: str, **data: Any) -> Dict[Any, Any]:
        """Collect *collect_key* entries across all cursor-paginated pages."""
        data["limit"] = 300
        response = await self.api_call(**data)
        ret = response[collect_key]
        next_cursor = response.get("response_metadata", {}).get("next_cursor")
        while next_cursor:
            response = await self.api_call(cursor=next_cursor, **data)
            ret.extend(response[collect_key])
            next_cursor = response.get("response_metadata", {}).get("next_cursor")
        return ret

    async def run(self) -> None:
        """Main loop: connect to RTM, dispatch events, reconnect when closed."""
        while True:
            try:
                response = await self.api_call(method="rtm.connect")
                if not response["ok"]:
                    raise RuntimeError(f"Failed to connect to the RTM API. Response: {response}")
                async with websockets.connect(response["url"]) as ws:
                    self.me = response["self"]["id"]
                    # Refresh lookup tables, then let every rule load its state.
                    await asyncio.gather(self.load_user_map(), self.load_channel_map())
                    await self._handle_futures([rule.load() for rule in self.rules])
                    while True:
                        try:
                            message = await asyncio.wait_for(ws.recv(), timeout=20)
                        except asyncio.TimeoutError:
                            # Quiet connection: ping to verify it is still alive.
                            await asyncio.wait_for(ws.ping(), timeout=10)
                        else:
                            event = json.loads(message)
                            asyncio.ensure_future(self._process_event(event))
            except websockets.ConnectionClosed:
                # Dropped connection: fall through and reconnect.
                pass

    async def _process_event(self, event_dict: Dict[str, Any]) -> None:
        """Route one RTM event to every rule; direct messages are logged."""
        # defaultdict with a None factory: missing keys read as None below.
        event: DefaultDict[str, Any] = defaultdict(lambda: None, event_dict)
        if event["user"] == self.me:  # ignore events caused by me
            return
        if event["type"] == "message" and event["channel"] and event["channel"].startswith("D"):
            # Channel ids beginning with "D" are direct-message channels.
            log.info(f"Direct message: {self.get_readable_event(event)}")

        async def _react_to_rule(rule: SlackRule) -> None:
            # Each rule may return zero or more api_call payloads to send.
            responses = await rule.react(event)
            if responses:
                log.info(f"Rule {rule} is responding to an event.")
                for response in responses:
                    await self.api_call(**response)

        await self._handle_futures([_react_to_rule(rule) for rule in self.rules], event=event)

    async def load_user_map(self) -> None:
        """Populate the id<->user-name maps from users.list."""
        users = await self.paginated_api_call("members", method="users.list")
        self.id_to_user = {u["id"]: u["name"] for u in users}
        self.user_to_id = {u["name"]: u["id"] for u in users}

    async def load_channel_map(self) -> None:
        """Populate the id<->channel-name maps from conversations.list."""
        channels = await self.paginated_api_call(
            "channels", method="conversations.list", types="public_channel,private_channel"
        )
        self.id_to_channel = {c["id"]: c["name"] for c in channels}
        self.channel_to_id = {c["name"]: c["id"] for c in channels}
|
# Advance a Fibonacci-style pair ten times and print the first element.
x, y = 0, 1
for _ in range(10):
    # equivalent to: x += y; y += x (two Fibonacci steps per iteration)
    x, y = x + y, x + y + y
print(x)
|
from typing import Any, Dict, Optional, cast

import httpx
import pytest
from fastapi import FastAPI, status
from httpx_oauth.oauth2 import BaseOAuth2, OAuth2

from fastapi_users.authentication import AuthenticationBackend
from fastapi_users.router.oauth import generate_state_token, get_oauth_router
from tests.conftest import AsyncMethodMocker, UserManagerMock, UserOAuthModel
@pytest.fixture
def app_factory(secret, get_user_manager_oauth, mock_authentication, oauth_client):
    """Factory fixture building a FastAPI app wired with the OAuth router.

    The returned callable accepts an optional redirect URL so tests can
    exercise both the default and the explicit-redirect configurations.
    """
    def _app_factory(redirect_url: Optional[str] = None) -> FastAPI:
        # Annotation fix: the parameter defaults to None but was typed plain
        # ``str``; Optional[str] matches the actual contract.
        oauth_router = get_oauth_router(
            oauth_client,
            mock_authentication,
            get_user_manager_oauth,
            secret,
            redirect_url,
        )
        app = FastAPI()
        app.include_router(oauth_router)
        return app

    return _app_factory
@pytest.fixture
def test_app(app_factory):
    # App built with the router's default (no) redirect URL.
    return app_factory()
@pytest.fixture
def test_app_redirect_url(app_factory):
    # App configured with an explicit OAuth callback redirect URL.
    return app_factory("http://www.tintagel.bt/callback")
@pytest.fixture
@pytest.mark.asyncio
async def test_app_client(test_app, get_test_client):
    # Async HTTP client bound to the default app.
    # NOTE(review): pytest.mark.asyncio has no effect on fixtures; this
    # async-generator fixture presumably relies on pytest-asyncio — confirm.
    async for client in get_test_client(test_app):
        yield client
@pytest.fixture
@pytest.mark.asyncio
async def test_app_client_redirect_url(test_app_redirect_url, get_test_client):
    # Async HTTP client bound to the app with the explicit redirect URL.
    async for client in get_test_client(test_app_redirect_url):
        yield client
@pytest.mark.router
@pytest.mark.oauth
@pytest.mark.asyncio
class TestAuthorize:
    """GET /authorize must return the provider's authorization URL."""

    async def test_success(
        self,
        async_method_mocker: AsyncMethodMocker,
        test_app_client: httpx.AsyncClient,
        oauth_client: BaseOAuth2,
    ):
        # Stub the provider call so no real network traffic happens.
        get_authorization_url_mock = async_method_mocker(
            oauth_client, "get_authorization_url", return_value="AUTHORIZATION_URL"
        )

        response = await test_app_client.get(
            "/authorize", params={"scopes": ["scope1", "scope2"]}
        )

        assert response.status_code == status.HTTP_200_OK
        get_authorization_url_mock.assert_called_once()

        data = response.json()
        assert "authorization_url" in data

    async def test_with_redirect_url(
        self,
        async_method_mocker: AsyncMethodMocker,
        test_app_client_redirect_url: httpx.AsyncClient,
        oauth_client: BaseOAuth2,
    ):
        # Same flow against the app configured with an explicit redirect URL.
        get_authorization_url_mock = async_method_mocker(
            oauth_client, "get_authorization_url", return_value="AUTHORIZATION_URL"
        )

        response = await test_app_client_redirect_url.get(
            "/authorize", params={"scopes": ["scope1", "scope2"]}
        )

        assert response.status_code == status.HTTP_200_OK
        get_authorization_url_mock.assert_called_once()

        data = response.json()
        assert "authorization_url" in data
@pytest.mark.router
@pytest.mark.oauth
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "access_token",
    [
        ({"access_token": "TOKEN", "expires_at": 1579179542}),
        ({"access_token": "TOKEN"}),
    ],
)
class TestCallback:
    """GET /callback behaviour: state validation, user status, redirect URL.

    Each test runs twice — with and without an ``expires_at`` in the
    provider token (see the parametrize above).
    """

    async def test_invalid_state(
        self,
        async_method_mocker: AsyncMethodMocker,
        test_app_client: httpx.AsyncClient,
        oauth_client: BaseOAuth2,
        user_oauth: UserOAuthModel,
        access_token: Dict[str, Any],
    ):
        # An undecodable state token must yield 400; the provider exchange
        # still happens first (asserted at the bottom).
        async_method_mocker(oauth_client, "get_access_token", return_value=access_token)
        get_id_email_mock = async_method_mocker(
            oauth_client, "get_id_email", return_value=("user_oauth1", user_oauth.email)
        )

        response = await test_app_client.get(
            "/callback",
            params={"code": "CODE", "state": "STATE"},
        )

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        get_id_email_mock.assert_called_once_with("TOKEN")

    async def test_active_user(
        self,
        async_method_mocker: AsyncMethodMocker,
        test_app_client: httpx.AsyncClient,
        oauth_client: BaseOAuth2,
        user_oauth: UserOAuthModel,
        user_manager_oauth: UserManagerMock,
        access_token: Dict[str, Any],
    ):
        # Valid state + active user -> 200 with a login token for that user.
        state_jwt = generate_state_token({}, "SECRET")
        async_method_mocker(oauth_client, "get_access_token", return_value=access_token)
        async_method_mocker(
            oauth_client, "get_id_email", return_value=("user_oauth1", user_oauth.email)
        )
        async_method_mocker(
            user_manager_oauth, "oauth_callback", return_value=user_oauth
        )

        response = await test_app_client.get(
            "/callback",
            params={"code": "CODE", "state": state_jwt},
        )

        assert response.status_code == status.HTTP_200_OK

        data = cast(Dict[str, Any], response.json())
        assert data["access_token"] == str(user_oauth.id)

    async def test_inactive_user(
        self,
        async_method_mocker: AsyncMethodMocker,
        test_app_client: httpx.AsyncClient,
        oauth_client: BaseOAuth2,
        inactive_user_oauth: UserOAuthModel,
        user_manager_oauth: UserManagerMock,
        access_token: Dict[str, Any],
    ):
        # Inactive users may not log in through the OAuth callback.
        state_jwt = generate_state_token({}, "SECRET")
        async_method_mocker(oauth_client, "get_access_token", return_value=access_token)
        async_method_mocker(
            oauth_client,
            "get_id_email",
            return_value=("user_oauth1", inactive_user_oauth.email),
        )
        async_method_mocker(
            user_manager_oauth, "oauth_callback", return_value=inactive_user_oauth
        )

        response = await test_app_client.get(
            "/callback",
            params={"code": "CODE", "state": state_jwt},
        )

        assert response.status_code == status.HTTP_400_BAD_REQUEST

    async def test_redirect_url_router(
        self,
        async_method_mocker: AsyncMethodMocker,
        test_app_client_redirect_url: httpx.AsyncClient,
        oauth_client: BaseOAuth2,
        user_oauth: UserOAuthModel,
        user_manager_oauth: UserManagerMock,
        access_token: Dict[str, Any],
    ):
        # The configured redirect URL must reach the token-exchange call.
        state_jwt = generate_state_token({}, "SECRET")
        get_access_token_mock = async_method_mocker(
            oauth_client, "get_access_token", return_value=access_token
        )
        async_method_mocker(
            oauth_client, "get_id_email", return_value=("user_oauth1", user_oauth.email)
        )
        async_method_mocker(
            user_manager_oauth, "oauth_callback", return_value=user_oauth
        )

        response = await test_app_client_redirect_url.get(
            "/callback",
            params={"code": "CODE", "state": state_jwt},
        )

        assert response.status_code == status.HTTP_200_OK
        get_access_token_mock.assert_called_once_with(
            "CODE", "http://www.tintagel.bt/callback", None
        )

        data = cast(Dict[str, Any], response.json())
        assert data["access_token"] == str(user_oauth.id)
@pytest.mark.asyncio
@pytest.mark.oauth
@pytest.mark.router
async def test_route_names(
    test_app: FastAPI, oauth_client: OAuth2, mock_authentication: AuthenticationBackend
):
    """Route names must encode both the OAuth client and the backend name."""
    authorize_route_name = (
        f"oauth:{oauth_client.name}.{mock_authentication.name}.authorize"
    )
    assert test_app.url_path_for(authorize_route_name) == "/authorize"

    callback_route_name = (
        f"oauth:{oauth_client.name}.{mock_authentication.name}.callback"
    )
    assert test_app.url_path_for(callback_route_name) == "/callback"
|
import pandas
from urllib.parse import quote
import requests
import xml.etree.ElementTree as ET
# Path of the metadata CSV that is read and rewritten in place.
# NOTE(review): left empty here — must be filled in before running.
METADATA_PATH = ''
# (url_template, api_name) pairs tried in order when resolving a DOI.
apis = [('https://api.crossref.org/works/{}', 'crossref'), ('https://api.medra.org/metadata/{}', 'medra')]
# Counters/collectors mutated by the lookup helpers below.
preprint_doi_count = 0
not_found_list = []
invalid_doi_list = []
def get_journal(api, response):
    """Extract a journal title from an API *response*.

    api: 'crossref' or 'medra'; response: the Response of the API request.
    Returns the journal name, or None when a Crossref record has no
    container-title (counted as a preprint) or the api name is unknown.
    """
    if api == 'crossref':
        message = response.json()['message']
        try:
            # Crossref stores the journal as a one-element list under
            # 'container-title', e.g. ['CNS Oncology'].
            return message['container-title'][0]
        except IndexError:
            # An empty container-title usually means a preprint, e.g.
            # https://www.biorxiv.org/content/10.1101/001727v1
            global preprint_doi_count
            preprint_doi_count += 1
    elif api == 'medra':
        # mEDRA answers with ONIX XML; every element sits in this namespace.
        namespace = '{http://www.editeur.org/onix/DOIMetadata/2.0}'
        root = ET.fromstring(response.content)
        # TitleText is the element that carries the journal name.
        title = root.find('.//{}TitleText'.format(namespace))
        return title.text
def get_url(r):
    """Return the article URL from a DOI.org handle-API response.

    The handle record's 'values' list holds two entries (URL + HS_ADMIN)
    whose order is not guaranteed, so pick whichever is typed 'URL'.
    """
    values = r.json()['values']
    url_entry = values[0] if values[0]['type'] == 'URL' else values[1]
    return url_entry['data']['value']
def doi_to_journal(doi):
    """Resolve *doi* to a journal name via Crossref, then mEDRA, then DOI.org.

    Side effects: increments ``preprint_doi_count`` and appends to
    ``not_found_list`` / ``invalid_doi_list`` module globals.
    Returns None when no journal name could be determined.
    """
    for i, (link, api) in enumerate(apis):
        try:
            encoded_doi = quote(doi, safe='')
            response = requests.get(link.format(encoded_doi))
            if response.status_code == 404:
                response.raise_for_status()
            journal = get_journal(api, response)
            return journal
        # 404 from any of the APIs
        except requests.exceptions.HTTPError:
            # if the program has already looked on all other apis, check if it's registered on doi.org
            if i + 1 == len(apis):
                r = requests.get('https://doi.org/api/handles/{}'.format(encoded_doi))
                # DOI.org returns a response code of 100 if the Handle (DOI) is not found
                if r.json()['responseCode'] != 100:
                    # if the DOI points to an article in either biorxiv or medrxiv (which a ton seem to)
                    # increment the count of preprint_dois by 1
                    article_url = get_url(r)
                    if 'rxiv.org' in article_url:
                        global preprint_doi_count
                        preprint_doi_count += 1
                        if 'biorxiv' in article_url: return 'bioRxiv'
                        elif 'medrxiv' in article_url: return 'medRxiv'
                    # a pretty common journal that's not in any of the API registries I've found
                    if 'jthoracdis.com' in article_url:
                        return 'Journal of Thorasic Disease'
                    else:
                        # append the doi and article url of every article not found to not_found_list
                        global not_found_list
                        not_found_list.append((doi, article_url))
                    # NOTE(review): an rxiv.org URL that is neither biorxiv nor
                    # medrxiv falls through here and returns None — confirm.
                else:
                    global invalid_doi_list
                    invalid_doi_list.append(doi)
# Load the metadata table and fill in missing journal names by DOI lookup.
metadata = pandas.read_csv(METADATA_PATH)

# select rows from metadata where the journal is null and the doi is not null,
# then take the doi column entry. This is used for doi_to_journal() only.
articles_doi = metadata.loc[(metadata['journal'].isna()) & (metadata['doi'].isna() == False), 'doi']

# There was some strange issue with the .isna() condition and copy/view, so
# this selects the indices of all the articles of interest
rows_to_modify = list(articles_doi.index.values)
metadata.loc[rows_to_modify, 'journal'] = articles_doi.apply(lambda doi: doi_to_journal(doi))

# Summary accounting: every searched DOI lands in exactly one bucket.
journals_added = len(rows_to_modify) - (len(not_found_list) + len(invalid_doi_list) + preprint_doi_count)
print('{} DOIs searched'.format(len(rows_to_modify)))
print(' - {} Valid Journal Names NOT Found: {}'.format(len(not_found_list), not_found_list))
print(' - {} Valid Journal Names Found'.format(journals_added))
print(' - {} Pre-Print Articles'.format(preprint_doi_count))
print(' - {} Invalid DOIs: {}'.format(len(invalid_doi_list), invalid_doi_list))

# NOTE(review): to_csv without index=False adds an extra index column on every
# rewrite of the same file — confirm intended.
metadata.to_csv(METADATA_PATH)
|
import torch
from torch import nn
class ConvBlock(nn.Module):
    """Parallel multi-scale 1-D convolutions merged by a pointwise conv.

    Three branches with kernel sizes 128/256/512 ('same' padding keeps the
    temporal length) are concatenated channel-wise and projected back down
    to ``output_dim`` channels.
    """

    def __init__(self, input_dim, output_dim, dropout=0.1):
        super().__init__()
        # Branches are created in ascending kernel order so parameter
        # initialisation matches the conv1/conv2/conv3 layout.
        self.conv1 = self._create_conv_sequence(input_dim, output_dim, 128, dropout)
        self.conv2 = self._create_conv_sequence(input_dim, output_dim, 256, dropout)
        self.conv3 = self._create_conv_sequence(input_dim, output_dim, 512, dropout)
        self.pointwise = nn.Conv1d(output_dim * 3, output_dim, 1)

    def _create_conv_sequence(self, input_dim, output_dim, kernel_size, dropout):
        # conv -> ReLU -> dropout, preserving the sequence length
        return nn.Sequential(
            nn.Conv1d(input_dim, output_dim, kernel_size, padding='same'),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

    def forward(self, seq):
        """seq: [batch, input_dim, length] -> [batch, output_dim, length]."""
        merged = torch.cat(
            (self.conv1(seq), self.conv2(seq), self.conv3(seq)), dim=1
        )
        return self.pointwise(merged)
class HypertensionDetectorConvGRU(nn.Module):
    """Binary classifier over 1-D sequences: conv feature stack + bi-GRU.

    The raw sequence is downsampled 4x by three ConvBlocks with pooling,
    fed to a bidirectional GRU whose initial hidden state is a learned
    projection of per-sequence metadata, and the GRU's final hidden
    states are flattened into a single logit per example.
    """

    def __init__(self, feature_dim, hidden_dim, seq_meta_len, n_layers, dropout):
        super().__init__()
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # Projects metadata into the GRU's initial hidden state.
        self.seq_meta_fc = nn.Linear(seq_meta_len, hidden_dim)
        self.conv1 = ConvBlock(1, feature_dim, dropout=dropout)
        self.conv2 = ConvBlock(feature_dim, feature_dim, dropout=dropout)
        self.conv3 = ConvBlock(feature_dim, feature_dim, dropout=dropout)
        self.pool = nn.MaxPool1d(2)
        self.rnn = nn.GRU(feature_dim, hidden_dim, num_layers=n_layers, bidirectional=True, dropout=dropout)
        # Classifier head over all concatenated final hidden states.
        self.fc1 = nn.Linear(2 * n_layers * hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, 1)

    def forward(self, seq, seq_meta):
        """Input shapes
        seq: [batch_size, seq_len]
        seq_meta: [batch_size, seq_meta_len]
        """
        batch_size, _seq_len = seq.shape
        # Add the single input channel, then run the conv stack;
        # pooling after conv1 and conv2 quarters the time dimension.
        features = self.conv1(seq.unsqueeze(1))      # [batch, feature_dim, seq_len]
        features = self.conv2(self.pool(features))   # [batch, feature_dim, seq_len / 2]
        features = self.conv3(self.pool(features))   # [batch, feature_dim, seq_len / 4]
        # GRU (batch_first=False) wants [time, batch, channels].
        features = features.permute(2, 0, 1)
        # Same metadata embedding seeds every layer and direction.
        h0 = self.seq_meta_fc(seq_meta).unsqueeze(0).repeat(self.n_layers * 2, 1, 1)
        _, hidden = self.rnn(features, h0)           # [2 * n_layers, batch, hidden_dim]
        # Flatten per-example hidden states: [batch, 2 * n_layers * hidden_dim].
        flat = hidden.permute(1, 0, 2).reshape(batch_size, -1)
        return self.fc2(self.fc1(flat))              # [batch, 1] logit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.