#!/usr/bin/env python
from flask_demo.app import app
from flask_demo.db import db
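# Create all database tables registered on the SQLAlchemy `db` instance,
# inside the Flask application context so the app's configuration is used.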
def main():
with app.app_context():
db.create_all()
if __name__ == '__main__':
main()
|
import argparse
import datetime
import gym
import envs
import numpy as np
import torch
import imageio
import itertools
from rl.model import GaussianPolicy, QNetwork, DeterministicPolicy
from transformer_split.util import getGraphStructure
from transformer_split.vae_model import VAE_Model
from torch.nn import functional as F
from transformer_vae import util
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env1-name', default="ant",
help='Mujoco Gym environment (default: ant)')
parser.add_argument('--env2-name', default="ant3",
help='Mujoco Gym environment (default: ant3)')
parser.add_argument('--model_path', default="runs/2021-05-19_13-46-41_VAE_ant-v0_both/",
help='model path')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--policy_hidden_size', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--latent_dim', type=int, default=128,
help='Encoder latent dimension')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
parser.add_argument('--agent_memory1', default='data/ant_jump.memory',
help='Path for saved replay memory')
parser.add_argument('--video_file_name', default="ant_turn.mp4",
help='output file name')
parser.add_argument('--msg_dim', type=int, default=32,
help='message dimension (default: 32)')
parser.add_argument('--batch_size', type=int, default=1,
help='batch size (default: 1)')
parser.add_argument('--actor_path',
help='path to the saved actor checkpoint')
parser.add_argument('--num_episodes', type=int, default=3, metavar='N',
help='number of evaluation episodes (default: 3)')
parser.add_argument('--root_size', type=int, default=11,
help='root dimension')
parser.add_argument('--lr', type=float, default=1e-4, metavar='N',
help='learning rate (default: 1e-4)')
parser.add_argument(
"--transformer_norm", default=0, type=int, help="Use layernorm",
)
parser.add_argument(
"--beta",
type=float,
default=.1,
help="beta coefficient of KL divergence",
)
parser.add_argument(
"--gradient_penalty",
type=float,
default=10,
help="gradient penalty coefficient",
)
parser.add_argument(
"--discriminator_limiting_accuracy",
type=float,
default=0.7,
help="discriminator limiting accuracy",
)
parser.add_argument(
"--attention_layers",
default=3,
type=int,
help="How many attention layers to stack",
)
parser.add_argument(
"--attention_heads",
default=2,
type=int,
help="How many attention heads to stack",
)
parser.add_argument(
"--attention_hidden_size",
type=int,
default=128,
help="Hidden units in an attention block",
)
parser.add_argument(
"--attention_embedding_size",
type=int,
default=128,
help="Attention embedding size",
)
parser.add_argument(
"--dropout_rate",
type=float,
default=0.0,
help="How much to drop if drop in transformers",
)
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Agent
device = torch.device("cuda" if args.cuda else "cpu")
env_names = ["ant-v0", "ant3-v0", "ant_a-v0"]
train_envs = [gym.make(n) for n in env_names]
graphs = [getGraphStructure(e.xml) for e in train_envs]
# All environments have the same dimension per limb.
num_limbs = len(graphs[0]) #torso + body limbs
body_limbs = num_limbs - 1
dim_per_limb = int((train_envs[0].observation_space.shape[0] - args.root_size) / (body_limbs - 1))
max_num_limbs = max(len(g) for g in graphs)
args.dim_per_limb = dim_per_limb
args.max_num_limbs = max_num_limbs
root_dir = util.get_project_root()
render_env = train_envs[2]
render_topology = graphs[2]
render_limbs = len(render_topology)
expert_env = train_envs[0]
expert_topology = graphs[0]
policy = GaussianPolicy(
expert_env.observation_space.shape[0],
expert_env.action_space.shape[0],
args.policy_hidden_size,
expert_env.action_space).to(device)
policy.load_state_dict(torch.load(args.actor_path))
vae_model = VAE_Model(args)
vae_model.load_model(args.model_path)
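# Helper: zero-pad a flattened observation up to the size of the largest
# morphology (root observation plus per-limb features for max_num_limbs - 1 limbs).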
def pad_state(data, state_size, max_num_limbs):
max_dim = args.root_size + state_size * (max_num_limbs - 1)
output = torch.zeros(max_dim)
output[:data.shape[0]] = torch.tensor(data)
return output
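# Helper: pad a parent-index topology list with -1 entries up to max_num_limbs.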
def pad_topology(top, max_num_limbs):
topology = torch.full((max_num_limbs,), -1, dtype=torch.int32)
topology[:len(top)] = torch.tensor(top, dtype=torch.int32)
return topology
# Evaluation loop
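# Each render-environment observation is padded, mapped by the VAE into the
# expert morphology's state space, and then fed to the expert policy; every
# rendered frame is appended to the output video.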
total_numsteps = 0
avg_reward = 0.
state = render_env.reset()
with imageio.get_writer(args.video_file_name, fps=30) as video:
for i_episode in itertools.count(1):
episode_reward = 0
episode_steps = 0
done = False
state = render_env.reset()
video.append_data(render_env.render('rgb_array'))
done = False
while not done:
state = pad_state(state, dim_per_limb, max_num_limbs).unsqueeze(0)
src_topology = pad_topology(render_topology, max_num_limbs).unsqueeze(0)
tgt_topology = pad_topology(expert_topology, max_num_limbs).unsqueeze(0)
x_hat = vae_model.transfer(state, tgt_topology)
x_hat = x_hat.detach().cpu()
x_hat = x_hat[:(render_limbs-1)]
x_hat = torch.FloatTensor(x_hat).to(device).unsqueeze(0)
action, _, _ = policy.sample(x_hat)
action = action.detach().cpu().numpy()[0]
next_state, reward, done, _ = render_env.step(action[0][:7])
video.append_data(render_env.render('rgb_array'))
episode_reward += reward
state = next_state
avg_reward += episode_reward
if i_episode > args.num_episodes:
break
|
# Standard Library Imports
import math
import random
# Third Party Imports
import pygame
from pygame import mixer
# Local Application Imports
from classes.rectblock import RectBlock
from classes.enemyblock import EnemyBlock
from classes.gameobject import (
Player,
Enemy,
)
from configurations import (
X_LOWER_BOUNDARY,
GAME_FONT,
NOT_INITIALIZED,
OFF_SCREEN_Y_CORD,
DEFAULT_ENEMY_SPEED,
EXPLOSION_SOUND_PATH,
DEFAULT_PLAYER_SPEED,
)
mixer.init()
def show_score(score, screen):
font = pygame.font.Font(GAME_FONT, 25)
score_text = font.render(
"Score: " + str(score.value), True, (255, 255, 255)
)
screen.blit(score_text, (score.x_cord, score.y_cord))
def is_player_outof_bounds(player):
return player.x_cord + player.x_cord_change < X_LOWER_BOUNDARY or (
player.x_cord + player.x_cord_change > Player.X_UPPER_BOUNDARY
)
def is_enemy_out_of_upper_bounds(enemy):
return enemy.x_cord + enemy.x_cord_change > Enemy.X_UPPER_BOUNDARY
def is_enemy_out_of_lower_bounds(enemy):
return enemy.x_cord + enemy.x_cord_change < X_LOWER_BOUNDARY
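# An enemy that is still alive fires with probability 1/250 on each call.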
def should_enemy_fire(enemy):
if enemy is not EnemyBlock.DESTROYED_ENEMY_SLOT:
return random.randint(1, 250) == 1
return False
def is_bullet_init(gameobject):
return gameobject.bullet is not NOT_INITIALIZED
def track_bullet_movement(bullet, blocks, screen):
if bullet.is_off_screen():
bullet.reset_bullet()
return False
else:
bullet.blit(screen)
bullet.y_cord -= bullet.y_change
bullet_block_collision = False
bullet_block_collision = is_block_bullet_collision(blocks, bullet)
if bullet_block_collision is not None:
bullet_block_collision["block"].remove_node_from_row(
bullet_block_collision["rect"]
)
return False
return True
def do_game_over(enemies, screen, sub_text_message):
def show_game_over_text():
font = pygame.font.Font(GAME_FONT, 64)
text = font.render("GAME OVER", True, (255, 255, 255))
screen.blit(text, (200, 250))
font = pygame.font.Font(GAME_FONT, 32)
sub_text = font.render(sub_text_message.upper(), True, (255, 255, 255))
screen.blit(sub_text, (205, 315))
for enemy in enemies:
enemy.y_cord = OFF_SCREEN_Y_CORD
show_game_over_text()
def go_down_right(enemies, screen):
for enemy in enemies:
enemy.x_cord_change = DEFAULT_ENEMY_SPEED
enemy.y_cord += enemy.y_cord_change
enemy.blit(screen)
enemy.x_cord += enemy.x_cord_change
def go_down_left(enemies, screen):
for enemy in enemies:
enemy.x_cord_change = DEFAULT_ENEMY_SPEED * -1
enemy.y_cord += enemy.y_cord_change
enemy.blit(screen)
enemy.x_cord += enemy.x_cord_change
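# Two objects count as colliding when their positions are within 27 pixels
# (Euclidean distance).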
def is_collision(x1, x2, y1, y2):
distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
if distance < 27:
return True
return False
def destroy_enemy(enemy, on_screen_enemy_block, logical_enemy_block):
explosion_sound = mixer.Sound(EXPLOSION_SOUND_PATH)
explosion_sound.play()
on_screen_enemy_block.remove(enemy)
logical_enemy_block.replace_enemy(enemy, EnemyBlock.DESTROYED_ENEMY_SLOT)
def is_block_bullet_collision(blocks, bullet):
for block in blocks:
for rect in block.UNITS:
bullet_block_collision = is_collision(
bullet.x_cord, rect.x, bullet.y_cord, rect.y
)
if bullet_block_collision:
return {"block": block, "rect": rect}
return None
def check_events(player):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player.x_cord_change = -1 * DEFAULT_PLAYER_SPEED
if event.key == pygame.K_RIGHT:
player.x_cord_change = DEFAULT_PLAYER_SPEED
if (
event.key == pygame.K_SPACE
and player.bullet is NOT_INITIALIZED
):
player.bullet_init()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
player.x_cord_change = 0
return True
def create_blocks(number_of_blocks):
blocks = []
STARTING_X_RANGE = 0
ENDING_X_RANGE = 800
BLOCK_WIDTH = 90
DISTANCE_BETWEEN_TOP_LEFT_BLOCKS = 150
DISTANCE_BETWEEN_BLOCKS = DISTANCE_BETWEEN_TOP_LEFT_BLOCKS - BLOCK_WIDTH
# [DISTANCE_BETWEEN_BLOCKS] is just the 'empty' space between blocks.
BLOCK_HEIGHT_TOP = 375
BLOCK_HEIGHT_BOTTOM = 460
TOTAL_WIDTH = (BLOCK_WIDTH * number_of_blocks) + (
DISTANCE_BETWEEN_BLOCKS * (number_of_blocks - 1)
)
# [TOTAL_WIDTH] is the total width taken up by all the blocks plus the
# (number_of_blocks - 1) gaps between them.
BLOCKS_X_RANGE = ENDING_X_RANGE - STARTING_X_RANGE
# [BLOCKS_X_RANGE] This is the range that the blocks and
# their distances can take up.
TOP_LEFT_START = (BLOCKS_X_RANGE - TOTAL_WIDTH) / 2
for i in range(number_of_blocks):
block = RectBlock(
top_left=(TOP_LEFT_START, BLOCK_HEIGHT_TOP),
bottom_right=(TOP_LEFT_START + BLOCK_WIDTH, BLOCK_HEIGHT_BOTTOM),
)
TOP_LEFT_START += DISTANCE_BETWEEN_TOP_LEFT_BLOCKS
blocks.append(block)
return blocks
def create_enemy_block():
TOP_LEFT = (50, 50)
BOTTOM_RIGHT = (100, 100)
return EnemyBlock(TOP_LEFT, BOTTOM_RIGHT)
|
"""Class: Printer."""
import ast
import sys
import astunparse
import typed_ast.ast3
class Printer(astunparse.Printer):
"""Partial rewrite of Printer from astunparse to handle typed_ast.ast3-based trees."""
def __init__(
self, file=sys.stdout, indent=" ", annotate_fields: bool = True,
include_attributes: bool = False):
"""Initialize Printer instance."""
super().__init__(file=file, indent=indent)
self._annotate_fields = annotate_fields
self._include_attributes = include_attributes
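# Build the pieces generic_visit prints for a node: the opening text
# (e.g. "Name(" or "["), the (label, child) pairs, and the closing text.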
def _prepare_for_print(self, node):
if isinstance(node, list):
nodestart = "["
nodeend = "]"
children = [("", child) for child in node]
else:
nodestart = type(node).__name__ + "("
nodeend = ")"
children = [
(name + "=" if self._annotate_fields else '', value)
for name, value in typed_ast.ast3.iter_fields(node)]
if self._include_attributes and node._attributes:
children += [
(attr + '=' if self._annotate_fields else '', getattr(node, attr))
for attr in node._attributes]
return nodestart, children, nodeend
def generic_visit(self, node):
"""Print the syntax tree without unparsing it.
Merge of astunparse.Printer.generic_visit() and typed_ast.ast3.dump().
"""
nodestart, children, nodeend = self._prepare_for_print(node)
if len(children) > 1:
self.indentation += 1
self.write(nodestart)
for i, pair in enumerate(children):
attr, child = pair
if len(children) > 1:
self.write("\n" + self.indent_with * self.indentation)
if isinstance(child, (ast.AST, typed_ast.ast3.AST, list)):
self.write(attr)
self.visit(child)
else:
self.write(attr + repr(child))
if i != len(children) - 1:
self.write(",")
self.write(nodeend)
if len(children) > 1:
self.indentation -= 1
|
N,K=map(int,input().split())
L = [input().split() for i in range(N)]
for i in range(N):
L[i][2],L[i][3] = int(L[i][2]),int(L[i][3])
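# Two stable sorts: the final order is by column 2 descending, with ties
# broken by column 3 ascending.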
L.sort(key = lambda t:t[3])
L.sort(key = lambda t:(-t[2]))
cnt = 0
i = 0
d = {}
while cnt < K:
try:
d[L[i][0]] += 1
except KeyError:
d[L[i][0]] = 1
cnt += 1
print(L[i][1])
i += 1
|
#!/usr/bin/python3
'''Advent of Code 2019 Day 10 tests'''
import unittest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from aoc2019 import day10 # pylint: disable=wrong-import-position
class TestUM(unittest.TestCase):
'''Tests from day ten'''
def test_day10part1(self) -> None:
'''Part one tests'''
grid = ['.#..#',
'.....',
'#####',
'....#',
'...##']
self.assertEqual(day10.find_best_asteroid(grid), (3, 4, 8))
grid = ['......#.#.',
'#..#.#....',
'..#######.',
'.#.#.###..',
'.#..#.....',
'..#....#.#',
'#..#....#.',
'.##.#..###',
'##...#..#.',
'.#....####']
self.assertEqual(day10.find_best_asteroid(grid), (5, 8, 33))
grid = ['#.#...#.#.',
'.###....#.',
'.#....#...',
'##.#.#.#.#',
'....#.#.#.',
'.##..###.#',
'..#...##..',
'..##....##',
'......#...',
'.####.###.']
self.assertEqual(day10.find_best_asteroid(grid), (1, 2, 35))
grid = ['.#..#..###',
'####.###.#',
'....###.#.',
'..###.##.#',
'##.##.#.#.',
'....###..#',
'..#.#..#.#',
'#..#.#.###',
'.##...##.#',
'.....#.#..']
self.assertEqual(day10.find_best_asteroid(grid), (6, 3, 41))
grid = ['.#..##.###...#######',
'##.############..##.',
'.#.######.########.#',
'.###.#######.####.#.',
'#####.##.#.##.###.##',
'..#####..#.#########',
'####################',
'#.####....###.#.#.##',
'##.#################',
'#####.##.###..####..',
'..######..##.#######',
'####.##.####...##..#',
'.#####..#.######.###',
'##...#.##########...',
'#.##########.#######',
'.####.#.###.###.#.##',
'....##.##.###..#####',
'.#.#.###########.###',
'#.#.#.#####.####.###',
'###.##.####.##.#..##']
self.assertEqual(day10.find_best_asteroid(grid), (11, 13, 210))
def test_day10part2(self) -> None:
'''Part two test'''
grid = ['.#..##.###...#######',
'##.############..##.',
'.#.######.########.#',
'.###.#######.####.#.',
'#####.##.#.##.###.##',
'..#####..#.#########',
'####################',
'#.####....###.#.#.##',
'##.#################',
'#####.##.###..####..',
'..######..##.#######',
'####.##.####...##..#',
'.#####..#.######.###',
'##...#.##########...',
'#.##########.#######',
'.####.#.###.###.#.##',
'....##.##.###..#####',
'.#.#.###########.###',
'#.#.#.#####.####.###',
'###.##.####.##.#..##']
part1 = day10.find_best_asteroid(grid)
grid_angles = day10.all_grid_angles(grid, part1)
part2 = day10.destroy_asteroids(grid_angles)
self.assertEqual(part2[199], (8, 2))
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
u"""
author: Atsushi Sakai
"""
import numpy as np
def solve_qp_with_ep_const(P, q, A, b):
"""
Solve a quadratic program with only equality constraints:
min 0.5*x.T*P*x + q.T*x
s.t. A*x = b
"""
# input check
if not isinstance(P, np.matrix):
raise TypeError("'P' must be a np.matrix")
if not isinstance(q, np.matrix):
raise TypeError("'q' must be a np.matrix")
if not isinstance(A, np.matrix):
raise TypeError("'A' must be a np.matrix")
if not isinstance(b, np.matrix):
raise TypeError("'b' must be a np.matrix")
if P.shape[0] != P.shape[1]:
raise ValueError("'P' must be a square matrix")
if P.shape[1] != q.shape[1]:
raise ValueError("'P' or 'q' is invalid matrix size")
if A.shape[0] != b.shape[1]:
raise ValueError("'A' or 'b' is invalid matrix size")
K1 = np.concatenate((P, A.T), axis=1)
K2 = np.concatenate((A, np.zeros((A.shape[0], A.shape[0]))), axis=1)
K = np.concatenate((K1, K2), axis=0)
d = np.concatenate((-q.T, b), axis=0)
star = np.linalg.solve(K, d)
x_star = star[0:A.shape[1], :]
return x_star
def test_solve_qp_with_ep_const():
print("start test_solve_qp_with_ep_const")
P = np.matrix(np.diag([1.0, 0.0]))
q = np.matrix(np.array([3.0, 4.0]))
A = np.matrix([1.0, 1.0])
b = np.matrix(1.0)
print("P")
print(P)
print("q")
print(q)
print("A")
print(A)
print("b")
print(b)
x = solve_qp_with_ep_const(P, q, A, b)
print("x")
print(x)
assert abs(x[0] - 1.0) < 0.0001
assert abs(x[1] - 0.0) < 0.0001
print("finish test_solve_qp_with_ep_const")
def test():
test_solve_qp_with_ep_const()
if __name__ == '__main__':
test()
|
import nltk
import numpy as np
from nltk.corpus import wordnet
def _to_unicode(str_, py3=True):
if py3:
return str_
else:
return unicode(str_)
def parse_pos_tag(tag):
# https://stackoverflow.com/questions/15586721/wordnet-lemmatization-and-pos-tagging-in-python
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
# The default POS used in lemmatization is noun
return wordnet.NOUN
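# Tag each tokenized document with coarse WordNet POS tags (for use in lemmatization).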
def _post_tag(corpus):
corpus_w_tag = list()
for doc in corpus:
word_tag_tuple = nltk.pos_tag(doc)
corpus_w_tag.append([[word, parse_pos_tag(tag)] for word, tag in word_tag_tuple])
return corpus_w_tag
|
"""
This is the main file that holds the Tokenizer, Parser, and Interpreter
that actually compile the PDF.
"""
import os.path as path
import re
import copy as _copy
from decimal import Decimal
from placer.placer import Placer
from constants import CMND_CHARS, END_LINE_CHARS, ALIGNMENT, TT, TT_M, WHITE_SPACE_CHARS, NON_END_LINE_CHARS, PB_NUM_TABS, PB_NAME_SPACE, STD_FILE_ENDING, STD_LIB_FILE_NAME, OUT_TAB
from tools import assure_decimal, is_escaped, is_escaping, exec_python, eval_python, string_with_arrows, trimmed, print_progress_bar, prog_bar_prefix, calc_prog_bar_refresh_rate, assert_instance
from marked_up_text import MarkedUpText
from markup import Markup, MarkupStart, MarkupEnd
from toolbox import ToolBox
from placer.placers.naiveplacer import NaivePlacer
# -----------------------------------------------------------------------------
# Errors That Can Occur While Compiling
class Error(Exception):
def __init__(self, pos_start, pos_end, error_name, details):
self.pos_start = pos_start
self.pos_end = pos_end
self.error_name = error_name
self.details = details
def as_string(self):
result = f'Line {self.pos_start.ln + 1}, Column {self.pos_start.col + 1}, in file {self.pos_start.file_path}\n'
result += f' {self.error_name} Occurred: {self.details}'
result += '\n' + string_with_arrows(self.pos_start.file_text, self.pos_start, self.pos_end)
return result
class ExpectedValidCmndNameError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Expected Valid Command Name', details)
class IllegalCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Illegal Character Error', details)
class ExpectedCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Expected Character Error', details)
class InvalidSyntaxError(Error):
def __init__(self, pos_start, pos_end, details=''):
super().__init__(pos_start, pos_end, 'Invalid Syntax Error', details)
class RunTimeError(Error):
def __init__(self, pos_start, pos_end, details, context):
super().__init__(pos_start, pos_end, 'Run-Time Error', details)
self.context = context
def generate_traceback(self):
result = ''
pos = self.pos_start
ctx = self.context
while ctx is not None:
result = f' File {pos.file_path}, line {pos.ln + 1}, in {ctx.display_name}\n' + result
pos = ctx.entry_pos
ctx = ctx.parent
return 'Traceback (most recent call last):\n' + result
class PythonException(RunTimeError):
def __init__(self, pos_start, pos_end, details, python_error, context):
import traceback
self.python_error = f'{python_error.exc_trace}'
super().__init__(pos_start, pos_end, details, context)
self.error_name = 'Python Exception'
def as_string(self):
string = super().as_string()
string += '\nHere is the Python Exception:\n\n'
string += f'{self.python_error}'
return string
# -----------------------------------------------------------------------------
# Position Class
class Position:
"""
Position in a Tokenized file or a file that is being tokenized.
"""
__slots__ = ['idx', 'ln', 'col', 'file_path', 'file_text']
def __init__(self, idx, ln, col, file_path, file_text):
self.idx = idx
self.ln = ln
self.col = col
self.file_path = file_path # The path to the file that this is a position in
self.file_text = file_text # The text of the file this is a position in
def advance(self, current_char=None):
self.idx += 1
self.col += 1
if current_char in END_LINE_CHARS:
self.ln += 1
self.col = 0
return self
def copy(self):
return Position(self.idx, self.ln, self.col, self.file_path, self.file_text)
def __repr__(self):
file = self.file_path.split('\\')[-1]
return f"{self.__class__.__name__}(line {self.ln}, col {self.col}, in {file})"
# -----------------------------------------------------------------------------
# File Class
class File:
__slots__ = ['file_path',
'raw_text', 'tokens', 'ast',
'import_context', 'import_tokens', 'being_run']
def __init__(self, file_path):
self.file_path = file_path # Path to file
# Fields set in Compiler._compiler_import_file
self.raw_text = None # The raw text that is in the file
self.tokens = None # The tokens that make up the File once it has been tokenized
self.ast = None # The Abstract Syntax tree from the Tokens being Parsed
# Fields set by Compiler._import_file
self.import_context = None # The context obtained by running the file, can be used to import this file into another file
self.import_tokens = None # The tokens to add to the token_document when the file is imported
self.being_run = False
# -----------------------------------------------------------------------------
# Token Class
class Token:
__slots__ = ['start_pos', 'end_pos', 'type', 'value', 'space_before']
def __init__(self, type, value, start_pos, end_pos=None, space_before=True):
self.start_pos = start_pos
if isinstance(space_before, bool):
# Space before is whether there should be a space before the token
# when it is put on the page. This is so that tokens like the
# '=' and '{' that are singled out of a sentence can still tell
# the placer whether there was space before them because the
# default is to just put a space before each token is placed down.
self.space_before = space_before
else:
self.space_before = (space_before in WHITE_SPACE_CHARS)
if end_pos is None:
end_pos = self.start_pos.copy()
end_pos.advance() # Necessary for errors to display correctly because they use start_pos - end_pos
self.end_pos = end_pos
else:
self.end_pos = end_pos
self.type = type
self.value = str(value)
if type == TT.WORD and value == '':
raise Exception(f'An empty string has been made into a Token. This is a compiler problem. {self}')
def matches(self, token_type, value):
"""
Checks if the given token_type and value matches this one.
"""
return self.type == token_type and self.value == value
def copy(self):
start_pos = None if self.start_pos is None else self.start_pos.copy()
end_pos = None if self.end_pos is None else self.end_pos.copy()
return Token(self.type, self.value, start_pos, end_pos, self.space_before)
def gen_pass_2_python(self, locals):
"""
Generates a SecondPassPythonToken that can store the locals
that should be provided when the python code is run in the Placer.
The Placer already has the globals that should be provided.
"""
start_pos = None if self.start_pos is None else self.start_pos.copy()
end_pos = None if self.end_pos is None else self.end_pos.copy()
return SecondPassPythonToken(self.type, self.value, start_pos, end_pos, self.space_before, locals)
def __repr__(self):
"""
This is what is called when you print this object since __str__ is undefined.
"""
return f"Token(\"<{self.type}>\":{' ' if self.space_before else ''}{self.value})"
class SecondPassPythonToken(Token):
__slots__ = Token.__slots__[:]
__slots__.extend(['locals'])
def __init__(self, type, value, start_pos, end_pos=None, space_before=False, locals=None):
super().__init__(type, value, start_pos, end_pos, space_before)
self.locals = locals
# -----------------------------------------------------------------------------
# Tokenizer Class
class Tokenizer:
"""
Takes raw text and tokenizes it.
"""
def __init__(self, file_path, file_text, starting_position=None, print_progress_bar=False):
super().__init__()
self._print_progress_bar = print_progress_bar
if starting_position:
# Parse assuming that you are starting at the given line and column in the file
self._pos = starting_position.copy()
self._pos.idx = -1
else:
# Parse assuming that you are starting at the beginning of the file
self._pos = Position(-1, 0, -1, file_path, file_text)
self._text = file_text
self._current_char = None
self._previous_char = ''
self._plain_text = ''
self._plain_text_start_pos = None
self._space_before_plaintext = False
self._unpaired_cbrackets = 0
self._unpaired_oparens = 0
self._tokens = []
self._advance()
def _advance(self, num=1):
"""Advances to the next character in the text if it should advance."""
for i in range(num):
self._previous_char = self._current_char
self._pos.advance(self._current_char)
self._current_char = self._text[self._pos.idx] if self._pos.idx < len(self._text) else None
@staticmethod
def plaintext_tokens_for_str(string, count_starting_space=False):
"""
If you want to write plaintext to the placer and the string to be
interpreted only as plaintext, then this is what you use to
tokenize the string. Just take the returned list of tokens from this
method and give it to the place_text method of the Placer.
If count_starting_space is True, then it will treat the whitespace
before the first letter as actual space that could produce
a paragraph break
"""
tokens = []
idx = -1
cc = None
def next_tok(idx):
idx += 1
return string[idx] if idx < len(string) else None, idx
def try_append_word(curr_word, space_before):
curr_word = re.sub(r'(\s)+', '', curr_word)
if len(curr_word) > 0:
tokens.append(Token(TT.WORD, curr_word, DUMMY_POSITION.copy(), space_before=space_before))
cc, idx = next_tok(idx)
if not count_starting_space:
# Eat all end line chars at beginning so no paragraph break at beginning
while (cc is not None) and (cc in END_LINE_CHARS):
cc, idx, = next_tok(idx)
space_before = False
curr_word = ''
while cc is not None:
if cc in NON_END_LINE_CHARS:
cc, idx = next_tok(idx)
try_append_word(curr_word, space_before)
curr_word = ''
space_before = True
while (cc is not None) and (cc in NON_END_LINE_CHARS):
cc, idx, = next_tok(idx)
continue
elif cc in END_LINE_CHARS:
cc, idx = next_tok(idx)
try_append_word(curr_word, space_before)
curr_word = ''
space_before = True
if cc in END_LINE_CHARS:
tokens.append(Token(TT.PARAGRAPH_BREAK, TT.PARAGRAPH_BREAK, DUMMY_POSITION.copy()))
cc, idx = next_tok(idx)
while (cc is not None) and (cc in END_LINE_CHARS):
cc, idx, = next_tok(idx)
continue
else:
curr_word += cc
cc, idx = next_tok(idx)
try_append_word(curr_word, space_before)
return tokens
@staticmethod
def marked_up_text_for_tokens(list_of_tokens):
"""
Returns a MarkedUpText object that is equivalent to the List of Tokens given.
"""
text = MarkedUpText()
curr_index = 0
pending_markups = []
for t in list_of_tokens:
if isinstance(t, (MarkupStart, MarkupEnd)):
text.add_markup_start_or_end(t, curr_index)
elif isinstance(t, Token):
if t.type == TT.PARAGRAPH_BREAK:
# Add two newlines to signify a paragraph break
text += '\n\n'
curr_index += 2
elif t.type in (TT.EXEC_PYTH2, TT.EVAL_PYTH2):
markup = Markup()
markup.add_python(t)
text.add_markup(markup, curr_index)
else:
if t.space_before:
text += ' '
curr_index += 1
text += t.value
curr_index += len(t.value)
else:
raise Exception(f'{t} was in the list of tokens given to be changed into MarkedUpText, but MarkedUpText can\'t denote it. This is a compiler problem, tell the makers of the compiler that you got this error.')
text_len = len(text)
#print(f'curr_index = {curr_index}, text_len = {text_len}, markups = {None if text_len not in text._markups else text._markups[text_len]}')
if text_len > 0 and text_len in text._markups:
markups = text._markups.pop(text_len)
index = text_len - 1
if index in text._markups:
text._markups[index].extend(markups)
else:
text._markups[index] = markups
#print(f'AFTER markups = {None if index not in text._markups else text._markups[index]}')
return text
@staticmethod
def tokens_for_marked_up_text(marked_up_text):
"""
Returns a list of tokens for the given MarkedUpText.
"""
def try_token(token_value, token_list):
if len(token_value) > 0:
space_before = (token_value[0] in WHITE_SPACE_CHARS)
tokens = Tokenizer.plaintext_tokens_for_str(str(token_value), True)
token_value = ''
if len(tokens) > 0:
tokens[0].space_before = space_before
token_list.extend(tokens)
return token_value, token_list
token_list = []
token_value = ''
pending_end_markups = []
for i, char in enumerate(marked_up_text):
markups = marked_up_text.markups_for_index(i)
# markups is a list of MarkupStart and MarkupEnd objects or
# None if there are None
# Since Markups are inclusive of their index, the MarkupStarts must
# be appended before the next char and the MarkupEnds must be
# appended after the next character is added
if markups:
token_value, token_list = try_token(token_value, token_list)
for markup in markups:
if isinstance(markup, MarkupStart):
token_list.append(markup)
else:
pending_end_markups.append(markup)
token_value += char
if pending_end_markups:
token_value, token_list = try_token(token_value, token_list)
for markup in pending_end_markups:
token_list.append(markup)
pending_end_markups = []
token_value, token_list = try_token(token_value, token_list)
return token_list
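# Characters that may be escaped with a backslash in the source text.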
_what_can_be_escaped = {'{', '}', '=', '\\', '(', ')', ','}
def tokenize(self, file=True):
"""
Turn the raw text into tokens that the compiler can use.
If file is true, the tokenizer assumes that the text is from a file and
bookends the tokens with TT.FILE_START and TT.FILE_END
"""
self._tokens = []
self._plain_text = ''
what_can_be_escaped = self._what_can_be_escaped
if file:
self._tokens.append(Token(TT.FILE_START, '<FILE START>', self._pos.copy()))
print_progress = self._print_progress_bar
if print_progress:
text_len = len(self._text)
prefix = prog_bar_prefix('Tokenizing', self._pos.file_path)
refresh = calc_prog_bar_refresh_rate(text_len)
full_bar_printed = False
if print_progress_bar(0, text_len, prefix):
full_bar_printed = True
# By default, all text is plain text until something says otherwise
while self._current_char is not None:
i = self._pos.idx
if print_progress and (i % refresh) == 0:
print_progress_bar(i, text_len, prefix)
cc = self._current_char
t = None
if is_escaped(i, self._text, what_can_be_escaped):
self._plain_text_char()
elif is_escaping(i, self._text, what_can_be_escaped):
self._advance() # Just advance because it is just escaping something else
elif cc in END_LINE_CHARS:
self._try_word_token()
self._advance()
pos_start = self._pos.copy()
if self._current_char in END_LINE_CHARS:
while self._current_char in END_LINE_CHARS:
# Do nothing, just eat the END_LINE_CHARS now that we know that there is a PARAGRAPH_BREAK
self._advance()
t = Token(TT.PARAGRAPH_BREAK, TT.PARAGRAPH_BREAK, pos_start, self._pos.copy())
elif cc in NON_END_LINE_CHARS:
self._try_word_token()
self._advance()
elif cc == '{':
if self._unpaired_cbrackets == 0:
self._first_unpaired_bracket_pos = self._pos.copy()
self._unpaired_cbrackets += 1
t = Token(TT.OCBRACE, '{', self._pos.copy(), space_before=self._previous_char)
self._advance()
elif cc == '}':
self._unpaired_cbrackets -= 1
if self._unpaired_cbrackets < 0:
raise InvalidSyntaxError(self._pos.copy(), self._pos.copy().advance(),
'Unpaired, unescaped, closing curly bracket "}". You need to add an open curly bracket "{" before it or escape it by putting a backslash before it.')
t = Token(TT.CCBRACE, '}', self._pos.copy(), space_before=self._previous_char)
self._advance()
elif cc == '=':
t = Token(TT.EQUAL_SIGN, '=', self._pos.copy(), space_before=self._previous_char)
self._advance()
elif cc == '(':
if self._unpaired_oparens == 0:
self._first_unpaired_oparens_pos = self._pos.copy()
self._unpaired_oparens += 1
t = Token(TT.OPAREN, '(', self._pos.copy(), space_before=self._previous_char)
self._advance()
elif cc == ')':
self._unpaired_oparens -= 1
if self._unpaired_oparens < 0:
raise InvalidSyntaxError(self._pos.copy(), self._pos.copy().advance(),
'Unpaired, unescaped, closing parenthesis ")". You need to add an open parenthesis "(" before it or escape it by putting a backslash before it.')
t = Token(TT.CPAREN, ')', self._pos.copy(), space_before=self._previous_char)
self._advance()
elif cc == ',':
t = Token(TT.COMMA, ',', self._pos.copy(), space_before=self._previous_char)
self._advance()
elif cc == '\\':
t = self._tokenize_cntrl_seq()
else:
self._plain_text_char()
if t is not None:
# Actually append the Token (or list of tokens) if there is a Token to append
self._try_word_token()
if isinstance(t, Token):
self._tokens.append(t)
else:
# t must be a list of tokens
self._tokens.extend(t)
if print_progress and not full_bar_printed:
print_progress_bar(text_len, text_len, prefix)
if self._unpaired_cbrackets > 0:
raise InvalidSyntaxError(self._first_unpaired_bracket_pos.copy(), self._first_unpaired_bracket_pos.copy().advance(),
f'{self._unpaired_cbrackets} unpaired, unescaped, opening curly bracket(s) "{{" starting from this opening curly bracket. Either escape each one by putting a backslash before them or pair them with a closing curly bracket "}}".')
if self._unpaired_oparens > 0:
raise InvalidSyntaxError(self._first_unpaired_oparens_pos.copy(), self._first_unpaired_oparens_pos.copy().advance(),
f'{self._unpaired_oparens} unpaired, unescaped, opening parenthes(es) "(" starting from this open parenthes(es). Either escape each one by putting a backslash before them or pair them with a closing parenthesis ")".')
self._try_word_token()
if file:
self._tokens.append(Token(TT.FILE_END, '<FILE END>', self._pos.copy()))
return self._tokens
# -------------------------------------------------------------------------
# Parsing Methods
def _tokenize_cntrl_seq(self):
"""
Parse a control sequence.
"""
t = None
pos_start = self._pos.copy()
# NOTE: Multi-line matches tend to be longer and so need to come before
# single-line matches because shorter matches will match before longer
# matches, even if the longer match would have worked had it been tried
# Multiple Line Python ----------------------
if self._match(TT_M.MULTI_LINE_PYTH_1PASS_EXEC_START):
t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_1PASS_EXEC_END, 1, pos_start)
elif self._match(TT_M.MULTI_LINE_PYTH_1PASS_EVAL_START):
t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_1PASS_EVAL_END, 1, pos_start, use_eval=True)
elif self._match(TT_M.MULTI_LINE_PYTH_2PASS_EXEC_START):
t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_2PASS_EXEC_END, 2, pos_start)
elif self._match(TT_M.MULTI_LINE_PYTH_2PASS_EVAL_START):
t = self._tokenize_python(TT_M.MULTI_LINE_PYTH_2PASS_EVAL_END, 2, pos_start, use_eval=True)
# One Line Python -----------------------
elif self._match(TT_M.ONE_LINE_PYTH_1PASS_EXEC_START):
t = self._tokenize_python(TT_M.ONE_LINE_PYTH_1PASS_EXEC_END, 1, pos_start, one_line=True)
elif self._match(TT_M.ONE_LINE_PYTH_1PASS_EVAL_START):
t = self._tokenize_python(TT_M.ONE_LINE_PYTH_1PASS_EVAL_END, 1, pos_start, one_line=True, use_eval=True)
elif self._match(TT_M.ONE_LINE_PYTH_2PASS_EXEC_START):
t = self._tokenize_python(TT_M.ONE_LINE_PYTH_2PASS_EXEC_END, 2, pos_start, one_line=True)
elif self._match(TT_M.ONE_LINE_PYTH_2PASS_EVAL_START):
t = self._tokenize_python(TT_M.ONE_LINE_PYTH_2PASS_EVAL_END, 2, pos_start, one_line=True, use_eval=True)
# Comment ----------------------
elif self._match(TT_M.MULTI_LINE_COMMENT_START):
t = self._tokenize_comment(pos_start, one_line=False)
elif self._match(TT_M.SINGLE_LINE_COMMENT_START):
t = self._tokenize_comment(pos_start, one_line=True)
# Command --------------------------
else:
# It is an identifier, so tokenize it
t = self._tokenize_identifier()
return t
def _tokenize_python(self, end_codes, pass_num, pos_start, one_line=False, use_eval=False):
"""
Parses the string from self._pos as python code until one of the end_codes
are reached.
If one_line is true, that means that this python statement is supposed
to only be one line so it cannot turn the rest of the file
into python.
"""
python_str = ''
pos_end = self._pos.copy()
match_found = False
while self._current_char is not None:
if self._match(end_codes, False):
# Only eat the chars if they are not in the END_LINE_CHARS.
# Otherwise it is needed in order to determine whether to put
# in a PARAGRAPH_BREAK
if not self._current_char in END_LINE_CHARS:
self._match(end_codes)
match_found = True
break
else:
# Since python has not ended yet, just add the given char to it
python_str += self._current_char
self._advance()
if (self._current_char is None) and (not match_found) and (not one_line):
raise InvalidSyntaxError(pos_start, pos_end,
f'You made the rest of your file Python because there was no matching character sequence to end the Python section of your document denoted by this character sequence.')
pos_end = self._pos.copy()
if pass_num == 1:
if use_eval:
return Token(TT.EVAL_PYTH1, python_str, pos_start, pos_end)
else:
return Token(TT.EXEC_PYTH1, python_str, pos_start, pos_end)
else:
if use_eval:
return Token(TT.EVAL_PYTH2, python_str, pos_start, pos_end)
else:
return Token(TT.EXEC_PYTH2, python_str, pos_start, pos_end)
def _tokenize_comment(self, pos_start, one_line=False):
"""
Parses a comment, basically just eating any characters it finds until
the comment is done. None of the characters are put into any Token,
so the Parser will never even see them.
"""
pos_end = self._pos.copy()
if one_line:
# It's a one-line comment
while self._current_char is not None:
if self._match(TT_M.SINGLE_LINE_COMMENT_END):
break
else:
self._advance()
else:
found_match = False
# it's a continuous comment, so parse until '<-%\' or '<-#\' is found
while self._current_char is not None:
if self._match(TT_M.MULTI_LINE_COMMENT_END):
found_match = True
break
else:
self._advance()
if self._current_char is None and not found_match:
raise InvalidSyntaxError(pos_start, pos_end, 'You commented out the rest of your file because there was no matching "<-%\\" or "<-#\\" to end the comment.')
if len(self._tokens) > 0 and self._tokens[-1].type == TT.PARAGRAPH_BREAK:
# Need to eat all end line white space now so that another
# PARAGRAPH_BREAK cannot be produced due to this comment text being
# ignored and there being white space before it. Two PARAGRAPH_BREAKs
# next to each other breaks all grammar rules and causes the Parser
# to terminate early (i.e. before it reaches the FILE_END token)
while self._current_char in END_LINE_CHARS:
self._advance()
def _tokenize_identifier(self):
"""
Tokenize an identifier like \\bold or \\i
"""
identifier_name = ''
start_pos = self._pos.copy()
space_before = self._previous_char
#tokens = []
#tokens.append(Token(TT.BACKSLASH, '\\', start_pos.copy(), self._pos.copy(), space_before=space_before))
self._advance() # advance past '\\'
problem_start = self._pos.copy()
while self._current_char is not None:
if self._current_char in CMND_CHARS:
identifier_name += self._current_char
self._advance()
else:
if len(identifier_name) == 0:
raise ExpectedValidCmndNameError(problem_start, self._pos.copy(),
f'All commands must specify a valid name with all characters of it in {CMND_CHARS}\n"{self._current_char}" is not one of the valid characters. You either forgot to designate a valid command name or forgot to escape the backslash before this character.')
token = Token(TT.IDENTIFIER, identifier_name, start_pos.copy(), self._pos.copy(), space_before=space_before)
return token
# -------------------------------------------------------------------------
# Other Helper Methods
def _try_word_token(self):
"""
Create a WORD token given what is in self._plain_text
"""
self._plain_text = re.sub(r'(\s)+', '', self._plain_text)
if len(self._plain_text) > 0:
self._tokens.append(Token(TT.WORD, self._plain_text, self._plain_text_start_pos, self._pos.copy(), space_before=self._space_before_plaintext))
self._space_before_plaintext = False
self._plain_text = ''
self._plain_text_start_pos = None
def _plain_text_char(self):
"""
The current_char is a plain_text character
"""
if self._plain_text_start_pos is None:
self._plain_text_start_pos = self._pos.copy()
if self._pos.idx - 1 >= 0:
self._space_before_plaintext = (self._text[self._pos.idx - 1] in WHITE_SPACE_CHARS)
else:
self._space_before_plaintext = False
self._plain_text += self._current_char
self._advance()
def _match(self, matches:list, advance_past_on_match=True):
"""
Takes the given list of strings to match and sees if any of them match
the text at the current index of the self._text
This method does not look forward in the text for a match, just returns
True if the string starting at the current index matches any of
the matches.
If advance_past_on_match, then if this method matches something, it will
advance past the string it matched.
"""
index = self._pos.idx
for str_to_match in matches:
if ((index + len(str_to_match)) < len(self._text)) \
and (str_to_match == self._text[index:index + len(str_to_match)]):
if advance_past_on_match:
self._advance(len(str_to_match))
return True
return False
# -----------------------------------------------------------------------------
# Nodes for Parser
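# DUMMY_POSITION is a placeholder used for synthetic tokens and nodes that
# have no real location in a source file.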
DUMMY_POSITION = Position(0, 0, 0, 'Dummy File Name', 'Dummy File Text')
class LeafNode:
"""
Base class for all Leaf Nodes (nodes that can only have one token)
"""
__slots__ = ['start_pos', 'end_pos']
def __init__(self, token):
"""
Takes a token and sets the start and end positions using it. Still
must name the token in the actual node (i.e. self.writing, etc.)
"""
self.start_pos = token.start_pos
self.end_pos = token.end_pos
class FileNode:
__slots__ = ['start_pos', 'end_pos', 'file_start', 'document', 'file_end']
def __init__(self, file_start, document, file_end):
self.file_start = file_start # Token
self.document = document # DocumentNode
self.file_end = file_end # Token
self.start_pos = file_start.start_pos
self.end_pos = file_end.end_pos
def __repr__(self):
return f'{self.__class__.__name__}({self.file_start}, {self.document}, {self.file_end})'
class DocumentNode:
__slots__ = ['start_pos', 'end_pos', 'starting_paragraph_break', 'paragraphs', 'ending_paragraph_break']
def __init__(self, paragraphs, starting_paragraph_break=None, ending_paragraph_break=None):
self.starting_paragraph_break = starting_paragraph_break # Token
self.paragraphs = paragraphs # List of ParagraphNodes
self.ending_paragraph_break = ending_paragraph_break # Token
if starting_paragraph_break:
self.start_pos = starting_paragraph_break.start_pos
elif len(paragraphs) > 0:
self.start_pos = paragraphs[0].start_pos
else:
self.start_pos = DUMMY_POSITION.copy()
if len(paragraphs) > 0:
self.end_pos = paragraphs[-1].end_pos
elif ending_paragraph_break:
self.end_pos = ending_paragraph_break.end_pos
elif starting_paragraph_break:
self.end_pos = starting_paragraph_break.end_pos
else:
self.end_pos = DUMMY_POSITION.copy()
def __repr__(self):
return f'{self.__class__.__name__}({self.paragraphs})'
class ParagraphNode:
__slots__ = ['start_pos', 'end_pos', 'writing', 'paragraph_break']
def __init__(self, paragraph_break, writing):
self.paragraph_break = paragraph_break # Token
self.writing = writing # WritingNode
self.start_pos = writing.start_pos
if paragraph_break:
self.end_pos = paragraph_break.end_pos
else:
self.end_pos = writing.end_pos
def __repr__(self):
return f'{self.__class__.__name__}({self.writing})'
class WritingNode(LeafNode):
__slots__ = LeafNode.__slots__[:]
__slots__.extend(['writing'])
def __init__(self, writing):
"""
writing can be either a python node or a plain_text node.
"""
super().__init__(writing)
self.writing = writing # PythonNode or PlainTextNode
def __repr__(self):
return f'{self.__class__.__name__}({self.writing})'
class PythonNode(LeafNode):
__slots__ = LeafNode.__slots__[:]
__slots__.extend(['python', 'python_string'])
def __init__(self, python):
"""
python is a single python Token (PASS1EXEC|PASS2EXEC|PASS1EVAL|PASS2EVAL)
"""
super().__init__(python)
self.python = python # one of the exec or eval Nodes
self.python_string = None
def __repr__(self):
return f'{self.__class__.__name__}({self.python})'
class CommandDefNode:
__slots__ = ['start_pos', 'end_pos', 'cmnd_name', 'cmnd_params', 'cmnd_key_params', 'text_group']
def __init__(self, cmnd_name, cmnd_params, cmnd_key_params, text_group):
self.start_pos = cmnd_name.start_pos
self.end_pos = text_group.end_pos
self.cmnd_name = cmnd_name # IDENTIFIER Token
self.cmnd_params = cmnd_params # list of CommandParamNodes
self.cmnd_key_params = cmnd_key_params # list of CommandKeyParamNodes
self.text_group = text_group # the text_group that the command will run
def __repr__(self):
cmnd_args = ''
for i, arg in enumerate(self.cmnd_params):
if i > 0:
cmnd_args += ', '
cmnd_args += f'{arg}'
return f'{self.__class__.__name__}({self.cmnd_name} = ({cmnd_args}) ' + '{' + f'{self.text_group}' + '}' + ')'
class CommandParamNode:
__slots__ = ['start_pos', 'end_pos', 'identifier']
def __init__(self, identifier):
self.start_pos = identifier.start_pos
self.end_pos = identifier.end_pos
self.identifier = identifier # IDENTIFIER Token
def __repr__(self):
return f'{self.__class__.__name__}({self.identifier})'
class CommandKeyParamNode:
__slots__ = ['start_pos', 'end_pos', 'key', 'text_group']
def __init__(self, key, text_group):
self.start_pos = key.start_pos
self.end_pos = text_group.end_pos
self.key = key # WORD Token
self.text_group = text_group # TextGroupNode
def __repr__(self):
return f'{self.__class__.__name__}({self.text_group})'
class CommandCallNode:
__slots__ = ['start_pos', 'end_pos', 'cmnd_name', 'cmnd_tex_args', 'cmnd_key_args']
def __init__(self, cmnd_name, cmnd_tex_args, cmnd_key_args):
self.start_pos = cmnd_name.start_pos
self.end_pos = cmnd_name.end_pos
self.cmnd_name = cmnd_name # IDENTIFIER Token
self.cmnd_tex_args = cmnd_tex_args # list of CommandTexArgNode
self.cmnd_key_args = cmnd_key_args # dict of keyword:CommandArgNode pairs
def __repr__(self):
string = f'{self.__class__.__name__}(\\{self.cmnd_name}'
# add args
for arg in self.cmnd_tex_args:
string += '{' + f'{arg}' + '}'
# add kwargs
for kwarg in self.cmnd_key_args:
string += '{' + f'{kwarg.key}={kwarg.text_group}' + '}'
# end string
string += ')'
return string
class CommandTexArgNode:
__slots__ = ['start_pos', 'end_pos', 'text_group']
def __init__(self, text_group):
self.start_pos = text_group.start_pos
self.end_pos = text_group.end_pos
self.text_group = text_group # TextGroupNode
def __repr__(self):
return f'{self.__class__.__name__}({self.text_group})'
class CommandKeyArgNode:
__slots__ = ['start_pos', 'end_pos', 'key', 'text_group']
def __init__(self, key, text_group):
self.start_pos = key.start_pos
self.end_pos = text_group.end_pos
self.key = key # IDENTIFIER Token
self.text_group = text_group # TextGroupNode
def __repr__(self):
return f'{self.__class__.__name__}({self.key}={self.text_group})'
class TextGroupNode:
__slots__ = ['start_pos', 'end_pos', 'ocbrace', 'document', 'ccbrace']
def __init__(self, ocbrace, document, ccbrace):
self.start_pos = ocbrace.start_pos
self.end_pos = ccbrace.end_pos
self.ocbrace = ocbrace
self.document = document
self.ccbrace = ccbrace
def __repr__(self):
return f'{self.__class__.__name__}({self.document})'
class PlainTextNode(LeafNode):
__slots__ = LeafNode.__slots__[:]
__slots__.extend(['plain_text'])
def __init__(self, plain_text:list):
"""
plain_text is a list of OCBRACE, CCBRACE, EQUAL_SIGN, and WORD Tokens
in any order.
"""
self.plain_text = plain_text # list of Tokens
if len(plain_text) > 0:
self.start_pos = plain_text[0].start_pos
self.end_pos = plain_text[-1].end_pos
else:
self.start_pos = DUMMY_POSITION.copy()
self.end_pos = DUMMY_POSITION.copy()
def __repr__(self):
return f'{self.__class__.__name__}({self.plain_text})'
# -----------------------------------------------------------------------------
# Parser Class and Related
class ParseResult:
"""
A class that wraps results from the Parser because the parser will be
trying out different things (is the next token plain text or a
paragraph break? neither? then what's the next thing it could be?) and
this ParseResult allows the Parser to try something and then undo that
thing. An error can also be returned if none of the things that were
supposed to work actually work.
"""
__slots__ = ['error', 'node', 'last_registered_advance_count', 'advance_count', 'to_reverse_count', 'affinity']
def __init__(self):
self.error = None
self.node = None
self.last_registered_advance_count = 0
self.advance_count = 0
self.to_reverse_count = 0
self.affinity = 0
def register_advancement(self):
"""
Registers that the Parser advanced a token so that that advancement
can be undone later if need be.
"""
self.last_registered_advance_count = 1
self.advance_count += 1
def register(self, res):
"""
Registers a result, adding the error to this result if there was one and
returning the node.
"""
self.last_registered_advance_count = res.advance_count
self.advance_count += res.advance_count
self.affinity += res.affinity
if res.error: self.error = res.error
return res.node
def register_try(self, res):
"""
Returns None if the given result did not work and the Node of
the result if it did.
"""
if res.error:
self.affinity += res.affinity
self.to_reverse_count = res.advance_count
return None
return self.register(res)
def reversing(self):
"""
The last try is being reversed, so set the to_reverse_count back to 0 and
return what it was so that it can be reversed.
"""
to_reverse = self.to_reverse_count
self.to_reverse_count = 0
return to_reverse
def add_affinity(self, amt=1):
"""
Affinity is how far along the result was getting before it ran into an
error. This is useful for when there are multiple possibilities as
to where the errors may be coming from, such as in the writing rule
of this language's grammar. This affinity can be used to see whether
any of the rules applied or not because if none of them did, then
the parser is probably just at the end of the file.
"""
self.affinity += amt
def success(self, node):
self.node = node
return self
def failure(self, error):
if not self.error or self.last_registered_advance_count == 0:
self.error = error
return self
class Parser:
"""
Creates an Abstract Syntax Tree based on the rules in grammar.txt.
Look at grammar.txt for the outline of what the Parser is trying to do.
It takes each rule and recursively tries to make it work. When a rule
does not work, it returns a ParseResult with an error in
ParseResult.error. In the case of the error, the index is changed
back to what it was before the Parser tried the rule.
If there was no error, then the Node that was successfully created by
the rule is returned.
This Parser uses a top-down approach to parsing, as opposed to a bottom-up
approach to parsing, which is a far harder method of parsing to write
a Parser for.
"""
def __init__(self, tokens, print_progress_bar=False):
# Progress Printing Info
self._print_progress_bar = print_progress_bar
self._tokens_len = len(tokens)
file_path = '' if self._tokens_len == 0 else tokens[0].start_pos.file_path
self._progress_bar_prefix = prog_bar_prefix('Parsing', file_path)
self._prog_bar_refresh = calc_prog_bar_refresh_rate(self._tokens_len)
# Things needed to actually parse the tokens
self._tokens = tokens
self._tok_idx = -1
self._current_tok = None
self._advance()
def parse(self):
"""
Returns a ParseResult with either an error in res.error or a node in
res.node
"""
if self._print_progress_bar:
print_progress_bar(self._tok_idx, self._tokens_len, self._progress_bar_prefix)
if self._current_tok.type == TT.FILE_START:
res = self._file()
else:
res = self._document()
if self._print_progress_bar:
print_progress_bar(self._tok_idx, self._tokens_len, self._progress_bar_prefix)
return res
# ------------------------------
# Main Helper Methods
def _advance(self, parse_result=None):
"""
Advances to the next token. It returns the token before the new one and
registers an advancement with the given parse_result for convenience.
"""
prev_token = self._current_tok
if parse_result:
parse_result.register_advancement()
self._tok_idx += 1
self._update_current_tok()
return prev_token
def _reverse(self, parse_result):
self._tok_idx -= parse_result.reversing()
self._update_current_tok()
def _update_current_tok(self):
if self._tok_idx >= 0 and self._tok_idx < len(self._tokens):
self._current_tok = self._tokens[self._tok_idx]
else:
# TT.NONE_LEFT will NOT match any Tokens needed for any rule,
# forcing an error to occur in each rule and the rules to
# terminate. This is much safer than just not changing the token
# any more when you run out of tokens to parse because now, even if
# you have a low-level rule that will accept infinitely many of a
# token of a certain type, that type will not be infinitely given
# if the list of tokens ends on it
if self._current_tok is not None:
self._current_tok = Token(TT.NONE_LEFT, 'NO TOKENS LEFT', self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy())
else:
dummy_start_pos = DUMMY_POSITION.copy()
dummy_end_pos = dummy_start_pos.copy()
self._current_tok = Token(TT.NONE_LEFT, 'NO TOKENS LEFT', dummy_start_pos, dummy_end_pos)
# ------------------------------
# Rules
def _file(self):
"""
A document but with a FILE_START token at the beginning and a FILE_END
token at the end.
"""
res = ParseResult()
start_pos = self._current_tok.start_pos.copy()
if self._current_tok.type == TT.FILE_START:
file_start = self._advance(res)
else:
return res.failure(InvalidSyntaxError(start_pos, start_pos.copy().advance(),
'For some reason, your file does not begin with a FILE_START Token. This is a Compiler Error, so contact the developer and let them know.'))
document = res.register(self._document())
if res.error: return res
if self._current_tok.type == TT.FILE_END:
file_end = self._advance(res)
else:
return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(),
f'Reached the end of the file but there was no FILE_END Token. The file must have Invalid Syntax or the compiler is having issues.\nALL TOKENS: {self._tokens}\n\nLAST TOKEN SEEN: {self._current_tok}\n\nLast Token Seen Index: {self._tok_idx}'))
return res.success(FileNode(file_start, document, file_end))
def _document(self):
"""
A document is a group of paragraphs, essentially.
"""
res = ParseResult()
paragraphs = []
# will eat token if there, otherwise nothing
self._eat_pb(res)
print_prog_bar = self._print_progress_bar
if print_prog_bar:
refresh = self._prog_bar_refresh
toks_len = self._tokens_len
prefix = self._progress_bar_prefix
while True:
# paragraph will be None if the try failed, otherwise it will be the
# new ParagraphNode
result = self._paragraph()
if result.error and result.affinity > 0:
res.register(result)
return res
paragraph = res.register_try(result)
# If, when we tried to make another paragraph, it failed,
# that means that there are no more paragraphs left in the
# document, so undo the try by going back the number of
# tokens that the try went forward
if not paragraph:
self._reverse(res)
break
else:
if print_prog_bar:
i = self._tok_idx
if (i % refresh) == 0:
print_progress_bar(i, toks_len, prefix)
paragraphs.append(paragraph)
self._eat_pb(res)
return res.success(DocumentNode(paragraphs))
def _paragraph(self):
"""
A piece of writing, possibly preceded by a paragraph break.
"""
res = ParseResult()
start_pos = self._current_tok.start_pos.copy()
# Check for Paragraph Break
paragraph_break = self._eat_pb(res)
# Check for Writing
writing = res.register(self._writing())
if res.error:
return res
# writing should be a WritingNode and paragraph_break is a Token of
# type PARAGRAPH_BREAK
return res.success(ParagraphNode(paragraph_break, writing))
def _writing(self):
"""
A peice of writing such as something to run in python, a command def
or command call, text group, or pain text.
"""
res = ParseResult()
start_pos = self._current_tok.start_pos.copy()
results = []
new_res = self._python()
results.append(new_res)
writing = res.register_try(new_res)
if not writing:
self._reverse(res)
new_res = self._cmnd_def()
results.append(new_res)
writing = res.register_try(new_res)
if not writing:
self._reverse(res)
new_res = self._cmnd_call()
results.append(new_res)
writing = res.register_try(new_res)
if not writing:
self._reverse(res)
new_res = self._plain_text()
results.append(new_res)
writing = res.register_try(new_res)
if not writing:
self._reverse(res)
new_res = self._text_group()
results.append(new_res)
writing = res.register_try(new_res)
if not writing:
best_result = None
for result in results:
if result.affinity > 0 and ((not best_result) or result.affinity > best_result.affinity):
best_result = result
if not best_result:
return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(),
'There was no writing, but writing was expected.'
))
else:
return res.failure(best_result)
# writing should be either a PythonNode or a PlainTextNode
return res.success(WritingNode(writing))
def _python(self):
"""
This fulfills the python rule of the grammar.
"""
res = ParseResult()
ct = self._current_tok
type = self._current_tok.type
# Python Switch Statement to figure out whether the token is a Python Token
try:
python = {
TT.EXEC_PYTH1: ct,
TT.EVAL_PYTH1: ct,
TT.EXEC_PYTH2: ct,
TT.EVAL_PYTH2: ct
}[ct.type]
except KeyError:
return res.failure(InvalidSyntaxError(ct.start_pos.copy(), ct.start_pos.copy().advance(),
'Expected a Token of Type PASS1EXEC, PASS1EVAL, PASS2EXEC, or PASS2EVAL but did not get one.')
)
self._advance(res)
# python should be a single python Token of type PASS1EXEC or PASS2EXEC
# or PASS1EVAL or PASS2EVAL
return res.success(PythonNode(python))
def _cmnd_def(self):
"""
A command definition. For example:
\\hi = (\\first_name, \\last_name={}) {
Hello \\first_name \\last_name
}
"""
res = ParseResult()
cmnd_name = res.register(self._need_token(TT.IDENTIFIER))
if res.error: return res
res.add_affinity()
self._eat_pb(res)
equal_sign = res.register(self._need_token(TT.EQUAL_SIGN))
if res.error: return res
res.add_affinity()
self._eat_pb(res)
cmnd_params = []
# (OPAREN PB? (cmnd_params PB? (COMMA PB? cmnd_params)*)? PB? CPAREN)?
oparen = res.register_try(self._need_token(TT.OPAREN))
if oparen:
res.add_affinity()
self._eat_pb(res)
cmnd_param = res.register_try(self._cmnd_param())
if not cmnd_param:
self._reverse(res)
else:
res.add_affinity()
cmnd_params.append(cmnd_param)
while True:
self._eat_pb(res)
comma = res.register_try(self._need_token(TT.COMMA))
if not comma:
self._reverse(res)
break
res.add_affinity()
cmnd_param = res.register(self._cmnd_param())
if res.error:
return res.failure(InvalidSyntaxError(
comma.start_pos.copy(), comma.end_pos.copy(),
'Extra comma. You need to either have a variable name after it or remove it.'
))
res.add_affinity()
cmnd_params.append(cmnd_param)
self._eat_pb(res)
cparen = res.register(self._need_token(TT.CPAREN))
if res.error:
return res.failure(InvalidSyntaxError(
oparen.start_pos, oparen.end_pos,
'You need to have a matching closing parenthesis ")" to match this parenthesis after your parameters for the Command Definition.'
))
res.add_affinity()
self._eat_pb(res)
# text_group
text_group = res.register(self._text_group())
if res.error:
return res.failure(InvalidSyntaxError(
self._current_tok.start_pos, self._current_tok.end_pos,
'Here, you need to have a pair of curly brackets "{}", at the very least, in order to finish off this command definition.'
))
res.add_affinity()
cmnd_tex_params = []
cmnd_key_params = []
for param in cmnd_params:
if isinstance(param, CommandParamNode):
cmnd_tex_params.append(param)
elif isinstance(param, CommandKeyParamNode):
cmnd_key_params.append(param)
else:
raise Exception(f'This was outputted as a command parameter but is not one: {param}')
return res.success(CommandDefNode(cmnd_name, cmnd_tex_params, cmnd_key_params, text_group))
def _cmnd_param(self):
"""
A command parameter: either \\hi = {a default value} or \\hi
"""
res = ParseResult()
self._eat_pb(res)
text_group = res.register_try(self._cmnd_key_param())
if text_group:
return res.success(text_group)
self._reverse(res)
text_group = res.register_try(self._cmnd_tex_param())
if text_group:
return res.success(text_group)
else:
self._reverse(res)
return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(),
'Expected a Command Parameter here.'))
def _cmnd_key_param(self):
"""
A command key parameter such as \\hi = {a default value}
"""
res = ParseResult()
self._eat_pb(res)
key = res.register(self._need_token(TT.IDENTIFIER))
if res.error: return res
res.add_affinity()
self._eat_pb(res)
res.register(self._need_token(TT.EQUAL_SIGN))
if res.error: return res
res.add_affinity()
self._eat_pb(res)
text_group = res.register(self._text_group())
if res.error: return res
res.add_affinity()
return res.success(CommandKeyParamNode(key, text_group))
def _cmnd_tex_param(self):
"""
A command parameter that is just an IDENTIFIER
"""
res = ParseResult()
ident = res.register(self._need_token(TT.IDENTIFIER))
res.add_affinity()
if not ident:
return res
else:
return res.success(CommandParamNode(ident))
def _cmnd_call(self):
"""
A command call like
\\hi
or
\\hi{FirstName}{\\last_name={LastName}}
"""
res = ParseResult()
cmnd_name = res.register(self._need_token(TT.IDENTIFIER))
if res.error: return res
res.add_affinity()
args = []
while True:
arg = res.register_try(self._cmnd_arg())
if not arg:
self._reverse(res)
break
res.add_affinity()
args.append(arg)
cmnd_tex_args = []
cmnd_key_args = []
for arg in args:
if isinstance(arg, CommandTexArgNode):
cmnd_tex_args.append(arg)
elif isinstance(arg, CommandKeyArgNode):
cmnd_key_args.append(arg)
else:
raise Exception(f'Expected a command argument Node, instead got: {arg}')
return res.success(CommandCallNode(cmnd_name, cmnd_tex_args, cmnd_key_args))
def _cmnd_arg(self):
"""
A cmnd argument such as {FirstName} or {\\first_name={FirstName}} in
\\hi{FirstName}{\\first_name={FirstName}}
"""
res = ParseResult()
arg = res.register_try(self._cmnd_key_arg())
if arg:
return res.success(arg)
self._reverse(res)
arg = res.register_try(self._cmnd_tex_arg())
if arg:
return res.success(arg)
self._reverse(res)
return res.failure(InvalidSyntaxError(
self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(),
'Expected a Command Argument here.'
))
def _cmnd_tex_arg(self):
"""
A command text argument
\\he{FirstName}
"""
res = ParseResult()
text_group = res.register(self._text_group())
if res.error: return res
res.add_affinity()
return res.success(CommandTexArgNode(text_group))
def _cmnd_key_arg(self):
"""
A command key argument such as {\\first_name={FirstName}} in
\\he{\\first_name={FirstName}}
"""
res = ParseResult()
res.register(self._need_token(TT.OCBRACE))
if res.error: return res
res.add_affinity()
ident = res.register(self._need_token(TT.IDENTIFIER))
if res.error: return res
res.add_affinity()
self._eat_pb(res)
res.register(self._need_token(TT.EQUAL_SIGN))
if res.error: return res
res.add_affinity()
self._eat_pb(res)
text_group = res.register(self._text_group())
if res.error: return res
res.add_affinity()
res.register(self._need_token(TT.CCBRACE))
if res.error: return res
res.add_affinity()
return res.success(CommandKeyArgNode(ident, text_group))
def _text_group(self):
"""
A text group is
{ document }
"""
res = ParseResult()
ocb = res.register(self._need_token(TT.OCBRACE))
if res.error: return res
res.add_affinity()
document = res.register(self._document())
if res.error: return res
res.add_affinity()
ccb = res.register(self._need_token(TT.CCBRACE))
if res.error: return res
res.add_affinity()
return res.success(TextGroupNode(ocb, document, ccb))
def _plain_text(self):
res = ParseResult()
plain_text = []
while True:
cc = self._current_tok
start_pos = cc.start_pos
# Python Switch Statement
try:
new_tok = {
TT.BACKSLASH: cc,
TT.EQUAL_SIGN: cc,
TT.COMMA: cc,
TT.OPAREN: cc,
TT.CPAREN: cc,
TT.OBRACE: cc,
TT.CBRACE: cc,
TT.WORD: cc
}[cc.type]
# If I remember correctly, you cannot directly wrap the dict
# in this append method because it appends the error
# to the list when there is an error, which is problematic
plain_text.append(new_tok)
res.add_affinity()
except KeyError:
break
self._advance(res)
if len(plain_text) == 0:
return res.failure(InvalidSyntaxError(start_pos.copy(), start_pos.copy().advance(),
'Expected at least 1 WORD, BACKSLASH, EQUAL_SIGN, COMMA, OPAREN, CPAREN, OBRACE, or CBRACE Token.'
)
)
# plain_text is a list of BACKSLASH, EQUAL_SIGN, COMMA, OPAREN, CPAREN,
# OBRACE, CBRACE, and WORD Tokens in any order.
return res.success(PlainTextNode(plain_text))
# -------------------------------------------------------------------------
# Non-Rule Lesser Help Methods
def _eat_pb(self, parse_result):
"""
Eat a PARAGRAPH_BREAK
A helper method that, unlike the other methods, just exists because
there are many rules with PARAGRAPH_BREAK? in them. This
method does that, returning None if the current token is not
a PARAGRAPH_BREAK and the PARAGRAPH_BREAK Token if there is one.
If a PARAGRAPH_BREAK token is found, the method also advances past it.
"""
par_break = None
if self._current_tok.type == TT.PARAGRAPH_BREAK:
par_break = self._advance(parse_result)
return par_break
def _need_token(self, token_type):
"""
A helper method that checks whether the current token has the required
type. Returns a ParseResult with an error set if the current token is not
the required one, or a ParseResult whose node is the token if it is.
This method exists not because there is a Node for it (there is not one)
but because what this method does is something that needs to be done
a lot in the parse methods.
"""
res = ParseResult()
if not (self._current_tok.type == token_type):
return res.failure(InvalidSyntaxError(self._current_tok.start_pos.copy(), self._current_tok.end_pos.copy(),
f'Expected a Token of type {token_type}, but got token {self._current_tok}'))
return res.success(self._advance(res))
# -----------------------------------------------------------------------------
# Interpreter and Related Classes
class RunTimeResult:
"""
Wraps a return value in the Interpreter so that, when a visit method
finishes visiting a Node, it can tell the Node that visited it various
things such as whether to return immediately or not.
"""
__slots__ = ['value', 'error']
def __init__(self):
self.reset()
def reset(self):
self.value = None
self.error = None
def register(self, res):
"""
Register the returned result from a Node you just visited. This way,
if an error occurred and you should return, you can.
"""
self.error = res.error
return res.value
def success(self, value):
self.reset()
self.value = value
return self
def failure(self, error):
self.reset()
self.error = error
return self
class SymbolTable:
"""
The symbol table is used to store the commands.
"""
def __init__(self, parent=None):
self.symbols = {}
self.parent = parent
def get(self, name):
"""
Returns the value for the name if it is in the SymbolTable, None otherwise
"""
value = self.symbols.get(name, None)
if value is None and self.parent:
return self.parent.get(name)
return value
def set(self, name, value):
"""
Sets the value for a name in the symbol table
"""
self.symbols[name] = value
def remove(self, name):
"""
Removes a name from the symbol table.
"""
self.symbols.pop(name)
def import_(self, other_symbol_table, commands_to_import=None):
"""
Imports the symbols of the other symbol table into this one.
If commands_to_import is None, then import every command. Otherwise,
only import the commands with the names listed.
"""
if commands_to_import is None:
self.symbols.update(other_symbol_table.symbols)
else:
oth_syms = other_symbol_table.symbols
for command_name in commands_to_import:
if command_name in oth_syms:
self.symbols[command_name] = oth_syms[command_name]
else:
raise AssertionError(f'Could not import {command_name}.')
def copy(self):
import copy
new = SymbolTable(None if self.parent is None else self.parent.copy())
new.symbols = copy.deepcopy(self.symbols)
return new
def __repr__(self):
string = f'\n{type(self).__name__}('
string += f'symbols={self.symbols}'
string += ')'
return string
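# Illustrative note on SymbolTable lookup (the names below are placeholders):
# a child table falls back to its parent for reads, but writes stay local, so
# commands defined inside a command do not leak into the enclosing scope.
#
#   parent = SymbolTable(); parent.set('title', some_command)
#   child = SymbolTable(parent)
#   child.get('title')              # -> some_command (found via the parent)
#   child.set('title', other_cmnd)  # shadows 'title' for the child only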
class Context:
"""
Provides the Context for every command and piece of Python code that is
run; the Context determines what commands and variables are available
and when.
"""
__slots__ = ['display_name', 'file_path', 'entry_pos', 'parent',
'_globals', '_locals', 'symbols', '_token_document', 'global_level']
def __init__(self, display_name, file_path, parent=None, entry_pos=None, token_document=None, globals=None, locals=None, symbol_table=None):
"""
Context could be a function if in a function or the entire program
(global) if not in a function.
"""
self.display_name = display_name # the command/program name
self.file_path = file_path # the path to the file that the command is in
self.entry_pos = entry_pos # the position in the code where the context changed (where the command was called)
self.parent = parent # Parent context if there is one
# These are the globals and locals used by Python. The SymbolTable is
# used for Commands, not these
self._globals = globals # dict or None
self._locals = locals # dict or None
# Make sure that there are globals
self.globals() # will throw an error if there are no globals, even in parent contexts
if symbol_table is not None:
assert_instance(symbol_table, SymbolTable, or_none=False)
self.symbols = symbol_table # SymbolTable
elif parent is not None and parent.symbols is not None:
self.symbols = SymbolTable(parent.symbols)
else:
self.symbols = SymbolTable()
if token_document is not None:
self._token_document = token_document
else:
self._token_document = []
self.global_level = True
def __repr__(self):
string = f'\n{type(self).__name__}(\n'
string += f'\tdisplay_name={self.display_name}'
string += f'\tsymbols={self.symbols}'
string += f'\tglobals={self._globals}'
string += f'\tlocals={self._locals}'
string += f'\tparent={self.parent}'
string += '\n)'
return string
def copy(self):
_globals = None if self._globals is None else {key:val for key, val in self._globals.items()}
_locals = None if self._locals is None else {key:val for key, val in self._locals.items()}
entry_pos = None if self.entry_pos is None else self.entry_pos.copy()
parent = None if self.parent is None else self.parent.copy()
new = Context(self.display_name, self.file_path, parent, entry_pos, self._token_document[:], _globals, _locals)
new.symbols = self.symbols.copy()
return new
def gen_child(self, child_display_name:str, child_entry_pos=None, locals_to_add=None):
"""
Generates a child context i.e. a subcontext such as that which is inside
a command.
locals_to_add are things like the \\test variable below, which should
be made available to any Python Code that is inside the command
\\# Global Context
\\hello = (\\test) {
\\# This should have a subcontext where commands can be defined in
\\# here but not mess with those defined in the global context/
\\# any parent context
\\test \\# is defined in this child context
}
\\# \\test is undefined here, in this global context
"""
# Generate the new python locals. Because only one locals dict can be
# passed to an exec or eval method at a time, it must have all the
# references to parent locals in it so that it works as if it could
# look up the locals hierarchy as the SymbolTables do for Commands
# In other words, the child Context's locals must be a superset of this
# Context's locals
child_lcls = {} if (self._locals is None) else {key:val for key, val in self._locals.items()}
if locals_to_add:
child_lcls.update(locals_to_add)
parent = self
# Give the new context a reference to globals so that it does not have
# to walk up a bunch of parents to get it anyway
child = Context(child_display_name, self.file_path, parent, child_entry_pos, self.token_document(), self.globals(), child_lcls, SymbolTable(self.symbols))
child.global_level = False
return child
def import_(self, other_context, tokens_to_import=[], commands_to_import=None):
"""
Takes another context and imports its contents into this one.
"""
self.symbols.import_(other_context.symbols, commands_to_import)
self.globals().update(other_context.globals())
self.token_document().extend(tokens_to_import)
def globals(self):
if self._globals is not None:
return self._globals
elif self.parent is not None:
return self.parent.globals()
else:
raise Exception("You did not pass in globals to the Global Context.")
def locals(self):
return self._locals
def token_document(self):
"""
The list of tokens that should be given to the Placer object to actually
make the PDFDocument.
"""
return self._token_document
def set_token_document(self, new_doc):
self._token_document = new_doc
class InterpreterFlags:
"""
Flags for the Interpreter so that it can know what to do when it does
a pass over an Abstract Syntax Tree created by the Parser.
The difference between these flags and the context in the Interpreter is
that things in the flags stay the same for the entire AST pass
whereas the things in the context could change at each visit to a node.
"""
def __init__(self):
pass
class Interpreter:
"""
The interpreter visits each node in the Abstract Syntax Tree generated
by the Parser and actually runs the corresponding code for the
node.
"""
def __init__(self):
self._context_stack = []
self._curr_context = None
self._command_node_stack = []
self._curr_command_node = None
def _push_context(self, context):
self._context_stack.append(context)
self._curr_context = context
def _pop_context(self):
self._context_stack.pop()
self._curr_context = self._context_stack[-1] if len(self._context_stack) > 0 else None
def curr_context(self):
return self._curr_context
def _push_command_node(self, command_node):
self._command_node_stack.append(command_node)
self._curr_command_node = command_node
def _pop_command_node(self):
self._command_node_stack.pop()
self._curr_command_node = self._command_node_stack[-1] if len(self._command_node_stack) > 0 else None
def curr_command_node(self):
return self._curr_command_node
def visit_root(self, node, context, flags, print_progress=False):
"""
The visit to the root node of an AST.
"""
if print_progress:
print(prog_bar_prefix(f'{OUT_TAB}Running AST for ', f'{context.display_name}', align='>', suffix='', append='...'))
prev_context = self._curr_context
self._curr_context = context
result = self.visit(node, context, flags)
self._curr_context = prev_context
if print_progress:
print(prog_bar_prefix(f'{OUT_TAB}Done Running AST for ', context.display_name, align='>', suffix='', append=''))
return result
def visit(self, node, context, flags):
method_name = f'_visit_{type(node).__name__}'
method = getattr(self, method_name, self._no_visit_method)
return method(node, context, flags)
def _no_visit_method(self, node, context, flags):
raise Exception(f'No _visit_{type(node).__name__} method defined in Interpreter')
# ------------------------------
# Rule Implementations
def _visit_FileNode(self, node, context, flags):
res = RunTimeResult()
result = res.register(self.visit(node.document, context, flags))
if res.error:
return res
return res.success(result)
def _visit_DocumentNode(self, node, context, flags):
res = RunTimeResult()
document = []
was_global = context.global_level
if was_global:
context.global_level = False
for paragraph in node.paragraphs:
write_tokens = res.register(self.visit(paragraph, context, flags))
if res.error:
return res
else:
if was_global:
context.token_document().extend(write_tokens)
document.extend(write_tokens)
if was_global:
context.global_level = True
return res.success(document)
def _visit_ParagraphNode(self, node, context, flags):
res = RunTimeResult()
# How long the document has gotten so far
i = len(context.token_document())
# Visit the writing (could be Plaintext, Python, command def, or a Command call)
write_tokens = res.register(self.visit(node.writing, context, flags))
if res.error:
return res
if len(write_tokens) > 0:
# If a command was called, the Interpreter.CommandCalled marker was
# prepended to write_tokens so that this ParagraphNode can detect it
if write_tokens[0] == Interpreter.CommandCalled:
write_tokens.pop(0)
if node.paragraph_break:
# Insert the paragraph break before the text that was just added
context.token_document().insert(i, node.paragraph_break)
return res.success(write_tokens)
def _visit_WritingNode(self, node, context, flags):
"""
Visits a WritingNode. If successful, this method returns the tokens that
the ParagraphNode is supposed to write.
"""
res = RunTimeResult()
write_tokens = res.register(self.visit(node.writing, context, flags))
# Error Handling
if res.error:
return res
return res.success(write_tokens)
def _visit_PythonNode(self, node, context, flags):
res = RunTimeResult()
python_token = node.python
tt = python_token.type
# Execute or eval python
if tt == TT.EXEC_PYTH1:
python_result = exec_python(python_token.value, context.globals(), context.locals())
elif tt == TT.EVAL_PYTH1:
python_result = eval_python(python_token.value, context.globals(), context.locals())
# For second pass python, it needs to be kept until we are actually
# placing the text on the PDF, then the Placer will be made available
# to the python and the code can make changes to the PDF
elif tt in (TT.EXEC_PYTH2, TT.EVAL_PYTH2):
python_result = [python_token.gen_pass_2_python( \
None if context.locals() is None else \
{key:val for key, val in context.locals().items()})]
else:
raise Exception(f"The following token was found in a PythonNode, it is not supposed to be in a PythonNode: {tt}")
if isinstance(python_result, type(None)):
python_result = []
elif isinstance(python_result, str):
python_result = Tokenizer.plaintext_tokens_for_str(python_result)
elif isinstance(python_result, MarkedUpText):
python_result = Tokenizer.tokens_for_marked_up_text(python_result)
elif isinstance(python_result, Exception) or issubclass(type(python_result), Exception):
return res.failure(PythonException(node.start_pos.copy(), node.end_pos.copy(),
'An error occurred while running your Python code.', python_result, context))
return res.success(python_result)
def _visit_CommandDefNode(self, node, context, flags):
res = RunTimeResult()
cmnd_name = node.cmnd_name.value
cmnd_params = node.cmnd_params
cmnd_key_params = node.cmnd_key_params
text_group = node.text_group
context.symbols.set(cmnd_name, Command(
cmnd_params,
cmnd_key_params,
text_group
))
return res.success([])
def _visit_CommandCallNode(self, node, context, flags):
res = RunTimeResult()
tokens = []
cmnd_name_str = node.cmnd_name.value
command_to_call = context.symbols.get(cmnd_name_str)
self._push_command_node(node)
if command_to_call is None:
# The command is undefined
return res.failure(RunTimeError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(),
'"\\' + f'{cmnd_name_str}" is undefined.',
context
))
elif isinstance(command_to_call, TextGroupNode):
# Handle when the "command" is actually a parameter that contains
# text. For example, in
#
# \hello = (\test) {
# \test
# }
#
# \\test is actually storing a TextGroupNode when the command
# \\hello is called, so this branch handles returning the TextGroupNode
# that \\test contains when \\test is called
result = res.register(self.visit(command_to_call, context, flags))
if res.error: return res
if result:
tokens.extend(result)
else:
# Command is defined and we need to call it
min_args = len(command_to_call.params)
max_args = min_args + len(command_to_call.key_params)
num_positional_args = len(node.cmnd_tex_args)
num_key_args = len(node.cmnd_key_args)
num_args_given = num_positional_args + num_key_args
# Check if enough positional arguments were given
if num_positional_args < min_args:
return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(),
f'The "{cmnd_name_str}" command requires {min_args} argument(s), but {num_positional_args} was/were given.',
))
# Check if too many arguments were given
if num_args_given > max_args:
return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(),
f'The "{cmnd_name_str}" command takes {max_args} argument(s) max, but {num_args_given} was/were given.',
))
cmnd_args = {}
# Add all the command names first
cmnd_and_key_param_names = []
for param in command_to_call.params:
name = param.identifier.value
if name in cmnd_and_key_param_names:
return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(),
f'The argument "{name}" was given more than one time. Every argument can only be given once, either by a key-argument or a positional argument.'
))
cmnd_and_key_param_names.append(name)
# Take each Parameter key-value pair (so the key-value pairs
# in the definition of the command) and add them to the dict
for cmnd_key_param in command_to_call.key_params:
name = cmnd_key_param.key.value
# Now add the key-params because the positional arguments will
# fulfill parameters and key-parameters in the order that
# they are in cmnd_and_key_param_names
if name in cmnd_and_key_param_names:
return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(),
f'The argument "{name}" was given more than one time. Every argument can only be given once, either by a key-argument or a positional argument.'
))
cmnd_and_key_param_names.append(name)
cmnd_args[name] = cmnd_key_param.text_group
# Now replace those key-value pairs from the definition of the command
# with those given in the call of command
for key_arg in node.cmnd_key_args:
# key params CommandKeyParamNode
key = key_arg.key.value
if not (key in cmnd_args):
return res.failure(InvalidSyntaxError(key_arg.key.start_pos.copy(), key_arg.key.end_pos.copy(),
f'"{key}" is not defined in command "{cmnd_name_str}". In other words, this key is not defined as a key-argument in the command\'s definition.',
))
cmnd_args[key] = key_arg.text_group
# now take each name from the POSITIONAL-ARGUMENT names provided in
# the command's definition and provide the values for them from
# the command call
for param_name, arg in zip(cmnd_and_key_param_names, node.cmnd_tex_args):
# params are CommandParamNode
cmnd_args[param_name] = arg.text_group
# Init py_locals, the python local variables to add to the current
# context
py_locals = {}
for key, arg in cmnd_args.items():
# Visit the argument node and get the tokens from it
new_tokens = res.register(self.visit(arg, context, flags))
if res.error:
return res
# Convert the tokens to MarkedUpText, something that can be used
# in Python
marked_up_text = Tokenizer.marked_up_text_for_tokens(new_tokens)
if marked_up_text == '<NONE>':
marked_up_text = None
# Assign each python local to its marked_up_text
py_locals[key] = marked_up_text
child_context = context.gen_child(cmnd_name_str, node.start_pos.copy(), py_locals)
# Just check to make sure that a value has been passed for each needed argument
for key, value in cmnd_args.items():
if value == 0:
return res.failure(InvalidSyntaxError(node.cmnd_name.start_pos.copy(), node.cmnd_name.end_pos.copy(),
f'"{key}", an argument in {cmnd_name_str}, has no value. You need to pass in an argument for it in this call of the command.',
))
else:
child_context.symbols.set(key, value)
self._push_context(child_context)
# actually run the command now that its variables have been added to the context
result = res.register(self.visit(command_to_call.text_group, child_context, flags))
if res.error: return res
tokens = result
self._pop_context()
self._pop_command_node()
if len(tokens) > 0:
# Find the first Token and set space_before to True if the
# command call had space_before = True, otherwise set it False
for token in tokens:
if isinstance(token, Token):
token.space_before = node.cmnd_name.space_before
break
# Tells the Paragraph Node that a Command was called so that it can
# decide whether to insert a paragraph break depending on whether
# there was one before the Command was called or not
tokens.insert(0, Interpreter.CommandCalled)
return res.success(tokens)
def _visit_TextGroupNode(self, node, context, flags):
res = RunTimeResult()
doc_tokens = res.register(self.visit(node.document, context, flags))
if res.error:
return res
for token in doc_tokens:
if isinstance(token, Token):
token.space_before = node.ocbrace.space_before
break
return res.success(doc_tokens)
def _visit_PlainTextNode(self, node, context, flags):
res = RunTimeResult()
return res.success(node.plain_text)
# -----------------------------
# Helper Classes
class CommandCalled:
"""
A helper class that just tells the Paragraph Node that a Command was
called so that it can make an informed decision on whether to add a
paragraph break
"""
pass
# -----------------------------------------------------------------------------
# Compiler Class
class CompilerProxy:
"""
The object that is given to files being compiled under the name 'compiler'.
This proxy is given instead of the actual compiler because it makes clear
which methods are actually meant to be used in
the files being compiled.
"""
def __init__(self, compiler):
self._compiler = compiler
# ---------------------------------
# Methods for Directory/File Finding
def main_file_path(self):
"""
The path to the main/input file that the compiler started with.
"""
return self._compiler.main_file_path()
def main_file_dir(self):
"""
The directory that the main/input file is in.
"""
return self._compiler.main_file_dir()
def curr_file_path(self):
"""
The path to the file that is currently being compiled i.e. the file
that you are in when you call this method.
"""
return self._compiler.curr_file_path()
def curr_file_dir(self):
"""
The directory that the current file being run is in.
"""
return self._compiler.curr_file_dir()
# ---------------------------------
# Methods for importing/inserting files
def strict_import_file(self, file_path):
self._compiler.strict_import_file(file_path)
def std_import_file(self, file_path):
self._compiler.std_import_file(file_path)
def import_file(self, file_path):
self._compiler.import_file(file_path)
def far_import_file(self, file_path):
self._compiler.far_import_file(file_path)
def insert_file(self, file_path):
self._compiler.insert_file(file_path)
def strict_insert_file(self, file_path):
self._compiler.strict_insert_file(file_path)
def far_insert_file(self, file_path):
self._compiler.far_insert_file(file_path)
# ---------------------------------
# Other Methods
def placer_class(self):
return self._compiler.placer_class()
def set_placer_class(self, placer_class):
return self._compiler.set_placer_class(placer_class)
class Compiler:
"""
This object orchestrates the compilation of plaintext files into PDFs
"""
def __init__(self, input_file_path, path_to_std_dir, print_progress_bars=False, encoding='utf-8'):
self._commands = {}
self._files_by_path = {}
assert path.isfile(input_file_path), f'The given path is not to a file or does not exist: {input_file_path}'
self._input_file_path = input_file_path
self._input_file_dir = path.dirname(input_file_path)
self._std_dir_path = path_to_std_dir
self._print_progress_bars = print_progress_bars
self._encoding = encoding # The encoding that the pdfo files are in
self._toolbox = ToolBox(self)
self._compiler_proxy = CompilerProxy(self)
self._placer_class = NaivePlacer
self._interpreter_stack = []
# The globals that will be copied every time a fresh set of globals
# is needed
self._globals = {'__name__': __name__, '__doc__': None, '__package__': None,
'__loader__': __loader__, '__spec__': None, '__annotations__': None,
'__builtins__': _copy.deepcopy(globals()['__builtins__']),
'compiler':self._compiler_proxy, 'toolbox':self._toolbox}
# remove any problematic builtins from the globals
rem_builtins = []
for key in rem_builtins:
self._globals['__builtins__'].pop(key)
# -------------------------------------------------------------------------
# Main Methods
def compile_pdf(self):
"""
Compiles the PDF and returns the PDFDocument that can be used to draw
the PDF multiple times to different files.
"""
fresh_context = self._fresh_context(self._input_file_path)
# Now run the main/input file
self._insert_file(self._input_file_path, fresh_context, print_progress=self._print_progress_bars)
from placer.token_stream import TokenStream
return TokenStream(fresh_context.token_document(), self._placer_class,
fresh_context.globals(), self._input_file_path,
self._print_progress_bars).place_tokens()
def compile_and_draw_pdf(self, output_pdf_path):
"""
Convenience function that compiles and draws the PDF
"""
self.compile_pdf().draw(output_pdf_path, print_progress=self._print_progress_bars)
# -------------------------------------------------------------------------
# Helper Methods
def _fresh_globals(self):
"""
Returns a fresh set of globals as they are before the program starts compiling.
These globals are for the python exec and eval methods that are used to
run python code.
"""
return {key:val for key, val in self._globals.items()}
def _fresh_context(self, file_path):
"""
Returns a fresh context for running a file as if it were the main/input
file (even if it isn't actually the main/input file).
"""
parent = None; entry_pos = None; token_document = []; locals = None
context = Context(file_path, file_path, parent, entry_pos, token_document, self._fresh_globals(), locals, SymbolTable())
# insert the standard file into the context
self._insert_file(self._path_to_std_file(STD_LIB_FILE_NAME), context, print_progress=self._print_progress_bars)
return context
def _push_interpreter(self):
"""
Pushes a new Interpreter onto the interpreter stack.
"""
self._interpreter_stack.append(Interpreter())
def _pop_interpreter(self):
"""
Pops the _curr_interpreter off the interpreter stack.
"""
return self._interpreter_stack.pop()
def _curr_interpreter(self):
"""
Returns the current Interpreter.
"""
_is = self._interpreter_stack
return None if len(_is) <= 0 else _is[-1]
def _curr_context(self):
"""
Returns the current Context.
"""
ci = self._curr_interpreter()
return None if ci is None else ci._curr_context
def _curr_tok_document(self):
"""
Returns the current document made of tokens, not to be confused with
the PDFDocument object that is returned by the Placer. The "document"
returned by this method is a list of Tokens that can be given to
a Placer to produce a PDFDocument.
"""
cc = self._curr_context()
return None if cc is None else cc.token_document()
def _compiler_import_file(self, file_path, print_progress=False):
"""
Imports a file. If the file has not already been imported by the compiler,
this method will read in the file, tokenize, and parse it into
an Abstract Syntax Tree (AST), before caching the raw_text, tokens,
and ast in a File object and returning the File object. If the file
has already been imported, this method will return the cached File
object.
To run the file object, the root of the AST must be visited by the
Interpreter. This can be achieved by doing
Interpreter().visit_root(file.ast)
"""
assert path.isfile(file_path), f'Could not import "{file_path}"'
file_path = path.abspath(file_path)
# If file already imported, just return the file
if file_path in self._files_by_path:
return self._files_by_path[file_path]
file = File(file_path)
self._files_by_path[file_path] = file
try:
with open(file_path, encoding=self._encoding) as f:
file.raw_text = f.read() # Raw text that the file contains
except UnicodeDecodeError:
raise AssertionError(f'Could not decode the given file as {self._encoding}.')
file.tokens = Tokenizer(file.file_path, file.raw_text, print_progress_bar=print_progress).tokenize()
# Returns a ParseResult, so need to see if any errors. If no Errors, then set file.ast to the actual abstract syntax tree
file.ast = Parser(file.tokens, print_progress_bar=print_progress).parse()
if file.ast.error is not None:
raise file.ast.error
else:
file.ast = file.ast.node
return file
def _run_file(self, file, context, print_progress=False):
"""
Runs a file, importing it first if need be, and returns the tokens and
context that the file generates. By "import", I mean that it
loads the file into memory, tokenizes it and makes it into an AST,
not that it does the same thing as the \\import command
context is the current Context that you want the file to be run in.
"""
if isinstance(file, str):
# It should be a file path
file_obj = self._compiler_import_file(file, print_progress)
else:
# It should be a File object
file_obj = file
if file_obj.being_run:
raise AssertionError(f"The given file is already being run (imported or inserted), so you probably have a circular import which is not allowed: {file_obj.file_path}")
else:
file_obj.being_run = True
self._push_interpreter()
# Save the context's current display_name and file_path
old_disp_name = context.display_name
old_path = context.file_path
# Give the context the display name and file path of the file it is now
# going into
context.display_name = file_obj.file_path
context.file_path = file_obj.file_path
# Since just pushed interpreter, self._curr_interpreter() should not be None
result = self._curr_interpreter().visit_root(file_obj.ast, context, InterpreterFlags(), print_progress)
# Restore the context's display name and file_path to what they were before
context.display_name = old_disp_name
context.file_path = old_path
self._pop_interpreter()
if result.error:
raise result.error
file_obj.being_run = False
return result.value # Return the tokens gotten by running the file
def _insert_file(self, file_path, context, print_progress=False):
"""
Inserts the file into the current file. This means that the file
must be run with the current context as if it were directly in the
file.
context is the context that this file is being inserted into
"""
# Since the context is directly given to self._run_file, all of the
# commands and whatnot in the global portion of the file will be
# added to the given context as if it was in the context directly
# and not in another file
was_global = context.global_level
context.global_level = True
i = len(context.token_document())
self._run_file(file_path, context, print_progress)
# Want to add a space before the first Token we come across.
# Note: the compiler may still not render a space before the token
# if the token is at the start of a line. That is why this is safe
# to do. We are merely saying "this Token should have a space before
# it if it makes sense to have one before it"
doc = context.token_document()
ci = self._curr_interpreter()
if ci and ci.curr_command_node():
ccc = ci.curr_command_node()
length = len(doc)
while True:
if i >= length:
# reached end of Token document without finding a single
# Token
break
curr = doc[i]
if isinstance(curr, Token):
# Found a Token so set whether it has a space before it based
# on the current command that is being run and whether
# the command has a space before it (i.e. if there is
# space before \insert{file_path}, then the first token
# of the inserted text from the file should have a space
# before it, otherwise it should not have a space before
# it)
curr.space_before = ccc.cmnd_name.space_before
break
i += 1
context.global_level = was_global
def _import_file(self, file_path, context, commands_to_import=None, print_progress=False):
"""
Imports a file. This is very different from self._insert_file because
it takes the file, gives it a fresh context, and runs the file.
The resulting context can be saved to the File object for the file
because the resulting global context from running the file does
not depend on any other file's context. In this way, once a file
is imported once, its resulting tokens and Context can be reused
over and over again, whereas the tokens and Context from
self._insert_file cannot be and the file must be re-run every time
it is inserted into a file, regardless of whether it has been
inserted into a file before.
context is the context that you want to import the file into.
If commands_to_import are given, then only the commands by the names
specified in the list of strings will be imported. All Python globals
will still, however, be imported.
"""
file_obj = self._compiler_import_file(file_path, print_progress)
if file_obj.import_context is None:
# Since this file has not yet been run, we will have to run it
# now with a fresh context unrelated to any other context
# Using file_obj.file_path in case it is different from the argument file_path
context_to_import = self._fresh_context(file_obj.file_path)
tokens = self._run_file(file_obj, context_to_import, print_progress)
# Since the file was imported, that means it does not depend on the
# current context and thus the context can be saved and reused later
file_obj.import_context = context_to_import
# I expect most imports to have some global Python code that they
# want to be run on the second pass, so that code must be imported
# too or else it will never reach the Placer and be run.
tokens_to_import = []
for token in tokens:
if isinstance(token, Token) and token.type in (TT.EXEC_PYTH2, TT.EVAL_PYTH2):
tokens_to_import.append(token)
file_obj.import_tokens = tokens_to_import
else:
# Since this file has been imported before, just reuse the same
# context as last time because the context is not dependent
# on the current context of when/where the file is being run
context_to_import = file_obj.import_context
tokens_to_import = file_obj.import_tokens
try:
context.import_(context_to_import, tokens_to_import, commands_to_import)
except AssertionError as e:
raise AssertionError(f'{file_path} could not be imported because of the following error:{e}')
def _path_to_std_file(self, file_path):
"""
Returns the file path as a file path to a standard directory file.
"""
# Replace the ending of the file path with the one used by all standard files
split_file_path = file_path.split('.')
if len(split_file_path) > 1 and split_file_path[-1] == STD_FILE_ENDING:
split_file_path.pop()
split_file_path.append(STD_FILE_ENDING)
file_path = '.'.join(split_file_path)
# build the absolute path into the standard directory
file_path = path.abspath(path.join(self._std_dir_path, file_path))
return file_path
def _path_rel_to_file(self, file_path, curr_file=True):
"""
Returns the given path resolved relative to either the current file being
run (curr_file=True) or the main/input file being run (curr_file=False).
"""
dir = self.curr_file_dir() if curr_file else self.main_file_dir()
file_path = path.abspath(path.join(dir, file_path))
return file_path
def _get_near_path(self, file_path):
"""
Gets the near path to insert/import. This checks the path relative to
the current file first, then checks the path relative to the
main/input file, and then it checks the standard directory.
"""
ret_path = cf_rel_path = self._path_rel_to_file(file_path, curr_file=True)
if not path.isfile(ret_path):
ret_path = input_rel_path = self._path_rel_to_file(file_path, curr_file=False)
if not path.isfile(ret_path):
_file_path, file_name = path.split(file_path)
ret_path = std_path = self._path_to_std_file(file_path)
assert path.isfile(std_path), f'Could not get near path for "{file_path}" because neither "{cf_rel_path}", nor "{input_rel_path}", nor "{std_path}" lead to a file and/or exist.'
return ret_path
def _get_far_path(self, file_path):
"""
Gets the far path to insert/import. This checks the standard directory
first and then checks the path relative to the main/input file
and then checks the path relative to the current file.
"""
_file_path, file_name = path.split(file_path)
ret_path = std_path = self._path_to_std_file(file_path)
if not path.isfile(ret_path):
ret_path = input_rel_path = self._path_rel_to_file(file_path, curr_file=False)
if not path.isfile(ret_path):
ret_path = cf_rel_path = self._path_rel_to_file(file_path, curr_file=True)
assert path.isfile(std_path), f'Could not get far path for "{file_path}" because neither "{std_path}", nor "{input_rel_path}", nor "{cf_rel_path}" lead to a file and/or exist.'
return ret_path
# ------------------------------------
# Methods available from CompilerProxy
# Methods for Inserting and Importing Files
def insert_file(self, file_path):
"""
Runs the file at the given file_path and inserts its contents (text and
commands) into the current document. The path is resolved relative to the
current file first, then the main/input file, then the standard directory.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot insert into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
self._insert_file(self._get_near_path(file_path), cc, print_progress=self._print_progress_bars)
def strict_insert_file(self, file_path):
"""
Runs the file at the given file path and inserts it into the current
document.
The file path is assumed to be relative to the current file.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot insert into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
# Actually insert the file
self._insert_file(self._path_rel_to_file(file_path), cc, print_progress=self._print_progress_bars)
def far_insert_file(self, file_path):
"""
Runs the file at the given file_path and inserts its contents into the
current document. The path is resolved against the standard directory
first, then the main/input file, then the current file.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot far insert into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
self._insert_file(self._get_far_path(file_path), cc, print_progress=self._print_progress_bars)
def strict_import_file(self, file_path):
"""
Runs the file at the given file_path, importing its commands but not
inserting its text into the current document.
The file path is assumed to be relative to the current file being run.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
self._import_file(self._path_rel_to_file(file_path), cc, print_progress=self._print_progress_bars)
def import_file(self, file_path):
"""
Runs the file at the given file_path, importing its commands but not
inserting its text into the current document.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
self._import_file(self._get_near_path(file_path), cc, print_progress=self._print_progress_bars)
def std_import_file(self, file_path):
"""
Runs the file at the given file_path, importing its commands but not
inserting its text into the current document.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
self._import_file(self._path_to_std_file(file_path), cc, print_progress=self._print_progress_bars)
def far_import_file(self, file_path):
"""
Runs the file at the given file_path, importing its commands but not
inserting its text into the current document.
"""
file_path = str(file_path)
cc = self._curr_context()
assert cc is not None, 'Cannot import into a Non-existent Context. This is a Compiler error, report it to the people making the compiler.'
self._import_file(self._get_far_path(file_path), cc, print_progress=self._print_progress_bars)
# Methods for retrieving files and directories for the current file.
def main_file_path(self):
"""
Returns the file path to the main/first/input file that is/was run.
"""
return self._input_file_path
def main_file_dir(self):
"""
Returns an absolute path to the directory that the main file that is
being run is in.
"""
return path.dirname(self.main_file_path())
def curr_file_path(self):
"""
Returns an absolute path to the current file that is being run.
"""
cc = self._curr_context()
assert cc is not None, f'The current context was None so the current file path could not be retrieved.'
return cc.file_path
def curr_file_dir(self):
"""
Returns an absolute path to the directory that the current file that is
being run is in.
"""
return path.dirname(self.curr_file_path())
# Misc Methods
def placer_class(self):
return self._placer_class
def set_placer_class(self, placer_class):
"""
Sets the placer class that will be used to place the tokens on the PDF.
This allows a person to, theoretically, create their own placer in
a pdfo file and make the compiler use that instead.
"""
self._placer_class = placer_class
class Command:
"""
Represents a command in the file.
"""
__slots__ = ['params', 'key_params', 'text_group']
def __init__(self, params, key_params, text_group):
self.params = params
self.key_params = key_params
self.text_group = text_group # This will be run for the command
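# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the compiler itself; the file names
# are placeholders and assume an input file and a standard-library directory
# exist next to the script):
#
#   compiler = Compiler('main.pdfo', 'std', print_progress_bars=True)
#   compiler.compile_and_draw_pdf('main.pdf')
#
# compile_pdf() alone returns the placed PDFDocument, which can then be drawn
# to several output paths without recompiling.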
|
pedidos = []
def criar_pedido(nome, sabor, observacao=None):
pedido = {}
pedido['nome'] = nome
pedido['sabor'] = sabor
pedido['observacao'] = observacao
return pedido
pedidos.append(criar_pedido('mario', 'pepperoni'))
pedidos.append(criar_pedido('marco', 'presunto', 'xyz'))
for pedido in pedidos:
template = 'Nome: {nome}\nSabor: {sabor}'
print(template.format(**pedido))
if pedido['observacao']:
print('Observacao: {}'.format(pedido['observacao']))
print('-'*20)
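# template.format(**pedido) unpacks the dict, filling {nome} and {sabor} from
# the matching keys; 'observacao' is printed only when it was provided (it
# defaults to None in criar_pedido).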
|
def fib_slow(n):
if n < 0:
raise ValueError('n must be non-negative')
if n == 1 or n == 0:
return 1
return fib_slow(n-1) + fib_slow(n-2)
def fib_fast(n):
cache = {0: 1, 1: 1, 2: 2}
def inner_fib(m):
if (m in cache):
return cache[m]
value = inner_fib(m-1) + inner_fib(m-2)
cache[m] = value
return value
return inner_fib(n)
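# Quick illustrative check (not part of the original snippet): both versions
# compute the same sequence, but fib_slow revisits subproblems exponentially
# while fib_fast memoizes results in `cache` and stays linear in n.
if __name__ == '__main__':
    assert [fib_slow(n) for n in range(15)] == [fib_fast(n) for n in range(15)]
    print([fib_fast(n) for n in range(10)])  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]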
|
class MyClass:
def __init__(self, attr):
self.attr = attr
def __add__(self, other):
return MyClass(self.attr + other.attr)
def method(self):
print(self.attr)
print(self.attr)
(MyClass(1) + MyClass(2)).method()
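# __add__ builds MyClass(3) from MyClass(1) + MyClass(2), so method() prints 3 twice.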
|
import boto3
import json
def publish_to_connect_sns(payload, topic):
sns = boto3.client('sns')
response = sns.publish (
TargetArn = topic,
Message = json.dumps(payload)
)
return response
def lambda_handler(event, context):
#sns_message = json.loads(event['Records'][0]['Sns']['Message'])
print(json.dumps(event))
escalation_target = event['Details']['Parameters']['escalationTarget']
message = event['Details']['Parameters']['message']
print(json.dumps(escalation_target))
payload = {
'message': message,
'priority': 'high',
'escalation': escalation_target
}
publish_to_connect_sns(payload, 'arn:aws:sns:eu-central-1:xxxx:alert_dispatcher')
print(json.dumps(payload))
resultMap = {'escalation':'done'}
return resultMap
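# For reference (illustrative, not part of the deployed function): the handler
# expects an Amazon Connect invocation event shaped roughly like
#   {
#     "Details": {
#       "Parameters": {
#         "escalationTarget": "<who to escalate to>",
#         "message": "<text forwarded to the SNS topic>"
#       }
#     }
#   }
# and returns {'escalation': 'done'} after publishing the payload to the
# hard-coded alert_dispatcher SNS topic.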
|
'''
The split function here finds splits that minimize gini impurity.
It runs under numba for speed, since these are the innermost loops in
decision tree fitting.
author: David Thaler
date: August 2017
'''
import numpy as np
import numba
from . import tree_constants as tc
@numba.jit(nopython=True)
def split(x, y, wts, max_features, min_leaf):
'''
Given features x and labels y, find the feature index and threshold for a
split that produces the largest reduction in Gini impurity.
Each side of the split must have at least min_leaf samples.
Note:
If no valid split is found after max_features, and max_features is less
than the number of features, splitter will continue to try features, one
at a time, in random order, until a valid split is found, or until all
features have been tried.
Args:
x: m x n numpy array of numeric features
y: m-element 1-D numpy array of labels; must be 0-1.
wts: sample weights, use ones for unweighted case
max_features: try up to this number of features per split
Caller must set to value in 1...x.shape[1]
min_leaf: minimum number of samples for a leaf
Returns:
2-tuple of feature index and split threshold of best split.
'''
m, n = x.shape
NO_SPLIT = (tc.NO_FEATURE, tc.NO_THR)
improve = False
tot_wt = wts.sum()
ywt = y * wts
tot_ywt = ywt.sum()
# the Gini impurity of this node before splitting
node_score = 1 - (tot_ywt/tot_wt)**2 - ((tot_wt - tot_ywt)/tot_wt)**2
# a code optimization for pure nodes
if node_score==0:
return NO_SPLIT
# Stores score, threshold for each feature (1 > max value for gini)
results = np.ones((n, 2))
col_order = np.random.choice(np.arange(n), size=n, replace=False)
for col_ct in range(n):
if col_ct >= max_features and improve:
break
feature_idx = col_order[col_ct]
f = x[:, feature_idx]
# Produce 3 arrays:
# 1) sorted unique values in f
# 2) count of each unique value (usually 1)
# 3) # of positives for each unique value
ntot = np.zeros(m)
uniq = np.zeros(m)
npos = np.zeros(m)
cur_val = np.nan
num_uniq = 0
a = -1
b = -1
f_idx = np.argsort(f)
for i in range(m):
idx = f_idx[i]
if f[idx] != cur_val:
cur_val = f[idx]
uniq[num_uniq] = cur_val
num_uniq += 1
# count on left matches min_leaf, so start slice
if (i + 1) == min_leaf:
a = num_uniq - 1
# count on right (m - i - 1), dropped below min_leaf so end slice
if (m - i) == min_leaf:
b = num_uniq - 1
ntot[num_uniq - 1] += wts[idx]
npos[num_uniq - 1] += ywt[idx]
uniq = uniq[:num_uniq]
npos = npos[:num_uniq]
ntot = ntot[:num_uniq]
# at this point there might be no valid splits
if b <= a:
continue
# Get cumulative counts/positives/negatives for each possible split
nleft = ntot.cumsum()
nright = tot_wt - nleft
npos_left = npos.cumsum()
nneg_left = nleft - npos_left
npos_right = tot_ywt - npos_left
nneg_right = nright - npos_right
nleft = nleft[a:b]
nright = nright[a:b]
npos_left = npos_left[a:b]
npos_right = npos_right[a:b]
nneg_left = nneg_left[a:b]
nneg_right = nneg_right[a:b]
# This is a gini proxy from the form 2 * (p1 * p2)
gini_split = (npos_left * nneg_left / nleft) + (npos_right * nneg_right / nright)
# Select the best split
split_pos = gini_split.argmin()
# gini_split holds a proxy score that differs from gini by (2/tot_wt)
split_score = (2/tot_wt) * gini_split[split_pos]
split_idx = a + split_pos
thr = 0.5 * (uniq[split_idx] + uniq[split_idx + 1])
results[feature_idx] = (split_score, thr)
if split_score < node_score:
improve = True
best_split_idx = results[:, 0].argmin()
best_score = results[best_split_idx, 0]
if best_score < node_score:
best_thr = results[best_split_idx, 1]
return (best_split_idx, best_thr)
else:
return NO_SPLIT
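# Illustrative call (a sketch; it assumes the package's tree_constants module
# is importable, since NO_FEATURE/NO_THR above come from it):
#
#   import numpy as np
#   x = np.random.rand(200, 3)
#   y = (x[:, 0] > 0.5).astype(float)   # 0-1 labels
#   wts = np.ones(200)
#   feature_idx, thr = split(x, y, wts, 3, 10)
#   # feature_idx is usually 0 with thr near 0.5 for this toy data;
#   # (tc.NO_FEATURE, tc.NO_THR) comes back when no valid split reduces impurity.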
|
#from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import (integer,
double)
class DLOAD(object):
type = 'DLOAD'
def __init__(self, model):
"""
Defines the DLOAD object.
Parameters
----------
model : BDF
the BDF object
"""
self.model = model
self.load_id = None
self.scale = None
#: individual scale factors (corresponds to load_ids)
self.scale_factors = []
#: individual load_ids (corresponds to scale_factors)
self.load_ids = []
def add_from_bdf(self, card, comment):
"""
Fills the DLOAD object from the BDF reader
:param card: the BDFCard object
:param comment: a comment
"""
if comment:
self._comment = comment
#: load ID
self.load_id = integer(card, 1, 'sid')
#: overall scale factor
self.scale = double(card, 2, 'scale')
# alternating of scale factor & load set ID
nLoads = len(card) - 3
assert nLoads % 2 == 0
for i in range(nLoads // 2):
n = 2 * i + 3
self.scale_factors.append(double(card, n, 'scale_factor'))
self.load_ids.append(integer(card, n + 1, 'load_id'))
def build(self):
pass
def comment(self):
if hasattr(self, '_comment'):
return self._comment
return []
def get_stats(self):
msg = []
msg.append(' %-8s: %i' % ('DLOAD[%s]' % self.load_id, len(self.load_ids)))
return msg
def write_card(self, bdf_file, size=8, lids=None):
list_fields = ['DLOAD', self.load_id, self.scale]
for (scale_factor, lid) in zip(self.scale_factors, self.load_ids):
list_fields += [scale_factor, lid]
bdf_file.write(print_card_8(list_fields))
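# For reference (values illustrative): a DLOAD bulk data card pairs an overall
# scale with alternating (scale_factor, load_id) entries, e.g.
#   DLOAD, 25, 1.0, 2.0, 101, -1.0, 102
# which add_from_bdf reads as load_id=25, scale=1.0,
# scale_factors=[2.0, -1.0], load_ids=[101, 102].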
|
import os
import unittest
from ont_fast5_api.fast5_file import Fast5File
from ont_fast5_api.fast5_interface import get_fast5_file
from ont_fast5_api.multi_fast5 import MultiFast5File
test_data = os.path.join(os.path.dirname(__file__), 'data')
class TestFast5Interface(unittest.TestCase):
def test_correct_type(self):
single_read_path = os.path.join(test_data, "single_reads", "read0.fast5")
single_read_id = Fast5File(single_read_path).get_read_id()
with get_fast5_file(single_read_path) as f5:
self.assertEqual(type(f5), Fast5File)
self.assertEqual(len(f5.get_read_ids()), 1)
self.assertEqual(single_read_id, f5.get_read_ids()[0])
self.get_raw(f5)
multi_read_path = os.path.join(test_data, "multi_read", "batch_0.fast5")
with get_fast5_file(multi_read_path) as f5:
self.assertEqual(type(f5), MultiFast5File)
self.assertTrue(len(f5.get_read_ids()) >= 1)
self.get_raw(f5)
def get_raw(self, f5):
# Test we can get raw data using the same method for single and multi
raw_data = f5.get_read(f5.get_read_ids()[0]).get_raw_data()
self.assertTrue(len(raw_data) >= 0)
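# Allow running this test module directly (an added convenience; the suite is
# normally picked up by the package's regular unittest discovery).
if __name__ == '__main__':
    unittest.main()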
|
# Copyright (c) 2020 fortiss GmbH
#
# Authors: Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import numpy as np
import time
import os
from bark.runtime.commons.parameters import ParameterServer
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.viewer.video_renderer import VideoRenderer
from bark.runtime.scenario.scenario_generation.config_with_ease import \
LaneCorridorConfig, ConfigWithEase
from bark.runtime.runtime import Runtime
from bark.runtime.viewer.panda3d_easy import Panda3dViewer
from bark.core.world.opendrive import *
from bark.core.world.goal_definition import *
from bark.core.models.behavior import *
from bark.core.commons import SetVerboseLevel
try:
from bark.core.world.evaluation import EvaluatorRss
except ImportError:
raise ImportError(
"This example requires building RSS, please run with \"bazel run //examples:merging_rss --define rss=true\"")
# parameters
param_server = ParameterServer()
# scenario
class CustomLaneCorridorConfig(LaneCorridorConfig):
def __init__(self,
params=None,
**kwargs):
super(CustomLaneCorridorConfig, self).__init__(params, **kwargs)
def goal(self, world):
road_corr = world.map.GetRoadCorridor(
self._road_ids, XodrDrivingDirection.forward)
lane_corr = self._road_corridor.lane_corridors[0]
return GoalDefinitionPolygon(lane_corr.polygon)
param_server["BehaviorIDMClassic"]["BrakeForLaneEnd"] = True
param_server["BehaviorIDMClassic"]["BrakeForLaneEndEnabledDistance"] = 60.0
param_server["BehaviorIDMClassic"]["BrakeForLaneEndDistanceOffset"] = 30.0
param_server["BehaviorLaneChangeRuleBased"]["MinRemainingLaneCorridorDistance"] = 80.
param_server["BehaviorLaneChangeRuleBased"]["MinVehicleRearDistance"] = 0.
param_server["BehaviorLaneChangeRuleBased"]["MinVehicleFrontDistance"] = 0.
param_server["BehaviorLaneChangeRuleBased"]["TimeKeepingGap"] = 0.
param_server["BehaviorMobilRuleBased"]["Politeness"] = 0.0
param_server["BehaviorIDMClassic"]["DesiredVelocity"] = 10.
param_server["World"]["FracLateralOffset"] = 2.0
param_server["Visualization"]["Evaluation"]["DrawRssDebugInfo"] = True
param_server["Visualization"]["Evaluation"]["DrawRssSafetyResponses"] = True
param_server["Visualization"]["Agents"]["DrawEvalGoals"] = False
SetVerboseLevel(0)
# Configure both lanes of the highway; the right lane has one controlled agent
left_lane = CustomLaneCorridorConfig(params=param_server,
lane_corridor_id=0,
road_ids=[0, 1],
behavior_model=BehaviorMobilRuleBased(
param_server),
s_min=5.,
s_max=50.)
right_lane = CustomLaneCorridorConfig(params=param_server,
lane_corridor_id=1,
road_ids=[0, 1],
controlled_ids=True,
behavior_model=BehaviorMobilRuleBased(
param_server),
s_min=5.,
s_max=20.)
map_path = "bark/runtime/tests/data/DR_DEU_Merging_MT_v01_centered.xodr"
scenarios = \
ConfigWithEase(num_scenarios=3,
map_file_name=map_path,
random_seed=0,
params=param_server,
lane_corridor_configs=[left_lane, right_lane])
# viewer
viewer = MPViewer(params=param_server,
# x_range=[-35, 35],
# y_range=[-35, 35],
follow_agent_id=False)
# viewer = Panda3dViewer(params=param_server,
# x_range=[-40, 40],
# y_range=[-40, 40],
# follow_agent_id=True,
# light_pose=[1000, 1000, 100000],
# camera_pose=[1000, 980, 100])
sim_step_time = param_server["simulation"]["step_time",
"Step-time used in simulation",
0.2]
sim_real_time_factor = param_server["simulation"]["real_time_factor",
"execution in real-time or faster",
1.]
viewer = VideoRenderer(renderer=viewer,
world_step_time=sim_step_time,
fig_path="/tmp/video")
env = Runtime(step_time=0.2,
viewer=viewer,
scenario_generator=scenarios,
render=True)
# Defining vehicles dynamics for RSS
# Input format:
# [longitudinal max acceleration, longitudinal max braking, longitudinal min acceleration,
# longitudinal min brake correct, lateral max acceleration, lateral min braking,
# lateral fluctuation margin, agent response time]
#
# Detailed explanation please see:
# https://intel.github.io/ad-rss-lib/ad_rss/Appendix-ParameterDiscussion/#parameter-discussion
# Default dynamics for every agent if it is not defined individually
default_vehicle_dynamics = [1.7, -1.7, -1.69, -1.67, 0.2, -0.8, 0.1, 1.]
# Individual dynamics, each defined with the agent id
agents_vehicle_dynamics = {1: [1.7, -1.7, -1.69, -1.67, 0.2, -0.8, 0.1, 1.],
2: [1.71, -1.7, -1.69, -1.67, 0.2, -0.8, 0.1, 1.]}
# Example of using RSS to evaluate the safety situation of the evaluating agent.
# The evaluating agent is defined with agent_id when initializing EvaluatorRss.
def print_rss_safety_response(evaluator_rss, world):
# Evaluating with RSS is quite computationally expensive
print("Overall safety response: ", evaluator_rss.Evaluate(world))
# print("Pairwise safety response: ",
# evaluator_rss.PairwiseEvaluate(world))
# print("Pairwise directional safety response: ",
# evaluator_rss.PairwiseDirectionalEvaluate(world))
param_server["EvalutaorRss"]["MapFilename"] = map_path
param_server["EvalutaorRss"]["DefaultVehicleDynamics"] = default_vehicle_dynamics
param_server["EvalutaorRss"]["SpecificAgentVehicleDynamics"] = agents_vehicle_dynamics
param_server["EvalutaorRss"]["CheckingRelevantRange"] = 1
# run n scenarios
for episode in range(0, 10):
env.reset()
current_world = env._world
eval_agent_id = env._scenario._eval_agent_ids[0]
# There are two ways to set up EvaluatorRss
# evaluator_rss = EvaluatorRss(eval_agent_id, map_path,
# default_vehicle_dynamics,
# agents_vehicle_dynamics,
# checking_relevent_range=1)
evaluator_rss = EvaluatorRss(eval_agent_id, param_server)
current_world.AddEvaluator("rss", evaluator_rss)
# step each scenario 40 times
for step in range(0, 40):
env.step()
print_rss_safety_response(evaluator_rss, current_world)
time.sleep(sim_step_time / sim_real_time_factor)
viewer.export_video(filename="/tmp/merging_rss", remove_image_dir=False)
|
#
# PySNMP MIB module COFFEE-POT-MIB (http://pysnmp.sf.net)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/COFFEE-POT-MIB
# Produced by pysmi-0.0.7 at Sun Feb 14 00:07:06 2016
# On host bldfarm platform Linux version 4.1.13-100.fc21.x86_64 by user goose
# Using Python version 3.5.0 (default, Jan 5 2016, 17:11:52)
#
( OctetString, ObjectIdentifier, Integer, ) = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
( InterfaceIndex, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
( NotificationGroup, ObjectGroup, ModuleCompliance, ) = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
( Counter64, MibIdentifier, TimeTicks, ObjectIdentity, Unsigned32, NotificationType, Bits, Gauge32, Integer32, transmission, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, iso, ModuleIdentity, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibIdentifier", "TimeTicks", "ObjectIdentity", "Unsigned32", "NotificationType", "Bits", "Gauge32", "Integer32", "transmission", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "iso", "ModuleIdentity")
( TextualConvention, TimeInterval, DisplayString, TimeStamp, ) = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TimeInterval", "DisplayString", "TimeStamp")
coffee = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 132))
if mibBuilder.loadTexts: coffee.setLastUpdated('9803231700Z')
if mibBuilder.loadTexts: coffee.setOrganization('Networked Appliance Management Working Group')
if mibBuilder.loadTexts: coffee.setContactInfo(' Michael Slavitch\n Loran Technologies,\n 955 Green Valley Crescent\n Ottawa, Ontario Canada K2A 0B6\n\n Tel: 613-723-7505\n Fax: 613-723-7209\n E-mail: slavitch@loran.com')
if mibBuilder.loadTexts: coffee.setDescription('The MIB Module for coffee vending devices.')
potName = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0,255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: potName.setDescription('The vendor description of the pot under management')
potCapacity = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: potCapacity.setDescription('The number of units of beverage supported by this device\n (regardless of its current state) .')
potType = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4,))).clone(namedValues=NamedValues(("automatic-drip", 1), ("percolator", 2), ("french-press", 3), ("espresso", 4),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: potType.setDescription('The brew type of the coffee pot.')
potLocation = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0,255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: potLocation.setDescription('The physical location of the pot in question')
potMonitor = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 132, 6))
potOperStatus = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5,))).clone(namedValues=NamedValues(("off", 1), ("brewing", 2), ("holding", 3), ("other", 4), ("waiting", 5),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: potOperStatus.setDescription('The operating status of the pot in question. Note\n that this is a read-only feature. Current hardware\n prevents us from changing the pot state via SNMP.')
potLevel = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 6, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: potLevel.setDescription('The number of units of coffee under management. The\n units of level are defined in potMetric below.')
potMetric = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 6, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5,))).clone(namedValues=NamedValues(("espresso", 1), ("demi-tasse", 2), ("cup", 3), ("mug", 4), ("bucket", 5),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: potMetric.setDescription('The vendor description of the pot under management')
potStartTime = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 6, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: potStartTime.setDescription('The time in seconds since Jan 1 1970 to start the pot\n if and only if potOperStatus is waiting(5)')
lastStartTime = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 6, 5), TimeInterval()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lastStartTime.setDescription('The amount of time, in TimeTicks, since the coffee\n making process was initiated.')
potTemperature = MibScalar((1, 3, 6, 1, 2, 1, 10, 132, 6, 6), Integer32()).setUnits('degrees Centigrade').setMaxAccess("readonly")
if mibBuilder.loadTexts: potTemperature.setDescription('The ambient temperature of the coffee within the pot')
mibBuilder.exportSymbols("COFFEE-POT-MIB", potLocation=potLocation, potLevel=potLevel, potType=potType, PYSNMP_MODULE_ID=coffee, potMonitor=potMonitor, coffee=coffee, potStartTime=potStartTime, potTemperature=potTemperature, potMetric=potMetric, potName=potName, potOperStatus=potOperStatus, potCapacity=potCapacity, lastStartTime=lastStartTime)
|
import numpy as np
def accuracy(y, y_hat):
# Calculate classification accuracy of model
return (y.astype(int) == y_hat.astype(int)).sum() / y.size
def rmse(y, y_hat):
# Calculate root mean squared error of the model
return np.sqrt(((y - y_hat) ** 2).mean())
def compute_scores(y, y_hat, classification_targets):
scores = np.zeros(y.shape[1])
# Calculate the classification and regression accuracy of the model
for i in range(y.shape[1]):
if i in classification_targets:
scores[i] = accuracy(y[:, i], y_hat[:, i])
else:
scores[i] = rmse(y[:, i], y_hat[:, i])
return scores
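# Minimal usage sketch with illustrative values: column 0 is treated as a
# classification target, column 1 as a regression target.
# y = np.array([[1, 0.5], [0, 1.5]])
# y_hat = np.array([[1, 0.4], [1, 1.7]])
# compute_scores(y, y_hat, classification_targets={0})  # -> approx. [0.5, 0.158]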
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from cart.views import ProductList, ProductDetail, OrdersViewSet, UserViewSet
router = DefaultRouter()
router.register(r'orders', OrdersViewSet)
router.register(r'users', UserViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('api/products/', ProductList.as_view(), name='product-list'),
path('api/products/<int:pk>', ProductDetail.as_view(), name='product-detail'),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh')
]
|
import os
import sys
# import random
from math import sqrt
import numpy as np
# import matplotlib.pyplot as plt
cur_dir = os.getcwd()
dt = float(sys.argv[1])
# dt = 1
max_t = int(sys.argv[2])
time_unit = 1
class LoadAllxy(object):
"""docstring for LoadAllxy"""
def __init__(self, particle_indx: int):
self.loc = []
self.particle_indx = particle_indx
self.particle_num = 0
@staticmethod
def exist(x, y):
if x != -1 and y != -1:
return True
else:
return False
@staticmethod
def find_center(lx, ly, rx, ry):
lx, ly, rx, ry = float(lx), float(ly), float(rx), float(ry)
if LoadAllxy.exist(lx, ly) and LoadAllxy.exist(rx, ry):
cx, cy = (lx + rx) / 2, (ly + ry) / 2
dx, dy = rx - lx, ry - ly
ds = sqrt(dx**2 + dy**2)
if ds != 0:
flag = 1
Tx, Ty = -dy / ds, dx / ds
abs_ori = np.arctan2(Ty, Tx)
else:
flag = -1
abs_ori = None
else:
flag = -1
cx, cy = -1, -1
abs_ori = None
return (flag, cx, cy, abs_ori)
def extract(self, sec_loc):
lx, ly, rx, ry = sec_loc[0], sec_loc[1], sec_loc[2], sec_loc[3]
c_xy = LoadAllxy.find_center(lx, ly, rx, ry)
# print(lx, ly, rx, ry, c_xy)
self.loc.append(c_xy)
def load_all_xy(self):
with open('{}/{}'.format(cur_dir, 'all_xy.csv'), 'r') as rf:
for _, line in enumerate(rf):
line = line.split(',')[1:]
self.extract(line[self.particle_indx *
4:self.particle_indx * 4 + 4])
self.particle_num = int(len(line) / 4)
return self.loc
class ClctVACF(object):
def __init__(self, loc, data: dict, particle_num: int):
self.loc = loc
self.max_t = max_t
self.data = data
self.chunk_loc = {}
self.chunk_ori = {}
self.particle_num = particle_num
def chunk_raw(self):
flag = 0
self.chunk_loc[flag], self.chunk_ori[flag] = [], []
for i, item in enumerate(self.loc):
if item[0] != -1:
self.chunk_loc[flag].append((item[1], item[2]))
self.chunk_ori[flag].append(item[3])
if len(self.chunk_loc[flag]) != 0 and item[0] == -1:
flag += 1
self.chunk_loc[flag] = []
self.chunk_ori[flag] = []
else:
pass
@staticmethod
def map2range(ori_list):
if len(ori_list) == 0:
return ori_list
ranged_ori_list = [ori_list[0]]
for i in range(1, len(ori_list)):
increment = ori_list[i] - ori_list[i - 1]
if increment > np.pi:
increment -= 2 * np.pi
if increment < -np.pi:
increment += 2 * np.pi
ranged_ori_list.append(ranged_ori_list[i - 1] + increment)
return ranged_ori_list
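# Illustrative example: map2range([3.0, -3.0]) returns [3.0, ~3.283], because
# the apparent jump of -6.0 rad is unwrapped by adding 2*pi.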
@staticmethod
def clct_vel(loc_list, ori_list):
vels = np.empty((0, 3))
for t in range(len(loc_list) - time_unit):
x_t2, y_t2, ori_t2 = loc_list[t +
time_unit][0], loc_list[t + time_unit][1], ori_list[t + time_unit]
x_t1, y_t1, ori_t1 = loc_list[t][0], loc_list[t][1], ori_list[t]
vt = np.array([(x_t2 - x_t1) / time_unit / dt, (y_t2 - y_t1) /
time_unit / dt, (ori_t2 - ori_t1) / time_unit / dt])
vels = np.vstack((vels, vt))
return vels
def clctvacf(self, t):
sum_vtv02, sum_vtv02_ori, N = 0, 0, 0
for flag, loc_list in self.chunk_loc.items():
ori_list = self.map2range(self.chunk_ori[flag])
if len(ori_list) >= 2:
vels = self.clct_vel(loc_list, ori_list)
else:
vels = []
if len(vels) > t:
for i in range(len(vels) - t):
dv = vels[i + t] - vels[i]
sum_vtv02 += vels[i + t][0] * \
vels[i][0] + vels[i + t][1] * vels[i][1]
sum_vtv02_ori += vels[i + t][2] * vels[i][2]
N += 1
if N == 0:
return (0, 0, 0)
else:
return (sum_vtv02 / N, sum_vtv02_ori / N, 1)
def find_vacf(self):
self.chunk_raw()
for t in range(self.max_t):
print(self.particle_num, t)
vtv0, vtv0_ori, count = self.clctvacf(t)
if t not in self.data:
self.data[t] = [0, 0, 0]
self.data[t][0] += vtv0
self.data[t][1] += vtv0_ori
self.data[t][2] += count
return self.data
if __name__ == '__main__':
data = {}
particle_0 = LoadAllxy(0)
particle_0_loc = particle_0.load_all_xy()
particle_num = particle_0.particle_num
data = ClctVACF(particle_0_loc, data, 0).find_vacf()
for num in range(1, particle_num):
print(num)
data = ClctVACF(LoadAllxy(num).load_all_xy(),
data, num).find_vacf()
with open('{}/{}'.format(cur_dir, 'vacf-test0.csv'), 'w') as wf:
for key in sorted(data.keys()):
vtv0, vtv0_ori, count = data[key][0], data[key][1], data[key][2]
wf.write('{},{},{}'.format(
key * dt, vtv0 / count, vtv0_ori / count))
wf.write('\n')
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import sys
from sim.elf import load_elf
from sim.sim import OTBNSim
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('elf')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument(
'--dump-dmem',
metavar="FILE",
type=argparse.FileType('wb'),
help="after execution, write the data memory contents to this file")
parser.add_argument(
'--dump-regs',
metavar="FILE",
type=argparse.FileType('w'),
default=sys.stdout,
help=
"after execution, write the GPR and WDR contents to this file (default: STDOUT)"
)
args = parser.parse_args()
sim = OTBNSim()
load_elf(sim, args.elf)
sim.state.start(0)
sim.run(verbose=args.verbose)
if args.dump_dmem is not None:
args.dump_dmem.write(sim.dump_data())
if args.dump_regs is not None:
for idx, value in enumerate(sim.state.gprs.peek_unsigned_values()):
args.dump_regs.write(' x{:<2} = 0x{:08x}\n'.format(idx, value))
for idx, value in enumerate(sim.state.wdrs.peek_unsigned_values()):
args.dump_regs.write(' w{:<2} = 0x{:064x}\n'.format(idx, value))
return 0
if __name__ == "__main__":
sys.exit(main())
|
# Copyright 2014 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The shares api."""
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_utils import timeutils
import six
from six.moves import http_client
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.openstack import wsgi
from manila.api.views import share_networks as share_networks_views
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
from manila import policy
from manila import quota
from manila.share import rpcapi as share_rpcapi
RESOURCE_NAME = 'share_network'
RESOURCES_NAME = 'share_networks'
LOG = log.getLogger(__name__)
QUOTAS = quota.QUOTAS
class ShareNetworkController(wsgi.Controller):
"""The Share Network API controller for the OpenStack API."""
_view_builder_class = share_networks_views.ViewBuilder
def __init__(self):
super(ShareNetworkController, self).__init__()
self.share_rpcapi = share_rpcapi.ShareAPI()
def show(self, req, id):
"""Return data about the requested network info."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'show')
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def delete(self, req, id):
"""Delete specified share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'delete')
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
share_instances = (
db_api.share_instances_get_all_by_share_network(context, id)
)
if share_instances:
msg = _("Can not delete share network %(id)s, it has "
"%(len)s share(s).") % {'id': id,
'len': len(share_instances)}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
# NOTE(ameade): Do not allow deletion of share network used by share
# group
sg_count = db_api.count_share_groups_in_share_network(context, id)
if sg_count:
msg = _("Can not delete share network %(id)s, it has %(len)s "
"share group(s).") % {'id': id, 'len': sg_count}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
for share_server in share_network['share_servers']:
self.share_rpcapi.delete_share_server(context, share_server)
db_api.share_network_delete(context, id)
try:
reservations = QUOTAS.reserve(
context, project_id=share_network['project_id'],
share_networks=-1, user_id=share_network['user_id'])
except Exception:
LOG.exception("Failed to update usages deleting "
"share-network.")
else:
QUOTAS.commit(context, reservations,
project_id=share_network['project_id'],
user_id=share_network['user_id'])
return webob.Response(status_int=http_client.ACCEPTED)
def _get_share_networks(self, req, is_detail=True):
"""Returns a list of share networks."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
if 'security_service_id' in search_opts:
networks = db_api.share_network_get_all_by_security_service(
context, search_opts['security_service_id'])
elif context.is_admin and 'project_id' in search_opts:
networks = db_api.share_network_get_all_by_project(
context, search_opts['project_id'])
elif context.is_admin and 'all_tenants' in search_opts:
networks = db_api.share_network_get_all(context)
else:
networks = db_api.share_network_get_all_by_project(
context,
context.project_id)
date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.'''
if 'created_since' in search_opts:
try:
created_since = timeutils.parse_strtime(
search_opts['created_since'],
fmt="%Y-%m-%d")
except ValueError:
msg = date_parsing_error_msg % search_opts['created_since']
raise exc.HTTPBadRequest(explanation=msg)
networks = [network for network in networks
if network['created_at'] >= created_since]
if 'created_before' in search_opts:
try:
created_before = timeutils.parse_strtime(
search_opts['created_before'],
fmt="%Y-%m-%d")
except ValueError:
msg = date_parsing_error_msg % search_opts['created_before']
raise exc.HTTPBadRequest(explanation=msg)
networks = [network for network in networks
if network['created_at'] <= created_before]
opts_to_remove = [
'all_tenants',
'created_since',
'created_before',
'limit',
'offset',
'security_service_id',
'project_id'
]
for opt in opts_to_remove:
search_opts.pop(opt, None)
if search_opts:
for key, value in search_opts.items():
if key in ['ip_version', 'segmentation_id']:
value = int(value)
if (req.api_version_request >=
api_version.APIVersionRequest("2.36")):
networks = [network for network in networks
if network.get(key) == value or
(value in network.get(key.rstrip('~'))
if key.endswith('~') and
network.get(key.rstrip('~')) else ())]
else:
networks = [network for network in networks
if network.get(key) == value]
limited_list = common.limited(networks, req)
return self._view_builder.build_share_networks(
req, limited_list, is_detail)
def index(self, req):
"""Returns a summary list of share networks."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'index')
return self._get_share_networks(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of share networks."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'detail')
return self._get_share_networks(req)
def update(self, req, id, body):
"""Update specified share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'update')
if not body or RESOURCE_NAME not in body:
raise exc.HTTPUnprocessableEntity()
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
update_values = body[RESOURCE_NAME]
if 'nova_net_id' in update_values:
msg = _("nova networking is not supported starting in Ocata.")
raise exc.HTTPBadRequest(explanation=msg)
if share_network['share_servers']:
for value in update_values:
if value not in ['name', 'description']:
msg = (_("Cannot update share network %s. It is used by "
"share servers. Only 'name' and 'description' "
"fields are available for update") %
share_network['id'])
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_update(context,
id,
update_values)
except db_exception.DBError:
msg = "Could not save supplied data due to database error"
raise exc.HTTPBadRequest(explanation=msg)
return self._view_builder.build_share_network(req, share_network)
def create(self, req, body):
"""Creates a new share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'create')
if not body or RESOURCE_NAME not in body:
raise exc.HTTPUnprocessableEntity()
values = body[RESOURCE_NAME]
values['project_id'] = context.project_id
values['user_id'] = context.user_id
if 'nova_net_id' in values:
msg = _("nova networking is not supported starting in Ocata.")
raise exc.HTTPBadRequest(explanation=msg)
try:
reservations = QUOTAS.reserve(context, share_networks=1)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'share_networks' in overs:
LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create "
"share-network (%(d_consumed)d of %(d_quota)d "
"already consumed).", {
's_pid': context.project_id,
'd_consumed': _consumed('share_networks'),
'd_quota': quotas['share_networks']})
raise exception.ShareNetworksLimitExceeded(
allowed=quotas['share_networks'])
else:
try:
share_network = db_api.share_network_create(context, values)
except db_exception.DBError:
msg = "Could not save supplied data due to database error"
raise exc.HTTPBadRequest(explanation=msg)
QUOTAS.commit(context, reservations)
return self._view_builder.build_share_network(req, share_network)
def action(self, req, id, body):
_actions = {
'add_security_service': self._add_security_service,
'remove_security_service': self._remove_security_service
}
for action, data in body.items():
try:
return _actions[action](req, id, data)
except KeyError:
msg = _("Share networks does not have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
def _add_security_service(self, req, id, data):
"""Associate share network with a given security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'add_security_service')
share_network = db_api.share_network_get(context, id)
if share_network['share_servers']:
msg = _("Cannot add security services. Share network is used.")
raise exc.HTTPForbidden(explanation=msg)
security_service = db_api.security_service_get(
context, data['security_service_id'])
for attached_service in share_network['security_services']:
if attached_service['type'] == security_service['type']:
msg = _("Cannot add security service to share network. "
"Security service with '%(ss_type)s' type already "
"added to '%(sn_id)s' share network") % {
'ss_type': security_service['type'],
'sn_id': share_network['id']}
raise exc.HTTPConflict(explanation=msg)
try:
share_network = db_api.share_network_add_security_service(
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
except exception.ShareNetworkSecurityServiceAssociationError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def _remove_security_service(self, req, id, data):
"""Dissociate share network from a given security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'remove_security_service')
share_network = db_api.share_network_get(context, id)
if share_network['share_servers']:
msg = _("Cannot remove security services. Share network is used.")
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_remove_security_service(
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
except exception.ShareNetworkSecurityServiceDissociationError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def create_resource():
return wsgi.Resource(ShareNetworkController())
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
# Each model should have a __str__ method that returns a string when an instance of the model is referenced.
"""
models.py
This is where you structure the models, then create them in the database by running the makemigrations and migrate
commands. This is the core of Django's model based views and forms.
"""
import os.path
from PIL import Image
from io import BytesIO
from django.core.files.base import ContentFile
from django.db import models
from django.contrib.auth.models import User
# Model of the park, creates embedded and static Google Maps urls when save() is called. Managed by admins.
class Park(models.Model):
name = models.CharField(max_length=200)
address = models.CharField(max_length=200)
maps_string = models.CharField(max_length=1000, null=True, blank=True)
static_string = models.CharField(max_length=1000, null=True, blank=True)
zip_code = models.CharField(max_length=5)
def __str__(self):
return self.name
# Override save function to create and store the Google Maps strings from the provided park name.
def save(self, *args, **kwargs):
if not self.maps_string:
self.maps_string = "https://www.google.com/maps/embed/v1/place?key=" + \
"AIzaSyBMDvz4zozQwoRPNcXrFX8OCGDp6c1FL7E&q=" + self.name.replace(" ", "+")
if not self.static_string:
self.static_string = "https://maps.googleapis.com/maps/api/staticmap?markers=" + \
self.name.replace(" ", "+") + \
"&size=400x400&zoom=12&key=AIzaSyD_g-oOuyVFlnlCbL7JLkO9wt-UOWruIMg"
super(Park, self).save(*args, **kwargs)
# Report type (Garbage, Oil Spill, etc.). Managed by admins.
class Category(models.Model):
type = models.CharField(max_length=200)
def __str__(self):
return self.type
# Report status (submitted, in progress, etc.). Managed by admins.
class Status(models.Model):
current_status = models.CharField(max_length=200)
def __str__(self):
return self.current_status
# Report model. Automatically takes the current time from the set Timezone. Description and image will be included
# by whoever creates it. Type, status, and the park are chosen from drop down menus. The user should be set by grabbing
# whoever is logged in. The image will be saved to the csc648-team13/media/photos folder. When the report is saved,
# a thumbnail is created, saved to the csc648-team13/media/thumbs folder, and the url is added to the thumbnail field.
class Report(models.Model):
sub_date = models.DateTimeField(auto_now_add=True)
description = models.TextField(max_length=1000, help_text="Please enter a short description of the issue.")
image = models.ImageField(upload_to='photos', null=True, blank=True, help_text="Upload an image of the"
" issue if you have one.")
thumbnail = models.ImageField(upload_to='thumbs', editable=False, null=True)
type = models.ForeignKey(Category, on_delete=models.CASCADE, help_text="Please select the type of issue.")
status = models.ForeignKey(Status, on_delete=models.CASCADE, default=1)
park = models.ForeignKey(Park, on_delete=models.CASCADE, help_text="Please select a park.")
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
def __str__(self):
return self.park.name + " " + str(self.sub_date)
# When a new report is successfully created, this redirects the user to its new report detail page.
def get_absolute_url(self):
from django.urls import reverse
return reverse('search:report_detail', args=[str(self.id)])
# Override save function to create thumbnail.
def save(self, *args, **kwargs):
if not self.image.closed:
if not self.make_thumbnail():
raise Exception('Could not create thumbnail - is the file type valid?')
super(Report, self).save(*args, **kwargs)
# make_thumbnail and save functions created by users xjtian and ziiirp on stackoverflow
# https://stackoverflow.com/questions/23922289/django-pil-save-thumbnail-version-right-when-image-is-uploaded
def make_thumbnail(self):
photo = Image.open(self.image)
photo.thumbnail((250, 250), Image.ANTIALIAS)
thumb_name, thumb_extension = os.path.splitext(self.image.name)
thumb_extension = thumb_extension.lower()
thumb_filename = thumb_name + '_thumb' + thumb_extension
if thumb_extension in ['.jpg', '.jpeg']:
FTYPE = 'JPEG'
elif thumb_extension == '.gif':
FTYPE = 'GIF'
elif thumb_extension == '.png':
FTYPE = 'PNG'
else:
return False
temp_thumb = BytesIO()
photo.save(temp_thumb, FTYPE)
temp_thumb.seek(0)
self.thumbnail.save(thumb_filename, ContentFile(temp_thumb.read()), save=False)
temp_thumb.close()
return True
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Default(object):
# config DataBase
SECRET_KEY = os.environ.get("SECRET_KEY")
WORK_FACTOR = 12
JWT_SECRET_KEY = os.environ.get("JWT_SECRET_KEY")
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_AUTH_USERNAME_KEY = os.environ.get("JWT_AUTH_USERNAME_KEY")
JWT_ACCESS_TOKEN_EXPIRES = os.environ.get("JWT_ACCESS_TOKEN_EXPIRES")
JWT_REFRESH_TOKEN_EXPIRES = os.environ.get("JWT_REFRESH_TOKEN_EXPIRES")
# configure mail server
MAIL_SERVER = os.environ.get("MAIL_SERVER", "smtp.gmail.com")
MAIL_PORT = os.environ.get("MAIL_PORT", 465)
MAIL_USE_TLS = os.environ.get("MAIL_USE_TLS", False)
MAIL_USE_SSL = os.environ.get("MAIL_USE_SSL", True)
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
MAIL_DEFAULT_SENDER = os.environ.get("MAIL_DEFAULT_SENDER")
# Configure JWT error message key
JWT_ERROR_MESSAGE_KEY = "message"
class Development(Default):
SQLALCHEMY_DATABASE_URI = os.getenv(
"DEV_DATABASE_URL"
) or "sqlite:///" + os.path.join(basedir, "data-dev.sqlite")
JWT_ACCESS_TOKEN_EXPIRES = False
JWT_REFRESH_TOKEN_EXPIRES = False
CORS_ORIGINS = ["*"]
# Send emails to the console
MAIL_BACKEND = "console"
class Testing(Default):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.getenv("TEST_DATABASE_URL") or "sqlite://"
WORK_FACTOR = 4
JWT_ACCESS_TOKEN_EXPIRES = False
JWT_REFRESH_TOKEN_EXPIRES = False
CORS_ORIGINS = ["*"]
class Production(Default):
# Heroku workaround: SQLAlchemy 1.4+ rejects the "postgres://" scheme Heroku provides, so rewrite it to "postgresql://". # noqa
# https://help.heroku.com/ZKNTJQSK/why-is-sqlalchemy-1-4-x-not-connecting-to-heroku-postgres # noqa
# https://github.com/sqlalchemy/sqlalchemy/discussions/5799
db_uri = os.getenv("DATABASE_URL") or ""
if db_uri.startswith("postgres://"):
db_uri = db_uri.replace("postgres://", "postgresql://", 1)
SQLALCHEMY_DATABASE_URI = db_uri
CORS_ORIGINS = ["UPDATE THIS WITH FRONTEND ORIGINS"]
JWT_ACCESS_TOKEN_EXPIRES = 900
JWT_REFRESH_TOKEN_EXPIRES = 604800
MAIL_PORT = 2525
MAIL_USE_TLS = True
MAIL_USE_SSL = False
app_environments = {
"development": Development,
"testing": Testing,
"production": Production,
}
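# Usage sketch (illustrative, assumes a Flask `app` object is in scope):
# config_name = os.getenv("FLASK_ENV", "development")
# app.config.from_object(app_environments[config_name])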
|
# -*- coding: utf-8 -*-
'''
Management of ipsets
======================
This is an ipset-specific module designed to manage IPSets for use
in IPTables Firewalls.
.. code-block:: yaml
setname:
ipset.set_present:
- set_type: bitmap:ip
- range: 192.168.0.0/16
- comment: True
setname:
ipset.set_absent:
- set_type: bitmap:ip
- range: 192.168.0.0/16
- comment: True
setname_entries:
ipset.present:
- set_name: setname
- entry: 192.168.0.3
- comment: Hello
- require:
- ipset: baz
setname_entries:
ipset.present:
- set_name: setname
- entry:
- 192.168.0.3
- 192.168.1.3
- comment: Hello
- require:
- ipset: baz
setname_entries:
ipset.absent:
- set_name: setname
- entry:
- 192.168.0.3
- 192.168.1.3
- comment: Hello
- require:
- ipset: baz
setname:
ipset.flush:
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if the ipset module is available in __salt__
'''
return 'ipset.version' in __salt__
def set_present(name, set_type, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Verify the set exists.
name
A user-defined set name.
set_type
The type for the set.
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
set_check = __salt__['ipset.check_set'](name)
if set_check is True:
ret['result'] = True
ret['comment'] = ('ipset set {0} already exists for {1}'
.format(name, family))
return ret
if __opts__['test']:
ret['comment'] = 'ipset set {0} would be added for {1}'.format(
name,
family)
return ret
command = __salt__['ipset.new_set'](name, set_type, family, **kwargs)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = ('ipset set {0} created successfully for {1}'
.format(name, family))
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to create set {0} for {2}: {1}'.format(
name,
command.strip(),
family
)
return ret
def set_absent(name, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Verify the set is absent.
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
set_check = __salt__['ipset.check_set'](name, family)
if not set_check:
ret['result'] = True
ret['comment'] = ('ipset set {0} for {1} is already absent'
.format(name, family))
return ret
if __opts__['test']:
ret['comment'] = 'ipset set {0} for {1} would be removed'.format(
name,
family)
return ret
flush_set = __salt__['ipset.flush'](name, family)
if flush_set:
command = __salt__['ipset.delete_set'](name, family)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = ('ipset set {0} deleted successfully for family {1}'
.format(name, family))
else:
ret['result'] = False
ret['comment'] = ('Failed to delete set {0} for {2}: {1}'
.format(name, command.strip(), family))
else:
ret['result'] = False
ret['comment'] = 'Failed to flush set {0} for {2}: {1}'.format(
name,
flush_set.strip(),
family
)
return ret
def present(name, entry=None, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Append an entry to a set
name
A user-defined name to call this entry by in another part of a state or
formula. This should not be an actual entry.
entry
A single entry to add to a set or a list of entries to add to a set
family
Network family, ipv4 or ipv6.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not entry:
ret['result'] = False
ret['comment'] = ('ipset entry must be specified')
return ret
entries = []
if isinstance(entry, list):
entries = entry
else:
entries.append(entry)
for entry in entries:
entry_opts = ''
if ' ' in entry:
entry, entry_opts = entry.split(' ', 1)
if 'timeout' in kwargs and 'timeout' not in entry_opts:
entry_opts = 'timeout {0} {1}'.format(kwargs['timeout'], entry_opts)
if 'comment' in kwargs and 'comment' not in entry_opts:
entry_opts = '{0} comment "{1}"'.format(entry_opts, kwargs['comment'])
_entry = ' '.join([entry, entry_opts.lstrip()]).strip()
if __salt__['ipset.check'](kwargs['set_name'],
_entry,
family) is True:
ret['comment'] += 'entry for {0} already in set {1} for {2}\n'.format(
entry,
kwargs['set_name'],
family)
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] += 'entry {0} would be added to set {1} for family {2}\n'.format(
entry,
kwargs['set_name'],
family)
else:
command = __salt__['ipset.add'](kwargs['set_name'], _entry, family, **kwargs)
if 'Error' not in command:
ret['changes'] = {'locale': name}
ret['comment'] += 'entry {0} added to set {1} for family {2}\n'.format(
_entry,
kwargs['set_name'],
family)
else:
ret['result'] = False
ret['comment'] = 'Failed to add entry {1} to set {0} for family {2}.\n{3}'.format(
kwargs['set_name'],
_entry, family, command)
return ret
def absent(name, entry=None, entries=None, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Remove an entry or entries from a set
name
A user-defined name to call this entry by in another part of a state or
formula. This should not be an actual entry.
family
Network family, ipv4 or ipv6.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not entry:
ret['result'] = False
ret['comment'] = ('ipset entry must be specified')
return ret
entries = []
if isinstance(entry, list):
entries = entry
else:
entries.append(entry)
for entry in entries:
entry_opts = ''
if ' ' in entry:
entry, entry_opts = entry.split(' ', 1)
if 'timeout' in kwargs and 'timeout' not in entry_opts:
entry_opts = 'timeout {0} {1}'.format(kwargs['timeout'], entry_opts)
if 'comment' in kwargs and 'comment' not in entry_opts:
entry_opts = '{0} comment "{1}"'.format(entry_opts, kwargs['comment'])
_entry = ' '.join([entry, entry_opts]).strip()
log.debug('_entry {0}'.format(_entry))
if not __salt__['ipset.check'](kwargs['set_name'],
_entry,
family) is True:
ret['result'] = True
ret['comment'] += 'ipset entry for {0} not present in set {1} for {2}\n'.format(
_entry,
kwargs['set_name'],
family)
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] += 'ipset entry {0} would be removed from set {1} for {2}\n'.format(
entry,
kwargs['set_name'],
family)
else:
command = __salt__['ipset.delete'](kwargs['set_name'], entry, family, **kwargs)
if 'Error' not in command:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] += 'ipset entry {1} removed from set {0} for {2}\n'.format(
kwargs['set_name'],
_entry,
family)
else:
ret['result'] = False
ret['comment'] = 'Failed to delete ipset entry from set {0} for {2}. ' \
'Attempted entry was {1}.\n' \
'{3}\n'.format(kwargs['set_name'], _entry, family, command)
return ret
def flush(name, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Flush current ipset set
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
set_check = __salt__['ipset.check_set'](name)
if set_check is False:
ret['result'] = False
ret['comment'] = ('ipset set {0} does not exist for {1}'
.format(name, family))
return ret
if __opts__['test']:
ret['comment'] = 'ipset entries in set {0} for {1} would be flushed'.format(
name,
family)
return ret
if __salt__['ipset.flush'](name, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Flushed ipset entries from set {0} for {1}'.format(
name,
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to flush ipset entries from set {0} for {1}' \
''.format(name, family)
return ret
|
from django.contrib import admin
# from .models import Test_my
# Register your models here.
# admin.site.register(Test_my)
|
## based on dtm1.py but using some cogsci papers
import logging
from gensim.models import ldaseqmodel
from gensim.corpora import Dictionary, bleicorpus, textcorpus
import numpy
from gensim.matutils import hellinger
import re
import os
def text_to_words(text):
clean = re.sub('[^A-Za-z0-9 ]+', '', text)
words = clean.lower().split()
return(words)
def load_documents(folder):
files = os.listdir(folder)
documents = list()
for file in files:
if file.endswith(".txt"):
path = os.path.join(folder, file)
with open(path, "r") as f:
text = f.read()
words = text_to_words(text)
documents.append(words)
return(documents)
def run(n, folder, verbose = True):
# documents is a list of lists, where each nested list has the words from one document
documents = load_documents(folder)
len(documents)
len(documents[0])
# remove common words
stoplist = set('for a an of the and or to in from on is are can we'.split())
documents = [[word for word in document if word not in stoplist] for document in documents]
# remove words that appear only once
from collections import defaultdict
frequency = defaultdict(int)
for document in documents:
for word in document:
frequency[word] += 1
documents = [[word for word in document if frequency[word] > 1] for document in documents]
len(documents)
[len(document) for document in documents]
# use only the first n words per document
documents = [document[:n] for document in documents]
[len(document) for document in documents]
class DTMcorpus(textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
corpus = DTMcorpus(documents)
first_half = len(documents) // 2
second_half = len(documents) - first_half
time_slice = [first_half, second_half] # n documents split into 2 time slices
if verbose:
# activate logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# run
ldaseq = ldaseqmodel.LdaSeqModel(corpus=corpus, id2word=corpus.dictionary, time_slice=time_slice, num_topics=5)
# Visualizing dynamic topic models
from gensim.models.wrappers.dtmmodel import DtmModel
from gensim.corpora import Dictionary, bleicorpus
import pyLDAvis
doc_topic, topic_term, doc_lengths, term_frequency, vocab = ldaseq.dtm_vis(time=0, corpus=corpus)
vis_dtm = pyLDAvis.prepare(topic_term_dists=topic_term, doc_topic_dists=doc_topic, doc_lengths=doc_lengths, vocab=vocab, term_frequency=term_frequency)
# For ipython notebook:
# pyLDAvis.display(vis_dtm)
# This works best for me (then view dtm.html in a browser)
with open("dtm3.html", "w") as f:
pyLDAvis.save_html(vis_dtm, f)
return("dtm3.html saved.")
# use only the first n words per document
run(n = 100, folder = 'txt-small', verbose = False)
|
import sys
import time
import random
import openpyxl
import csv
from pathlib import Path
from loguru import logger
from pyngsi.sources.source import Source, Row
class SourceSampleOrion(Source):
"""
A SourceSampleOrion implements the Source from the NGSI Walkthrough tutorial.
Please have a look at :
https://fiware-orion.readthedocs.io/en/master/user/walkthrough_apiv2/index.html#entity-creation
First two records are those of the tutorial.
Following records are randomized.
"""
def __init__(self, count: int = 5, delay: float = 1.0):
self.count = count if count > 0 else sys.maxsize
self.delay = delay
def __iter__(self):
i: int = 0
if self.count >= 1: # 1st element is fixed
yield Row("orionSample", "Room1;23;720")
i += 1
time.sleep(self.delay)
if self.count >= 2: # 2nd element is fixed
yield Row("orionSample", "Room2;21;711")
i += 1
time.sleep(self.delay)
# next elements are randomized
while i < self.count:
yield Row("orionSample", f"Room{i%9+1};{round(random.uniform(-10,50), 1)};{random.randint(700,1000)}")
i += 1
time.sleep(self.delay)
def reset(self):
pass
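# Usage sketch (illustrative; Row field names assumed from the constructor calls above):
# for row in SourceSampleOrion(count=3, delay=0.0):
#     print(row.provider, row.record)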
class SourceMicrosoftExcel(Source):
def __init__(self, filename, sheetid: int = 0, sheetname: str = None, ignore: int = 0):
logger.debug(f"{filename=}")
wb = openpyxl.load_workbook(filename, data_only=True)
ws = wb[sheetname] if sheetname else wb.worksheets[sheetid]
self.rows = ws.rows
self.provider = Path(filename).name
for _ in range(ignore): # skip lines
next(self.rows)
def __iter__(self):
for row in self.rows:
record = ";".join(
[str(cell.value) if cell.value else "" for cell in row])
logger.debug(f"{self.provider=}{record=}")
yield Row(self.provider, record)
|
# -*- coding: utf-8 -*-
"""
Created on January, 23 2021
@author: Gerd Duscher
"""
import unittest
import numpy as np
import sidpy
from scipy.ndimage import gaussian_filter
from pycroscopy.image import image_clean
import sys
if sys.version_info.major == 3:
unicode = str
def make_test_data():
im = np.zeros([64, 64])
im[4::8, 4::8] = 1
image = sidpy.Dataset.from_array(gaussian_filter(im, sigma=2))
image.data_type = 'Image'
image.dim_0.dimension_type = 'spatial'
image.dim_1.dimension_type = 'spatial'
atoms = []
for i in range(8):
for j in range(8):
atoms.append([8 * i + 4, 8 * j + 4])
return image, atoms
class TestUtilityFunctions(unittest.TestCase):
def test_clean_svd(self):
image, atoms_placed = make_test_data()
with self.assertRaises(TypeError):
image_clean.clean_svd(np.array(image))
with self.assertRaises(TypeError):
image.data_type = 'spectrum'
image_clean.clean_svd(image)
image.data_type = 'image'
clean_image = image_clean.clean_svd(image)
self.assertIsInstance(clean_image, sidpy.Dataset)
def test_decon_lr(self):
im = np.random.random([256, 256])
image = sidpy.Dataset.from_array(gaussian_filter(im, sigma=2))
image.data_type = 'Image'
image.dim_0.dimension_type = 'spatial'
image.dim_1.dimension_type = 'spatial'
image.x = image.dim_0
image.y = image.dim_1
clean_image = image_clean.decon_lr(image, verbose=True)
self.assertIsInstance(clean_image, sidpy.Dataset)
if __name__ == '__main__':
unittest.main()
|
from django.db import transaction
from django.http import HttpResponse
from django.views.generic.base import View
from reversion.views import create_revision, RevisionMixin
from test_app.models import TestModel
def save_obj_view(request):
return HttpResponse(TestModel.objects.create().id)
def save_obj_error_view(request):
with transaction.atomic():
TestModel.objects.create()
raise Exception("Boom!")
@create_revision()
def create_revision_view(request):
return save_obj_view(request)
class RevisionMixinView(RevisionMixin, View):
def revision_request_creates_revision(self, request):
silent = request.META.get("HTTP_X_NOREVISION", "false") == "true"
return super().revision_request_creates_revision(request) and not silent
def dispatch(self, request):
return save_obj_view(request)
|
# -*- coding: utf-8 -*-
"""
pmutt.test_pmutt_model_statmech_nucl
Tests for pmutt module
"""
import unittest
from pmutt.statmech import nucl
class TestEmptyNucl(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.nuclear = nucl.EmptyNucl()
self.nuclear_dict = {
'class': "<class 'pmutt.statmech.nucl.EmptyNucl'>"
}
def test_get_q(self):
self.assertEqual(self.nuclear.get_q(), 1.)
def test_get_CvoR(self):
self.assertEqual(self.nuclear.get_CvoR(), 0.)
def test_get_CpoR(self):
self.assertEqual(self.nuclear.get_CpoR(), 0.)
def test_get_UoRT(self):
self.assertEqual(self.nuclear.get_UoRT(), 0.)
def test_get_HoRT(self):
self.assertEqual(self.nuclear.get_HoRT(), 0.)
def test_get_SoR(self):
self.assertEqual(self.nuclear.get_SoR(), 0.)
def test_get_FoRT(self):
self.assertEqual(self.nuclear.get_FoRT(), 0.)
def test_get_GoRT(self):
self.assertEqual(self.nuclear.get_GoRT(), 0.)
def test_to_dict(self):
self.assertEqual(self.nuclear.to_dict(), self.nuclear_dict)
def test_from_dict(self):
self.assertEqual(nucl.EmptyNucl.from_dict(self.nuclear_dict),
self.nuclear)
if __name__ == '__main__':
unittest.main()
|
from contextlib import contextmanager
import logging
from flask import Flask
from flask import render_template
from flask import request
from flask import json
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# dialect+driver://username:password@host:port/database
engine = create_engine('postgresql+psycopg2://aolop@db:5432/paolosdb', echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
logging.basicConfig(
#filename='logs.txt',
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s: \n %(message)s\n',
datefmt='%a %Y-%b-%d %H:%M:%S %Z',
)
@contextmanager
def session_scope():
session = Session()
try:
yield session
session.commit()
except:
logging.error("Rolling back transaction")
session.rollback()
raise
finally:
session.close()
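# Usage sketch: commits on success, rolls back and re-raises on failure.
# with session_scope() as session:
#     session.add(some_mapped_object)  # some_mapped_object is a placeholder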
class Paolo(Base):
__tablename__ = 'paolos'
name = Column(String, primary_key=True)
verysecretsecret = Column(String)
latitude = Column(Float)
longitude = Column(Float)
birthday = Column(Integer)
lastseen = Column(String)
Base.metadata.create_all(engine)
app = Flask(__name__)
@app.route('/')
def paolo_is_here():
return render_template('hello.html')
@app.route('/log', methods=['GET', 'POST'])
def write_log():
if request.method == 'POST':
if request.is_json:
logging.info(
"Log endpoint, json. Received: {json}"
.format(json=request.json)
)
return request.json
else:
logging.info(
"Log endpoint, not json. Received: {data}"
.format(data=request.data)
)
return request.data
else:
return "Only POST for now...\n"
@app.route('/givelocation', methods=['POST'])
def give_location():
# I THINK this could do with some codes MAYBE
if request.method != 'POST':
logging.info("User attempted non-post method on give_location")
return "Only POST for now...\n"
if not request.is_json:
logging.info("User attempted non-json data on give_location")
return "Plz give json...\n"
try:
name = request.json["Name"]
timestamp = request.json["Timestamp"]
latitude = request.json["Latitude"]
longitude = request.json["Longitude"]
except: # This is not even the MVP, okay?
logging.error("In give_location, couldn't parse json or something??")
return "U give bad json...\n"
with session_scope() as session:
person = session.query(Paolo).filter(Paolo.name==name).one_or_none()
if not person:
logging.debug("Didn't find person in give_location. Creating")
newperson = Paolo(
name=name,
lastseen=timestamp,
latitude=latitude,
longitude=longitude,
)
session.add(newperson)
return "U not exist... I creat.\n"
else:
logging.debug("Found person in give_location. Updating")
person.latitude = latitude
person.longitude = longitude
person.lastseen = timestamp
return "OK, ...probably\n"
@app.route('/getlocation', methods=['GET'])
def get_location():
# codes
name = request.args.get('name', None)
if not name:
return "Plz give name\n"
with session_scope() as session:
person = session.query(Paolo).filter(Paolo.name==name).one_or_none()
if not person:
return "This person not exist\n"
return json.jsonify(
name=person.name,
lastseen=person.lastseen,
latitude=person.latitude,
longitude=person.longitude,
)
def add_some_stuff():
flamingo = Paolo(
name='flamingo',
verysecretsecret='onthelake',
latitude=41.794722,
longitude=-87.580833,
birthday=19270000,
)
cove = Paolo(
name='cove',
latitude=41.79543,
longitude=-87.5819805,
)
tadpoles = Paolo(
name='tadpoles',
latitude=41.795351,
longitude=-87.577861,
)
with session_scope() as session:
session.add_all([flamingo, cove, tadpoles])
try:
add_some_stuff()
except:
logging.info("Didn't add random stuff to db")
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class Algorithm(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'sizing': 'OneOfHexDominantSnappySizing',
'refinements': 'list[OneOfHexDominantSnappyRefinements]',
'cell_zones': 'list[SimmetrixCellZones]',
'automatic_layer_settings': 'OneOfSimmetrixMeshingFluidAutomaticLayerSettings',
'physics_based_meshing': 'bool',
'hex_core': 'bool',
'num_of_processors': 'int',
'advanced_simmetrix_settings': 'AdvancedSimmetrixSolidSettings',
'second_order': 'bool',
'enable_shell_meshing': 'bool',
'surface_element_type': 'str',
'meshing_mode': 'str'
}
attribute_map = {
'type': 'type',
'sizing': 'sizing',
'refinements': 'refinements',
'cell_zones': 'cellZones',
'automatic_layer_settings': 'automaticLayerSettings',
'physics_based_meshing': 'physicsBasedMeshing',
'hex_core': 'hexCore',
'num_of_processors': 'numOfProcessors',
'advanced_simmetrix_settings': 'advancedSimmetrixSettings',
'second_order': 'secondOrder',
'enable_shell_meshing': 'enableShellMeshing',
'surface_element_type': 'surfaceElementType',
'meshing_mode': 'meshingMode'
}
discriminator_value_class_map = {
'SIMMETRIX_MESHING_FLUID_V16': 'SimmetrixMeshingFluid',
'SIMMETRIX_MESHING_SOLID': 'SimmetrixMeshingSolid',
'HEX_DOMINANT_SNAPPY_V5': 'HexDominantSnappy'
}
def __init__(self, type='HEX_DOMINANT_SNAPPY_V5', sizing=None, refinements=None, cell_zones=None, automatic_layer_settings=None, physics_based_meshing=None, hex_core=None, num_of_processors=None, advanced_simmetrix_settings=None, second_order=None, enable_shell_meshing=None, surface_element_type=None, meshing_mode=None, local_vars_configuration=None): # noqa: E501
"""Algorithm - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._sizing = None
self._refinements = None
self._cell_zones = None
self._automatic_layer_settings = None
self._physics_based_meshing = None
self._hex_core = None
self._num_of_processors = None
self._advanced_simmetrix_settings = None
self._second_order = None
self._enable_shell_meshing = None
self._surface_element_type = None
self._meshing_mode = None
self.discriminator = 'type'
self.type = type
if sizing is not None:
self.sizing = sizing
if refinements is not None:
self.refinements = refinements
if cell_zones is not None:
self.cell_zones = cell_zones
if automatic_layer_settings is not None:
self.automatic_layer_settings = automatic_layer_settings
if physics_based_meshing is not None:
self.physics_based_meshing = physics_based_meshing
if hex_core is not None:
self.hex_core = hex_core
if num_of_processors is not None:
self.num_of_processors = num_of_processors
if advanced_simmetrix_settings is not None:
self.advanced_simmetrix_settings = advanced_simmetrix_settings
if second_order is not None:
self.second_order = second_order
if enable_shell_meshing is not None:
self.enable_shell_meshing = enable_shell_meshing
if surface_element_type is not None:
self.surface_element_type = surface_element_type
if meshing_mode is not None:
self.meshing_mode = meshing_mode
@property
def type(self):
"""Gets the type of this Algorithm. # noqa: E501
Schema name: HexDominantSnappy # noqa: E501
:return: The type of this Algorithm. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Algorithm.
Schema name: HexDominantSnappy # noqa: E501
:param type: The type of this Algorithm. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def sizing(self):
"""Gets the sizing of this Algorithm. # noqa: E501
:return: The sizing of this Algorithm. # noqa: E501
:rtype: OneOfHexDominantSnappySizing
"""
return self._sizing
@sizing.setter
def sizing(self, sizing):
"""Sets the sizing of this Algorithm.
:param sizing: The sizing of this Algorithm. # noqa: E501
:type: OneOfHexDominantSnappySizing
"""
self._sizing = sizing
@property
def refinements(self):
"""Gets the refinements of this Algorithm. # noqa: E501
:return: The refinements of this Algorithm. # noqa: E501
:rtype: list[OneOfHexDominantSnappyRefinements]
"""
return self._refinements
@refinements.setter
def refinements(self, refinements):
"""Sets the refinements of this Algorithm.
:param refinements: The refinements of this Algorithm. # noqa: E501
:type: list[OneOfHexDominantSnappyRefinements]
"""
self._refinements = refinements
@property
def cell_zones(self):
"""Gets the cell_zones of this Algorithm. # noqa: E501
:return: The cell_zones of this Algorithm. # noqa: E501
:rtype: list[SimmetrixCellZones]
"""
return self._cell_zones
@cell_zones.setter
def cell_zones(self, cell_zones):
"""Sets the cell_zones of this Algorithm.
:param cell_zones: The cell_zones of this Algorithm. # noqa: E501
:type: list[SimmetrixCellZones]
"""
self._cell_zones = cell_zones
@property
def automatic_layer_settings(self):
"""Gets the automatic_layer_settings of this Algorithm. # noqa: E501
:return: The automatic_layer_settings of this Algorithm. # noqa: E501
:rtype: OneOfSimmetrixMeshingFluidAutomaticLayerSettings
"""
return self._automatic_layer_settings
@automatic_layer_settings.setter
def automatic_layer_settings(self, automatic_layer_settings):
"""Sets the automatic_layer_settings of this Algorithm.
:param automatic_layer_settings: The automatic_layer_settings of this Algorithm. # noqa: E501
:type: OneOfSimmetrixMeshingFluidAutomaticLayerSettings
"""
self._automatic_layer_settings = automatic_layer_settings
@property
def physics_based_meshing(self):
"""Gets the physics_based_meshing of this Algorithm. # noqa: E501
Physics-based meshing takes setup information like materials, boundary conditions, and source terms into account to size the mesh accordingly. When enabled, the following adaptations will be made:</p><ul><li>Refinements on inlets and outlets</li><li>Different sizing for solid and fluid regions in CHT simulations</li></ul> <br>When toggled on users don’t have to worry about creating a <a href='https://www.simscale.com/docs/simulation-setup/simulation-control/' target='_blank'>separate cell zone</a>. # noqa: E501
:return: The physics_based_meshing of this Algorithm. # noqa: E501
:rtype: bool
"""
return self._physics_based_meshing
@physics_based_meshing.setter
def physics_based_meshing(self, physics_based_meshing):
"""Sets the physics_based_meshing of this Algorithm.
Physics-based meshing takes setup information like materials, boundary conditions, and source terms into account to size the mesh accordingly. When enabled, the following adaptations will be made:</p><ul><li>Refinements on inlets and outlets</li><li>Different sizing for solid and fluid regions in CHT simulations</li></ul> <br>When toggled on users don’t have to worry about creating a <a href='https://www.simscale.com/docs/simulation-setup/simulation-control/' target='_blank'>separate cell zone</a>. # noqa: E501
:param physics_based_meshing: The physics_based_meshing of this Algorithm. # noqa: E501
:type: bool
"""
self._physics_based_meshing = physics_based_meshing
@property
def hex_core(self):
"""Gets the hex_core of this Algorithm. # noqa: E501
<p>If <a href='https://www.simscale.com/docs/simulation-setup/meshing/standard/#hexcore' target='_blank'><b>Hex element core</b></a> is activated, the interior of the mesh gets covered by <a href='https://www.simscale.com/docs/simulation-setup/meshing/standard/#hexcore' target='_blank'><b>hexahedral elements</b></a>. The transition to the triangulated surface mesh is covered by tetrahedral and pyramid elements.<img src=\"/spec/resources/help/imgs/simmetrix-hexcore.png\" class=\"helpPopupImage\"/>Meshclip through a hex-core mesh.</p> # noqa: E501
:return: The hex_core of this Algorithm. # noqa: E501
:rtype: bool
"""
return self._hex_core
@hex_core.setter
def hex_core(self, hex_core):
"""Sets the hex_core of this Algorithm.
<p>If <a href='https://www.simscale.com/docs/simulation-setup/meshing/standard/#hexcore' target='_blank'><b>Hex element core</b></a> is activated, the interior of the mesh gets covered by <a href='https://www.simscale.com/docs/simulation-setup/meshing/standard/#hexcore' target='_blank'><b>hexahedral elements</b></a>. The transition to the triangulated surface mesh is covered by tetrahedral and pyramid elements.<img src=\"/spec/resources/help/imgs/simmetrix-hexcore.png\" class=\"helpPopupImage\"/>Meshclip through a hex-core mesh.</p> # noqa: E501
:param hex_core: The hex_core of this Algorithm. # noqa: E501
:type: bool
"""
self._hex_core = hex_core
@property
def num_of_processors(self):
"""Gets the num_of_processors of this Algorithm. # noqa: E501
<p>Selecting more processor cores might speed up the meshing process. Choosing a smaller computation instance will save core hours. <a href='https://www.simscale.com/docs/simulation-setup/meshing/#number-of-processors' target='_blank'>Learn more</a>.</p> # noqa: E501
:return: The num_of_processors of this Algorithm. # noqa: E501
:rtype: int
"""
return self._num_of_processors
@num_of_processors.setter
def num_of_processors(self, num_of_processors):
"""Sets the num_of_processors of this Algorithm.
<p>Selecting more processor cores might speed up the meshing process. Choosing a smaller computation instance will save core hours. <a href='https://www.simscale.com/docs/simulation-setup/meshing/#number-of-processors' target='_blank'>Learn more</a>.</p> # noqa: E501
:param num_of_processors: The num_of_processors of this Algorithm. # noqa: E501
:type: int
"""
allowed_values = [-1, 4, 8, 16, 32, 64, 96] # noqa: E501
if self.local_vars_configuration.client_side_validation and num_of_processors not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `num_of_processors` ({0}), must be one of {1}" # noqa: E501
.format(num_of_processors, allowed_values)
)
self._num_of_processors = num_of_processors
@property
def advanced_simmetrix_settings(self):
"""Gets the advanced_simmetrix_settings of this Algorithm. # noqa: E501
:return: The advanced_simmetrix_settings of this Algorithm. # noqa: E501
:rtype: AdvancedSimmetrixSolidSettings
"""
return self._advanced_simmetrix_settings
@advanced_simmetrix_settings.setter
def advanced_simmetrix_settings(self, advanced_simmetrix_settings):
"""Sets the advanced_simmetrix_settings of this Algorithm.
:param advanced_simmetrix_settings: The advanced_simmetrix_settings of this Algorithm. # noqa: E501
:type: AdvancedSimmetrixSolidSettings
"""
self._advanced_simmetrix_settings = advanced_simmetrix_settings
@property
def second_order(self):
"""Gets the second_order of this Algorithm. # noqa: E501
<p>The <a href='https://www.simscale.com/docs/simulation-setup/meshing/standard/#order' target='_blank'><b>mesh order</b></a> defines the shape and the number of nodes of the mesh elements. For a fast, rough analysis choose <i>first order</i> only. Activate <i>2nd order elements</i> for higher quality results</p> # noqa: E501
:return: The second_order of this Algorithm. # noqa: E501
:rtype: bool
"""
return self._second_order
@second_order.setter
def second_order(self, second_order):
"""Sets the second_order of this Algorithm.
<p>The <a href='https://www.simscale.com/docs/simulation-setup/meshing/standard/#order' target='_blank'><b>mesh order</b></a> defines the shape and the number of nodes of the mesh elements. For a fast, rough analysis choose <i>first order</i> only. Activate <i>2nd order elements</i> for higher quality results</p> # noqa: E501
:param second_order: The second_order of this Algorithm. # noqa: E501
:type: bool
"""
self._second_order = second_order
@property
def enable_shell_meshing(self):
"""Gets the enable_shell_meshing of this Algorithm. # noqa: E501
:return: The enable_shell_meshing of this Algorithm. # noqa: E501
:rtype: bool
"""
return self._enable_shell_meshing
@enable_shell_meshing.setter
def enable_shell_meshing(self, enable_shell_meshing):
"""Sets the enable_shell_meshing of this Algorithm.
:param enable_shell_meshing: The enable_shell_meshing of this Algorithm. # noqa: E501
:type: bool
"""
self._enable_shell_meshing = enable_shell_meshing
@property
def surface_element_type(self):
"""Gets the surface_element_type of this Algorithm. # noqa: E501
:return: The surface_element_type of this Algorithm. # noqa: E501
:rtype: str
"""
return self._surface_element_type
@surface_element_type.setter
def surface_element_type(self, surface_element_type):
"""Sets the surface_element_type of this Algorithm.
:param surface_element_type: The surface_element_type of this Algorithm. # noqa: E501
:type: str
"""
allowed_values = ["TRIANGULAR", "QUADDOMINANT"] # noqa: E501
if self.local_vars_configuration.client_side_validation and surface_element_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `surface_element_type` ({0}), must be one of {1}" # noqa: E501
.format(surface_element_type, allowed_values)
)
self._surface_element_type = surface_element_type
@property
def meshing_mode(self):
"""Gets the meshing_mode of this Algorithm. # noqa: E501
<p>The <a href='https://www.simscale.com/docs/simulation-setup/meshing/hex-dominant/#meshing-mode' target='_blank'>meshing mode</a> defines how the mesher should generate the mesh.</p><ul><li>The <b>Internal</b> mode will create the mesh <u>inside</u> of the geometry body. If the CAD consists of multiple solids, the mesher will attempt to create a multiregion mesh which is suitable for conjugate heat transfer analyses. Use this mode if the CAD model already represents the final fluid domain.</li><li><b>External</b> meshing will create the mesh <u>outside</u> of the bodies. The absolute dimensions of the mesh are determined by the <i>Background Mesh Box</i>. Use this mode in case you want to extract the fluid domain around your model.</li><li>The option <b>Material point</b> allows you to define a point inside the domain where the mesh will be placed. It can be used to select which part (or enclosed volume) of the model or should be meshed. The mesh will surround the material point and extend until the boundaries of the body. The location of the material point is defined by the <i>Material Point</i> geometry primitive.</li></ul> # noqa: E501
:return: The meshing_mode of this Algorithm. # noqa: E501
:rtype: str
"""
return self._meshing_mode
@meshing_mode.setter
def meshing_mode(self, meshing_mode):
"""Sets the meshing_mode of this Algorithm.
<p>The <a href='https://www.simscale.com/docs/simulation-setup/meshing/hex-dominant/#meshing-mode' target='_blank'>meshing mode</a> defines how the mesher should generate the mesh.</p><ul><li>The <b>Internal</b> mode will create the mesh <u>inside</u> of the geometry body. If the CAD consists of multiple solids, the mesher will attempt to create a multiregion mesh which is suitable for conjugate heat transfer analyses. Use this mode if the CAD model already represents the final fluid domain.</li><li><b>External</b> meshing will create the mesh <u>outside</u> of the bodies. The absolute dimensions of the mesh are determined by the <i>Background Mesh Box</i>. Use this mode in case you want to extract the fluid domain around your model.</li><li>The option <b>Material point</b> allows you to define a point inside the domain where the mesh will be placed. It can be used to select which part (or enclosed volume) of the model or should be meshed. The mesh will surround the material point and extend until the boundaries of the body. The location of the material point is defined by the <i>Material Point</i> geometry primitive.</li></ul> # noqa: E501
:param meshing_mode: The meshing_mode of this Algorithm. # noqa: E501
:type: str
"""
allowed_values = ["INTERNAL"] # noqa: E501
if self.local_vars_configuration.client_side_validation and meshing_mode not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `meshing_mode` ({0}), must be one of {1}" # noqa: E501
.format(meshing_mode, allowed_values)
)
self._meshing_mode = meshing_mode
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Algorithm):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Algorithm):
return True
return self.to_dict() != other.to_dict()
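# Hedged usage sketch (not part of the generated SDK): constructing an Algorithm with its default
# discriminator and serialising it. The field values below are illustrative placeholders chosen from
# the allowed values declared above.
#
#     algo = Algorithm(type='HEX_DOMINANT_SNAPPY_V5', physics_based_meshing=True, num_of_processors=8)
#     print(algo.to_dict())
#     print(algo.get_real_child_model({'type': 'SIMMETRIX_MESHING_SOLID'}))  # -> 'SimmetrixMeshingSolid'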
|
from api.models import Attendance, CourseUnit, Student
from logs import general_log
from logs.logs_decorator import log_operations_time, log_and_catch_query
from django.db import transaction
from rest.queries.utils import generate_random_id
@log_operations_time
@log_and_catch_query(
(Exception, "Erro ao obter todas as cadeiras")
)
def course_units():
"""
    Get all course units persisted in the DB
:return: tuple with
- Course units data
- list of success messages
- list of error messages
"""
success = []
data = CourseUnit.objects.all().order_by('name')
success.append("Cadeiras obtidas com sucesso")
general_log.debug(f"{__name__}->{course_units.__name__} {success[-1]}")
return data, success, []
@log_operations_time
@log_and_catch_query(
(Exception, "Erro ao obter as cadeiras do docente")
)
def course_units_my(user):
"""
    Get all course units assigned to a teacher that are persisted in the DB
:return: tuple with
- Course units data
- list of success messages
- list of error messages
"""
success = []
data = set([attendance.course_unit for attendance in
Attendance.objects.filter(creator=user).order_by('course_unit__name')])
success.append("Cadeiras do docente obtidas com sucesso")
general_log.debug(f"{__name__}->{course_units.__name__} {success[-1]}")
return data, success, []
@log_operations_time
@log_and_catch_query(
(CourseUnit.DoesNotExist, "Cadeira selecionada não existe"),
(Exception, "Erro ao criar folha de presenças")
)
def create_attendance_sheet(creator, data):
"""
Function to create attendance_sheet in DB
:param creator: creator of the attendance_sheet
:param data: data for attendance_sheet creation (in this case course's ID)
:return: tuple with
- Dictionary with lesson's ID
- list of success messages
- list of error messages
"""
success = []
with transaction.atomic():
course = CourseUnit.objects.get(id=data.get('course_unit'))
while True:
random_id = generate_random_id()
if not Attendance.objects.filter(id=random_id).exists():
break
Attendance.objects.create(id=random_id, creator=creator, course_unit=course, summary=data.get('summary'))
success.append("Folha de presenças criada com sucesso")
general_log.debug(
f"{__name__}->{create_attendance_sheet.__name__} {success[-1]}")
return {"attendance_id": random_id}, success, []
@log_operations_time
@log_and_catch_query(
(Attendance.DoesNotExist, "Folha de presenças não existe"),
(Exception, "Erro ao mudar o estado da folha de presenças")
)
def attendance_status(user, sheet_id, status):
"""
Change the activation status of an attendance sheet and persist the changes in DB
:param user: Request's user
:param sheet_id: Attendance sheet's ID
:param status: Activation status
:return: tuple with
- Empty dictionary
- list of success messages
- list of error messages
"""
success, errors, is_authorized = [], [], True
attendance = Attendance.objects.get(id=sheet_id)
if attendance.creator == user:
attendance.is_active = status
attendance.save()
success.append("Estado da folha de presenças mudado com sucesso")
general_log.debug(
f"{__name__}->{attendance_status.__name__} {success[-1]}")
else:
is_authorized = False
errors.append("Apenas o criador desta folha de presenças pode mudar o estado da mesma")
general_log.error(
f"{__name__}->{attendance_status.__name__} {errors[-1]}")
return {'is_authorized': is_authorized}, success, errors
@log_operations_time
@log_and_catch_query(
(Attendance.DoesNotExist, "Folha de presenças não existe"),
(Exception, "Erro ao obter os alunos registados na folha de presença")
)
def students_in_attendance_sheet(user, sheet_id):
"""
    Get the students registered on an attendance sheet saved in the DB
    :param user: Request's user
:param sheet_id: Attendance sheet's ID
:return: tuple with
- Students nmecs
- list of success messages
- list of error messages
"""
data, success, errors, is_authorized = {}, [], [], True
attendance = Attendance.objects.get(id=sheet_id)
if attendance.creator == user:
data['data'] = {'students': attendance.students.all()}
success.append(
"Alunos registados na folha de presença obtidos com sucesso")
general_log.debug(
f"{__name__}->{students_in_attendance_sheet.__name__} {success[-1]}")
else:
is_authorized = False
errors.append("Apenas o criador desta folha pode obter os alunos registados na mesma")
general_log.error(
f"{__name__}->{students_in_attendance_sheet.__name__} {errors[-1]}")
data['is_authorized'] = is_authorized
return data, success, errors
@log_operations_time
@log_and_catch_query(
(Attendance.DoesNotExist, "Folha de presenças não existe"),
(Exception, "Erro ao registar-se na aula")
)
def attendance_sheet_student_registration(sheet_id, token, student):
"""
Register a user in an attendance sheet
:param sheet_id: Attendance sheet's ID
:param token: Request's user token
:param student: dictionary with student's data
:return: tuple with
- Empty dictionary
- list of success messages
- list of error messages
"""
success, errors = [], []
attendance = Attendance.objects.get(id=sheet_id)
if attendance.is_active or token is not None:
nmec, name = student.get('nmec'), student.get('name')
student = Student.objects.update_or_create(nmec=nmec, defaults={'name': name})[0]
attendance.students.add(student)
attendance.save()
success.append(
f"Aluno(s) registado(s) com sucesso na folha de presenças")
general_log.debug(
f"{__name__}->{attendance_sheet_student_registration.__name__} {success[-1]}")
else:
errors.append(
"A folha de presenças encontra-se encerrada. Não é possivel submeter o registo")
general_log.error(
f"{__name__}->{attendance_sheet_student_registration.__name__} {errors[-1]}")
return [], success, errors
@log_operations_time
@log_and_catch_query(
(Attendance.DoesNotExist, "Folha de presenças não existe"),
(Exception, "Erro ao eliminar registo(s) de aluno(s)")
)
def attendance_sheet_student_deletion(user, sheet_id, data):
"""
Remove a student from an attendance sheet and persist the changes
:param user: Http Request user
:param sheet_id: Attendance sheet's ID
    :param data: Dictionary with a list of nmecs to be removed
    :return: tuple with an empty list, a list of success messages and a list of error messages
"""
success, errors = [], []
nmecs = data.get('nmecs')
attendance = Attendance.objects.get(id=sheet_id)
if attendance.creator != user:
errors.append(
"Apenas o criador desta folha pode remover registos de alunos na mesma")
general_log.error(
f"{__name__}->{students_in_attendance_sheet.__name__} {errors[-1]}")
else:
with transaction.atomic():
for nmec in nmecs:
student = Student.objects.filter(nmec=nmec)
if student.exists():
attendance.students.remove(student[0])
success.append("Registo(s) de aluno(s) eliminados com sucesso")
general_log.debug(
f"{__name__}->{attendance_sheet_student_registration.__name__} {success[-1]}")
return [], success, errors
@log_operations_time
@log_and_catch_query(
(Exception, "Erro ao obter as folhas de presença")
)
def attendance_sheets(user, course_id):
"""
    Get the attendance sheets persisted in the DB (if a user is specified, returns the attendance sheets
    associated with that user)
:param user: Specified user
:param course_id: Course unit's ID
:return: tuple with
- Attendance sheets data
- list of success messages
- list of error messages
"""
success = []
data = Attendance.objects.filter(creator=user, course_unit_id=course_id)
success.append("Folhas de presença obtidas com sucesso")
general_log.debug(
f"{__name__}->{attendance_sheets.__name__} {success[-1]}")
return data, success, []
@log_operations_time
@log_and_catch_query(
(Attendance.DoesNotExist, "Folha de presenças não existe"),
(Exception, "Erro ao obter as informações da folha de presenças")
)
def attendance_info(sheet_id):
"""
Get attendance info
:param sheet_id: Sheet's ID
:return: tuple with
- Attendance sheets data
- list of success messages
- list of error messages
"""
success, errors = [], []
attendance = Attendance.objects.get(id=sheet_id)
creator = attendance.creator
data = {
'timestamp': attendance.register_timestamp,
'course_unit_name': attendance.course_unit.name,
'creator_name': f'{creator.first_name} {creator.last_name}',
'summary': attendance.summary,
'is_active': attendance.is_active
}
success.append("Folha de presenças obtida com sucesso")
general_log.debug(f"{__name__}->{attendance_info.__name__} {success[-1]}")
return data, success, errors
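# Hedged usage sketch (assumes a configured Django project and an authenticated teacher user;
# the course id, summary text and `request.user` below are illustrative assumptions):
#
#     data, success, errors = course_units()
#     payload, success, errors = create_attendance_sheet(creator=request.user,
#                                                        data={'course_unit': 1, 'summary': 'Aula 1'})
#     _, success, errors = attendance_status(request.user, payload['attendance_id'], status=False)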
|
#!/usr/bin/env python3
"""
Download urls in urls_file to out_directory. Use command line arguments.
"""
from websearcher import web_downloader
downloader = web_downloader.WebDownloader("@./data/input/web_downloader_args.txt")
print("Downloading to " + downloader.args.out_directory)
downloader.request_urls_write_to_files()
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E09000025"
addresses_name = (
"parl.2019-12-12/Version 1/LBNewhamDemocracy_Club__12December2019.TSV"
)
stations_name = (
"parl.2019-12-12/Version 1/LBNewhamDemocracy_Club__12December2019.TSV"
)
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def station_record_to_dict(self, record):
# Fix from: 890908:polling_stations/apps/data_collection/management/commands/misc_fixes.py:224
# Carpenters and Docklands Centre 98 Gibbins Road Stratford London
if record.polling_place_id == "4823":
record = record._replace(polling_place_easting="538526.11")
record = record._replace(polling_place_northing="184252.81")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "46003425":
return None # in an otherwise multiple station postcode, so safe to remove
if uprn == "10008988958": # Right property; wrong postcode
rec["postcode"] = "E162LL" # was E164LL
rec["accept_suggestion"] = False
if uprn == "10094371250": # Right property; wrong postcode
rec["postcode"] = "E125AD" # was E126AD
rec["accept_suggestion"] = False
if uprn in [
"10009004801", # E78BS -> E78AB : 386A Romford Road, London
"10009009060", # E151SL -> E151SH : School House, Gurney Road, London
"10008984571", # E154DY -> E153HU : The Angel Inn Public House, 21 Church Street, London
"10012835081", # E139ER -> E139PJ : 772 Barking Road, London
"10090756946", # E78AW -> E79AW : 36C Earlham Grove, London
"10090758259", # E63JE -> E66JE : 28 Buterfly Court, 1 Elderberry Way, London
"10090758260", # E63JE -> E66JE : 29 Butterly Court, 1 Elderberry Way, London
"10034509102", # E66AQ -> E66HE : Ground Floor Flat, 178 Charlemont Road, London
"10008987564", # E126TH -> E126TJ : 459A High Street North, London
"10009014196", # E78AR -> E79AR : Basement Flat, 84 Earlham Grove, London
]:
rec["accept_suggestion"] = True
if uprn in [
"10023995207", # E164EB -> E138NE : 301 Newham Way, London
"10090323539", # E78NL -> E78QJ : 93 Stafford Road, London
"10008983114", # E62BU -> E62BX : 84 Altmore Avenue, London
"10008988958", # Right property; wrong postcode
# Right property, wrong postcode
"10034508484", # E66DQ -> E61DQ : Flat Above, 33 Vicarage Lane, London
"10009012905", # E163DZ -> E163BZ : Ground Floor Flat, 62A Tree Road, London
]:
rec["accept_suggestion"] = False
return rec
|
from .base import SemaceApiClient
from .connection import SemaceConnection
|
#!/usr/bin/python
from getpass import getpass
from pyzabbix import ZabbixAPI
import sys
import time
import socket
import json
import math
from threading import Thread
from collections import defaultdict
import re
import string
import os
class Poller(object):
def __init__(self, length, interval, vms, meas_server, delay):
self.cpu_perc = defaultdict(list)
self.ts = defaultdict(list)
self.thread = Thread(target = self.getVMsCPU, args=(length, interval, vms, meas_server, delay))
self.NodeToavgCPU = {}
self.NodeTovarCPU = {}
self.NodeToavgTS = {}
self.NodeTovarTS = {}
for vm in vms:
self.NodeToavgCPU[vm] = 0.0
self.NodeTovarCPU[vm] = 0.0
self.NodeToavgTS[vm] = 0.0
self.NodeTovarTS[vm] = 0.0
def start(self):
self.thread.start()
def join(self):
self.thread.join()
def getVMsCPU(self, length, interval, vm, xenaddr, delay):
raise NotImplementedError("Abstract Method")
def computeAvgCPU(self, vm):
for perc in self.cpu_perc[vm]:
self.NodeToavgCPU[vm] = self.NodeToavgCPU[vm] + perc
self.NodeToavgCPU[vm] = self.NodeToavgCPU[vm]/len(self.cpu_perc[vm])
def getAvgCPU(self, vm):
if self.NodeToavgCPU[vm] == 0:
self.computeAvgCPU(vm)
return self.NodeToavgCPU[vm]
def computeAvgTS(self, vm):
for i in range(0,len(self.ts[vm])-1):
self.NodeToavgTS[vm] = self.NodeToavgTS[vm] + (self.ts[vm][i+1] - self.ts[vm][i])
self.NodeToavgTS[vm] = (self.NodeToavgTS[vm]/len(self.ts[vm]))
def getAvgTS(self, vm):
if self.NodeToavgTS[vm] == 0:
self.computeAvgTS(vm)
return self.NodeToavgTS[vm]
def computeVarCPU(self, vm):
if self.NodeToavgCPU[vm] == 0.0:
self.computeAvgCPU(vm)
for i in range(0,len(self.cpu_perc[vm])):
self.NodeTovarCPU[vm] = self.NodeTovarCPU[vm] + math.pow((self.cpu_perc[vm][i] - self.NodeToavgCPU[vm]), 2)
self.NodeTovarCPU[vm] = (self.NodeTovarCPU[vm]/(len(self.cpu_perc[vm])-1))
def getVarCPU(self, vm):
if self.NodeTovarCPU[vm] == 0.0:
self.computeVarCPU(vm)
return self.NodeTovarCPU[vm]
def computeVarTS(self, vm):
if self.NodeToavgTS[vm] == 0.0:
self.computeAvgTS(vm)
for i in range(0,len(self.ts[vm])-1):
self.NodeTovarTS[vm] = self.NodeTovarTS[vm] + math.pow(((self.ts[vm][i+1] - self.ts[vm][i]) - self.NodeToavgTS[vm]), 2)
self.NodeTovarTS[vm] = (self.NodeTovarTS[vm]/(len(self.ts[vm])-1))
def getVarTS(self, vm):
if self.NodeTovarTS[vm] == 0.0:
self.computeVarTS(vm)
return self.NodeTovarTS[vm]
def getDEVCPU(self, vm):
if self.NodeTovarCPU[vm] == 0.0:
self.computeVarCPU(vm)
return math.sqrt(self.NodeTovarCPU[vm])
def getDEVTS(self, vm):
if self.NodeTovarTS[vm] == 0.0:
self.computeVarTS(vm)
return math.sqrt(self.NodeTovarTS[vm])
class XentopPoller(Poller):
XENPORT = 8888
MSIZE = 1024
KEYCPU = 'cpu_perc'
KEYTS = 'timestamp'
def __init__(self, length, interval, vms, xenaddr, delay):
Poller.__init__(self, length, interval, vms, xenaddr, delay)
def getVMsCPU(self, length, interval, vms, xenaddr, delay):
request = {'message':'getVMTop', 'vm_list':vms}
for i in range(0, length):
ts_start = time.time()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((xenaddr, self.XENPORT))
s.send(json.dumps(request))
result = json.loads(s.recv(self.MSIZE))
if delay > 0:
delay = delay - 1
else :
for vminfo in result:
self.cpu_perc[vminfo['name']].append(float(vminfo[self.KEYCPU]))
self.ts[vminfo['name']].append(float(vminfo[self.KEYTS]))
s.close()
ts_end = time.time()
diff = interval - (ts_end-ts_start)
if diff > 0:
time.sleep(diff)
class ZabbixPoller(Poller):
KEYITEMID='itemid'
KEYNAME='name'
KEYSERVER='server'
KEYUSERNAME='username'
KEYPASSWORD='password'
KEYCPU='lastvalue'
KEYTS='lastclock'
CONF='.zabbix.conf'
def __init__(self, length, interval, vms, delay):
Poller.__init__(self, length, interval, vms, None, delay)
self.load_conf()
self.zapi = ZabbixAPI(self.ZABBIX_SERVER)
self.zapi.login(self.USERNAME, self.PASSWORD)
self.map_id_to_name = {}
self.getVMsID(vms)
def load_conf(self):
        if not os.path.exists(self.CONF):
            print "Error: configuration file %s does not exist. Please create it." % self.CONF
            sys.exit(-2)
conf = open(self.CONF)
json_data = json.load(conf)
self.ZABBIX_SERVER = json_data[self.KEYSERVER]
self.USERNAME = json_data[self.KEYUSERNAME]
self.PASSWORD = json_data[self.KEYPASSWORD]
conf.close()
def getVMsID(self, vms):
for vm in vms:
item = self.zapi.item.get(filter={self.KEYNAME:vm})
self.map_id_to_name[item[0][self.KEYITEMID]]=vm
def getVMsCPU(self, length, interval, vms, xenaddr, delay):
i = 0
print "Zabbix Poller discards first \"delay\" values"
last_ts = 0
# Discard first "delay" values
while (delay >= 0):
ts_start = time.time()
items = self.zapi.item.get(filter={self.KEYNAME:vms})
ts_end = time.time()
for item in items:
if item:
ts_value = float(item[self.KEYTS])
if ts_value != last_ts:
delay = delay - 1
last_ts = ts_value
break
ts_end = time.time()
diff = 2 - (ts_end-ts_start)
if diff > 0:
time.sleep(diff)
print "Zabbix Poller takes data"
# Take data
while i < (length * len(vms)):
ts_start = time.time()
items = self.zapi.item.get(filter={self.KEYNAME:vms})
ts_end = time.time()
for item in items:
if item:
vm = self.map_id_to_name[item[self.KEYITEMID]]
ts_value = float(item[self.KEYTS])
if ts_value not in self.ts[vm]:
self.cpu_perc[vm].append(float(item[self.KEYCPU]))
self.ts[vm].append(ts_value)
i = i + 1
ts_end = time.time()
diff = 2 - (ts_end-ts_start)
if diff > 0:
time.sleep(diff)
if __name__ == '__main__':
poller = ZabbixPoller(10, 30, ['DREAMER-TESTBED-OSHI-PE-23'], 1)
poller.start()
poller.join()
for key, cpu_data in poller.cpu_perc.iteritems():
print "%s -> CPU:%s" %(key, cpu_data)
for key, ts_data in poller.ts.iteritems():
print "%s -> TS:%s" %(key, ts_data)
|
"""
measure.py
This module performs measurements!
"""
import numpy as np
def calculate_distance(r_a, r_b):
"""
Calculate the distance between two points.
Parameters
==========
r_a, r_b : np.ndarray
the coordinates of each point
Returns
=======
distance : float
The distance between the two points
Examples
========
>>> r1 = np.array([0, 0, 0])
>>> r2 = np.array([0, 0.1, 0])
>>> calculate_distance(r1, r2)
0.1
"""
if not isinstance(r_a, np.ndarray) or not isinstance(r_b, np.ndarray):
raise TypeError("One or both input parameters must be np.ndarrays!!!")
distance_vector = (r_a - r_b)
distance = np.linalg.norm(distance_vector)
return distance
def calculate_angle(r_a, r_b, r_c, degrees=False):
    """
    Calculate the angle between three points, with r_b as the vertex.
    The answer is given in radians by default, or in degrees when degrees=True.
    """
    r_ab = r_b - r_a
    r_bc = r_b - r_c
    theta = np.arccos(np.dot(r_ab, r_bc) / (np.linalg.norm(r_ab) * np.linalg.norm(r_bc)))
if degrees:
return np.degrees(theta)
else:
return theta
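# A small, hedged usage sketch for calculate_angle (the coordinates below are illustrative):
#
#     >>> r1 = np.array([1.0, 0.0, 0.0])
#     >>> r2 = np.array([0.0, 0.0, 0.0])
#     >>> r3 = np.array([0.0, 1.0, 0.0])
#     >>> calculate_angle(r1, r2, r3, degrees=True)
#     90.0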
|
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String, DateTime, ForeignKey, JSON
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from upload.common.upload_config import UploadDbConfig
Base = declarative_base()
class DbUploadArea(Base):
__tablename__ = 'upload_area'
id = Column(Integer(), primary_key=True)
uuid = Column(String(), nullable=False)
bucket_name = Column(String(), nullable=False)
status = Column(String(), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)
class DbFile(Base):
__tablename__ = 'file'
id = Column(Integer(), primary_key=True)
s3_key = Column(String(), nullable=False)
s3_etag = Column(String(), nullable=True)
upload_area_id = Column(Integer(), ForeignKey('upload_area.id'), nullable=False)
name = Column(String(), nullable=False)
size = Column(Integer(), nullable=False)
checksums = Column(JSON(), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)
upload_area = relationship("DbUploadArea", back_populates="files")
class DbChecksum(Base):
__tablename__ = 'checksum'
id = Column(String(), primary_key=True)
file_id = Column(String(), ForeignKey('file.id'), nullable=False)
job_id = Column(String(), nullable=False)
status = Column(String(), nullable=False)
checksum_started_at = Column(DateTime(), nullable=False)
checksum_ended_at = Column(DateTime(), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)
file = relationship("DbFile", back_populates='checksum_records')
class DbValidation(Base):
__tablename__ = 'validation'
id = Column(String(), primary_key=True)
job_id = Column(String(), nullable=False)
status = Column(String(), nullable=False)
results = Column(String(), nullable=False)
validation_started_at = Column(DateTime(), nullable=False)
validation_ended_at = Column(DateTime(), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)
class DbNotification(Base):
__tablename__ = 'notification'
id = Column(String(), primary_key=True)
file_id = Column(String(), ForeignKey('file.id'), nullable=False)
status = Column(String(), nullable=False)
payload = Column(JSON(), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)
file = relationship("DbFile", back_populates='notifications')
DbUploadArea.files = relationship('DbFile', order_by=DbFile.id, back_populates='upload_area')
DbFile.checksum_records = relationship('DbChecksum', order_by=DbChecksum.created_at,
back_populates='file',
cascade='all, delete, delete-orphan')
DbFile.notifications = relationship('DbNotification',
order_by=DbNotification.created_at,
back_populates='file',
cascade='all, delete, delete-orphan')
class DBSessionMaker:
def __init__(self):
engine = create_engine(UploadDbConfig().database_uri)
Base.metadata.bind = engine
self.session_maker = sessionmaker()
self.session_maker.bind = engine
def session(self, **kwargs):
return self.session_maker(**kwargs)
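# Hedged usage sketch (assumes UploadDbConfig().database_uri points at a reachable database and that
# the schema above already exists; the uuid below is an illustrative placeholder):
#
#     session = DBSessionMaker().session()
#     area = session.query(DbUploadArea).filter_by(uuid="deadbeef-0000-0000-0000-000000000000").one_or_none()
#     if area:
#         print([f.name for f in area.files])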
|
import argparse
import json
import pcomfortcloud
from enum import Enum
def print_result(obj, indent = 0):
for key in obj:
value = obj[key]
if isinstance(value, dict):
print(" "*indent + key)
print_result(value, indent + 4)
elif isinstance(value, Enum):
print(" "*indent + "{0: <{width}}: {1}".format(key, value.name, width=25-indent))
elif isinstance(value, list):
print(" "*indent + "{0: <{width}}:".format(key, width=25-indent))
for elt in value:
print_result(elt, indent + 4)
print("")
else:
print(" "*indent + "{0: <{width}}: {1}".format(key, value, width=25-indent))
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
""" Start pcomfortcloud Comfort Cloud command line """
parser = argparse.ArgumentParser(
description='Read or change status of pcomfortcloud Climate devices')
parser.add_argument(
'username',
help='Username for pcomfortcloud Comfort Cloud')
parser.add_argument(
'password',
help='Password for pcomfortcloud Comfort Cloud')
parser.add_argument(
'-t', '--token',
help='File to store token in',
default='~/.pcomfortcloud-token')
parser.add_argument(
'-s', '--skipVerify',
        help='Skip SSL verification if set to True',
type=str2bool, nargs='?', const=True,
default=False)
parser.add_argument(
'-r', '--raw',
help='Raw dump of response',
type=str2bool, nargs='?', const=True,
default=False)
commandparser = parser.add_subparsers(
help='commands',
dest='command')
commandparser.add_parser(
'list',
help="Get a list of all devices")
get_parser = commandparser.add_parser(
'get',
help="Get status of a device")
get_parser.add_argument(
dest='device',
type=int,
help='Device number #')
set_parser = commandparser.add_parser(
'set',
help="Set status of a device")
set_parser.add_argument(
dest='device',
type=int,
help='Device number #'
)
set_parser.add_argument(
'-p', '--power',
choices=[
pcomfortcloud.constants.Power.On.name,
pcomfortcloud.constants.Power.Off.name],
help='Power mode')
set_parser.add_argument(
'-t', '--temperature',
type=float,
help="Temperature")
set_parser.add_argument(
'-f', '--fanSpeed',
choices=[
pcomfortcloud.constants.FanSpeed.Auto.name,
pcomfortcloud.constants.FanSpeed.Low.name,
pcomfortcloud.constants.FanSpeed.LowMid.name,
pcomfortcloud.constants.FanSpeed.Mid.name,
pcomfortcloud.constants.FanSpeed.HighMid.name,
pcomfortcloud.constants.FanSpeed.High.name],
help='Fan speed')
set_parser.add_argument(
'-m', '--mode',
choices=[
pcomfortcloud.constants.OperationMode.Auto.name,
pcomfortcloud.constants.OperationMode.Cool.name,
pcomfortcloud.constants.OperationMode.Dry.name,
pcomfortcloud.constants.OperationMode.Heat.name,
pcomfortcloud.constants.OperationMode.Fan.name],
help='Operation mode')
set_parser.add_argument(
'-e', '--eco',
choices=[
pcomfortcloud.constants.EcoMode.Auto.name,
pcomfortcloud.constants.EcoMode.Quiet.name,
pcomfortcloud.constants.EcoMode.Powerful.name],
help='Eco mode')
set_parser.add_argument(
'-n', '--nanoe',
choices=[
pcomfortcloud.constants.NanoeMode.On.name,
pcomfortcloud.constants.NanoeMode.Off.name,
pcomfortcloud.constants.NanoeMode.ModeG.name,
pcomfortcloud.constants.NanoeMode.All.name],
help='Nanoe mode')
# set_parser.add_argument(
# '--airswingauto',
# choices=[
# pcomfortcloud.constants.AirSwingAutoMode.Disabled.name,
# pcomfortcloud.constants.AirSwingAutoMode.AirSwingLR.name,
# pcomfortcloud.constants.AirSwingAutoMode.AirSwingUD.name,
# pcomfortcloud.constants.AirSwingAutoMode.Both.name],
# help='Automation of air swing')
set_parser.add_argument(
'-y', '--airSwingVertical',
choices=[
pcomfortcloud.constants.AirSwingUD.Auto.name,
pcomfortcloud.constants.AirSwingUD.Down.name,
pcomfortcloud.constants.AirSwingUD.DownMid.name,
pcomfortcloud.constants.AirSwingUD.Mid.name,
pcomfortcloud.constants.AirSwingUD.UpMid.name,
pcomfortcloud.constants.AirSwingUD.Up.name],
help='Vertical position of the air swing')
set_parser.add_argument(
'-x', '--airSwingHorizontal',
choices=[
pcomfortcloud.constants.AirSwingLR.Auto.name,
pcomfortcloud.constants.AirSwingLR.Left.name,
pcomfortcloud.constants.AirSwingLR.LeftMid.name,
pcomfortcloud.constants.AirSwingLR.Mid.name,
pcomfortcloud.constants.AirSwingLR.RightMid.name,
pcomfortcloud.constants.AirSwingLR.Right.name],
help='Horizontal position of the air swing')
dump_parser = commandparser.add_parser(
'dump',
help="Dump data of a device")
dump_parser.add_argument(
dest='device',
type=int,
help='Device number 1-x')
history_parser = commandparser.add_parser(
'history',
help="Dump history of a device")
history_parser.add_argument(
dest='device',
type=int,
help='Device number 1-x')
history_parser.add_argument(
dest='mode',
type=str,
help='mode (Day, Week, Month, Year)')
history_parser.add_argument(
dest='date',
type=str,
help='date of day like 20190807')
args = parser.parse_args()
    session = pcomfortcloud.Session(args.username, args.password, args.token, args.raw, not args.skipVerify)
session.login()
try:
if args.command == 'list':
print("list of devices and its device id (1-x)")
for idx, device in enumerate(session.get_devices()):
if(idx > 0):
print('')
print("device #{}".format(idx + 1))
print_result(device, 4)
if args.command == 'get':
if int(args.device) <= 0 or int(args.device) > len(session.get_devices()):
raise Exception("device not found, acceptable device id is from {} to {}".format(1, len(session.get_devices())))
device = session.get_devices()[int(args.device) - 1]
print("reading from device '{}' ({})".format(device['name'], device['id']))
print_result( session.get_device(device['id']) )
if args.command == 'set':
if int(args.device) <= 0 or int(args.device) > len(session.get_devices()):
raise Exception("device not found, acceptable device id is from {} to {}".format(1, len(session.get_devices())))
device = session.get_devices()[int(args.device) - 1]
print("writing to device '{}' ({})".format(device['name'], device['id']))
kwargs = {}
if args.power is not None:
kwargs['power'] = pcomfortcloud.constants.Power[args.power]
if args.temperature is not None:
kwargs['temperature'] = args.temperature
if args.fanSpeed is not None:
kwargs['fanSpeed'] = pcomfortcloud.constants.FanSpeed[args.fanSpeed]
if args.mode is not None:
kwargs['mode'] = pcomfortcloud.constants.OperationMode[args.mode]
if args.eco is not None:
kwargs['eco'] = pcomfortcloud.constants.EcoMode[args.eco]
if args.nanoe is not None:
kwargs['nanoe'] = pcomfortcloud.constants.NanoeMode[args.nanoe]
if args.airSwingHorizontal is not None:
kwargs['airSwingHorizontal'] = pcomfortcloud.constants.AirSwingLR[args.airSwingHorizontal]
if args.airSwingVertical is not None:
kwargs['airSwingVertical'] = pcomfortcloud.constants.AirSwingUD[args.airSwingVertical]
session.set_device(device['id'], **kwargs)
if args.command == 'dump':
if int(args.device) <= 0 or int(args.device) > len(session.get_devices()):
raise Exception("device not found, acceptable device id is from {} to {}".format(1, len(session.get_devices())))
device = session.get_devices()[int(args.device) - 1]
print_result(session.dump(device['id']))
if args.command == 'history':
if int(args.device) <= 0 or int(args.device) > len(session.get_devices()):
raise Exception("device not found, acceptable device id is from {} to {}".format(1, len(session.get_devices())))
device = session.get_devices()[int(args.device) - 1]
print_result(session.history(device['id'], args.mode, args.date))
except pcomfortcloud.ResponseError as ex:
print(ex.text)
# pylint: disable=C0103
if __name__ == "__main__":
main()
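# Hedged example invocations (the script name, credentials and device number are placeholders; the
# option values come from the choices declared above):
#
#     python cli.py user@example.com secret list
#     python cli.py user@example.com secret get 1
#     python cli.py user@example.com secret set 1 -p On -m Cool -t 22.5 -f Auto
#     python cli.py user@example.com secret history 1 Day 20190807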
|
"""
Metrics that provide data about commits & their associated activity
"""
import datetime
import sqlalchemy as s
import pandas as pd
from augur.util import register_metric
@register_metric()
def committers(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, period='month'):
"""
:param repo_id: The repository's id
:param repo_group_id: The repository's group id
    :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'month'
:param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
:param end_date: Specifies the end date, defaults to datetime.now()
:return: DataFrame of persons/period
"""
if not begin_date:
begin_date = '1970-1-1 00:00:01'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
committersSQL = None
if repo_id:
committersSQL = s.sql.text(
"""
SELECT DATE,
repo_name,
rg_name,
COUNT ( author_count )
FROM
(
SELECT
date_trunc(:period, commits.cmt_author_date::date) as date,
repo_name,
rg_name,
cmt_author_name,
cmt_author_email,
COUNT ( cmt_author_name ) AS author_count
FROM
commits, repo, repo_groups
WHERE
commits.repo_id = :repo_id AND commits.repo_id = repo.repo_id
AND repo.repo_group_id = repo_groups.repo_group_id
AND commits.cmt_author_date BETWEEN :begin_date and :end_date
GROUP BY date, repo_name, rg_name, cmt_author_name, cmt_author_email
ORDER BY date DESC
) C
GROUP BY
C.DATE,
repo_name,
rg_name
ORDER BY C.DATE desc
"""
)
else:
committersSQL = s.sql.text(
"""
SELECT DATE,
rg_name,
COUNT ( author_count )
FROM
(
SELECT
date_trunc(:period, commits.cmt_author_date::date) as date,
rg_name,
cmt_author_name,
cmt_author_email,
COUNT ( cmt_author_name ) AS author_count
FROM
commits, repo, repo_groups
WHERE
commits.repo_id = repo.repo_id
AND repo.repo_group_id = repo_groups.repo_group_id
AND commits.cmt_author_date BETWEEN :begin_date and :end_date
AND repo.repo_group_id = :repo_group_id
GROUP BY date, rg_name, cmt_author_name, cmt_author_email
ORDER BY date DESC
) C
GROUP BY
C.DATE,
rg_name
ORDER BY C.DATE desc
"""
)
results = pd.read_sql(committersSQL, self.database, params={'repo_id': repo_id,
'repo_group_id': repo_group_id,'begin_date': begin_date, 'end_date': end_date, 'period':period})
return results
@register_metric()
def annual_commit_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, period='month'):
"""
For each repository in a collection of repositories being managed, each REPO that first appears in the parameterized
calendar year (a new repo in that year), show all commits for that year (total for year by repo).
Result ranked from highest number of commits to lowest by default.
:param repo_id: The repository's id
:param repo_group_id: The repository's group id
    :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'month'
:param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
:param end_date: Specifies the end date, defaults to datetime.now()
:return: DataFrame of data
"""
if not begin_date:
begin_date = '1970-1-1 00:00:01'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
cdRgNewrepRankedCommitsSQL = None
if not repo_id:
table = 'dm_repo_group_annual' if period == 'year' or period == 'all' else 'dm_repo_group_monthly' if period == 'month' else 'dm_repo_group_weekly'
cdRgNewrepRankedCommitsSQL = s.sql.text("""
SELECT repo_groups.repo_group_id, rg_name, year, sum(cast(added AS INTEGER) - cast(removed AS INTEGER) - cast(whitespace AS INTEGER)) AS net, sum(cast(patches AS INTEGER)) AS commits
FROM {0}, repo_groups
WHERE {0}.repo_group_id = repo_groups.repo_group_id
AND repo_groups.repo_group_id = :repo_group_id
AND (
year > date_part('year', TIMESTAMP :begin_date)
OR (
year = date_part('year', TIMESTAMP :begin_date)
AND {1} >= date_part('{1}', TIMESTAMP :begin_date)
)
)
AND (
year < date_part('year', TIMESTAMP :end_date)
OR (
year = date_part('year', TIMESTAMP :end_date)
AND {1} <= date_part('{1}', TIMESTAMP :end_date)
)
)
GROUP BY repo_groups.repo_group_id, rg_name, YEAR
ORDER BY YEAR ASC
""".format(table, period))
else:
table = 'dm_repo_annual' if period == 'year' or period == 'all' else 'dm_repo_monthly' if period == 'month' else 'dm_repo_weekly'
cdRgNewrepRankedCommitsSQL = s.sql.text("""
SELECT repo.repo_id, repo_name, year, sum(cast(added AS INTEGER) - cast(removed AS INTEGER) - cast(whitespace AS INTEGER)) AS net, sum(cast(patches AS INTEGER)) AS commits
FROM {0}, repo
WHERE {0}.repo_id = repo.repo_id
AND repo.repo_id = :repo_id
AND (
year > date_part('year', TIMESTAMP :begin_date)
OR (
year = date_part('year', TIMESTAMP :begin_date)
AND {1} >= date_part('{1}', TIMESTAMP :begin_date)
)
)
AND (
year < date_part('year', TIMESTAMP :end_date)
OR (
year = date_part('year', TIMESTAMP :end_date)
AND {1} <= date_part('{1}', TIMESTAMP :end_date)
)
)
GROUP BY repo.repo_id, repo_name, YEAR
ORDER BY YEAR ASC
""".format(table, period))
results = pd.read_sql(cdRgNewrepRankedCommitsSQL, self.database, params={'repo_id': repo_id,
'repo_group_id': repo_group_id,'begin_date': begin_date, 'end_date': end_date})
return results
@register_metric()
def annual_commit_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_id=None, timeframe=None):
"""
For each repository in a collection of repositories being managed, each REPO's total commits during the current Month,
Year or Week. Result ranked from highest number of commits to lowest by default.
    :param repo_id: The repository's id
    :param repo_group_id: The repository's group id
    :param timeframe: Sets the timeframe to 'month', 'year' or 'all', defaults to 'all'
    :return: DataFrame of data
"""
    if timeframe is None:
        timeframe = 'all'
cdRgTpRankedCommitsSQL = None
if repo_id:
if timeframe == 'all':
cdRgTpRankedCommitsSQL = s.sql.text("""
SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches
FROM dm_repo_annual, repo, repo_groups
WHERE repo.repo_group_id = (select repo.repo_group_id from repo where repo.repo_id = :repo_id)
AND repo.repo_group_id = repo_groups.repo_group_id
AND dm_repo_annual.repo_id = repo.repo_id
group by repo.repo_id, patches
order by net desc
LIMIT 10
""")
elif timeframe == 'year':
cdRgTpRankedCommitsSQL = s.sql.text("""
SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches
FROM dm_repo_annual, repo, repo_groups
WHERE repo.repo_group_id = (select repo.repo_group_id from repo where repo.repo_id = :repo_id)
AND repo.repo_group_id = repo_groups.repo_group_id
AND dm_repo_annual.repo_id = repo.repo_id
AND date_part('year', repo_added) = date_part('year', CURRENT_DATE)
group by repo.repo_id, patches
order by net desc
LIMIT 10
""")
elif timeframe == 'month':
cdRgTpRankedCommitsSQL = s.sql.text("""
SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches
FROM dm_repo_monthly, repo, repo_groups
WHERE repo.repo_group_id = (select repo.repo_group_id from repo where repo.repo_id = :repo_id)
AND repo.repo_group_id = repo_groups.repo_group_id
AND dm_repo_monthly.repo_id = repo.repo_id
AND date_part('year', repo_added) = date_part('year', CURRENT_DATE)
AND date_part('month', repo_added) = date_part('month', CURRENT_DATE)
group by repo.repo_id, patches
order by net desc
LIMIT 10
""")
else:
if timeframe == 'all':
cdRgTpRankedCommitsSQL = s.sql.text("""
SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches
FROM augur_data.dm_repo_annual, repo, repo_groups
WHERE repo.repo_group_id = :repo_group_id
AND repo.repo_group_id = repo_groups.repo_group_id
AND dm_repo_annual.repo_id = repo.repo_id
group by repo.repo_id, patches
order by net desc
LIMIT 10
""")
elif timeframe == "year":
cdRgTpRankedCommitsSQL = s.sql.text(
"""
SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches
FROM dm_repo_annual, repo, repo_groups
WHERE repo.repo_group_id = :repo_group_id
AND repo.repo_group_id = repo_groups.repo_group_id
AND dm_repo_annual.repo_id = repo.repo_id
AND date_part('year', repo_added) = date_part('year', CURRENT_DATE)
group by repo.repo_id, patches
order by net desc
LIMIT 10
"""
)
elif timeframe == 'month':
cdRgTpRankedCommitsSQL = s.sql.text("""
SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches
FROM dm_repo_annual, repo, repo_groups
WHERE repo.repo_group_id = :repo_group_id
AND repo.repo_group_id = repo_groups.repo_group_id
AND dm_repo_annual.repo_id = repo.repo_id
AND date_part('year', repo_added) = date_part('year', CURRENT_DATE)
AND date_part('month', repo_added) = date_part('month', CURRENT_DATE)
group by repo.repo_id, patches
order by net desc
LIMIT 10
""")
results = pd.read_sql(cdRgTpRankedCommitsSQL, self.database, params={ "repo_group_id": repo_group_id,
"repo_id": repo_id})
return results
@register_metric()
def top_committers(self, repo_group_id, repo_id=None, year=None, threshold=0.5):
"""
Returns a list of contributors contributing N% of all commits.
:param repo_group_id: Repo group ID
:param repo_id: Repo ID.
:param year: Year. eg: 2018, 2107. Defaults to current year.
:param threshold: The threshold to specify N%. Defaults to 0.5
"""
threshold = float(threshold)
if threshold < 0 or threshold > 1:
raise ValueError('threshold should be between 0 and 1')
if year is None:
year = datetime.datetime.now().year
if not repo_id:
total_commits_SQL = s.sql.text("""
SELECT SUM(patches)::int
FROM
(SELECT repo_group_id, email, year, patches
FROM dm_repo_group_annual
WHERE year = :year AND repo_group_id = :repo_group_id
ORDER BY patches DESC) a
""")
results = pd.read_sql(total_commits_SQL, self.database,
params={'year': year, 'repo_group_id': repo_group_id})
else:
total_commits_SQL = s.sql.text("""
SELECT SUM(patches)::int
FROM
(SELECT repo_id, email, year, patches
FROM dm_repo_annual
WHERE year = :year AND repo_id = :repo_id
ORDER BY patches DESC) a
""")
results = pd.read_sql(total_commits_SQL, self.database,
params={'year': year, 'repo_id': repo_id})
total_commits = int(results.iloc[0]['sum'])
threshold_commits = round(threshold * total_commits)
if not repo_id:
committers_SQL = s.sql.text("""
SELECT
a.repo_group_id,
rg_name AS repo_group_name,
a.email,
SUM(a.patches)::int AS commits
FROM
(SELECT repo_group_id, email, year, patches
FROM dm_repo_group_annual
WHERE year = :year AND repo_group_id = :repo_group_id
ORDER BY patches DESC) a, repo_groups
WHERE a.repo_group_id = repo_groups.repo_group_id
GROUP BY a.repo_group_id, repo_group_name, a.email
ORDER BY commits DESC
""")
results = pd.read_sql(committers_SQL, self.database,
params={'year': year, 'repo_group_id': repo_group_id})
else:
committers_SQL = s.sql.text("""
SELECT
a.repo_id,
repo.repo_name,
a.email,
SUM(a.patches)::int AS commits
FROM
(SELECT repo_id, email, year, patches
FROM dm_repo_annual
WHERE year = :year AND repo_id = :repo_id
ORDER BY patches DESC) a, repo
WHERE a.repo_id = repo.repo_id
GROUP BY a.repo_id, repo.repo_name, a.email
ORDER BY commits DESC
""")
results = pd.read_sql(committers_SQL, self.database,
params={'year': year, 'repo_id': repo_id})
cumsum = 0
for i, row in results.iterrows():
cumsum += row['commits']
if cumsum >= threshold_commits:
results = results[:i + 1]
break
if not repo_id:
rg_name = results.iloc[0]['repo_group_name']
results.loc[i+1] = [repo_group_id, rg_name, 'other_contributors',
int(total_commits - cumsum)]
else:
repo_name = results.iloc[0]['repo_name']
results.loc[i+1] = [repo_id, repo_name, 'other_contributors',
int(total_commits - cumsum)]
return results
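# Hedged usage sketch (these functions take `self` and are presumably bound to an Augur metrics
# object by @register_metric; `metrics`, the repo_group_id and the year below are illustrative
# assumptions, not values from this module):
#
#     df = metrics.committers(repo_group_id=25, period='month')
#     df = metrics.top_committers(repo_group_id=25, year=2019, threshold=0.5)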
|
#!/usr/bin/env python3
"""
Make a custom colormap from a list of colors
References
----------
How to create a colormap:
https://matplotlib.org/3.1.0/tutorials/colors/colormap-manipulation.html
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
import collections
# seaborn settings
sns.set_style('white')
sns.set_context("notebook")
sns.set(font='Arial')
package_directory = os.path.dirname(os.path.abspath(__file__))
def set_ticksStyle(x_size=4, y_size=4, x_dir='in', y_dir='in'):
"""
Ticks settings for plotting
Parameters
----------
x_size : float
length of x-ticks
y_size : float
length of y-ticks
x_dir : str, ('in' or 'out')
inward or outward facing x-ticks
y_dir : str, ('in' or 'out')
inward or outward facing y-ticks
"""
sns.set_style('ticks', {'xtick.major.size': x_size, 'ytick.major.size': y_size, 'xtick.direction': x_dir, 'ytick.direction': y_dir})
def load_colors(filename='colormaps.json'):
"""
Load rgba colors from a json file
Parameters
----------
filename : str
"""
with open(os.path.join(package_directory, filename)) as f:
inputcolors = json.load(f)
for name,colors_rgb in inputcolors.items():
colors_rgb = np.array(colors_rgb)
if colors_rgb.shape[1] == 3:
colors_rgba = np.hstack((colors_rgb / 255, np.ones((colors_rgb.shape[0], 1))))
else:
colors_rgba = colors_rgb
inputcolors[name] = colors_rgba
return inputcolors
def _scramble_pop(d):
"""
Reorder the colors as [first, last, second, second but last, ...]
"""
try:
while(True):
yield d.popleft()
yield d.pop()
except IndexError:
pass
def get_cmap(name=None, colormap_filename='colormaps.json'):
"""
Return the selected LinearSegmentedColormap or a dictionary of all colormaps registered in colormap_filename
Parameters
----------
name : str
colormap_filename : str
path to a json file encoding a dictionary of colors which define custom colormaps
"""
default_inputcolors = load_colors(colormap_filename)
default_cmaps = {key: make_colormap(colors, key) for key,colors in default_inputcolors.items()}
if name is None:
return default_cmaps
else:
try:
return default_cmaps[name][1]
except KeyError:
            print('Colormap \"{}\" is not yet in the list of registered colormaps. '
                  'You may add your input colors to colormaps.json'.format(name))
return None
def naturalcolors():
"""
Wrapper for naturalcolors map
"""
default_cmaps = get_cmap()
return default_cmaps['naturalcolors']
def list_cmaps():
"""
List all available colormaps
"""
return list(get_cmap().keys())
def make_colormap(colors, name='newcolormap'):
"""
Build a listed and a linear segmented colormap from a list of colors
Parameters
----------
colors : array_like
name : str
"""
listedCmap = mpl.colors.ListedColormap(colors, name=name + '_list')
linearSegmentedCmap = _listed2linearSegmentedColormap(listedCmap, name)
return listedCmap, linearSegmentedCmap
def _listed2linearSegmentedColormap(listedCmap, name='newcolormap'):
"""
Convert a listed to a linear segmented colormap
Parameters
----------
listedCmap : listed_colormap
name : str
"""
c = np.array(listedCmap.colors)
x = np.linspace(0, 1, len(c))
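    # LinearSegmentedColormap expects each channel as (x, y0, y1) anchor triples;
    # using the same value for y0 and y1 gives plain linear interpolation.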
    cdict = {'red': np.vstack((x, c[:, 0], c[:, 0])).T,
             'green': np.vstack((x, c[:, 1], c[:, 1])).T,
             'blue': np.vstack((x, c[:, 2], c[:, 2])).T}
return mpl.colors.LinearSegmentedColormap(name=name, segmentdata=cdict, N=256)
def get_colors(cmap, n, scramble=False):
"""
Extract n colors from a colormap
Parameters
----------
cmap : colormap or str
listed / linear segmented colormap or the name of a registered colormap
n : int
        number of colors to extract from the colormap
    scramble : bool, optional
        if True, reorder the colors as [first, last, second, second to last, ...]
    """
if type(cmap) is str:
name = cmap
cmap = plt.get_cmap(cmap)
else:
name = cmap.name
if n > cmap.N:
        print('The colormap \"{}\" is built from {:d} colors. Those are listed below'.format(cmap.name, cmap.N))
n = cmap.N
colors = cmap(np.linspace(0, 1, n))
if scramble:
colors = np.array(list(_scramble_pop(collections.deque(colors))))
return colors
def drawColorCircle(cmap, n=24, area=200):
"""
Draw a color circle from the colormap
Parameters
----------
cmap : colormap or str
listed / linear segmented colormap or the name of a registered colormap
n : int
number of colors to display in the color circle (set n=256 for a continuous circle)
area : int
size of the circles to draw
"""
if type(cmap) is str:
name = cmap
cmap = plt.get_cmap(cmap)
else:
name = cmap.name
with sns.axes_style('white'):
set_ticksStyle()
ax = plt.subplot(111, projection='polar')
if n > cmap.N:
            print('The colormap \"{}\" is built from {:d} colors'.format(cmap.name, cmap.N))
n = cmap.N
theta = np.linspace(0, 2 * np.pi - 2 * np.pi / n, n)
r = [1] * n
ax.scatter(theta, r, c=theta, s=area, cmap=cmap)
ax.axis('off')
ax.grid(which='major', visible=False)
ax.text(0, 0, name, va='center', ha='center', fontsize=12)
def drawColorBar(cmap):
"""
Draw a colorbar from the colormap
Parameters
----------
cmap : colormap or str
listed / linear segmented colormap or the name of a registered colormap
"""
if type(cmap) is str:
name = cmap
cmap = plt.get_cmap(cmap)
else:
name = cmap.name
with sns.axes_style('white'):
set_ticksStyle()
fig, ax = plt.subplots(figsize=(4, 1))
fig.subplots_adjust(bottom=0.7)
ax.set_axis_off()
mpl.colorbar.ColorbarBase(ax, cmap=cmap, orientation='horizontal')
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3] / 2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=12)
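# Minimal usage sketch (added for illustration; the three RGBA colors below are
# arbitrary example values, not part of the shipped colormaps.json).
if __name__ == '__main__':
    demo_colors = [(0.1, 0.4, 0.7, 1.0), (0.95, 0.9, 0.25, 1.0), (0.8, 0.2, 0.1, 1.0)]
    listed, linear = make_colormap(demo_colors, name='demo')
    print(get_colors(linear, 5))  # five evenly spaced RGBA samples
    drawColorBar(linear)
    plt.show()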
|
import csv
table = []
with open('iso639-autonyms.tsv', 'rt', newline='', encoding='utf-8') as f:
reader = csv.reader(f, dialect='excel-tab')
for row in reader:
current_row = []
current_row.extend(row[:3])
current_row.append(row[3].title())
table.append(current_row)
with open('iso639-autonyms.csv', 'wt', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerows(table)
|
from typing import List


class Solution:
    def minReorder(self, n: int, connections: List[List[int]]) -> int:
tree = [[] for _ in range(n)]
# a --> b
for a, b in connections:
tree[a].append((b, 1))
tree[b].append((a, 0))
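        # d == 1 marks an edge kept in its original direction a -> b; when the DFS
        # from city 0 traverses such an edge it points away from 0 and must be
        # reordered, so it adds 1 to the answer. d == 0 is the free reverse edge.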
self.ans = 0
def dfs(u, parent):
for v, d in tree[u]:
if v != parent:
if d == 1:
self.ans += 1
dfs(v, u)
dfs(0, -1)
return self.ans
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class RoleDataFull(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"overwrite": "bool",
"organization_groups": "list[str]",
"role_data": "RoleData",
}
attribute_map = {
"overwrite": "Overwrite",
"organization_groups": "OrganizationGroups",
"role_data": "RoleData",
}
def __init__(
self,
overwrite=None,
organization_groups=None,
role_data=None,
_configuration=None,
): # noqa: E501
"""RoleDataFull - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._overwrite = None
self._organization_groups = None
self._role_data = None
self.discriminator = None
if overwrite is not None:
self.overwrite = overwrite
if organization_groups is not None:
self.organization_groups = organization_groups
if role_data is not None:
self.role_data = role_data
@property
def overwrite(self):
"""Gets the overwrite of this RoleDataFull. # noqa: E501
        can overwrite existing role # noqa: E501
:return: The overwrite of this RoleDataFull. # noqa: E501
:rtype: bool
"""
return self._overwrite
@overwrite.setter
def overwrite(self, overwrite):
"""Sets the overwrite of this RoleDataFull.
        can overwrite existing role # noqa: E501
:param overwrite: The overwrite of this RoleDataFull. # noqa: E501
:type: bool
"""
self._overwrite = overwrite
@property
def organization_groups(self):
"""Gets the organization_groups of this RoleDataFull. # noqa: E501
organization groups # noqa: E501
:return: The organization_groups of this RoleDataFull. # noqa: E501
:rtype: list[str]
"""
return self._organization_groups
@organization_groups.setter
def organization_groups(self, organization_groups):
"""Sets the organization_groups of this RoleDataFull.
organization groups # noqa: E501
:param organization_groups: The organization_groups of this RoleDataFull. # noqa: E501
:type: list[str]
"""
self._organization_groups = organization_groups
@property
def role_data(self):
"""Gets the role_data of this RoleDataFull. # noqa: E501
authorization role # noqa: E501
:return: The role_data of this RoleDataFull. # noqa: E501
:rtype: RoleData
"""
return self._role_data
@role_data.setter
def role_data(self, role_data):
"""Sets the role_data of this RoleDataFull.
authorization role # noqa: E501
:param role_data: The role_data of this RoleDataFull. # noqa: E501
:type: RoleData
"""
self._role_data = role_data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(RoleDataFull, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RoleDataFull):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RoleDataFull):
return True
return self.to_dict() != other.to_dict()
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import compas
if not compas.IPY:
import imageio
__all__ = ["gif_from_images"]
def gif_from_images(
files,
gif_path,
fps=10,
loop=0,
reverse=False,
pingpong=False,
subrectangles=True,
delete_files=False,
):
"""Create an animated GIF from a series of images.
Parameters
----------
files : list
The image series.
gif_path : str
The location to save the GIF.
fps : int, optional
Frames per second.
loop : int, optional
The number of loops.
reverse : bool, optional
If True, reverse the image series.
pingpong : bool, optional
If True, add a reverse sequence to the end of the base sequence to go back to the beginning.
subrectangles : bool, optional
        If True, optimize the file size by looking for invariant subrectangles.
    delete_files : bool, optional
        If True, delete the source image files after the GIF has been written.
Returns
-------
None
"""
if reverse:
files.reverse()
if pingpong:
files += files[::-1]
with imageio.get_writer(
gif_path, mode="I", fps=fps, loop=loop, subrectangles=subrectangles
) as writer:
for filename in files:
image = imageio.imread(filename)
writer.append_data(image)
if delete_files:
for filename in files:
os.remove(filename)
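# Hedged usage sketch (the frame file names below are hypothetical):
# gif_from_images(["frame_000.png", "frame_001.png", "frame_002.png"],
#                 "animation.gif", fps=5, pingpong=True)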
|
from pydantic import BaseModel, Extra
class Mqtt5ChannelBinding(BaseModel):
"""
This document defines how to describe MQTT 5-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class Mqtt5MessageBinding(BaseModel):
"""
This document defines how to describe MQTT 5-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class Mqtt5OperationBinding(BaseModel):
"""
This document defines how to describe MQTT 5-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class Mqtt5ServerBinding(BaseModel):
"""
This document defines how to describe MQTT 5-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
|
#!/usr/bin/python3
import pandas as pd
import os.path
from pinja.color.color import *
class CsvImprivement:
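    """
    Replace every CSV cell containing a 'call' instruction with the normalized
    string 'call 0x0' and write the result to <original name>_TRANS.csv.
    """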
KEYWORD1 = "call"
IMPROVEMENT_STR1 = "call 0x0"
extension = ".csv"
new_tailname_extension = "_TRANS.csv"
new_file_name = ""
def create_improvement_csv(self, input_csv_file_path):
input_csv = pd.read_csv(input_csv_file_path)
output_data_list = []
output_data_frame = None
for row in input_csv.itertuples():
output_row_list = []
for i in range(len(row)):
if self.KEYWORD1 in str(row[i]):
output_row_list.append(self.IMPROVEMENT_STR1)
else:
output_row_list.append(row[i])
output_data_list.append(output_row_list)
output_data_frame = pd.DataFrame(output_data_list)
self.create_csv_file_name(input_csv_file_path)
temp_newfilename = self.new_file_name + self.new_tailname_extension
output_data_frame.to_csv(temp_newfilename)
print_green('OUTPUT >>>>>>>> {} '.format(temp_newfilename))
def create_csv_file_name(self, input_csv_file_path):
name, ext = os.path.splitext(input_csv_file_path)
self.new_file_name = name
|
#coding=utf-8
import tornado.web as web
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPRequest
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import StaticFileHandler
from tornado.log import enable_pretty_logging
import motor
import os
import json
from bson import ObjectId
enable_pretty_logging()
client = AsyncHTTPClient()
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
class PoolHandler(web.RequestHandler):
@gen.coroutine
def post(self):
self.write('')
self.finish()
@gen.coroutine
def get(self):
resp_data = []
cursor = db.user.find()
cursor.sort([('id', -1)])
for document in (yield cursor.to_list(length=999)):
resp_data.append(document)
self.write(JSONEncoder().encode(resp_data))
self.finish()
class PlusOneHandler(web.RequestHandler):
@gen.coroutine
def post(self):
self.write('')
self.finish()
@gen.coroutine
def get(self):
life = yield db.life.find_one()
newTime = life['timeRaised'] + 1
life['timeRaised'] = newTime
yield db.life.update({'id': 1}, {'$set': {'timeRaised': newTime}})
self.write(JSONEncoder().encode(life))
self.finish()
class RemoveHandler(web.RequestHandler):
@gen.coroutine
def post(self):
usr_data = json.loads(self.request.body)
yield db.user.remove({'id': int(usr_data["id"]), 'mobile': int(usr_data["mobile"])})
self.finish()
@gen.coroutine
def get(self):
self.write('')
self.finish()
class UploadHandler(web.RequestHandler):
@gen.coroutine
def post(self):
self.finish()
usr_data = json.loads(self.request.body)
        print(usr_data)
yield db.user.remove({'id': int(usr_data["id"])})
yield db.follow.remove({'id': int(usr_data["id"])})
if usr_data["followRing1"] is not None and int(usr_data["followRing1"]) > 0:
follow_1 = str(int(usr_data["followRing1"]))
yield db.follow.insert({'id': int(usr_data["id"]), 'follow': int(usr_data["followRing1"])})
else:
follow_1 = ''
if usr_data["followRing2"] is not None and int(usr_data["followRing2"]) > 0:
follow_2 = str(int(usr_data["followRing2"]))
yield db.follow.insert({'id': int(usr_data["id"]), 'follow': int(usr_data["followRing2"])})
else:
follow_2 = ''
if usr_data["followRing3"] is not None and int(usr_data["followRing3"]) > 0:
follow_3 = str(int(usr_data["followRing3"]))
yield db.follow.insert({'id': int(usr_data["id"]), 'follow': int(usr_data["followRing3"])})
else:
follow_3 = ''
if usr_data["followRing4"] is not None and int(usr_data["followRing4"]) > 0:
follow_4 = str(int(usr_data["followRing4"]))
yield db.follow.insert({'id': int(usr_data["id"]), 'follow': int(usr_data["followRing4"])})
else:
follow_4 = ''
if usr_data["followRing5"] is not None and int(usr_data["followRing5"]) > 0:
follow_5 = str(int(usr_data["followRing5"]))
yield db.follow.insert({'id': int(usr_data["id"]), 'follow': int(usr_data["followRing5"])})
else:
follow_5 = ''
ad = ''
if usr_data["ad"] is not None:
ad = usr_data["ad"]
yield db.user.insert({'id': int(usr_data["id"]), 'mobile': int(usr_data["mobile"]), 'ad': ad})
sms_mobile = str(int(usr_data["mobile"]))
sms_follow = follow_1 + '/' + follow_2 + '/' + follow_3 + '/' + follow_4 + '/' + follow_5
visit_index = HTTPRequest(
url="http://222.73.117.158/msg/HttpBatchSendSM?account=anhuidudai_kdys&pswd=Ustcring2016&mobile=" + sms_mobile + "&msg=【科大戒指】你成功关注了" + sms_follow + "号戒指&needstatus=true",
method='GET',
headers={'User-Agent': 'Firefox'}
)
yield gen.Task(client.fetch, visit_index)
notice_users = []
cursor = db.follow.find({'follow': int(usr_data["id"])})
for document in (yield cursor.to_list(length=int(100))):
notice_users.append(document)
for user in notice_users:
            print(user['id'])
user_info = yield db.user.find_one({'id': int(user['id'])})
if user_info is not None and user_info['mobile'] is not None:
sms_mobile = str(int(user_info["mobile"]))
sms_id = str(usr_data["id"])
visit_index = HTTPRequest(
url="http://222.73.117.158/msg/HttpBatchSendSM?account=anhuidudai_kdys&pswd=Ustcring2016&mobile=" + sms_mobile + "&msg=【科大戒指】有同学发布了" + sms_id + "号戒指&needstatus=true",
method='GET',
headers={'User-Agent': 'Firefox'}
)
yield gen.Task(client.fetch, visit_index)
@gen.coroutine
def get(self):
self.write('')
self.finish()
def make_app(static_path):
return web.Application([
(r"/pool", PoolHandler),
(r"/upload", UploadHandler),
(r"/remove", RemoveHandler),
(r"/plus-one", PlusOneHandler),
(r"/", web.RedirectHandler, {'url': 'index.html'}),
(r"/(.*)", StaticFileHandler, {'path': static_path}),
], db=db)
if __name__ == "__main__":
static_path = os.path.dirname(os.path.realpath(__file__)) + '/public'
ip = '127.0.0.1'
port = 8969
dbclient = motor.MotorClient(ip, 27017)
# database is ustcRing
db = dbclient.ustcRing
app = make_app(static_path)
app.listen(port)
IOLoop.instance().start()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import sc2reader
from sc2reader.utils import get_files
from sc2reader.exceptions import ReadError
def doFile(filename, arguments):
'''Prints summary information about SC2 replay file'''
try:
replay = sc2reader.read_file(filename, debug=True)
except ReadError as e:
prev = e.game_events[-1]
print "\nVersion {0} replay:\n\t{1}".format(e.replay.release_string, e.replay.filename)
print "\t{0}, Type={1:X}, Code={2:X}".format(e.msg, e.type,e.code)
print "\tPrevious Event: {0}".format(prev.name)
print "\t\t"+prev.bytes.encode('hex')
print "\tFollowing Bytes:"
print "\t\t"+e.buffer.read_range(e.location,e.location+30).encode('hex')
print "Error with '{0}': ".format(filename)
print e
return
except TypeError as e:
print "Error with '%s': " % filename,
print e
return
except ValueError as e:
print "Error with '%s': " % filename,
print e
return
if arguments.map:
print " Map: {0}".format(replay.map)
if arguments.length:
print " Length: {0}".format(replay.length)
if arguments.date:
print " Date: {0}".format(replay.date)
if arguments.teams:
races = list()
for team in replay.teams:
races.append(''.join([player.pick_race[0] for player in team.players]))
print " Teams: {0}".format("v".join(races))
for team in replay.teams:
print " Team {0}\t{1} ({2})".format(team.number,team.players[0].name,team.players[0].pick_race[0])
for player in team.players[1:]:
print " \t{0} ({1})".format(player.name,player.pick_race[0])
if arguments.messages:
print " Messages:"
for message in replay.messages:
print " {0}".format(message)
if arguments.version:
print " Version: {0}".format(replay.release_string)
print
def main():
parser = argparse.ArgumentParser(description='Prints basic information from SC2 replay files or directories.')
parser.add_argument('paths', metavar='filename', type=str, nargs='+',
help="Paths to one or more SC2Replay files or directories")
parser.add_argument('--date', action="store_true", default=True,
help="Print game date")
parser.add_argument('--length', action="store_true", default=False,
help="Print game duration mm:ss in game time (not real time)")
parser.add_argument('--map', action="store_true", default=True,
help="Print map name")
parser.add_argument('--messages', action="store_true", default=False,
help="Print in-game player chat messages")
parser.add_argument('--teams', action="store_true", default=True,
help="Print teams, their players, and the race matchup")
parser.add_argument('--version', action="store_true", default=True,
help="Print the release string as seen in game")
parser.add_argument('--recursive', action="store_true", default=True,
help="Recursively read through directories of replays")
arguments = parser.parse_args()
for path in arguments.paths:
if arguments.recursive:
files = get_files(path)
else:
files = get_files(path, depth=0)
for file in files:
print "\n--------------------------------------\n{0}\n".format(file)
doFile(file, arguments)
if __name__ == '__main__':
main()
|
from c0101_retrieve_ref import retrieve_ref
from c0102_timestamp import timestamp_source
from c0103_trim_record_to_max import trim_record_to_max
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0107_decide_inclusion import decide_inclusion
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
from c0110_find_temp_end import find_temp_end
from c0111_retrieve_analyzed import retrieve_analyzed
from c0112_plot_truncate import plot_truncate
from c0113_plot_acc import plot_acc
from c0201_statisticsCalculation import statisticsCalculation
from c0202_machineLearningBasic import machineLearningBasic
from c0204_statisticSegments import statisticSegments
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statistics
def statisticsAnalysis():
"""
    Run the statistical analysis step: compute statistics on each processed record segment.
"""
print("begin statistical analysis")
# statisticsCalculation()
# run statistics on each segment
statisticSegments()
# machineLearningBasic()
print("end statistical analysis")
|
n = int(input())
third = n % 10
first = n // 100
second = n // 10 % 10
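# e.g. for n = 123: first = 1 (hundreds), second = 2 (tens), third = 3 (ones),
# so the loops below print every product a * b * c with a <= 3, b <= 2, c <= 1.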
for a in range(1, third + 1):
for b in range(1, second + 1):
for c in range(1, first + 1):
print('{0} * {1} * {2} = {3};'.format(a, b, c, (a * b * c)))
|
def mul(a, b):
"""
>>> mul(2, 3)
6
>>> mul('a', 2)
'aa'
"""
return a * b
def add(a, b):
"""
>>> add(2, 3)
5
>>> add('a', 'b')
'ab'
"""
return a + b
|
import pytest
from presidio_analyzer import RecognizerResult
from presidio_evaluator.data_generator.presidio_perturb import PresidioPerturb
from tests import get_mock_fake_df
import pandas as pd
@pytest.mark.parametrize(
# fmt: off
"text, entity1, entity2, start1, end1, start2, end2",
[
(
"Hi I live in South Africa and my name is Toma",
"LOCATION", "PERSON", 13, 25, 41, 45,
),
("Africa is my continent, James", "LOCATION", "PERSON", 0, 6, 24, 29,),
],
# fmt: on
)
def test_presidio_perturb_two_entities(
text, entity1, entity2, start1, end1, start2, end2
):
presidio_response = [
RecognizerResult(entity_type=entity1, start=start1, end=end1, score=0.85),
RecognizerResult(entity_type=entity2, start=start2, end=end2, score=0.85),
]
presidio_perturb = PresidioPerturb(fake_pii_df=get_mock_fake_df())
fake_df = presidio_perturb.fake_pii
perturbations = presidio_perturb.perturb(
original_text=text, presidio_response=presidio_response, count=5
)
assert len(perturbations) == 5
for perturbation in perturbations:
assert fake_df[entity1].str.lower()[0] in perturbation.lower()
assert fake_df[entity2].str.lower()[0] in perturbation.lower()
assert text[:start1].lower() in perturbation.lower()
assert text[end1:start2].lower() in perturbation.lower()
def test_entity_translation():
text = "My email is email@email.com"
presidio_response = [
RecognizerResult(entity_type="EMAIL_ADDRESS", start=12, end=27, score=0.5)
]
presidio_perturb = PresidioPerturb(fake_pii_df=get_mock_fake_df())
fake_df = presidio_perturb.fake_pii
perturbations = presidio_perturb.perturb(
original_text=text, presidio_response=presidio_response, count=1
)
assert fake_df["EMAIL_ADDRESS"].str.lower()[0] in perturbations[0]
def test_subset_perturbation():
text = "My name is Dan"
presidio_response = [
RecognizerResult(entity_type="PERSON", start=11, end=14, score=0.5)
]
fake_df = pd.DataFrame(
{
"FIRST_NAME": ["Neta", "George"],
"LAST_NAME": ["Levy", "Harrison"],
"GENDER": ["Female", "Male"],
"NameSet": ["Hebrew", "English"],
}
)
ignore_types = {"DATE_TIME", "LOCATION", "ADDRESS", "GENDER"}
presidio_perturb = PresidioPerturb(fake_pii_df=fake_df, ignore_types=ignore_types)
perturbations = presidio_perturb.perturb(
original_text=text,
presidio_response=presidio_response,
namesets=["Hebrew"],
genders=["Female"],
count=5,
)
for pert in perturbations:
assert "neta" in pert.lower()
|
# DESCRIPTION: Tests the UI and GUI.
# 4920646f6e5c2774206361726520696620697420776f726b73206f6e20796f7572206d61636869
# 6e652120576520617265206e6f74207368697070696e6720796f7572206d616368696e6521
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# TODO: Finish off these tests.
import unittest
from lib import core, chessboard, pieces, usercontrol
from tests.test_core import errormessage
class BasicUICalls(unittest.TestCase):
"""Conducts the very basic tests on the UI."""
def setUp(self):
self.ui = usercontrol.EngineUI()
return None
def test_processusermove_King(self):
startpos = core.Vector(4, 4)
endpos = core.Vector(5, 4)
piece = pieces.KingPiece
string = 'Ke5>e6'
self.assertEqual(
self.ui.processusermove(string),
(piece, (startpos, endpos))
)
return None
def test_processusermove_Queen(self):
startpos = core.Vector(2, 3)
endpos = core.Vector(3, 4)
piece = pieces.QueenPiece
string = 'Qd3>e4'
self.assertEqual(
self.ui.processusermove(string),
(piece, (startpos, endpos))
)
return None
def test_processusermove_Bishop(self):
startpos = core.Vector(0, 7)
endpos = core.Vector(1, 6)
piece = pieces.BishopPiece
string = 'Bh1>g2'
self.assertEqual(
self.ui.processusermove(string),
(piece, (startpos, endpos))
)
return None
def test_processusermove_Knight(self):
startpos = core.Vector(0, 5)
endpos = core.Vector(2, 4)
piece = pieces.KnightPiece
string = 'Nf1>e3'
self.assertEqual(
self.ui.processusermove(string),
(piece, (startpos, endpos))
)
return None
def test_processusermove_Rook(self):
startpos = core.Vector(0, 0)
endpos = core.Vector(5, 0)
piece = pieces.RookPiece
string = 'Ra1>a6'
self.assertEqual(
self.ui.processusermove(string),
(piece, (startpos, endpos))
)
return None
def test_processusermove_Pawn(self):
startpos = core.Vector(1, 4)
endpos = core.Vector(3, 4)
piece = pieces.PawnPiece
string = 'Pe2>e4'
self.assertEqual(
self.ui.processusermove(string),
(piece, (startpos, endpos))
)
return None
def test_processusermove_badstring(self):
with self.assertRaises(NameError):
self.ui.processusermove('hello there')
with self.assertRaises(NameError):
self.ui.processusermove('Kg1>Jd2')
return None
def test_processusermove_badnotation(self):
with self.assertRaises(core.UnknownPieceError):
self.ui.processusermove('Me4>e6')
return None
def test_processusermove_nonstring(self):
with self.assertRaises(TypeError):
self.ui.processusermove([5, 1])
with self.assertRaises(TypeError):
self.ui.processusermove(12)
with self.assertRaises(TypeError):
self.ui.processusermove(-5.22)
with self.assertRaises(TypeError):
self.ui.processusermove(pieces.KingPiece)
return None
def test_addmovetohistory_basicmove(self):
movestring = 'Pe2>e4'
self.ui.addmovetohistory('P', 12, 28)
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
def test_addmovetohistory_capture(self):
movestring = 'Qe5xd5'
self.ui.addmovetohistory('Q', 36, 35, capture=True)
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
def test_addmovetohistory_check(self):
movestring = 'Qe5>d5+'
self.ui.addmovetohistory('Q', 36, 35, check=True)
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
def test_addmovetohistory_checkmate(self):
movestring = 'Bh7>f5#'
self.ui.addmovetohistory('B', 55, 37, checkmate=True)
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
def test_addmovetohistory_castlelong(self):
movestring = '0-0-0'
self.ui.addmovetohistory(castlelong=True)
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
def test_addmovetohistory_castleshort(self):
movestring = '0-0'
self.ui.addmovetohistory(castleshort=True)
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
def test_addmovetohistory_promotionto(self):
movestring = 'Pa7>a8=Q'
self.ui.addmovetohistory('P', 48, 56, promotionto='Q')
self.assertEqual(
self.ui.history[-1], movestring,
errormessage(self.ui.history[-1], movestring)
)
return None
class BasicGUICalls(unittest.TestCase):
"""Conducts tests on the GUI."""
def setUp(self):
self.gui = usercontrol.EngineGUI()
self.board = chessboard.ChessBoard()
self.board.setupnormalboard()
return None
def test_generateasciiboard(self):
        print("\n")
        print(self.gui.generateasciiboard(self.board, 'white'))
return None
if __name__ == '__main__':
unittest.main(verbosity=2)
|
import os
from options.test_options import TestOptions
from models import create_model
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
import torch
def isSubString(Str,Filters):
flag = False
for substr in Filters:
if substr in Str:
flag = True
break
return flag
def getImageList(image_dir):
filelist = []
filters = [".jpg",".bmp",".png"]
files = os.listdir(image_dir)
for f in files:
if isSubString(f,filters):
filelist.append(f)
return filelist
def loadImage(image_path,h,w):
image = Image.open(image_path).convert("RGB")
return image.resize((h,w))
#nparray = np.asarray(image)
#return nparrary.astype("float").reshape((1,c,h,w))
def getTransforms():
t_list = []
t_list.append(transforms.ToTensor())
t_list.append(transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)))
return transforms.Compose(t_list)
if __name__ == "__main__":
opt = TestOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
trans = getTransforms()
model = create_model(opt)
filelist = getImageList(opt.dataroot)
for image_file in filelist:
        print(image_file, "...")
image = loadImage(opt.dataroot + "/" + image_file,opt.fineSize,opt.fineSize)
image = trans(image)
image = torch.unsqueeze(image, 0)
if opt.model == "cycle_gan":
if opt.which_direction == "AtoB":
model.inferenceA2B(image)
model.save_image(opt.results_dir + "/gb_" + image_file,model.fake_B)
else:
model.inferenceB2A(image)
model.save_image(opt.results_dir + "/ga_" + image_file,model.fake_A)
if opt.model == "cartoon_gan":
model.inference(image)
model.save_image(opt.results_dir + "/"+ image_file,model.gfake)
|
"""
Ajay Kc
013213328
EE381
Project 5 Part 2
The program estimates, for several sample sizes and confidence levels, the percentage
of sample means whose confidence interval contains the true population mean (75).
"""
import numpy as np
import matplotlib.pyplot as plt
import math as math
bearingList = np.random.normal(75,0.75,1000000)
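# Interval checked below (note added for clarity): x_bar +/- z * (s / sqrt(n)),
# with z replaced by the Student's t critical value for the small-sample runs.
# A trial counts as a "Success" when the true mean (75) lies inside the interval.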
def checkConfidenceInterval(mean,sd,n,num):
    upper = mean + num * (sd / math.sqrt(n))
    lower = mean - num * (sd / math.sqrt(n))
if((75>=lower)and(75<=upper)):
return "Success"
else:
return "Failure"
def estimatePopulationMean(n, confidenceInterval):
normalList = []
tDistList = []
num1=0
num2={}
if(confidenceInterval == 95):
num1 = 1.96
num2 = {5:2.78,40:2.02,120:1.98}
else:
num1 = 2.58
num2 = {5:4.60,40:2.70,120:2.62}
for i in range(0, 10000):
sampleSet = np.random.choice(bearingList,n)
mean = np.mean(sampleSet)
sd = np.std(sampleSet)
normalList.append(checkConfidenceInterval(mean,sd,n,num1))
tDistList.append(checkConfidenceInterval(mean,sd,n,num2[n]))
successCountNormal = normalList.count("Success")
successCounttDist = tDistList.count("Success")
print("For n = %s" %n)
print("For %s confidence interval, %s using normal distribution" %(confidenceInterval,successCountNormal/100))
print("For %s confidence interval, %s using student's t distribution" %(confidenceInterval, successCounttDist / 100))
print(" ")
estimatePopulationMean(5,95)
estimatePopulationMean(5,99)
estimatePopulationMean(40,95)
estimatePopulationMean(40,99)
estimatePopulationMean(120,95)
estimatePopulationMean(120,99)
|
import os
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import datetime
import torch
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import ast
from daisy.utils.sampler import Sampler
from daisy.utils.parser import parse_args
from daisy.utils.splitter import split_test, split_validation, perform_evaluation
from daisy.utils.data import PointData, PairData, incorporate_in_ml100k, sparse_mx_to_torch_sparse_tensor,\
incorporate_sinfo_by_dim
from daisy.utils.loader import load_rate, get_ur, convert_npy_mat, build_candidates_set, add_last_clicked_item_context
from torch_geometric.utils import from_scipy_sparse_matrix
from scipy.sparse import identity, csr_matrix
from IPython import embed
def build_evaluation_set(test_ur, total_train_ur, item_pool, candidates_num, sampler, context_flag=False, tune=False):
test_ucands = build_candidates_set(test_ur, total_train_ur, item_pool, candidates_num, context_flag=context_flag)
#embed()
# get predict result
print('')
print('Generate recommend list...')
print('')
loaders = {}
for u in tqdm(test_ucands.keys(), disable=tune):
# build a test MF dataset for certain user u to accelerate
if context_flag:
# tmp = pd.DataFrame({
# 'user': u[0],
# 'item': test_ucands[u],
# 'context': [*u[1:]],
# 'rating': [0. for _ in test_ucands[u]], # fake label, make nonsense
# })
tmp = pd.DataFrame({
'user': [u[0] for _ in test_ucands[u]],
'item': test_ucands[u],
'context': [[*u[1:]] for _ in test_ucands[u]],
'rating': [0. for _ in test_ucands[u]], # fake label, make nonsense
})
else:
tmp = pd.DataFrame({
'user': [u for _ in test_ucands[u]],
'item': test_ucands[u],
'rating': [0. for _ in test_ucands[u]], # fake label, make nonsense
})
tmp_neg_set = sampler.transform(tmp, is_training=False, context=context_flag)
tmp_dataset = PairData(tmp_neg_set, is_training=False, context=context_flag)
tmp_loader = data.DataLoader(
tmp_dataset,
batch_size=candidates_num,
shuffle=False,
num_workers=0
)
loaders[u] = tmp_loader
return loaders, test_ucands
def main(args=None):
''' all parameter part '''
if args is None:
args = parse_args()
# if args.algo_name == 'ngcf':
# args.gce = True
# for visualization
date = datetime.now().strftime('%y%m%d%H%M%S')
if args.logs:
if len(args.logsname) == 0:
string1 = "SINFO" if args.side_information else ""
random_context = "random_context" if args.random_context else ""
context_type = args.context_type if args.dataset == 'frappe' else ""
rankall = 'RANK_ALL' if args.rankall else ""
INIT = "INIT" if args.load_init_weights else ""
string2 = "reindexed" if args.reindex and not args.gce else f"graph_{args.gcetype}"
string3 = "_UII_" if args.uii and args.context else "_UIC_"
string = string1 + string2 + string3
context_folder = "context" if args.context else "no_context"
loss = 'BPR' if args.loss_type == "BPR" else "CL"
sampling = 'neg_sampling_each_epoch' if args.neg_sampling_each_epoch else ""
stopping = 'not_early_stopping' if args.not_early_stopping else ""
total_info = f'{args.algo_name}_{rankall}_lr={args.lr}_DO={args.dropout}_bs={args.batch_size}_{string}' \
f'_{args.epochs}epochs_{sampling}_{stopping}'
writer = SummaryWriter(log_dir=f'logs/{args.dataset}/{context_folder}/logs_{total_info}_{date}/')
else:
writer = SummaryWriter(log_dir=f'logs/{args.dataset}/logs_{args.logsname}_{date}/')
else:
writer = SummaryWriter(log_dir=f'logs/nologs/logs/')
# p = multiprocessing.Pool(args.num_workers)
# FIX SEED AND SELECT DEVICE
seed = args.seed
args.lr = float(args.lr)
args.batch_size = int(args.batch_size)
args.dropout = float(args.dropout)
if torch.cuda.is_available() and args.remove_top_users == 0:
torch.cuda.manual_seed_all(seed)
device = "cuda"
else:
device = "cpu"
np.random.seed(seed)
torch.manual_seed(seed)
device = torch.device(device)
# store running time in time_log file
time_log = open('time_log.txt', 'a')
''' LOAD DATA AND ADD CONTEXT IF NECESSARY '''
df, users, items, unique_original_items, popularity_dic = load_rate(args.dataset, args.prepro, binary=True,
context=args.context,
gce_flag=args.gce,
cut_down_data=args.cut_down_data,
remove_top_users=args.remove_top_users,
remove_on=args.remove_on,
side_info=args.side_information,
context_type=args.context_type,
context_as_userfeat=args.context_as_userfeat,
flag_run_statistics=args.statistics)
if args.side_information and not args.dataset == 'ml-100k':
if args.dataset in ['lastfm', 'drugs']:
if args.context_as_userfeat:
aux_si = df.iloc[:, :-2].copy() # take all columns unless user, rating and timestamp
args.context = False
else:
if 'context' in df.columns:
aux_si = df.iloc[:, :-3].copy() # take all columns unless user, rating and timestamp
aux_si.drop(columns=['context'], inplace=True)
else:
aux_si = df.iloc[:, :-2].copy() # take all columns unless user, rating and timestamp
elif (np.unique(df['timestamp']) == [1])[0]:
# BIPARTED GRAPH
args.context = False
print('BI-PARTED GRAPH WITH X')
aux_si = df.iloc[:, :-2].copy() # take all columns unless user, rating and timestamp
else:
aux_si = df[['item', 'side_info']].copy()
aux_si = aux_si.drop_duplicates('item')
else:
if 'user-feat' in df.columns:
df.drop(columns=['user-feat'], inplace=True)
        print('NO SIDE INFORMATION')
if args.reindex:
if 'array_context_flag' in df.columns: # isinstance(row['context'], list)
if args.context:
assert (np.unique(df['timestamp']) == 1)[0] == True
df['user'] = df['user'].astype(np.int64)
df['item'] = df['item'].astype(np.int64)
df['item'] = df['item'] + users
df['context'] = df['context'].apply(lambda x: ast.literal_eval(x))
df['context'] = df['context'].apply(lambda x: [protein + (users+items) for protein in x])
#and type(ast.literal_eval(df['context'][0])) is list:
# timestamp is forced
else:
df = df.astype(np.int64)
df['item'] = df['item'] + users
if args.context:
df = add_last_clicked_item_context(df, args.dataset, args.random_context)
# add context as independent nodes
if not args.uii:
df['context'] = df['context'] + items
# TODO: else: df['context'] = df['context'].apply(lambda x: [c] for c in x)
# check last number is positive
# np.max(df.to_numpy(), axis=0)
assert df['item'].tail().values[-1] > 0
''' SPLIT DATA '''
train_set, test_set = split_test(df, args.test_method, args.test_size)
train_set, val_set, _ = split_validation(train_set, val_method=args.test_method, list_output=False)
# temporary used for tuning test result
# train_set = pd.read_csv(f'./experiment_data/train_{args.dataset}_{args.prepro}_{args.test_method}.dat')
# test_set = pd.read_csv(f'./experiment_data/test_{args.dataset}_{args.prepro}_{args.test_method}.dat')
df = pd.concat([train_set, test_set], ignore_index=True)
if not args.context:
dims = np.max(df.to_numpy().astype(int), axis=0) + 1
else:
if 'array_context_flag' in df.columns:
# type(df.to_numpy()[0][2]) == list
import itertools
prot_list = list(itertools.chain(df['context'].values))
# lenghts = [len(np.unique(seq)) for seq in prot_list]
# context_num = int(np.max(lenghts))
context_num = 1
flattened_proteins = [val for sublist in prot_list for val in sublist]
dims = np.max(df[['user', 'item']].to_numpy().astype(int), axis=0) + 1
dims = np.append(dims, [np.max(flattened_proteins)+1])
else:
dims = np.max(df.to_numpy().astype(int), axis=0) + 1
context_num = int(len(dims) - 4)
''' GET GROUND-TRUTH AND CANDIDATES '''
# get ground truth
test_ur = get_ur(test_set, context=args.context, eval=False)
val_ur = get_ur(val_set, context=args.context, eval=False)
total_train_ur = get_ur(train_set, context=args.context, eval=True)
# initial candidate item pool
item_pool = set(range(dims[0], dims[1])) if args.reindex else set(range(dims[1]))
candidates_num = items if args.rankall else args.cand_num
print('='*50, '\n')
''' FORMAT DATA AND CHOOSE MODEL '''
sampler = Sampler(
dims,
num_ng=args.num_ng,
sample_method=args.sample_method,
sample_ratio=args.sample_ratio,
reindex=args.reindex
)
neg_set, adj_mx = sampler.transform(train_set, is_training=True, context=args.context, pair_pos=None)
if args.gce:
# if args.mh > 1:
# print(f'[ MULTI HOP {args.mh} ACTIVATED ]')
# adj_mx = adj_mx.__pow__(int(args.mh))
X = sparse_mx_to_torch_sparse_tensor(identity(adj_mx.shape[0])).to(device)
# X, _ = from_scipy_sparse_matrix(identity(adj_mx.shape[0]))
# X = X.to(device)
if args.side_information:
if args.dataset == 'ml-100k':
# X_gender = sparse_mx_to_torch_sparse_tensor(X_gender_mx).to(device)
# X = torch.cat((X, X_gender), -1) # torch.Size([2096, 2114]) 2096 + 18 = 2114
si = pd.read_csv(f'./data/{args.dataset}/side-information.csv', index_col=0)
si.rename(columns={'id': 'item', 'genres': 'side_info'}, inplace=True)
# si = si[['item', 'side_info']]
if df['item'].min() > 0: # Reindex items
# TODO: INCORPORATE si_extension to X
si_extension = incorporate_in_ml100k(si[['item', 'side_info']], X.shape[1], unique_original_items,
users)
X_gender = sparse_mx_to_torch_sparse_tensor(csr_matrix(si_extension.values)).to(device)
if args.actors:
si.drop(columns=['side_info'], inplace=True)
si.rename(columns={'actors': 'side_info'}, inplace=True)
si_ext = incorporate_in_ml100k(si[['item', 'side_info']], X.shape[1], unique_original_items, users)
X_sinfo = sparse_mx_to_torch_sparse_tensor(csr_matrix(si_ext.values)).to(device)
X = torch.cat((X, X_gender, X_sinfo), -1)
else:
X = torch.cat((X, X_gender), -1) # torch.Size([2096, 2114]) 2096 + 18 = 2114
elif args.dataset == 'music': # MORE GENERIC CASE
si_extension = incorporate_sinfo_by_dim(aux_si, X.shape[1], users)
X_sinfo = sparse_mx_to_torch_sparse_tensor(csr_matrix(si_extension.values)).to(device)
X = torch.cat((X, X_sinfo), -1)
else: #lastfm # drugs
cat_mx = []
for col in aux_si.columns[2:]:
# context_as_userfeat
si_extension = incorporate_sinfo_by_dim(aux_si, X.shape[1], dims, col=col,
contextasfeature=args.context_as_userfeat)
X_sinfo = sparse_mx_to_torch_sparse_tensor(csr_matrix(si_extension.astype(str).astype(int).values)).to(device)
cat_mx.append(X_sinfo)
X_sinfo = torch.cat(cat_mx, -1)
X = torch.cat((X, X_sinfo), -1)
# embed()
# We retrieve the graph's edges and send both them and graph to device in the next two lines
edge_idx, edge_attr = from_scipy_sparse_matrix(adj_mx)
# TODO: should I pow the matrix here?
edge_idx = edge_idx.to(device)
if args.problem_type == 'pair':
# train_dataset = PairData(neg_set, is_training=True, context=args.context)
train_dataset = PairData(train_set, sampler=sampler, adj_mx=adj_mx, is_training=True, context=args.context)
else:
train_dataset = PointData(neg_set, is_training=True, context=args.context)
user_num = dims[0]
max_dim = dims[2] if args.context else dims[1]
# X = X.to_dense()
if args.gce and args.side_information:
# TODO: I THINK ITS LIKE THIS! UNCOMMENT
print('GCE GOOD WAY')
max_dim = X.shape[1]
# TODO: I THINK ITS LIKE THIS! COMMENT
# X = torch.transpose(X, 0, 1)
if args.problem_type == 'point':
if args.algo_name == 'mf':
from daisy.model.point.MFRecommender import PointMF
model = PointMF(
user_num,
max_dim,
factors=args.factors,
epochs=args.epochs,
optimizer=args.optimizer,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
reindex=args.reindex,
X=X if args.gce else None,
GCE_flag=args.gce,
A=edge_idx if args.gce else None,
dropout=args.dropout,
gpuid=args.gpu
)
elif args.algo_name == 'fm':
from daisy.model.point.FMRecommender import PointFM
model = PointFM(
user_num,
max_dim,
factors=args.factors,
optimizer=args.optimizer,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
dropout=args.dropout
)
elif args.algo_name == 'nfm':
from daisy.model.point.NFMRecommender import PointNFM
model = PointNFM(
user_num,
max_dim,
factors=args.factors,
optimizer=args.optimizer,
act_function=args.act_func,
num_layers=args.num_layers,
batch_norm=args.no_batch_norm,
dropout=args.dropout,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
mf=args.mf,
)
elif args.algo_name == 'ncf':
layers = [len(dims[:-2])*64, 64, 32, 8] if not args.context else [len(dims[:-2])*64, 64, 32, 8]
from daisy.model.point.NCF import NCF
model = NCF(
user_num,
max_dim,
factors=args.factors,
layers=layers,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
mf=args.mf,
dropout=args.dropout
)
elif args.algo_name == 'deepfm':
from daisy.model.point.DeepFMRecommender import PointDeepFM
model = PointDeepFM(
user_num,
max_dim,
factors=args.factors,
act_activation=args.act_func,
optimizer=args.optimizer,
num_layers=args.num_layers,
batch_norm=args.no_batch_norm,
dropout=args.dropout,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
context_flag=args.context,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
)
else:
raise ValueError('Invalid algorithm name')
elif args.problem_type == 'pair':
if args.algo_name == 'mf':
from daisy.model.pair.MFRecommender import PairMF
model = PairMF(
user_num,
max_dim,
factors=args.factors,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
dropout=args.dropout,
args=args
)
elif args.algo_name == 'fm':
from daisy.model.pair.FMRecommender import PairFM
model = PairFM(
user_num,
max_dim,
factors=args.factors,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
dropout=args.dropout,
args=args
)
elif args.algo_name == 'ngcf':
from daisy.model.pair.NGCF import PairNGCF
# model = PairNGCF(
# n_users=user_num,
# n_items=max_dim,
# embed_size=args.factors,
# adj_matrix=adj_mx,
# device=device,
# reindex=args.reindex,
# n_layers=2
#
# )
model = PairNGCF(
n_users=user_num,
max_dim=max_dim,
emb_dim=args.factors,
adj_mtx=adj_mx,
device=device,
reindex=args.reindex,
layers=[64, 64],
node_dropout=args.dropout,
mess_dropout=0
)
elif args.algo_name == 'nfm':
from daisy.model.pair.NFMRecommender import PairNFM
model = PairNFM(
user_num,
max_dim,
factors=args.factors,
act_function=args.act_func,
num_layers=args.num_layers,
batch_norm=args.no_batch_norm,
dropout=args.dropout,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
mf=args.mf
)
elif args.algo_name == 'ncf':
# layers = [len(dims[:-2])*32, 32, 16, 8] if not args.context else [len(dims[:-2])*32, 32, 16, 8]
# args.factors = layers[1]
from daisy.model.pair.NCFRecommender import PairNCF
model = PairNCF(
user_num,
max_dim,
args.factors,
num_layers=3,
GCE_flag=args.gce,
reindex=args.reindex,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
mf=args.mf,
dropout=args.dropout,
num_context=context_num
)
elif args.algo_name == 'deepfm':
from daisy.model.pair.DeepFMRecommender import PairDeepFM
model = PairDeepFM(
user_num,
max_dim,
factors=args.factors,
act_activation=args.act_func,
num_layers=args.num_layers,
batch_norm=args.no_batch_norm,
dropout=args.dropout,
epochs=args.epochs,
lr=args.lr,
reg_1=args.reg_1,
reg_2=args.reg_2,
loss_type=args.loss_type,
GCE_flag=args.gce,
reindex=args.reindex,
context_flag=args.context,
X=X if args.gce else None,
A=edge_idx if args.gce else None,
gpuid=args.gpu,
)
else:
raise ValueError('Invalid algorithm name')
else:
raise ValueError('Invalid problem type')
def collate_fn(batch):
return list(zip(*batch))
''' BUILD RECOMMENDER PIPELINE '''
train_loader = data.DataLoader(
train_dataset,
batch_size=int(args.batch_size),
shuffle=True,
num_workers=args.num_workers,
# collate_fn=lambda x: x
collate_fn=collate_fn
)
# TODO: refactor train
if args.problem_type == 'pair':
if args.remove_top_users > 0:
# do inference
print('+'*80)
print('NO TRAINING -- JUST INFERENCE')
''' INFERENCE '''
print('TEST_SET: Start Calculating Metrics......')
loaders_test, candidates_test = build_evaluation_set(test_ur, total_train_ur, item_pool, candidates_num,
sampler, context_flag=args.context)
s_time = time.time()
elapsed_time = time.time() - s_time
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
_, _, _ = perform_evaluation(loaders_test, candidates_test, model, args, device, test_ur, s_time,
minutes_train=minutes, writer=None, seconds_train=seconds, desc=total_info,
populary_dict=popularity_dic)
exit()
else:
loaders, candidates = build_evaluation_set(val_ur, total_train_ur, item_pool, candidates_num, sampler,
context_flag=args.context)
s_time = time.time()
from daisy.model.pair.train import train
train(args, model, train_loader, device, args.context, loaders, candidates, val_ur, writer=writer,
desc=total_info)
    elif args.problem_type == 'point':
        loaders, candidates = build_evaluation_set(val_ur, total_train_ur, item_pool, candidates_num, sampler,
                                                   context_flag=args.context)
        s_time = time.time()
        from daisy.model.point.train import train
        train(args, model, train_loader, device, args.context, loaders, candidates, val_ur, writer=writer)
else:
raise ValueError()
elapsed_time = time.time() - s_time
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
print(f'TOTAL ELAPSED TIME: {hours:.2f} hours, {minutes:.2f} min, {seconds:.4f}seconds')
time_log.write(f'{args.dataset}_{args.prepro}_{args.test_method}_{args.problem_type}{args.algo_name}'
f'_{args.loss_type}_{args.sample_method}_GCE={args.gce}, {minutes:.2f} min, {seconds:.4f}seconds' + '\n')
time_log.close()
# print('+'*80)
# ''' TEST METRICS '''
# print('TEST_SET: Start Calculating Metrics......')
# loaders_test, candidates_test = build_evaluation_set(test_ur, total_train_ur, item_pool, candidates_num,
# sampler, context_flag=args.context)
# perform_evaluation(loaders_test, candidates_test, model, args, device, test_ur, s_time, minutes_train=minutes,
# writer=None, seconds_train=seconds)
if __name__ == '__main__':
main()
|
"""
Module for classes to prepare training datasets
Prepare training data for pointer generator (add tags <s> and </s> tags to summaries):
Prepare bioasq
python prepare_training_data.py -bt
Or with the question:
python prepare_training_data.py -bt --add-q
Prepare training data for bart, with or without question appended to beginning of abstract text
python prepare_training_data.py --bart-bioasq --add-q
python prepare_training_data.py --bart-bioasq
Prepare training data for sentence classification:
python prepare_training_data.py --bioasq-sent
"""
import json
import argparse
import re
import os
from sklearn.utils import shuffle as sk_shuffle
import rouge
import spacy
def get_args():
"""
    Argument definitions
"""
parser = argparse.ArgumentParser(description="Arguments for data exploration")
parser.add_argument("-t",
dest="tag_sentences",
action="store_true",
help="tag the sentences with <s> and </s>, for use with pointer generator network")
parser.add_argument("-e",
dest="summ_end_tag",
action="store_true",
help="Add the summ end tag to the end of the summaries. This was observed not to improve performance on the MedInfo evaluation set")
parser.add_argument("-b",
dest="bioasq_pg",
action="store_true",
help="Make the bioasq training set for pointer generator")
parser.add_argument("--bioasq-sent",
dest="bioasq_sc",
action="store_true",
help="Make the bioasq training set for sentence classification")
parser.add_argument("--bart-bioasq",
dest="bart_bioasq",
action="store_true",
help="Prepare the bioasq training set for bart")
parser.add_argument("--add-q",
dest="add_q",
action="store_true",
help="Concatenate the question to the beginning of the text. Currently only implemented as an option for bart data and the bioasq abs2summ data")
return parser
class BioASQ():
"""
Class to create various versions of BioASQ training dataset
"""
def __init__(self):
"""
Initiate spacy
"""
self.nlp = spacy.load('en_core_web_sm')
self.Q_END = " [QUESTION?] "
self.SUMM_END = " [END]"
self.ARTICLE_END = " [ARTICLE_SEP] "
def format_summary_sentences(self, summary):
"""
Split summary into sentences and add sentence tags to the strings: <s> and </s>
"""
tokenized_abs = self.nlp(summary)
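        # e.g. (illustrative): "Aspirin thins the blood. Use with care." becomes
        # "<s> Aspirin thins the blood. </s> <s> Use with care. </s>"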
summary = " ".join(["<s> {s} </s>".format(s=s.text.strip()) for s in tokenized_abs.sents])
return summary
def _load_bioasq(self):
"""
Load bioasq collection generated in process_bioasq.py
"""
with open("data/bioasq_collection.json", "r", encoding="utf8") as f:
data = json.load(f)
return data
def create_abstract2snippet_dataset(self):
"""
        Generate the BioASQ abstract-to-snippet (1 to 1) training dataset. This function uses the same keys, 'summary' and 'articles', for each summary-article pair
        as the MedlinePlus training data does, which keeps it compatible with the answer summarization data loading function in the pointer generator network.
        This is currently the only dataset with the option to include the question. Since this dataset works the best, the add_q option was not added to the others.
"""
bioasq_collection = self._load_bioasq()
training_data_dict = {}
snip_id = 0
for i, q in enumerate(bioasq_collection):
question = q
for snippet in bioasq_collection[q]['snippets']:
training_data_dict[snip_id] = {}
if args.summ_end_tag:
snippet_text = snippet['snippet'] + self.SUMM_END
else:
snippet_text = snippet['snippet']
if args.tag_sentences:
snippet_text = self.format_summary_sentences(snippet_text)
training_data_dict[snip_id]['summary'] = snippet_text
                # Add the question with a special question separator token to the beginning of the article.
abstract = snippet['article']
with_question = "_without_question"
if args.add_q:
abstract = question + self.Q_END + abstract
with_question = "_with_question"
training_data_dict[snip_id]['articles'] = abstract
training_data_dict[snip_id]['question'] = question
snip_id += 1
        with open("data/bioasq_abs2summ_training_data{}.json".format(with_question), "w", encoding="utf-8") as f:
json.dump(training_data_dict, f, indent=4)
def calculate_sentence_level_rouge(self, snip_sen, abs_sen, evaluator):
"""
For each pair of sentences, calculate rouge score
"""
rouge_score = evaluator.get_scores(abs_sen, snip_sen)['rouge-l']['f']
return rouge_score
def create_binary_sentence_classification_dataset_with_rouge(self):
"""
Create a dataset for training a sentence classification model, where the binary y labels are assigned based on the
best rouge score for a sentence in the article when compared to each sentence in the summary
"""
# Initiate rouge evaluator
evaluator = rouge.Rouge(metrics=['rouge-l'],
max_n=3,
limit_length=False,
length_limit_type='words',
apply_avg=False,
apply_best=True,
alpha=1,
weight_factor=1.2,
stemming=False)
bioasq_collection = self._load_bioasq()
        training_data_dict = {}
snip_id = 0
for i, q in enumerate(bioasq_collection):
question = q
for snippet in bioasq_collection[q]['snippets']:
training_data_dict[snip_id] = {}
labels = []
# Sentencize snippet
snippet_text = snippet['snippet']
tokenized_snip = self.nlp(snippet_text)
snippet_sentences = [s.text.strip() for s in tokenized_snip.sents]
# Sentencize abstract
abstract_text = snippet['article']
tokenized_abs = self.nlp(abstract_text)
abstract_sentences = [s.text.strip() for s in tokenized_abs.sents]
rouge_scores = []
for abs_sen in abstract_sentences:
best_rouge = 0
for snip_sen in snippet_sentences:
rouge_score = self.calculate_sentence_level_rouge(snip_sen, abs_sen, evaluator)
if best_rouge < rouge_score:
best_rouge = rouge_score
if best_rouge > .9:
label = 1
else:
label = 0
labels.append(label)
training_data_dict[snip_id]['question'] = q
training_data_dict[snip_id]['sentences'] = abstract_sentences
training_data_dict[snip_id]['labels'] = labels
snip_id += 1
        with open("data/bioasq_abs2summ_binary_sent_classification_training.json", "w", encoding="utf-8") as f:
json.dump(training_data_dict, f, indent=4)
# For each sentence in each abstract, compare it to each sentence in answer. Record the best rouge score.
def create_data_for_bart(self):
"""
Write the train and val data to file so that the processor and tokenizer for bart will read it, as per fairseqs design
"""
bioasq_collection = self._load_bioasq()
# Additional string is added to the question of the beginning of the abstract text
if args.add_q:
q_name = "with_question"
else:
q_name = "without_question"
# Open medinfo data preprocessed in prepare_validation_data.py
with open("data/medinfo_section2answer_validation_data_{}.json".format(q_name), "r", encoding="utf-8") as f:
medinfo_val = json.load(f)
try:
os.mkdir("../models/bart/bart_config/{}".format(q_name))
except FileExistsError:
            print("Directory", q_name, "already exists")
train_src = open("../models/bart/bart_config/{q}/bart.train_{q}.source".format(q=q_name), "w", encoding="utf8")
train_tgt = open("../models/bart/bart_config/{q}/bart.train_{q}.target".format(q=q_name), "w", encoding="utf8")
val_src = open("../models/bart/bart_config/{q}/bart.val_{q}.source".format(q=q_name), "w", encoding="utf8")
val_tgt = open("../models/bart/bart_config/{q}/bart.val_{q}.target".format(q=q_name), "w", encoding="utf8")
snippets_list = []
abstracts_list = []
for i, q in enumerate(bioasq_collection):
for snippet in bioasq_collection[q]['snippets']:
snippet_text = snippet['snippet'].strip()
abstract_text = snippet['article'].strip()
# Some questions contain stray newlines; replace them with spaces.
question = q.replace("\n", " ")
if args.add_q:
abstract_text = question + self.Q_END + abstract_text
abstracts_list.append(abstract_text)
snippets_list.append(snippet_text)
snp_cnt = 0
print("Shuffling data")
snippets_list, abstracts_list = sk_shuffle(snippets_list, abstracts_list, random_state=13)
for snippet_text, abstract_text in zip(snippets_list, abstracts_list):
snp_cnt += 1
train_src.write("{}\n".format(abstract_text))
train_tgt.write("{}\n".format(snippet_text))
for q_id in medinfo_val:
# The prepared medinfo data may have sentence tags in it for pointer generator.
# There is an option in the prepare_validation_data.py script to not tag the data,
# but it is easier to keep track of the datasets to just remove the tags here.
summ = medinfo_val[q_id]['summary'].strip()
summ = summ.replace("<s>", "")
summ = summ.replace("</s>", "")
articles = medinfo_val[q_id]['articles'].strip()
val_src.write("{}\n".format(articles))
val_tgt.write("{}\n".format(summ))
train_src.close()
train_tgt.close()
val_src.close()
val_tgt.close()
# Make sure there were no funny newlines added
train_src = open("../models/bart/bart_config/{q}/bart.train_{q}.source".format(q=q_name), "r", encoding="utf8").readlines()
train_tgt = open("../models/bart/bart_config/{q}/bart.train_{q}.target".format(q=q_name), "r", encoding="utf8").readlines()
val_src = open("../models/bart/bart_config/{q}/bart.val_{q}.source".format(q=q_name), "r", encoding="utf8").readlines()
val_tgt = open("../models/bart/bart_config/{q}/bart.val_{q}.target".format(q=q_name), "r", encoding="utf8").readlines()
print("Number of snippets: ", snp_cnt)
assert len(train_src) == snp_cnt, len(train_src)
assert len(train_tgt) == snp_cnt
assert len(val_src) == len(medinfo_val)
assert len(val_tgt) == len(medinfo_val)
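# Resulting layout (illustrative): each line of bart.train_{q}.source holds one abstract
# (optionally prefixed with the question and the Q_END separator), and the same line number
# in bart.train_{q}.target holds the corresponding snippet summary. This parallel,
# line-aligned .source/.target format is what fairseq's BART preprocessing expects.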
def process_data():
"""
Save training data sets
"""
if args.bioasq_pg:
BioASQ().create_abstract2snippet_dataset()
if args.bioasq_sc:
BioASQ().create_binary_sentence_classification_dataset_with_rouge()
if args.bart_bioasq:
BioASQ().create_data_for_bart()
if __name__ == "__main__":
global args
args = get_args().parse_args()
process_data()
|
# The new config inherits a base config to highlight the necessary modification
_base_ = '/home/guest01/projects/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py'
# 1. dataset settings
dataset_type = 'CocoDataset'
classes = ('chromos',)
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
# explicitly add your class names to the field `classes`
classes=classes,
ann_file='/home/guest01/projects/mmdetection/data/chromos/annotations/instances_train2017.json',
img_prefix='/home/guest01/projects/mmdetection/data/chromos/train2017'),
val=dict(
type=dataset_type,
# explicitly add your class names to the field `classes`
classes=classes,
ann_file='/home/guest01/projects/mmdetection/data/chromos/annotations/instances_val2017.json',
img_prefix='/home/guest01/projects/mmdetection/data/chromos/val2017'),
test=dict(
type=dataset_type,
# explicitly add your class names to the field `classes`
classes=classes,
ann_file='/home/guest01/projects/mmdetection/data/chromos/annotations/instances_val2017.json',
img_prefix='/home/guest01/projects/mmdetection/data/chromos/val2017'))
# 2. model settings
# explicitly over-write all the `num_classes` fields from the default 80 to 1.
model = dict(
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
# explicitly over-write the `num_classes` field from the default 80 to 1.
num_classes=1),
dict(
type='Shared2FCBBoxHead',
# explicitly over-write the `num_classes` field from the default 80 to 1.
num_classes=1),
dict(
type='Shared2FCBBoxHead',
# explicitly over-write the `num_classes` field from the default 80 to 1.
num_classes=1)],
# explicitly over-write the `num_classes` field from the default 80 to 1.
mask_head=dict(num_classes=1)))
# We can use the pre-trained Cascade Mask R-CNN model to obtain higher performance
load_from = 'checkpoints/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth'
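# A minimal sketch of how this config might be launched with a standard mmdetection
# checkout (the config path and GPU count below are assumptions, not part of this file):
#
#   python tools/train.py configs/chromos/cascade_mask_rcnn_r50_fpn_20e_chromos.py
#   # or, for multi-GPU training:
#   bash tools/dist_train.sh configs/chromos/cascade_mask_rcnn_r50_fpn_20e_chromos.py 2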
|
"""
Django settings for src project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lb1b2210#j!(m15!5)9*n-2pzx6g(tcz4h+t7wwaj$aul1z8oc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
CUSTOM_APPS = [
'app'
]
INSTALLED_APPS += CUSTOM_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = False
USE_TZ = False
DATETIME_FORMAT = 'Y-m-d H:i:s'
DATE_FORMAT = 'Y-m'
TIME_FORMAT = 'H:i:s'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'src/static'),
)
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
}
try:
# pylint: disable=W0614, C0413, wildcard-import
from src.local_settings import * # noqa
except ImportError:
pass
|
#!/usr/bin/python3
# Copyright 2017 ETH Zurich
# Copyright 2018 ETH Zurich, Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sciond` --- Wrapper over low level SCIOND API
===================================================
"""
# Stdlib
import os
# SCION
from lib.defines import (
SCIOND_API_DEFAULT_SOCK,
SCIOND_API_SOCKDIR,
)
def get_default_sciond_path(ia=None):
"""Return sciond socket path for a given IA
:param ia: ISD_AS addr
:returns: Format string representing path of sciond socket
"""
sock_path = ""
if ia is None or ia.is_zero():
sock_path = SCIOND_API_DEFAULT_SOCK
else:
sock_path = "sd%s.sock" % (ia.file_fmt())
return os.path.join(SCIOND_API_SOCKDIR, sock_path)
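# Illustrative examples (the IA value below is an assumption, not taken from this module):
#   get_default_sciond_path()  -> os.path.join(SCIOND_API_SOCKDIR, SCIOND_API_DEFAULT_SOCK)
#   get_default_sciond_path(ia) with ia.file_fmt() == "1-ff00_0_110"
#                              -> os.path.join(SCIOND_API_SOCKDIR, "sd1-ff00_0_110.sock")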
|
"""Has binance trading api interface."""
from decimal import Decimal
from typing import Optional, Protocol
from exapi.models.binance import (BinanceAccountInfoModel,
BinanceAccountTrades,
BinanceCanceledOrderModel,
BinanceCanceledOrders, BinanceOrderInfoModel,
BinanceOrderInfos, BinanceOrderModel,
BinanceTestOrderModel)
from exapi.typedefs.binance import (OrderResponseType, OrderSide,
OrderType, TimeInForce)
class IBinanceSpotTradingAPI(Protocol):
"""Binance trading api interface.
Has methods for trading request making to binance exchange.
"""
async def new_test_order(self, symbol: str,
side: OrderSide,
type: OrderType,
time_in_force: Optional[TimeInForce] = None,
quantity: Optional[Decimal] = None,
quantity_precision: Optional[int] = None,
quote_order_qty: Optional[Decimal] = None,
price: Optional[Decimal] = None,
price_precision: Optional[int] = None,
new_client_order_id: Optional[str] = None,
stop_price: Optional[Decimal] = None,
iceberg_qty: Optional[Decimal] = None,
new_order_resp_type: Optional[OrderResponseType] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceTestOrderModel:
"""Tests new order creation and signature/recv_window long.
Creates and validates a new order but does not send it into the matching engine.
Weight: 1
Json example:
{}
Args:
symbol (str)
side (OrderSide)
type (OrderType)
time_in_force (Optional[TimeInForce], optional)
quantity (Optional[Decimal], optional)
quantity_precision (Optional[int], optional): quantity precision (digits after comma).
If None, the precision of the number itself is used. Not sent in the request.
quote_order_qty (Optional[Decimal], optional)
price (Optional[Decimal], optional)
price_precision (Optional[int], optional): price precision (digits after comma).
If None, the precision of the number itself is used. Not sent in the request.
new_client_order_id (Optional[str], optional): A unique id among open orders.
Automatically generated if not sent.
stop_price (Optional[Decimal], optional): Used with STOP_LOSS, STOP_LOSS_LIMIT,
TAKE_PROFIT, and TAKE_PROFIT_LIMIT orders.
iceberg_qty (Optional[Decimal], optional): Used with LIMIT, STOP_LOSS_LIMIT,
and TAKE_PROFIT_LIMIT to create an iceberg order.
new_order_resp_type (Optional[OrderResponseType], optional): Set the response JSON. ACK, RESULT, or FULL;
MARKET and LIMIT order types default to FULL, all other orders default to ACK.
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceTestOrderModel
"""
async def new_order(self, symbol: str,
side: OrderSide,
type: OrderType,
time_in_force: Optional[TimeInForce] = None,
quantity: Optional[Decimal] = None,
quantity_precision: Optional[int] = None,
quote_order_qty: Optional[Decimal] = None,
price: Optional[Decimal] = None,
price_precision: Optional[int] = None,
new_client_order_id: Optional[str] = None,
stop_price: Optional[Decimal] = None,
iceberg_qty: Optional[Decimal] = None,
new_order_resp_type: Optional[OrderResponseType] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceOrderModel:
"""Send in a new order.
Weight: 1
Json example:
{ // ACK response type
"symbol": "BTCUSDT",
"orderId": 28,
"orderListId": -1, // Unless OCO, value will be -1
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595
} OR
{ // RESULT response type
"symbol": "BTCUSDT",
"orderId": 28,
"orderListId": -1, // Unless OCO, value will be -1
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"cummulativeQuoteQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL"
} OR
{ // FULL response type
"symbol": "BTCUSDT",
"orderId": 28,
"orderListId": -1, //Unless OCO, value will be -1
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"cummulativeQuoteQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL",
"fills": [
{
"price": "4000.00000000",
"qty": "1.00000000",
"commission": "4.00000000",
"commissionAsset": "USDT"
},
{
"price": "3999.00000000",
"qty": "5.00000000",
"commission": "19.99500000",
"commissionAsset": "USDT"
},
{
"price": "3998.00000000",
"qty": "2.00000000",
"commission": "7.99600000",
"commissionAsset": "USDT"
},
{
"price": "3997.00000000",
"qty": "1.00000000",
"commission": "3.99700000",
"commissionAsset": "USDT"
},
{
"price": "3995.00000000",
"qty": "1.00000000",
"commission": "3.99500000",
"commissionAsset": "USDT"
},
...
]
}
Args:
symbol (str)
side (OrderSide)
type (OrderType)
time_in_force (Optional[TimeInForce], optional)
quantity (Optional[Decimal], optional)
quantity_precision (Optional[int], optional): quantity precision (digits after comma).
If None, the precision of the number itself is used. Not sent in the request.
quote_order_qty (Optional[Decimal], optional)
price (Optional[Decimal], optional)
price_precision (Optional[int], optional): price precision (digits after comma).
If None, the precision of the number itself is used. Not sent in the request.
new_client_order_id (Optional[str], optional): A unique id among open orders.
Automatically generated if not sent.
stop_price (Optional[Decimal], optional): Used with STOP_LOSS, STOP_LOSS_LIMIT,
TAKE_PROFIT, and TAKE_PROFIT_LIMIT orders.
iceberg_qty (Optional[Decimal], optional): Used with LIMIT, STOP_LOSS_LIMIT,
and TAKE_PROFIT_LIMIT to create an iceberg order.
new_order_resp_type (Optional[OrderResponseType], optional): Set the response JSON. ACK, RESULT, or FULL;
MARKET and LIMIT order types default to FULL, all other orders default to ACK.
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceOrderModel
"""
async def cancel_order(self, symbol: str,
order_id: Optional[int] = None,
orig_client_order_id: Optional[str] = None,
new_client_order_id: Optional[str] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceCanceledOrderModel:
"""Cancel an active order.
Either order_id or orig_client_order_id must be sent.
Weight: 1
Json example:
{
"symbol": "LTCBTC",
"origClientOrderId": "myOrder1",
"orderId": 4,
"orderListId": -1, // Unless part of an OCO, the value will always be -1.
"clientOrderId": "cancelMyOrder1",
"price": "2.00000000",
"origQty": "1.00000000",k
"executedQty": "0.00000000",
"cummulativeQuoteQty": "0.00000000",
"status": "CANCELED",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY"
}
Args:
symbol (str)
order_id (Optional[int], optional)
orig_client_order_id (Optional[str], optional)
new_client_order_id (Optional[str], optional): Used to uniquely identify this cancel.
Automatically generated by default.
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceCanceledOrderModel
"""
async def cancel_orders(self, symbol: str,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceCanceledOrders:
"""Cancels all active orders on a symbol. This includes OCO orders.
Weight: 1
[
{
"symbol": "BTCUSDT",
"origClientOrderId": "E6APeyTJvkMvLMYMqu1KQ4",
"orderId": 11,
"orderListId": -1,
"clientOrderId": "pXLV6Hz6mprAcVYpVMTGgx",
"price": "0.089853",
"origQty": "0.178622",
"executedQty": "0.000000",
"cummulativeQuoteQty": "0.000000",
"status": "CANCELED",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY"
},
{
"symbol": "BTCUSDT",
"origClientOrderId": "A3EF2HCwxgZPFMrfwbgrhv",
"orderId": 13,
"orderListId": -1,
"clientOrderId": "pXLV6Hz6mprAcVYpVMTGgx",
"price": "0.090430",
"origQty": "0.178622",
"executedQty": "0.000000",
"cummulativeQuoteQty": "0.000000",
"status": "CANCELED",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY"
},
{
"orderListId": 1929,
"contingencyType": "OCO",
"listStatusType": "ALL_DONE",
"listOrderStatus": "ALL_DONE",
"listClientOrderId": "2inzWQdDvZLHbbAmAozX2N",
"transactionTime": 1585230948299,
"symbol": "BTCUSDT",
"orders": [
{
"symbol": "BTCUSDT",
"orderId": 20,
"clientOrderId": "CwOOIPHSmYywx6jZX77TdL"
},
{
"symbol": "BTCUSDT",
"orderId": 21,
"clientOrderId": "461cPg51vQjV3zIMOXNz39"
},
...
],
"orderReports": [
{
"symbol": "BTCUSDT",
"origClientOrderId": "CwOOIPHSmYywx6jZX77TdL",
"orderId": 20,
"orderListId": 1929,
"clientOrderId": "pXLV6Hz6mprAcVYpVMTGgx",
"price": "0.668611",
"origQty": "0.690354",
"executedQty": "0.000000",
"cummulativeQuoteQty": "0.000000",
"status": "CANCELED",
"timeInForce": "GTC",
"type": "STOP_LOSS_LIMIT",
"side": "BUY",
"stopPrice": "0.378131",
"icebergQty": "0.017083"
},
{
"symbol": "BTCUSDT",
"origClientOrderId": "461cPg51vQjV3zIMOXNz39",
"orderId": 21,
"orderListId": 1929,
"clientOrderId": "pXLV6Hz6mprAcVYpVMTGgx",
"price": "0.008791",
"origQty": "0.690354",
"executedQty": "0.000000",
"cummulativeQuoteQty": "0.000000",
"status": "CANCELED",
"timeInForce": "GTC",
"type": "LIMIT_MAKER",
"side": "BUY",
"icebergQty": "0.639962"
},
...
]
},
...
]
Args:
symbol (str)
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceCanceledOrders
"""
async def query_order(self, symbol: str,
order_id: Optional[int] = None,
orig_client_order_id: Optional[str] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceOrderInfoModel:
"""Check an order's status.
Weight: 1
Notes:
- Either orderId or origClientOrderId must be sent.
- For some historical orders cummulativeQuoteQty will be < 0,
meaning the data is not available at this time.
Json example:
{
"symbol": "LTCBTC",
"orderId": 1,
"orderListId": -1, // Unless OCO, value will be -1
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"cummulativeQuoteQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559,
"updateTime": 1499827319559,
"isWorking": true,
"origQuoteOrderQty": "0.000000"
}
Args:
symbol (str)
order_id (Optional[int], optional)
orig_client_order_id (Optional[str], optional)
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceOrderInfoModel
"""
async def get_current_open_orders(self, symbol: Optional[str] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceOrderInfos:
"""Get all open orders on a symbol. Careful when accessing this with no symbol.
If the symbol is not sent, orders for all symbols will be returned in an array.
Weight: 1 for a single symbol; 40 when the symbol parameter is omitted
Json example:
[
{
"symbol": "LTCBTC",
"orderId": 1,
"orderListId": -1, // Unless OCO, the value will always be -1
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"cummulativeQuoteQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559,
"updateTime": 1499827319559,
"isWorking": true,
"origQuoteOrderQty": "0.000000"
},
...
]
Args:
symbol (Optional[str], optional): If the symbol is not sent,
orders for all symbols will be returned in an array.
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceOrderInfos
"""
async def get_all_orders(self, symbol: str,
order_id: Optional[int] = None,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceOrderInfos:
"""Get all account orders; active, canceled, or filled.
Weight: 5 with symbol
Notes:
- If orderId is set, it will get orders >= that orderId.
Otherwise most recent orders are returned.
- For some historical orders cummulativeQuoteQty will be < 0,
meaning the data is not available at this time.
Json example:
[
{
"symbol": "LTCBTC",
"orderId": 1,
"orderListId": -1, // Unless OCO, the value will always be -1
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"cummulativeQuoteQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559,
"updateTime": 1499827319559,
"isWorking": true,
"origQuoteOrderQty": "0.000000"
},
...
]
Args:
symbol (str)
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
order_id (Optional[int], optional)
start_time (Optional[int], optional)
end_time (Optional[int], optional)
limit (Optional[int], optional): Default 500; max 1000.
recv_window (Optional[int], optional): The value cannot be greater than 60000.
Returns:
BinanceOrderInfos
"""
async def get_account_info(self, recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceAccountInfoModel:
"""Get current account information.
Weight: 5
Json example:
{
"makerCommission": 15,
"takerCommission": 15,
"buyerCommission": 0,
"sellerCommission": 0,
"canTrade": true,
"canWithdraw": true,
"canDeposit": true,
"updateTime": 123456789,
"accountType": "SPOT",
"balances": [
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
},
{
"asset": "LTC",
"free": "4763368.68006011",
"locked": "0.00000000"
}
],
"permissions": [
"SPOT"
]
}
Args:
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceAccountInfoModel
"""
async def get_trades(self, symbol: str,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
from_id: Optional[int] = None,
limit: Optional[int] = None,
recv_window: Optional[int] = None,
timestamp: Optional[int] = None
) -> BinanceAccountTrades:
"""Get trades for a specific account and symbol.
Weight: 5
Notes:
- If fromId is set, it will get id >= that from_id.
Otherwise most recent trades are returned.
Json example:
[
{
"symbol": "BNBBTC",
"id": 28457,
"orderId": 100234,
"orderListId": -1, // Unless OCO, the value will always be -1
"price": "4.00000100",
"qty": "12.00000000",
"quoteQty": "48.000012",
"commission": "10.10000000",
"commissionAsset": "BNB",
"time": 1499865549590,
"isBuyer": true,
"isMaker": false,
"isBestMatch": true
},
...
]
Args:
symbol (str)
start_time (Optional[int], optional)
end_time (Optional[int], optional)
from_id (Optional[int], optional): trade id to fetch from.
Default gets most recent trades.
limit (Optional[int], optional): Default 500; max 1000.
recv_window (Optional[int], optional): The value cannot be greater than 60000.
timestamp (Optional[int]): if None current timestamp in milliseconds will be used.
Returns:
BinanceAccountTrades
"""
|
import kerfi
import tests
class CleanupPropertyTestCase(tests.BaseTestCase):
"""
Test cases for kerfi.cleanup_property() function.
"""
def test(self):
"""
Tests the kerfi.cleanup_property() function.
"""
self.assertEqual('key_key', kerfi.cleanup_property(' key key: '))
|
# Time: O(n)
# Space: O(1)
# An encoded string S is given.
# To find and write the decoded string to a tape,
# the encoded string is read one character at a time and the following steps are taken:
#
# If the character read is a letter, that letter is written onto the tape.
# If the character read is a digit (say d),
# the entire current tape is repeatedly written d-1 more times in total.
# Now for some encoded string S, and an index K,
# find and return the K-th letter (1 indexed) in the decoded string.
#
# Example 1:
#
# Input: S = "leet2code3", K = 10
# Output: "o"
# Explanation:
# The decoded string is "leetleetcodeleetleetcodeleetleetcode".
# The 10th letter in the string is "o".
# Example 2:
#
# Input: S = "ha22", K = 5
# Output: "h"
# Explanation:
# The decoded string is "hahahaha". The 5th letter is "h".
# Example 3:
#
# Input: S = "a2345678999999999999999", K = 1
# Output: "a"
# Explanation:
# The decoded string is "a" repeated 8301530446056247680 times. The 1st letter is "a".
#
# Note:
#
# 2 <= S.length <= 100
# S will only contain lowercase letters and digits 2 through 9.
# S starts with a letter.
# 1 <= K <= 10^9
# The decoded string is guaranteed to have less than 2^63 letters.
class Solution(object):
def decodeAtIndex(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
"""
i = 0
for c in S:
if c.isdigit():
i *= int(c)
else:
i += 1
for c in reversed(S):
K %= i
if K == 0 and c.isalpha():
return c
if c.isdigit():
i //= int(c)
else:
i -= 1
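# Quick check against the examples in the problem statement (added for illustration;
# not part of the original solution):
print(Solution().decodeAtIndex("leet2code3", 10))  # "o"
print(Solution().decodeAtIndex("ha22", 5))         # "h"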
|
sentence = 'The cat is named Sammy'
print(sentence.upper())
print(sentence.lower())
print(sentence.capitalize())
print(sentence.count('i'))
|
import os
from copy import deepcopy
from unittest import TestCase
from scrapy import Spider
from scrapy.settings import Settings
from scrapy_httpproxy import get_proxy
from scrapy_httpproxy.settings import default_settings
from scrapy_httpproxy.storage.environment import EnvironmentStorage
from scrapy_httpproxy.storage.settings import SettingsStorage
class StorageTest(TestCase):
def setUp(self):
self.spider = Spider('foo')
self.settings = Settings()
self.settings.setmodule(default_settings)
def tearDown(self):
pass
def test_environment(self):
oldenv = os.environ.copy()
os.environ['http_proxy'] = http_proxy = 'https://proxy.for.http:3128'
os.environ['https_proxy'] = https_proxy = 'http://proxy.for.https:8080'
os.environ.pop('file_proxy', None)
settings = deepcopy(self.settings)
storage = EnvironmentStorage(settings)
storage.open_spider(self.spider)
self.assertTrue(storage)
self.assertIn('http', storage)
self.assertIn('https', storage)
self.assertNotIn('file_proxy', storage)
self.assertSequenceEqual(
storage['http'],
get_proxy(http_proxy, 'http', storage.auth_encoding))
self.assertSequenceEqual(
storage['https'],
get_proxy(https_proxy, 'https', storage.auth_encoding))
storage.close_spider(self.spider)
os.environ = oldenv
def test_settings(self):
http_proxy_1 = 'https://proxy.for.http.1:3128'
http_proxy_2 = 'https://proxy.for.http.2:3128'
https_proxy_1 = 'http://proxy.for.https.1:8080'
https_proxy_2 = 'http://proxy.for.https.2:8080'
local_settings = {
'HTTPPROXY_ENABLED': True,
'HTTPPROXY_PROXIES': {'http': [http_proxy_1, http_proxy_2],
'https': [https_proxy_1, https_proxy_2]}
}
settings = deepcopy(self.settings)
settings.setdict(local_settings)
storage = SettingsStorage(settings)
storage.open_spider(self.spider)
self.assertTrue(storage)
self.assertIn('http', storage)
self.assertIn('https', storage)
self.assertSequenceEqual(
storage['http'],
get_proxy(http_proxy_1, 'http', storage.auth_encoding)
)
storage.close_spider(self.spider)
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for components commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
def GetUpdateManager(group_args):
"""Construct the UpdateManager to use based on the common args for the group.
Args:
group_args: An argparse namespace.
Returns:
update_manager.UpdateManager, The UpdateManager to use for the commands.
"""
try:
os_override = platforms.OperatingSystem.FromId(
group_args.operating_system_override)
except platforms.InvalidEnumValue as e:
raise exceptions.InvalidArgumentException('operating-system-override', e)
try:
arch_override = platforms.Architecture.FromId(
group_args.architecture_override)
except platforms.InvalidEnumValue as e:
raise exceptions.InvalidArgumentException('architecture-override', e)
platform = platforms.Platform.Current(os_override, arch_override)
root = (files.ExpandHomeDir(group_args.sdk_root_override)
if group_args.sdk_root_override else None)
url = (files.ExpandHomeDir(group_args.snapshot_url_override)
if group_args.snapshot_url_override else None)
return update_manager.UpdateManager(
sdk_root=root, url=url, platform_filter=platform)
|
"""
Created on Wed Jun 13 17:19:23 2018
@author: msdogan
This module retrieves inflow data from CDEC for defined station IDs.
Detailed info:
documentation: http://ulmo.readthedocs.io/en/latest/
GitHub repo: https://github.com/ulmo-dev/ulmo
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import urllib, time, datetime
# from ulmo import cdec
# # get all available station info
# stations = pd.DataFrame(cdec.historical.get_stations()).to_csv('CDEC_stations.csv',index=True)
# # get all available sensor info
# sensors = pd.DataFrame(cdec.historical.get_sensors()).to_csv('CDEC_sensors.csv',index=False)
station_IDs = [
'SHA', # 'Shasta'
# 'KES', # 'Keswick'
# 'ORO', # 'Oroville'
'BUL', # 'Bullards Bar'
# 'ENG', # 'Englebright'
'FOL', # 'Folsom'
# 'NAT', # 'Nimbus'
'NML', # 'New Melones'
# 'DNP', # 'Don Pedro'
# 'EXC', # 'New Exchequer'
'PNF' # 'Pine Flat'
]
sensor_ID = 76 # inflow
start_date='2009-12-31'
end_date='2018-09-01'
resolution = 'hourly' # Possible values are 'event', 'hourly', 'daily', and 'monthly'
duration_code = 'H'
conversion = 0.028316847 # convert ft3/s to m3/s
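# For example, a flow of 1000 ft3/s converts to about 28.3 m3/s (1000 * 0.028316847).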
# dat = cdec.historical.get_data(['SHA'],resolutions=['monthly'],sensor_ids=[sensor_ID])
# df = pd.DataFrame()
# for station_ID in station_IDs:
# print('retrieving station: '+station_ID)
# data = cdec.historical.get_data(
# station_ids=[station_ID], # check out station ID above
# sensor_ids=[sensor_ID], # check out sensor ID above
# resolutions=[resolution],
# start=start_date, # define start date, default is None
# end=end_date # define end date, default is None
# )
# # organize retrieved dictionary and convert to pandas Data Frame (convert negative values to positive and interpolate missing values)
# data = pd.DataFrame(data[station_ID][data[station_ID].keys()[0]]).abs().interpolate()*conversion
# data.columns = [station_ID]
# df[station_ID] = data[station_ID]
# df['KES'] = 0 # downstream of SHA
# df['NAT'] = 0 # downstream of FOL
# # save data
# df.to_csv('inflow_cms2.csv')
# # ****** Mustafa's Ulmo ******
# ex_url = 'http://cdec.water.ca.gov/dynamicapp/req/CSVDataServlet?Stations=SHA&SensorNums=76&dur_code=H&Start=2009-06-06&End=2018-09-06'
# for station_ID in station_IDs:
# print('retrieving station: '+station_ID)
# url = 'http://cdec.water.ca.gov/dynamicapp/req/CSVDataServlet?'+'Stations='+str(station_ID)+'&SensorNums='+str(sensor_ID)+'&dur_code='+str(duration_code)+'&Start='+str(start_date)+'&End='+str(end_date)
# print('url: '+url)
# web = urllib.urlopen(url)
# s = web.read()
# web.close()
# ff = open(str(station_ID)+"_CDEC.csv", "w")
# ff.write(s)
# ff.close()
# time.sleep(6)
# daterange = pd.date_range(start=start_date,end=end_date,freq=duration_code)
# print(daterange)
# for station_ID in station_IDs:
# print('organizing station: '+station_ID)
# df = pd.read_csv(str(station_ID)+"_CDEC.csv",index_col=0,header=0)
# df.index = pd.to_datetime(df.index)
# df = pd.read_csv('inflow_cms1.csv',index_col=0,header=0)
# df.index = pd.to_datetime(df.index)
# # look for outliers and correct if necessary
# print(df.describe())
# key = df.keys()[-1]
# print(key)
# print(df.loc[lambda df: df[key] > df[key].quantile(.9999), :])
df = pd.read_csv("CDEC_data_cfs.csv",header=0,index_col=0)
# df = pd.to_numeric(df.values,errors='coerce')
d = pd.DataFrame()
for key in df.keys():
d[key]=pd.to_numeric(df[key],errors='coerce')
d = d.abs().interpolate()*conversion
d.to_csv('CDEC_data_cms.csv')
# key = d.keys()[1]
# print(key)
# print(d.loc[lambda d: d[key] > d[key].quantile(.9999), :])
fig = plt.figure(figsize=(5,4)); ax = plt.gca()
d.boxplot(ax=ax, sym='*',showmeans=True,showfliers=False)
plt.xticks(np.arange(1,len(d.keys())+1),['Shasta','Folsom','New Melones','Pine Flat'])
plt.title('Hourly Average Reservoir Inflow ($m^3/s$)',loc='left',fontweight='bold')
plt.tight_layout()
plt.savefig('inflow_cms.pdf',transparent=False)
plt.show()
|
from line2.models.command import Command, Parameter, ParameterType, CommandResult, CommandResultType
from random import random
from line2.utils import IsEmpty
def Echo(message, options, text=''):
if not IsEmpty(text):
return CommandResult(type=CommandResultType.done, texts=[text])
echoCmd = Command(
'echo',
Echo,
desc='Echoes'
)
|
"""Test of the various bits of the parser."""
# fchic
from fchic.parser_definition import (
char_arr_deck,
deck,
details,
fchk,
header,
integer,
ival_deck,
name_of_deck,
real,
real_arr_deck,
rval_deck,
title,
)
def test_data_types() -> None:
"""Test parsers for various types."""
result = real.parseString("-6.20000000E+10")
assert float("".join(result["base"])) == -6.2
assert int(result["exponent"]) == 10
assert int(integer.parseString("4")[0]) == 4
assert int(integer.parseString("0")[0]) == 0
assert int(integer.parseString("-0")[0]) == 0
assert int(integer.parseString("+08")[0]) == 8
assert int(integer.parseString("-118 +3")[0]) == -118
def test_header() -> None:
"""Test header parser."""
ex_header = """TD-DFT calculation
Freq RB3LYP 6-31G(d)
Number of atoms I 12
"""
# test reading header
assert "".join(title.parseString(ex_header)) == "TD-DFT calculation"
result = details.parseString(ex_header.splitlines()[1])
assert ["".join(word) for word in result] == ["Freq", "RB3LYP", "6-31G(d)"]
result = header("header").parseString(ex_header)
assert "title" in result["header"] and "details" in result["header"]
def test_deck_name() -> None:
"""Test deck name parser."""
ex_block1 = "Current cartesian coordinates R N= 36\n"
ex_block2 = "Number of independent functions I 102\n"
ex_block3 = """Info1-9 I N= 9
7 6 0 0 0 100
6 18 -502"""
assert (
"".join(name_of_deck.parseString(ex_block1)) == "Current cartesian coordinates"
)
assert (
"".join(name_of_deck.parseString(ex_block2))
== "Number of independent functions"
)
assert "".join(name_of_deck.parseString(ex_block3)) == "Info1-9"
def test_decks() -> None:
"""Test reading of decks."""
example_decks = """Number of alpha electrons I 21
Route C N= 7
# Geom=AllCheck Guess=Read SCRF=Check GenChk B3LYP/6-31G(d)
Symm=None Freq=(NoRaman)
Nuclear charges R N= 12
6.00000000E+01 6.00000000D-01 -6.00000E-01 6.00000000E+00 6.00000000E+00
6.00000000E+00 1.00000000E+00 1.00000000E+00 1.00000000E+00 1.00000000E+00
1.00000000E+00 1.00000000E+00
Virial Ratio R 2.010118722531438E+00
"""
result = ival_deck.parseString(example_decks.splitlines()[0])
assert "".join(result["key"]) == "Number of alpha electrons"
assert "".join(result["type"]) == "I"
assert "".join(result["value"]) == "21"
result = rval_deck.parseString(example_decks.splitlines()[-1])
assert "".join(result["key"]) == "Virial Ratio"
assert "".join(result["type"]) == "R"
assert "".join(result["value"]["base"]) == "2.010118722531438"
assert "".join(result["value"]["exponent"]) == "+00"
result = char_arr_deck.parseString("\n".join(example_decks.splitlines()[1:6]))
assert "".join(result["key"]) == "Route"
assert "".join(result["type"]) == "C"
assert "".join(result["size"]) == "7"
assert "".join(result["value"]) == "\n".join(example_decks.splitlines()[2:4])
result = real_arr_deck.parseString("\n".join(example_decks.splitlines()[4:-1]))
assert float(result.value[0]) == 60.0
assert float(result.value[1]) == 0.6
assert float(result.value[2]) == -0.6
assert float(result.value[7]) == 1.0
# Test reading all decks
result = deck[...].parseString(example_decks)
assert len(result) == 4
def test_full() -> None:
"""Test reading a fchk file."""
ex_full = """TD-DFT calculation
Freq RB3LYP 6-31G(d)
Number of atoms I 12
Info1-9 I N= 9
7 6 0 0 0 100
6 18 -502
Multiplicity I 1
Number of electrons I 42
Number of alpha electrons I 21
Route C N= 7
# Geom=AllCheck Guess=Read SCRF=Check GenChk B3LYP/6-31G(d)
Symm=None Freq=(NoRaman)
Virial Ratio R 2.010118722531438E+00
Atom Types C N= 12
Int Atom Types I N= 12
0 0 0 0 0 0
0 0 0 0 0 0
Nuclear charges R N= 12
6.00000000E+00 6.00000000E+00 6.00000000E+00 6.00000000E+00 6.00000000E+00
6.00000000E+00 1.00000000E+00 1.00000000E+00 1.00000000E+00 1.00000000E+00
1.00000000E+00 1.00000000E+00
"""
data = fchk.parseString(ex_full)
assert "header" in data
assert "decks" in data
print(data["decks"])
assert len(data["decks"]) == 10
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer.testing import attr
from deepmark_chainer.net import vgg_d
class TestVGG_D(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 3, 224, 224)).astype(numpy.float32)
self.l = vgg_d.VGG_D()
def check_forward(self, xp):
x = chainer.Variable(xp.asarray(self.x))
self.l(x)
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.cupy)
|
# -*- coding: utf-8 -*-
"""
api.v1.auth.parser
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2017-18 by Wendell Hu.
:license: MIT, see LICENSE for more details.
"""
from flask_restful import reqparse
login_parser = reqparse.RequestParser()
login_parser.add_argument('username',
type=str,
location='json',
help='username must be included.')
login_parser.add_argument('password',
type=str,
location='json',
help='password must be included.')
|
X = float(input())
for i in range(100):
print('N[{}] = {:.4f}'.format(i, X))
X /= 2
|
#! /usr/bin/python -tt
# This is a simple command to check that "Is this ok [y/N]: " and yes and no
# have either all been translated or none have been translated.
import sys
import glob
from yum.misc import to_utf8
def trans(msg, default):
if msg == 'msgstr ""\n':
return unicode(default, encoding='utf-8')
if msg.startswith('msgstr "'):
msg = msg[len('msgstr "'):]
msg = msg[:-2]
return unicode(msg, encoding='utf-8')
for fname in glob.glob("po/*.po"):
next = None
is_this_ok = None
sis_this_ok = None
yes = None
syes = None
y = None
sy = None
no = None
sno = None
n = None
sn = None
for line in file(fname):
if next is not None:
if next == 'is_this_ok':
sis_this_ok = line
if line == 'msgstr ""\n' or line.find('[y/N]') != -1:
is_this_ok = False
else:
is_this_ok = True
if next == 'yes':
syes = line
yes = line != 'msgstr ""\n'
if next == 'y':
sy = line
y = line != 'msgstr ""\n'
if next == 'no':
sno = line
no = line != 'msgstr ""\n'
if next == 'n':
sn = line
n = line != 'msgstr ""\n'
next = None
continue
if line == 'msgid "Is this ok [y/N]: "\n':
next = 'is_this_ok'
if line == 'msgid "yes"\n':
next = 'yes'
if line == 'msgid "y"\n':
next = 'y'
if line == 'msgid "no"\n':
next = 'no'
if line == 'msgid "n"\n':
next = 'n'
if (is_this_ok is None or
yes is None or
y is None or
no is None or
n is None):
print >>sys.stderr, """\
ERROR: Can't find all the msg id's in %s
is_this_ok %s
yes %s
y %s
no %s
n %s
""" % (fname,
is_this_ok is None,
yes is None,
y is None,
no is None,
n is None)
sys.exit(1)
syes = trans(syes, "yes")
sy = trans(sy, "y")
sno = trans(sno, "no")
sn = trans(sn, "n")
if (is_this_ok != yes or
is_this_ok != no):
print >>sys.stderr, """\
ERROR: yes/no translations don't match in: %s
is_this_ok %5s: %s
yes %5s: %s
y %5s: %s
no %5s: %s
n %5s: %s
""" % (fname,
to_utf8(is_this_ok), to_utf8(sis_this_ok),
to_utf8(yes), to_utf8(syes), to_utf8(y), to_utf8(sy),
to_utf8(no), to_utf8(sno), to_utf8(n), to_utf8(sn))
if syes[0] != sy:
print >>sys.stderr, """\
ERROR: yes/y translations don't match in: %s
yes %5s: %s
y %5s: %s
""" % (fname,
yes, syes, y, sy)
if sno[0] != sn:
print >>sys.stderr, """\
ERROR: no/n translations don't match in: %s
no %5s: %s
n %5s: %s
""" % (fname,
no, sno, n, sn)
|
# Generated by Django 2.2 on 2021-09-16 05:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waterApp', '0016_auto_20210914_1328'),
]
operations = [
migrations.CreateModel(
name='OfflineLoggerData',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('date', models.DateField()),
('pressure', models.FloatField()),
('temperature', models.FloatField()),
('water_level', models.FloatField()),
('location', models.CharField(max_length=200)),
],
),
]
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
MODEL = 'gpt2-medium'
DEV = 'cuda'
TOP_K = 10
LENGTH = 50
WEIGHTS = [0.01, 0.01]
WEIGHTS = [0.02]
COND = 'positive politics'
COND = 'negative politics'
COND = 'positive'
COND = 'negative science'
COND = 'positive science'
COND = 'negative'
PREFIX = 'To conclude'
PREFIX = 'The potato'
PREFIX = 'The following is a negative sentence. The chicken tastes'
def top_k_filtering(logits, top_k=1, filter_value=-float("Inf"), min_tokens_to_keep=1):
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))
ids_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
ids_to_retain = torch.topk(logits, top_k)[1][0]
logits[ids_to_remove] = filter_value
return logits, ids_to_retain
tokenizer = GPT2Tokenizer.from_pretrained(MODEL)
model = GPT2LMHeadModel.from_pretrained(MODEL).to(DEV)
COND_IDS = torch.tensor([tokenizer.encode(COND)]).to(DEV)
# embed = model.get_input_embeddings()
# cond_embeds = embed(COND_IDS)[0]
# for i in range(cond_embeds.shape[0]):
# embed.weight.data += WEIGHTS[i] * cond_embeds[i]
input_ids = torch.tensor([tokenizer.encode(PREFIX, add_special_tokens=True)]).to(DEV)
# past = model(input_ids[:, :-1])[1]
for t in range(input_ids.shape[1], LENGTH): # generate tokens until the sequence reaches LENGTH
# model = GPT2LMHeadModel.from_pretrained(MODEL).to(DEV)
# criterion = torch.nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.00008)
# for step in range(1):
# logits, _ = model(input_ids)
# loss = criterion(logits[:, -1], COND_IDS[0])
# model.zero_grad()
# loss.backward()
# # clip_grad_norm(model.parameters(), 0.5)
# optimizer.step()
with torch.no_grad():
logits, _ = model(input_ids)
logits = logits[:, -1]
logits, ids_to_retain = top_k_filtering(logits, TOP_K)
probs = F.softmax(logits, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1)
input_ids = torch.cat([input_ids, next_tokens], dim=-1)
print(tokenizer.decode(input_ids[0]))
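# Illustration of top_k_filtering (toy values, for clarity only): with
# logits = torch.tensor([[1.0, 3.0, 2.0, 0.5]]) and TOP_K = 2, every entry below the
# second-largest value (2.0) is set to -inf, so after softmax only the tokens with
# logits 3.0 and 2.0 can ever be sampled.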
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import os
import sys
import logging
import traceback
from logging.handlers import RotatingFileHandler
from .windowsDeviceAPI import WindowsDeviceAPI
from .APIDefine import KEYBOARD_CMD_LIST, MOUSE_CMD_LIST, KEY_INPUT, KEY_INPUTSTRING, MOUSE_MOVE, MOUSE_CLICK, \
MOUSE_DOUBLECLICK, MOUSE_RIGHTCLICK, MOUSE_LONGCLICK, MOUSE_DRAG, LOG_DEFAULT, LOG_FORMAT, KEY_PRESS, KEY_RELEASE
from ...iDevice import IDevice
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir)
class WindowsDevice(IDevice):
def __init__(self, platform):
IDevice.__init__(self, platform)
self.__logger = None
self.__deviceApi = WindowsDeviceAPI(platform)
def initialize(self, log_dir, **kwargs):
level = kwargs.pop('level') if 'level' in kwargs else logging.DEBUG
hwnd = kwargs.pop('hwnd') if 'hwnd' in kwargs else None
query_path = kwargs.pop('query_path') if 'query_path' in kwargs else None
window_size = kwargs.pop('window_size') if 'window_size' in kwargs else None
if not self._LogInit(log_dir, level):
raise RuntimeError("init log failed")
self.__deviceApi.Initialize(hwnd=hwnd, query_path=query_path, window_size=window_size, **kwargs)
hwnd = self.__deviceApi.window_handle
if hwnd:
self.__logger.info("find window: %x", hwnd)
self.__logger.info("logging start")
return True
def deInitialize(self):
return self.__deviceApi.DeInitialize()
def getScreen(self, **kwargs):
img = self.__deviceApi.ScreenCap(**kwargs)
return img
def doAction(self, **kwargs):
aType = kwargs['aType']
if aType in KEYBOARD_CMD_LIST:
return self.keyboardCMD(**kwargs)
if aType in MOUSE_CMD_LIST:
return self.mouseCMD(**kwargs)
raise Exception("unknown action type: %s, %s" % (aType, kwargs))
def keyboardCMD(self, **kwargs):
try:
aType = kwargs['aType']
keys = kwargs.get('keys', None)
key_string = kwargs.get('key_string', None)
long_click_time = kwargs.get('long_click_time', 0)
if aType == KEY_INPUT:
self.__logger.info("key input, keys: %s", keys)
self.__deviceApi.InputKeys(keys, long_click_time)
elif aType == KEY_INPUTSTRING:
self.__logger.info("key input string, key_string: %s", key_string)
self.__deviceApi.InputStrings(key_string)
elif aType == KEY_PRESS:
self.__logger.info("key_press: %s", keys)
self.__deviceApi.PressKey(keys)
elif aType == KEY_RELEASE:
self.__logger.info("key_release: %s", keys)
self.__deviceApi.ReleaseKey(keys)
except Exception as e:
self.__logger.error('keyboardCMD error: %s', e)
raise e
def mouseCMD(self, **kwargs):
try:
aType = kwargs['aType']
px = kwargs.get('px', None)
py = kwargs.get('py', None)
by_post = kwargs.get('by_post', False)
long_click_time = kwargs.get('long_click_time', 0)
fromX = kwargs.get('fromX', None)
fromY = kwargs.get('fromY', None)
toX = kwargs.get('toX', None)
toY = kwargs.get('toY', None)
if aType == MOUSE_MOVE:
self.__logger.info("mouse move, px: %s, py: %s", px, py)
self.__deviceApi.MouseMove(px, py)
elif aType == MOUSE_CLICK:
self.__logger.info("mouse click, px: %s, py: %s", px, py)
self.__deviceApi.MouseClick(px, py, by_post)
elif aType == MOUSE_DOUBLECLICK:
self.__logger.info("mouse double click, px: %s, py: %s", px, py)
self.__deviceApi.MouseDoubleClick(px, py)
elif aType == MOUSE_RIGHTCLICK:
self.__logger.info("mouse right click, px: %s, py: %s", px, py)
self.__deviceApi.MouseRightClick(px, py)
elif aType == MOUSE_LONGCLICK:
self.__logger.info("mouse long click, px: %s, py: %s, long_click_time: %s",
px,
py,
long_click_time)
self.__deviceApi.MouseLongClick(px, py, long_click_time)
elif aType == MOUSE_DRAG:
self.__logger.info("mouse drag, fromX: %s, fromY: %s, toX: %s, toY: %s",
fromX,
fromY,
toX,
toY)
self.__deviceApi.MouseDrag(fromX, fromY, toX, toY)
except Exception as e:
self.__logger.error('mouseCMD error: %s', e)
raise e
def _LogInit(self, log_dir, level):
if not isinstance(log_dir, str):
logging.error('wrong log_dir when init LOG, log_dir: %s', log_dir)
return False
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.__logger = logging.getLogger(LOG_DEFAULT)
if not self.__logger.handlers:
console = logging.StreamHandler()
formatter = logging.Formatter(LOG_FORMAT)
console.setFormatter(formatter)
fileHandler = RotatingFileHandler(filename=os.path.join(log_dir, 'DeviceAPI.log'),
maxBytes=2048000,
backupCount=10)
fileHandler.setFormatter(formatter)
self.__logger.addHandler(fileHandler)
self.__logger.addHandler(console)
self.__logger.setLevel(level)
return True
# def _GetValuesInkwargs(self, key, isNessesary, defaultValue, kwargs):
# try:
# if not isNessesary:
# if key not in kwargs:
# return defaultValue
# else:
# return kwargs[key]
# else:
# return kwargs[key]
# except KeyError as e:
# self.__logger.error(e)
# raise e
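# Example usage sketch (illustrative; the platform value, log directory, and coordinates
# below are assumptions, not taken from this module):
#
# device = WindowsDevice(platform)
# device.initialize(log_dir="./log", hwnd=None)
# device.doAction(aType=MOUSE_CLICK, px=100, py=200)          # dispatched to mouseCMD
# device.doAction(aType=KEY_INPUTSTRING, key_string="hello")  # dispatched to keyboardCMD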
|
# use KMP algorithm
# https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
dic = self.prefix(needle)
i = j = 0
while i < len(haystack) and j < len(needle):
if needle[j] == haystack[i]:
i += 1
j += 1
elif j == 0:
i += 1
else:
j = dic[j - 1]
else:
if j == len(needle):
return i - j
return -1
def prefix(self, s: str) -> {int: int}:
dictionary = {0: 0}
for i in range(1, len(s)):
j = dictionary[i - 1]
while j > 0 and s[j] != s[i]:
j = dictionary[j - 1]
if s[j] == s[i]:
j += 1
dictionary[i] = j
return dictionary
s = Solution()
print(s.strStr("hello", "ll"))
print(s.strStr("aaaaa", "baa"))
print(s.strStr("", ""))
print(s.strStr("lenin & stalin", "in"))
|
import os
access_token_key = os.getenv('TWITTER_ACCESS_TOKEN_KEY', '')
access_token_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET', '')
consumer_key = os.getenv('TWITTER_CONSUMER_KEY', '')
consumer_secret = os.getenv('TWITTER_CONSUMER_SECRET', '')
|
from beluga.visualization import BelugaPlot
from beluga.visualization.datasources import Dill
plots = BelugaPlot('./data.dill', default_sol=-1, default_step=-1, renderer='matplotlib')
plots.add_plot().line_series('v', 'h') \
.xlabel('v [$m/s$]').ylabel('h [m]') \
.title('h-v Plot')
plots.add_plot().line_series('theta*180/3.14159', 'h') \
.xlabel(r'$\theta$ [deg]').ylabel('$h$ [m]') \
.title('Downrange')
plots.add_plot().line_series('theta*180/3.14159', 'phi*180/3.14159') \
.xlabel(r'$\theta$ [deg]').ylabel(r'$\phi$ [deg]') \
.title('Ground Track')
plots.add_plot() \
.line('t', 'alpha') \
.line('t', 'bank') \
.xlabel('t (s)').ylabel('alpha (degrees)') \
.title('Control history')
plots.add_plot().line_series('t', 'gam*180/3.14159') \
.xlabel('t [s]').ylabel(r'$\gamma$ [deg]') \
.title('Flight Path Angle')
plots.add_plot().line_series('t', 'lamV') \
.xlabel('t [s]').ylabel(r'$\lambda_v$') \
.title(r'$\lambda_v$')
plots.render()
|
"""
Product API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
"""
import os
import logging
from unittest import TestCase
from unittest.mock import patch
from flask_api import status # HTTP Status Codes
from service.models import db
from service.service import app, init_db, internal_server_error
from tests.product_factory import ProductFactory
SHOPCART_ENDPOINT = os.getenv('SHOPCART_ENDPOINT', 'http://localhost:5000/shopcarts')
######################################################################
# T E S T C A S E S
######################################################################
class TestProductServer(TestCase):
""" REST API Server Tests """
@classmethod
def setUpClass(cls):
""" This runs once before the entire test suite """
init_db()
app.debug = False
app.testing = True
app.config["SQLALCHEMY_DATABASE_URI"] = app.config["TEST_DATABASE_URI"]
@classmethod
def tearDownClass(cls):
""" This runs once after the entire test suite """
db.session.close()
def setUp(self):
""" This runs before each test """
self.app = app.test_client()
db.drop_all() # clean up the last tests
db.create_all() # create new tables
def tearDown(self):
""" This runs after each test """
db.session.remove()
db.drop_all()
def _create_products(self, count):
""" Factory method to create products in bulk """
products = []
for _ in range(count):
test_product = ProductFactory()
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="application/json"
)
self.assertEqual(
resp.status_code, status.HTTP_201_CREATED, "Could not create test product"
)
new_product = resp.get_json()
test_product.id = new_product["id"]
products.append(test_product)
return products
######################################################################
# P L A C E T E S T C A S E S H E R E
######################################################################
def test_index(self):
""" Test index call """
resp = self.app.get("/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_create_product(self):
""" Create a new Product """
test_product = ProductFactory()
logging.debug(test_product)
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
location = resp.headers.get("Location", None)
self.assertIsNotNone(location)
# Check the data is correct
new_product = resp.get_json()
self.assertEqual(new_product["name"], test_product.name, "Names do not match")
self.assertEqual(
new_product["category"], test_product.category, "Categories do not match"
)
self.assertEqual(
new_product["description"], test_product.description, "Descriptions do not match"
)
self.assertEqual(
new_product["price"], test_product.price, "Prices do not match"
)
resp = self.app.get(location)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
new_product = resp.get_json()
self.assertEqual(new_product["name"], test_product.name, "Names do not match")
self.assertEqual(
new_product["category"], test_product.category, "Categories do not match"
)
self.assertEqual(
new_product["description"], test_product.description, "Descriptions do not match"
)
self.assertEqual(
new_product["price"], test_product.price, "Prices do not match"
)
def test_create_product_with_invalid_content_type(self):
""" Create a new Product with invalid content type"""
test_product = ProductFactory()
logging.debug(test_product)
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="text/plain"
)
self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def test_create_product_with_bad_request(self):
""" Create a new Product with bad request"""
test_product = ProductFactory()
logging.debug(test_product)
test_product.category = ""
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_product(self):
""" Get a single product by its ID """
# get the id of a product
test_product = self._create_products(1)[0]
resp = self.app.get("/api/products/{}".format(test_product.id))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data["name"], test_product.name)
def test_get_product_not_found(self):
""" Get a product that's not found """
resp = self.app.get("/api/products/0")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_get_product_bad_request(self):
""" Get a product with invalid product id """
resp = self.app.get("/api/products/a")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
resp = self.app.get("/api/products/3.3")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_a_product(self):
""" Delete a Product """
test_product = self._create_products(1)[0]
resp = self.app.delete("/api/products/{}".format(test_product.id))
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(len(resp.data), 0)
# make sure they are deleted
resp = self.app.get("/api/products/{}".format(test_product.id))
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_product_bad_request(self):
""" Get a product with invalid product id """
resp = self.app.delete("/api/products/a")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
resp = self.app.delete("/api/products/3.3")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_product(self):
""" Update an existing Product """
# create a product to update
test_product = ProductFactory()
test_product_name = test_product.name
test_product_description = test_product.description
test_product_price = test_product.price
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# update the product
new_product = resp.get_json()
new_product["category"] = "Education"
resp = self.app.put(
"/api/products/{}".format(new_product["id"]),
json=new_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_product = resp.get_json()
self.assertEqual(updated_product["category"], "Education")
# create an update request with partial information
part_product = resp.get_json()
part_product["category"] = ""
resp = self.app.put(
"/api/products/{}".format(part_product["id"]),
json=part_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_product = resp.get_json()
self.assertEqual(updated_product["category"], "Education")
part_product = resp.get_json()
part_product["name"] = ""
resp = self.app.put(
"/api/products/{}".format(part_product["id"]),
json=part_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_product = resp.get_json()
self.assertEqual(updated_product["name"], test_product_name)
part_product = resp.get_json()
part_product["description"] = ""
resp = self.app.put(
"/api/products/{}".format(part_product["id"]),
json=part_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_product = resp.get_json()
self.assertEqual(updated_product["description"], test_product_description)
part_product = resp.get_json()
part_product["price"] = ""
resp = self.app.put(
"/api/products/{}".format(part_product["id"]),
json=part_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_product = resp.get_json()
self.assertEqual(updated_product["price"], test_product_price)
def test_update_product_not_found(self):
""" Update a product that's not found """
test_product = ProductFactory()
resp = self.app.put(
"/api/products/0",
json=test_product.serialize(),
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_update_product_bad_request(self):
""" Update a product with bad request body """
# create a product to update
test_product = ProductFactory()
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# create an update request with bad request body
new_product = resp.get_json()
resp = self.app.put(
"/api/products/a",
json=new_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
resp = self.app.put(
"/api/products/3.3",
json=new_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
test_product = ProductFactory()
test_product_name = test_product.name
test_product_description = test_product.description
test_product_price = test_product.price
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# update the product
new_product = resp.get_json()
new_product["price"] = "a"
resp = self.app.put(
"/api/products/{}".format(new_product["id"]),
json=new_product,
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_product_list(self):
""" Get a list of Products """
self._create_products(5)
resp = self.app.get("/api/products")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), 5)
def test_query_product_list_by_category(self):
""" Query Products by Category """
products = self._create_products(10)
test_category = products[0].category
category_products = [product for product in products if product.category == test_category]
resp = self.app.get("/api/products", query_string="category={}".format(test_category))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(category_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["category"], test_category)
def test_query_product_list_by_name(self):
""" Query Products by Name """
products = self._create_products(10)
test_name = products[0].name
name_products = [product for product in products if product.name == test_name]
resp = self.app.get("/api/products", query_string="name={}".format(test_name))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
def test_query_product_list_by_description(self):
""" Query Products by Description """
products = self._create_products(10)
test_description = products[0].description
description_products = [product for product in products if product.description == test_description]
resp = self.app.get("/api/products", query_string="description={}".format(test_description))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(description_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["description"], test_description)
def test_query_product_by_price(self):
""" Query Products by Price Range """
products = self._create_products(10)
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
price_products = [product for product in products if product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="minimum={}&maximum={}".format(test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(price_products))
def test_query_product_list_by_name_category(self):
""" Query Products by Name and Category """
products = self._create_products(10)
test_name = products[0].name
test_category = products[0].category
name_category_products = [product for product in products if product.name == test_name and product.category == test_category]
resp = self.app.get("/api/products", query_string="name={}&category={}".format(test_name, test_category))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_category_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
self.assertEqual(product["category"], test_category)
def test_query_product_list_by_name_description(self):
""" Query Products by Name and Description """
products = self._create_products(10)
test_name = products[0].name
test_description = products[0].description
name_description_products = [product for product in products if product.name == test_name and product.description == test_description]
resp = self.app.get("/api/products", query_string="name={}&description={}".format(test_name, test_description))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_description_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
self.assertEqual(product["description"], test_description)
def test_query_product_by_name_price(self):
""" Query Products by Name and Price """
products = self._create_products(10)
test_name = products[0].name
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
name_price_products = [product for product in products if product.name == test_name and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="name={}&minimum={}&maximum={}".format(test_name, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
def test_query_product_list_by_category_description(self):
""" Query Products by Category and Description """
products = self._create_products(10)
test_category = products[0].category
test_description = products[0].description
category_description_products = [product for product in products if product.category == test_category and product.description == test_description]
resp = self.app.get("/api/products", query_string="category={}&description={}".format(test_category, test_description))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(category_description_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["category"], test_category)
self.assertEqual(product["description"], test_description)
def test_query_product_by_category_price(self):
""" Query Products by Category and Price """
products = self._create_products(10)
test_category = products[0].category
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
category_price_products = [product for product in products if product.category == test_category and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="category={}&minimum={}&maximum={}".format(test_category, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(category_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["category"], test_category)
def test_query_product_by_description_price(self):
""" Query Products by Description and Price """
products = self._create_products(10)
test_description = products[0].description
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
description_price_products = [product for product in products if product.description == test_description and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="description={}&minimum={}&maximum={}".format(test_description, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(description_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["description"], test_description)
def test_query_product_by_name_category_description(self):
""" Query Products by Name, Category and Description """
products = self._create_products(10)
test_name = products[0].name
test_category = products[0].category
test_description = products[0].description
name_category_description_products = [product for product in products if product.name == test_name and product.category == test_category and product.description == test_description]
resp = self.app.get("/api/products", query_string="name={}&category={}&description={}".format(test_name, test_category, test_description))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_category_description_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
self.assertEqual(product["category"], test_category)
self.assertEqual(product["description"], test_description)
def test_query_product_by_name_category_price(self):
""" Query Products by Name, Category and Price """
products = self._create_products(10)
test_name = products[0].name
test_category = products[0].category
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
name_category_price_products = [product for product in products if product.name == test_name and product.category == test_category and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="name={}&category={}&minimum={}&maximum={}".format(test_name, test_category, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_category_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
self.assertEqual(product["category"], test_category)
def test_query_product_by_name_description_price(self):
""" Query Products by Name, Description and Price """
products = self._create_products(10)
test_name = products[0].name
test_description = products[0].description
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
name_description_price_products = [product for product in products if product.name == test_name and product.description == test_description and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="name={}&description={}&minimum={}&maximum={}".format(test_name, test_description, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_description_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
self.assertEqual(product["description"], test_description)
def test_query_product_by_category_description_price(self):
""" Query Products by Category, Description and Price """
products = self._create_products(10)
test_category = products[0].category
test_description = products[0].description
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
category_description_price_products = [product for product in products if product.category == test_category and product.description == test_description and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="category={}&description={}&minimum={}&maximum={}".format(test_category, test_description, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(category_description_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["category"], test_category)
self.assertEqual(product["description"], test_description)
def test_query_product_by_name_category_description_price(self):
""" Query Products by Name, Category, Description and Price """
products = self._create_products(10)
test_name = products[0].name
test_category = products[0].category
test_description = products[0].description
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
name_category_description_price_products = [
product for product in products if product.name == test_name and product.category == test_category and product.description == test_description and product.price >= test_min_price and product.price <= test_max_price]
resp = self.app.get("/api/products", query_string="name={}&category={}&description={}&minimum={}&maximum={}".format(test_name, test_category, test_description, test_min_price, test_max_price))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(name_category_description_price_products))
# check the data just to be sure
for product in data:
self.assertEqual(product["name"], test_name)
self.assertEqual(product["category"], test_category)
self.assertEqual(product["description"], test_description)
def test_query_product_by_price_bad_request(self):
""" Query Products by Invalid Price Range """
products = self._create_products(10)
test_max_price = products[0].price * 10
test_min_price = products[0].price / 10
resp = self.app.get("/api/products", query_string="minimum={}".format(test_min_price))
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
resp = self.app.get("/api/products", query_string="maximum={}".format(test_max_price))
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
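    # The purchase tests below stub out the external shopcart service with
    # nested `patch` context managers: the outer patch fakes the GET that
    # looks up the user's shopcart, and the inner patches fake shopcart
    # creation and item addition, so only this service's routing and error
    # handling are exercised.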
def test_purchase_product_shopcart_exists(self):
'''Purchase a Product Shopcart Exists Successfully'''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data, b'{"message":"Product successfully added into the shopping cart"}\n')
def test_purchase_product_shopcart_no_exist(self):
'''Purchase a Product Shopcart Doesn't Exist Successfully'''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = []
with patch('service.service.create_shopcart') as create_shopcart_mock:
create_shopcart_mock.return_value.status_code=201
with patch('service.service.add_item_to_shopcart') as post_shopcartitem_mock:
post_shopcartitem_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data, b'{"message":"Product successfully added into the shopping cart"}\n')
def test_purchase_product_not_found(self):
'''Purchase a Product That's Not Found'''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": 4}
resp = self.app.post("/api/products/1/purchase", json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_purchase_product_cannot_add_shopcart(self):
'''Purchase a Product Not Added Into Shopcart (Shopcart Exists) '''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=400
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(resp.data, b'{"message": "Product was not added in the shopping cart because of an error"}\n')
def test_purchase_product_empty_user_id(self):
'''Purchase a Product Empty User ID'''
user_id = ""
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data, b'{"message": "Fields cannot be empty"}\n')
def test_purchase_product_empty_amount(self):
'''Purchase a Product Empty Amount '''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": ""}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data, b'{"message": "Fields cannot be empty"}\n')
def test_purchase_product_id_not_int(self):
'''Purchase a Product ID not Int '''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": 4}
resp = self.app.post("/api/products/{}/purchase".format("test"), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data, b'{"message": "Invalid Product ID. Must be Integer"}\n')
def test_purchase_amount_not_int(self):
'''Purchase a Product Amount not Int '''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": "hello"}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data, b'{"message": "Invalid Amount. Must be Integer"}\n')
def test_purchase_user_id_not_int(self):
'''Purchase a Product User ID not Int '''
user_id = "testing"
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
post_shopcart_item_mock.return_value.status_code=201
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data, b'{"message": "Invalid User ID. Must be Integer"}\n')
def test_purchase_unsuccessful_product_shopcart_error(self):
'''Purchase a Product Shopcart Doesn't Exist (ShopCart Creation Error)'''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = []
with patch('service.service.create_shopcart') as create_shopcart_mock:
create_shopcart_mock.return_value.status_code=400
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code,status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data,b'{"message": "Cannot create shopcart so cannot add product into shopping cart"}\n' )
def test_purchase_product_shopcart_unsuccessful_product(self):
'''Purchase a Product (Product Adding Error) '''
user_id = 101
with patch('requests.get') as get_shopcart_by_userid_mock:
get_shopcart_by_userid_mock.return_value.status_code = 200
get_shopcart_by_userid_mock.return_value.json.return_value = []
with patch('service.service.create_shopcart') as create_shopcart_mock:
create_shopcart_mock.return_value.status_code=201
with patch('service.service.add_item_to_shopcart') as post_shopcartitem_mock:
post_shopcartitem_mock.return_value.status_code=400
json = {"user_id": user_id, "amount": 4}
product = self._create_products(1)
resp = self.app.post("/api/products/{}/purchase".format(product[0].id), json=json, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(resp.data, b'{"message": "Product not successfully added into the shopping cart"}\n')
def test_data_validation_error(self):
'''Data Validation Error '''
test_product = ProductFactory()
data = test_product.serialize()
data.pop('name', None)
resp = self.app.post(
"/api/products", json=data, content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn(b'Bad Request', resp.data)
def test_404_not_found_error(self):
'''Resources Not Found Error '''
resp = self.app.get("/products/{}")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
self.assertIn(b'Not Found', resp.data)
def test_method_not_allowed_error(self):
'''Test Method Not Allowed Error '''
resp = self.app.post("/")
self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertIn(b'Method not Allowed', resp.data)
def test_unsupported_media_type_error(self):
'''Unsupported Media Requests '''
test_product = ProductFactory()
resp = self.app.post(
"/api/products", json=test_product.serialize(), content_type="text/javascript"
)
self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
self.assertIn(b'{"message": "Content-Type must be application/json"}\n', resp.data)
def test_internal_server_error(self):
'''Internal Server Error '''
resp = internal_server_error("internal serval error")
self.assertEqual(resp[1], status.HTTP_500_INTERNAL_SERVER_ERROR)
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'chercher'."""
from primaires.interpreteur.commande.commande import Commande
class CmdChercher(Commande):
"""Commande 'chercher'"""
def __init__(self):
"""Constructeur de la commande."""
Commande.__init__(self, "chercher", "lookfor")
self.schema = "<texte_libre>"
self.nom_categorie = "bouger"
self.aide_courte = "permet de chercher quelque chose"
self.aide_longue = \
"Cette commande permet de chercher quelque chose dans la " \
"salle où vous vous trouvez. Vous devez entrer ce que vous " \
"souhaitez trouver : il peut s'agir d'un mot contenu dans " \
"la description ou dans celle d'un détail ou alors de " \
"quelque chose de plus subtile. C'est une commande " \
"d'exploration avancée, c'est-à-dire que vous n'en aurez " \
"pas l'utilité dans toutes les salles, mais il pourra " \
"être utile parfois de chercher sous une pile de " \
"couvertures par exemple. Préférez préciser l'information " \
"à rechercher en un seul mot (il est possible d'en mettre " \
"plus mais ce sera bien moins courant)."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
texte = dic_masques["texte_libre"].texte
personnage.agir("bouger")
salle = personnage.salle
personnage << "Vous commencez à chercher {}.".format(texte)
salle.envoyer("{} commence à chercher quelque chose...", personnage)
personnage.etats.ajouter("recherche")
yield 7
if "recherche" in personnage.etats:
personnage.etats.retirer("recherche")
nb = salle.script["recherche"].executer(personnage=personnage,
salle=salle, texte=texte)
if nb == 0:
personnage << "Vous n'avez rien trouvé qui vaille la " \
"peine d'en parler."
|
/home/runner/.cache/pip/pool/b8/2b/4a/fe37f69cfbb893fef0f3566d4ce7a89098a8a5665d3203d35344f013d4
|
import sys
import os
import curses
import subprocess
from typing import Any, Dict
from aorc.state import AorcState
from aorc.aorc_doit import *
from simple_curses import View
import simple_curses.validator as validator
def state_from_view_values(old_state: AorcState, view_values: Dict[str, Any]) -> AorcState:
new_state = old_state.copy()
vals = view_values
new_state.__dict__.update(vals)
return new_state
def run_config_action(app, view: View, context):
config_keys = [
"config_exception_file",
"config_v14_command_file",
"config_quick_push_file",
"config_save_file",
"config_pid_file",
"config_policy_name",
]
# this next statement also performs validation at the field level
invalid_values = {}
view_values: Dict[str, str] = view.get_values()
def check_value_for(k, a_validator):
v = view_values[k]
if len(v) == 0 or a_validator.validate(view_values[k]) is None:
invalid_values[k] = view_values[k]
def make_error_msg():
err_msg = []
for k in invalid_values.keys():
msg = "field {} has invalid value (not a valid file path) the value=[{}]".format(k, invalid_values[k])
err_msg.append(msg)
return ": ".join(err_msg)
check_value_for("config_exception_file", validator.Path())
check_value_for("config_v14_command_file", validator.Path())
check_value_for("config_quick_push_file", validator.Path())
check_value_for("config_save_file", validator.Path())
check_value_for("config_pid_file", validator.Path())
    if len(invalid_values) > 0:
        app.msg_error("{}".format(make_error_msg()))
    else:
        # process the data
        app.msg_info("Config save - success {}".format(view_values))
new_state = state_from_view_values(app.state, view_values)
app.state = new_state
|
import numpy as np
import tensorflow as tf
from garage.misc import logger, special
from garage.sampler import parallel_sampler
from garage.sampler import singleton_pool
from garage.sampler.base import BaseSampler
from garage.sampler.utils import truncate_paths
from garage.tf.misc import tensor_utils
def worker_init_tf(g):
g.sess = tf.Session()
g.sess.__enter__()
def worker_init_tf_vars(g):
g.sess.run(tf.global_variables_initializer())
class BatchSampler(BaseSampler):
def start_worker(self):
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(worker_init_tf)
parallel_sampler.populate_task(self.algo.env, self.algo.policy)
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(worker_init_tf_vars)
def shutdown_worker(self):
parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr):
cur_policy_params = self.algo.policy.get_param_values()
cur_env_params = self.algo.env.get_param_values()
paths = parallel_sampler.sample_paths(
policy_params=cur_policy_params,
env_params=cur_env_params,
max_samples=self.algo.batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
if self.algo.whole_paths:
return paths
else:
paths_truncated = truncate_paths(paths, self.algo.batch_size)
return paths_truncated
def process_samples(self, itr, paths):
baselines = []
returns = []
max_path_length = self.algo.max_path_length
if hasattr(self.algo.baseline, "predict_n"):
all_path_baselines = self.algo.baseline.predict_n(paths)
else:
all_path_baselines = [
self.algo.baseline.predict(path) for path in paths
]
for idx, path in enumerate(paths):
path_baselines = np.append(all_path_baselines[idx], 0)
deltas = path["rewards"] + \
self.algo.discount * path_baselines[1:] - \
path_baselines[:-1]
path["advantages"] = special.discount_cumsum(
deltas, self.algo.discount * self.algo.gae_lambda)
path["deltas"] = deltas
for idx, path in enumerate(paths):
# baselines
path['baselines'] = all_path_baselines[idx]
baselines.append(path['baselines'])
# returns
path["returns"] = special.discount_cumsum(path["rewards"],
self.algo.discount)
returns.append(path["returns"])
# make all paths the same length
obs = [path["observations"] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
actions = [path["actions"] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path["rewards"] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
returns = [path["returns"] for path in paths]
returns = tensor_utils.pad_tensor_n(returns, max_path_length)
advantages = [path["advantages"] for path in paths]
advantages = tensor_utils.pad_tensor_n(advantages, max_path_length)
baselines = tensor_utils.pad_tensor_n(baselines, max_path_length)
agent_infos = [path["agent_infos"] for path in paths]
agent_infos = tensor_utils.stack_tensor_dict_list([
tensor_utils.pad_tensor_dict(p, max_path_length)
for p in agent_infos
])
env_infos = [path["env_infos"] for path in paths]
env_infos = tensor_utils.stack_tensor_dict_list([
tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos
])
valids = [np.ones_like(path["returns"]) for path in paths]
valids = tensor_utils.pad_tensor_n(valids, max_path_length)
average_discounted_return = (np.mean(
[path["returns"][0] for path in paths]))
undiscounted_returns = [sum(path["rewards"]) for path in paths]
ent = np.sum(
self.algo.policy.distribution.entropy(agent_infos) *
valids) / np.sum(valids)
samples_data = dict(
observations=obs,
actions=actions,
rewards=rewards,
advantages=advantages,
baselines=baselines,
returns=returns,
valids=valids,
agent_infos=agent_infos,
env_infos=env_infos,
paths=paths,
average_return=np.mean(undiscounted_returns),
)
logger.record_tabular('Iteration', itr)
logger.record_tabular('AverageDiscountedReturn',
average_discounted_return)
logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
logger.record_tabular('NumTrajs', len(paths))
logger.record_tabular('Entropy', ent)
logger.record_tabular('Perplexity', np.exp(ent))
logger.record_tabular('StdReturn', np.std(undiscounted_returns))
logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
logger.record_tabular('MinReturn', np.min(undiscounted_returns))
return samples_data
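# ---------------------------------------------------------------------------
# Added sketch (not part of the original module): the advantage computation in
# `process_samples` above relies on discounted cumulative sums of the TD
# residuals delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), accumulated with
# factor gamma * lambda (generalized advantage estimation). The helper below
# mirrors what `garage.misc.special.discount_cumsum` is assumed to compute.
def _discount_cumsum_sketch(x, discount):
    """Return y with y[t] = sum_{k >= t} discount**(k - t) * x[k]."""
    y = np.zeros(len(x))
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + discount * running
        y[t] = running
    return y
if __name__ == "__main__":
    # Tiny worked example with a 3-step path and hypothetical baseline values.
    rewards = np.array([1.0, 0.0, 2.0])
    baselines = np.append(np.array([0.5, 0.4, 0.3]), 0)  # V(s_0..s_2), terminal 0
    gamma, gae_lambda = 0.99, 0.95
    deltas = rewards + gamma * baselines[1:] - baselines[:-1]
    print("advantages:", _discount_cumsum_sketch(deltas, gamma * gae_lambda))
    print("returns   :", _discount_cumsum_sketch(rewards, gamma))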
|
import batchglm.data as data_utils
from batchglm.models.glm_nb import AbstractEstimator, EstimatorStoreXArray, InputData, Model
from batchglm.models.base_glm.utils import closedform_glm_mean, closedform_glm_var
from batchglm.models.glm_nb.utils import closedform_nb_glm_logmu, closedform_nb_glm_logphi
import batchglm.train.tf.ops as op_utils
import batchglm.train.tf.train as train_utils
from batchglm.train.tf.base import TFEstimatorGraph, MonitoredTFEstimator
from batchglm.train.tf.base_glm import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM, EstimatorGraphGLM, FullDataModelGraphGLM, BasicModelGraphGLM
from batchglm.train.tf.base_glm import ESTIMATOR_PARAMS, ProcessModelGLM, ModelVarsGLM
from batchglm.train.tf.base_glm import HessiansGLM, FIMGLM, JacobiansGLM
from batchglm.train.tf.base_glm_all import EstimatorAll, EstimatorGraphAll, FIMGLMALL, HessianGLMALL, JacobiansGLMALL
import batchglm.utils.random as rand_utils
from batchglm.utils.linalg import groupwise_solve_lm
from batchglm import pkg_constants
|
from reportlab.lib.styles import ParagraphStyle as PS
from reportlab.platypus import PageBreak
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.frames import Frame
from reportlab.lib.units import cm
from reportlab.platypus import SimpleDocTemplate, Image
class GLQRPrint(BaseDocTemplate):
def __init__(self, filename, **kw):
self.image=""
self.customerName="";
self.images=[]
self.header=[]
self.ticketContent= []
self.series= []
self.amounts= []
self.folios= []
self.dates= []
self.products= []
self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
template = PageTemplate('normal', [Frame(2.5*cm, 4.5*cm, 15*cm, 25*cm, id='F1')])
self.addPageTemplates(template)
self.centered = PS(name = 'centered',
fontSize = 10,
leading = 11,
alignment = 1,
spaceAfter = 0)
self.space = PS(name = 'space',
fontSize = 17,
leading = 16,
alignment = 1,
spaceAfter = 15)
def setImage(self, image):
self.image= image
def setCustomer(self, customer):
self.customerName= customer
def afterFlowable(self, flowable):
"Registers TOC entries."
if flowable.__class__.__name__ == 'Paragraph':
text = flowable.getPlainText()
style = flowable.style.name
if style == 'Heading1':
self.notify('TOCEntry', (0, text, self.page))
if style == 'Heading2':
self.notify('TOCEntry', (1, text, self.page))
def setHeader(self, header):
self.header= header
def addImage(self, image):
self.images.append(image)
def addDate(self, date):
self.dates.append(date)
def addSerie(self, serie):
self.series.append(serie)
def addFolio(self, folio):
self.folios.append(folio)
def addAmount(self, amount):
self.amounts.append(amount)
def addProduct(self, product):
self.products.append(product)
def generateTicket(self):
currentIndex=0
for currentImage in self.images:
self.ticketContent.append(Image(currentImage))
self.ticketContent.append(Paragraph('<b>Serie : '+ self.series[currentIndex] +' </b>', self.centered))
self.ticketContent.append(Paragraph('<b>Folio : '+ self.folios[currentIndex] +' </b>', self.centered))
self.ticketContent.append(Paragraph('<b>Cantidad : $'+ str(self.amounts[currentIndex]) +' </b>', self.centered))
self.ticketContent.append(Paragraph('<b>Cliente : '+ str(self.customerName) +' </b>', self.centered))
self.ticketContent.append(PageBreak())
currentIndex= currentIndex+1
self.multiBuild(self.ticketContent)
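# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original file); the file names and
# values below are hypothetical. Each add*() call queues data for one ticket
# page, and generateTicket() renders them all into the PDF.
if __name__ == "__main__":
    doc = GLQRPrint("tickets.pdf")
    doc.setCustomer("Jane Doe")
    doc.addImage("qr_0001.png")      # hypothetical QR image on disk
    doc.addSerie("A")
    doc.addFolio("0001")
    doc.addAmount(150.0)
    doc.addDate("2021-05-01")
    doc.addProduct("General admission")
    doc.generateTicket()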
|
import matplotlib.pyplot as plt
#import tabulate
import json
log_dir = 'log/'
experiment = ['baseline', 'N', 'GC', 'BN', 'R2', 'AA', 'AC'] # baseline, BN, GC, N, AA, AC
versions = '100'#['100','200','300','400']
results = []
loss = []
acc = []
for e in experiment:
xp_path = log_dir+e+versions
with open(xp_path + '/results.json') as f_r:
results.append(json.loads(f_r.read()))
with open(xp_path + '/visdom.log') as f_r:
dat=f_r.read().split(']\n')
loss.append(json.loads(dat[0][11:]))
acc.append(json.loads(dat[1][11:]))
fig, ax1 = plt.subplots(figsize=(6, 6))
ax1.set_title("Training losses on different experiments for dataset 100")
ax1.set_xlabel("Epoch[-]")
ax1.set_ylabel("Loss[-]")
ax1.set_ylim([0,1.5])
for v in loss:
x=v['data'][0]['x']
y=v['data'][0]['y']
ax1.plot(x,y)
ax1.legend(experiment)
plt.savefig(log_dir +'train_loss_comparison', bbox_inches='tight', pad_inches=0.1)
fig, ax2 = plt.subplots(figsize=(6, 6))
ax2.set_title("Validation losses on different experiments for dataset 100")
ax2.set_xlabel("Epoch[-]")
ax2.set_ylabel("Loss[-]")
ax2.set_ylim([0,1.5])
for v in loss:
x=v['data'][1]['x']
y=v['data'][1]['y']
ax2.plot(x,y)
ax2.legend(experiment)
plt.savefig(log_dir +'valid_loss_comparison', bbox_inches='tight', pad_inches=0.1)
fig, ax1 = plt.subplots(figsize=(6, 6))
ax1.set_title("Training Top-1 accurracy on different experiments for dataset 100")
ax1.set_xlabel("Epoch[-]")
ax1.set_ylabel("Top-1 accurracy[-]")
#ax1.set_ylim([0,1])
for v in acc:
x=v['data'][0]['x']
y=v['data'][0]['y']
ax1.plot(x,y)
ax1.legend(experiment)
plt.savefig(log_dir +'train_acc_comparison', bbox_inches='tight', pad_inches=0.1)
fig, ax2 = plt.subplots(figsize=(6, 6))
ax2.set_title("Validation Top-1 accurracy on different experiments for dataset 100")
ax2.set_xlabel("Epoch[-]")
ax2.set_ylabel("Top-1 accurracy[-]")
#ax2.set_ylim([0,1])
for v in acc:
x=v['data'][1]['x']
y=v['data'][1]['y']
ax2.plot(x,y)
ax2.legend(experiment)
plt.savefig(log_dir +'valid_acc_comparison', bbox_inches='tight', pad_inches=0.1)
print("Test Top-1 accuracy on different experiments for dataset 100")
print("Version\tAccuracy")
for v in results:
print('{0}\t\t{1:.2f}%'.format(experiment[results.index(v)],v['test_acc']*100.))
with open(log_dir + 'results.txt','w') as f:
print("Test Top-1 accuracy on different experiments for dataset 100",file=f)
print("Version\tAccuracy",file=f)
for v in results:
print('{0}\t\t{1:.2f}%'.format(experiment[results.index(v)],v['test_acc']*100.),file=f)
|
import argparse
import csv
import datetime
import json
import pytz
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('zoom', help='input zoom yaml file')
parser.add_argument('papers', help='input paper csv file')
parser.add_argument('problems', help='input open problems csv file')
parser.add_argument('calendar', help='input calendar json file')
parser.add_argument('chairs', help='input chairs csv file')
parser.add_argument('output', help='output html file')
args = parser.parse_args()
papers = {}
sessions = {}
events = []
problems = []
chairs = {}
in_fmt = "%Y-%m-%d" + "T" + "%H:%M:%S%z"
with open(args.zoom) as f:
zoom = yaml.safe_load(f)
with open(args.papers) as f:
reader = csv.DictReader(f)
for paper in reader:
papers[paper['UID']] = paper
paper_sessions = paper['session'].split('|')
session = sessions.get(paper_sessions[0])
if session == None:
session = []
sessions[paper_sessions[0]] = session
session.append({ 'UID':paper['UID'], 'position':paper['position_1'], 'zoom':paper['zoom_1'] })
if len(paper_sessions) > 1:
session = sessions.get(paper_sessions[1])
if session == None:
session = []
sessions[paper_sessions[1]] = session
session.append({ 'UID':paper['UID'], 'position':paper['position_2'], 'zoom':paper['zoom_2'] })
with open(args.problems) as f:
reader = csv.DictReader(f)
for paper in reader:
problems.append(paper)
with open(args.calendar) as f:
events = json.load(f)
with open(args.chairs) as f:
reader = csv.DictReader(f)
for row in reader:
chairs[row['UID']] = row['chair']
events.sort(key=lambda e: e['start'])
with open(args.output, 'w') as f:
# print('<!DOCTYPE html>\n<html lang="en">\n<head>\n<title>COLT 2020 Schedule</title>\n</head>\n<body>\n<table border="1">', file=f)
print('{% set active_page = "(plain schedule)" %}', file=f)
print('{% set page_title = "The No-Frills Schedule" %}', file=f)
print('{% extends "base.html" %}', file=f)
print('{% block content %}', file=f)
print('<table border="1">', file=f)
for event in events:
if event['link'] != '':
desc = '<a href="{0}"><strong>{1}</strong></a>'.format(event['link'], event['title'])
else:
desc = event['title']
if event['title'].find('Session ') >= 0:
desc += ' (Session chair: {0})<br />\n<a href="{1}" target="_blank">[Zoom link for plenary]</a>'.format(chairs[event['title']], zoom['plenary'][0])
elif (event['title'].find('Session ') >= 0) or (event['title'].find('Keynote ') >= 0) or (event['title'].find('Open Problems') >= 0) or (event['title'].find('Business Meeting') >= 0):
desc += '<br />\n<a href="{0}" target="_blank">[Zoom link for plenary]</a>'.format(zoom['plenary'][0])
start = datetime.datetime.strptime(event['start'],"%Y-%m-%dT%H:%M:%S%z").astimezone(pytz.timezone('Etc/GMT+12'))
start_date = start.strftime('%A %Y-%m-%d')
start_time = start.strftime('%H:%M <a href="https://www.timeanddate.com/time/zones/aoe">AoE</a>')
print(' <tr><td style="padding: 5px; white-space: nowrap; text-align: right">{0}<br />{1}</td><td style="padding: 5px">{2}</td></tr>\n'.format(start_date, start_time, desc), file=f)
if event['title'].find('Open Problems') >= 0:
problems.sort(key=lambda p: p['UID'])
for paper in problems:
title = paper['title']
authors = ', '.join(paper['authors'].split('|'))
position = paper['position']
desc = '<a href="papers/paper_{0}.html"><strong>{1}</strong></a><br />{2}'.format(uid, title, authors)
print(' <tr><td></td><td style="padding: 5px">{0}. {1}</td></tr>\n'.format(position, desc), file=f)
if event['title'].find('Session ') >= 0:
session_paper_keys = sessions[event['title']]
session_paper_keys.sort(key=lambda p: int(p['position']))
for paper_key in session_paper_keys:
uid = paper_key['UID']
position = paper_key['position']
paper = papers[uid]
title = paper['title']
authors = ', '.join(paper['authors'].split('|'))
zoom_link = paper_key['zoom']
desc = '<a href="papers/paper_{0}.html"><strong>{1}</strong></a><br />{2}<br /><a href="{3}" target="_blank">[Zoom link for poster session]</a>'.format(uid, title, authors, zoom_link)
print(' <tr><td></td><td style="padding: 5px">{0}. {1}</td></tr>\n'.format(position, desc), file=f)
#print('</table>\n</body>\n</html>', file=f)
print('</table>', file=f)
print('{% endblock %}', file=f)
|
import numpy as np
import pandas as pd
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from .base import OutlierDetector
__all__ = [
'RobustCovariance',
'LocalOutlierFactor',
'IsolationForest',
'OneClassSVM',
]
class OneClassSVM(OneClassSVM, OutlierDetector): pass
class RobustCovariance(EllipticEnvelope, OutlierDetector): pass
class LocalOutlierFactor(LocalOutlierFactor, OutlierDetector): pass
class IsolationForest(IsolationForest, OutlierDetector): pass
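# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module). It uses scikit-learn's
# IsolationForest directly under an alias so the wrapper classes above stay
# untouched; the wrappers are assumed to expose the same fit/predict API plus
# whatever `OutlierDetector` adds.
if __name__ == "__main__":
    from sklearn.ensemble import IsolationForest as SkIsolationForest
    rng = np.random.RandomState(0)
    X = np.vstack([
        rng.normal(0.0, 1.0, size=(100, 2)),   # inliers
        rng.normal(8.0, 1.0, size=(5, 2)),     # obvious outliers
    ])
    labels = SkIsolationForest(random_state=0).fit_predict(X)  # -1 = outlier
    print("flagged outliers:", int((labels == -1).sum()))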
|
class IndexedMessage:
__slots__ = ("message", "index")
def __init__(self, message: str, index: int) -> None:
"""Represents a message and its index within a collection
Args:
message: The message
index: The message index
"""
self.message = message
self.index = index
|
import requests
from geopy.geocoders import Nominatim
'''
Find a user's local representatives (and their contact details) given an
address string input
'''
def calculate_lat_long_from_address(address_string):
'''
Find latitude and longitude of address
Inputs:
address_string (str): user's address string
Returns:
(latitude, longitude) (floats): the lat and long of the inputed address
'''
geolocator = Nominatim(user_agent="issuetoimpact")
location = geolocator.geocode(address_string)
return (location.latitude, location.longitude)
def call_openstates_api(lat, long):
'''
Call openstates api to find local representatives and their contact
information, given address latitude and longitude
Inputs:
lat (float): address latitude
long (float): address longitude
Returns:
Api call response json
'''
query = """
{{
people(latitude: {lat}, longitude: {long}, first: 100) {{
edges {{
node {{
name
contact: contactDetails {{type, value}}
chamber: currentMemberships(classification:["upper", "lower"]) {{
post {{
label
}}
organization {{
name
classification
parent {{
name
}}
}}
}}
}}
}}
}}
}}
"""
variables = {
"lat": lat,
"long": long
}
open_states_api = 'https://openstates.org/graphql'
headers = {'X-API-Key':'67a3f9ad-bd88-4561-b6fd-7c4719f0b397'}
response = requests.post(open_states_api, headers=headers,
json={'query': query.format(**variables)})
return response.json()
def process_openstates_response(openstates_response):
'''
Generate representatives contact information dictionary from api response
Inputs:
openstates_response (json): api request response
Returns:
representatives (dict): representatives and contact info dictionary
'''
representatives = {}
for person in openstates_response["data"]["people"]["edges"]:
rep_dict = {}
rep_dict['name'] = person["node"]["name"]
rep_dict['chamber'] = person["node"]["chamber"][0]["organization"]["name"]
rep_dict['contact_details'] = {}
for contact in person["node"]["contact"]:
rep_dict['contact_details'][contact["type"]] = contact["value"]
representatives[rep_dict['name']] = rep_dict
return representatives
def find_rep_from_address(address_string):
'''
Given address string input, generate dictionary of local representatives
and their contact information
Inputs:
address_string (str): address string
Returns:
(dict) representatives contact information dictionary
'''
lat, long = calculate_lat_long_from_address(address_string)
openstates_rep_response = call_openstates_api(lat, long)
return process_openstates_response(openstates_rep_response)
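# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module). Running it requires
# network access and a valid OpenStates API key; the address is hypothetical.
if __name__ == "__main__":
    reps = find_rep_from_address("1200 W Harrison St, Chicago, IL 60607")
    for name, info in reps.items():
        print("{} ({})".format(name, info["chamber"]))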
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 22:31:22 2019
@author: student
"""
import threading
import time
import tkinter as tk
from tkinter import HORIZONTAL, Button, Entry, Frame, Label, Scale, Tk, mainloop
import numpy as np
import pinocchio as pin
import talos_conf as conf
#import romeo_conf as conf
import vizutils
from tsid_biped import TsidBiped
AXES = ['X', 'Y', 'Z']
class Scale3d:
def __init__(self, master, name, from_, to, tickinterval, length, orient, command):
self.s = 3 * [None]
for i in range(3):
self.s[i] = Scale(master, label='%s %s' % (name, AXES[i]), from_=from_[i], to=to[i],
tickinterval=tickinterval[i], orient=orient[i], length=length[i], command=command)
self.s[i].pack()
separator = Frame(height=2, bd=1, relief=tk.SUNKEN)
separator.pack(fill=tk.X, padx=5, pady=5)
def get(self):
return self.s[0].get(), self.s[1].get(), self.s[2].get()
class Entry3d:
def __init__(self, master, name):
self.s = 3 * [None]
for i in range(3):
Label(master, text='%s %s' % (name, AXES[i])).pack() # side=tk.TOP)
self.s[i] = Entry(master, width=5)
self.s[i].pack() # side=tk.BOTTOM)
separator = Frame(height=1, bd=1, relief=tk.SUNKEN)
separator.pack(fill=tk.X, padx=2, pady=2) # , side=tk.BOTTOM)
def get(self):
try:
return [float(self.s[i].get()) for i in range(3)]
except:
print("could not convert string to float", [self.s[i].get() for i in range(3)])
return 3 * [0.0]
scale_com, scale_RF, scale_LF = None, None, None
button_contact_RF, button_contact_LF = None, None
push_robot_active, push_robot_com_vel, com_vel_entry = False, 3 * [0.0], None
def update_com_ref_scale(value):
x, y, z = scale_com.get()
tsid.trajCom.setReference(com_0 + np.array([1e-2 * x, 1e-2 * y, 1e-2 * z]).T)
def update_RF_ref_scale(value):
x, y, z = scale_RF.get()
H_rf_ref = H_rf_0.copy()
    H_rf_ref.translation += np.array([1e-2 * x, 1e-2 * y, 1e-2 * z]).T
tsid.trajRF.setReference(H_rf_ref)
def update_LF_ref_scale(value):
x, y, z = scale_LF.get()
H_lf_ref = H_lf_0.copy()
    H_lf_ref.translation += np.array([1e-2 * x, 1e-2 * y, 1e-2 * z]).T
tsid.trajLF.setReference(H_lf_ref)
def switch_contact_RF():
if tsid.contact_RF_active:
tsid.remove_contact_RF()
button_contact_RF.config(text='Make contact right foot')
else:
tsid.add_contact_RF()
button_contact_RF.config(text='Break contact right foot')
def switch_contact_LF():
if tsid.contact_LF_active:
tsid.remove_contact_LF()
button_contact_LF.config(text='Make contact left foot')
else:
tsid.add_contact_LF()
button_contact_LF.config(text='Break contact left foot')
def toggle_wireframe_mode():
tsid.gui.setWireFrameMode('world', 'WIREFRAME')
def push_robot():
global push_robot_com_vel, push_robot_active
push_robot_com_vel = com_vel_entry.get()
push_robot_active = True
def create_gui():
"""thread worker function"""
global scale_com, scale_RF, scale_LF, button_contact_RF, button_contact_LF, com_vel_entry
master = Tk(className='TSID GUI')
scale_com = Scale3d(master, 'CoM', [-10, -15, -40], [10, 15, 40], [5, 5, 10], [200, 250, 300],
3*[HORIZONTAL], update_com_ref_scale)
scale_RF = Scale3d(master, 'Right foot', 3 * [-30], 3 * [30], 3 * [10], 3 * [300],
3*[HORIZONTAL], update_RF_ref_scale)
scale_LF = Scale3d(master, 'Left foot', 3 * [-30], 3 * [30], 3 * [10], 3 * [300],
3*[HORIZONTAL], update_LF_ref_scale)
button_contact_RF = Button(master, text='Break contact right foot', command=switch_contact_RF)
button_contact_RF.pack(side=tk.LEFT)
button_contact_LF = Button(master, text='Break contact left foot', command=switch_contact_LF)
button_contact_LF.pack(side=tk.LEFT)
Button(master, text='Toggle wireframe', command=toggle_wireframe_mode).pack(side=tk.LEFT)
# Frame(height=2, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
Button(master, text='Push robot CoM', command=push_robot).pack()
com_vel_entry = Entry3d(master, 'CoM vel')
mainloop()
def run_simu():
global push_robot_active
i, t = 0, 0.0
q, v = tsid.q, tsid.v
time_avg = 0.0
while True:
time_start = time.time()
tsid.comTask.setReference(tsid.trajCom.computeNext())
tsid.postureTask.setReference(tsid.trajPosture.computeNext())
tsid.rightFootTask.setReference(tsid.trajRF.computeNext())
tsid.leftFootTask.setReference(tsid.trajLF.computeNext())
HQPData = tsid.formulation.computeProblemData(t, q, v)
sol = tsid.solver.solve(HQPData)
if sol.status != 0:
print("QP problem could not be solved! Error code:", sol.status)
break
# tau = tsid.formulation.getActuatorForces(sol)
dv = tsid.formulation.getAccelerations(sol)
q, v = tsid.integrate_dv(q, v, dv, conf.dt)
i, t = i + 1, t + conf.dt
if push_robot_active:
push_robot_active = False
data = tsid.formulation.data()
if tsid.contact_LF_active:
J_LF = tsid.contactLF.computeMotionTask(0.0, q, v, data).matrix
else:
J_LF = np.zeros((0, tsid.model.nv))
if tsid.contact_RF_active:
J_RF = tsid.contactRF.computeMotionTask(0.0, q, v, data).matrix
else:
J_RF = np.zeros((0, tsid.model.nv))
J = np.vstack((J_LF, J_RF))
J_com = tsid.comTask.compute(t, q, v, data).matrix
A = np.vstack((J_com, J))
b = np.concatenate((np.array(push_robot_com_vel), np.zeros(J.shape[0])))
v = np.linalg.lstsq(A, b, rcond=-1)[0]
if i % conf.DISPLAY_N == 0:
tsid.display(q)
x_com = tsid.robot.com(tsid.formulation.data())
x_com_ref = tsid.trajCom.getSample(t).pos()
H_lf = tsid.robot.framePosition(tsid.formulation.data(), tsid.LF)
H_rf = tsid.robot.framePosition(tsid.formulation.data(), tsid.RF)
x_lf_ref = tsid.trajLF.getSample(t).pos()[:3]
x_rf_ref = tsid.trajRF.getSample(t).pos()[:3]
vizutils.applyViewerConfiguration(tsid.viz, 'world/com', x_com.tolist() + [0, 0, 0, 1.])
vizutils.applyViewerConfiguration(tsid.viz, 'world/com_ref', x_com_ref.tolist() + [0, 0, 0, 1.])
vizutils.applyViewerConfiguration(tsid.viz, 'world/rf', pin.SE3ToXYZQUATtuple(H_rf))
vizutils.applyViewerConfiguration(tsid.viz, 'world/lf', pin.SE3ToXYZQUATtuple(H_lf))
vizutils.applyViewerConfiguration(tsid.viz, 'world/rf_ref', x_rf_ref.tolist() + [0, 0, 0, 1.])
vizutils.applyViewerConfiguration(tsid.viz, 'world/lf_ref', x_lf_ref.tolist() + [0, 0, 0, 1.])
if i % 1000 == 0:
print("Average loop time: %.1f (expected is %.1f)" % (1e3 * time_avg, 1e3 * conf.dt))
time_spent = time.time() - time_start
time_avg = (i * time_avg + time_spent) / (i + 1)
if time_avg < 0.9 * conf.dt:
time.sleep(10 * (conf.dt - time_avg))
print("#" * conf.LINE_WIDTH)
print(" Test Task Space Inverse Dynamics ".center(conf.LINE_WIDTH, '#'))
print("#" * conf.LINE_WIDTH)
tsid = TsidBiped(conf, conf.viewer)
tsid.q0[2] = 1.02127
com_0 = tsid.robot.com(tsid.formulation.data())
H_rf_0 = tsid.robot.framePosition(tsid.formulation.data(), tsid.model.getFrameId(conf.rf_frame_name))
H_lf_0 = tsid.robot.framePosition(tsid.formulation.data(), tsid.model.getFrameId(conf.lf_frame_name))
vizutils.addViewerSphere(tsid.viz, 'world/com', conf.SPHERE_RADIUS, conf.COM_SPHERE_COLOR)
vizutils.addViewerSphere(tsid.viz, 'world/com_ref', conf.REF_SPHERE_RADIUS, conf.COM_REF_SPHERE_COLOR)
vizutils.addViewerSphere(tsid.viz, 'world/rf', conf.SPHERE_RADIUS, conf.RF_SPHERE_COLOR)
vizutils.addViewerSphere(tsid.viz, 'world/rf_ref', conf.REF_SPHERE_RADIUS, conf.RF_REF_SPHERE_COLOR)
vizutils.addViewerSphere(tsid.viz, 'world/lf', conf.SPHERE_RADIUS, conf.LF_SPHERE_COLOR)
vizutils.addViewerSphere(tsid.viz, 'world/lf_ref', conf.REF_SPHERE_RADIUS, conf.LF_REF_SPHERE_COLOR)
th_gui = threading.Thread(target=create_gui)
th_gui.start()
th_simu = threading.Thread(target=run_simu)
th_simu.start()
|
from punc_tokenizer import PuncTokenizer
|
#!/bin/python3
def solve(a):
    """Return the absolute difference between the sum of the first half of `a`
    and the sum of its second half, read from the end.
    Example: solve([1, 2, 3, 4]) == abs((1 + 2) - (4 + 3)) == 4.
    """
    n = len(a)
    sum1 = sum2 = 0
    for i in range(n // 2):
        sum1 += a[i]
        sum2 += a[n - i - 1]
    return abs(sum1 - sum2)
n = int(input().strip())
a = list(map(int, input().strip().split()))
result = solve(a)
print(result)
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizer adapter for Keras optimizer."""
from typing import Any, Callable, Optional, Union
import tensorflow as tf
from tensorflow_federated.python.learning.optimizers import optimizer
class KerasOptimizer(optimizer.Optimizer):
"""Adapter for keras optimzier as `tff.learning.optimizers.Optimizer`.
This class is expected to be instantiated in the context of
`tff.tf_computation` in which it is to be used. This is because the
`optimizer_fn` provided in constructor is going to be invoked, which will
create a keras optimizer instance, and we will force the creation of
`tf.Variable` objects which store the state of that keras optimizer.
  If this class is to be used as a "server optimizer", set
  `disjoint_init_and_next` to True: the `initialize` and `next` methods are then
  invoked in the context of *different* `tff.tf_computation`s, and TFF needs to
  carry the optimizer variables between the invocations.
  If this class is to be used as a "client optimizer", set
  `disjoint_init_and_next` to False: `initialize` and `next` are then invoked in
  the context of *the same* `tff.tf_computation`, so the variables of the Keras
  optimizer do not need to be handed over to TFF. (See the usage sketch at the
  end of this module.)
NOTE: This class is not meant to be exposed in public API for now. Rather, it
is used to convert the previous default support for keras optimizers to the
tff.learning.optimizers format.
"""
def __init__(self, optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer],
weights: Any, disjoint_init_and_next: bool):
"""Initializes `KerasOptimizer`.
Args:
optimizer_fn: A no-arg callable that creates and returns a
`tf.keras.optimizers.Optimizer`.
weights: A (possibly nested) structure of `tf.Variable` objects which are
supposed to be modified during call to the `next` method of the
optimizer.
      disjoint_init_and_next: A boolean. If True, the `initialize` and `next`
        methods are invoked in the context of *different* `tff.tf_computation`s
        (server-style use); if False, they are invoked in the same
        `tff.tf_computation` (client-style use).
"""
self._optimizer = optimizer_fn()
self._disjoint_init_and_next = disjoint_init_and_next
def mock_apply_gradients(opt, variables):
opt.apply_gradients([
(tf.zeros_like(w), w) for w in tf.nest.flatten(variables)
])
# Force the creation of tf.Variables controlled by the keras optimizer but
# keep the variables unmodified. For instance, the "step" variable will be
# 0, not 1, after this operation.
tf.function(mock_apply_gradients).get_concrete_function(
self._optimizer, weights)
def initialize(self, specs):
del specs # Unused.
if self._disjoint_init_and_next:
return self._optimizer.variables()
else:
return ()
def next(self, state, weights, gradients):
if self._disjoint_init_and_next:
tf.nest.map_structure(lambda v, s: v.assign(s),
self._optimizer.variables(), state)
self._optimizer.apply_gradients(
list(zip(tf.nest.flatten(gradients), tf.nest.flatten(weights))))
if self._disjoint_init_and_next:
return self._optimizer.variables(), weights
else:
return (), weights
def build_or_verify_tff_optimizer(
optimizer_fn: Union[Callable[[], tf.keras.optimizers.Optimizer],
optimizer.Optimizer],
trainable_weights: Optional[Any] = None,
disjoint_init_and_next: Optional[bool] = None) -> optimizer.Optimizer:
"""Returns `tff.learning.optimizers.Optimizer` for `optimizer_fn`.
  This helper function lets `tff.learning` keep backward compatibility with the
  older API that accepted a no-arg callable returning a
  `tf.keras.optimizers.Optimizer`; such a Keras optimizer has to be created
  eagerly inside each TFF computation. If `optimizer_fn` is already a
  `tff.learning.optimizers.Optimizer`, it is returned directly.
Args:
optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-argument
callable that constructs and returns a `tf.keras.optimizers.Optimizer`.
trainable_weights: Optional if `optimizer_fn` is a
`tff.learning.optimizers.Optimizer`. A (possibly nested) structure of
`tf.Variable` objects used to eagerly initialize Keras optimizers if
`optimizer_fn` is a callable.
    disjoint_init_and_next: Optional if `optimizer_fn` is a
      `tff.learning.optimizers.Optimizer`. A boolean used when `optimizer_fn`
      is a callable: if True, the `initialize` and `next` methods are invoked
      in *different* `tff.tf_computation`s; if False, in the same one.
Raises:
TypeError: Input `optimizer_fn` is not `tff.learning.optimizers.Optimizer`
or a callable.
Returns:
A `tff.learning.optimizers.Optimizer`.
"""
if isinstance(optimizer_fn, optimizer.Optimizer):
return optimizer_fn
elif callable(optimizer_fn):
return KerasOptimizer(optimizer_fn, trainable_weights,
disjoint_init_and_next)
else:
raise TypeError(
'`optimizer_fn` must be a callable or '
f'`tff.learning.optimizers.Optimizer`, got {type(optimizer_fn)}')
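# ---------------------------------------------------------------------------
# Hedged usage sketch, added for illustration only (not part of the original
# module). It exercises the two inputs accepted by
# `build_or_verify_tff_optimizer`: a no-arg Keras optimizer factory (wrapped in
# the `KerasOptimizer` adapter defined above) and a native
# `tff.learning.optimizers.Optimizer` (returned unchanged). `_demo_weights` and
# `_demo_grads` are hypothetical placeholders, and exact behavior may depend on
# the TensorFlow/TFF versions in use.
if __name__ == '__main__':
  _demo_weights = [tf.Variable([1.0, 2.0]), tf.Variable([[0.5]])]
  _demo_grads = [tf.constant([0.1, 0.1]), tf.constant([[0.2]])]
  # Callable input: wrapped in the KerasOptimizer adapter.
  _client_opt = build_or_verify_tff_optimizer(
      lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
      trainable_weights=_demo_weights,
      disjoint_init_and_next=False)  # client-style: state stays in the Keras object
  _state = _client_opt.initialize(None)  # specs are ignored by the adapter
  _state, _demo_weights = _client_opt.next(_state, _demo_weights, _demo_grads)
  # Native TFF optimizer input: returned as-is (assuming `sgdm.build_sgdm`
  # exists in the installed TFF version).
  from tensorflow_federated.python.learning.optimizers import sgdm
  _server_opt = build_or_verify_tff_optimizer(sgdm.build_sgdm(learning_rate=0.1))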
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training example of adaGaussian-mechanism differential privacy.
"""
import os
import mindspore.nn as nn
from mindspore import context
from mindspore.train.callback import ModelCheckpoint
from mindspore.train.callback import CheckpointConfig
from mindspore.train.callback import LossMonitor
from mindspore.nn.metrics import Accuracy
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.vision import Inter
import mindspore.common.dtype as mstype
from mindarmour.privacy.diff_privacy import DPModel
from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory
from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory
from mindarmour.utils.logger import LogUtil
from examples.common.networks.lenet5.lenet5_net import LeNet5
from dp_ada_gaussian_config import mnist_cfg as cfg
LOGGER = LogUtil.get_instance()
LOGGER.set_level('INFO')
TAG = 'Lenet5_train'
def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
num_parallel_workers=1, sparse=True):
"""
    Create a MNIST dataset pipeline for training or testing.
"""
# define dataset
ds1 = ds.MnistDataset(data_path)
# define operation parameters
resize_height, resize_width = 32, 32
rescale = 1.0 / 255.0
shift = 0.0
# define map operations
resize_op = CV.Resize((resize_height, resize_width),
interpolation=Inter.LINEAR)
rescale_op = CV.Rescale(rescale, shift)
hwc2chw_op = CV.HWC2CHW()
type_cast_op = C.TypeCast(mstype.int32)
# apply map operations on images
if not sparse:
one_hot_enco = C.OneHot(10)
ds1 = ds1.map(input_columns="label", operations=one_hot_enco,
num_parallel_workers=num_parallel_workers)
type_cast_op = C.TypeCast(mstype.float32)
ds1 = ds1.map(input_columns="label", operations=type_cast_op,
num_parallel_workers=num_parallel_workers)
ds1 = ds1.map(input_columns="image", operations=resize_op,
num_parallel_workers=num_parallel_workers)
ds1 = ds1.map(input_columns="image", operations=rescale_op,
num_parallel_workers=num_parallel_workers)
ds1 = ds1.map(input_columns="image", operations=hwc2chw_op,
num_parallel_workers=num_parallel_workers)
# apply DatasetOps
buffer_size = 10000
ds1 = ds1.shuffle(buffer_size=buffer_size)
ds1 = ds1.batch(batch_size, drop_remainder=True)
ds1 = ds1.repeat(repeat_size)
return ds1
if __name__ == "__main__":
    # This configuration can run in both PyNative mode and graph mode.
context.set_context(mode=context.GRAPH_MODE,
device_target=cfg.device_target)
network = LeNet5()
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
config_ck = CheckpointConfig(
save_checkpoint_steps=cfg.save_checkpoint_steps,
keep_checkpoint_max=cfg.keep_checkpoint_max)
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
directory='./trained_ckpt_file/',
config=config_ck)
# get training dataset
ds_train = generate_mnist_dataset(os.path.join(cfg.data_path, "train"),
cfg.batch_size)
if cfg.micro_batches and cfg.batch_size % cfg.micro_batches != 0:
        raise ValueError(
            "micro_batches must evenly divide batch_size")
    # Create a DP noise mechanism through the factory; the mechanism adds noise
    # to the gradients during training. Supported mechanisms are 'Gaussian',
    # whose noise level stays constant, and 'AdaGaussian', whose noise level
    # decays as training progresses.
noise_mech = NoiseMechanismsFactory().create(cfg.noise_mechanisms,
norm_bound=cfg.norm_bound,
initial_noise_multiplier=cfg.initial_noise_multiplier,
decay_policy='Exp')
net_opt = nn.Momentum(params=network.trainable_params(),
learning_rate=cfg.lr, momentum=cfg.momentum)
    # Create a monitor for DP training. The monitor computes and prints the
    # privacy budget (epsilon and delta) during training.
rdp_monitor = PrivacyMonitorFactory.create('rdp',
num_samples=60000,
batch_size=cfg.batch_size,
initial_noise_multiplier=cfg.initial_noise_multiplier,
per_print_times=234)
# Create the DP model for training.
model = DPModel(micro_batches=cfg.micro_batches,
norm_bound=cfg.norm_bound,
noise_mech=noise_mech,
network=network,
loss_fn=net_loss,
optimizer=net_opt,
metrics={"Accuracy": Accuracy()})
LOGGER.info(TAG, "============== Starting Training ==============")
model.train(cfg['epoch_size'], ds_train,
callbacks=[ckpoint_cb, LossMonitor(), rdp_monitor],
dataset_sink_mode=cfg.dataset_sink_mode)
LOGGER.info(TAG, "============== Starting Testing ==============")
ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-5_234.ckpt'
param_dict = load_checkpoint(ckpt_file_name)
load_param_into_net(network, param_dict)
ds_eval = generate_mnist_dataset(os.path.join(cfg.data_path, 'test'),
batch_size=cfg.batch_size)
acc = model.eval(ds_eval, dataset_sink_mode=False)
LOGGER.info(TAG, "============== Accuracy: %s ==============", acc)
|