content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
"""
This module contains a subroutine for spectrally accurate interpolation of
data that is known on a uniform grid in a periodic domain.
"""
import numpy as np
#from numba import njit
# Get machine precision
eps = np.finfo(float).eps
#@njit
def fourier_interpolation(fk, x):
    """
    Trigonometric (spectrally accurate) interpolation of periodic data.

    Interpolate data known at the N equidistant points
    xk = 2*pi*k/N, k = 0..N-1, of [0, 2pi) at arbitrary points x.
    Based on the MATLAB routine fourint.m in the DMSuite package by
    S.C. Reddy and J.A.C. Weideman, available at
    http://www.mathworks.com/matlabcentral/fileexchange/29
    or here:
    http://dip.sun.ac.za/~weideman/research/differ.html

    Parameters
    ----------
    fk : array_like, shape (N,)
        Function values at the equidistant grid points.
    x : array_like, shape (M,)
        Points at which the interpolant is evaluated.

    Returns
    -------
    numpy.ndarray, shape (M,)
        Interpolated values.
    """
    # Machine precision, used to nudge exact zeros away from the pole.
    eps = np.finfo(float).eps
    fk = np.asarray(fk)
    x = np.asarray(x)
    N = len(fk)
    # Equidistant grid points in [0, 2pi)
    xk = np.arange(N) * (2 * np.pi / N)
    # Alternating-sign weights of the barycentric trigonometric formula
    w = (-1.0) ** np.arange(N)
    # (M, N) matrix of half-differences (x_i - xk_j) / 2
    D = 0.5 * (x[:, None] - xk[None, :])
    # On-grid points give D == 0; shifting them by eps makes the
    # corresponding kernel entry dominate, so the interpolant reproduces
    # the data value there instead of dividing by zero.
    if N % 2 == 0:
        # Even N: cotangent kernel
        D = 1.0 / np.tan(D + eps * (D == 0))
    else:
        # Odd N: cosecant kernel
        D = 1.0 / np.sin(D + eps * (D == 0))
    # Barycentric evaluation: (D @ (w * fk)) / (D @ w)
    return np.dot(D, w * fk) / np.dot(D, w)
|
# Generated by Django 3.0.6 on 2020-05-17 18:14
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add `Expected_recovery_date` to the `diseases` model.

    Auto-generated by Django 3.0.6. Applied migrations should not be
    edited by hand, since their recorded state must stay consistent
    across environments.
    """

    dependencies = [
        ('disease', '0003_auto_20200517_1739'),
    ]

    operations = [
        migrations.AddField(
            model_name='diseases',
            name='Expected_recovery_date',
            # The concrete timestamp below was captured when the migration was
            # generated and is used only to backfill rows that already exist.
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 5, 17, 18, 14, 51, 646668, tzinfo=utc)),
            # preserve_default=False: the one-off default above is dropped from
            # the model state after the migration runs.
            preserve_default=False,
        ),
    ]
|
import json
import sys
from typing import Dict, List
from aito.schema import AitoTableSchema
from aito.utils.data_frame_handler import DataFrameHandler
from .sub_command import SubCommand
from ..parser import PathArgType, InputArgType, ParseError, try_load_json
class ConvertFromFormatSubCommand(SubCommand):
    """Generic `convert <format>` sub-command shared by every input format."""

    def build_parser(self, parser):
        """Register the CLI arguments common to all input formats."""
        # Creating an inferred schema and converting against an existing one
        # are mutually exclusive choices.
        schema_group = parser.add_mutually_exclusive_group()
        schema_group.add_argument(
            '-c', '--create-table-schema', metavar='schema-output-file', type=PathArgType(parent_must_exist=True),
            help='create an inferred aito schema and write to output file'
        )
        schema_group.add_argument(
            '-s', '--use-table-schema', metavar='schema-input-file', type=PathArgType(must_exist=True),
            help='convert the data to match the input table schema'
        )
        parser.add_argument('-j', '--json', action='store_true', help='convert to json format')
        parser.add_argument(
            'input', default='-', type=InputArgType(), nargs='?',
            help="path to the input file (when no input file is given or when input is -, read from the standard input)"
        )

    @staticmethod
    def parsed_args_to_data_frame_handler_convert_args(parsed_args: Dict) -> Dict:
        """Translate parsed CLI arguments into DataFrameHandler.convert_file kwargs."""
        fmt = parsed_args['input-format']
        kwargs = {
            'read_input': parsed_args['input'],
            'write_output': sys.stdout,
            'in_format': fmt,
            'out_format': 'json' if parsed_args['json'] else 'ndjson',
            'read_options': {},
            'convert_options': {},
        }
        if parsed_args['use_table_schema']:
            # Load the user-supplied schema and convert against it.
            with parsed_args['use_table_schema'].open() as schema_file:
                kwargs['use_table_schema'] = try_load_json(schema_file, 'table schema')
        if fmt == 'csv':
            kwargs['read_options']['delimiter'] = parsed_args['delimiter']
            kwargs['read_options']['decimal'] = parsed_args['decimal']
        if fmt == 'excel':
            # Excel parsing needs a seekable file, so stdin is rejected.
            if parsed_args['input'] == sys.stdin:
                raise ParseError('input must be a file path for excel files')
            if parsed_args['one_sheet']:
                kwargs['read_options']['sheet_name'] = parsed_args['one_sheet']
        return kwargs

    def parse_and_execute(self, parsed_args: Dict):
        """Run the conversion; optionally dump an inferred schema to a file."""
        convert_kwargs = self.parsed_args_to_data_frame_handler_convert_args(parsed_args)
        schema_out_path = parsed_args['create_table_schema'] or None
        converted_df = DataFrameHandler().convert_file(**convert_kwargs)
        if schema_out_path:
            inferred = AitoTableSchema.infer_from_pandas_data_frame(converted_df)
            with schema_out_path.open(mode='w') as out_file:
                json.dump(inferred.to_json_serializable(), out_file, indent=2, sort_keys=True)
        return 0
class ConvertFromCSVSubCommand(ConvertFromFormatSubCommand):
    """`convert csv` sub-command: shared convert arguments plus CSV options."""

    def __init__(self):
        super().__init__('csv', 'convert CSV data')

    def build_parser(self, parser):
        # Shared convert arguments first, then the default CSV options
        # (delimiter, decimal, ...) supplied by the parser helper.
        super().build_parser(parser)
        parser.add_csv_format_default_arguments()
class ConvertFromExcelSubCommand(ConvertFromFormatSubCommand):
    """`convert excel` sub-command: shared convert arguments plus Excel options."""

    def __init__(self):
        super().__init__('excel', 'convert EXCEL data')

    def build_parser(self, parser):
        # Shared convert arguments first, then the default Excel options
        # supplied by the parser helper.
        super().build_parser(parser)
        parser.add_excel_format_default_arguments()
        parser.description = 'Convert EXCEL data, accept both xls and xlsx'
class ConvertSubCommand(SubCommand):
    """Top-level `convert` command that dispatches to per-format sub-commands."""

    _default_sub_commands = [
        ConvertFromCSVSubCommand(),
        ConvertFromExcelSubCommand(),
        ConvertFromFormatSubCommand('json', 'convert JSON data'),
        ConvertFromFormatSubCommand('ndjson', 'convert NDJSON data'),
    ]

    def __init__(self, sub_commands: List[SubCommand] = None):
        """Build the command; fall back to the default sub-commands when none given."""
        super().__init__('convert', 'convert from a given format into NDJSON|JSON')
        commands = sub_commands if sub_commands else self._default_sub_commands
        self._sub_commands_map = {command.name: command for command in commands}

    def build_parser(self, parser):
        """Attach one required sub-parser per supported input format."""
        parser.epilog = '''To see help for a specific format:
aito convert <input-format> - h
When no input or when input is -, read standard input.
You must use input file instead of standard input for excel file
'''
        format_subparsers = parser.add_subparsers(
            title='input-format',
            dest='input-format',
            metavar='<input-format>'
        )
        # `required` is set after creation for Python-3.6 argparse compatibility.
        format_subparsers.required = True
        for command in self._sub_commands_map.values():
            format_parser = format_subparsers.add_parser(command.name, help=command.help_message)
            command.build_parser(format_parser)

    def parse_and_execute(self, parsed_args: Dict):
        """Delegate to the sub-command selected by the `input-format` argument."""
        self._sub_commands_map[parsed_args['input-format']].parse_and_execute(parsed_args)
        return 0
|
# Copyright 2017, Inderpreet Singh, All rights reserved.
import pickle
import sys
import argparse
# my libs
from system import SystemScanner, SystemFile, SystemScannerError
if __name__ == "__main__":
    # Guard against interpreters too old for the type-annotation syntax below.
    if sys.hexversion < 0x03050000:
        sys.exit("Python 3.5 or newer is required to run this program.")

    arg_parser = argparse.ArgumentParser(description="File size scanner")
    arg_parser.add_argument("path", help="Path of the root directory to scan")
    arg_parser.add_argument("-e", "--exclude-hidden", action="store_true", default=False,
                            help="Exclude hidden files")
    arg_parser.add_argument("-H", "--human-readable", action="store_true", default=False,
                            help="Human readable output")
    options = arg_parser.parse_args()

    scanner = SystemScanner(options.path)
    if options.exclude_hidden:
        # Dot-prefixed entries are treated as hidden.
        scanner.add_exclude_prefix(".")

    try:
        root_files = scanner.scan()
    except SystemScannerError as e:
        sys.exit("SystemScannerError: {}".format(str(e)))

    if options.human_readable:
        def print_file(file: SystemFile, level: int):
            # One space of indent per tree depth, then "<name> <d|f> <size>".
            sys.stdout.write(" " * level)
            sys.stdout.write("{} {} {}\n".format(
                file.name,
                "d" if file.is_dir else "f",
                file.size
            ))
            for child in file.children:
                print_file(child, level + 1)

        for root_file in root_files:
            print_file(root_file, 0)
    else:
        # Machine-readable mode: pickle the whole tree to stdout's byte stream.
        sys.stdout.buffer.write(pickle.dumps(root_files))
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@version: ??
@author: xiaoming
@license: MIT Licence
@contact: xiaominghe2014@gmail.com
@site:
@software: PyCharm
@file: base_define.py
@time: 2017/12/19 下午12:11
"""
import const as xreg
# Register single-character regex primitives as named attributes on the
# `const` module (imported as xreg) so patterns can be composed from
# readable names. NOTE(review): `const` presumably enforces write-once
# assignment — confirm against its implementation.
xreg.repeat_0_or_more = '*'  # quantifier: zero or more repetitions
xreg.repeat_1_or_more = '+'  # quantifier: one or more repetitions
xreg.repeat_0_or_1 = '?'  # quantifier: optional (zero or one)
xreg.char_any = '.'  # any single character
xreg.begin = '^'  # start-of-string anchor
xreg.end = '$'  # end-of-string anchor
# eg. [1-9]
xreg.r_from = '['  # character-class open bracket
xreg.r_id = '-'  # character-class range separator
xreg.r_to = ']'  # character-class close bracket
xreg.save_begin = '('  # capture-group open
xreg.save_end = ')'  # capture-group close
class RegSign:
    """
    Fluent builder that accumulates a regular-expression string in
    ``self.reg`` using the helpers exposed by the ``xreg`` (const)
    module. Every builder method returns ``self`` so calls can chain.
    """

    def __init__(self, reg=''):
        # Builder version tag and the accumulated pattern text so far.
        self.version = '0.0.0'
        self.reg = reg

    def other(self, other):
        """Append ``xreg.other(other)`` to the pattern."""
        self.reg += '{}'.format(xreg.other(other))
        return self

    def repeat_n(self, s, n):
        """Append ``s`` repeated exactly ``n`` times (via ``xreg.repeat_n``)."""
        self.reg += '{}'.format(xreg.repeat_n(s, n))
        return self

    def repeat_n_to_m(self, s, n, m):
        """Append ``s`` repeated between ``n`` and ``m`` times (via ``xreg.repeat_n_m``)."""
        self.reg += '{}'.format(xreg.repeat_n_m(s, n, m))
        return self

    def find(self, s):
        """Append ``xreg.find(s)`` to the pattern."""
        self.reg += '{}'.format(xreg.find(s))
        return self

    def maybe(self, s):
        """Append ``xreg.maybe(s)`` (optional match) to the pattern."""
        self.reg += '{}'.format(xreg.maybe(s))
        return self

    def from_to(self, begin, end):
        """Append a ``xreg.from_to(begin, end)`` range to the pattern."""
        self.reg += '{}'.format(xreg.from_to(begin, end))
        return self
|
'''
@author Tian Shi
Please contact tshi@vt.edu
'''
import itertools
import os
import shutil
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from LeafNATS.data.utils import create_batch_memory
from LeafNATS.utils.utils import show_progress
from nltk.corpus import stopwords
from .model import modelDMSC
stop_words = stopwords.words('english')
class modelKeywords(modelDMSC):
    """Keyword extraction on top of the DMSC model via attention weights.

    Collects, per task, the words that receive the highest attention
    (``keywords0``) and the highest attention-plus-deliberated-attention
    (``keywords1``), together with overall word frequencies, and writes
    the top keywords per task to text files.
    """

    def __init__(self, args):
        super().__init__(args=args)
        # keywords from attention
        self.keywords0 = [{} for k in range(args.n_tasks)]
        # keywords from both attention and deliberated attention.
        self.keywords1 = [{} for k in range(args.n_tasks)]
        # overall word -> occurrence count across all processed reviews
        self.wd_freq = {}

    def keyword_extraction(self):
        '''
        Run the full keyword-extraction pipeline: build vocabulary and
        models, load visualization batches, accumulate attention-based
        keyword counts, then write one frequency-weighted keyword file
        per task to ../nats_results/attn_keywords/<task>.txt.
        '''
        self.build_vocabulary()
        self.build_models()
        print(self.base_models)
        print(self.train_models)
        if len(self.base_models) > 0:
            self.init_base_model_params()
        if len(self.train_models) > 0:
            self.init_train_model_params()
        self.vis_data = create_batch_memory(
            path_=self.args.data_dir,
            file_=self.args.file_vis,
            is_shuffle=False,
            batch_size=self.args.batch_size,
            is_lower=self.args.is_lower
        )
        # Recreate the output directory from scratch on every run.
        key_dir = '../nats_results/attn_keywords'
        if not os.path.exists(key_dir):
            os.mkdir(key_dir)
        else:
            shutil.rmtree(key_dir)
            os.mkdir(key_dir)
        # Inference only: no gradients needed while collecting attention.
        with torch.no_grad():
            print('Begin Generate Keywords')
            n_batch = len(self.vis_data)
            print('The number of batches (keywords): {}'.format(n_batch))
            for batch_id in range(n_batch):
                self.build_batch(self.vis_data[batch_id])
                self.keyword_worker(batch_id, key_dir)
                show_progress(batch_id+1, n_batch)
            print()
        for k in range(self.args.n_tasks):
            # Score each keyword by its attention count normalized by a
            # smoothed overall frequency (the +100 damps rare words).
            key_arr = [[wd, 100*self.keywords1[k][wd] /
                        (self.wd_freq[wd]+100)] for wd in self.keywords1[k]]
            # Sort by score, descending. NOTE: the lambda's `k` shadows the
            # task index `k` of the outer loop (harmless but confusing).
            key_arr = sorted(key_arr, key=lambda k: k[1])[::-1]
            # Repeat each word proportionally to its (rounded) score; drop
            # stopwords, short words, and the <unk> token.
            key_arr = [[itm[0]]*int(round(itm[1])) for itm in key_arr
                       if (itm[0] not in stop_words) and (len(itm[0]) > 3) and (itm[0] != '<unk>')]
            # Keep the 100 best-scoring words, then flatten the repetitions.
            key_arr = key_arr[:100]
            key_arr = list(itertools.chain(*key_arr))
            fout = open(os.path.join(key_dir, str(k)+'.txt'), 'w')
            fout.write(' '.join(key_arr) + '\n')
            fout.close()

    def keyword_worker(self, batch_id, key_dir):
        '''
        Process one batch: run the encoder and the per-task attention
        heads, then accumulate top-attended words into self.keywords0 /
        self.keywords1 and word counts into self.wd_freq.
        '''
        review_emb = self.base_models['embedding'](self.batch_data['review'])
        batch_size = review_emb.size(0)
        seq_len = review_emb.size(1)
        # Gated embedding: mix raw embeddings with a learned value transform.
        emb_gate = torch.sigmoid(self.train_models['gate'](review_emb))
        emb_valu = torch.relu(self.train_models['value'](review_emb))
        review_out = review_emb*(1-emb_gate) + emb_valu*emb_gate
        encoder_hy, _ = self.train_models['encoder'](review_out)
        # Max/avg pooling over the hidden dimension of the two encoder
        # directions. NOTE(review): assumes encoder_hy's last dim splits
        # into 2 equal halves (bidirectional encoder) — confirm in model.py.
        input_pool = encoder_hy.view(batch_size, seq_len, 2, -1)
        input_pool = input_pool.contiguous().view(batch_size, seq_len*2, -1)
        max_pool = self.train_models['max_pool'](input_pool).squeeze(-1)
        max_pool = max_pool.view(batch_size, seq_len, 2)
        avg_pool = self.train_models['avg_pool'](input_pool).squeeze(-1)
        avg_pool = avg_pool.view(batch_size, seq_len, 2)
        # Per-direction feature maps, concatenated with the poolings.
        input_fm = encoder_hy.view(batch_size, seq_len, 2, -1)
        cfmf = self.train_models['fmf'](input_fm[:, :, 0])
        cfmb = self.train_models['fmb'](input_fm[:, :, 1])
        review_enc = torch.cat((encoder_hy, max_pool, avg_pool, cfmf, cfmb), 2)
        attn0_out = []
        attn1_out = []
        for k in range(self.args.n_tasks):
            # First-pass attention over the sequence for task k.
            attn0 = torch.tanh(
                self.train_models['attn_forward'][k](review_enc))
            attn0 = self.train_models['attn_wrap'][k](attn0).squeeze(2)
            attn0 = torch.softmax(attn0, 1)
            cv_hidden0 = torch.bmm(attn0.unsqueeze(1), review_enc).squeeze(1)
            attn0_out.append(attn0)
            # Deliberated (second-pass) attention conditioned on the
            # first-pass context vector.
            attn1 = torch.tanh(
                self.train_models['loop_forward1'][k](review_enc))
            attn1 = torch.bmm(attn1, cv_hidden0.unsqueeze(2)).squeeze(2)
            attn1 = torch.softmax(attn1, 1)
            # get the accumulated attention.
            attn1_out.append(0.5*attn0 + 0.5*attn1)
        # Map token ids back to words, skipping id 1 (presumably the
        # padding token — verify against the vocabulary builder).
        review = []
        batch_review = self.batch_data['review'].data.cpu().numpy()
        for k in range(batch_size):
            review.append([self.batch_data['id2vocab'][wd]
                           for wd in batch_review[k] if not wd == 1])
        for k in range(self.args.n_tasks):
            attn0_out[k] = attn0_out[k].data.cpu().numpy().tolist()
            for j in range(batch_size):
                # Truncate to the unpadded length and renormalize. The
                # list / numpy-scalar division coerces the list to an
                # ndarray, hence the .tolist() on the next line.
                attn0_out[k][j] = attn0_out[k][j][:len(
                    review[j])]/np.sum(attn0_out[k][j][:len(review[j])])
                attn0_out[k][j] = attn0_out[k][j].tolist()
        for k in range(self.args.n_tasks):
            attn1_out[k] = attn1_out[k].data.cpu().numpy().tolist()
            for j in range(batch_size):
                attn1_out[k][j] = attn1_out[k][j][:len(
                    review[j])]/np.sum(attn1_out[k][j][:len(review[j])])
                attn1_out[k][j] = attn1_out[k][j].tolist()
        for k in range(batch_size):
            # Count every word occurrence. The bare except here (and below)
            # stands in for a missing-key check; dict.get or defaultdict
            # would be cleaner, but is left untouched in this doc pass.
            for wd in review[k]:
                try:
                    self.wd_freq[wd] += 1
                except:
                    self.wd_freq[wd] = 1
            for j in range(self.args.n_tasks):
                # Indices of the 3 highest-attention tokens per pass.
                idx0 = np.argsort(attn0_out[j][k])[-3:]
                idx1 = np.argsort(attn1_out[j][k])[-3:]
                for id_ in idx0:
                    try:
                        self.keywords0[j][review[k][id_]] += 1
                    except:
                        self.keywords0[j][review[k][id_]] = 1
                for id_ in idx1:
                    try:
                        self.keywords1[j][review[k][id_]] += 1
                    except:
                        self.keywords1[j][review[k][id_]] = 1
|
## Script to help generate category information
words_HP = ['harry', 'said', 'ron', 'hermione', 'professor', 'lupin', 'back', 'black', 'one', 'around', 'like', 'looked', 'could', 'see', 'got', 'snape', 'hagrid', 'didnt', 'get', 'know', 'well', 'harrys', 'still', 'eyes', 'go', 'would', 'dont', 'time', 'though', 'face', 'going', 'looking', 'right', 'think', 'dumbledore', 'malfoy', 'saw', 'come', 'head', 'voice', 'door', 'away', 'im', 'sirius', 'toward', 'hes', 'something', 'look', 'heard', 'behind', 'last', 'hand', 'wand', 'ever', 'gryffindor', 'turned', 'room', 'never', 'scabbers', 'way', 'next', 'thought', 'told', 'went', 'good', 'us', 'fudge', 'dementors', 'neville', 'potter', 'weasley', 'mcgonagall', 'hed', 'front', 'long', 'made', 'came', 'ill', 'two', 'first', 'moment', 'crookshanks', 'aunt', 'pettigrew', 'hogwarts', 'want', 'inside', 'seemed', 'table', 'took', 'left', 'knew', 'wasnt', 'madam', 'uncle', 'even', 'suddenly', 'large', 'really', 'castle', 'dark', 'anything', 'tell', 'trying', 'wood', 'class', 'hands', 'felt', 'let', 'three', 'thing', 'make', 'great', 'much', 'youre', 'buckbeak', 'say', 'couldnt', 'ive', 'hear', 'fred', 'bed', 'cant', 'firebolt', 'open', 'feet', 'need', 'another', 'put', 'little', 'stood', 'gave', 'across', 'oh', 'trelawney', 'year', 'people', 'sure', 'cloak', 'school', 'seen', 'rons', 'yes', 'help', 'take', 'night', 'magic', 'vernon', 'gone', 'every', 'staring', 'end', 'pulled', 'hogsmeade', 'better', 'weve', 'onto', 'mr', 'percy', 'everyone', 'old', 'whispered', 'thats', 'george', 'id', 'bit', 'hall', 'forward', 'keep', 'hagrids', 'quickly', 'happened', 'without', 'whats', 'along', 'enough', 'theres', 'reached', 'set', 'floor', 'rest', 'hair', 'quidditch', 'done', 'team', 'new', 'wouldnt', 'must', 'sat', 'marge', 'mind', 'started', 'might', 'nothing', 'asked', 'years', 'day', 'youve', 'blacks', 'match', 'map', 'began', 'yet', 'slytherin', 'ter', 'boy', 'air', 'sight', 'opened', 'rat', 'stan', 'robes', 'side', 'azkaban', 'slowly', 'small', 'quite', 'dear', 'outside', 'tried', 
'course', 'yeh', 'peter', 'window', 'broom', 'muttered', 'else', 'quietly', 'dementor', 'best', 'fell', 'arm', 'yelled', 'mouth', 'mean', 'yeah', 'anyone', 'field', 'wont', 'okay', 'standing', 'found', 'later', 'feeling', 'common', 'books', 'life', 'ministry', 'hard', 'coming', 'dog', 'minutes', 'snitch', 'wanted', 'wizard', 'find', 'leave', 'already', 'things', 'talking', 'believe', 'please', 'trunk', 'stared', 'cup', 'dead', 'kept', 'give', 'whole', 'grounds', 'sitting', 'stop', 'ground', 'snapes', 'called', 'slightly', 'getting', 'full', 'lost', 'crowd', 'hippogriff', 'empty', 'watching', 'happy', 'hermiones', 'youll', 'thinking', 'pomfrey', 'moved', 'hadnt', 'voldemort', 'second', 'case', 'watched', 'man', 'stopped', 'tea', 'havent', 'sit', 'father', 'turn', 'feel', 'run', 'cold', 'tower', 'caught', 'able', 'however', 'morning', 'dad', 'youd', 'together', 'move', 'hit', 'lupins', 'crabbe', 'owl', 'nearly', 'witch', 'house', 'ten', 'light', 'tiny', 'ask', 'classroom', 'boggart', 'book', 'top', 'read', 'work', 'enormous', 'past', 'raised', 'er', 'staircase', 'minister', 'telling', 'malfoys', 'listen', 'roared', 'appeared', 'sorry', 'pocket', 'sound', 'bag', 'sort', 'place', 'entrance', 'goyle', 'expecto', 'lot', 'held', 'either', 'always', 'james', 'shut', 'office', 'shouted', 'corner', 'shaking', 'close', 'kill', 'understand', 'ravenclaw', 'petunia', 'friends', 'loudly', 'holding', 'five', 'walked', 'shoulder', 'someone', 'magical', 'taking', 'making', 'pointing', 'many', 'parents', 'teacher', 'seized', 'picked', 'remember', 'rather', 'eye', 'taken', 'supposed', 'chest', 'watch', 'shes', 'pointed', 'lesson', 'climbed', 'corridor', 'times', 'parchment', 'word', 'almost', 'thin', 'theyre', 'tree', 'fast', 'usual', 'neck', 'idea', 'everything', 'breath', 'patronus', 'followed', 'moving', 'christmas', 'fat', 'alone', 'since', 'bus', 'chocolate', 'defense', 'fire', 'creatures', 'noise', 'lavender', 'werewolf', 'muggles', 'near', 'use', 'silence', 'extremely', 
'silver', 'person', 'form', 'speak', 'red', 'cat', 'patronum', 'catch', 'indeed', 'dursleys', 'start', 'loud', 'trouble', 'threw', 'hundred', 'try', 'isnt', 'point', 'heart', 'words', 'dangerous', 'stay', 'walking', 'finally', 'leg', 'leaving', 'wrong', 'quiet', 'chair', 'invisibility', 'steps', 'arts', 'closed', 'students', 'except', 'headmaster', 'portrait', 'points', 'severus', 'chapter', 'foot', 'muggle', 'hedwig', 'finished', 'killed', 'aside', 'gold', 'mrs', 'hurried', 'train', 'shall', 'broke', 'stand', 'saying', 'ern', 'passed', 'running', 'screaming', 'sir', 'stomach', 'glasses', 'teachers', 'least', 'bad', 'used', 'letter', 'beneath', 'huge', 'several', 'burst', 'horrible', 'fact', 'gasped', 'forest', 'completely', 'chance', 'week', 'street', 'meant', 'arms', 'died', 'sign', 'third', 'name', 'mum', 'heads', 'theyd', 'desk', 'care', 'doors', 'wind', 'wall', 'met', 'white', 'low', 'listening', 'willow', 'reckon', 'wing', 'wait', 'honeydukes', 'innocent', 'rosmerta', 'lying', 'real', 'call', 'wizards', 'ear', 'seconds', 'whether', 'tail', 'hope', 'doesnt', 'cabin', 'visit', 'straight', 'thick', 'knight', 'worse', 'dyou', 'divination', 'death', 'theyve', 'hold', 'quick', 'hole', 'filch', 'dudley', 'talk', 'voices', 'perhaps', 'upon', 'fingers', 'world', 'ready', 'shot', 'tears', 'hissed', 'starting', 'ran', 'half', 'werent', 'mad', 'crystal', 'nobody', 'looks', 'sent', 'hospital', 'trelawneys', 'friend', 'worst', 'turning', 'cage', 'instead', 'legs', 'furiously', 'glass', 'brought', 'managed', 'far', 'whose', 'laughter', 'kind', 'exactly', 'clear', 'news', 'angry', 'twelve', 'ears', 'certainly', 'backward', 'beside', 'trees', 'lets', 'hours', 'miss', 'tonight', 'scared', 'ahead', 'parvati', 'grim', 'became', 'broomstick', 'given', 'hidden', 'direction', 'broken', 'lay', 'carrying', 'stuff', 'snapped', 'hiding', 'true', 'upstairs', 'memory', 'story', 'pulling', 'excellent', 'free', 'deserted', 'windows', 'wearing', 'disappeared', 'approached', 'continued', 
'deep', 'matter', 'entered', 'rain', 'terrible', 'stone', 'flitwick', 'buckbeaks', 'peeves', 'nose', 'teeth', 'probably', 'stairs', 'family', 'number', 'cauldron', 'grabbed', 'granger', 'soon', 'green', 'lily', 'water', 'wings', 'picture', 'ginny', 'term', 'glanced', 'pair', 'apart', 'nervously', 'big', 'boys', 'fine', 'smile', 'abruptly', 'happen', 'arrived', 'unless', 'youknowwho', 'laughing', 'darkness', 'eat', 'strode', 'dean', 'dropped', 'truth', 'nearer', 'headed', 'vanished', 'roots', 'hooch', 'committee', 'pushed', 'afraid', 'allowed', 'bottle', 'carefully', 'reason', 'break', 'answer', 'late', 'seem', 'knees', 'tightly', 'permission', 'walk', 'today', 'hardly', 'footsteps', 'tight', 'hat', 'needed', 'shook', 'sank', 'covered', 'em', 'job', 'ah', 'throat', 'thank', 'seamus', 'gryffindors', 'waited', 'playing', 'closely', 'wands', 'wants', 'knows', 'potion', 'lake', 'lady', 'grass', 'stupid', 'angelina', 'potters', 'holidays', 'forced', 'essay', 'closer', 'particularly', 'locked', 'days', 'nasty', 'vernons', 'opposite', 'wizarding', 'birthday', 'worried', 'lord', 'flew', 'paper', 'present', 'happily', 'ago', 'nimbus', 'view', 'edge', 'asleep', 'within', 'none', 'added', 'mother', 'snarled', 'furious', 'sudden', 'hurt', 'high', 'rolled', 'sleep', 'classes', 'led', 'laugh', 'slipped', 'ball', 'fighting', 'bags', 'glittering', 'dormitory', 'spoke', 'slytherins', 'goal', 'alicia', 'katie', 'working', 'whomping', 'marauders', 'pettigrews', 'remus', 'summer', 'homework', 'ink', 'four', 'car', 'funny', 'alive', 'gazing', 'seeing', 'hesitated', 'wondering', 'leapt', 'middle', 'pleased', 'brown', 'daily', 'prophet', 'win', 'poor', 'heavy', 'breakfast', 'piece', 'finger', 'forget', 'lunch', 'grip', 'decided', 'die', 'ceiling', 'bent', 'fall', 'fallen', 'longbottom', 'slid', 'leaky', 'tom', 'waiting', 'game', 'shop', 'somebody', 'sharply', 'seriously', 'impossible', 'twenty', 'crack', 'hufflepuff', 'helping', 'rope', 'others', 'hippogriffs', 'hell', 'fifty', 'oliver', 
'cho', 'also', 'frowning', 'pain', 'spent', 'notice', 'expression', 'dare', 'oclock', 'hour', 'leaned', 'escaped', 'attack', 'sky', 'landed', 'soft', 'gray', 'thanks', 'waving', 'final', 'says', 'bet', 'angrily', 'grinning', 'cross', 'fixed', 'minute', 'box', 'knocked', 'flat', 'smiling', 'live', 'evening', 'walls', 'explain', 'fence', 'halt', 'familiar', 'repeated', 'bar', 'passage', 'safe', 'cried', 'future', 'arithmancy', 'definitely', 'joined', 'girl', 'maybe', 'dumbledores', 'feast', 'filled', 'become', 'cadogan', 'rose', 'practice', 'seeker', 'forgotten', 'butterbeer', 'secretkeeper', 'macnair', 'charm', 'drive', 'hoped', 'speaking', 'frightened', 'realized', 'tomorrow', 'suppose', 'clearly', 'lucky', 'beak', 'moon', 'flying', 'london', 'note', 'change', 'difficult', 'missed', 'longer', 'helped', 'blood', 'subject', 'wardrobe', 'shrieking', 'nodded', 'eh', 'single', 'putting', 'ones', 'wiping', 'expect', 'effort', 'expelled', 'wide', 'stepped', 'different', 'skin', 'figure', 'giving', 'short', 'using', 'bin', 'saved', 'spotted', 'tables', 'hasnt', 'twice', 'shadows', 'scarlet', 'theyll', 'midair', 'seat', 'breaking', 'marble', 'muttering', 'leaves', 'ladder', 'beat', 'cut', 'quaffle', 'bludger', 'oneeyed', 'prongs', 'tunnel', 'page', 'known', 'less', 'bellowed', 'obviously', 'send', 'stretched', 'inches', 'barely', 'soared', 'join', 'ripped', 'trembling', 'six', 'whod', 'laughed', 'bound', 'gleaming', 'broomsticks', 'snap', 'worry', 'return', 'marges', 'wheres', 'force', 'escape', 'forbidden', 'warning', 'screamed', 'stuck', 'heading', 'serious', 'thrown', 'cornelius', 'anyway', 'diagon', 'alley', 'excitedly', 'lose', 'packed', 'thomas', 'meet', 'checking', 'ages', 'lowered', 'louder', 'terrified', 'shining', 'question', 'branches', 'lessons', 'confused', 'afternoon', 'riddikulus', 'joke', 'strong', 'charms', 'silent', 'falling', 'betrayed', 'ward', 'possession', 'flint', 'highly', 'secret', 'write', 'paused', 'round', 'weird', 'cupboard', 'living', 
'possible', 'potions', 'delighted', 'jumped', 'spot', 'weeks', 'thirteen', 'hedwigs', 'whatever', 'bright', 'voldemorts', 'golden', 'growing', 'lower', 'owls', 'tied', 'errol', 'carried', 'grin', 'weasleys', 'seven', 'remembered', 'sun', 'sneakoscope', 'loads', 'knowing', 'handle', 'thousand', 'dogs', 'monster', 'sideways', 'ouch', 'struggling', 'village', 'woke', 'hot', 'line', 'immediately', 'peered', 'deal', 'thoughts', 'growled', 'attention', 'returned', 'suspiciously', 'itll', 'calmly', 'sighed', 'fault', 'clutched', 'arent', 'changed', 'deeply', 'heaved', 'means', 'money', 'bang', 'step', 'calm', 'fear', 'forgot', 'whisper', 'examining', 'beaming', 'parlor', 'anymore', 'among', 'excited', 'surprise', 'hurrying', 'drew', 'balls', 'staying', 'rats', 'stuffed', 'hanging', 'doubt', 'check', 'bring', 'search', 'compartment', 'careful', 'pale', 'corridors', 'patch', 'reach', 'silvery', 'mud', 'applause', 'beyond', 'pumpkin', 'group', 'animal', 'harder', 'fer', 'trust', 'conversation', 'imagine', 'zonkos', 'fly', 'weight', 'stadium', 'cheering', 'moments', 'wormtail', 'anybody', 'exam', 'executioner']
# for word in words_HP:
# print(word)
# print(len(words_HP))
dictX_HP = {'harry': -0.3404182105762809, 'said': -0.3082035252113908, 'ron': -0.29064987249227214, 'hermione': -0.295993355050975, 'professor': -0.395361509471976, 'lupin': -0.2903172889185488, 'back': -0.41867708056140046, 'black': -0.41412321866824137, 'one': -0.3769123231653582, 'around': -0.36723433944329353, 'like': -0.378418612917154, 'looked': -0.2947271200078124, 'could': -0.3577161927919001, 'see': -0.35916690726581646, 'got': -0.40749323724709696, 'snape': -0.3342276781322949, 'hagrid': -0.35782965724186905, 'didnt': -0.3377125115808793, 'get': -0.36791675635571036, 'know': -0.32088769960292146, 'well': -0.3509799514530042, 'harrys': -0.3581815597564729, 'still': -0.3891462832677463, 'eyes': -0.3411320521537438, 'go': -0.33625121776974054, 'would': -0.3991270096767004, 'dont': -0.4011167126400624, 'time': -0.383755081000773, 'though': -0.3391935902439818, 'face': -0.425454819598296, 'going': -0.38674436028238246, 'looking': -0.2989421821642099, 'right': -0.33612430464485826, 'think': -0.3695793444380788, 'dumbledore': -0.42466146441031927, 'malfoy': -0.3690430336120932, 'saw': -0.33953765995951724, 'come': -0.32601085533517926, 'head': -0.37457791172044363, 'voice': -0.3441443053843645, 'door': -0.4250479731570908, 'away': -0.3534835004099374, 'im': -0.39972895376338746, 'sirius': -0.3607181620351904, 'toward': -0.3456705796134602, 'hes': -0.37090004603916277, 'something': -0.3502463512599456, 'look': -0.3200768952276976, 'heard': -0.4121546538726243, 'behind': -0.3842322608742912, 'last': -0.4534087136541481, 'hand': -0.35293334356464456, 'wand': -0.4247314105150524, 'ever': -0.3897571463820377, 'gryffindor': -0.485975296097189, 'turned': -0.3157406010910939, 'room': -0.42986673669826675, 'never': -0.42607414823748274, 'scabbers': -0.3208840565256028, 'way': -0.3583127216088684, 'next': -0.3677565206361123, 'thought': -0.38149276698010803, 'told': -0.39619415683262654, 'went': -0.3338101039580636, 'good': -0.4197530595221225, 'us': -0.3527284479829061, 
'fudge': -0.4223446565899427, 'dementors': -0.3924238121146706, 'neville': -0.43122050763320485, 'potter': -0.3563956186908335, 'weasley': -0.547684076491472, 'mcgonagall': -0.21470530954930736, 'hed': -0.4547728843661789, 'front': -0.4083186096076119, 'long': -0.3955714212812588, 'made': -0.36273979193934536, 'came': -0.3381265018451254, 'ill': -0.4214717234229859, 'two': -0.3868359596459398, 'first': -0.3721772586535749, 'moment': -0.39260080375831685, 'crookshanks': -0.34383628958683593, 'aunt': -0.4748607929407128, 'pettigrew': -0.4321678054836688, 'hogwarts': -0.41973286792532793, 'want': -0.40133248290503337, 'inside': -0.4072937705557821, 'seemed': -0.4578718938702481, 'table': -0.3974104062070743, 'took': -0.3930959429118343, 'left': -0.5135599508534043, 'knew': -0.35123929550698785, 'wasnt': -0.313283908716895, 'madam': -0.6133743663361283, 'uncle': -0.5428261545879886, 'even': -0.43088149074663284, 'suddenly': -0.4573519465666472, 'large': -0.4174788766760509, 'really': -0.41844844002299597, 'castle': -0.38547101872356687, 'dark': -0.3817746391588224, 'anything': -0.384192386220149, 'tell': -0.3861589225343997, 'trying': -0.39740860030675657, 'wood': -0.4540675327082501, 'class': -0.485088455855281, 'hands': -0.39270916602527667, 'felt': -0.37484240618510967, 'let': -0.4104700937773591, 'three': -0.4585097914409581, 'thing': -0.36563017216984, 'make': -0.4647798852504154, 'great': -0.458763496243908, 'much': -0.4550840304210772, 'youre': -0.39150424078181556, 'buckbeak': -0.4740268036318609, 'say': -0.40451272045685766, 'couldnt': -0.4188646213104272, 'ive': -0.31756211078013097, 'hear': -0.4158921574538428, 'fred': -0.34208761899951284, 'bed': -0.4031033849445751, 'cant': -0.40230640244441, 'firebolt': -0.4762529438418908, 'open': -0.3703265538282747, 'feet': -0.4830803048498615, 'need': -0.4648522117052097, 'another': -0.3503126253329676, 'put': -0.4038388101471008, 'little': -0.3873097182516131, 'stood': -0.3371097949712153, 'gave': 
-0.2916933079776938, 'across': -0.4890578325774798, 'oh': -0.35164683295772614, 'trelawney': -0.2189948131894088, 'year': -0.38483615045927205, 'people': -0.4144923176328458, 'sure': -0.41988157498515294, 'cloak': -0.449506620338296, 'school': -0.429579299106382, 'seen': -0.43990122975125684, 'rons': -0.366542793115703, 'yes': -0.2565478645886826, 'help': -0.370189146634433, 'take': -0.4109787495955283, 'night': -0.4573546032764562, 'magic': -0.4925752509936749, 'vernon': -0.4863277268381305, 'gone': -0.3937427947171375, 'every': -0.4363862983845517, 'staring': -0.32223976727679693, 'end': -0.4366846887791792, 'pulled': -0.4143607096419292, 'hogsmeade': -0.3496612149050829, 'better': -0.4262185105950535, 'weve': -0.31489970287981145, 'onto': -0.488017517549639, 'mr': -0.3716931857238555, 'percy': -0.3901014240667475, 'everyone': -0.3716266469511373, 'old': -0.3931800574359525, 'whispered': -0.27140502090887636, 'thats': -0.36057986011028637, 'george': -0.3637322240777988, 'id': -0.44681924780909626, 'bit': -0.41219900603684617, 'hall': -0.4952337477313958, 'forward': -0.4361372790377303, 'keep': -0.42845815407325943, 'hagrids': -0.4620933274760993, 'quickly': -0.17668315591436776, 'happened': -0.3597484795042253, 'without': -0.4924488060849929, 'whats': -0.22218149167949477, 'along': -0.4696350622292944, 'enough': -0.40613840440880417, 'theres': -0.4559059535972751, 'reached': -0.4380172450065086, 'set': -0.29281636990597143, 'floor': -0.4183743430118068, 'rest': -0.41179596348104314, 'hair': -0.42091652892528886, 'quidditch': -0.38405168026589326, 'done': -0.4229381098880037, 'team': -0.43954284996375975, 'new': -0.4122545188187575, 'wouldnt': -0.425470732971978, 'must': -0.3647641404862631, 'sat': -0.3128371616697609, 'marge': -0.6137148737801807, 'mind': -0.4179214395970223, 'started': -0.42128080097978166, 'might': -0.41727959888003013, 'nothing': -0.4119697006240009, 'asked': -0.31323112709123974, 'years': -0.6146826657011871, 'day': -0.4820783470895799, 
'youve': -0.43497875782214646, 'blacks': -0.4265282184865763, 'match': -0.3918543339652658, 'map': -0.44059207350940277, 'began': -0.5248333961688537, 'yet': -0.3471898892387219, 'slytherin': -0.5225231966677508, 'ter': -0.43663664219570886, 'boy': -0.3733249877174564, 'air': -0.5272565228202469, 'sight': -0.3958109975944048, 'opened': -0.41464497226865127, 'rat': -0.3339447556680149, 'stan': -0.3328720753568432, 'robes': -0.3765017524718864, 'side': -0.38643473917234444, 'azkaban': -0.32776779377627513, 'slowly': -0.34783371202118196, 'small': -0.353687882635126, 'quite': -0.3622977936231381, 'dear': -0.3181866319882837, 'outside': -0.4981465792483096, 'tried': -0.34840323049919336, 'course': -0.3437838469342981, 'yeh': -0.4472396658282604, 'peter': -0.4556477928062975, 'window': -0.3663747814829155, 'broom': -0.42512348867717914, 'muttered': -0.23287141304484155, 'else': -0.3121039103485214, 'quietly': -0.17445343603264685, 'dementor': -0.35680420451126404, 'best': -0.34959574242317515, 'fell': -0.377996250566316, 'arm': -0.3875268886223478, 'yelled': -0.36371734163552116, 'mouth': -0.317872812681845, 'mean': -0.3812094643932262, 'yeah': -0.23833774473739364, 'anyone': -0.3418368670074958, 'field': -0.43923006083303284, 'wont': -0.4389659570452413, 'okay': -0.3140778346195514, 'standing': -0.2714530897499168, 'found': -0.456353508253536, 'later': -0.43296456787827525, 'feeling': -0.352123005573758, 'common': -0.4379927683091566, 'books': -0.35999059551933, 'life': -0.25082342452258893, 'ministry': -0.3624590271546265, 'hard': -0.28958164264295605, 'coming': -0.37655235938768167, 'dog': -0.2892360417187588, 'minutes': -0.4805606077444714, 'snitch': -0.3550834678603194, 'wanted': -0.3300180623082395, 'wizard': -0.2922909148225235, 'find': -0.4604156492027976, 'leave': -0.3634629047361744, 'already': -0.33353412828396134, 'things': -0.4041823903450186, 'talking': -0.3673051015967493, 'believe': -0.36053230757944016, 'please': -0.26280499429378634, 'trunk': 
-0.3979385605034909, 'stared': -0.15095433461295346, 'cup': -0.4356135045889362, 'dead': -0.29784699453725694, 'kept': -0.4680563099830738, 'give': -0.31460153540736063, 'whole': -0.4548191151662004, 'grounds': -0.3174625710587177, 'sitting': -0.1754239333884582, 'stop': -0.268554050454618, 'ground': -0.2923891928268394, 'snapes': -0.3164734777525601, 'called': -0.16328586741912365, 'slightly': -0.3438641239385722, 'getting': -0.33959458892481165, 'full': -0.25440518885985597, 'lost': -0.27468329879816866, 'crowd': -0.3107559365495924, 'hippogriff': -0.30942921459955675, 'empty': -0.42466837979258354, 'watching': -0.2468038837453529, 'happy': -0.3462964808937596, 'hermiones': -0.3767878964124062, 'youll': -0.305851932095456, 'thinking': -0.25109727796814724, 'pomfrey': -0.13307288084097182, 'moved': -0.30532897693197875, 'hadnt': -0.2540732178033967, 'voldemort': -0.4954765167726223, 'second': -0.305762424974042, 'case': -0.25268601971453164, 'watched': -0.2734143136230628, 'man': -0.3569484010668036, 'stopped': -0.38839227175850766, 'tea': -0.1781321833870223, 'havent': -0.27968996656216083, 'sit': -0.17761688561160688, 'father': -0.24660047610357444, 'turn': -0.24353258147853404, 'feel': -0.29389051778853253, 'run': -0.211589286078808, 'cold': -0.3439169229874384, 'tower': -0.2958937017729499, 'caught': -0.32536145269700667, 'able': -0.385159029576849, 'however': -0.18498603764945723, 'morning': -0.374998882352224, 'dad': -0.4327191621918188, 'youd': -0.3508738094570856, 'together': -0.3240647221463804, 'move': -0.2706010668246189, 'hit': -0.3632366681567216, 'lupins': -0.29867648614826053, 'crabbe': -0.41352591361410257, 'owl': -0.2771547649771829, 'nearly': -0.29759712582287035, 'witch': -0.4153625683331886, 'house': -0.2175888238230342, 'ten': -0.2533306656348614, 'light': -0.29454390144444964, 'tiny': -0.4328402184429306, 'ask': -0.2658338348312285, 'classroom': -0.26126769068015443, 'boggart': -0.3377543793236378, 'book': -0.3683385142398446, 'top': 
-0.34355509943692597, 'read': -0.2572823534241081, 'work': -0.16061053083401108, 'enormous': -0.3126691750733934, 'past': -0.17825937268868697, 'raised': -0.2080861696239776, 'er': -0.14466099941434216, 'staircase': -0.4662335845548397, 'minister': -0.2131091099424152, 'telling': -0.272379734811925, 'malfoys': -0.4075344861414752, 'listen': -0.10929611272165747, 'roared': -0.21797183867048603, 'appeared': -0.20580293894719678, 'sorry': -0.23890439986679884, 'pocket': -0.288231437520453, 'sound': -0.2668779450116772, 'bag': -0.32020744219540614, 'sort': -0.22530918594956334, 'place': -0.2962197222745053, 'entrance': -0.32594133920231605, 'goyle': -0.3312571590933038, 'expecto': -0.16801803765457105, 'lot': -0.2625081543052464, 'held': -0.2757127864809704, 'either': -0.31806907913074356, 'always': -0.34667386220985763, 'james': -0.35684554824350695, 'shut': -0.26139530060695637, 'office': -0.355425464522554, 'shouted': -0.29718121635042644, 'corner': -0.14046074738561942, 'shaking': -0.22120290780697402, 'close': -0.24384356302470564, 'kill': -0.16115207158758707, 'understand': -0.12718779844737865, 'ravenclaw': -0.3510613576260826, 'petunia': -0.27853579871457934, 'friends': -0.19128704632665763, 'loudly': -0.17835506354192127, 'holding': -0.23310474920816396, 'five': -0.16218456171992643, 'walked': -0.2466874208185574, 'shoulder': -0.1422191069782422, 'someone': -0.21398718586509594, 'magical': -0.20579218962294593, 'taking': -0.11454915826181362, 'making': -0.2504581564602617, 'pointing': -0.1824562820652009, 'many': -0.24800878817225516, 'parents': -0.2614675135535692, 'teacher': -0.17317356109868962, 'seized': -0.20100805032709912, 'picked': -0.20926797401162203, 'remember': -0.3065517477395801, 'rather': -0.1890213597828072, 'eye': -0.19781608774970205, 'taken': -0.1971752439257744, 'supposed': -0.15628525427526674, 'chest': -0.2872823435832202, 'watch': -0.13304421245790116, 'shes': -0.2796083719838562, 'pointed': -0.23180807954771, 'lesson': 
-0.1472591528979325, 'climbed': -0.04083974659593975, 'corridor': -0.1210372260375165, 'times': -0.25662862835629374, 'parchment': -0.13165868243609596, 'word': -0.32483243135173123, 'almost': -0.31086224360200887, 'thin': -0.26931024501110484, 'theyre': -0.10501694538355939, 'tree': -0.242346965142977, 'fast': -0.264750494737473, 'usual': -0.13483364477442983, 'neck': -0.1586033559475837, 'idea': -0.2108001159303304, 'everything': -0.1432089116998099, 'breath': -0.14067497667484294, 'patronus': -0.08154847829011175, 'followed': 0.1162851937372294, 'moving': -0.060752043460865275, 'christmas': -0.030356554313839212, 'fat': -0.3008700925183188, 'alone': -0.22303441472993085, 'since': -0.3004669813780729, 'bus': -0.25378521213697597, 'chocolate': 0.03621414753604124, 'defense': -0.2115318383921258, 'fire': -0.10992383503387297, 'creatures': -0.1269174625295662, 'noise': -0.2450043474207156, 'lavender': -0.15915584149097442, 'werewolf': -0.1277026988619141, 'muggles': -0.15810167616798568, 'near': -0.31245044968654406, 'use': -0.1713632016184624, 'silence': -0.1493886116344825, 'extremely': -0.1745664226512587, 'silver': -0.2148215499936752, 'person': -0.1142015652239898, 'form': -0.18884230854088255, 'speak': -0.08382596329522721, 'red': -0.16369599607523794, 'cat': -0.11597851073196642, 'patronum': -0.07421570497578969, 'catch': -0.04799686927135146, 'indeed': -0.10255612743418167, 'dursleys': -0.24993456199665, 'start': 0.04601530704966239, 'loud': 0.04060148907707252, 'trouble': 0.14302650227754723, 'threw': -0.0155230990574208, 'hundred': -0.34156150875687086, 'try': -0.12755459010497192, 'isnt': 0.014192053317913626, 'point': -0.14479253680905563, 'heart': -0.1581907927226934, 'words': -0.06286194854970426, 'dangerous': 0.005701205512679945, 'stay': -0.1587140721966243, 'walking': -0.06554473999212548, 'finally': -0.0019563754482845035, 'leg': -0.32530177092078566, 'leaving': -0.2723231442803349, 'wrong': -0.07836939436734872, 'quiet': -0.10212076969565664, 
'chair': -0.11743339438618135, 'invisibility': -0.11155842115318247, 'steps': -0.14830315967441224, 'arts': -0.2623212198124764, 'closed': -0.1411680974384731, 'students': -0.1084404212374625, 'except': -0.10901170465746703, 'headmaster': -0.19536689585286096, 'portrait': -0.2842771983621382, 'points': -0.039535414414606174, 'severus': -0.17427797103084308, 'chapter': 0.379177750408984, 'foot': -0.21669943568670363, 'muggle': -0.08554238820334437, 'hedwig': -0.2301558473258338, 'finished': -0.1212057984080559, 'killed': 0.014855624202094157, 'aside': -0.20547406274234126, 'gold': -0.1245539852309046, 'mrs': -0.14171328345384318, 'hurried': -0.1361295071361226, 'train': -0.08570240497836444, 'shall': -0.08772407873250136, 'broke': -0.15125667540441634, 'stand': -0.12920120921081682, 'saying': -0.0296746222871258, 'ern': -0.03532342560905907, 'passed': -0.08983232348341023, 'running': -0.06797808004560403, 'screaming': -0.10709328252706432, 'sir': -0.2579840909027078, 'stomach': 0.08994322886694901, 'glasses': -0.09029058007086592, 'teachers': 0.03127733646052494, 'least': -0.034472254001263254, 'bad': -0.131871994314194, 'used': -0.19734375087188957, 'letter': -0.012555731837033086, 'beneath': -0.2434135623647079, 'huge': -0.2282056119097787, 'several': -0.31554791424842404, 'burst': -0.261762670767239, 'horrible': -0.21067148787074572, 'fact': -0.06248991220181975, 'gasped': -0.004324969957323348, 'forest': -0.22419851445231076, 'completely': 0.05615961739859654, 'chance': 0.10207639898671245, 'week': -0.14867436038654397, 'street': -0.16299128585706296, 'meant': -0.15508997684141526, 'arms': 0.10417047654229032, 'died': -0.1591461813820235, 'sign': -0.21494146260047886, 'third': -0.13151725700516365, 'name': 0.04544352669783208, 'mum': -0.1378422790675687, 'heads': -0.0095518515135504, 'theyd': -0.14203097358935826, 'desk': -0.06174841667023207, 'care': -0.1384686058710165, 'doors': 0.05886095487848555, 'wind': -0.14203963781934878, 'wall': -0.02514483808579191, 
'met': -0.014555628182324759, 'white': -0.04649545703443824, 'low': -0.16808974750794384, 'listening': 0.038327327979675176, 'willow': -0.17353477189804967, 'reckon': -0.17126966177896882, 'wing': -0.14388928693987424, 'wait': -0.09050751682112064, 'honeydukes': -0.20785704777511238, 'innocent': 0.034559850897514885, 'rosmerta': 0.17296671244502274, 'lying': -0.08233536387545638, 'real': 0.040640351887577884, 'call': 0.0013260497671907453, 'wizards': -0.15116466918296947, 'ear': 0.01114105279816437, 'seconds': 0.023708648680793046, 'whether': -0.13374491491756574, 'tail': 0.05790896113074211, 'hope': 0.11336558117361033, 'doesnt': 0.048290759030042804, 'cabin': 0.14299356792401602, 'visit': -0.13773931630748976, 'straight': -0.10728939219602177, 'thick': -0.14407114015588127, 'knight': -0.1090868692639704, 'worse': 0.02133307290527716, 'dyou': -0.021975991287801297, 'divination': 0.07593213910304387, 'death': -0.015624748994441263, 'theyve': -0.09674343558358559, 'hold': -0.06833669253966876, 'quick': -0.008619943644538716, 'hole': 0.027686591370968197, 'filch': -0.05309502199852507, 'dudley': -0.03767093726728855, 'talk': 0.11808417397154787, 'voices': -0.049355187932598396, 'perhaps': 0.04815064455498264, 'upon': 0.0017003587305593022, 'fingers': -0.06636211277049353, 'world': -0.06256662881749248, 'ready': 0.08643800920728271, 'shot': -0.03002171534261804, 'tears': 0.08155589661637111, 'hissed': 0.224503103440121, 'starting': -0.07479641042477775, 'ran': 0.114911924885881, 'half': -0.17210655176849954, 'werent': -0.018906448932824445, 'mad': 0.07804675137848391, 'crystal': -0.21750596540929, 'nobody': -0.09873894834031845, 'looks': -0.001767267795497575, 'sent': 0.006195059934323864, 'hospital': -0.06475025805873601, 'trelawneys': 0.16680906655818759, 'friend': 0.08100697017118726, 'worst': -0.07624990620325797, 'turning': 0.11539884941949033, 'cage': -0.100186879342938, 'instead': 0.10285553846582861, 'legs': -0.06980866867333743, 'furiously': 
0.10909680861739833, 'glass': 0.0129199803847854, 'brought': 0.295616522759712, 'managed': -0.08123050155941876, 'far': 0.10145679347863887, 'whose': -0.009343742970284872, 'laughter': -0.00924580263142434, 'kind': 0.06570132064573456, 'exactly': 0.0889199846572211, 'clear': -0.0012602429403933474, 'news': -0.01972945216459076, 'angry': -0.09342124270540825, 'twelve': 0.025155753485942282, 'ears': 0.06250933166414513, 'certainly': 0.07949413300200243, 'backward': -0.1680835240593651, 'beside': -0.08050662834107772, 'trees': 0.013288041660763313, 'lets': 0.10528852696568487, 'hours': -0.01016993478133989, 'miss': -0.006963323484046748, 'tonight': -0.1155957487454621, 'scared': -0.0439299494479946, 'ahead': 0.010761318750048474, 'parvati': 0.06695452412624, 'grim': 0.09198661353551363, 'became': 0.07084052680531235, 'broomstick': 0.07702150624776018, 'given': 0.042151429790558116, 'hidden': 0.1268654806553485, 'direction': -0.0717224787956028, 'broken': -0.06979792247308798, 'lay': 0.002719031636320738, 'carrying': 0.11526555404225865, 'stuff': 0.1932699487577028, 'snapped': -0.13918773106421486, 'hiding': 0.03944414892793765, 'true': 0.2687395717217044, 'upstairs': 0.18524152197244415, 'memory': -0.006341526260176091, 'story': -0.057103753448860366, 'pulling': 0.1559140015265254, 'excellent': 0.0591888498671664, 'free': 0.041783992055099715, 'deserted': 0.1305082504709226, 'windows': 0.1563119579862269, 'wearing': -0.07549435227953613, 'disappeared': 0.10651687030599268, 'approached': 0.08365521943708477, 'continued': 0.20269775393890174, 'deep': 0.09354566312913969, 'matter': 0.13303522510026952, 'entered': 0.07364896143708186, 'rain': 0.10912910225805217, 'terrible': -0.035233339520552395, 'stone': 0.1483486698121976, 'flitwick': 0.12102603142303328, 'buckbeaks': -0.06728391746356735, 'peeves': 0.20808763224787077, 'nose': 0.10843184595260556, 'teeth': 0.1805299241894873, 'probably': 0.19431873220615783, 'stairs': 0.22170820373150332, 'family': 
0.10608876811590376, 'number': 0.0797080647651817, 'cauldron': 0.12758387678923405, 'grabbed': 0.17167501543392685, 'granger': 0.1472988447258822, 'soon': 0.018667869686675623, 'green': 0.08894739687068465, 'lily': 0.02058410720384763, 'water': 0.2233152092344674, 'wings': -0.03585160454294846, 'picture': 0.10499002052518884, 'ginny': 0.3552147253643628, 'term': -0.06246521426069668, 'glanced': 0.20429244919437184, 'pair': -0.12984495339317348, 'apart': 0.04959954943747142, 'nervously': 0.3859814841791826, 'big': 0.09566200740613405, 'boys': 0.15567407929342358, 'fine': 0.1908591956336841, 'smile': 0.2028549838396121, 'abruptly': 0.19637183296952487, 'happen': 0.1453732362945038, 'arrived': 0.011989872136142663, 'unless': 0.028680693103137606, 'youknowwho': 0.240191912804233, 'laughing': 0.16607341556026306, 'darkness': 0.15880037592159224, 'eat': 0.1022249886793445, 'strode': 0.10449624125768206, 'dean': 0.06852863559247277, 'dropped': 0.1083968983472608, 'truth': 0.167631683680861, 'nearer': 0.07261770288841687, 'headed': 0.016233405554574826, 'vanished': -0.021409404151626368, 'roots': 0.1362141674464518, 'hooch': 0.2074314231071522, 'committee': 0.055121504753433644, 'pushed': 0.08695406614268851, 'afraid': 0.11488195167517137, 'allowed': 0.13589094937275736, 'bottle': 0.07861013532834202, 'carefully': 0.08192062730270402, 'reason': 0.14904450184870355, 'break': 0.06953274945196374, 'answer': 0.3746686086307662, 'late': 0.11159831753428104, 'seem': 0.09420575586193657, 'knees': 0.1724808204444411, 'tightly': 0.06665913008804682, 'permission': 0.15378301881676595, 'walk': 0.09746787225573557, 'today': 0.17592655622490375, 'hardly': 0.12249192610103113, 'footsteps': -0.044101776063928635, 'tight': 0.1010038338618056, 'hat': 0.06372240349329679, 'needed': 0.12947603400680688, 'shook': 0.31407750854361466, 'sank': 0.14335031560502545, 'covered': 0.1404195194264433, 'em': 0.07149413350364509, 'job': 0.38189670331509556, 'ah': 0.30899578821597057, 'throat': 
0.19873674146353132, 'thank': 0.2754987161983641, 'seamus': 0.18439191244635003, 'gryffindors': 0.04764657165210679, 'waited': 0.11356276266844526, 'playing': 0.06670522604996292, 'closely': 0.2406849835643526, 'wands': 0.1937269767080323, 'wants': 0.23902518635142306, 'knows': 0.006759829868252918, 'potion': 0.20940132154802246, 'lake': 0.1139540867929328, 'lady': 0.09311911318676169, 'grass': 0.1567591539369918, 'stupid': 0.1082045689272624, 'angelina': 0.03989071897693613, 'potters': 0.06295955729585204, 'holidays': 0.2954941469989318, 'forced': 0.2820459669694164, 'essay': 0.4151192957727699, 'closer': 0.19843632032919567, 'particularly': 0.2325891406843773, 'locked': 0.020701519353448928, 'days': 0.32742297344081767, 'nasty': 0.24085194347559075, 'vernons': 0.11441992557322242, 'opposite': 0.17725191567571874, 'wizarding': 0.27063852119052195, 'birthday': 0.1737864373206221, 'worried': 0.19696580816521933, 'lord': 0.17820594666960846, 'flew': 0.08276440521558456, 'paper': 0.16588190453257753, 'present': 0.23328885148698275, 'happily': 0.28673065040681595, 'ago': 0.03470203086662908, 'nimbus': 0.024289730220993847, 'view': 0.21542124440774893, 'edge': 0.1574278423843251, 'asleep': 0.2748028529717203, 'within': 0.145363338557075, 'none': 0.18981241841872193, 'added': 0.2416023992879796, 'mother': 0.29214280681883686, 'snarled': 0.19776941782418841, 'furious': 0.22105947686802205, 'sudden': 0.16004193613974446, 'hurt': 0.33989638872275973, 'high': 0.01265591805076337, 'rolled': 0.1397418668678166, 'sleep': 0.3044353107525449, 'classes': 0.2969780016614021, 'led': 0.04521081892716376, 'laugh': 0.17265830299783907, 'slipped': 0.11518347058167568, 'ball': -0.019539496597447418, 'fighting': 0.2377140098007957, 'bags': 0.2328105483701453, 'glittering': 0.22437422047643168, 'dormitory': 0.4314415805776475, 'spoke': 0.13485834291734866, 'slytherins': 0.04435500602561417, 'goal': 0.30304598897054397, 'alicia': -0.009474787515611085, 'katie': 0.010096311969712016, 
'working': 0.18562174994985073, 'whomping': 0.19915714059382367, 'marauders': 0.11909142018052744, 'pettigrews': 0.038159468053167084, 'remus': 0.3113914270455744, 'summer': 0.23885576337466555, 'homework': 0.1559438671019109, 'ink': 0.13202250222690576, 'four': 0.02591642267116668, 'car': 0.09441763630179574, 'funny': 0.1814978825890683, 'alive': 0.11598414188394465, 'gazing': 0.2634616155959473, 'seeing': 0.28601555723801503, 'hesitated': 0.2970798812661527, 'wondering': 0.3317815354791066, 'leapt': 0.2743820698609559, 'middle': 0.20293596633904837, 'pleased': 0.13762248955228273, 'brown': 0.17712913693128313, 'daily': -0.009117926146216895, 'prophet': -0.04743706455373429, 'win': 0.10224987959860339, 'poor': 0.19733536137737043, 'heavy': 0.19194925733924134, 'breakfast': 0.34901450328233946, 'piece': 0.21777217212557465, 'finger': 0.08106423366158234, 'forget': 0.3099168946241396, 'lunch': 0.21295375294558305, 'grip': 0.18156447357007202, 'decided': 0.34498979175644406, 'die': 0.2590492075440757, 'ceiling': 0.343267239770489, 'bent': 0.24659913778782014, 'fall': 0.26005167540759616, 'fallen': -0.06289687809867939, 'longbottom': 0.355352386138377, 'slid': 0.2958075214575227, 'leaky': 0.15978015381147728, 'tom': 0.10748264621522519, 'waiting': 0.36794622377436736, 'game': 0.2883076435749124, 'shop': 0.18443547579400163, 'somebody': 0.16185286089911397, 'sharply': 0.37449360535833937, 'seriously': 0.30628354341894803, 'impossible': 0.4217549935904171, 'twenty': 0.12451270255882008, 'crack': 0.3939360626453881, 'hufflepuff': 0.23426198381912966, 'helping': 0.23168793414379746, 'rope': 0.049074407264800245, 'others': 0.05797564843771056, 'hippogriffs': 0.3186987285977317, 'hell': 0.05038399704628857, 'fifty': 0.2019083529949197, 'oliver': 0.24041785233276766, 'cho': 0.1211730859930469, 'also': 0.35082399400065284, 'frowning': 0.4399061889232022, 'pain': 0.4784905542623562, 'spent': 0.15555654978559244, 'notice': 0.17163177033669452, 'expression': 0.5162249813167117, 
'dare': 0.318781903643806, 'oclock': 0.44018039148474125, 'hour': 0.11701857208273182, 'leaned': 0.4415902459760608, 'escaped': 0.26383451010737236, 'attack': 0.1638535212692136, 'sky': 0.11297548074387187, 'landed': 0.17289420354214394, 'soft': 0.23493196681580206, 'gray': 0.26895452271871023, 'thanks': 0.3208113284441683, 'waving': 0.2990137208048109, 'final': 0.2200854466932379, 'says': 0.4237333499234548, 'bet': 0.2288265422529158, 'angrily': 0.3442184504205029, 'grinning': 0.3892857554023076, 'cross': 0.33444474067245444, 'fixed': 0.3771595349296215, 'minute': 0.513617796167493, 'box': 0.2838029920867853, 'knocked': 0.3557703911764279, 'flat': 0.17190843767752367, 'smiling': 0.4722599319478081, 'live': 0.4592145675925847, 'evening': 0.3907699132807694, 'walls': 0.25199976671085234, 'explain': 0.3381316121849452, 'fence': 0.35417522487234165, 'halt': 0.2850814856231092, 'familiar': 0.3822424294390422, 'repeated': 0.23808520677883294, 'bar': 0.19949505443775623, 'passage': 0.28473358182479186, 'safe': 0.5204433526929799, 'cried': 0.08161979620229848, 'future': 0.336574081824057, 'arithmancy': 0.37262325133312657, 'definitely': 0.3355492106421252, 'joined': 0.4281062430422927, 'girl': 0.17111227600675166, 'maybe': 0.20871439741707007, 'dumbledores': 0.12083477863278398, 'feast': 0.369065956969809, 'filled': 0.2915588334162244, 'become': 0.23624039514929726, 'cadogan': -0.004717100505124448, 'rose': 0.1905701695489056, 'practice': 0.4419363146787612, 'seeker': 0.34520270771812983, 'forgotten': 0.15670864332582335, 'butterbeer': 0.30377027192833167, 'secretkeeper': 0.20236671337866083, 'macnair': 0.3088195280030938, 'charm': 0.5125504411268543, 'drive': 0.2925139228178161, 'hoped': 0.41656207705538584, 'speaking': 0.5469362059935718, 'frightened': 0.14352136490844078, 'realized': 0.2992056300954835, 'tomorrow': 0.33867053774042444, 'suppose': 0.31220413771758715, 'clearly': 0.3193801732601945, 'lucky': 0.3884274127306377, 'beak': 0.29081949928008527, 'moon': 
0.29301108683569355, 'flying': 0.28350907682781346, 'london': 0.4569787083422835, 'note': 0.3136760081136329, 'change': 0.5157264419525402, 'difficult': 0.4107290963099268, 'missed': 0.19897525411810776, 'longer': 0.3003378220900457, 'helped': 0.2813816023259125, 'blood': 0.4930913364925262, 'subject': 0.2673852910372173, 'wardrobe': 0.3846619943038761, 'shrieking': 0.4780222469808937, 'nodded': 0.3664011659971613, 'eh': 0.4023092793394035, 'single': 0.19660155460909726, 'putting': 0.4155960399589845, 'ones': 0.4569425281053684, 'wiping': 0.5568564738690255, 'expect': 0.32566316098254444, 'effort': 0.3986362947782026, 'expelled': 0.2736872788615582, 'wide': 0.2842376534059142, 'stepped': 0.4794270542052116, 'different': 0.2854733878441891, 'skin': 0.23268676822917445, 'figure': 0.37003023877528646, 'giving': 0.4535657339161236, 'short': 0.16343894786884675, 'using': 0.3246296034460133, 'bin': 0.2951736834702041, 'saved': 0.5025259087791628, 'spotted': 0.4737788321363556, 'tables': 0.1944971394154045, 'hasnt': 0.5037709771871259, 'twice': 0.2299361917715175, 'shadows': 0.321924251205994, 'scarlet': 0.20506818688768166, 'theyll': 0.3067496083397431, 'midair': 0.5349846672388463, 'seat': 0.5106657201772952, 'breaking': 0.3991365017493175, 'marble': 0.346953709720523, 'muttering': 0.3182726033692745, 'leaves': 0.2585225787047866, 'ladder': 0.3602497067057282, 'beat': 0.27868860208860624, 'cut': 0.37593238183859407, 'quaffle': 0.12168098424818274, 'bludger': 0.3376488811739197, 'oneeyed': 0.32647471541144885, 'prongs': 0.23739469527860785, 'tunnel': 0.3357873948258481, 'page': 0.35713541615618793, 'known': 0.5912474483082296, 'less': 0.2554669797524906, 'bellowed': 0.5277916179704697, 'obviously': 0.30765711544092156, 'send': 0.44329480148387185, 'stretched': 0.4297626746090035, 'inches': 0.35979496620618223, 'barely': 0.3733358647942212, 'soared': 0.29063412444661774, 'join': 0.30230075728795225, 'ripped': 0.3202170020671363, 'trembling': 0.4779593710446266, 'six': 
0.4984122539906529, 'whod': 0.2528995591548156, 'laughed': 0.4321461425201628, 'bound': 0.353645455250865, 'gleaming': 0.452547478772682, 'broomsticks': 0.4058218815743301, 'snap': 0.3836922979594177, 'worry': 0.4452496807459606, 'return': 0.4669380091056828, 'marges': 0.1905903447986481, 'wheres': 0.5451205793846644, 'force': 0.3253658901639396, 'escape': 0.4745773026765401, 'forbidden': 0.2684276765849178, 'warning': 0.5120259865744529, 'screamed': 0.3361680721250453, 'stuck': 0.3675278687035149, 'heading': 0.3080740456746126, 'serious': 0.5348746667699321, 'thrown': 0.2964022178210701, 'cornelius': 0.4375310988698596, 'anyway': 0.36854174054666883, 'diagon': 0.2644753707291194, 'alley': 0.2391528751436413, 'excitedly': 0.5472200530756413, 'lose': 0.4253886999394558, 'packed': 0.40886683153426573, 'thomas': 0.357532934668013, 'meet': 0.4371528345735351, 'checking': 0.43788912088066406, 'ages': 0.4659524603266301, 'lowered': 0.3158710476916558, 'louder': 0.2152478253795272, 'terrified': 0.4831893844181736, 'shining': 0.507643340928945, 'question': 0.5419346222029093, 'branches': 0.4793693995754717, 'lessons': 0.34519451332108086, 'confused': 0.6627281337998422, 'afternoon': 0.3529360349992681, 'riddikulus': 0.4687984664463136, 'joke': 0.1520197127392864, 'strong': 0.31936950063784014, 'charms': 0.25308417518442816, 'silent': 0.493583645278342, 'falling': 0.4273218019417102, 'betrayed': 0.43942200353733196, 'ward': 0.545143883052364, 'possession': 0.17257999242486502, 'flint': 0.1722358052783245, 'highly': 0.5682251357370275, 'secret': 0.41096190331442484, 'write': 0.30141613154964986, 'paused': 0.4668793195800738, 'round': 0.5387836061215924, 'weird': 0.44594874058777995, 'cupboard': 0.2719344049172733, 'living': 0.4257831135702729, 'possible': 0.4511232038218362, 'potions': 0.4652519269147652, 'delighted': 0.5971848884244585, 'jumped': 0.5892494164448256, 'spot': 0.7492510310722124, 'weeks': 0.4003371856979515, 'thirteen': 0.643136169809274, 'hedwigs': 
0.44738333559754057, 'whatever': 0.4105265924135557, 'bright': 0.3498403865675367, 'voldemorts': 0.4732176171894463, 'golden': 0.309829124890568, 'growing': 0.17027052504426424, 'lower': 0.1181945738646701, 'owls': 0.4330970303776494, 'tied': 0.4538931035304598, 'errol': 0.3091321600574404, 'carried': 0.5874586929026968, 'grin': 0.5173247102386285, 'weasleys': 0.28701344490261294, 'seven': 0.3389470495876347, 'remembered': 0.4834681481726659, 'sun': 0.5910581547179599, 'sneakoscope': 0.60382941612333, 'loads': 0.4808263062737621, 'knowing': 0.5160472454025005, 'handle': 0.25602606230581293, 'thousand': 0.4320943588573138, 'dogs': 0.5543469425622427, 'monster': 0.4776255536173938, 'sideways': 0.3735041876795954, 'ouch': 0.6545099669342639, 'struggling': 0.4345920804407187, 'village': 0.3389538937990309, 'woke': 0.6764678006971834, 'hot': 0.3693846293367188, 'line': 0.4271132350746857, 'immediately': 0.42168282981051536, 'peered': 0.6217521575346409, 'deal': 0.33496416357029984, 'thoughts': 0.5747576975740424, 'growled': 0.4503890086089161, 'attention': 0.3569917556666596, 'returned': 0.5094419721265082, 'suspiciously': 0.6748182389997055, 'itll': 0.4388541110407457, 'calmly': 0.5344496495158111, 'sighed': 0.608685227635433, 'fault': 0.6175846667872777, 'clutched': 0.3926176192444521, 'arent': 0.5694480373611677, 'changed': 0.2593393328962571, 'deeply': 0.6677490284536506, 'heaved': 0.4710791260818595, 'means': 0.6005192645242697, 'money': 0.4508897474146109, 'bang': 0.5725000804303539, 'step': 0.4120405307519123, 'calm': 0.5429392027002179, 'fear': 0.6240835074936512, 'forgot': 0.5183833863014404, 'whisper': 0.613218311655458, 'examining': 0.4616556938762672, 'beaming': 0.5234946174740325, 'parlor': 0.35177169819377, 'anymore': 0.6046770740807405, 'among': 0.37836095920308555, 'excited': 0.28489855965079997, 'surprise': 0.6108433910824563, 'hurrying': 0.30672776950751973, 'drew': 0.3789549288957005, 'balls': 0.4754525984651952, 'staying': 0.3159408440387718, 'rats': 
0.5864656965139596, 'stuffed': 0.5895614633951969, 'hanging': 0.5811360696575196, 'doubt': 0.40628787188267856, 'check': 0.4412178369769257, 'bring': 0.5144726380646842, 'search': 0.43196494897234017, 'compartment': 0.6119846115291117, 'careful': 0.38808804131641916, 'pale': 0.41262842335988087, 'corridors': 0.5036785279456014, 'patch': 0.3538156773017245, 'reach': 0.6041828185597395, 'silvery': 0.6072235842912065, 'mud': 0.7615413498652485, 'applause': 0.4928487784916707, 'beyond': 0.40588972420260544, 'pumpkin': 0.2788400704757952, 'group': 0.4491904141411048, 'animal': 0.4109708038123612, 'harder': 0.4087871012323629, 'fer': 0.2769741307389885, 'trust': 0.3112235958581978, 'conversation': 0.478811748419566, 'imagine': 0.507015834547184, 'zonkos': 0.2875425407765658, 'fly': 0.4776861305942346, 'weight': 0.4675704756499178, 'stadium': 0.6948213987803382, 'cheering': 0.28592133004289344, 'moments': 0.4553048092519855, 'wormtail': 0.304709220221954, 'anybody': 0.318381840268673, 'exam': 0.4720558571903251, 'executioner': 0.509084867358726};
dictY_HP = {'harry': -0.016016617542326254, 'said': -0.0846381965695806, 'ron': -0.028594116749923487, 'hermione': -0.031997370291108745, 'professor': -0.07324567423423661, 'lupin': -0.05145587056979269, 'back': 0.008717663956850369, 'black': -0.024374962587382688, 'one': -0.0014790206169297024, 'around': 0.007158249231080023, 'like': -0.007551844854668414, 'looked': -0.008039235949635855, 'could': -0.021270680189314203, 'see': -0.03513132058715266, 'got': -0.011017997600667254, 'snape': -0.02080233669862581, 'hagrid': -0.017946497743338306, 'didnt': -0.028234691743285755, 'get': -0.0149766227677887, 'know': -0.03524052329700306, 'well': -0.036964532917767134, 'harrys': 0.016040682445671743, 'still': -0.0020916824849146875, 'eyes': 0.015631760341675197, 'go': -0.035921626893634116, 'would': -0.027203905938374875, 'dont': -0.04926825956352247, 'time': -0.0063884531279824255, 'though': 0.004573460901625889, 'face': 0.009313909171336095, 'going': -0.03388001980404003, 'looking': -0.003353387702872116, 'right': -0.008530703958983752, 'think': -0.04019281904323126, 'dumbledore': -0.03485135876394768, 'malfoy': -0.010746211185286264, 'saw': 0.0055525695846648645, 'come': -0.01780474224046502, 'head': 0.010687634944096518, 'voice': -0.02131949569603817, 'door': 0.0295851200622914, 'away': -0.002734778807754187, 'im': -0.03276933862949218, 'sirius': -0.024220874348957727, 'toward': 0.012303302649684334, 'hes': -0.02215785931799915, 'something': -0.008281040268745907, 'look': -0.014696580534267872, 'heard': -0.00031269429708030006, 'behind': 0.015756553248961722, 'last': 0.002465971510338325, 'hand': 0.009588725478466302, 'wand': 0.011980037548345824, 'ever': -0.014755128467669219, 'gryffindor': 0.015632979508743695, 'turned': -0.0006559323338145448, 'room': 0.02203807983282681, 'never': -0.018752700593499805, 'scabbers': -0.0075867730776104265, 'way': 0.0010864009751442288, 'next': 0.008575587806049731, 'thought': -0.022558982706519397, 'told': -0.02092455330108928, 
'went': 0.0011856866092273598, 'good': -0.008091195736335438, 'us': -0.02224354275185029, 'fudge': -0.02216407920774817, 'dementors': -0.0150941036144571, 'neville': -0.022193618921124546, 'potter': -0.02599477386779362, 'weasley': -0.017739715807884523, 'mcgonagall': -0.04935377703753135, 'hed': -0.01581733031264888, 'front': 0.016787780099320446, 'long': 0.006955657658642837, 'made': 0.0020438254055897232, 'came': 0.0017615414547145938, 'ill': -0.019631179072071823, 'two': 0.0010677652392539073, 'first': -0.005981974478432485, 'moment': 0.0018148852452575168, 'crookshanks': 0.0025107149391941372, 'aunt': 0.2569105629771705, 'pettigrew': -0.013438635361177785, 'hogwarts': -0.009451209709689396, 'want': -0.03661334751162304, 'inside': 0.00999680303793432, 'seemed': 0.006304920558018409, 'table': 0.008951793287971322, 'took': 0.0029520996001230534, 'left': 0.001985477807872261, 'knew': -0.010351193685086928, 'wasnt': -0.00956653657998579, 'madam': -0.02461113337144888, 'uncle': 0.2302688559067907, 'even': 0.0069504642748244826, 'suddenly': 0.001645765124599755, 'large': 0.019153506728511207, 'really': -0.021379007258517525, 'castle': -0.001384983796920527, 'dark': -0.004001504221111756, 'anything': -0.028609127184846688, 'tell': -0.016655699888473523, 'trying': -0.008025094942992982, 'wood': -0.0022401259227247472, 'class': -0.002848587728777488, 'hands': 0.004610435513958901, 'felt': 0.002784016898177249, 'let': -0.0046171992466245695, 'three': 0.003987690683069731, 'thing': -0.011324189244822279, 'make': 0.0017280426469562687, 'great': 0.01631358261309367, 'much': -0.013637943389865393, 'youre': -0.01600031681629517, 'buckbeak': -0.0024806782774909088, 'say': -0.0252635154638671, 'couldnt': -0.01369603587431051, 'ive': -0.017549424343100736, 'hear': -0.005950893964459304, 'fred': -0.023915381947300425, 'bed': 0.0004093467902851024, 'cant': -0.022221548895666132, 'firebolt': -0.0017284005238104385, 'open': 0.015024200490075981, 'feet': 0.012991378626079278, 'need': 
-0.01687001606850604, 'another': -0.0043710556739635165, 'put': 0.003087366262641312, 'little': 0.007329248621899436, 'stood': 0.011655634872533533, 'gave': -0.000981312850282476, 'across': 0.009946884634182024, 'oh': -0.01892708907471014, 'trelawney': -0.024819229807478558, 'year': -0.0005991775236693493, 'people': 0.0010733859442134607, 'sure': -0.01668992547087045, 'cloak': 0.002696923372893081, 'school': -0.001949967656828243, 'seen': -0.012674575006697263, 'rons': 0.006734153186598824, 'yes': -0.020697704724943294, 'help': -0.012442689282732174, 'take': -0.005252208207470452, 'night': 0.002596395115510995, 'magic': -0.008810749124309724, 'vernon': 0.23875584068098116, 'gone': 0.0017248219416003468, 'every': 0.005297739883983917, 'staring': 0.012802721507165866, 'end': 0.002754664594512012, 'pulled': 0.008170494089322843, 'hogsmeade': -0.008911261401045778, 'better': -0.01893240267307366, 'weve': -0.015113310182566479, 'onto': 0.009288622285111084, 'mr': -0.0291567908851963, 'percy': -0.006326466962558858, 'everyone': -0.007186384580480584, 'old': 0.00406495902401078, 'whispered': -0.014460994245776546, 'thats': -0.01817691890185837, 'george': -0.020492857018917118, 'id': -0.02137319684426121, 'bit': -0.007439326670481129, 'hall': 0.009514084118685458, 'forward': 0.00852516471416059, 'keep': 0.0033619621173315356, 'hagrids': 0.008262005605292302, 'quickly': -0.011343391538348083, 'happened': -0.005433434200996668, 'without': 0.0018610624830482899, 'whats': -0.017580951138338102, 'along': 0.004158970326872263, 'enough': -0.006469639263532249, 'theres': -0.005631915981593289, 'reached': 0.013447228570475173, 'set': -0.004908275713577694, 'floor': 0.01201630419196802, 'rest': -0.00039310038256945806, 'hair': 0.015536056001597616, 'quidditch': 0.004819481907709319, 'done': -0.014293677293114338, 'team': 0.007884567623398894, 'new': 0.005898475375448384, 'wouldnt': -0.010957112648617146, 'must': -0.010150906470314615, 'sat': 0.008736333954372226, 'marge': 
0.1548116568172427, 'mind': -0.0036402354717936436, 'started': 0.0160762421246507, 'might': -0.006781220203118892, 'nothing': -0.007326994076235634, 'asked': -0.008829569996448945, 'years': 0.0034541693889553445, 'day': 0.0018775864877763472, 'youve': -0.01254124306993769, 'blacks': -0.003823018521981061, 'match': -0.0024729633780402947, 'map': 0.0005945211704447498, 'began': 0.0073807670560942186, 'yet': -0.008434667351609313, 'slytherin': 0.01354766830719011, 'ter': -0.009958464489515876, 'boy': -0.004667761630132947, 'air': 0.007292060413884059, 'sight': 0.011592308085421847, 'opened': 0.006726278675124632, 'rat': -0.005579179983319429, 'stan': -0.0066402443452737, 'robes': 0.0023251541543305504, 'side': 0.0008470302135921265, 'azkaban': -0.010219825410268146, 'slowly': -0.0006049799892056697, 'small': 0.008936346811803865, 'quite': -0.003844148050159634, 'dear': -0.014657174757380863, 'outside': 0.009931668936572576, 'tried': -0.0014212044708554578, 'course': -0.01599184406844362, 'yeh': -0.012930622456935905, 'peter': -0.010956013493583137, 'window': 0.007994653254869881, 'broom': 0.0037415833386645015, 'muttered': -0.0023043392809310257, 'else': -0.008276553496861894, 'quietly': -0.01835543620588533, 'dementor': -0.005731325330911994, 'best': -0.003141473901150004, 'fell': 0.006147602774549404, 'arm': 0.003536449774348653, 'yelled': 0.013455351458126574, 'mouth': 0.005894762549348761, 'mean': -0.005828981312089, 'yeah': -0.011509080936633087, 'anyone': -0.009446789572169984, 'field': 0.005910630300491813, 'wont': 0.005120831378047532, 'okay': -0.01550383882357699, 'standing': 0.007427480833479196, 'found': -0.005776955412404112, 'later': 0.004096706933855465, 'feeling': -0.0010991302154799, 'common': 0.020390244913443663, 'books': 0.0017688617509160624, 'life': 0.011204348283658737, 'ministry': -0.010325987771865663, 'hard': -0.003601807698667207, 'coming': 0.002633035740999449, 'dog': -0.0006320677448432441, 'minutes': 0.004587283816498221, 'snitch': 
0.003010758175654124, 'wanted': -0.006715299278072405, 'wizard': 0.0007336610835863367, 'find': -0.0019402236764989563, 'leave': -0.007659519134683256, 'already': -0.003298291842207598, 'things': -0.005634126293542426, 'talking': -0.0059518839095700285, 'believe': -0.00863244928543172, 'please': -0.006726774543011551, 'trunk': 0.008753607897206438, 'stared': -0.0024255846044045405, 'cup': 0.0013204416954042339, 'dead': -0.008481290090288507, 'kept': -0.0015063123053116482, 'give': -0.008254623412028816, 'whole': 0.004131917278122934, 'grounds': 0.0018412778741594945, 'sitting': -0.001453021232106356, 'stop': -0.004021079352380533, 'ground': 0.00439588614172469, 'snapes': 0.004524114863723785, 'called': -0.007377166475486019, 'slightly': -0.0024958037897739776, 'getting': 0.004917927865601872, 'full': 0.003934690328879464, 'lost': -0.006031821101966748, 'crowd': 0.00023842578264457104, 'hippogriff': -0.0012259638888332343, 'empty': 0.012510905030903501, 'watching': -0.0014871489068610066, 'happy': 0.004866890919467196, 'hermiones': -0.0005295594244683183, 'youll': -0.002065789446424718, 'thinking': -0.001680304190068563, 'pomfrey': -0.009485683330424292, 'moved': 0.005640727204866178, 'hadnt': -0.0019630981725518117, 'voldemort': -0.0039742639086594814, 'second': -0.0003706523822010551, 'case': -0.0015144059722623756, 'watched': -0.0004345386523638118, 'man': 0.011095111857454461, 'stopped': 0.008413258739265026, 'tea': 0.00893595567200819, 'havent': 0.0029761176214219208, 'sit': -0.010992086642473978, 'father': -0.0045683912466875035, 'turn': -0.00410425260754069, 'feel': -0.007915109338997922, 'run': -0.0076960274712626125, 'cold': 0.0038738721105438154, 'tower': 0.005939448137540082, 'caught': 0.005654933932990053, 'able': 0.0031617800715997083, 'however': -0.0024356904883825535, 'morning': 0.005006122545066492, 'dad': -0.009843163529244029, 'youd': -0.009966639189448978, 'together': 0.010773285996156337, 'move': -0.006649075005890268, 'hit': 
0.0029310635287143685, 'lupins': -0.001686759164487354, 'crabbe': 0.012196426020682445, 'owl': 0.005494219007774898, 'nearly': -0.004457303923515044, 'witch': 0.0021784598697219965, 'house': 0.0033501459087262393, 'ten': 0.0015177663688218362, 'light': 0.005745865312415976, 'tiny': 0.005888615586412289, 'ask': -0.011879911209019392, 'classroom': 0.006018572350172069, 'boggart': -0.0046049909653278105, 'book': 0.004228147266671892, 'top': 0.006620548437118885, 'read': -0.00041973122218344586, 'work': -0.002634860733293948, 'enormous': 0.004519399619136345, 'past': 0.007951367529563377, 'raised': 0.00962548386344255, 'er': -0.004140841700115955, 'staircase': 0.008322229698850412, 'minister': -0.0070623962698933345, 'telling': -0.0052190510747428046, 'malfoys': 0.0013825292998124713, 'listen': -0.00971984032626788, 'roared': 0.006290569216953376, 'appeared': 0.006946002173470827, 'sorry': -0.012325379845776986, 'pocket': 0.005438904609095412, 'sound': 0.00196973509391116, 'bag': 0.0025887838835797886, 'sort': 0.00036263437381360184, 'place': -0.001763684243273021, 'entrance': 0.005798604012808728, 'goyle': 0.0050242996922589015, 'expecto': 0.02158443500333948, 'lot': -0.0010004802414144073, 'held': 0.014157363915697191, 'either': -0.0007045387845710896, 'always': -0.004359623395850136, 'james': -0.007345587069691123, 'shut': 0.004528072503020083, 'office': -0.0005233878577291258, 'shouted': 0.0025414405876265514, 'corner': 0.003226027725807961, 'shaking': -0.0027208259800923606, 'close': -0.002422348412418402, 'kill': -0.0056205049081520355, 'understand': -0.011719182969605647, 'ravenclaw': 0.003344307663032394, 'petunia': 0.1282402442926381, 'friends': 0.001861408681885357, 'loudly': 0.008449202941073734, 'holding': 0.014849720270384998, 'five': 0.0049060048483594056, 'walked': 0.0008588220722815582, 'shoulder': 0.007652872465768403, 'someone': -0.0009235283615013139, 'magical': -0.0008579586516062939, 'taking': 0.005277913854349793, 'making': -0.00303072861683209, 
'pointing': -0.00305972006812844, 'many': 0.005041067256047301, 'parents': -0.0008590888316384023, 'teacher': -0.003345613485723322, 'seized': 0.010926868603822913, 'picked': 0.003083677227909599, 'remember': -0.008558183802918342, 'rather': 0.011048034553778084, 'eye': -0.0007559576496729396, 'taken': -0.002586298295360235, 'supposed': -0.002703178383034264, 'chest': 0.002556975735267998, 'watch': 0.0005375115334760577, 'shes': 0.0008461043169322298, 'pointed': 0.00772512371801502, 'lesson': -0.0019864112974992826, 'climbed': 0.0033114065104652168, 'corridor': 0.003739897078449322, 'times': -4.014130062275613e-05, 'parchment': -0.0004551367023296145, 'word': -0.0029856469883671, 'almost': 0.0025255680977306167, 'thin': 0.007583413877905059, 'theyre': 0.0003752324105046476, 'tree': 0.009893899057535563, 'fast': 0.003050754888085296, 'usual': 0.005022749840296592, 'neck': 0.004357507157173078, 'idea': -0.005256145912020671, 'everything': -0.0023166395558613484, 'breath': 0.003153627541325166, 'patronus': -0.000799215886692697, 'followed': 0.0013572372049545128, 'moving': -0.000231160451510596, 'christmas': -0.0004576566227789293, 'fat': 0.006838633365368831, 'alone': -0.0021925300605309082, 'since': -0.0021784348731415655, 'bus': -0.0005846160180604404, 'chocolate': -0.002681505705145607, 'defense': -0.00643161279293444, 'fire': 0.00581091812842765, 'creatures': -0.0030268129359129607, 'noise': 0.001962709136910301, 'lavender': -0.0001307229708787261, 'werewolf': -0.0019330702167665629, 'muggles': -0.0019302149355422532, 'near': -0.0016294588398853348, 'use': -0.002336341850345053, 'silence': 0.0030395622555985063, 'extremely': 0.0030718678863953753, 'silver': 0.007752298289059142, 'person': -0.0017454648886690767, 'form': 0.0066593994984274485, 'speak': -0.004152876756877423, 'red': 0.0043236842961576994, 'cat': -0.00038927943930019537, 'patronum': 0.01589626101255055, 'catch': -0.0019560613292397937, 'indeed': -0.007772474340913352, 'dursleys': 
0.004671923850906888, 'start': -0.003988406387245187, 'loud': 0.004209371419133548, 'trouble': -0.0015864611325339456, 'threw': 0.002828997469292325, 'hundred': 0.003533606771541138, 'try': -0.009946507081965924, 'isnt': -0.008290627535569, 'point': -0.002403404966331321, 'heart': 0.0026541731727514786, 'words': -0.0006118971863082562, 'dangerous': -0.0012155508562469478, 'stay': 0.007735216605032756, 'walking': 0.0003403061540385911, 'finally': -0.001499616288766592, 'leg': 0.008057373597017463, 'leaving': -0.00037349829409987975, 'wrong': -0.010439816528009049, 'quiet': -0.001082915026782451, 'chair': 0.0011130344395628424, 'invisibility': -0.0003943071168533085, 'steps': 0.005062525022844357, 'arts': -0.004625097618590223, 'closed': 0.005025539011780211, 'students': 0.0019888665027505267, 'except': -0.0009115034554857539, 'headmaster': -0.0060401108811657285, 'portrait': 0.006203621079952289, 'points': 0.0018346491152158406, 'severus': -0.008205394081622523, 'chapter': 0.0008565572355810789, 'foot': 0.0029863156380103275, 'muggle': -0.005653697842265018, 'hedwig': 0.008478851692470828, 'finished': 0.0022641481255462415, 'killed': -0.004354838748187056, 'aside': 0.004389518272217216, 'gold': 0.002958517185833341, 'mrs': -0.012156065041924643, 'hurried': 0.0023365903508274803, 'train': -0.001388761155078143, 'shall': -0.00591038872456085, 'broke': 0.00016042872930400228, 'stand': 0.008141414416470028, 'saying': -0.006908720421303378, 'ern': -0.006089851223850908, 'passed': 0.0018486107428130323, 'running': -0.0003974242420643255, 'screaming': 0.0010489033500889137, 'sir': 0.0035142532584060923, 'stomach': 0.0016146691128925003, 'glasses': 0.007165921309603965, 'teachers': -2.2583886137083803e-05, 'least': -0.002000108793646107, 'bad': -0.00254439447604853, 'used': 0.0014652864769847047, 'letter': 0.0013396791674978347, 'beneath': 0.004270049191570253, 'huge': 0.009020480610363809, 'several': 0.01219827920604181, 'burst': 0.010292151878363251, 'horrible': 
0.0021876425403430043, 'fact': -0.0036731317655771525, 'gasped': -0.004652985706402348, 'forest': 0.0030006505681005225, 'completely': 0.0010968980059895578, 'chance': -0.000673368490380093, 'week': 0.0070770863988079465, 'street': 0.0011541650564748259, 'meant': -0.004249600141960077, 'arms': 0.0050593542851555965, 'died': 9.593168799982547e-05, 'sign': 0.011383992424241101, 'third': -0.0005465822297042052, 'name': -0.005350895607366381, 'mum': -0.0026724633404111814, 'heads': 0.002087082640306376, 'theyd': -0.00224350564469207, 'desk': -0.003394463030454585, 'care': -0.005477519278381, 'doors': 0.009755190986383399, 'wind': 0.00015964410319130277, 'wall': 0.0016045719306187333, 'met': -0.0034197828685552964, 'white': 0.007653098185909681, 'low': 0.000699041525302732, 'listening': -4.414948808612772e-05, 'willow': -0.00015096314209069077, 'reckon': -0.005750052899282265, 'wing': 0.0016475927332627103, 'wait': -0.0055410046896781475, 'honeydukes': -0.0015902743949735845, 'innocent': -0.0008122689045798022, 'rosmerta': -0.011249216812259289, 'lying': 0.0022772046701188134, 'real': -0.0039636918737070235, 'call': 0.014734561614905297, 'wizards': 0.001256241604573771, 'ear': 0.001719438478550534, 'seconds': -0.0004440820433578073, 'whether': -0.0029229204647074607, 'tail': -0.0001318548000490769, 'hope': 0.0024026523345134897, 'doesnt': -0.004730686588284355, 'cabin': 0.0035824946968000945, 'visit': 0.0010712223827447527, 'straight': 0.0030127833672056775, 'thick': 0.0017130803353683523, 'knight': -0.0008143198380428875, 'worse': -0.0021991036773368875, 'dyou': -0.003911350211844074, 'divination': -0.0028759087790092355, 'death': 0.0002227697856380188, 'theyve': -0.007569187005180527, 'hold': 0.00028640274626881557, 'quick': -0.007752429011722983, 'hole': 0.005896821824693259, 'filch': -0.002296365052526172, 'dudley': 0.03005653338217241, 'talk': -0.005519822877515889, 'voices': 0.0004455874341978204, 'perhaps': 0.005485469718140361, 'upon': 0.0023652899380800325, 
'fingers': 0.0016637780655277524, 'world': 0.0017196757337503469, 'ready': -0.003435234860775119, 'shot': 0.001290506592949731, 'tears': 0.0018277894145179105, 'hissed': -9.623343929918912e-05, 'starting': -0.004970769026743648, 'ran': 0.0017036714292911555, 'half': 0.00555653765391083, 'werent': -0.000554814133377809, 'mad': -0.0034338864522430617, 'crystal': 0.0017409669253189027, 'nobody': -0.0017173362178359217, 'looks': -0.004691240227468289, 'sent': -0.0016741961570149858, 'hospital': 0.0017680264110017231, 'trelawneys': -0.004241946685626025, 'friend': -0.002806585456049909, 'worst': -0.0025029933073399614, 'turning': -0.0031012436667020794, 'cage': 0.010425166156793824, 'instead': -0.0004533417126339997, 'legs': 0.0023041522096817943, 'furiously': 0.0050779118337881115, 'glass': 0.011549029334275877, 'brought': 0.005741965801559428, 'managed': 0.011286686805260187, 'far': 0.001125925246712272, 'whose': 0.00468606978236786, 'laughter': 0.0007882705436873586, 'kind': -0.0006222649727124891, 'exactly': 0.0018360568673121765, 'clear': 0.0004979138080859272, 'news': -0.001517444220582935, 'angry': 0.003135912117655617, 'twelve': 0.0017856607076282274, 'ears': 0.0021171398007740084, 'certainly': -0.00303541470041723, 'backward': 0.0020236515553483493, 'beside': 0.0017831357633650376, 'trees': -2.615408725262639e-05, 'lets': -0.008026641703046457, 'hours': 0.001683246410851986, 'miss': -0.008005652090963692, 'tonight': -0.00041307093174589536, 'scared': -0.0002490094094059555, 'ahead': -0.0003563820347219566, 'parvati': 0.002280757104444283, 'grim': -0.00434616432030091, 'became': 0.001097655372256768, 'broomstick': -9.062051390951225e-05, 'given': 0.0044707357743344065, 'hidden': 0.0058446820126167275, 'direction': 0.010055320976515762, 'broken': -0.0004572769012501991, 'lay': 0.004223453649430675, 'carrying': 0.0024730711546437265, 'stuff': -0.0033870060993677923, 'snapped': 0.012967270087906744, 'hiding': -0.0008181173753451795, 'true': -0.0014219008968097027, 
'upstairs': 0.0016551972026235104, 'memory': 0.0008260184677169377, 'story': -0.0012084338117115023, 'pulling': 0.004107716213241364, 'excellent': 0.0011349333461930222, 'free': -0.0013910560033612287, 'deserted': 0.003646820826249904, 'windows': 0.001397157639969475, 'wearing': 0.00214384110047889, 'disappeared': 0.001604268054414054, 'approached': 0.0010236538620221488, 'continued': -0.005396323703222944, 'deep': 0.0013853721891416084, 'matter': -0.005787124340448041, 'entered': 0.0005665313873693135, 'rain': 0.0022547357883277762, 'terrible': -0.0021682975391389405, 'stone': 0.0028164805512684078, 'flitwick': -0.0058974023289591095, 'buckbeaks': 0.0021117853032982747, 'peeves': -0.0038971996193289437, 'nose': 0.001965129063942944, 'teeth': 0.0025609278995675044, 'probably': -0.002187115652022485, 'stairs': 0.0010660888533677625, 'family': -0.0008360560791117338, 'number': 0.004865249337096914, 'cauldron': 0.0003330697190473651, 'grabbed': 0.0020261589200191335, 'granger': -0.005332087055510953, 'soon': 0.006622561052913032, 'green': 0.0006631838367960458, 'lily': -0.002493274931350529, 'water': -0.0003848607425299109, 'wings': 0.004100694923826384, 'picture': 0.00015056916656374758, 'ginny': -0.0035094803363953874, 'term': -0.0031128790389958745, 'glanced': 0.0033394895728075962, 'pair': 0.010924321910181407, 'apart': -0.0009714564270090124, 'nervously': -0.005378740320584175, 'big': 0.0019876645662776538, 'boys': 0.0066026759244426445, 'fine': -0.002296268437049562, 'smile': -0.0017536395378692038, 'abruptly': -0.002788189865819744, 'happen': -0.002315565622457647, 'arrived': 0.0013718725957722212, 'unless': -0.0016988820993950368, 'youknowwho': -0.004693211425530165, 'laughing': 0.0034806186726672717, 'darkness': 0.00015078225223079644, 'eat': -0.0008027863478884308, 'strode': 0.0011786065244418608, 'dean': 0.001057673352120635, 'dropped': -0.0007986325525929547, 'truth': -0.004955767560826448, 'nearer': 0.0020145551315215024, 'headed': 0.0018686175711981766, 
'vanished': 0.0017649813374125765, 'roots': -0.003161323687889184, 'hooch': -0.004638268982053049, 'committee': -0.0026308913180023512, 'pushed': 0.00316102134088409, 'afraid': -0.009564755882172551, 'allowed': -0.0006208813315380448, 'bottle': 0.00788398479013527, 'carefully': 0.0020963945271997835, 'reason': -0.0018397279819920903, 'break': -0.0036716590834141288, 'answer': -0.002133170304440063, 'late': -0.0019407417061874062, 'seem': -0.005045400428819719, 'knees': 0.0018054120839387441, 'tightly': 0.005088091968290112, 'permission': 0.002206204400211018, 'walk': 0.0015287822640568346, 'today': -0.0028105529137773004, 'hardly': 0.002093071797045888, 'footsteps': 0.003457672419446044, 'tight': 0.005977094120524483, 'hat': 0.004578162666071691, 'needed': -0.0025661089729288804, 'shook': 0.0021221901875532545, 'sank': 0.006059536574522111, 'covered': 0.000328843207360615, 'em': 0.001554210781013866, 'job': -0.00297576555732392, 'ah': -0.0032595349148390345, 'throat': -0.0009190150445098016, 'thank': -0.0007654848001640847, 'seamus': -0.002345108477442149, 'gryffindors': 0.0030652191222291206, 'waited': -0.0006918687099115666, 'playing': 0.0008573062151183072, 'closely': -0.0022259499357569425, 'wands': -0.0004423648278416617, 'wants': -0.007425565872548389, 'knows': -0.002825166379504291, 'potion': 0.002018300351570028, 'lake': 0.0006908953113917099, 'lady': 0.0018238863948817987, 'grass': 0.002062788673272163, 'stupid': -0.0019897877130805376, 'angelina': 0.0005738575863859417, 'potters': -0.0037741899149285048, 'holidays': 0.007262613846643341, 'forced': 0.006673452591144894, 'essay': -0.0027950276614231847, 'closer': -0.0005716467984258905, 'particularly': 0.004427069379813733, 'locked': 0.0012170888888153764, 'days': 0.0013543256034722552, 'nasty': -8.484612612501362e-06, 'vernons': 0.05232704720459533, 'opposite': 0.0015887084646358618, 'wizarding': 0.0007301671177062109, 'birthday': 0.0060244957019335976, 'worried': 0.0032885383541514396, 'lord': 
-0.002661347667077028, 'flew': 0.008685488636339124, 'paper': 0.002614900274659756, 'present': 0.0004551275607711328, 'happily': -0.003682063356884658, 'ago': -0.00095098071334696, 'nimbus': -0.0004123680831365767, 'view': 0.003739527362270925, 'edge': 0.0004124911603909945, 'asleep': -0.003320938609452654, 'within': 0.0018667974801246981, 'none': -0.00231019274672828, 'added': 0.003367851626926364, 'mother': -0.00023879824618455747, 'snarled': 0.004169061583839088, 'furious': 0.003417107848411638, 'sudden': 0.0024277059968301484, 'hurt': -0.003868244656049395, 'high': 0.0020962966433944984, 'rolled': 0.0023012679042996934, 'sleep': 0.00013165454905214027, 'classes': 0.0007061647983751735, 'led': 0.00046542592058154063, 'laugh': -0.0031090085192086118, 'slipped': 0.001014939237997582, 'ball': 0.0057673414649863095, 'fighting': -0.001225130984821805, 'bags': 0.001623892682404795, 'glittering': 0.002220536544784712, 'dormitory': 0.0015028976887245245, 'spoke': -0.00046614331367404396, 'slytherins': -0.0006017822472518418, 'goal': 0.0034029123684422044, 'alicia': 0.003238925626362521, 'katie': 0.0024396077329303075, 'working': -0.0029786134091213876, 'whomping': -0.0005289735793180949, 'marauders': -0.0010825716214610504, 'pettigrews': -0.00038690731864876, 'remus': -0.005396240877299598, 'summer': 0.005243662203309862, 'homework': 0.0014128982974233246, 'ink': 0.0047314928252422325, 'four': 0.004026645886945957, 'car': 0.016615089527272493, 'funny': -0.0026850724874874195, 'alive': -0.0004571588422987556, 'gazing': -4.3502937750832e-05, 'seeing': -0.0005704902375769514, 'hesitated': -0.0011099082185633748, 'wondering': -0.0018641551727791108, 'leapt': 0.006196183375872638, 'middle': 0.003001273900176833, 'pleased': 0.0068266534462263035, 'brown': 0.0030849505932506187, 'daily': -0.0039002936219451846, 'prophet': -4.4961053152294244e-05, 'win': -0.00235209368576576, 'poor': -0.003662031307236393, 'heavy': 0.004183020759358339, 'breakfast': -0.0006439826395006033, 
'piece': 0.00032238346536801185, 'finger': -0.0006450013299842918, 'forget': 0.005901893419613153, 'lunch': 0.0010522802328214362, 'grip': 0.0004214397987065559, 'decided': -0.0001180028445625795, 'die': -0.004211648393259621, 'ceiling': -0.0008310001755259517, 'bent': 0.0019241621609611833, 'fall': -0.0007102635185933559, 'fallen': 0.0005982512008419538, 'longbottom': -0.000774633635468384, 'slid': 0.0015379870028849694, 'leaky': 0.0007515632931556507, 'tom': 0.0005861469101461884, 'waiting': 0.0006961187017281219, 'game': -0.000484421701854984, 'shop': -0.002274685036868524, 'somebody': 9.287580869364176e-05, 'sharply': -0.00425849255814447, 'seriously': -0.002465875324939927, 'impossible': -0.0028002403207487846, 'twenty': 0.0033133206630989124, 'crack': 0.002638680425891141, 'hufflepuff': 0.001636055689533826, 'helping': -0.005371253973628598, 'rope': 0.0009620062386258777, 'others': 0.0019636545678458975, 'hippogriffs': -0.0023779961802600697, 'hell': -0.005322850214099232, 'fifty': 0.0032309307957157124, 'oliver': -0.0033572398487754468, 'cho': 0.0006992407592341219, 'also': -0.00270936450287184, 'frowning': -0.007998954892994426, 'pain': -0.0010449164345549167, 'spent': 0.0033940620699665163, 'notice': 0.00021598217758819787, 'expression': 0.000445248058793484, 'dare': -0.001310073378631629, 'oclock': 0.0005616794841526397, 'hour': 0.00023816936321159127, 'leaned': 0.0020847114841013713, 'escaped': -0.001639891520941513, 'attack': -0.0010075155418865155, 'sky': 0.0015389508463831438, 'landed': 0.0025671351212158875, 'soft': 0.0018714685873006504, 'gray': 0.002791688853101235, 'thanks': -0.0025407100322651526, 'waving': -0.0011339632747539, 'final': 0.001376076494691597, 'says': -0.0033542833094550133, 'bet': -0.003940284833492998, 'angrily': -0.0049055940350276415, 'grinning': 0.0030942167135513204, 'cross': -0.0007450520834130057, 'fixed': 0.0024835621482849532, 'minute': 0.0038648183363160843, 'box': 0.0030306447091841275, 'knocked': 0.0032839020803919518, 
'flat': 0.00529142361580565, 'smiling': -0.001260517796795868, 'live': -0.0045146032387115035, 'evening': 0.00195952089318584, 'walls': 0.003275067503663288, 'explain': -0.0017827032390424922, 'fence': 0.001224642666634828, 'halt': 0.0018417089474053441, 'familiar': -0.0015280060983260877, 'repeated': -0.0011712771400408776, 'bar': 0.00227516708571905, 'passage': -0.0024438086519234605, 'safe': -0.0037713097227550533, 'cried': -0.004212440911002467, 'future': 0.0007549310923028626, 'arithmancy': 0.001520252614789142, 'definitely': 0.0006475692898127954, 'joined': -0.0007408422033069824, 'girl': 0.0017177790100887103, 'maybe': -0.0011752639504691198, 'dumbledores': 0.00040919628368491016, 'feast': -0.001460214926886805, 'filled': 0.002990136158745909, 'become': 0.0019190360282717272, 'cadogan': 0.006292718531387224, 'rose': 0.000989742046351134, 'practice': 0.0038563203218357936, 'seeker': -0.0022946155342728194, 'forgotten': -0.0017018060847828888, 'butterbeer': -0.0007304593477720369, 'secretkeeper': -0.002046688742334348, 'macnair': -0.003463988214838676, 'charm': -0.0006162375300298557, 'drive': 0.00020384286983415463, 'hoped': 0.002312600960329355, 'speaking': 0.002373659657551491, 'frightened': -0.000719992050312569, 'realized': -0.0013128719356206733, 'tomorrow': -0.0026269482453876755, 'suppose': -0.001572216788894896, 'clearly': -0.0018878313186078007, 'lucky': 0.0013469551608502437, 'beak': 0.000568607278603634, 'moon': 0.0002412180144272868, 'flying': 0.001882316776724886, 'london': -0.0007114040515597554, 'note': 0.005580747416228997, 'change': -0.002667361961638748, 'difficult': -0.0013546332859272446, 'missed': -0.0010550319121429856, 'longer': -0.0001293254889399405, 'helped': -0.0012581264590575598, 'blood': 0.0024580202267766127, 'subject': 0.00032891526223310653, 'wardrobe': 0.0018215712710449496, 'shrieking': -0.0006338550684964788, 'nodded': 0.006567953853493089, 'eh': -0.0007492360543223815, 'single': 0.006103227612999673, 'putting': 
0.003136012031876725, 'ones': -0.0024224320279201835, 'wiping': 0.002457988675884128, 'expect': -0.0004595545350961607, 'effort': 0.0009172886362980455, 'expelled': -0.00091365729470554, 'wide': 0.005495796041777402, 'stepped': -0.0001776156386582276, 'different': 0.0019246739807372735, 'skin': -0.0012354719375563866, 'figure': 0.0014962278131030406, 'giving': -0.0033868210642341513, 'short': 0.0023416759878726098, 'using': -0.001691769177098805, 'bin': -0.004065780677614056, 'saved': -0.0033681316025678015, 'spotted': 0.0054408529606514554, 'tables': 0.0047516833491171645, 'hasnt': -0.00517489881423345, 'twice': 0.0029263706871161463, 'shadows': 0.00023782149242028306, 'scarlet': 0.002270690807394883, 'theyll': -0.005247091787312413, 'midair': -0.0010313859909743047, 'seat': 0.0045647263374032476, 'breaking': -0.0012825241861582864, 'marble': 0.004605699069622378, 'muttering': 0.001137526249174104, 'leaves': 0.0013181697732083323, 'ladder': 0.0032775338418702237, 'beat': 0.00028200662053083875, 'cut': -0.0004335038912035219, 'quaffle': 0.004347130803452729, 'bludger': -0.00100618914266999, 'oneeyed': 0.0013627958174768106, 'prongs': -0.0016092217621905553, 'tunnel': 0.0006983147154865423, 'page': 0.0011360980357048864, 'known': -0.0018482893513728847, 'less': 0.0005471540085279744, 'bellowed': 0.008286262827525545, 'obviously': -0.0040563100201313, 'send': -0.00013414472238611454, 'stretched': 0.006178798073574135, 'inches': 0.0022215499851838427, 'barely': -0.002048262423358194, 'soared': 0.0032140633222071673, 'join': -0.0007236550175178238, 'ripped': 0.002145105345715445, 'trembling': -3.649164563298803e-05, 'six': 0.0009378259520711904, 'whod': -0.00012395916360967616, 'laughed': 0.0005203247857035201, 'bound': 0.0015916768805284485, 'gleaming': 0.0022287906970177647, 'broomsticks': -0.0016766690270679208, 'snap': 0.003605680110257171, 'worry': -0.0013402763708456349, 'return': -0.0020553923689745884, 'marges': 0.029613405794365156, 'wheres': 
0.004981759477889147, 'force': -0.002005907985523952, 'escape': -0.0015868133750303543, 'forbidden': 0.001231702898509434, 'warning': -0.00045968179396108224, 'screamed': 0.002853089018185104, 'stuck': 0.0026581337517049162, 'heading': 0.0038999364095301126, 'serious': -0.003102641525231763, 'thrown': -0.00038744535646472264, 'cornelius': -0.002004040116045955, 'anyway': -0.0045263522995963035, 'diagon': -0.0005129802712784413, 'alley': -0.0019922371209510595, 'excitedly': -0.0018919065407736366, 'lose': -0.0013660544564958747, 'packed': 0.000433801665418749, 'thomas': 0.0018600824528198561, 'meet': -0.002884430165190526, 'checking': -0.001431387142168014, 'ages': -0.0006841023817696202, 'lowered': 0.00032021407419726295, 'louder': -0.0006617738812834151, 'terrified': 0.001302726441077414, 'shining': 0.0011878109751599923, 'question': -0.0006866193911949747, 'branches': 0.0009179447337176685, 'lessons': -0.00040719941896162404, 'confused': -0.0016651036438597444, 'afternoon': -0.0005204067072161243, 'riddikulus': -0.00021056498802074736, 'joke': 0.00027635142589639924, 'strong': 0.0032386720181715447, 'charms': -0.001314338604014402, 'silent': 0.00025604704418131304, 'falling': 0.003257954583378889, 'betrayed': -0.0016537982909906005, 'ward': -1.3548629313842226e-05, 'possession': 0.0030399337812538636, 'flint': 0.002256528472992838, 'highly': -0.002555484198008739, 'secret': -0.0021078825288260816, 'write': 0.0008155004751140331, 'paused': -0.002371702070277944, 'round': 0.0007265551041264622, 'weird': 8.7083171850256e-05, 'cupboard': 0.0008618753696114, 'living': 0.0009511284783592049, 'possible': 0.0018028326764637532, 'potions': -0.0019578159030327947, 'delighted': -0.0017985444666532785, 'jumped': 0.005665914386885513, 'spot': 0.00018925939771117982, 'weeks': -0.002064282662810745, 'thirteen': 5.3675104429401145e-05, 'hedwigs': 0.009917139458896568, 'whatever': 0.0003603909207084599, 'bright': -0.0014423347565426588, 'voldemorts': -0.000903090254971899, 
'golden': 0.0031923588604409778, 'growing': -0.00031132302422125045, 'lower': 0.002881895583831916, 'owls': 0.0040264404462265965, 'tied': 0.0024408497329127168, 'errol': 0.0005252528048542453, 'carried': 0.00017145074941740465, 'grin': -0.0009946583092465561, 'weasleys': -0.0022598255239304537, 'seven': -0.0003168313444786457, 'remembered': -0.003802115799191988, 'sun': 0.0027686704788992982, 'sneakoscope': 0.0030854832986671006, 'loads': -0.002455231557830359, 'knowing': -0.000590777642144256, 'handle': 0.003145807011095544, 'thousand': 0.0003147539904694871, 'dogs': 0.0040698630142141125, 'monster': 0.0020093439897184147, 'sideways': 0.0010109581829181832, 'ouch': -0.0008685857722521037, 'struggling': 0.0005625618221412231, 'village': 0.00022240558192368316, 'woke': -0.00018943461478864674, 'hot': 0.0006877797106692807, 'line': -0.0006404906842657224, 'immediately': -0.001749852289992014, 'peered': 0.007411882389694789, 'deal': 0.0027685653239592238, 'thoughts': -0.0006007881234200376, 'growled': 0.011549362592191127, 'attention': -0.0023044364549850635, 'returned': 0.003032680462778764, 'suspiciously': 0.0013081388250424583, 'itll': -0.00269309736125315, 'calmly': 0.0008357171297167947, 'sighed': -0.002168780569635094, 'fault': -0.0017927568563480105, 'clutched': 0.0026351425824521634, 'arent': 0.0015545669900540403, 'changed': -0.001208340690632637, 'deeply': -0.0015543533744733894, 'heaved': 0.0032454782115006165, 'means': -0.0021945671174911164, 'money': 0.001724323776930566, 'bang': 0.0008770130591265047, 'step': -0.0013501247885718993, 'calm': -0.0005073852065626701, 'fear': -0.00039426309166177165, 'forgot': 0.0022764165381973485, 'whisper': -0.003997543847101542, 'examining': 0.00014959205282418653, 'beaming': 0.0019254543812282344, 'parlor': 0.0003805178771482348, 'anymore': -0.00035139661503043253, 'among': 0.00045419958467144344, 'excited': 0.0007107495547419223, 'surprise': 0.0010924196079786805, 'hurrying': -0.0005649176998784931, 'drew': 
0.0010320705871120488, 'balls': -0.0001227079117094379, 'staying': -0.002662209134433574, 'rats': -0.0018558105967011109, 'stuffed': 0.002411587706985237, 'hanging': -0.001628827225674583, 'doubt': -0.0024748258594319123, 'check': -0.0003009337334607497, 'bring': -0.0031537828329314004, 'search': -0.002658175529869136, 'compartment': -0.00027710772075047554, 'careful': -0.0033528475136874267, 'pale': 0.0027478963895086294, 'corridors': 0.000987593990493845, 'patch': 0.0012094166805332563, 'reach': 0.005847694527605945, 'silvery': -0.002938283823436777, 'mud': 0.001994651646122731, 'applause': -0.0011515474763650927, 'beyond': -0.0018728169953032806, 'pumpkin': 0.0035780727881236725, 'group': 0.0006457443688822062, 'animal': 0.0006371951290240422, 'harder': -0.0014049862343531385, 'fer': -0.0028840366906282165, 'trust': -0.003302161962076856, 'conversation': -0.002239095919262583, 'imagine': -0.004167717655332448, 'zonkos': 0.001243246449739599, 'fly': -0.0008195240411808264, 'weight': 0.0017102981793836898, 'stadium': 0.0008240887624256731, 'cheering': 0.0002520775185752135, 'moments': -1.9000887626884575e-05, 'wormtail': -0.0020655760520654205, 'anybody': 0.0007619491452213529, 'exam': -0.0003676370634124034, 'executioner': 0.0001920747235135091};
dictZ_HP = {'harry': 0.006576621610657809, 'said': 0.09954568366953387, 'ron': 0.015546826061993882, 'hermione': 0.020543605535611548, 'professor': 0.07313841810977675, 'lupin': 0.03929993600900191, 'back': -0.02651920262597355, 'black': 0.012867872773700712, 'one': -0.013191644188357568, 'around': -0.03672906623796869, 'like': 0.017285705988807427, 'looked': -0.015320861915908172, 'could': 0.011702977037760189, 'see': 0.04670132012425389, 'got': 0.020474096958364, 'snape': 0.013359379612171091, 'hagrid': 0.007240152878881713, 'didnt': 0.03919778404365819, 'get': 0.01736361087357711, 'know': 0.06863578203348558, 'well': 0.05965483892766792, 'harrys': -0.03827639406613852, 'still': -0.005445840097055729, 'eyes': -0.02350702554573704, 'go': 0.043122339638951526, 'would': 0.02918699190146723, 'dont': 0.09457671505718555, 'time': -0.00838270406835534, 'though': -0.007556964923729323, 'face': -0.019301824266987233, 'going': 0.03887233872743786, 'looking': 0.014122583535647815, 'right': 0.004504891773369856, 'think': 0.05876513834552801, 'dumbledore': 0.024079360573696845, 'malfoy': -0.008385234588453852, 'saw': -0.015825974703768723, 'come': 0.026210940274494842, 'head': -0.02867721041396014, 'voice': 0.01690221567867924, 'door': -0.05210828102977123, 'away': -0.009853795596894482, 'im': 0.04213392108173401, 'sirius': 0.019882972498993914, 'toward': -0.04057838970474067, 'hes': 0.035531051205808566, 'something': -0.0007434479960376381, 'look': 0.008177271203432134, 'heard': -0.009851076782487986, 'behind': -0.033414815565361265, 'last': -0.02524704069997751, 'hand': -0.024980245863467972, 'wand': -0.03217387045645279, 'ever': 0.00012094573913951716, 'gryffindor': -0.04646140972372324, 'turned': -0.019733284096625767, 'room': -0.05745433554206216, 'never': 0.01742550218171949, 'scabbers': -0.004055354332019775, 'way': -0.013973739883038912, 'next': -0.015866678579210567, 'thought': 0.010244993596936687, 'told': 0.02496614665534996, 'went': -0.02619885733182448, 'good': 
0.02737832740052176, 'us': 0.03155022554439235, 'fudge': 0.023283119535050082, 'dementors': 0.011417761174043119, 'neville': 0.00806351002768764, 'potter': 0.03974921362738035, 'weasley': 0.0035507347544698883, 'mcgonagall': 0.03763835894205048, 'hed': 0.002731335760903389, 'front': -0.027992991689027306, 'long': -0.02149830500116005, 'made': -0.0062405083612386125, 'came': -0.017839034991153316, 'ill': 0.03820884924538128, 'two': -0.012269890189541261, 'first': -0.00868889613250823, 'moment': -0.00440270621816637, 'crookshanks': -0.017084531625701477, 'aunt': 0.11594614127360175, 'pettigrew': 0.009049115099406743, 'hogwarts': -0.008142521628536143, 'want': 0.04707494813269892, 'inside': -0.03092662277290425, 'seemed': -0.006602045680528795, 'table': -0.02009867976483317, 'took': -0.02136361736613169, 'left': -0.027296401711420174, 'knew': 0.012556578582265887, 'wasnt': 0.01144973234198501, 'madam': 0.010066324092053698, 'uncle': 0.0825003789167139, 'even': 0.004586250616542908, 'suddenly': -0.005897295517782317, 'large': -0.01561419578734981, 'really': 0.0474917763874466, 'castle': -0.013757847278621737, 'dark': -0.019074605802494315, 'anything': 0.03299381585899558, 'tell': 0.03741718460262639, 'trying': 0.0042890418081031065, 'wood': -0.009898692908653101, 'class': -0.014211958703653872, 'hands': -0.01939037623515241, 'felt': -0.023425564257220297, 'let': 0.014932500616106681, 'three': -0.00986792839310674, 'thing': 0.0023086620199221654, 'make': 0.02743085584118457, 'great': -0.016795686336995776, 'much': 0.014672972068085606, 'youre': 0.035247714991887766, 'buckbeak': -0.010917436968780173, 'say': 0.02936019682751247, 'couldnt': 0.0061867952798649005, 'ive': 0.02367205983333848, 'hear': 0.009769061681619326, 'fred': 0.0224453830951579, 'bed': -0.024310306379370563, 'cant': 0.020185489852052572, 'firebolt': -0.01202933771053328, 'open': -0.03732373435136865, 'feet': -0.023341789928524207, 'need': 0.019807284110024807, 'another': -0.014576552650929218, 'put': 
-0.007444353909769183, 'little': -0.0017422027745630396, 'stood': -0.018890006151672672, 'gave': 0.0030763431168157798, 'across': -0.027066876639661477, 'oh': 0.027293965430411564, 'trelawney': 0.013637248015104946, 'year': -0.011476374441837524, 'people': 0.00010609124362390157, 'sure': 0.013678600264856207, 'cloak': -0.01726542947817335, 'school': -0.008454700600059267, 'seen': 0.009260007562839326, 'rons': -0.020388038239657773, 'yes': 0.0358233084378642, 'help': 0.008827775662892688, 'take': 0.0032784908943762968, 'night': -0.006674277121504886, 'magic': -0.00245585512185312, 'vernon': 0.094246565993071, 'gone': 0.007289478846527762, 'every': -0.01567755979737121, 'staring': -0.00012363633956542644, 'end': -0.0152241014072137, 'pulled': -0.025403915751227833, 'hogsmeade': 0.005198590501289714, 'better': 0.0154001353195989, 'weve': 0.020382314042445056, 'onto': -0.026330054728605827, 'mr': 0.016638636217419695, 'percy': 0.006056517383487555, 'everyone': -0.0026016348110779763, 'old': 0.0024123335172663227, 'whispered': 0.009032787343944932, 'thats': 0.021250761560555165, 'george': 0.01761588150453873, 'id': 0.030455642167659676, 'bit': 0.00822969788745883, 'hall': -0.024617224041755703, 'forward': -0.018107983740497473, 'keep': 0.005007472419305511, 'hagrids': -0.020652906774081343, 'quickly': 0.0029470930923979205, 'happened': 0.009972770949482235, 'without': -0.0019071035507142906, 'whats': 0.022320606130611903, 'along': -0.013823179914595835, 'enough': 0.012181412737956266, 'theres': 0.004495220468277025, 'reached': -0.018139932545873923, 'set': -0.006614055548449551, 'floor': -0.014024150243786495, 'rest': -0.014231522562475462, 'hair': -0.014546584419141459, 'quidditch': -0.018541885725617532, 'done': 0.013998473186618867, 'team': -0.022072212589233475, 'new': -0.0036514185092954687, 'wouldnt': 0.01521975774545971, 'must': 0.008203671588626085, 'sat': -0.010542912565674125, 'marge': 0.06778730390798893, 'mind': 0.007285198717679103, 'started': 
-0.0035860850044741478, 'might': 0.011175298205224612, 'nothing': 0.0020289779857826594, 'asked': 0.011257259564609922, 'years': -0.0003773498673103166, 'day': -0.002764893764860788, 'youve': 0.01802801762007801, 'blacks': -0.0038076925818431133, 'match': -0.006990641992522593, 'map': -0.007202327179347153, 'began': -0.006780367774747821, 'yet': 0.004792828919396426, 'slytherin': -0.0199650782107088, 'ter': 0.016531910897273298, 'boy': 0.002721604607340509, 'air': -0.013325276332003666, 'sight': -0.016083551661793278, 'opened': -0.023795856100159815, 'rat': 0.003277216947815486, 'stan': -0.0003827195559557529, 'robes': -0.016792839621011472, 'side': -0.022118561428502, 'azkaban': 0.0076518790850170874, 'slowly': -0.0072312258756805125, 'small': -0.01127311239333513, 'quite': 0.007753619653486063, 'dear': 0.01445903378778424, 'outside': -0.014610983495320437, 'tried': 0.001149117145281811, 'course': 0.01828140810608453, 'yeh': 0.016350508033469567, 'peter': 0.0071109890185322095, 'window': -0.011989425037526308, 'broom': -0.006300042452988547, 'muttered': -0.003975747888297105, 'else': 0.000142213143797849, 'quietly': 0.019489596091376207, 'dementor': -0.007947226978730374, 'best': 0.0019463093682821515, 'fell': -0.01946464412402297, 'arm': -0.018383927927700904, 'yelled': 0.006664646863496466, 'mouth': -0.012948832457616076, 'mean': 0.01434215617952412, 'yeah': 0.012520983231840834, 'anyone': 0.012929633504721717, 'field': -0.02114702969532693, 'wont': 0.014659928471157694, 'okay': 0.009974137407935754, 'standing': -0.00627761612205494, 'found': -0.002283912474198744, 'later': -0.013142102180031895, 'feeling': -0.004048923905086385, 'common': -0.04294907676574555, 'books': -0.00397294535899408, 'life': -0.003176365782671594, 'ministry': 0.0034696744781522538, 'hard': -0.0001773379991771854, 'coming': 0.005632731057135316, 'dog': -0.005011668095826334, 'minutes': -0.013141787163784632, 'snitch': -0.010005369366468392, 'wanted': 0.0075349739251956175, 'wizard': 
-0.0010313046074491748, 'find': -0.0005699355209262958, 'leave': 0.0014740559367796837, 'already': 0.0022290579922881353, 'things': 0.0020005263537891107, 'talking': 0.009044920252927984, 'believe': 0.018976868449392194, 'please': 0.008134848728080714, 'trunk': -0.009184315097443725, 'stared': -0.011363878489311013, 'cup': -0.009864749134177857, 'dead': 0.00655129019560717, 'kept': -0.005254201996629761, 'give': 0.009079181165131312, 'whole': -0.00854230766007243, 'grounds': -0.011853434612813412, 'sitting': -0.006984944094361231, 'stop': -0.0029163687690112383, 'ground': -0.01195996826702966, 'snapes': -0.010361834242571527, 'called': 0.0006936106624511656, 'slightly': 0.0023216147620703518, 'getting': 0.0018747673251519313, 'full': -0.0059735032817747405, 'lost': -0.00016995464771642552, 'crowd': -0.0060011273253934, 'hippogriff': -0.0086513474015308, 'empty': -0.015413108765083791, 'watching': -0.0065628976928579046, 'happy': 0.0052272511209448536, 'hermiones': -0.012217160645486315, 'youll': 0.013863946246702487, 'thinking': 0.0057909238850967475, 'pomfrey': -0.0017120205993884808, 'moved': -0.014757965856711715, 'hadnt': -0.0007601542197549799, 'voldemort': -0.0011252738964336636, 'second': -0.011264651914867517, 'case': -0.0018689771161172414, 'watched': -0.008654838001475304, 'man': -0.0015472621065886832, 'stopped': -0.003954036916945259, 'tea': 0.007805595848229469, 'havent': 0.01568040540175105, 'sit': 0.005699520318566299, 'father': 0.005004577599192506, 'turn': -0.003475485455907891, 'feel': 0.006240642327854387, 'run': -0.0047170511704510395, 'cold': -0.011011107347548654, 'tower': -0.019237105331667656, 'caught': -0.01028095557752132, 'able': 0.003388083902911504, 'however': -0.005384497165044912, 'morning': -0.009408824582860088, 'dad': 0.01016181618775529, 'youd': 0.007843076718682382, 'together': -0.0031349103345691227, 'move': 0.002762817186091561, 'hit': -0.015353285049714472, 'lupins': -0.007087062417586252, 'crabbe': -0.017749301389741, 'owl': 
-0.0013283100076324351, 'nearly': 0.0008313257110820123, 'witch': -0.011179710216501373, 'house': -0.015040999182191369, 'ten': -0.002315899375156048, 'light': -0.010947527989454822, 'tiny': -0.008043497442201276, 'ask': 0.007224271996669889, 'classroom': -0.014239357322661106, 'boggart': 0.0018596137007785042, 'book': -0.01080778867164947, 'top': -0.01398900172456219, 'read': -0.0025965094148949035, 'work': 0.011859486391445071, 'enormous': -0.011714940979450552, 'past': -0.005951282349564126, 'raised': -0.009225870768228386, 'er': 0.01161099956770801, 'staircase': -0.019117728132220835, 'minister': 0.007966527566531724, 'telling': 0.006912999864088632, 'malfoys': -0.002927395831615161, 'listen': 0.011338660695749615, 'roared': 0.004106382955232221, 'appeared': -0.013537795039180851, 'sorry': 0.017798852009660495, 'pocket': -0.010305912956841011, 'sound': -0.007172814249244844, 'bag': -0.010849106132032951, 'sort': -0.0018125231923883714, 'place': -0.003736875689211511, 'entrance': -0.014044412785033012, 'goyle': -0.01456116276871642, 'expecto': -0.005321764373722119, 'lot': 0.005456859853661733, 'held': -0.013685413537034659, 'either': -0.00442292170884554, 'always': 0.003748486384003658, 'james': 0.0052664000217304925, 'shut': -0.008481105037292604, 'office': -0.006088090888390907, 'shouted': 0.0031479256909492342, 'corner': -0.012263622149521617, 'shaking': -0.0048121288986039314, 'close': -0.005056552997948972, 'kill': 0.006893272951273875, 'understand': 0.01665114266014853, 'ravenclaw': -0.009515001874775629, 'petunia': 0.04998862436554121, 'friends': -0.00427646479354401, 'loudly': 0.005257223052335245, 'holding': -0.0031271123273273415, 'five': -0.01238349806457807, 'walked': -0.00912248039456141, 'shoulder': -0.00505334665366537, 'someone': -0.0029407482180957845, 'magical': -0.00194904152840015, 'taking': 0.00844145962045984, 'making': -0.0033407323243074363, 'pointing': -0.004556297694130696, 'many': 0.0025570136034761006, 'parents': 
-0.0031212141447118247, 'teacher': -0.002124868828848968, 'seized': -0.013970340503960715, 'picked': -0.00717954679040415, 'remember': 0.008427962887732999, 'rather': 0.0018767180542886785, 'eye': -0.004321910465047439, 'taken': -0.0015322396981468575, 'supposed': 0.003391498708509652, 'chest': -0.009833493396949777, 'watch': 1.821165251747002e-05, 'shes': 0.004080725315428345, 'pointed': -0.005124280271752183, 'lesson': -0.003489298043176026, 'climbed': -0.014147815859565769, 'corridor': -0.01298941096366062, 'times': -0.0007699209967777198, 'parchment': 0.0007601218879702236, 'word': 0.0018145439040807806, 'almost': -0.007966355357069318, 'thin': -0.00914094659578139, 'theyre': 0.00809899605970894, 'tree': -0.007310041359845965, 'fast': -0.009718006496157285, 'usual': -0.0032129649848414736, 'neck': -0.011055237261497292, 'idea': 0.002639124883085213, 'everything': 0.002252065874382434, 'breath': -0.008274462706255648, 'patronus': 0.0011579182315058492, 'followed': -0.008143863814686424, 'moving': -0.005174536923465966, 'christmas': -0.0012836474128318095, 'fat': -0.004487773497104852, 'alone': 0.00019346701887100777, 'since': -0.007795603907710602, 'bus': -0.008458386212435762, 'chocolate': 0.003223386269481178, 'defense': -0.008241167095610279, 'fire': -0.008817656150528512, 'creatures': 0.00026162907817056647, 'noise': -0.005818754414908225, 'lavender': -0.0003165516473993331, 'werewolf': 0.0017617789990651922, 'muggles': 0.00783193391845305, 'near': -0.004093814312307772, 'use': 0.004078470900933249, 'silence': -0.00945242711234037, 'extremely': -0.0010520273225845154, 'silver': -0.012858032554431291, 'person': -0.0027917506039232686, 'form': 0.011998079066150968, 'speak': 0.004921963294205, 'red': -0.00958263594340531, 'cat': -0.001210789740909817, 'patronum': -0.0021123673957808733, 'catch': 0.0015984536618031174, 'indeed': 0.010006896293055591, 'dursleys': 0.00473218374310752, 'start': -0.001698800198553351, 'loud': -0.0013011583820964434, 'trouble': 
0.007394996538449368, 'threw': -0.011084227904372725, 'hundred': -0.010213338675409661, 'try': 0.007346976507006247, 'isnt': 0.006741431382492596, 'point': 0.0013881681667115703, 'heart': -0.005184250908306492, 'words': -0.004474989425173113, 'dangerous': 0.0012931121674162297, 'stay': 0.00845896749792886, 'walking': -0.01006653117690414, 'finally': -0.004952914659228829, 'leg': -0.007476608123845089, 'leaving': -0.003977934763001977, 'wrong': 0.007902609040431318, 'quiet': -0.005786450689754755, 'chair': -0.007676336498700153, 'invisibility': -0.005580488945652618, 'steps': -0.014440049535410622, 'arts': -0.010064685110255897, 'closed': -0.013379555632723453, 'students': -0.004215007657806497, 'except': -0.008017985822684074, 'headmaster': 0.008889552246278822, 'portrait': -0.010982428925295808, 'points': -0.007293213831823762, 'severus': 0.007418529036243869, 'chapter': -0.0011731703270171686, 'foot': -0.011016355907954285, 'muggle': -0.0012397254540440846, 'hedwig': 9.257518077082572e-05, 'finished': 0.0038465948658464, 'killed': 0.0036420980592156414, 'aside': -0.01116568889549917, 'gold': -0.005945280093295301, 'mrs': 0.004371692853850738, 'hurried': -0.01316232797127561, 'train': -0.003582212254696661, 'shall': 0.004915804490177148, 'broke': -0.0035166834491541466, 'stand': -0.0016497044881039476, 'saying': 0.003723564429782593, 'ern': 0.0023637228941660398, 'passed': -0.006681120241126236, 'running': -0.001864038010406833, 'screaming': -0.007536876109127763, 'sir': -0.001531447296484597, 'stomach': -0.00709213479082565, 'glasses': -0.00936379935782216, 'teachers': -0.004316351070713781, 'least': 0.0009294276496303031, 'bad': 0.00567595848264249, 'used': -0.0017467241359950937, 'letter': -0.005936106982280993, 'beneath': -0.012246357148691123, 'huge': -0.003651013512922953, 'several': -0.00521921300456187, 'burst': -0.007436346693188247, 'horrible': -0.0009238174308924818, 'fact': -0.00015240978852357193, 'gasped': 0.0028377577148975177, 'forest': 
-0.004135502469726698, 'completely': -0.006709330216552573, 'chance': -0.0007027922235980559, 'week': -0.00018982670389994054, 'street': -0.006513164129554912, 'meant': 0.0019829819809258843, 'arms': -0.008937103929585176, 'died': 0.003190327551108069, 'sign': 0.006605213419427762, 'third': -0.0049148208911246735, 'name': 0.0018680779994223813, 'mum': 0.0028686015186545465, 'heads': -0.004269233397969317, 'theyd': 0.0057124097331963, 'desk': -0.000696862577518859, 'care': 0.003975544469918275, 'doors': -0.00972152895191787, 'wind': -0.007088882792835228, 'wall': -0.006857565289925161, 'met': 0.002763161259099633, 'white': -0.006749989262774866, 'low': -0.003923441843064538, 'listening': -0.004328172616912765, 'willow': -0.003981823575198763, 'reckon': 0.007685543231993692, 'wing': -0.0026412395761166627, 'wait': 0.0009257239534937898, 'honeydukes': -0.003256978427865593, 'innocent': 0.0028514131678063436, 'rosmerta': 0.011359537438219366, 'lying': -0.010546569250376576, 'real': -0.001094901741788018, 'call': 0.009251810570642734, 'wizards': -0.005495866973989227, 'ear': -0.0021250593519969514, 'seconds': -0.005167646392867132, 'whether': 0.001429320568453995, 'tail': -0.006183814474576768, 'hope': 0.011954239419973506, 'doesnt': 0.011745823470922179, 'cabin': -0.011851564308671913, 'visit': 0.0029394795534222477, 'straight': -0.003726827992150282, 'thick': -0.004895413550438891, 'knight': -0.006639736051614416, 'worse': -6.706411484039606e-05, 'dyou': 0.011405636876724344, 'divination': 0.0016076560913187111, 'death': -0.0005384821609754697, 'theyve': 0.005510447816867415, 'hold': -0.0034519332217686854, 'quick': 0.0033672763250664896, 'hole': -0.008368197672671437, 'filch': 0.004109848090983169, 'dudley': 0.005338546159122297, 'talk': 0.005328384771289161, 'voices': -0.0052883975055846594, 'perhaps': 0.0040188910678863555, 'upon': -0.003788532319888706, 'fingers': -0.0038607462278159553, 'world': -0.0017822336029866168, 'ready': 0.0021836590907874367, 'shot': 
-0.005203898696402492, 'tears': -0.0032494983766757535, 'hissed': 0.0035440095307955493, 'starting': 0.008587193960217723, 'ran': -0.009869097159627107, 'half': 0.0023388837547467555, 'werent': 0.004354846254942175, 'mad': 0.0021033231210981856, 'crystal': -0.004274494411340007, 'nobody': -0.0012968617912760513, 'looks': 0.0011069054823576907, 'sent': 0.00047912203705240564, 'hospital': -0.004389670992236639, 'trelawneys': -0.0013405480626712879, 'friend': 0.00637321380507012, 'worst': 0.0017033003701509184, 'turning': 0.0036435271818360338, 'cage': -0.009342389101806415, 'instead': 0.0010984236442845839, 'legs': -0.011062478695790589, 'furiously': 0.0023281048910994296, 'glass': -0.000904151570384809, 'brought': -0.0025834058316238466, 'managed': 0.0012661273811094046, 'far': -0.002158771328699947, 'whose': -0.0031369907361330548, 'laughter': -0.003480391341492792, 'kind': -0.0040452720667029695, 'exactly': 0.0008350392165290436, 'clear': -0.0016020362113914816, 'news': 0.003907980924410507, 'angry': -0.000692084581252139, 'twelve': 0.0002228453523378924, 'ears': -0.0021000693582836874, 'certainly': 0.007584140388981908, 'backward': -0.00942384942992914, 'beside': -0.006209644827790851, 'trees': -0.0045353995514360755, 'lets': 0.007318388047347225, 'hours': -0.002200224855877254, 'miss': 0.00621828951063734, 'tonight': -0.0020701765463459728, 'scared': -0.00013188314989647418, 'ahead': -0.005687665254598063, 'parvati': -0.00440985032674906, 'grim': 0.0026079580015301394, 'became': -0.0018395010642927984, 'broomstick': -0.003976311693780678, 'given': 0.0036384213569166978, 'hidden': -0.008179996451175744, 'direction': -0.005199403695641977, 'broken': -0.0016985833505752898, 'lay': -0.008848342005718206, 'carrying': -0.003129123064440762, 'stuff': 0.00448569411418804, 'snapped': -0.00036220756932651467, 'hiding': 0.0005533913799226935, 'true': 0.007930092427718721, 'upstairs': -0.0068081099609495575, 'memory': -0.0018326554245275308, 'story': 0.005787355361048612, 
'pulling': -0.005878664714100584, 'excellent': 0.008789733277271552, 'free': -0.00102519374848976, 'deserted': -0.00968496172951181, 'windows': -0.0070438806734843045, 'wearing': -0.0034074319655922366, 'disappeared': -0.0007233805289732925, 'approached': -0.00461115522294476, 'continued': 0.0011724783541469837, 'deep': -0.0009107369690511767, 'matter': 0.0019275717614795922, 'entered': -0.009827366568383065, 'rain': -0.004532258466263865, 'terrible': 0.0008525670474670912, 'stone': -0.007538870057163586, 'flitwick': 0.006589879688352063, 'buckbeaks': -0.008929127049291522, 'peeves': 0.0010235408302136817, 'nose': -0.0040344160809611595, 'teeth': 0.0027724585827515044, 'probably': 0.00011372083050618803, 'stairs': -0.0049542362709116584, 'family': 0.001184532765841086, 'number': -0.0007711717237754745, 'cauldron': -0.004795973869969309, 'grabbed': -0.0057021989056152845, 'granger': 0.007855498822981038, 'soon': -0.002436214127617256, 'green': -0.007556697281703982, 'lily': 0.0028428416201833873, 'water': -0.0031811786775212876, 'wings': -0.00842824274542709, 'picture': 0.000671669237580917, 'ginny': 0.0007885520607750287, 'term': -0.003148233626393169, 'glanced': 0.0004960023900209179, 'pair': -0.0035743428364199553, 'apart': -0.0032014673590171367, 'nervously': 0.0044675034345966575, 'big': 2.649934298817525e-05, 'boys': -0.0025277304332755345, 'fine': 0.006585861153032961, 'smile': -0.0014399281190575523, 'abruptly': 0.005300251463864536, 'happen': 0.0044840504248438605, 'arrived': -0.0003603188250119655, 'unless': -0.0019416718235717645, 'youknowwho': 0.005007395670441761, 'laughing': -0.00503780947723045, 'darkness': -0.005450881416037306, 'eat': -0.0005525664890191955, 'strode': -0.007677644157785523, 'dean': -0.001647586461834422, 'dropped': -0.002693307304662959, 'truth': 0.001473546845061355, 'nearer': -0.0048921103499711144, 'headed': -0.011809291952562999, 'vanished': -0.0076653183038768394, 'roots': -0.0018146239988836009, 'hooch': 0.0014858872829330233, 
'committee': 0.002695763932710495, 'pushed': -0.010075454675209228, 'afraid': 0.011470466070577235, 'allowed': 0.0022796952376124236, 'bottle': -0.001464822714252528, 'carefully': -0.004999071829242262, 'reason': 0.001297896414806872, 'break': 0.0015235307364292482, 'answer': 0.0027001029647728734, 'late': 0.001361771085948608, 'seem': 0.00494860463070584, 'knees': -0.005134070321899102, 'tightly': -0.008482320823013827, 'permission': 0.006630025522036636, 'walk': -0.002871346245723148, 'today': 0.0034521057909275813, 'hardly': 0.002895231426439801, 'footsteps': -0.005803209443017912, 'tight': -0.004860847179604231, 'hat': -0.0014393758693912323, 'needed': 0.0017896994018655817, 'shook': -0.005034829372858206, 'sank': -0.005320914487095592, 'covered': -0.000559332370127349, 'em': 0.002248676111151268, 'job': 0.0025520665758568773, 'ah': 0.011688015724889174, 'throat': -0.002376115292917964, 'thank': 0.0038831612465802866, 'seamus': 0.0011964588593337596, 'gryffindors': -0.004683339615424483, 'waited': -0.0030745661351120122, 'playing': -0.002672908946482589, 'closely': -0.001972359591364616, 'wands': -0.0041591092336768, 'wants': 0.0078059052908009624, 'knows': 0.0008232519997261559, 'potion': 0.0007936311964337137, 'lake': -0.004698434972529133, 'lady': -0.0012673680066229444, 'grass': -0.007082021938958741, 'stupid': 0.00029793359811659467, 'angelina': -0.0029736204398865774, 'potters': 0.0017859565376527296, 'holidays': 0.0011860890639264858, 'forced': -0.0011037964672134019, 'essay': 0.0023874788133539057, 'closer': -0.00016479415159031195, 'particularly': 0.0010570094371451766, 'locked': -0.0028913535741190268, 'days': 0.00040381873547048797, 'nasty': -0.0025152294708634146, 'vernons': 0.01766817187973591, 'opposite': -0.004269601309677518, 'wizarding': 0.0009514505390217393, 'birthday': 0.0024054466622784863, 'worried': 0.008879389533938017, 'lord': -0.00030347982099156797, 'flew': -0.0065886987187124685, 'paper': -0.006437879690658947, 'present': 
0.001081404408322698, 'happily': 0.005240167974859793, 'ago': 0.00012219828238209694, 'nimbus': -0.002639880775044212, 'view': -0.0064807780503787285, 'edge': -0.004607935450137856, 'asleep': -0.002361068811499659, 'within': 0.0006208257476413154, 'none': 0.0007192621222658984, 'added': 0.0003695046221836956, 'mother': 0.004427046146798549, 'snarled': 0.007712522000012825, 'furious': 0.0011624506423758506, 'sudden': -0.0059764131674124735, 'hurt': 0.004321405811875958, 'high': -0.00608308308058323, 'rolled': -0.008159057419030162, 'sleep': -0.0014615238898954468, 'classes': -0.00023585968810276665, 'led': -0.006612519465663351, 'laugh': 0.001645067476138649, 'slipped': -0.006323292584328467, 'ball': -0.006027100257889654, 'fighting': 0.0024933585842174433, 'bags': -0.004354370763565032, 'glittering': -0.005917638322731117, 'dormitory': -0.0037693255738363188, 'spoke': -0.001801148076085341, 'slytherins': -0.0025915514227480025, 'goal': -0.005746324051182375, 'alicia': -0.0004248158057446082, 'katie': -0.008138011511488344, 'working': 0.0019293697277489813, 'whomping': -0.0029024693443445975, 'marauders': -0.0012111082958367171, 'pettigrews': -0.0015853163429622712, 'remus': 0.008250040706709164, 'summer': -0.0017429562085394357, 'homework': 0.0018484074570786718, 'ink': -0.0028019451458055665, 'four': -0.003090663224482628, 'car': 0.005012635456430418, 'funny': 0.0028303897027103725, 'alive': 0.0006188928299587105, 'gazing': 0.00035172043558029213, 'seeing': 0.001915749269108349, 'hesitated': 0.000307916446965713, 'wondering': 0.0025219577033315805, 'leapt': -0.004034908874107482, 'middle': -0.005927485885939414, 'pleased': 0.0031322466489639867, 'brown': -0.0015472846320075933, 'daily': -0.00013862594390499973, 'prophet': -0.0013883110592335698, 'win': 0.0018342027367324292, 'poor': 0.001829918820176052, 'heavy': -0.0036867953963058806, 'breakfast': -0.0018690712633272454, 'piece': -0.0023593946311197406, 'finger': -0.002694287923328948, 'forget': 
0.008605772914455091, 'lunch': -0.0019676240190806197, 'grip': -0.0007135864262586462, 'decided': 0.0025526425170843145, 'die': 0.002925745680028815, 'ceiling': -0.0019810447678310665, 'bent': -0.003678040925538731, 'fall': -0.0020364313261823567, 'fallen': -0.005797961118159824, 'longbottom': 0.0009037168249550053, 'slid': -0.005818268693605849, 'leaky': -0.0044025885677073115, 'tom': -0.004165812163899497, 'waiting': -0.0017811810475831268, 'game': -0.0022747134845317587, 'shop': 0.0017367839781481423, 'somebody': -0.0018824063191170727, 'sharply': 0.004454154167516378, 'seriously': 0.003922839908639876, 'impossible': 0.0020685854036342254, 'twenty': -0.008760603676259466, 'crack': -0.0010289099978252315, 'hufflepuff': -0.003281783206248838, 'helping': 0.005462563442133287, 'rope': -0.004053171194303199, 'others': -0.0012881569803699664, 'hippogriffs': 0.0034776855154986506, 'hell': 0.006325397428046144, 'fifty': -0.00378311010303689, 'oliver': 0.007804942951056417, 'cho': -0.004799256933399807, 'also': 0.0030633601710478107, 'frowning': 0.007484751496284196, 'pain': 0.0013865591570837248, 'spent': -0.0021625426577610257, 'notice': 0.0006214386275370025, 'expression': 0.0033143680586873235, 'dare': 0.004183721119118458, 'oclock': -9.5914104433766e-05, 'hour': -0.0020946029836859674, 'leaned': -0.00515990500544794, 'escaped': 0.0026359786111017657, 'attack': 0.0017279516833645047, 'sky': -0.0024695950143456026, 'landed': -0.0063980169393506965, 'soft': -0.0033202074126115443, 'gray': -0.0033166827987543106, 'thanks': 0.006605663581487828, 'waving': 0.002173462818145714, 'final': -0.0007543499226745494, 'says': 0.004512935799623138, 'bet': 0.006282950646369988, 'angrily': 0.0030178089923498246, 'grinning': -0.00019055456188420036, 'cross': -0.0012125542995123277, 'fixed': -0.002240660038801532, 'minute': -0.001130860666620781, 'box': -0.0022212208286043395, 'knocked': -0.001259551070939603, 'flat': -0.0036588878445340633, 'smiling': 0.008648474569302707, 'live': 
0.005166262628946733, 'evening': -0.0032336393642440923, 'walls': -0.0013581099217762453, 'explain': 0.00357003802959986, 'fence': -0.003141943152564467, 'halt': -0.004839277111446439, 'familiar': -0.0011266262356674356, 'repeated': -0.0016528970869664331, 'bar': -0.0067776412319075765, 'passage': -0.001652129735487189, 'safe': 0.0029614649319064804, 'cried': 0.00152146806269216, 'future': -0.00265589388588967, 'arithmancy': -0.0028291694990032084, 'definitely': -0.00043834356997954636, 'joined': -0.001679305803202981, 'girl': 1.1720058345113913e-05, 'maybe': 0.002347690328561581, 'dumbledores': 0.0005906254202996604, 'feast': 0.004844132617759364, 'filled': -0.006184473496710916, 'become': 0.0017968315450243774, 'cadogan': -0.004600917239328148, 'rose': -0.006589667826221255, 'practice': -0.0009209659009309912, 'seeker': 0.0001244996744916694, 'forgotten': -0.00034586919038359165, 'butterbeer': 2.442380125548991e-06, 'secretkeeper': -0.0006145323385150967, 'macnair': 0.00413874697241114, 'charm': 0.0007808927650255102, 'drive': 0.0019908155692614964, 'hoped': 0.0030448119156578574, 'speaking': 0.003184543933350327, 'frightened': -0.0008938454631462123, 'realized': 0.0006364081958393052, 'tomorrow': 0.004359350502991926, 'suppose': 0.002719268126994528, 'clearly': 0.0014101972258211682, 'lucky': 0.0010516788084553427, 'beak': -0.003410872614430821, 'moon': -0.0002460700885445264, 'flying': -0.0011833884048615842, 'london': 0.0011856339406753076, 'note': 0.003109908499829449, 'change': 0.0024000551086902277, 'difficult': 0.003214005679383208, 'missed': 0.0013139622777271023, 'longer': 0.004423469873662621, 'helped': 4.30733491239108e-05, 'blood': 0.0006540640334272198, 'subject': 0.00032551410691874654, 'wardrobe': -0.0021425095569768352, 'shrieking': 0.004139712328472536, 'nodded': 0.001728088300694023, 'eh': 0.00523293218583331, 'single': -0.0028774925536718806, 'putting': 0.004552961693257783, 'ones': 0.003240007460471655, 'wiping': -0.0023771939038805243, 
'expect': 0.007425523111970834, 'effort': -0.001194966627622673, 'expelled': 0.004455744088643046, 'wide': -0.008592948589344462, 'stepped': -0.0020567946772233033, 'different': -0.0010052281238546131, 'skin': -0.0029514555704390362, 'figure': -0.0026001553576007357, 'giving': 0.0037648203525597617, 'short': -0.0010481853947611698, 'using': 0.0024374440319724373, 'bin': 0.0006692552758522817, 'saved': 0.001886436819862284, 'spotted': 0.003006695266233613, 'tables': -0.0066102342813525216, 'hasnt': 0.006539030447126644, 'twice': 0.0012505862785312366, 'shadows': -0.001992963275924742, 'scarlet': -0.002066364358074396, 'theyll': 0.005383384178570216, 'midair': -2.643527210307864e-06, 'seat': -0.008623142367654893, 'breaking': 0.001415484400061432, 'marble': -0.005755870388224685, 'muttering': -0.0018337208210820227, 'leaves': 8.986478786010721e-06, 'ladder': -0.004411032878156567, 'beat': -0.003066854980891231, 'cut': 0.0011147677405601124, 'quaffle': -0.005160516758928903, 'bludger': -0.0012148448517691575, 'oneeyed': -0.004622799376759751, 'prongs': 0.00040566224108584014, 'tunnel': 0.0004239989148818777, 'page': -0.0017728740825551548, 'known': 0.004167396867304179, 'less': 0.0018718756684666848, 'bellowed': 0.004796955758389767, 'obviously': 0.002588024874009427, 'send': 0.006841702741285131, 'stretched': -0.0039033372852180165, 'inches': -0.00486904125889663, 'barely': 0.00025856010382871203, 'soared': -0.005469513945093193, 'join': 0.0016757169574735603, 'ripped': -0.0028553549459341405, 'trembling': -0.0004589402426648864, 'six': -0.004193001172908678, 'whod': 0.0012201229648079518, 'laughed': -0.00027462319900854656, 'bound': -0.0029779776739058297, 'gleaming': -0.001781891920202116, 'broomsticks': -0.002067972905858128, 'snap': -0.003000687865937716, 'worry': 0.00838324187298224, 'return': 0.0012052216015346114, 'marges': 0.008953274085967973, 'wheres': 0.009689913753408065, 'force': 0.0026846186281314225, 'escape': 0.002207707259130298, 'forbidden': 
-0.0009119394239650125, 'warning': -0.0017463067673151613, 'screamed': 0.003393347146895967, 'stuck': -0.0026903155966073165, 'heading': -0.003627811513680692, 'serious': 0.004393935333881912, 'thrown': -0.001918622940765479, 'cornelius': 0.005530789964622194, 'anyway': 0.00428708078519596, 'diagon': 0.0001126675797794141, 'alley': -0.00035668399491355383, 'excitedly': 0.003722107784134344, 'lose': 0.003842374041004246, 'packed': -0.00038914488962036556, 'thomas': 0.0011566341825863598, 'meet': 0.0054188901992717475, 'checking': 0.0005197376608519964, 'ages': 0.0015181750112079745, 'lowered': 0.0007534976691976447, 'louder': -0.0011050390197083271, 'terrified': 0.0006038710326457165, 'shining': 0.00041541215233408235, 'question': 0.0029317143244000873, 'branches': -0.00015435011928233765, 'lessons': 0.0005396825368434201, 'confused': 0.003401722989306637, 'afternoon': 0.00011469633668468628, 'riddikulus': -0.0011803408542461046, 'joke': 0.002572673385007524, 'strong': -0.0023783086156375995, 'charms': 0.0020728539604538535, 'silent': -0.0007411457166929296, 'falling': -0.0014469465600902406, 'betrayed': 0.005479064395561899, 'ward': -0.002075971344818006, 'possession': -0.005852758269113396, 'flint': -0.0017715926160835477, 'highly': 0.003446989193380777, 'secret': 0.001225491868432277, 'write': 0.004735194990895799, 'paused': 0.004954734684025933, 'round': 0.0045477389596884805, 'weird': -0.0007154329496553611, 'cupboard': -0.0026123077163840982, 'living': -0.003341293473636372, 'possible': 0.0005073634632884038, 'potions': 0.0016975157617557099, 'delighted': 0.001560910575231843, 'jumped': 0.002900618596629831, 'spot': 0.004249498285379557, 'weeks': 0.0021753773737289307, 'thirteen': 0.0022038940605512334, 'hedwigs': -0.002973775990552033, 'whatever': 0.0014858192234954346, 'bright': 0.000788230784883687, 'voldemorts': 0.004079254955577924, 'golden': -0.000553472960848351, 'growing': -0.002423081438062983, 'lower': -0.0030846929878829443, 'owls': 
-0.0028832520460685324, 'tied': -0.0007666254045523895, 'errol': -0.000136985196631688, 'carried': -0.00019448715538629196, 'grin': 0.0021541890904513954, 'weasleys': -0.0010681525171456205, 'seven': -0.0019622482957446963, 'remembered': 0.004274638185185881, 'sun': -0.0006283787600036608, 'sneakoscope': 0.0002251274740177475, 'loads': 0.002909151541937897, 'knowing': 0.0012182555759858564, 'handle': -0.0029703501347518734, 'thousand': -0.0007635961020046038, 'dogs': 0.004764290919586664, 'monster': -0.0033295451995652542, 'sideways': -0.0023849794248880026, 'ouch': 0.005904136538630075, 'struggling': -0.002334273301236213, 'village': 0.0021744931169354405, 'woke': 0.0019467873286207265, 'hot': -0.0018066039135904198, 'line': -0.000982887782192476, 'immediately': -0.002098194583382889, 'peered': -0.0011793129095425622, 'deal': 0.0028085337154202658, 'thoughts': -0.0020526315344083046, 'growled': 0.0068718784150445964, 'attention': 0.00044879522409731437, 'returned': -0.0040911681486013925, 'suspiciously': 0.005818787719461665, 'itll': 0.004479023311302059, 'calmly': 0.007567288187525331, 'sighed': 0.003388846881741929, 'fault': 0.0022100470102234216, 'clutched': -0.0024429153023762954, 'arent': 0.002470714273621631, 'changed': -0.0010912640370720316, 'deeply': 0.0006864907685887543, 'heaved': -0.0033539141102110416, 'means': 0.007529515578628817, 'money': -0.000867421690375345, 'bang': -0.0023412820997826783, 'step': -0.0013517741605413801, 'calm': 0.0018156693667332542, 'fear': 0.0014386533613099715, 'forgot': 0.00804312384787266, 'whisper': 0.006159549166036216, 'examining': 0.0016872602429316276, 'beaming': -0.003507801662441571, 'parlor': -0.002521181354977433, 'anymore': 0.007181658416295617, 'among': -0.003521982877243225, 'excited': -0.0009219166759951943, 'surprise': -0.002367932748986037, 'hurrying': -0.00173653656954139, 'drew': -0.002498906405633238, 'balls': -0.0007429146023686837, 'staying': 0.002470043669299232, 'rats': 0.0010492851461998242, 
'stuffed': -0.0047087088558084795, 'hanging': 0.00223158734916374, 'doubt': 0.0029952639922548982, 'check': 0.0014134396702692481, 'bring': 0.0018863151751608099, 'search': 0.0024953715007263377, 'compartment': -0.0002695823140775237, 'careful': 0.003324249645333245, 'pale': -0.00418092176978523, 'corridors': -0.003404587488461149, 'patch': -0.0019786618618134457, 'reach': 0.005020581733696491, 'silvery': -0.0018213205991867809, 'mud': -0.002454375177822176, 'applause': -0.00046051661581407324, 'beyond': -0.0011867213192274257, 'pumpkin': -0.001816141992382679, 'group': -0.0012630582078398038, 'animal': -0.0007687409542093844, 'harder': -0.0014421880416696084, 'fer': 0.003809143151917532, 'trust': 0.005032857243247674, 'conversation': 0.003218099933671961, 'imagine': 0.005602166774913313, 'zonkos': -0.001138465905414773, 'fly': 0.0004087281510506788, 'weight': -0.0035311003608839093, 'stadium': -0.003990229843196864, 'cheering': -0.0004880432433416364, 'moments': -8.539510511785332e-05, 'wormtail': 0.0012907610891788668, 'anybody': 0.0005709393445733278, 'exam': 0.0010890770189377508, 'executioner': 0.001765434707522009};
# Hand-curated vocabulary lists, apparently used to pick out groups of words
# from the per-word weights dict above (Harry Potter character names, houses,
# and gender subsets). The commented-out list below is the original, larger
# character list kept for reference.
#words_characters = ['harry', 'ron', 'hermione', 'professor', 'lupin', 'black', 'snape', 'hagrid', 'dumbledore', 'malfoy', 'sirius', 'scabbers', 'neville', 'potter', 'weasley', 'mcgonagall', 'crookshanks', 'aunt', 'pettigrew', 'madam', 'uncle', 'wood', 'buckbeak', 'trelawney', 'vernon', 'mr', 'percy', 'george', 'marge', 'blacks', 'peter', 'hippogriff', 'pomfrey', 'voldemort', 'father', 'dad', 'boggart', 'goyle', 'james', 'petunia', 'werewolf', 'muggles', 'muggle', 'silver', 'person', 'dursleys', 'severus', 'hedwig', 'teachers', 'mum', 'rosmerta', 'dudley', 'parvati', 'family', 'granger', 'lily', 'ginny', 'hooch', 'seamus', 'lady', 'alicia', 'katie', 'remus', 'prophet', 'longbottom', 'tom', 'oliver', 'cho', 'thomas', 'monster', 'drew', 'wormtail', 'gryffindor', 'slytherin', 'hufflepuff', 'ravenclaw']
# Reduced character list actually in use.
words_characters = ['harry', 'ron', 'hermione', 'lupin', 'snape', 'hagrid', 'dumbledore', 'malfoy', 'sirius', 'scabbers', 'neville', 'potter', 'weasley', 'mcgonagall', 'crookshanks', 'pettigrew', 'buckbeak', 'trelawney', 'vernon', 'percy', 'george', 'marge', 'hippogriff', 'voldemort', 'boggart', 'goyle', 'james', 'petunia', 'hedwig', 'dudley', 'parvati', 'lily', 'ginny', 'seamus', 'remus', 'longbottom', 'oliver', 'cho', 'thomas', 'drew', 'wormtail', 'magic']
# The four Hogwarts houses.
words_houses = ['gryffindor', 'slytherin', 'hufflepuff', 'ravenclaw']
# Character subsets split by gender, per the variable names — NOTE(review):
# 'weasley' appears in both witches and wizards; confirm that is intentional.
words_witches = ['hermione', 'weasley', 'mcgonagall', 'trelawney', 'parvati', 'lily', 'ginny', 'cho']
words_wizards = ['harry', 'ron', 'lupin', 'snape', 'hagrid', 'dumbledore', 'malfoy', 'sirius', 'neville', 'potter', 'weasley', 'percy', 'george', 'fred', 'voldemort', 'goyle', 'james', 'seamus', 'remus', 'longbottom', 'oliver', 'thomas', 'drew']
|
#%% from all the dataset extract the raw rows that respect some conditions
# Streams a bz2-compressed, tab-separated dump of Wikipedia revision rows
# (sorted by page) and walks it page by page. Several variables set in the
# loop (user, rev_id, reverter, ...) are read but never used afterwards —
# this looks like exploratory/notebook code (#%% cell markers).
import bz2
import subprocess
import os
from datetime import datetime
from utils import utils
dataset = '/home/gandelli/dev/data/it/sorted_by_pages.tsv.bz2'
dump_in = bz2.open(dataset, 'r')
# First readline only primes the loop condition; its content (presumably a
# header row) is discarded.
line = dump_in.readline()
reverted_user = ''
current_page_id = 0
current_page = ''
reverter_id = 0
inizio = datetime.now()  # start timestamp ("inizio" = Italian for "start")
print(inizio.strftime(" %H:%M:%S"))
while line != '':
    # NOTE(review): rstrip() already removes the trailing newline, so the
    # extra [:-1] drops one more character (a trailing tab?) — confirm the
    # dump format before trusting this.
    line = dump_in.readline().rstrip().decode('utf-8')[:-1]
    values = line.split('\t')
    if len(values) < 2:
        continue
    # NOTE(review): values[28] is accessed after only checking len >= 2;
    # short rows would raise IndexError here.
    if line == '' or values[28] != '0' or utils.is_vandalism(values[4]):
        continue
    if values[1] != 'revision':
        continue
    # Column meanings inferred from the names below — verify against the
    # dump's schema.
    page_id = int(values[23])
    page_name = values[25]
    user = values[6]
    user_edit_count = values[21]
    rev_id = values[52]
    reverter = values[65]
    is_reverted = values[64]
    if page_id != current_page_id:
        # compute m over the page that just finished (translated from Italian)
        print('processo current page che è finita', current_page)
        #initialize new page
        current_page_id = page_id
        current_page = page_name
        reverted_m = {}
dump_in.close()
print(datetime.now() - inizio)  # total elapsed time
# %%
|
def quicksort(x):
    """Return a new sorted list containing the elements of ``x``.

    Classic quicksort with the first element as pivot. The input list is
    partitioned in place at each level, but the sorted result is assembled
    into (and returned as) a fresh list, so callers should use the return
    value rather than rely on ``x`` being sorted afterwards.
    """
    if len(x) <= 1:
        return x
    pivot = x[0]
    boundary = 0  # index of the last element known to be < pivot
    for k in range(1, len(x)):
        if x[k] < pivot:
            boundary += 1
            x[k], x[boundary] = x[boundary], x[k]
    # Move the pivot into its final position.
    x[0], x[boundary] = x[boundary], x[0]
    left = quicksort(x[:boundary])
    right = quicksort(x[boundary + 1:])
    return left + [x[boundary]] + right
# Demo: sort a sample list and show the result.
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
# BUG FIX: quicksort() only partitions its argument in place and returns a
# NEW sorted list. The original code discarded the return value and printed
# the partially-partitioned input. Capture the result before printing.
alist = quicksort(alist)
print(alist)
#!/usr/bin/env python2
"""
This script performs trail finding for a given species.
Retrieval of metabolic pathways, genomic information and EC numbers associations
is also handled, as needed.
Version: 1.0 (May 2018)
License: MIT
Author: Alexandra Zaharia (contact@alexandra-zaharia.org)
"""
import multiprocessing
import os
import sys
import time
import trail.finding.consistency as consistency
import trail.finding.graph as graph
import trail.finding.HNet as HNet
import trail.finding.kegg_import as kegg_import
from trail.finding.Exclusions import Exclusions
from trail.finding.CoMetGeNeError import CoMetGeNeError, error
from trail.finding.NetworkBuilder import NetworkBuilder
from trail.finding.output import output_trail
from trail.utils import open_device
from parsers import arg_parser, kgml_parser
def run_HNet(kgml, args, exclusions, network_inst, dev_out):
    """Runs HNet (trail finding) for a given pathway of a given species.
    If trail finding takes too long, the analysis is aborted and the pathway is
    blacklisted for the gap parameters with which CoMetGeNe was executed.
    :param kgml: metabolic pathway in KGML format
    :param args: command-line arguments for this script
    :param exclusions: Exclusions object representing blacklisted pathways
    :param network_inst: NetworkBuilder object
    :param dev_out: output device for results (file or stdout)
    :return: list of trails found by the HNet algorithm for the given pathway,
        or None if the analysis exceeded the timeout and was aborted
    """
    # The HNet_on_every_arc function of HNet.py is started as a process that is
    # terminated if it takes longer than a set timeout. Terminating the process
    # also results in blacklisting the pathway, as its analysis takes too long.
    HNet_queue = multiprocessing.Queue()
    HNet_process = multiprocessing.Process(
        target=HNet.HNet_on_every_arc,
        args=(HNet_queue, network_inst.G, network_inst.D,
              network_inst.reactions,)
    )
    HNet_process.start()
    # join() with a timeout returns when the worker finishes OR the timeout
    # elapses; is_alive() below distinguishes the two cases.
    HNet_process.join(timeout=args.timeout)
    if HNet_process.is_alive():
        # Timed out: kill the worker, report, and blacklist the pathway so
        # subsequent runs with these gap parameters skip it.
        HNet_process.terminate()
        HNet_process.join()
        aborted = kgml + ': Aborted (analysis takes longer than '
        aborted += str(args.timeout) + ' seconds)\n\n'
        dev_out.write(aborted)
        exclusions.blacklist(
            network_inst,
            os.path.join(os.path.abspath(args.DIR), kgml))
        return None
    # NOTE(review): assumes the worker always puts exactly one result on the
    # queue before exiting — verify in HNet.HNet_on_every_arc.
    return HNet_queue.get_nowait()
def check_consistency(trail, network_inst):
    """Verify a trail against the original reaction and gene networks.

    The trail was computed for a metabolic pathway together with a reduced
    undirected graph of gene neighborhood; this re-checks that the trail is
    still a solution for the pathway with the original undirected graph.
    The check runs in a separate process and is abandoned after 30 seconds.

    :param trail: a trail produced by the HNet algorithm
    :param network_inst: NetworkBuilder object
    :return: True if consistent, False if not, None if the check timed out
    """
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(
        target=consistency.is_consistent,
        args=(result_queue, network_inst.G_reduced, network_inst.reactions,
              trail,)
    )
    worker.start()
    worker.join(timeout=30)
    if not worker.is_alive():
        return result_queue.get_nowait()
    # Timed out: kill the worker and report that no verdict was reached.
    worker.terminate()
    worker.join()
    return None
def analyze_kgml(kgml, args, G_init, exclusions, ec_numbers, dev_out):
    """Runs the HNet algorithm on the given KGML file (metabolic pathway) and
    outputs results.
    :param kgml: metabolic pathway in KGML format
    :param args: command-line arguments for this script
    :param G_init: undirected graph representing gene neighborhood for the
        given species
    :param exclusions: Exclusions object representing blacklisted pathways
    :param ec_numbers: dict associating a list of EC numbers (values) to R
        numbers (keys)
    :param dev_out: output device for results (file or stdout)
    """
    pathway = os.path.join(os.path.abspath(args.DIR), kgml)
    # Skip pathways blacklisted for the current gap parameters.
    if not exclusions.can_analyze(pathway, args.delta_G, args.delta_D):
        return
    title = kgml + ': ' + kgml_parser.get_pathway_title(pathway)
    dev_out.write(title + '\n')
    network_inst = NetworkBuilder(
        G_init, kgml, args, exclusions, ec_numbers, dev_out)
    # Proceed only if all networks have been initialized within the allotted
    # timeout.
    if not network_inst.blacklisted:
        trails = run_HNet(kgml, args, exclusions, network_inst, dev_out)
        # trails is None when run_HNet timed out (pathway was blacklisted).
        if trails is not None:
            if len(trails) == 0:
                dev_out.write(kgml + ': (not found)\n')
            else:
                # Only output trails that survive the consistency re-check.
                for trail in trails:
                    if check_consistency(trail, network_inst):
                        output_trail(
                            kgml,
                            trail,
                            network_inst,
                            dev_out)
                # NOTE(review): indentation of this trailing header write is
                # ambiguous in the flattened source — confirm whether it
                # belongs inside the for loop or after it.
                dev_out.write(kgml + ':\n')
        elapsed = '%s: --- %.2f seconds ---\n\n' % (
            kgml, (time.time() - network_inst.start_pw))
        dev_out.write(elapsed)
def main():
    """Performs trail finding (HNet) for a given species.
    Metabolic pathways for the given species are retrieved if necessary, as well
    as its genomic information.
    Results are either stored in a file, or displayed on stdout.
    """
    # Python 2 idiom: reopen stdout with buffer size 0 for unbuffered output
    # (the file's shebang says python2; this call would fail on Python 3).
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)  # unbuffered mode
    start_time = time.time()  # record the starting time
    args = arg_parser.parse_cmd_arguments()
    dev_out = open_device(args.output)
    try:
        # Optionally download the species' KGML pathway files first.
        if not args.skip_import:
            kegg_import.download_kgml(args.ORG, args.DIR)
        deltas = '--- delta_G = %d, delta_D = %d ---\n\n' % (
            args.delta_G, args.delta_D)
        dev_out.write(deltas)
        # Build undirected graph representing the genome.
        G_init = graph.build_undirected_graph(args)
        # Determine which pathways should be skipped for the current species.
        exclusions = Exclusions(args.ORG)
        # Retrieve the associations between EC numbers and reactions.
        ec_numbers = kegg_import.retrieve_ec_numbers()
        # Determine the list of kgml files to analyze.
        directory = os.path.abspath(args.DIR)
        pathways = [filename for filename in os.listdir(directory)
                    if filename.lower().endswith('.kgml')]
        pathways.sort()
        # Run CoMetGeNe for every metabolic pathway in the specified directory.
        for kgml in pathways:
            analyze_kgml(kgml, args, G_init, exclusions, ec_numbers, dev_out)
        elapsed = '--- %.2f seconds ---\n' % (time.time() - start_time)
        dev_out.write(elapsed)
        # Only close dev_out when it is a real file (not stdout).
        if args.output is not None:
            dev_out.close()
    except CoMetGeNeError as err:
        # Translate domain errors into an exit code from the `error` table.
        sys.stderr.write(err.text + '\n')
        if args.output is not None:
            dev_out.close()
        exit(error[err.value])
    else:
        exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
|
import unittest
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True if every bracket in ``s`` closes in the correct order.

        ``s`` is expected to contain only the characters ``()[]{}``. An empty
        string is valid.
        """
        # Map each closing bracket to the opener it must match.
        token = {
            ')': '(',
            '}': '{',
            ']': '[',
        }
        stack = []
        # Idiom fix: iterate characters directly instead of indexing via
        # range(len(s)).
        for char in s:
            if char in token:
                # A closer must match the most recent unmatched opener;
                # an empty stack means there is nothing to match.
                if not stack or stack.pop() != token[char]:
                    return False
            else:
                stack.append(char)
        # Leftover openers mean the string is unbalanced.
        return not stack
class Test(unittest.TestCase):
    def test_isValid(self):
        """Check Solution.isValid against a table of (expected, input) cases."""
        cases = [
            (True, '([])'),
            (False, '([]'),
            (False, '([)]'),
            (False, '([{'),
        ]
        for expected, string in cases:
            self.assertEqual(expected, Solution().isValid(string))
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
from datetime import datetime
from test.base import ClientBaseCase
from linode_api4.objects import Config, Image, Instance, Type
from linode_api4.objects.base import MappedObject
class LinodeTest(ClientBaseCase):
    """
    Tests methods of the Linode class

    ClientBaseCase (project fixture) supplies ``self.client`` and the
    ``mock_post``/``mock_put``/``mock_delete`` context managers that record
    the URL and payload of the API call under test.
    """
    def test_get_linode(self):
        """
        Tests that a client is loaded correctly by ID
        """
        linode = Instance(self.client, 123)
        # Not populated yet; attribute access below presumably triggers the
        # lazy load from the mocked API.
        self.assertEqual(linode._populated, False)
        self.assertEqual(linode.label, "linode123")
        self.assertEqual(linode.group, "test")
        self.assertTrue(isinstance(linode.image, Image))
        self.assertEqual(linode.image.label, "Ubuntu 17.04")
        json = linode._raw_json
        self.assertIsNotNone(json)
        self.assertEqual(json['id'], 123)
        self.assertEqual(json['label'], 'linode123')
        self.assertEqual(json['group'], 'test')
        # test that the _raw_json stored on the object is sufficient to populate
        # a new object
        linode2 = Instance(self.client, json['id'], json=json)
        self.assertTrue(linode2._populated)
        self.assertEqual(linode2.id, linode.id)
        self.assertEqual(linode2.label, linode.label)
        self.assertEqual(linode2.group, linode.group)
        self.assertEqual(linode2._raw_json, linode._raw_json)
    def test_rebuild(self):
        """
        Tests that you can rebuild with an image
        """
        linode = Instance(self.client, 123)
        with self.mock_post('/linode/instances/123') as m:
            # rebuild() returns the generated root password (sent as
            # root_pass in the request payload below).
            pw = linode.rebuild('linode/debian9')
            self.assertIsNotNone(pw)
            self.assertTrue(isinstance(pw, str))
            self.assertEqual(m.call_url, '/linode/instances/123/rebuild')
            self.assertEqual(m.call_data, {
                "image": "linode/debian9",
                "root_pass": pw,
            })
    def test_available_backups(self):
        """
        Tests that a Linode can retrieve its own backups
        """
        linode = Instance(self.client, 123)
        backups = linode.available_backups
        # assert we got the correct number of automatic backups
        self.assertEqual(len(backups.automatic), 3)
        # examine one automatic backup
        b = backups.automatic[0]
        self.assertEqual(b.id, 12345)
        self.assertEqual(b._populated, True)
        self.assertEqual(b.status, 'successful')
        self.assertEqual(b.type, 'auto')
        self.assertEqual(b.created, datetime(year=2018, month=1, day=9, hour=0,
                                             minute=1, second=1))
        self.assertEqual(b.updated, datetime(year=2018, month=1, day=9, hour=0,
                                             minute=1, second=1))
        self.assertEqual(b.finished, datetime(year=2018, month=1, day=9, hour=0,
                                              minute=1, second=1))
        self.assertEqual(b.region.id, 'us-east-1a')
        self.assertEqual(b.label, None)
        self.assertEqual(b.message, None)
        self.assertEqual(len(b.disks), 2)
        self.assertEqual(b.disks[0].size, 1024)
        self.assertEqual(b.disks[0].label, 'Debian 8.1 Disk')
        self.assertEqual(b.disks[0].filesystem, 'ext4')
        self.assertEqual(b.disks[1].size, 0)
        self.assertEqual(b.disks[1].label, '256MB Swap Image')
        self.assertEqual(b.disks[1].filesystem, 'swap')
        self.assertEqual(len(b.configs), 1)
        self.assertEqual(b.configs[0], 'My Debian 8.1 Profile')
        # assert that snapshots came back as expected
        self.assertEqual(backups.snapshot.current, None)
        self.assertEqual(backups.snapshot.in_progress, None)
    def test_update_linode(self):
        """
        Tests that a Linode can be updated
        """
        with self.mock_put('linode/instances/123') as m:
            linode = Instance(self.client, 123)
            linode.label = "NewLinodeLabel"
            linode.group = "new_group"
            # save() should PUT only the changed fields.
            linode.save()
            self.assertEqual(m.call_url, '/linode/instances/123')
            self.assertEqual(m.call_data, {
                "label": "NewLinodeLabel",
                "group": "new_group"
            })
    def test_delete_linode(self):
        """
        Tests that deleting a Linode creates the correct api request
        """
        with self.mock_delete() as m:
            linode = Instance(self.client, 123)
            linode.delete()
            self.assertEqual(m.call_url, '/linode/instances/123')
    def test_reboot(self):
        """
        Tests that you can submit a correct reboot api request
        """
        linode = Instance(self.client, 123)
        result = {}
        with self.mock_post(result) as m:
            linode.reboot()
            self.assertEqual(m.call_url, '/linode/instances/123/reboot')
    def test_shutdown(self):
        """
        Tests that you can submit a correct shutdown api request
        """
        linode = Instance(self.client, 123)
        result = {}
        with self.mock_post(result) as m:
            linode.shutdown()
            self.assertEqual(m.call_url, '/linode/instances/123/shutdown')
    def test_boot(self):
        """
        Tests that you can submit a correct boot api request
        """
        linode = Instance(self.client, 123)
        result = {}
        with self.mock_post(result) as m:
            linode.boot()
            self.assertEqual(m.call_url, '/linode/instances/123/boot')
    def test_boot_with_config(self):
        """
        Tests that you can submit a correct boot with a config api request
        """
        linode = Instance(self.client, 123)
        config = linode.configs[0]
        result = {}
        with self.mock_post(result) as m:
            linode.boot(config=config)
            self.assertEqual(m.call_url, '/linode/instances/123/boot')
    def test_mutate(self):
        """
        Tests that you can submit a correct mutate api request
        """
        linode = Instance(self.client, 123)
        result = {}
        with self.mock_post(result) as m:
            linode.mutate()
            self.assertEqual(m.call_url, '/linode/instances/123/mutate')
    def test_initiate_migration(self):
        """
        Tests that you can initiate a pending migration
        """
        linode = Instance(self.client, 123)
        result = {}
        with self.mock_post(result) as m:
            linode.initiate_migration()
            self.assertEqual(m.call_url, '/linode/instances/123/migrate')
class TypeTest(ClientBaseCase):
    """Tests for listing and loading Linode instance types."""
    def test_get_types(self):
        """
        Tests that Linode types can be returned
        """
        types = self.client.linode.types()
        # The mocked API fixture returns exactly four types.
        self.assertEqual(len(types), 4)
        for t in types:
            self.assertTrue(t._populated)
            self.assertIsNotNone(t.id)
            self.assertIsNotNone(t.label)
            self.assertIsNotNone(t.disk)
    def test_get_type_by_id(self):
        """
        Tests that a Linode type is loaded correctly by ID
        """
        t = Type(self.client, 'g5-nanode-1')
        # Lazy object: attribute access below presumably triggers the load.
        self.assertEqual(t._populated, False)
        self.assertEqual(t.vcpus, 1)
        self.assertEqual(t.label, "Linode 1024")
        self.assertEqual(t.disk, 20480)
|
import unittest
from unittest import mock
from service.app import create_app
class TestApp(unittest.TestCase):
    """Endpoint tests for the stories service Flask app."""
    def test1(self):
        # GET /stories succeeds when all downstream service lookups
        # (user, stories, follower check) are mocked out.
        _app = create_app()
        with _app.test_client() as client:
            with mock.patch('service.views.stories.get_users_s') as get_user_mock:
                with mock.patch('service.views.stories.get_stories_s') as get_stories_mock:
                    with mock.patch('service.views.stories.is_follower_s') as is_follower_mock:
                        get_user_mock.return_value = {
                            "firstname": "luca",
                            "lastname": "perez",
                            "email": "example@example.com",
                            "dateofbirth": "19/01/01",
                            "user_id": 1
                        }
                        get_stories_mock.return_value = [
                            {
                                'id': 1,
                                'text': 'diodiddio',
                                'dicenumber': 0,
                                'roll': {},
                                'date': '1/1/1',
                                'likes': 0,
                                'dislikes': 1,
                                'author_id': 1}
                        ]
                        is_follower_mock.return_value = True
                        reply = client.get('/stories')
                        self.assertEqual(reply.status_code, 200)
    def test2(self):
        # A non-numeric story id should 404.
        app = create_app().test_client()
        reply = app.get('/stories/nonExistingID')
        self.assertEqual(reply.status_code, 404)
|
# converts a csv file with set info to an Euler permutation list
# also checks for duplicate row names
#
# BUG FIXES vs. the original:
#  * `item` and `gene` were never defined, so every data row raised
#    NameError. The item name is taken from row[0] here (assumed to be the
#    name column — verify against the input file's layout).
#  * The label count was keyed via the undefined `gene[1]`; it is now keyed
#    by the item name, matching the "labels per gene" histogram below.
#  * Python-2-only file modes ("rU"/"wb") replaced with the Python 3 csv
#    idiom (text mode, newline="").
#  * Renamed `input`, `iter` and `min`, which shadowed builtins.

# defines the CSV data filenames
inFile = "input.csv"
outFile = "output.csv"

# necessary imports
import csv
import time

# start timer
start = time.time()

# number of redundant (duplicate-name) rows
redundancies = 0

# get the CSV data file as input
inHandle = open(inFile, "r", newline="")
reader = csv.reader(inHandle)

# set up CSV data file as output (tab-delimited)
outHandle = open(outFile, "w", newline="")
writer = csv.writer(outHandle, delimiter="\t")

# True only until the header row has been consumed
firstPass = True

# names already seen, for duplicate detection
itemName = []

# number of labels seen per item name
labelCount = dict()

# process CSV file, row-by-row
for row in reader:
    # list for creating the Euler diagram output row
    outRow = []
    # when processing column names only
    if firstPass:
        firstPass = False
    # when processing an item
    else:
        # grab the item name (first column) and label (last column, the set
        # label)
        item = row[0]
        label = row[61]
        # are there duplicate names?
        if item in itemName:
            # add a redundancy
            redundancies += 1
        # no duplicates, store in list
        else:
            itemName.append(item)
        # add to label count appropriately
        if item in labelCount:
            labelCount[item] += 1
        else:
            labelCount[item] = 1
        # store as part of the Euler diagram output
        outRow.append(item.strip())
        outRow.append(label)
        writer.writerow(outRow)

# close all files
inHandle.close()
outHandle.close()

# count the gene labels for histogram
oneLabel = 0
twoLabels = 0
threeLabels = 0
fourLabels = 0
fiveLabels = 0
additionalLabels = 0
for val in labelCount.values():
    if val == 1:
        oneLabel += 1
    elif val == 2:
        twoLabels += 1
    elif val == 3:
        threeLabels += 1
    elif val == 4:
        fourLabels += 1
    elif val == 5:
        fiveLabels += 1
    else:
        additionalLabels += 1

# stop timer and convert elapsed time to minutes
end = time.time()
elapsed = end - start
minutes = round(elapsed / 60, 3)

# display redundancies (if any)
if redundancies == 1:
    print("There was " + str(redundancies) + " redundancy.")
elif redundancies == 0:
    print("There were no redundancies!")
else:
    print("There were " + str(redundancies) + " redundancies.")

# display gene label counts
print("There are " + str(oneLabel) + " genes with one label.")
print("There are " + str(twoLabels) + " genes with two labels.")
print("There are " + str(threeLabels) + " genes with three labels.")
print("There are " + str(fourLabels) + " genes with four labels.")
print("There are " + str(fiveLabels) + " genes with five labels.")
print("There are " + str(additionalLabels) + " genes with additional labels.")

# display time taken
print("CSV scanning operation complete after", minutes, "minutes.")
|
""" Implement a basic calculator to evaluate a simple expression string.
The expression string contains only non-negative integers, +, -, *, /
operators , open ( and closing parentheses ) and empty spaces . The integer
division should truncate toward zero.
You may assume that the given expression is always valid. All intermediate
results will be in the range of [-2147483648, 2147483647].
Follow up: Could you solve the problem without using built-in library
functions?
Example 1:
Input: s = "1 + 1" Output: 2 Example 2:
Input: s = " 6-4 / 2 " Output: 4
IDEA:
interpret blocks inside () using standard calc
( 1 8 )
i i+1 j-1 j
2-3/4
stack
+2
-3
/4
"""
class Solution772:
    """Basic calculator for expressions with non-negative integers, the
    operators + - * /, parentheses and spaces (see module docstring).

    Integer division truncates toward zero. The original class was an empty
    stub; this implements the contract described above it.
    """

    def calculate(self, s: str) -> int:
        """Evaluate the expression string ``s`` and return its integer value.

        Assumes the expression is valid and all intermediate results fit in
        a 32-bit signed integer range, per the problem statement.
        """

        def _evaluate(i):
            # Evaluate s starting at index i until a ')' or the end of the
            # string. Returns (value, index of last character consumed).
            stack = []   # signed terms whose sum is this level's value
            num = 0      # the number currently being parsed
            op = '+'     # operator pending in front of `num`
            while i < len(s):
                c = s[i]
                if c.isdigit():
                    num = num * 10 + int(c)
                if c == '(':
                    # Recurse for the parenthesized sub-expression; it
                    # becomes the current operand.
                    num, i = _evaluate(i + 1)
                if c in '+-*/)' or i == len(s) - 1:
                    # Fold the finished operand into the stack according to
                    # the pending operator.
                    if op == '+':
                        stack.append(num)
                    elif op == '-':
                        stack.append(-num)
                    elif op == '*':
                        stack.append(stack.pop() * num)
                    elif op == '/':
                        # int() truncates toward zero, as required.
                        stack.append(int(stack.pop() / num))
                    num = 0
                    if c == ')':
                        return sum(stack), i
                    if c in '+-*/':
                        op = c
                i += 1
            return sum(stack), i

        return _evaluate(0)[0]
|
# Packaging script for the molo.polls Django/Molo plugin.
from setuptools import setup, find_packages

# The README becomes the long description shown on PyPI.
with open('README.rst') as readme_file:
    readme = readme_file.read()

# Runtime and development requirements, one per line; blank lines filtered.
with open('requirements.txt') as req_file:
    requires = [req for req in req_file.read().split('\n') if req]
with open('requirements-dev.txt') as req_file:
    requires_dev = [req for req in req_file.read().split('\n') if req]

# Single source of truth for the version is the VERSION file.
with open('VERSION') as fp:
    version = fp.read().strip()

setup(name='molo.polls',
      version=version,
      description=('A molo module that provides the ability to run polls.'),
      long_description=readme,
      classifiers=[
          "Programming Language :: Python :: 3.6",
          "Framework :: Django",
          "Topic :: Internet :: WWW/HTTP",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
      ],
      author='Praekelt Foundation',
      author_email='dev@praekelt.com',
      url='http://github.com/praekelt/molo.polls',
      license='BSD',
      keywords='praekelt, mobi, web, django',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      # 'molo' is a namespace package shared with other molo.* plugins.
      namespace_packages=['molo'],
      install_requires=requires,
      tests_require=requires_dev,
      entry_points={})
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import collections
import functools
from ..framework import Variable, default_main_program, in_dygraph_mode, dygraph_only, Parameter, ParamBase, _varbase_creator, _dygraph_tracer
import pickle
import six
from . import learning_rate_scheduler
import warnings
from .. import core
from .base import guard
from paddle.fluid.dygraph.jit import _SaveLoadConfig
from paddle.fluid.dygraph.io import _construct_program_holders, _construct_params_and_buffers
__all__ = [
'save_dygraph',
'load_dygraph',
]
def _parse_load_config(configs):
    """Validate the **configs passed to load_dygraph and pack the supported
    options into a _SaveLoadConfig instance.

    :param configs: dict of keyword options supplied by the caller
    :return: a _SaveLoadConfig carrying the recognized options
    :raises ValueError: if an unsupported option key is present
    """
    supported_configs = ['model_filename', 'params_filename', 'keep_name_table']
    # Reject any option that load_dygraph does not understand.
    for key in configs:
        if key in supported_configs:
            continue
        raise ValueError(
            "The additional config (%s) of `paddle.fluid.load_dygraph` is not supported."
            % (key))
    # Copy the recognized options (missing ones default to None).
    config = _SaveLoadConfig()
    config.model_filename = configs.get('model_filename', None)
    config.params_filename = configs.get('params_filename', None)
    config.keep_name_table = configs.get('keep_name_table', None)
    return config
@dygraph_only
def save_dygraph(state_dict, model_path):
    '''
    :api_attr: imperative
    Save Layer's state_dict to disk. This will generate a file with suffix ".pdparams"
    The state_dict is get from Layers.state_dict function
    Args:
        state_dict(dict) : The state dict to be saved.
        model_path(str) : the file prefix to save the state_dict. The format is "dirname/file_prefix". If file_prefix is empty str. A exception will be raised
    Returns:
        None
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            with fluid.dygraph.guard():
                emb = fluid.dygraph.Embedding([10, 10])
                state_dict = emb.state_dict()
                fluid.save_dygraph( state_dict, "paddle_dy")
                adam = fluid.optimizer.Adam( learning_rate = fluid.layers.noam_decay( 100, 10000),
                                             parameter_list = emb.parameters() )
                state_dict = adam.state_dict()
                fluid.save_dygraph( state_dict, "paddle_dy")
    '''
    base_name = os.path.basename(model_path)
    assert base_name != "", "The input model_path MUST be format of dirname/filename [dirname\\filename in Windows system], but received filename is empty string."
    suffix = ".pdparams"
    assert len(state_dict) > 0, "state_dict is empty, no need to save"
    # If the state_dict holds no ParamBase entries it is an optimizer state
    # dict rather than layer parameters, so switch to the ".pdopt" suffix.
    param_num = 0
    for k, v in state_dict.items():
        if isinstance(v, ParamBase):
            param_num += 1
    if param_num == 0:
        suffix = ".pdopt"
    # Convert tensors to numpy arrays for pickling, and remember the mapping
    # from structured (state_dict) keys to raw parameter names.
    model_dict = {}
    name_table = {}
    for k, v in state_dict.items():
        if isinstance(v, (Variable, core.VarBase)):
            model_dict[k] = v.numpy()
            name_table[k] = v.name
        else:
            model_dict[k] = v
    model_dict["StructuredToParameterName@@"] = name_table
    file_name = model_path + suffix
    dir_name = os.path.dirname(file_name)
    if dir_name and not os.path.exists(dir_name):
        os.makedirs(dir_name)
    with open(file_name, 'wb') as f:
        # protocol=2 keeps the file loadable from Python 2.
        pickle.dump(model_dict, f, protocol=2)
# NOTE(chenweihang): load_dygraph will deprecated in future, we don't
# support new loading features for it
# TODO(qingqing01): remove dygraph_only to support loading static model.
# maybe need to unify the loading interface after 2.0 API is ready.
# @dygraph_only
def load_dygraph(model_path, **configs):
    '''
    :api_attr: imperative
    Load parameter state dict from disk.
    .. note::
        Due to some historical reasons, if you load ``state_dict`` from the saved
        result of `paddle.static.save_inference_model`, the structured variable name
        will cannot be restored. You need to set the argument `use_structured_name=False`
        when using `Layer.set_state_dict` later.
    Args:
        model_path(str) : The file prefix store the state_dict.
            (The path should Not contain suffix '.pdparams')
        **configs (dict, optional): other save configuration options for compatibility. We do not
            recommend using these configurations, if not necessary, DO NOT use them. Default None.
            The following options are currently supported:
            (1) model_filename (string): The inference model file name of the paddle 1.x ``save_inference_model``
            save format. Default file name is :code:`__model__` .
            (2) params_filename (string): The persistable variables file name of the paddle 1.x ``save_inference_model``
            save format. No default file name, save variables separately by default.
    Returns:
        state_dict(dict) : the dict store the state_dict
    Examples:
        .. code-block:: python
            import paddle
            import paddle.fluid as fluid
            paddle.disable_static()
            emb = paddle.nn.Embedding(10, 10)
            state_dict = emb.state_dict()
            fluid.save_dygraph(state_dict, "paddle_dy")
            scheduler = paddle.optimizer.lr_scheduler.NoamLR(
                d_model=0.01, warmup_steps=100, verbose=True)
            adam = paddle.optimizer.Adam(
                learning_rate=scheduler,
                parameters=emb.parameters())
            state_dict = adam.state_dict()
            fluid.save_dygraph(state_dict, "paddle_dy")
            para_state_dict, opti_state_dict = fluid.load_dygraph("paddle_dy")
    '''
    # deal with argument `model_path`: strip a save-format suffix if the
    # caller passed one despite the documented contract.
    model_prefix = model_path
    if model_prefix.endswith(".pdparams"):
        model_prefix = model_prefix[:-9]
    elif model_prefix.endswith(".pdopt"):
        model_prefix = model_prefix[:-6]
    para_dict = None
    opti_dict = None
    params_file_path = model_prefix + ".pdparams"
    opti_file_path = model_prefix + ".pdopt"
    # deal with argument `config`
    config = _parse_load_config(configs)
    if os.path.exists(params_file_path) or os.path.exists(opti_file_path):
        # Load state dict by `save_dygraph` save format
        para_dict = {}
        if os.path.exists(params_file_path):
            with open(params_file_path, 'rb') as f:
                # latin1 encoding keeps Python-2-pickled numpy arrays loadable.
                para_dict = pickle.load(f) if six.PY2 else pickle.load(
                    f, encoding='latin1')
            # Drop the bookkeeping key written by save_dygraph unless the
            # caller explicitly asked to keep it.
            if not config.keep_name_table and "StructuredToParameterName@@" in para_dict:
                del para_dict["StructuredToParameterName@@"]
        if os.path.exists(opti_file_path):
            with open(opti_file_path, 'rb') as f:
                opti_dict = pickle.load(f) if six.PY2 else pickle.load(
                    f, encoding='latin1')
    else:
        # check model path
        if not os.path.isdir(model_prefix):
            raise ValueError("Model saved directory '%s' is not exists." %
                             model_prefix)
        # check whether model file exists
        if config.model_filename is None:
            model_filename = '__model__'
        else:
            model_filename = config.model_filename
        model_file_path = os.path.join(model_path, model_filename)
        if os.path.exists(model_file_path):
            # Load state dict by `jit.save/io.save_inference_model` save format
            # NOTE(chenweihang): [ Compatibility of save_inference_model save format ]
            # The model saved by `save_inference_model` does not completely correspond to
            # the information required by the `state_dict` under the dygraph.
            # `save_inference_model` not save structured name, we need to remind
            # the user to configure the `use_structured_name` argument when `set_state_dict`
            # NOTE(chenweihang): `jit.save` doesn't save optimizer state
            # 1. load program desc & construct _ProgramHolder
            programs = _construct_program_holders(model_path,
                                                  config.model_filename)
            # 2. load layer parameters & buffers
            # NOTE: using fluid.dygraph.guard() here will cause import error in py2
            with guard():
                persistable_var_dict = _construct_params_and_buffers(
                    model_prefix,
                    programs,
                    config.params_filename,
                    append_suffix=False)
                # 3. construct state_dict
                para_dict = dict()
                for var_name in persistable_var_dict:
                    para_dict[var_name] = persistable_var_dict[var_name].numpy()
                # if *.info exists, we can recover structured_name
                var_info_filename = str(config.params_filename) + ".info"
                var_info_path = os.path.join(model_prefix, var_info_filename)
                if os.path.exists(var_info_path):
                    with open(var_info_path, 'rb') as f:
                        extra_var_info = pickle.load(f)
                    # Re-key the state_dict by structured (state_dict) names.
                    structured_para_dict = dict()
                    for var_name in para_dict:
                        structured_name = extra_var_info[var_name].get(
                            'structured_name', None)
                        assert structured_name is not None, "Cannot find saved variable (%s)'s structured name in saved model." % var_name
                        structured_para_dict[structured_name] = para_dict[
                            var_name]
                    para_dict = structured_para_dict
        else:
            # load state dict by `io.save_params/persistables` save format
            # TODO(chenweihang): [ Now only supports loading parameters seperately ]
            # If users save all parameters as one file, the [ variable.name -> variable ]
            # mapping info will lost, so users need to give variable list, but users build
            # variable list in dygraph mode is difficult, we recommend users to use
            # paddle.static.load_program_state in this case
            # Try to load all the files in the directory in VarBase format,
            # the file name is used as the name of VarBase
            load_var_list = []
            # 1. load file names (relative paths become variable names, with
            # Windows separators normalized to '/')
            var_name_list = []
            for root, _, files in os.walk(model_path):
                for filename in files:
                    file_path = os.path.join(root, filename)
                    tmp_var_name = os.path.relpath(file_path, model_path)
                    var_name = tmp_var_name.replace("\\", "/")
                    var_name_list.append(var_name)
            # 2. create and load VarBase
            with guard():
                for name in var_name_list:
                    new_var = _varbase_creator(name=name, persistable=True)
                    _dygraph_tracer().trace_op(
                        type='load',
                        inputs={},
                        outputs={'Out': new_var},
                        attrs={'file_path': os.path.join(model_path, name)})
                    load_var_list.append(new_var)
            # 3. construct state_dict
            para_dict = dict()
            for var in load_var_list:
                para_dict[var.name] = var.numpy()
    return para_dict, opti_dict
|
#!/usr/bin/env python
"""
libRst
======
This module is intended to provide compilation support for rst. The intention
is to keep the required libraries all in one place to provide a deployable,
python 2.6/2.7 compatible, rst compiler.
RST Constructs to be supported:
- paragraph
- heading
- list (unordered, ordered)
- table
"""
__version__ = "0.1"
__date__ = "130212"
__author__ = "Curtis Sand"
import docutils.core
import sys, os.path
def toHtml(text):
    """ Use Docutils to compile text into html.

    :param text: (str) reStructuredText source
    :return: (str) the rendered body fragment only ('html_body' part, no
             surrounding <html>/<head> document wrapper)
    """
    return docutils.core.publish_parts(source=text,
                                       writer_name='html')['html_body']
def indentParagraph(text, indent):
    """ Prefix every line of text with an indentation string.

    :param indent: (int or str) number of spaces to indent the text, or
                   the literal string to use as the indentation
    >>> indentParagraph('foo\\nbar', indent=3)
    '   foo\\n   bar'
    >>> indentParagraph('foo\\nbar', indent='__')
    '__foo\\n__bar'
    """
    prefix = ' ' * indent if isinstance(indent, int) else indent
    return '\n'.join(prefix + line for line in text.split('\n'))
def wrapText(line, width=80, continuationPrefix=None, splitWords=False,
             wordSplitChar='-'):
    """ Wrap text to the given width.

    :param line: (str) the line of text to wrap
    :param width: (int) the width to wrap the line to
    :param continuationPrefix: (str) the string to prefix continued lines with
    :param splitWords: (bool) whether or not to split words to fill the line
    :param wordSplitChar: (str) The string to use to indicate a word
                          continues on another line. wordSplitChar has no
                          effect if splitWords is False.

    >>> wrapText('foo bar', width=6)
    'foo \\nbar \\n'
    >>> wrapText('foo bar', width=6, continuationPrefix=' ')
    'foo \\n bar \\n'
    >>> wrapText('foo bar', width=6, splitWords=True)
    'foo b-\\nar \\n'
    >>> wrapText('foo bar', width=6, splitWords=True, wordSplitChar='>')
    'foo b>\\nar \\n'
    >>> wrapText('foo bar', width=5, splitWords=True)
    'foo \\nbar \\n'
    """
    # Normalize None (and '') to the empty prefix.
    if not continuationPrefix:
        continuationPrefix = ''
    words = line.split(' ')
    retVal = ''   # completed, already-wrapped output
    newLine = ''  # the line currently being filled
    for word in words:
        if len(newLine) + len(word) <= width:
            # Word fits on the current line; every word gets a trailing space.
            newLine += word + ' '
            continue
        elif len(newLine) + len(word) > width and not splitWords:
            # Word does not fit and splitting is disabled: flush the current
            # line and start a new (prefixed) one with the word.
            retVal += newLine + '\n'
            newLine = continuationPrefix + word + ' '
            continue
        else: #split the word
            remainingSpace = width - len(newLine)
            # Not enough room for even one character plus the split marker:
            # behave like the no-split case instead.
            if remainingSpace <= 1:
                retVal += newLine + '\n'
                newLine = continuationPrefix + word + ' '
                continue
            # Fill the line up to width with the word's head plus the split
            # marker, then carry the tail onto the next (prefixed) line.
            splitIndex = remainingSpace - len(wordSplitChar)
            newLine += word[:splitIndex] + wordSplitChar
            retVal += newLine + '\n'
            newLine = continuationPrefix + word[splitIndex:] + ' '
            continue
    # Flush the final partial line (always emits a trailing newline).
    retVal += newLine + '\n'
    return retVal
def _separateNewlines(text):
""" Separate newlines from beginning and end of text and return them in a tuple.
:return: (tuple of str) the beginning newline value, stripped text,
ending newline value
>>> _separateNewlines('\\nfoo\\n')
('\\n', 'foo', '\\n')
>>> _separateNewlines('foo')
('', 'foo', '')
"""
start = ''
end = ''
if text[0] == '\n':
start = '\n'
text = text[1:]
if text[-1] == '\n':
end = '\n'
text = text[:-1]
return start, text, end
def heading(text, level):
    """ Turn a line of text into an RST heading. Always returns a trailing
    newline.

    :param level: (int) the level of heading to produce. Level 0 is the
                  document title and is overlined.

    >>> heading('foo', 0)
    '===\\nfoo\\n===\\n'
    >>> heading('foo', 1)
    'foo\\n===\\n'
    >>> heading('\\nfoo\\n', 2)
    '\\nfoo\\n---\\n'
    >>> heading('foo', 11) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError: A heading cannot have a level less than 0 or ...
    """
    _chars = ['=', '-', '~', '"', "'", '*', '^', '_', '+', ':', '#']
    # A trailing newline on the input is deliberately dropped (see the
    # level-2 doctest above); only the leading one is preserved.
    start, text, end = _separateNewlines(text)
    if level < 0 or level >= len(_chars):
        # Report the true inclusive maximum: the original message used
        # len(_chars), overstating the largest valid level by one.
        msg = ('A Heading cannot have a level less than 0 or larger ' +
               'than %s: %s' % (len(_chars) - 1, text))
        raise ValueError(msg)
    # Level 0 uses the first character and is also overlined; level n > 0
    # uses the (n-1)-th character as an underline only.
    underline = _chars[0 if level == 0 else level - 1] * len(text)
    if level == 0:
        return '%s%s\n%s\n%s\n' % (start, underline, text, underline)
    return '%s%s\n%s\n' % (start, text, underline)
def list(elements, ordered=False, startIndex=1):
    """ Create an RST List from a collection.

    :param elements: (list) a collection of strings, each an element of
                     the list
    :param ordered: (bool) set's list type between bulleted and enumerated
    :param startIndex: (int) if start index is 1 then an auto-enumerated
                       list is used ("#. element\\\n")

    >>> list(['foo', 'bar'])
    '- foo\\n- bar\\n'
    >>> list(['foo', 'bar'], ordered=True)
    '#. foo\\n#. bar\\n'
    >>> list(['foo', 'bar'], ordered=True, startIndex=3)
    '3. foo\\n4. bar\\n'

    startIndex has no effect if not ordered

    >>> list(['foo', 'bar'], ordered=False, startIndex=3)
    '- foo\\n- bar\\n'
    """
    lines = []
    counter = startIndex
    for item in elements:
        if ordered and startIndex == 1:
            lines.append('#. %s\n' % (item))
        elif ordered and startIndex > 1:
            lines.append('%s. %s\n' % (counter, item))
            counter += 1
        else:
            lines.append('- %s\n' % item)
    return ''.join(lines)
def table(grid):
    """ Build an RST table out of nested lists.

    :param grid: (list of list) rows of cell values; ragged rows are padded
                 to equal length, and the first row is rendered as the
                 table header.
    """
    grid = _padGrid(grid)
    # Uniform column width: the widest cell (rendered as text) plus padding.
    # A flattening max() replaces the original reduce()-based flatten, which
    # relied on the `reduce` builtin that no longer exists in Python 3.
    cell_width = 2 + max(len(str(item)) for row in grid for item in row)
    num_cols = len(grid[0])
    rst = _tableDiv(num_cols, cell_width, 0)
    header_flag = 1  # only the rule below the first row uses '=' (header rule)
    for row in grid:
        # str(x): the width computation already stringified cells, but the
        # original passed raw values to _normalizeCell, crashing on non-str.
        rst = rst + '| ' + '| '.join([_normalizeCell(str(x), cell_width - 1)
                                      for x in row]) + '|\n'
        rst = rst + _tableDiv(num_cols, cell_width, header_flag)
        header_flag = 0
    return rst
def _tableDiv(num_cols, col_width, header_flag):
if header_flag == 1:
return num_cols*('+' + (col_width)*'=') + '+\n'
else:
return num_cols*('+' + (col_width)*'-') + '+\n'
def _normalizeCell(string, length):
return string + ((length - len(string)) * ' ')
def _padGrid(grid):
padChar = ''
maxRowLen = max([len(row) for row in grid])
for row in grid:
while len(row) < maxRowLen:
row.append(padChar)
return grid
if __name__=="__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
import itertools
from typing import Dict, List, Tuple
import numpy as np
from sklearn.metrics import (
classification_report,
precision_score,
recall_score,
precision_recall_fscore_support,
)
from imagededup.utils.logger import return_logger
logger = return_logger(__name__)
def _get_unique_ordered_tuples(unique_tuples: List[Tuple]) -> List[Tuple]:
"""Sort each tuple given a list of tuples and retain only unique pairs regardless of order within the tuple.
Eg: [(2, 1), (1, 2), (3, 4)] becomes [(1, 2), (3, 4)]"""
return list(set([tuple(sorted(i)) for i in unique_tuples]))
def _make_all_unique_possible_pairs(ground_truth_dict: Dict) -> List[Tuple]:
"""
Given a ground truth dictionary, generate all possible unique image pairs (both negative and positive pairs).
"""
# get all elements of the dictionary
all_files = list(ground_truth_dict.keys())
# make all possible pairs (remove pairs with same elements)
all_tuples = [i for i in itertools.product(all_files, all_files) if i[0] != i[1]]
return _get_unique_ordered_tuples(all_tuples)
def _make_positive_duplicate_pairs(ground_truth: Dict, retrieved: Dict) -> Tuple[List[Tuple], List[Tuple]]:
    """
    Given ground_truth and retrieved dictionary, generate all unique positive pairs.

    Returns:
        A 2-tuple (ground-truth positive pairs, retrieved positive pairs),
        each a list of internally-sorted, deduplicated 2-tuples.
    """
    pairs = []
    # Apply the same pairing logic to both mappings; order matters
    # (ground truth first, retrieved second).
    for mapping in [ground_truth, retrieved]:
        valid_pairs = []
        for k, v in mapping.items():
            # Pair the key (a filename) with each of its listed duplicates.
            valid_pairs.extend(list(zip([k]*len(v), v)))
        pairs.append(_get_unique_ordered_tuples(valid_pairs))
    return pairs[0], pairs[1]
def _prepare_labels(
complete_pairs: List[Tuple],
ground_truth_pairs: List[Tuple],
retrieved_pairs: List[Tuple],
) -> Tuple[List, List]:
"""
Given all possible unique pairs, ground truth positive pairs and retrieved positive pairs, generate true and
predicted labels to feed into classification metrics functions.
"""
ground_truth_pairs = set(ground_truth_pairs)
retrieved_pairs = set(retrieved_pairs)
y_true = [1 if i in ground_truth_pairs else 0 for i in complete_pairs]
y_pred = [1 if i in retrieved_pairs else 0 for i in complete_pairs]
return y_true, y_pred
def classification_metrics(ground_truth: Dict, retrieved: Dict) -> Dict:
    """
    Given ground truth dictionary and retrieved dictionary, return per class precision, recall and f1 score. Class 1 is
    assigned to duplicate file pairs while class 0 is for non-duplicate file pairs.

    Args:
        ground_truth: A dictionary representing ground truth with filenames as key and a list of duplicate filenames
                      as value.
        retrieved: A dictionary representing retrieved duplicates with filenames as key and a list of retrieved
                   duplicate filenames as value.

    Returns:
        Dictionary of precision, recall and f1 score for both classes.
    """
    # Universe of unordered file pairs drawn from the ground-truth keys.
    all_pairs = _make_all_unique_possible_pairs(ground_truth)
    ground_truth_duplicate_pairs, retrieved_duplicate_pairs = _make_positive_duplicate_pairs(
        ground_truth, retrieved
    )
    # Binary label per pair: 1 = duplicate, 0 = non-duplicate.
    y_true, y_pred = _prepare_labels(
        all_pairs, ground_truth_duplicate_pairs, retrieved_duplicate_pairs
    )
    logger.info(classification_report(y_true, y_pred))
    # precision_recall_fscore_support returns per-class arrays in the order
    # (precision, recall, fscore, support); label them for readability.
    prec_rec_fscore_support = dict(
        zip(
            ('precision', 'recall', 'f1_score', 'support'),
            precision_recall_fscore_support(y_true, y_pred),
        )
    )
    return prec_rec_fscore_support
|
from bitarray import bitarray
from copy import deepcopy
from functools import wraps
import math
import random
import unittest
length_prefix = 2
def bitarray_to_bytes(bitarr):
    """ Serialize a bitarray: big-endian bit-count prefix + packed payload bytes. """
    header = bitarr.length().to_bytes(length_prefix, byteorder='big')
    return header + bitarr.tobytes()
def bitarray_from_bytes(bites):
    """ Inverse of bitarray_to_bytes: read the bit-count prefix, unpack the
    payload, and trim the padding bits introduced by byte alignment. """
    bit_count = int.from_bytes(bites[:length_prefix], byteorder='big')
    result = bitarray()
    result.frombytes(bites[length_prefix:])
    return result[:bit_count]
def unexpected_type(name, exp, val):
    """ Raise a TypeError describing an argument whose type did not match. """
    message = 'expected "%s" to be %s, got %s' % (name, exp, type(val))
    raise TypeError(message)
## from https://stackoverflow.com/a/15577293 ##
def argtypes(**decls):
    """ Decorator factory enforcing runtime type checks on named arguments.

    Each keyword maps an argument name to a type (or tuple of types) for
    isinstance(), or to the builtin `callable` to require a callable value.
    A mismatching argument triggers unexpected_type(), which raises TypeError.
    """
    def decorator(f):
        code = f.__code__
        names = code.co_varnames[:code.co_argcount]
        @wraps(f)
        def decorated(*args, **kwargs):
            for argname, expected in decls.items():
                try:
                    val = args[names.index(argname)]
                except (ValueError, IndexError):
                    # ValueError: argname is not a positional parameter.
                    # IndexError: argname IS positional but was passed as a
                    # keyword, so it is absent from *args. The original only
                    # caught ValueError, so calling a decorated function with
                    # a declared positional arg as a keyword crashed with an
                    # uncaught IndexError.
                    val = kwargs.get(argname)
                if expected == callable:
                    if not callable(val):
                        unexpected_type(argname, 'function', val)
                elif not isinstance(val, expected):
                    unexpected_type(argname, expected, val)
            return f(*args, **kwargs)
        return decorated
    return decorator
################################################
class Op():
    """A binary arithmetic operator: a display symbol plus its implementation."""
    @argtypes(key=str, f=callable)
    def __init__(self, key, f):
        # key: display symbol (e.g. '+'); f: binary function implementing it.
        self.key = key
        self.f = f
    def __str__(self):
        return self.key
def revise(tokens):
    """Drop malformed tokens so the result strictly alternates
    number, op, number, ... starting with a number (input for eval())."""
    return match_num(tokens, [])
def match_num(tokens, acc):
    """Scan for the next int token, append it to acc (mutated in place), then
    hand off to match_op. Mutually recursive with match_op; recursion depth
    grows with len(tokens)."""
    if not tokens:
        return acc
    elif isinstance(tokens[0], int):
        acc.append(tokens[0])
        return match_op(tokens[1:], acc)
    # Not a number: skip it and keep looking for one.
    return match_num(tokens[1:], acc)
def match_op(tokens, acc):
    """Scan for the next Op token, append it to acc (mutated in place), then
    hand off to match_num. Mutually recursive with match_num."""
    if not tokens:
        return acc
    elif isinstance(tokens[0], Op):
        acc.append(tokens[0])
        return match_num(tokens[1:], acc)
    # Not an operator: skip it and keep looking for one.
    return match_op(tokens[1:], acc)
def eval(tokens):
    """Evaluate an alternating [num, op, num, ...] token list strictly left to
    right (no operator precedence); an empty list evaluates to 0.

    NOTE: intentionally shadows the builtin eval() within this module.
    """
    if not tokens:
        return 0
    else:
        return do_eval(tokens[1:], tokens[0])
def do_eval(tokens, acc):
    """Fold the remaining (op, num) pairs into acc, left to right.

    A trailing lone operator (fewer than two remaining tokens) is ignored.
    """
    while len(tokens) >= 2:
        operator, operand = tokens[0], tokens[1]
        acc = operator.f(acc, operand)
        tokens = tokens[2:]
    return acc
class Genome():
    """A fixed vocabulary of tokens (ints and Ops) with a binary encoding.

    Each token gets a fixed-width bit pattern (a "gene"); chromosomes are
    concatenations of genes.
    """
    @argtypes(tokens=list)
    def __init__(self, tokens):
        # Lookup tables between token keys, byte encodings, and tokens.
        self.enc_by_key = {}
        self.token_by_key = {}
        self.token_by_enc = {}
        self.tokens = []
        # Smallest bit width able to index every token in the vocabulary.
        self.gene_length = math.floor(math.log2(len(tokens))) + 1
        fmt = '0%db' % self.gene_length
        for i, token in enumerate(tokens):
            # Ints key on themselves; Ops key on their display symbol.
            if isinstance(token, int):
                key = token
            elif isinstance(token, Op):
                key = token.key
            else:
                unexpected_type('token', (int,Op,), token)
            bitarr = bitarray(format(i, fmt))
            enc = bitarray_to_bytes(bitarr)
            self.enc_by_key[key] = enc
            self.token_by_key[key] = token
            self.token_by_enc[enc] = token
    @argtypes(keys=list)
    def encode(self, keys):
        """Concatenate the genes for the given token keys into one bitarray."""
        bitarr = bitarray()
        for key in keys:
            enc = self.enc_by_key[key]
            bitarr.extend(bitarray_from_bytes(enc))
        return bitarr
    def get_token(self, bitarr, i):
        """Decode the gene starting at bit i; None if it maps to no token."""
        enc = bitarray_to_bytes(bitarr[i:i+self.gene_length])
        try:
            return self.token_by_enc[enc]
        except KeyError:
            return None
    def decode(self, input):
        """Decode a bitarray or bytes into a revised (well-formed) token list.

        NOTE: parameter name shadows the builtin input().
        """
        if isinstance(input, bitarray):
            bitarr = input
        elif isinstance(input, bytes):
            bitarr = bitarray_from_bytes(input)
        else:
            unexpected_type('input', (bitarray,bytes,), input)
        # Unrecognized genes decode to None and are dropped by revise().
        tokens = [self.get_token(bitarr, i) for i in range(0, bitarr.length(), self.gene_length)]
        return revise(tokens)
    def new_chrom(self, input):
        """Build a Chromosome from a bitarray, bytes, or a token-key list."""
        if isinstance(input, bitarray):
            bitarr = input
            tokens = self.decode(input)
        elif isinstance(input, bytes):
            bitarr = bitarray_from_bytes(input)
            tokens = self.decode(input)
        elif isinstance(input, list):
            bitarr = self.encode(input)
            tokens = input
        else:
            unexpected_type('input', (bitarray,bytes,list,), input)
        return Chromosome(bitarr, tokens)
class Chromosome():
    """A candidate solution: a bit string plus its decoded token expression."""
    @argtypes(bitarr=bitarray, tokens=list)
    def __init__(self, bitarr, tokens):
        self.bitarr = bitarr
        self.tokens = tokens
        # Numeric value of the expression, evaluated once at construction.
        self.value = eval(tokens)
    def bytes(self):
        """Serialize the underlying bit string (length-prefixed)."""
        return bitarray_to_bytes(self.bitarr)
    def fitness(self, objective, target):
        """Score this chromosome's value against target via the objective."""
        return objective(target, self.value)
    def copy_bitarray(self):
        """Return an independent copy of the underlying bit string."""
        return self.bitarr.copy()
    def __str__(self):
        return ''.join(['%s' % token for token in self.tokens]) + '=%s' % self.value
def rand(nums, last):
    """ Roulette-wheel selection: draw uniformly from [0, last) and return the
    index of the first cumulative-fitness bucket exceeding the draw. """
    pick = random.randint(0, last-1)
    for index, bound in enumerate(nums):
        if bound > pick:
            return index
    raise ValueError('expected num < %d, got %d' % (last, pick))
class Environment():
    """Genetic-algorithm driver: population plus selection/crossover/mutation."""
    @argtypes(genome=Genome, chrom_length=int, cross_rate=float, max_iters=int, mut_rate=float, objective=callable, pop_size=int, target=int)
    def __init__(self, **kwargs):
        self.genome = kwargs.get('genome')
        self.chrom_length = kwargs.get('chrom_length')
        self.cross_rate = kwargs.get('cross_rate')
        self.max_iters = kwargs.get('max_iters')
        self.mut_rate = kwargs.get('mut_rate')
        self.objective = kwargs.get('objective')
        self.pop_size = kwargs.get('pop_size')
        self.target = kwargs.get('target')
        # Seed the population with uniformly random bit strings.
        self.pop = []
        for _ in range(self.pop_size):
            bitarr = bitarray([random.choice([False, True]) for _ in range(self.chrom_length)])
            chrom = self.genome.new_chrom(bitarr)
            self.pop.append(chrom)
    def set_target(self, target):
        """Change the value the population is evolving towards."""
        self.target = target
    def copy_chrom(self, i):
        """Deep copy of the i-th chromosome."""
        return deepcopy(self.pop[i])
    def copy_pop(self):
        """Deep copy of the entire population."""
        return deepcopy(self.pop)
    def total_fitness(self):
        """Sum of all chromosomes' fitness against the current target."""
        fitness = 0
        for chrom in self.pop:
            fitness += chrom.fitness(self.objective, self.target)
        return fitness
    def chrom_fitness(self, chrom):
        """Fitness of one chromosome against the current target."""
        return chrom.fitness(self.objective, self.target)
    def try_crossover(self, bitarr1, bitarr2):
        """With probability cross_rate, swap a random suffix window in place.

        Returns (start, end) of the swapped window, or False when skipped.
        """
        if self.cross_rate < random.random():
            return False
        end = min(bitarr1.length(), bitarr2.length())
        start = random.randint(0, end-1)
        temp = bitarr1[start:end]
        bitarr1[start:end] = bitarr2[start:end]
        bitarr2[start:end] = temp
        return (start, end)
    def try_mutate(self, bitarr1, bitarr2):
        """Flip each bit independently with probability mut_rate (in place).

        Returns the lists of flipped indices for both bit strings.
        """
        xs = []
        ys = []
        for x, b in enumerate(bitarr1):
            if self.mut_rate >= random.random():
                bitarr1[x] = not b
                xs.append(x)
        for y, b in enumerate(bitarr2):
            if self.mut_rate >= random.random():
                bitarr2[y] = not b
                ys.append(y)
        return (xs, ys)
    def iter(self):
        """Run one generation.

        Returns a chromosome with infinite fitness (an exact solution) if
        one exists; otherwise breeds a full replacement population via
        fitness-proportional selection and returns False.
        """
        # Cumulative (x1000-scaled, rounded) fitness table for roulette
        # selection via rand().
        last = 0
        nums = []
        for i, chrom in enumerate(self.pop):
            fitness = self.chrom_fitness(chrom)
            if fitness == float("inf"):
                return chrom
            nums.append(round(fitness * 1000) + last)
            last = nums[i]
        new_pop = []
        for _ in range(0, self.pop_size, 2):
            # Pick two distinct parents proportionally to fitness.
            i = rand(nums, last)
            j = rand(nums, last)
            while i == j:
                j = rand(nums, last)
            bitarr1 = self.pop[i].copy_bitarray()
            bitarr2 = self.pop[j].copy_bitarray()
            self.try_crossover(bitarr1, bitarr2)
            self.try_mutate(bitarr1, bitarr2)
            chrom1 = self.genome.new_chrom(bitarr1)
            chrom2 = self.genome.new_chrom(bitarr2)
            new_pop.append(chrom1)
            new_pop.append(chrom2)
        self.pop = new_pop
        return False
    def run(self):
        """Iterate generations until a solution appears or max_iters is hit.

        Returns (solution_chromosome_or_False, iterations_used).
        """
        iters = 0
        chrom = False
        while not chrom and iters < self.max_iters:
            chrom = self.iter()
            iters += 1
        return (chrom, iters)
def add(x, y):
    """Return x + y."""
    return x + y
def sub(x, y):
    """Return x - y."""
    return x - y
def mul(x, y):
    """Return x * y."""
    return x * y
def div(x, y):
    """Protected division: return x unchanged when y is zero, so evaluating
    evolved expressions never raises ZeroDivisionError."""
    if not y:
        return x
    return x / y
# Shared operator vocabulary used by the genomes and tests below.
plus = Op('+', add)
minus = Op('-', sub)
multiply = Op('*', mul)
divide = Op('/', div)
class TestGenome(unittest.TestCase):
    """Unit tests for Genome encoding/decoding and expression evaluation."""
    def setUp(self):
        # 14 tokens -> gene_length of 4 bits per token.
        self.genome = Genome([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, plus, minus, multiply, divide])
    def test_encode(self):
        """Token keys encode to the expected concatenated bit string."""
        bits = self.genome.encode([1, '+', 2, '-', 3])
        assert bits == bitarray('00011010001010110011')
    def test_decode(self):
        """The bit string decodes back to the original token sequence."""
        tokens = self.genome.decode(bitarray('00011010001010110011'))
        assert tokens == [1, plus, 2, minus, 3]
    def test_decode_invalid(self):
        """Genes with no token mapping are dropped during decoding."""
        tokens = self.genome.decode(bitarray('0010001010101110101101110010'))
        assert tokens == [2, plus, 7]
    def test_eval(self):
        """Evaluation is strictly left to right (no operator precedence)."""
        value = eval([1, plus, 2, multiply, 3, minus, 4])
        assert value == 5
    def test_chromosome(self):
        """A chromosome evaluates its decoded expression and scores fitness."""
        chrom = self.genome.new_chrom(bitarray('011010100101110001001101001010100001'))
        fitness = chrom.fitness(objective, 42)
        assert chrom.value == 23
        assert fitness == 1/19
def objective(target, value):
    """ Fitness: reciprocal of the distance to the target; an exact match
    scores positive infinity (signals a solution). """
    distance = abs(target - value)
    if distance == 0:
        return float("inf")
    return 1 / distance
class TestEnvironment(unittest.TestCase):
    """Unit tests for the Environment's GA operators and the overall run loop."""
    def setUp(self):
        genome = Genome([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, plus, minus, multiply, divide])
        self.env = Environment(
            genome=genome,
            objective=objective,
            chrom_length=300,
            cross_rate=0.7,
            max_iters=400,
            mut_rate=0.01,
            pop_size=100,
            target=50
        )
    def test_crossover(self):
        """Crossover either leaves both bit strings intact or swaps a window."""
        bitarr1 = self.env.pop[0].copy_bitarray()
        bitarr2 = self.env.pop[1].copy_bitarray()
        copy1 = bitarr1.copy()
        copy2 = bitarr2.copy()
        res = self.env.try_crossover(bitarr1, bitarr2)
        if res is False:
            assert bitarr1 == copy1
            assert bitarr2 == copy2
        else:
            (start, end) = res
            assert bitarr1 == copy1[:start] + copy2[start:end] + copy1[end:]
            assert bitarr2 == copy2[:start] + copy1[start:end] + copy2[end:]
    def test_mutate(self):
        """Mutation reports exactly the bit indices it flipped."""
        chrom1 = self.env.copy_chrom(0)
        chrom2 = self.env.copy_chrom(1)
        bitarr1 = chrom1.copy_bitarray()
        bitarr2 = chrom2.copy_bitarray()
        (xs, ys) = self.env.try_mutate(chrom1.bitarr, chrom2.bitarr)
        # Re-apply the reported flips to the pristine copies; they must then
        # match the mutated originals.
        if xs:
            for x in xs:
                bitarr1[x] = not bitarr1[x]
        if ys:
            for y in ys:
                bitarr2[y] = not bitarr2[y]
        assert bitarr1 == chrom1.bitarr
        assert bitarr2 == chrom2.bitarr
    def test_iter(self):
        """One generation either returns a solution or replaces the population."""
        fitness_before = self.env.total_fitness()
        pop_before = self.env.copy_pop()
        chrom = self.env.iter()
        pop_after = self.env.copy_pop()
        if not chrom:
            for chrom in pop_before:
                assert float("inf") != self.env.chrom_fitness(chrom)
            # NOTE(review): selection is stochastic, so total fitness is not
            # guaranteed to rise every generation — this assertion can flake.
            fitness_after = self.env.total_fitness()
            assert fitness_before < fitness_after
        else:
            assert float("inf") == self.env.chrom_fitness(chrom)
    def test_run(self):
        """run() stops on a perfect-fitness chromosome or after max_iters."""
        (chrom, iters) = self.env.run()
        if chrom:
            print(chrom)
            assert float("inf") == self.env.chrom_fitness(chrom)
            assert iters < self.env.max_iters
        else:
            assert iters == self.env.max_iters
if __name__ == '__main__':
    # Run the unit test suites defined above.
    unittest.main()
|
"""
the C source files and headers in this directory were copied from
https://github.com/devonmpowell/r3d at git revision `6ddcdfa` on
4/22/2019 by MRB.
"""
from . import _r2d
|
# -*- coding:utf-8 -*-
import numpy as np
import pylab as plt
class Damper(object):
    '''A single damper mounted at a fixed angle.

    The force magnitude follows the power law coefficient * |v| ** alpha
    along the damper axis.
    '''
    def __init__(self, angle_radian: float, coefficient: float, alpha: float):
        self.rad = angle_radian
        self.coeff = coefficient
        self.alpha = alpha
    @property
    def direction_vector(self):
        '''Unit vector along the damper axis.'''
        return np.array([np.cos(self.rad), np.sin(self.rad)])
    def get_damping_force_value(self, velocity):
        '''Magnitude of the damping force for an axial velocity.'''
        speed = np.abs(velocity)
        return self.coeff * speed ** self.alpha
    #
    def plot_force_vectors_for_velocities(self, velocity_vector):
        '''Damping force vector opposing the axial velocity component.

        NOTE(review): despite its name this method plots nothing; it returns
        the force vector.
        '''
        axial_velocity = np.dot(self.direction_vector, velocity_vector)
        magnitude = self.get_damping_force_value(axial_velocity)
        opposing_sign = -1.0 * np.sign(axial_velocity)
        return opposing_sign * magnitude * self.direction_vector
class LeanedPairDampers(object):
    '''A pair of inclined dampers acting together.'''
    def __init__(self, Damper1: Damper, Damper2: Damper):
        self.Damper1 = Damper1
        self.Damper2 = Damper2
    def compute_force_vector(self, velocity_vector):
        '''Resultant damping force: the sum of both dampers' force vectors.'''
        forces = (
            self.Damper1.plot_force_vectors_for_velocities(velocity_vector),
            self.Damper2.plot_force_vectors_for_velocities(velocity_vector),
        )
        return forces[0] + forces[1]
class VectorUtils:
    '''Small helpers for 2-D vector angles.'''
    @staticmethod
    def get_angle_degree(vector):
        '''Angle of the vector in degrees, in (-180, 180].'''
        radians = np.arctan2(vector[1], vector[0])
        return radians * 180.0 / np.pi
    @staticmethod
    def get_angle_rad(vector):
        '''Angle of the vector in radians, in (-pi, pi].'''
        return np.arctan2(vector[1], vector[0])
class DamperEffectivenessPlot(object):
    '''Plotting utilities for visualizing a damper pair's effectiveness.'''
    def __init__(self, dampPair: LeanedPairDampers, velocity_value, row, col, pos):
        self.LeanedPairDampers = dampPair
        self.velocity_value = velocity_value
        # Subplot placement: figure grid (row x col) and position index.
        self.row = row
        self.col = col
        self.pos = pos
    def force_vector_generator(self, rad_range):
        '''Yield the resultant force vector for each velocity direction (rad).'''
        for rad in rad_range:
            velocity_vector = self.velocity_value * np.array([np.cos(rad), np.sin(rad)])
            yield self.LeanedPairDampers.compute_force_vector(velocity_vector)
    def force_vector_comp_parallel_to_velocity_generator(self, rad_range):
        '''Yield the magnitude of the force component parallel to the velocity.'''
        for rad in rad_range:
            velocity_vector = self.velocity_value * np.array([np.cos(rad), np.sin(rad)])
            force_vector = self.LeanedPairDampers.compute_force_vector(velocity_vector)
            yield abs(np.dot(force_vector, velocity_vector)) / np.linalg.norm(velocity_vector)
    def plot_force_value(self, fig):
        '''Polar plot of the resultant force magnitude over all directions.'''
        ax = fig.add_subplot(self.row, self.col, self.pos, polar=True)
        rad_range = np.arange(0, 2*np.pi, 0.01)
        force_lst = [np.linalg.norm(force_vector) for force_vector in self.force_vector_generator(rad_range)]
        plt.polar(rad_range, force_lst)
        y_max = np.ceil(max(force_lst)) + 100
        ax.set_ylim([0, y_max])
    def plot_force_vector_comp_parallel_to_velocity(self, fig):
        '''Polar plot of the force component parallel to the velocity.'''
        ax = fig.add_subplot(self.row, self.col, self.pos, polar=True)
        rad_range = np.arange(0, 2*np.pi, 0.01)
        force_lst = list(self.force_vector_comp_parallel_to_velocity_generator(rad_range))
        plt.polar(rad_range, force_lst)
        y_max = np.ceil(max(force_lst)) + 100
        ax.set_ylim([0, y_max])
    def plot_force_vector_angle(self, fig):
        '''Plot the resultant force angle against the velocity angle.'''
        # BUG FIX: matplotlib Figure has no subplot() method; the original
        # fig.subplot(...) raised AttributeError. Use add_subplot().
        ax = fig.add_subplot(self.row, self.col, self.pos, polar=True)
        degree_range = np.arange(0, 180, 1)[1:]
        value_lst = []
        for deg in degree_range:
            rad = deg*np.pi/180
            velocity_vector = np.array([np.cos(rad), np.sin(rad)])
            force_vector = self.LeanedPairDampers.compute_force_vector(velocity_vector)
            theta = VectorUtils.get_angle_rad(force_vector)
            value_lst.append(theta*180/np.pi)
        plt.plot(degree_range, value_lst)
        # Reference line: a force exactly opposing the velocity (angle - 180).
        plt.plot(degree_range, [deg-180 for deg in degree_range])
        ax.set_xlim([0, 180])
    def plot_force_vectors_for_velocities(self, fig):
        '''Draw force vectors for velocity directions stepped every 5 degrees.'''
        ax = fig.add_subplot(self.row, self.col, self.pos)
        # The 5-degree step guarantees the 45-degree direction is included.
        rad_range = [theta*np.pi/180.0 for theta in np.arange(0, 360, 5)]
        force_vector_lst = list(self.force_vector_generator(rad_range))
        for force_vector in force_vector_lst:
            plt.quiver(0, 0, force_vector[0], force_vector[1], angles='xy', scale_units='xy', scale=1, width=0.005)
        max_force_ceil = np.ceil(max([np.linalg.norm(vec) for vec in force_vector_lst]))+100
        ax.set_xlim([-max_force_ceil, max_force_ceil])
        ax.set_ylim([-max_force_ceil, max_force_ceil])
    def plot_force_vector(self, velocity_vector):
        '''Draw one velocity vector and its resultant damping force.'''
        ax = plt.subplot(self.row, self.col, self.pos)
        force_vector = self.LeanedPairDampers.compute_force_vector(velocity_vector)
        plt.quiver(0, 0, force_vector[0], force_vector[1], angles='xy', scale_units='xy', scale=1, width=0.01, color="r", label="Damping Force")
        plt.quiver(0, 0, velocity_vector[0], velocity_vector[1], angles='xy', scale_units='xy', scale=1, width=0.01, label="Velocity")
        max_value = max([np.linalg.norm(velocity_vector), np.linalg.norm(force_vector)])
        ax.set_xlim([-max_value, max_value])
        ax.set_ylim([-max_value, max_value])
        plt.legend()
if __name__ == "__main__":
    # Viscous (power-law, alpha=0.5) damper pair at +/- 45 degrees.
    angle = 45.0*np.pi/180.0
    alpha = 0.5
    coeff = 20.0 # mm-based unit system
    # Alternative quadratic-damper parameters, kept for experimentation:
    # alpha = 2.0
    # coeff = 0.001 # mm-based unit system
    Damp1 = Damper(angle,coeff,alpha)
    Damp2 = Damper(-angle,coeff,alpha)
    PairViscousDamp = LeanedPairDampers(Damp1,Damp2)
    ViscousPlotter = DamperEffectivenessPlot(PairViscousDamp,650,1,2,2)
    # Linear (alpha=1.0) oil damper pair for comparison.
    alpha = 1.0
    coeff = 0.8 # mm-based unit system
    Damp1 = Damper(angle,coeff,alpha)
    Damp2 = Damper(-angle,coeff,alpha)
    PairOilDamp = LeanedPairDampers(Damp1,Damp2)
    OilPlotter = DamperEffectivenessPlot(PairOilDamp,650,1,2,1)
    # Figure 1: resultant force magnitude, oil (left) vs viscous (right).
    fig = plt.figure(figsize=(10,5))
    OilPlotter.plot_force_value(fig)
    ViscousPlotter.plot_force_value(fig)
    plt.show()
    # Figure 2: force vectors for velocity directions every 5 degrees.
    fig2 = plt.figure(figsize=(10,5))
    OilPlotter.plot_force_vectors_for_velocities(fig2)
    ViscousPlotter.plot_force_vectors_for_velocities(fig2)
    plt.show()
    # Figure 3: single velocity at 30 degrees and the resulting forces.
    fig3 = plt.figure(figsize=(10,5))
    velocity_vector = 650*np.array([np.cos(30.0*np.pi/180.0),np.sin(30.0*np.pi/180.0)])
    OilPlotter.plot_force_vector(velocity_vector)
    ViscousPlotter.plot_force_vector(velocity_vector)
    plt.show()
|
# build RF extension
# run in RF (RoboFont supplies the `mojo` module; this is not a stand-alone script)
import os
from mojo.extensions import ExtensionBundle
# get current folder
basePath = os.path.dirname(__file__)
# folder with python files
libPath = os.path.join(basePath, 'extensionLib')
# folder with html files (optional; None disables)
htmlPath = os.path.join(basePath, 'html')
if not os.path.exists(htmlPath):
    htmlPath = None
# folder with resources (optional; None disables)
resourcesPath = os.path.join(basePath, 'resources')
if not os.path.exists(resourcesPath):
    resourcesPath = None
# load license text from file
# see http://choosealicense.com/ for more open-source licenses
# NOTE(review): licensePath is computed but never passed to B.save() below —
# confirm whether the license is meant to be bundled.
licensePath = os.path.join(basePath, 'license.txt')
if not os.path.exists(licensePath):
    licensePath = None
# boolean indicating if only .pyc should be included
pycOnly = False
# name of the compiled extension file
extensionFile = 'DesignSpaceEditor.roboFontExt'
# path of the compiled extension
buildPath = basePath
extensionPath = os.path.join(buildPath, extensionFile)
# initiate the extension builder
B = ExtensionBundle()
# name of the extension
# NOTE(review): bundle name 'DesignSpaceEdit' differs from the output file
# name 'DesignSpaceEditor.roboFontExt' — confirm the mismatch is intended.
B.name = "DesignSpaceEdit"
# name of the developer
B.developer = 'LettError'
# URL of the developer
B.developerURL = 'http://letterror.com'
if resourcesPath:
    # extension icon (file path or NSImage)
    imagePath = os.path.join(resourcesPath, 'icon.png')
    B.icon = imagePath
# version of the extension
B.version = '1.9.8'
# should the extension be launched at start-up?
B.launchAtStartUp = True
# script to be executed when RF starts
B.mainScript = 'addDesignSpaceFileHandler.py'
# does the extension contain html help files?
B.html = htmlPath is not None
# minimum RoboFont version required for this extension
# Robofont 4.3 has fontTools with designspace version 5.0
B.requiresVersionMajor = '4'
B.requiresVersionMinor = '3'
# scripts which should appear in Extensions menu
B.addToMenu = [
    {
        'path' : 'openDesignSpaceFile.py',
        'preferredName': 'Open',
        'shortKey' : '',
    },
    {
        'path' : 'newDesignSpaceFile.py',
        'preferredName': 'New',
        'shortKey' : '',
    },
]
# compile and save the extension bundle
print('building extension...', end=' ')
B.save(extensionPath, libPath=libPath, htmlPath=htmlPath, resourcesPath=resourcesPath, pycOnly=pycOnly)
print('done!')
# check for problems in the compiled extension
print()
print(B.validationErrors())
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MusicXML import.
Input wrappers for converting MusicXML into tensorflow.magenta.NoteSequence.
"""
# internal imports
import musicxml_parser
import music_pb2
# Shortcut to CHORD_SYMBOL annotation type.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
class MusicXMLConversionError(Exception):
    """Raised when a MusicXML document cannot be converted to a NoteSequence."""
def musicxml_to_sequence_proto(musicxml_document):
  """Convert MusicXML file contents to a tensorflow.magenta.NoteSequence proto.

  Converts a MusicXML file encoded as a string into a
  tensorflow.magenta.NoteSequence proto.

  Args:
    musicxml_document: A parsed MusicXML file.
        This file has been parsed by class MusicXMLDocument

  Returns:
    A tensorflow.magenta.NoteSequence proto.

  Raises:
    MusicXMLConversionError: An error occurred when parsing the MusicXML file.
  """
  sequence = music_pb2.NoteSequence()

  # Standard MusicXML fields.
  sequence.source_info.source_type = (
      music_pb2.NoteSequence.SourceInfo.SCORE_BASED)
  sequence.source_info.encoding_type = (
      music_pb2.NoteSequence.SourceInfo.MUSIC_XML)
  sequence.source_info.parser = (
      music_pb2.NoteSequence.SourceInfo.MAGENTA_MUSIC_XML)

  # Populate header.
  sequence.ticks_per_quarter = musicxml_document.midi_resolution

  # Populate time signatures.
  musicxml_time_signatures = musicxml_document.get_time_signatures()
  for musicxml_time_signature in musicxml_time_signatures:
    time_signature = sequence.time_signatures.add()
    time_signature.time = musicxml_time_signature.time_position
    time_signature.numerator = musicxml_time_signature.numerator
    time_signature.denominator = musicxml_time_signature.denominator

  # Populate key signatures.
  musicxml_key_signatures = musicxml_document.get_key_signatures()
  for musicxml_key in musicxml_key_signatures:
    key_signature = sequence.key_signatures.add()
    key_signature.time = musicxml_key.time_position
    # The Key enum in music.proto does NOT follow MIDI / MusicXML specs
    # Convert from MIDI / MusicXML key to music.proto key
    # (MusicXML keys range -7..7 on the circle of fifths, hence the +7
    # offset into this 15-entry lookup table.)
    music_proto_keys = [11, 6, 1, 8, 3, 10, 5, 0, 7, 2, 9, 4, 11, 6, 1]
    key_signature.key = music_proto_keys[musicxml_key.key + 7]
    if musicxml_key.mode == "major":
      key_signature.mode = key_signature.MAJOR
    elif musicxml_key.mode == "minor":
      key_signature.mode = key_signature.MINOR
    # NOTE(review): modes other than major/minor leave the proto's default
    # mode in place — confirm that is the intended fallback.

  # Populate tempo changes.
  musicxml_tempos = musicxml_document.get_tempos()
  for musicxml_tempo in musicxml_tempos:
    tempo = sequence.tempos.add()
    tempo.time = musicxml_tempo.time_position
    tempo.qpm = musicxml_tempo.qpm

  # Populate notes from each MusicXML part across all voices
  # Unlike MIDI import, notes are not sorted
  sequence.total_time = musicxml_document.total_time_secs
  for part_index, musicxml_part in enumerate(musicxml_document.parts):
    part_info = sequence.part_infos.add()
    part_info.part = part_index
    part_info.name = musicxml_part.score_part.part_name
    for musicxml_measure in musicxml_part.measures:
      for musicxml_note in musicxml_measure.notes:
        # Rests produce no NoteSequence notes.
        if not musicxml_note.is_rest:
          note = sequence.notes.add()
          note.part = part_index
          note.voice = musicxml_note.voice
          note.instrument = musicxml_note.midi_channel
          note.program = musicxml_note.midi_program
          note.start_time = musicxml_note.note_duration.time_position
          # Fix negative time errors from incorrect MusicXML
          if note.start_time < 0:
            note.start_time = 0
          note.end_time = note.start_time + musicxml_note.note_duration.seconds
          note.pitch = musicxml_note.pitch[1]  # Index 1 = MIDI pitch number
          note.velocity = musicxml_note.velocity
          # Nominal duration as a fraction of a quarter note.
          durationratio = musicxml_note.note_duration.duration_ratio()
          note.numerator = durationratio.numerator
          note.denominator = durationratio.denominator

  # Populate chord symbols as text annotations.
  musicxml_chord_symbols = musicxml_document.get_chord_symbols()
  for musicxml_chord_symbol in musicxml_chord_symbols:
    text_annotation = sequence.text_annotations.add()
    text_annotation.time = musicxml_chord_symbol.time_position
    text_annotation.text = musicxml_chord_symbol.get_figure_string()
    text_annotation.annotation_type = CHORD_SYMBOL

  return sequence
def musicxml_file_to_sequence_proto(musicxml_file):
  """Converts a MusicXML file to a tensorflow.magenta.NoteSequence proto.

  Args:
    musicxml_file: A string path to a MusicXML file.

  Returns:
    A tensorflow.magenta.Sequence proto.

  Raises:
    MusicXMLConversionError: Invalid musicxml_file.
  """
  try:
    musicxml_document = musicxml_parser.MusicXMLDocument(musicxml_file)
  except musicxml_parser.MusicXMLParseException as e:
    # Re-raise parser failures under this module's own exception type.
    raise MusicXMLConversionError(e)
  return musicxml_to_sequence_proto(musicxml_document)
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPalette
class Ui_MainWindow(object):
    """Main menu UI: a header frame (logo button, welcome label, password
    button), a 2x3 grid of large tool buttons, and two caption labels.

    Originally generated by Qt Designer / pyuic5; the six near-identical
    tool-button setups have been factored into _make_grid_button. As with any
    pyuic form class, call setupUi(MainWindow) once on a QMainWindow instance.
    """

    def _make_grid_button(self, parent, name, geometry, icon_path, icon_size,
                          minimum_size=None, popup_mode=None):
        """Create one of the large grid buttons (bold '幼圆' font, icon above text).

        Args:
            parent: parent widget.
            name: Qt object name (also used by the stylesheet selectors).
            geometry: (x, y, w, h) tuple passed to setGeometry.
            icon_path: path to the button's icon pixmap.
            icon_size: (w, h) tuple passed to setIconSize.
            minimum_size: optional (w, h) minimum size.
            popup_mode: optional QToolButton popup mode.

        Returns:
            The configured QtWidgets.QToolButton.
        """
        button = QtWidgets.QToolButton(parent)
        button.setGeometry(QtCore.QRect(*geometry))
        if minimum_size is not None:
            button.setMinimumSize(QtCore.QSize(*minimum_size))
        font = QtGui.QFont()
        font.setFamily("幼圆")
        font.setBold(True)
        font.setWeight(75)
        button.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(icon_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        button.setIcon(icon)
        button.setIconSize(QtCore.QSize(*icon_size))
        if popup_mode is not None:
            button.setPopupMode(popup_mode)
        button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        button.setObjectName(name)
        return button

    def setupUi(self, MainWindow):
        """Build all widgets, apply the global stylesheet, and wire up texts."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        MainWindow.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        MainWindow.setStyleSheet("QMainWindow{\n"
                                 "border-radius:15px\n"
                                 "}\n"
                                 "QWidget{\n"
                                 "border-radius:15px;\n"
                                 "}\n"
                                 "#frame{\n"
                                 "background: #e1e9ed;}\n"
                                 "QToolButton{\n"
                                 "background:#EAF7FF;\n"
                                 "border-radius:15px;\n"
                                 "}\n"
                                 "QToolButton:hover{\n"
                                 "background:#EAF7FF;\n"
                                 "border-radius:15px;\n"
                                 "background:#49ebff;\n"
                                 "}\n"
                                 "#label{\n"
                                 "text-align:center;\n"
                                 "}\n"
                                 "#welcome{\n"
                                 "text-align:center;\n"
                                 "}\n"
                                 "#toolButton_7\n"
                                 "{\n"
                                 "background:#e1e9ed;\n"
                                 "}")
        MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Creation history button ("历史创作")
        self.clientbutton = self._make_grid_button(
            self.centralwidget, "clientbutton", (540, 220, 200, 120),
            "./pictures/client.png", (80, 80))
        # Start-creating button ("开始创作"); instant popup for a drop-down menu
        self.roombutton = self._make_grid_button(
            self.centralwidget, "roombutton", (60, 220, 200, 120),
            "./pictures/coffee.png", (80, 80),
            popup_mode=QtWidgets.QToolButton.InstantPopup)
        # User management button ("用户管理")
        self.staffbutton = self._make_grid_button(
            self.centralwidget, "staffbutton", (300, 220, 200, 120),
            "./pictures/staff.png", (80, 80))
        # Placeholder buttons for features yet to come
        self.chartbutton = self._make_grid_button(
            self.centralwidget, "chartbutton", (300, 380, 200, 120),
            "./pictures/chart.png", (70, 70), minimum_size=(200, 120))
        self.toolButton_6 = self._make_grid_button(
            self.centralwidget, "toolButton_6", (540, 380, 200, 120),
            "./pictures/tobecontinued.png", (70, 80))
        self.orderbutton = self._make_grid_button(
            self.centralwidget, "orderbutton", (60, 380, 200, 120),
            "./pictures/order.png", (80, 80))
        # Header frame hosting the welcome label, logo and password button
        self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(0, 0, 800, 180))
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.welcome = QtWidgets.QLabel(self.frame)
        self.welcome.setGeometry(QtCore.QRect(40, 10, 751, 51))
        font = QtGui.QFont()
        font.setFamily("幼圆")
        font.setPointSize(12)
        self.welcome.setFont(font)
        self.welcome.setText("")
        self.welcome.setAlignment(QtCore.Qt.AlignCenter)
        self.welcome.setObjectName("welcome")
        # Logo button (text-less; icon-only, flat background via stylesheet)
        self.toolButton_7 = QtWidgets.QToolButton(self.frame)
        self.toolButton_7.setGeometry(QtCore.QRect(370, 70, 71, 71))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.toolButton_7.setFont(font)
        self.toolButton_7.setText("")
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("./pictures/hotel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton_7.setIcon(icon6)
        self.toolButton_7.setIconSize(QtCore.QSize(100, 100))
        self.toolButton_7.setObjectName("toolButton_7")
        # "Change password" button ("修改密码")
        self.modifyPwd = QtWidgets.QToolButton(self.frame)
        self.modifyPwd.setGeometry(QtCore.QRect(710, 150, 81, 21))
        self.modifyPwd.setStyleSheet("background:#e1e9ed")
        self.modifyPwd.setObjectName("modifyPwd")
        # Bottom caption labels
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(280, 540, 241, 41))
        font = QtGui.QFont()
        font.setFamily("幼圆")
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(350, 560, 241, 41))
        font = QtGui.QFont()
        font.setFamily("幼圆")
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign the (translatable) display texts to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.clientbutton.setText(_translate("MainWindow", "历史创作"))
        self.chartbutton.setText(_translate("MainWindow", "敬请期待"))
        self.toolButton_6.setText(_translate("MainWindow", "敬请期待"))
        self.orderbutton.setText(_translate("MainWindow", "敬请期待"))
        self.roombutton.setText(_translate("MainWindow", "开始创作"))
        self.staffbutton.setText(_translate("MainWindow", "用户管理"))
        self.modifyPwd.setText(_translate("MainWindow", "修改密码"))
        self.label.setText(_translate("MainWindow", "基于深度学习的油画创作系统-lcj"))
        self.label_2.setText(_translate("MainWindow", "version 1.0"))
""" Build a neural machine translation model based on the transformer architecture. """
import os
import sys
import json
import time
import logging
import argparse
import tempfile
import subprocess
import numpy as np
import tensorflow as tf
from datetime import datetime
from collections import OrderedDict
from transformer import Transformer as BaseTransformer
from lexical_shortcuts.lexical_shortcuts_transformer import Transformer as LexicalShortcutsTransformer
from lexical_shortcuts.dec_to_enc_shortcuts_transformer import Transformer as DecToEncShortcutsTransformer
from lexical_shortcuts.full_shortcuts_transformer import Transformer as FullShortcutsTransformer
from lexical_shortcuts.ablations.enc_only_shortcuts_transformer import Transformer as EncOnlyShortcutsTransformer
from lexical_shortcuts.ablations.dec_only_shortcuts_transformer import Transformer as DecOnlyShortcutsTransformer
from custom_iterator import TextIterator
from transformer_ops import get_parallel_ops, get_single_ops, VariableUpdateTrainer
from util import load_dict, seq2words, reverse_dict, get_visible_gpus, assign_to_device, count_parameters
from training_progress import TrainingProgress
# Debugging
from tensorflow.python import debug as tf_debug
def create_model(config, source_vocab_size, target_vocab_size):
    """ Creates the model independent of the TensorFlow session.

    Args:
        config: parsed run configuration; config.model_type selects the
            architecture and config.model_name names the model instance.
        source_vocab_size: size of the source vocabulary.
        target_vocab_size: size of the target vocabulary.

    Returns:
        An instantiated (but not yet session-initialized) Transformer variant.

    Raises:
        ValueError: if config.model_type is not a supported architecture.
    """
    logging.info('Building model \'{:s}\'.format(config.model_name)'.replace('.format(config.model_name)', '').format(config.model_name))
    # Dispatch table: all variants share the same constructor signature
    model_constructors = {
        'base_transformer': BaseTransformer,
        'lexical_shortcuts_transformer': LexicalShortcutsTransformer,
        'dec_to_enc_shortcuts_transformer': DecToEncShortcutsTransformer,
        'full_shortcuts_transformer': FullShortcutsTransformer,
        'enc_only_shortcuts_transformer': EncOnlyShortcutsTransformer,
        'dec_only_shortcuts_transformer': DecOnlyShortcutsTransformer,
    }
    constructor = model_constructors.get(config.model_type)
    if constructor is None:
        raise ValueError('Model type {:s} is not supported'.format(config.model_type))
    return constructor(config, source_vocab_size, target_vocab_size, config.model_name)
def average_checkpoints(to_load, config, sess):
    """ Averages model parameter values across the specified model checkpoints from the same training run;
    derived from https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/avg_checkpoints.py

    Args:
        to_load: dict mapping variable names to tf.Variable objects, or None
            (training case), in which case all global variables are averaged.
        config: run configuration; config.reload lists the checkpoint paths,
            each expected to end in '-<step>'.
        sess: active TensorFlow session used to run the assignment OPs.

    Returns:
        The path of the most recent (highest-step) of the averaged checkpoints.
    """
    # Iterate over the specified checkpoints and assign them to a map
    ckpt_map = dict()
    for ckpt_path in config.reload:
        # The trailing '-<step>' suffix of the checkpoint path is its step count
        ckpt_step = ckpt_path.split('-')[-1]
        ckpt_map[int(ckpt_step)] = ckpt_path
    ckpt_steps = ckpt_map.keys()
    latest_ckpt = max(ckpt_steps)
    sorted_keys = list(ckpt_steps)
    sorted_keys.sort()
    # Use neutral weights
    # NOTE: all weights are 1.0, so the result is a plain arithmetic mean
    scores = {ckpt_key: 1. for ckpt_key in sorted_keys}
    # Select variables to be loaded; to_load == None when training
    if to_load is None:
        # NOTE(review): in this branch variable names keep their ':0' suffix,
        # unlike the inference-time to_load built in session_setup (which
        # strips it) — confirm reader.get_tensor() accepts both forms.
        to_load = {var.name: var for var in tf.global_variables()}
    # Assess checkpoints from oldest to most recent and average their values; abort if checkpoint does not exist
    var_names = to_load.keys()
    var_values = {var_name: None for var_name in var_names}
    var_dtypes = {var_name: None for var_name in var_names}
    reload_filename = ckpt_map[latest_ckpt]
    logging.info('Reading-in {:d} checkpoints and averaging parameter values.'.format(len(config.reload)))
    for ckpt_id, ckpt_key in enumerate(sorted_keys):
        logging.info('Current checkpoint: {:s} ...'.format(ckpt_map[ckpt_key]))
        # Open checkpoint
        try:
            reader = tf.contrib.framework.load_checkpoint(ckpt_map[ckpt_key])
        except tf.errors.NotFoundError:
            logging.info('Checkpoint not found. Exiting.')
            sys.exit()
        for var_name in var_names:
            var_value = reader.get_tensor(var_name)
            # Update accumulation maps
            if var_name.startswith('global_step'):
                # global_step is overwritten each pass (not summed), so after
                # the ascending iteration it holds the latest checkpoint's step
                var_values[var_name] = var_value
            else:
                # Weighted running sum across checkpoints
                var_values[var_name] = var_value * scores[ckpt_key] if var_values[var_name] is None else \
                    var_values[var_name] + (var_value * scores[ckpt_key])
            var_dtypes[var_name] = var_value.dtype
            if ckpt_id == len(sorted_keys) - 1:
                # Average collected values
                # NOTE(review): this division also applies to 'global_step'
                # variables, which were overwritten rather than summed above —
                # dividing the latest step by the checkpoint count looks
                # unintended; confirm whether global_step should be exempt.
                var_values[var_name] /= len(config.reload)
    logging.info('Assigning averaged values to variables.')
    assign_ops = [tf.assign(to_load[var_name], var_values[var_name]) for var_name in var_names]
    sess.run(tf.group(assign_ops))
    return reload_filename
def session_setup(config, sess, model, training=False, max_checkpoints=10):
    """ Prepares the model and auxiliary resources for operation.

    Builds the saver, resolves which checkpoint (if any) to restore from,
    restores or freshly initializes variables accordingly, and — when
    training — reloads the persisted TrainingProgress.

    Args:
        config: run configuration; config.reload is None, a single-element
            list holding 'latest_checkpoint' / 'best_perplexity' / 'best_bleu'
            or an explicit path, or a multi-element list of paths (averaged).
        sess: active TensorFlow session.
        model: model object exposing load_global_step().
        training: True when setting up for training (restores optimizer state
            and training progress; affects the return signature).
        max_checkpoints: maximum number of checkpoints the saver retains.

    Returns:
        (saver, reload_filename, progress) when training is True, otherwise
        (saver, reload_filename); reload_filename is None for fresh starts.
    """
    to_init = list()
    # Exclude optimization variables to be loaded during inference (for greater model portability)
    to_load = None
    if not training:
        to_load = dict()
        model_vars = tf.global_variables()
        for var in model_vars:
            if 'optimization' in var.name:
                to_init.append(var)
            else:
                # Strip the ':0' output suffix so names match checkpoint keys
                to_load[var.name.split(':')[0]] = var
    # If a stand-alone model is called, variable names don't need to be mapped
    saver = tf.train.Saver(to_load, max_to_keep=max_checkpoints)
    reload_filename = None
    no_averaging = True
    # Multiple reload paths trigger checkpoint averaging
    if type(config.reload) == list and len(config.reload) > 1:
        reload_filename = average_checkpoints(to_load, config, sess)
        no_averaging = False
    else:
        if config.reload is not None:
            if config.reload[0] == 'latest_checkpoint':
                checkpoint_dir = os.path.dirname(config.save_to)
                reload_filename = tf.train.latest_checkpoint(checkpoint_dir)
                if reload_filename is not None:
                    # Guard against picking up a checkpoint from a different run
                    if os.path.basename(reload_filename).rsplit('-', 1)[0] != os.path.basename(config.save_to):
                        logging.error('Mismatching model filename found in the same directory while reloading '
                                      'from the latest checkpoint.')
                        sys.exit(1)
                    logging.info('Latest checkpoint found in directory {:s}.'.format(os.path.abspath(checkpoint_dir)))
            elif config.reload[0] == 'best_perplexity':
                checkpoint_dir = os.path.dirname(config.save_to)
                checkpoint_paths = tf.train.get_checkpoint_state(checkpoint_dir).all_model_checkpoint_paths
                # NOTE: raises IndexError if no 'best_perplexity' checkpoint exists
                reload_filename = [path for path in checkpoint_paths if 'best_perplexity' in path][0]
                if reload_filename is not None:
                    logging.info('Best perplexity checkpoint found in directory {:s}.'
                                 .format(os.path.abspath(checkpoint_dir)))
            elif config.reload[0] == 'best_bleu':
                checkpoint_dir = os.path.dirname(config.save_to)
                checkpoint_paths = tf.train.get_checkpoint_state(checkpoint_dir).all_model_checkpoint_paths
                # NOTE: raises IndexError if no 'best_bleu' checkpoint exists
                reload_filename = [path for path in checkpoint_paths if 'best_bleu' in path][0]
                if reload_filename is not None:
                    logging.info('Best BLEU checkpoint found in directory {:s}.'
                                 .format(os.path.abspath(checkpoint_dir)))
            else:
                # Treat the single entry as an explicit checkpoint path
                reload_filename = config.reload[0]
    # Initialize a progress tracking object and restore its values, if possible
    progress = TrainingProgress()
    progress.bad_counter = 0
    progress.uidx = 0
    progress.eidx = 0
    progress.estop = False
    progress.validation_perplexity = OrderedDict()
    progress.validation_bleu = OrderedDict()
    if reload_filename is not None and training:
        progress_path = '{:s}.progress.json'.format(reload_filename)
        if os.path.exists(progress_path):
            logging.info('Reloading training progress.')
            progress.load_from_json(progress_path)
            logging.info('Done!')
    if training:
        # If training process to be continued has been successfully completed before, terminate
        if progress.estop is True or \
                progress.eidx > config.max_epochs or \
                progress.uidx >= config.max_updates:
            logging.warning('Training is already complete. Disable reloading of training progress '
                            '(--no_reload_training_progress) or remove or modify progress file {:s} '
                            'to train anyway.'.format(progress_path))
            sys.exit(0)
    # If no source from which model parameters should be re-loaded has been specified, initialize model randomly
    if reload_filename is None:
        logging.info('Initializing model parameters from scratch.')
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        logging.info('Done!')
    # Otherwise, load parameters from specified source file
    else:
        reload_path = os.path.abspath(reload_filename)
        # For single checkpoint evaluation, load parameter values from checkpoint file
        # (averaging already assigned values in-session, so restoring would overwrite them)
        if no_averaging:
            logging.info('Loading model parameters from file {:s}.'.format(reload_path))
            saver.restore(sess, reload_path)
        # Initialize optimization parameters from scratch
        if len(to_init) > 0:
            logging.info('Initializing the rest from scratch.')
            init_op = tf.variables_initializer(to_init)
            sess.run(init_op)
        # Reset global_path variable before resuming the training
        if training:
            model.load_global_step(progress.uidx, sess)
        logging.info('Done!')
    logging.info('Finished setting up the model!')
    if training:
        return saver, reload_filename, progress
    else:
        return saver, reload_filename
def load_dictionaries(config):
    """ Loads the specified dictionary files and processes them for string look-up during translation.

    Args:
        config: run configuration providing source_vocab / target_vocab paths
            and the optional max_vocab_source / max_vocab_target limits
            (<= 0 disables truncation).

    Returns:
        Tuple of (source_to_index, target_to_index, index_to_source,
        index_to_target, source_vocab_size, target_vocab_size).
    """
    # Load in dictionaries (mapping: string -> string ID)
    source_to_index = load_dict(config.source_vocab)
    target_to_index = load_dict(config.target_vocab)
    # Truncate dictionaries, if specified. Rebuild via comprehension rather
    # than deleting entries while iterating: the original in-place deletion
    # raises 'RuntimeError: dictionary changed size during iteration' on
    # Python 3.
    if config.max_vocab_source > 0:
        source_to_index = {key: idx for key, idx in source_to_index.items()
                           if idx < config.max_vocab_source}
    if config.max_vocab_target > 0:
        target_to_index = {key: idx for key, idx in target_to_index.items()
                           if idx < config.max_vocab_target}
    # Reverse dictionaries (mapping: string ID -> string)
    index_to_source = reverse_dict(source_to_index)
    index_to_target = reverse_dict(target_to_index)
    # Get vocabulary sizes
    source_vocab_size = len(source_to_index)
    target_vocab_size = len(target_to_index)
    return source_to_index, target_to_index, index_to_source, index_to_target, source_vocab_size, target_vocab_size
def update_learning_rate(config, model_global_step):
    """ Adjust the current learning rate for the optimization of the target model based on training progress;
    As of now, specific to the transformer; see chapter 5.3. in 'Attention is all you Need'. """
    # Steps are 1-based in the schedule
    step = model_global_step + 1
    # Linear warm-up term, dominant while step < warmup_steps
    warmup_term = step * (config.warmup_steps ** (-1.5))
    # Inverse-square-root decay term, dominant afterwards
    decay_term = step ** (-0.5)
    return config.hidden_size ** (-0.5) * np.minimum(decay_term, warmup_term)
def get_dataset_iterator(custom_iterator, num_gpus, get_handle=False):
    """ Transforms a custom iterator into a TensorFlow Dataset iterator.

    Args:
        custom_iterator: generator-style iterator yielding five [batch, time]
            arrays per step (matching the dtypes declared below).
        num_gpus: number of GPUs in use; also used as the prefetch depth.
        get_handle: whether to additionally create a string-handle placeholder
            for switching between iterators.

    Returns:
        (dataset_iterator, dataset), plus iterator_handle when get_handle.
    """
    # Create a data-set whose elements are generated by the custom iterator
    dataset = tf.data.Dataset.from_generator(lambda: custom_iterator,
                                             (tf.int32, tf.int32, tf.int32, tf.float32, tf.float32),
                                             (tf.TensorShape([None, None]),
                                              tf.TensorShape([None, None]),
                                              tf.TensorShape([None, None]),
                                              tf.TensorShape([None, None]),
                                              tf.TensorShape([None, None])))
    # Enable pre-fetching. Dataset transformations return a NEW dataset rather
    # than mutating in place, so the result must be re-bound — the original
    # discarded it, silently disabling pre-fetching.
    prefetch_value = num_gpus if num_gpus >= 1 else 1
    dataset = dataset.prefetch(prefetch_value)
    # Based on the data-set, construct an initializeable iterator
    dataset_iterator = dataset.make_initializable_iterator()
    # Optionally, generate an iterator handle
    if get_handle:
        iterator_handle = tf.placeholder(tf.string, shape=[], name='iterator_handle')
        return dataset_iterator, dataset, iterator_handle
    return dataset_iterator, dataset
def train(config, sess_config):
    """ Executes the training loop with the specified model and data sets.

    Args:
        config: parsed run configuration (data paths, model type and
            hyper-parameters, reporting/validation/checkpoint frequencies).
        sess_config: session configuration passed to tf.Session.

    Side effects:
        Writes model checkpoints and '<save_to>*.progress.json' files next to
        config.save_to, optionally writes TensorBoard summaries, and closes
        its TensorFlow session before returning.
    """
    # Prepare data
    source_to_index, target_to_index, index_to_source, index_to_target, source_vocab_size, target_vocab_size = \
        load_dictionaries(config)
    # Set-up iterators
    # Initialize text iterators
    custom_train_iterator = TextIterator(config,
                                         config.source_dataset,
                                         config.target_dataset,
                                         config.save_to,
                                         [source_to_index],
                                         target_to_index,
                                         config.sentence_batch_size,
                                         config.token_batch_size,
                                         sort_by_length=True,
                                         shuffle_each_epoch=True,
                                         training=True)
    custom_valid_iterator = TextIterator(config,
                                         config.valid_source_dataset,
                                         config.valid_target_dataset,
                                         config.save_to,
                                         [source_to_index],
                                         target_to_index,
                                         config.sentence_batch_size,
                                         config.token_batch_size,
                                         sort_by_length=False,
                                         shuffle_each_epoch=False)
    train_iterator, train_dataset, iterator_handle = \
        get_dataset_iterator(custom_train_iterator, config.num_gpus, get_handle=True)
    valid_iterator, valid_dataset = get_dataset_iterator(custom_valid_iterator, config.num_gpus)
    # Iterator initializers
    train_init_op = train_iterator.make_initializer(train_dataset)
    valid_init_op = valid_iterator.make_initializer(valid_dataset)
    # Enable handles for switching between iterators
    train_valid_iterator = tf.data.Iterator.from_string_handle(iterator_handle,
                                                               train_dataset.output_types,
                                                               train_dataset.output_shapes)
    # Set-up the model
    model = create_model(config, source_vocab_size, target_vocab_size)
    # Save model options
    config_as_dict = OrderedDict(sorted(vars(config).items()))
    json.dump(config_as_dict, open('{:s}.json'.format(config.save_to), 'w'), indent=2)
    # Initialize session
    sess = tf.Session(config=sess_config)
    if config.debug:
        # Wrap the session in the interactive CLI debugger with a NaN/Inf filter
        sess = tf_debug.LocalCLIDebugWrapperSession(sess, dump_root=None)
        sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
    # Set up model trainer
    trainer = VariableUpdateTrainer(model,
                                    config.num_encoder_layers,
                                    train_valid_iterator,
                                    config.num_gpus,
                                    source_to_index['<EOS>'],
                                    config.gradient_delay,
                                    config.warmup_steps,
                                    config.num_gpus >= 2,
                                    sess,
                                    track_grad_rates=config.track_grad_rates,
                                    grad_norm_threshold=config.grad_norm_threshold)
    # Get validation and translation OPs
    if config.num_gpus >= 2:
        validation_ops = \
            get_parallel_ops(model, train_valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'training', True)
        translation_ops = \
            get_parallel_ops(model, train_valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'translation')
        logging.info('[Parallel training, gradient delay == {:d}]'.format(config.gradient_delay))
    else:
        validation_ops = \
            get_single_ops(model, train_valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'training', True)
        translation_ops = \
            get_single_ops(model, train_valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'translation')
        logging.info('[Single-device training, gradient delay == {:d}]'.format(config.gradient_delay))
    # Unpack validation and translation OPs
    _, batch_loss_op, sentence_losses_op, _ = validation_ops
    source_op, target_op, greedy_translations_op, sampled_translations_op, beam_translations_op, beam_scores_op = \
        translation_ops
    logging.info('-' * 20)
    model_size = count_parameters()
    logging.info('Number of model parameters (without activations): {:d}'.format(int(model_size)))
    logging.info('-' * 20)
    # Prepare model
    saver, checkpoint_path, progress = \
        session_setup(config, sess, model, training=True, max_checkpoints=config.max_checkpoints)
    if checkpoint_path is not None:
        logging.info('Resuming training from checkpoint {:s}'.format(checkpoint_path))
    # Handle summaries (see model definitions for summary definitions)
    train_summary_writer = None
    valid_summary_writer = None
    if config.summary_freq:
        if config.summary_dir is not None:
            summary_dir = config.summary_dir
        else:
            summary_dir = os.path.abspath(os.path.dirname(config.save_to))
        train_summary_dir = summary_dir + '/{:s}_train'.format(model.name)
        valid_summary_dir = summary_dir + '/{:s}_valid'.format(model.name)
        # Declare writers
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
        valid_summary_writer = tf.summary.FileWriter(valid_summary_dir, sess.graph)
    # Initialize iterator handles
    train_handle, valid_handle = sess.run([train_iterator.string_handle(), valid_iterator.string_handle()])
    # Initialize metrics
    model_global_step = 0
    training_losses = list()
    step_times = list()
    grad_norm_ratios = list()
    total_sentences, total_words = 0, 0
    early_stopped = False
    logging.info('[BEGIN TRAINING]')
    logging.info('Current global step: {:d}'.format(progress.uidx))
    logging.info('-' * 20)
    for epoch_id in range(progress.eidx, config.max_epochs):
        # Check if training has been early stopped
        if progress.estop:
            break
        # Track epoch-specific losses
        epoch_losses = list()
        logging.info('Current training epoch: {:d}'.format(epoch_id))
        logging.info('-' * 20)
        # (Re-)initialize the training iterator
        sess.run(train_init_op)
        # Consume training batches until the iterator is exhausted (one epoch)
        while True:
            try:
                # Update learning rate
                learning_rate = update_learning_rate(config, model_global_step)
                # Check if summaries need to be written
                write_batch_summary = config.summary_freq and ((model_global_step % config.summary_freq == 0) or
                                                               (config.max_updates and
                                                                model_global_step % config.max_updates == 0))
                # Define feed_dict
                feed_dict = {iterator_handle: train_handle,
                             model.learning_rate: learning_rate,
                             model.training: True}
                # Update model
                batch_loss, words_processed, train_op, grad_norm_ratio, summaries = trainer.forward()
                to_fetch = [model.global_step, batch_loss, words_processed, train_op, grad_norm_ratio]
                # Optionally add summaries
                if trainer.do_update and write_batch_summary:
                    to_fetch += [summaries]
                pre_fetch_time = time.time()
                fetches = sess.run(to_fetch, feed_dict=feed_dict)
                step_times.append(time.time() - pre_fetch_time)  # Keep track of update durations
                # Skip rest of training script if gradients have been cached and not applied
                if not trainer.do_update:
                    continue
                model_global_step = fetches[0]
                training_losses += [fetches[1]]
                epoch_losses += [fetches[1]]
                total_words += fetches[2]
                grad_norm_ratios.append(fetches[4])
                # Update the persistent global step tracker
                progress.uidx = int(model_global_step)
                # Reset caches following the gradient application (not very elegant, but the only thing found to work)
                if trainer.do_update:
                    sess.run(trainer.zero_op)
                # Write summaries
                if write_batch_summary:
                    train_summary_writer.add_summary(fetches[-1], global_step=model_global_step)
                # Report progress
                if config.disp_freq and model_global_step % config.disp_freq == 0:
                    duration = sum(step_times)
                    current_time = datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
                    logging.info('{:s}[TRAIN] Epoch {:d} | Step {:d} | Loss/ word {:4f} | Words/ sec {:.4f} | '
                                 'Words/ update {:4f} | Updates/ sec: {:.4f} | Learning rate {:.8f} | '
                                 'Grad norm ratio {:.4f}'
                                 .format(current_time, epoch_id, model_global_step,
                                         sum(training_losses) / len(training_losses),
                                         total_words / duration, total_words / len(training_losses),
                                         len(training_losses) / duration, learning_rate,
                                         sum(grad_norm_ratios) / len(grad_norm_ratios)))
                    logging.info('-' * 20)
                    # Reset the windowed statistics after each report
                    step_times = list()
                    training_losses = list()
                    total_words = 0

                # NOTE(review): this closure is re-defined on every training
                # step; it only captures enclosing names, so defining it once
                # outside the loop would behave identically.
                def sample_model_output(random_sample=False, beam_search=False, n_displayed=10):
                    """ Displays model output for greedy decoding and decoding via weighted sampling. """
                    # (Re-)initialize the validation iterator
                    sess.run(valid_init_op)
                    # Translate a single batch from the validation data-set
                    sample_feed_dict = {iterator_handle: valid_handle,
                                        model.training: False}
                    input_ops = [source_op, target_op]
                    if random_sample:
                        called_ops = [sampled_translations_op]
                        logging.info('[SAMPLED TRANSLATIONS]\n')
                    elif beam_search:
                        called_ops = [beam_translations_op, beam_scores_op]
                        logging.info('[BEAM SEARCH FOR BEAM OF {:d}]\n'.format(config.beam_size))
                    else:
                        called_ops = [greedy_translations_op]
                        logging.info('[GREEDY TRANSLATIONS]\n')
                    # Iterate over the entire validation set
                    # Ideally, only one batch should be drawn, but due to the nature of the Dataset iterator, this does
                    # not seem possible/ trivial
                    collected_fetches = list()
                    while True:
                        try:
                            sample_fetches = sess.run(input_ops + called_ops, feed_dict=sample_feed_dict)
                            collected_fetches.append(sample_fetches)
                        except tf.errors.OutOfRangeError:
                            break
                    # Surface first batch only
                    instances = zip(*collected_fetches[0])
                    for instance_id, instance in enumerate(instances):
                        logging.info('SOURCE: {:s}'.format(seq2words(instance[0], index_to_source)))
                        logging.info('TARGET: {:s}'.format(seq2words(instance[1], index_to_target)))
                        if not beam_search:
                            logging.info('SAMPLE: {:s}'.format(seq2words(instance[2], index_to_target)))
                            logging.info('\n')
                        else:
                            for sample_id, sample in enumerate(instance[2]):
                                logging.info('SAMPLE {:d}: {:s}\nScore {:.4f} | Length {:d} | Score {:.4f}'
                                             .format(sample_id, seq2words(sample, index_to_target),
                                                     instance[3][sample_id], len(sample), instance[3][sample_id]))
                                logging.info('\n')
                                # Only display top-3 translations within the beam
                                if sample_id >= 2:
                                    break
                        if instance_id >= n_displayed:
                            break

                # Monitor model performance by generating output with sampling
                if config.greedy_freq and model_global_step % config.greedy_freq == 0:
                    sample_model_output()
                    logging.info('-' * 20)
                # Monitor model performance by generating output with sampling
                if config.sample_freq and model_global_step % config.sample_freq == 0:
                    sample_model_output(random_sample=True)
                    logging.info('-' * 20)
                # Monitor model performance by generating output with beam search
                if config.beam_freq and model_global_step % config.beam_freq == 0:
                    sample_model_output(beam_search=True)
                    logging.info('-' * 20)
                if config.valid_freq and model_global_step % config.valid_freq == 0:
                    logging.info('[BEGIN VALIDATION]')
                    logging.info('-' * 20)
                    # (Re-)initialize the validation iterator
                    sess.run(valid_init_op)
                    # NOTE: shadows the 'validation_ops' tuple unpacked above;
                    # harmless, as only the unpacked OPs are used afterwards
                    validation_ops = [batch_loss_op, sentence_losses_op]
                    handles = [iterator_handle, valid_handle]
                    # Get validation perplexity only
                    validation_loss, validation_perplexity, _, validation_global_step = \
                        validation_loop(sess, model, validation_ops, handles, valid_summary_writer)
                    # Optionally calculate validation BLEU
                    if config.bleu_script is not None:
                        # Re-initialize the validation iterator
                        sess.run(valid_init_op)
                        decoding_ops = [target_op, greedy_translations_op, beam_translations_op, beam_scores_op]
                        validation_bleu = \
                            validation_bleu_loop(sess, model, config, decoding_ops, handles, index_to_target,
                                                 valid_summary_writer, validation_global_step)
                        # Save best-BLEU checkpoints
                        if len(progress.validation_bleu) == 0 or \
                                validation_bleu > max(list(progress.validation_bleu.values())):
                            progress.validation_bleu[int(model_global_step)] = validation_bleu
                            saver.save(sess, save_path='{:s}-best_bleu'.format(config.save_to))
                            logging.info(
                                '[CHECKPOINT] Saved a best-BLEU model checkpoint to {:s}.'.format(config.save_to))
                            progress_path = '{:s}-best_bleu.progress.json'.format(config.save_to)
                            progress.save_to_json(progress_path)
                            logging.info('-' * 20)
                        else:
                            # Track BLEU
                            progress.validation_bleu[int(model_global_step)] = validation_bleu
                    if len(progress.validation_perplexity) == 0 or \
                            validation_perplexity < min(list(progress.validation_perplexity.values())):
                        progress.validation_perplexity[int(model_global_step)] = validation_perplexity
                        # Save model checkpoint in case validation performance has improved
                        saver.save(sess, save_path='{:s}-best_perplexity'.format(config.save_to))
                        logging.info(
                            '[CHECKPOINT] Saved a best-perplexity model checkpoint to {:s}.'.format(config.save_to))
                        progress_path = '{:s}-best_perplexity.progress.json'.format(config.save_to)
                        progress.save_to_json(progress_path)
                        logging.info('-' * 20)
                        # Perplexity improved, so the patience counter resets
                        progress.bad_counter = 0
                    else:
                        # Track perplexity
                        progress.validation_perplexity[int(model_global_step)] = validation_perplexity
                        # Check for early-stopping
                        progress.bad_counter += 1
                        if progress.bad_counter > config.patience > 0:
                            # Execute early stopping of the training
                            logging.info(
                                'No improvement observed on the validation set for {:d} steps. Early stop!'
                                .format(progress.bad_counter))
                            progress.estop = True
                            early_stopped = True
                            break
                # Save model parameters
                if config.save_freq and model_global_step % config.save_freq == 0:
                    saver.save(sess, save_path=config.save_to, global_step=model_global_step)
                    logging.info(
                        '[CHECKPOINT] Saved a scheduled model checkpoint to {:s}.'.format(config.save_to))
                    logging.info('-' * 20)
                    progress_path = '{:s}-{:d}.progress.json'.format(config.save_to, model_global_step)
                    progress.save_to_json(progress_path)
                if config.max_updates and model_global_step % config.max_updates == 0:
                    logging.info('Maximum number of updates reached!')
                    saver.save(sess, save_path=config.save_to, global_step=progress.uidx)
                    logging.info('[CHECKPOINT] Saved the training-final model checkpoint to {:s}.'
                                 .format(config.save_to))
                    logging.info('-' * 20)
                    progress.estop = True
                    progress_path = '{:s}-{:d}.progress.json'.format(config.save_to, progress.uidx)
                    progress.save_to_json(progress_path)
                    break
            except tf.errors.OutOfRangeError:
                # Epoch exhausted; roll back the aggregation-step counter for
                # the incomplete final accumulation before the next epoch
                trainer.curr_agg_step -= 1
                break
        if not early_stopped:
            logging.info('Epoch {:d} concluded'.format(epoch_id))
            try:
                logging.info('Average epoch loss: {:.4f}.'.format(sum(epoch_losses) / len(epoch_losses)))
            except ZeroDivisionError:
                pass
        # Update the persistent global step tracker
        progress.uidx = int(model_global_step)
        # Update the persistent epoch tracker
        progress.eidx += 1
    # Close active session
    sess.close()
def validation_loop(sess, model, ops, handles, valid_summary_writer, external=False):
    """ Iterates over the validation data, calculating a trained model's cross-entropy. """
    # Unpack the loss OPs to evaluate
    batch_loss_op, sentence_losses_op = ops
    # Accumulators for per-batch and per-sentence losses
    batch_losses = list()
    sentence_losses = list()
    valid_global_step = 0
    # Build the feed dictionary, routing through the iterator handle if given
    if handles is not None:
        handle, valid_handle = handles
        feed_dict = {handle: valid_handle,
                     model.training: False}
    else:
        feed_dict = {model.training: False}
    logging.info('Estimating validation loss ... ')
    # Consume the validation set until the iterator is exhausted
    while True:
        try:
            # Run a forward pass through the model
            # Note, per-sentence losses used by the model are already length-normalized
            fetches = sess.run([model.global_step, batch_loss_op, sentence_losses_op], feed_dict=feed_dict)
            if fetches is not None:
                batch_losses.append(fetches[1])
                sentence_losses.extend(fetches[2].tolist())
                valid_global_step = fetches[0]
            if len(sentence_losses) > 0:
                logging.info('Evaluated {:d} sentences'.format(len(sentence_losses)))
        except tf.errors.OutOfRangeError:
            break
    # Aggregate and report
    mean_valid_loss = sum(batch_losses) / len(batch_losses)
    valid_perplexity = np.exp(mean_valid_loss)
    if not external:
        current_time = datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
        logging.info('-' * 20)
        logging.info('{:s}[VALID] Loss/ word {:.4f} | Perplexity: {:.4f} | Sentence total {:d}'
                     .format(current_time, mean_valid_loss, valid_perplexity, len(sentence_losses)))
    # Write summaries
    if valid_summary_writer:
        valid_loss_summary = \
            tf.Summary(value=[tf.Summary.Value(tag='validation_loss', simple_value=mean_valid_loss)])
        valid_perplexity_summary = \
            tf.Summary(value=[tf.Summary.Value(tag='validation_perplexity', simple_value=valid_perplexity)])
        valid_summary_writer.add_summary(valid_loss_summary, global_step=valid_global_step)
        valid_summary_writer.add_summary(valid_perplexity_summary, global_step=valid_global_step)
    return mean_valid_loss, valid_perplexity, sentence_losses, valid_global_step
def validation_bleu_loop(sess, model, config, ops, handles, target_dict, valid_summary_writer, valid_global_step,
                         external=False):
    """ Iterates over the validation data, calculating the BLEU score of a trained model's beam-search translations.

    sess: active TF session | model: model object | config: run configuration
    ops: (target_op, greedy_trans_op, beam_trans_op, beam_scores_op) forwarded to translation_loop
    handles: optional (iterator_handle, valid_handle) pair for feedable iterators
    target_dict: index-to-word dictionary for the target language
    valid_summary_writer / valid_global_step: optional TensorBoard destination
    Returns the BLEU score as a float (0.0 when it could not be extracted).
    """
    # Unpack iterator variables
    if handles is not None:
        handle, valid_handle = handles
        feed_dict = {handle: valid_handle,
                     model.training: False}
    else:
        feed_dict = {model.training: False}
    logging.info('Estimating validation BLEU ... ')
    temp_translation_file = tempfile.NamedTemporaryFile(mode='w')
    temp_reference_file = tempfile.NamedTemporaryFile(mode='w')
    try:
        # Generate validation set translations
        translation_loop(sess,
                         ops,
                         feed_dict,
                         target_dict,
                         temp_translation_file,
                         temp_reference_file,
                         external=False,
                         beam_decoding=True,
                         full_beam=False)
        # Assumes multi_bleu_detok.perl is used for BLEU calculation and reporting
        temp_translation_file.flush()
        temp_reference_file.flush()
        process_args = \
            [config.bleu_script, temp_translation_file.name, temp_reference_file.name, config.valid_gold_reference]
        # BUG FIX: the original passed shell=True together with an argument *list*; on POSIX
        # that makes everything after the first element a shell positional parameter, so the
        # BLEU script received no file arguments at all. Execute the script directly instead.
        process = subprocess.Popen(process_args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        bleu_score = 0.0
        if len(stderr) > 0:
            logging.warning('Validation script wrote the following to standard error:\n{}'.format(stderr))
        if process.returncode != 0:
            logging.warning('Validation script failed (returned exit status of {:d})'.format(process.returncode))
        try:
            print('Validation script output:\n{}'.format(stdout))
            if config.use_sacrebleu:
                # sacreBLEU prints e.g. 'BLEU ... = 27.3 ...'
                bleu_score = float(stdout.decode('utf-8').split(' = ')[1].split(' ')[0])
            else:
                # multi-bleu prints e.g. 'BLEU = 27.30, ...'
                bleu_score = float(stdout.decode('utf-8').split(' ')[2][:-1])
        except (IndexError, ValueError):
            # ValueError covers a non-numeric token in an otherwise well-formed line;
            # the original message also had a .format() call with no placeholder.
            logging.warning('Unable to extract validation-BLEU from the script output:\n{}'.format(stdout))
    finally:
        # NamedTemporaryFile removes the backing file on close, so close only after the
        # external script has consumed it — and always close, even on error.
        temp_translation_file.close()
        temp_reference_file.close()
    # Report
    if not external:
        current_time = datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
        logging.info('-' * 20)
        logging.info('{:s}[VALID] BLEU: {:.2f}'.format(current_time, bleu_score))
        # Write summaries
        if valid_summary_writer and valid_global_step:
            valid_loss_summary = \
                tf.Summary(value=[tf.Summary.Value(tag='validation_bleu', simple_value=bleu_score)])
            valid_summary_writer.add_summary(valid_loss_summary, global_step=valid_global_step)
    return bleu_score
def translation_loop(sess, ops, feed_dict, target_dict, out_file, ref_file=None, external=False, beam_decoding=False,
                     full_beam=False):
    """ Iterates over the translation source, generating translations in the target language.

    sess: active TF session
    ops: (target_op, greedy_trans_op, beam_trans_op, beam_scores_op) tuple
    feed_dict: feed dict for sess.run (iterator handle and/or training flag)
    target_dict: index-to-word dictionary used to convert ID sequences to text
    out_file: writable file object receiving one translation per line
    ref_file: optional writable file object receiving the corresponding references
    external: if True, report translation speed to the log
    beam_decoding: decode with beam search instead of greedy search
    full_beam: additionally write the non-top beam hypotheses with their scores
    """
    # Unpack OPs
    target_op, greedy_trans_op, beam_trans_op, beam_scores_op = ops
    # Track progress
    total_sentences = 0
    translations = list()
    references = list()
    beam_scores = list()
    start_time = time.time()
    while True:
        try:
            if beam_decoding:
                ref_batch, target_batch, scores = \
                    sess.run([target_op, beam_trans_op, beam_scores_op], feed_dict=feed_dict)
            else:
                ref_batch, target_batch = sess.run([target_op, greedy_trans_op], feed_dict=feed_dict)
                scores = None
            if target_batch is not None:
                translations.append(list(target_batch))
                references.append(list(ref_batch))
                if scores is not None:
                    beam_scores.append(list(scores))
                total_sentences += target_batch.shape[0]
            if len(translations) > 0:
                logging.info('Translated {:d} sentences'.format(total_sentences))
        except tf.errors.OutOfRangeError:
            break
    duration = time.time() - start_time
    # Flatten information to be printed
    if beam_decoding:
        output_beams = list()
        score_beams = list()
        for batch_id, translation_batch in enumerate(translations):
            output_beams += [beams for beams in translation_batch]  # unpack batches
            score_beams += [beams for beams in beam_scores[batch_id]]
        outputs = list(zip(output_beams, score_beams))
    else:
        outputs = [sentence for batch in translations for sentence in batch]
    # BUG FIX: np.object was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `object` is the documented replacement for ragged/object-dtype arrays.
    outputs = np.array(outputs, dtype=object)
    # Flatten references
    references = [sentence for batch in references for sentence in batch]
    references = np.array(references, dtype=object)
    # Write translations to file
    for sentence_id in range(len(outputs)):
        if beam_decoding:
            beams = list(zip(outputs[sentence_id][0], outputs[sentence_id][1]))
            # Beam entries are assumed sorted best-first; write the top hypothesis plainly
            best_sequence, score = beams[0]
            target_string = '{:s}\n'.format(seq2words(best_sequence, target_dict))
            out_file.write(target_string)
            if full_beam:
                # Write the full beam, one scored hypothesis per line, blank-line separated
                for sequence, score in beams[1:]:
                    target_string = seq2words(sequence, target_dict)
                    out_file.write('{:s} | {:.4f}\n'.format(target_string, score))
                out_file.write('\n')
        else:
            target_string = seq2words(outputs[sentence_id], target_dict)
            out_file.write('{:s}\n'.format(target_string))
        # Write references
        if ref_file:
            ref_string = seq2words(references[sentence_id], target_dict)
            ref_file.write('{:s}\n'.format(ref_string))
    if external:
        # Report to STDOUT
        logging.info('-' * 20)
        logging.info('Translated {:d} sentences in {:.4f} seconds at {:.4f} sentences per second.'
                     .format(total_sentences, duration, total_sentences / duration))
def validate(config, sess_config):
    """ Helper function for executing model validation outside of the training loop.

    config: parsed command-line configuration (requires --reload and the valid_* paths)
    sess_config: tf.ConfigProto used to construct the session
    Exits the process when no checkpoint can be found.
    """
    assert config.reload is not None, \
        'Model path is not specified. Set path to model checkpoint using the --reload flag.'
    # Prepare data
    source_to_index, target_to_index, index_to_source, index_to_target, source_vocab_size, target_vocab_size = \
        load_dictionaries(config)
    # Set-up iterator
    custom_valid_iterator = TextIterator(config,
                                         config.valid_source_dataset,
                                         config.valid_target_dataset,
                                         config.save_to,
                                         [source_to_index],
                                         target_to_index,
                                         config.sentence_batch_size,
                                         config.token_batch_size,
                                         sort_by_length=False,
                                         shuffle_each_epoch=False)
    valid_iterator, _ = get_dataset_iterator(custom_valid_iterator, config.num_gpus)
    # Set-up the model
    model = create_model(config, source_vocab_size, target_vocab_size)
    # Get model OPs (multi-device when 2+ GPUs are configured)
    if config.num_gpus >= 2:
        validation_ops = get_parallel_ops(model, valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'training')
        translation_ops = \
            get_parallel_ops(model, valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'translation')
        logging.info('[Parallel validation]')
    else:
        validation_ops = get_single_ops(model, valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'training')
        translation_ops = \
            get_single_ops(model, valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'translation')
        logging.info('[Single-device validation]')
    # Unpack OPs
    _, batch_loss_op, sentence_losses_op, _, summaries_op = validation_ops
    source_op, target_op, greedy_translations_op, sampled_translations_op, beam_translations_op, beam_scores_op = \
        translation_ops
    # Initialize session
    sess = tf.Session(config=sess_config)
    # Prepare model
    saver, checkpoint_path = session_setup(config, sess, model, training=False)
    logging.info('-' * 20)
    if checkpoint_path is not None:
        logging.info('Validating model initialized form checkpoint {:s}'.format(checkpoint_path))
    else:
        logging.info('No checkpoint to initialize the translation model from could be found. Exiting.')
        sys.exit(1)
    logging.info('-' * 20)
    logging.info('Performing validation on corpus {:s}'.format(config.valid_target_dataset))
    logging.info('[BEGIN VALIDATION]')
    logging.info('-' * 20)
    # Validate (cross-entropy / perplexity)
    sess.run(valid_iterator.initializer)
    valid_ops = [batch_loss_op, sentence_losses_op]
    valid_loss, valid_perplexity, sentence_losses, _ = \
        validation_loop(sess, model, valid_ops, None, None, external=True)
    logging.info('-' * 20)
    # Calculate BLEU (iterator must be re-initialized for the second pass)
    sess.run(valid_iterator.initializer)
    translation_ops = [target_op, greedy_translations_op, beam_translations_op, beam_scores_op]
    valid_bleu = \
        validation_bleu_loop(sess, model, config, translation_ops, None, index_to_target, None, None, external=True)
    # Release session resources once all graph evaluations are done
    sess.close()
    # Report per-sentence scores alongside the corpus lines
    # BUG FIX: the file handle returned by open() was never closed; use a context manager.
    with open(config.valid_target_dataset) as corpus_file:
        corpus_lines = corpus_file.readlines()
    logging.info('-' * 20)
    for line, cost in zip(corpus_lines, sentence_losses):
        logging.info('{:s} | {:.4f}'.format(line.strip(), cost))
    logging.info('-' * 20)
    mean_valid_loss = sum(sentence_losses) / len(sentence_losses)
    valid_perplexity = np.exp(mean_valid_loss)
    logging.info('Loss/ word: {:.4f} | Perplexity: {:.4f} | BLEU: {:.4f}'
                 .format(mean_valid_loss, valid_perplexity, valid_bleu))
def translate(config, sess_config, model=None):
    """ Produces translations of the specified corpus using a trained translation model.

    config: parsed command-line configuration (requires translate_source_file/translate_target_file)
    sess_config: tf.ConfigProto used to construct the session
    model: unused in practice — a fresh model is always created below
    Exits the process when no checkpoint can be found.
    """
    # NOTE(review): this guard looks inverted — a checkpoint is needed precisely when no
    # model is passed in, and `model` is unconditionally rebuilt below anyway. Behavior
    # kept as-is; confirm intent before changing.
    if model is not None:
        assert config.reload is not None, \
            'Model path is not specified. Set path to model checkpoint using the --reload flag.'
    # Prepare data
    source_to_index, target_to_index, index_to_source, index_to_target, source_vocab_size, target_vocab_size = \
        load_dictionaries(config)
    # Set-up iterator (no target corpus — inference only)
    custom_translate_iterator = TextIterator(config,
                                             config.translate_source_file,
                                             None,
                                             config.save_to,
                                             [source_to_index],
                                             target_to_index,
                                             config.sentence_batch_size,
                                             config.token_batch_size,
                                             sort_by_length=False,
                                             shuffle_each_epoch=False)
    translate_iterator, _ = get_dataset_iterator(custom_translate_iterator, config.num_gpus)
    # Set-up the model
    model = create_model(config, source_vocab_size, target_vocab_size)
    # For now, default to single-device OP; TODO: Fix for multi-GPU in the future.
    translation_ops = \
        get_single_ops(model, translate_iterator, config.num_gpus, source_to_index['<EOS>'], 'translation')
    logging.info('[Single-device translation]')
    # Unpack OPs
    _, target_op, greedy_translations_op, _, beam_translations_op, beam_scores_op = translation_ops
    # Initialize session
    sess = tf.Session(config=sess_config)
    # Prepare model
    saver, checkpoint_path = session_setup(config, sess, model, training=False)
    logging.info('-' * 20)
    if checkpoint_path is not None:
        logging.info('Translation model initialized form checkpoint {:s}'.format(checkpoint_path))
        if len(config.reload) > 1:
            logging.info('... averaged over {:d} preceding checkpoints.'.format(len(config.reload)))
    else:
        logging.info('No checkpoint to initialize the translation model from could be found. Exiting.')
        sys.exit(1)
    logging.info('-' * 20)
    logging.info('NOTE: Maximum translation length is capped to {:d}.'.format(config.translation_max_len))
    logging.info('Translating {:s} to {:s}.'.format(config.translate_source_file, config.translate_target_file))
    logging.info('-' * 20)
    # Define the feed_dict for the translation loop
    feed_dict = {model.training: False}
    # Initialize the inference iterator
    sess.run(translate_iterator.initializer)
    # BUG FIX: the output file was left open if translation_loop raised; a context
    # manager guarantees it is flushed and closed on every path.
    with open(config.translate_target_file, 'w') as target_file:
        # Translate the source data-set
        translation_loop(sess,
                         [target_op, greedy_translations_op, beam_translations_op, beam_scores_op],
                         feed_dict,
                         index_to_target,
                         target_file,
                         external=True,
                         beam_decoding=config.translate_with_beam_search,
                         full_beam=config.full_beam)
    # Release session resources
    sess.close()
def translation_scorer(config, sess_config):
    """ Helper function for scoring individual test-set translations, as required for the evaluation of ablations
    corpora such as LingEval97 and ContraWSD. """
    assert config.reload is not None, \
        'Model path is not specified. Set path to model checkpoint using the --reload flag.'
    # Load vocabularies
    source_to_index, target_to_index, index_to_source, index_to_target, source_vocab_size, target_vocab_size = \
        load_dictionaries(config)
    # Build the validation-set iterator
    custom_valid_iterator = TextIterator(config,
                                         config.valid_source_dataset,
                                         config.valid_target_dataset,
                                         config.save_to,
                                         [source_to_index],
                                         target_to_index,
                                         config.sentence_batch_size,
                                         config.token_batch_size,
                                         sort_by_length=False,
                                         shuffle_each_epoch=False)
    valid_iterator, _ = get_dataset_iterator(custom_valid_iterator, config.num_gpus)
    # Build the model graph
    model = create_model(config, source_vocab_size, target_vocab_size)
    # Both getters take identical arguments, so select the appropriate one up front
    op_getter = get_parallel_ops if config.num_gpus >= 2 else get_single_ops
    validation_ops = op_getter(model, valid_iterator, config.num_gpus, source_to_index['<EOS>'], 'training')
    _, batch_loss_op, sentence_losses_op, _, summaries_op = validation_ops
    # Open a session and restore the model from its checkpoint
    sess = tf.Session(config=sess_config)
    saver, checkpoint_path = session_setup(config, sess, model, training=False)
    logging.info('-' * 20)
    if checkpoint_path is None:
        logging.info('No checkpoint to initialize the translation model from could be found. Exiting.')
        sys.exit(1)
    logging.info('Scoring validation set sentences for the model initialized form checkpoint {:s}'
                 .format(checkpoint_path))
    logging.info('-' * 20)
    logging.info('Scoring validation set sentences in corpus {:s}'.format(config.valid_target_dataset, model.name))
    logging.info('-' * 20)
    # Collect per-sentence model scores until the iterator is exhausted
    sess.run(valid_iterator.initializer)
    feed_dict = {model.training: False}
    all_sentence_scores = list()
    sentence_id = 0
    while True:
        try:
            batch_scores = sess.run(sentence_losses_op, feed_dict=feed_dict)
        except tf.errors.OutOfRangeError:
            break
        all_sentence_scores.extend(batch_scores.tolist())
        if (sentence_id + 1) % 100 == 0:
            logging.info('Collected model scores for {:d} sentences'.format(sentence_id + 1))
        sentence_id += 1
    logging.info('Done')
    # Derive the destination path from the source corpus name and model type
    destination_dir = '.'.join(config.valid_source_dataset.split('.')[: -1])
    destination_path = '{:s}.{:s}.scores'.format(destination_dir, config.model_type)
    with open(destination_path, 'w') as dst:
        dst.writelines('{:f}\n'.format(score) for score in all_sentence_scores)
    logging.info('Scores file saved to {:s}'.format(destination_path))
def parse_args():
    """Build the command-line interface, parse arguments, and post-process the result.

    Returns an argparse.Namespace with two derived attributes added
    (source_vocab, target_vocab) and, when --embiggen_model is given,
    several hyper-parameters overridden to the transformer-BIG values.
    Exits the process on missing/invalid required arguments.
    """
    parser = argparse.ArgumentParser()
    # --- data sets, vocabulary limits ---
    data = parser.add_argument_group('data sets; model loading and saving')
    data.add_argument('--source_dataset', type=str, metavar='PATH',
                      help='parallel training corpus (source)')
    data.add_argument('--target_dataset', type=str, metavar='PATH',
                      help='parallel training corpus (target)')
    data.add_argument('--dictionaries', type=str, required=True, metavar='PATH', nargs='+',
                      help='model vocabularies (source & target)')
    data.add_argument('--max_vocab_source', type=int, default=-1, metavar='INT',
                      help='maximum length of the source vocabulary; unlimited by default (default: %(default)s)')
    data.add_argument('--max_vocab_target', type=int, default=-1, metavar='INT',
                      help='maximum length of the target vocabulary; unlimited by default (default: %(default)s)')
    # --- model architecture ---
    network = parser.add_argument_group('network parameters')
    network.add_argument('--model_name', type=str, default='nematode_model',
                         help='model file name (default: %(default)s)')
    # NOTE(review): the default 'transformer' is not in the choices list below, and
    # argparse does not validate defaults against choices — so running without an
    # explicit --model_type yields a value downstream code may not recognize. Confirm
    # whether the default should be 'base_transformer'.
    network.add_argument('--model_type', type=str, default='transformer',
                         choices=['base_transformer',
                                  'lexical_shortcuts_transformer',
                                  'dec_to_enc_shortcuts_transformer',
                                  'full_shortcuts_transformer',
                                  'enc_only_shortcuts_transformer',
                                  'dec_only_shortcuts_transformer'],
                         help='type of the model to be trained / used for inference (default: %(default)s)')
    network.add_argument('--embiggen_model', action='store_true',
                         help='scales up the model to match the transformer-BIG specifications')
    network.add_argument('--embedding_size', type=int, default=512, metavar='INT',
                         help='embedding layer size (default: %(default)s)')
    network.add_argument('--num_encoder_layers', type=int, default=6, metavar='INT',
                         help='number of encoder layers')
    network.add_argument('--num_decoder_layers', type=int, default=6, metavar='INT',
                         help='number of decoder layers')
    network.add_argument('--ffn_hidden_size', type=int, default=2048, metavar='INT',
                         help='inner dimensionality of feed-forward sub-layers in FAN models (default: %(default)s)')
    network.add_argument('--hidden_size', type=int, default=512, metavar='INT',
                         help='dimensionality of the model\'s hidden representations (default: %(default)s)')
    network.add_argument('--num_heads', type=int, default=8, metavar='INT',
                         help='number of attention heads used in multi-head attention (default: %(default)s)')
    network.add_argument('--untie_decoder_embeddings', action='store_true',
                         help='untie the decoder embedding matrix from the output projection matrix')
    network.add_argument('--untie_enc_dec_embeddings', action='store_true',
                         help='untie the encoder embedding matrix from the embedding and '
                              'projection matrices in the decoder')
    # --- optimization and checkpointing ---
    training = parser.add_argument_group('training parameters')
    training.add_argument('--max_len', type=int, default=100, metavar='INT',
                          help='maximum sequence length for training and validation (default: %(default)s)')
    training.add_argument('--token_batch_size', type=int, default=4096, metavar='INT',
                          help='mini-batch size in tokens; set to 0 to use sentence-level batch size '
                               '(default: %(default)s)')
    training.add_argument('--sentence_batch_size', type=int, default=64, metavar='INT',
                          help='mini-batch size in sentences (default: %(default)s)')
    training.add_argument('--maxibatch_size', type=int, default=20, metavar='INT',
                          help='maxi-batch size (number of mini-batches sorted by length) (default: %(default)s)')
    training.add_argument('--max_epochs', type=int, default=100, metavar='INT',
                          help='maximum number of training epochs (default: %(default)s)')
    training.add_argument('--max_updates', type=int, default=1000000, metavar='INT',
                          help='maximum number of updates (default: %(default)s)')
    training.add_argument('--warmup_steps', type=int, default=4000, metavar='INT',
                          help='number of initial updates during which the learning rate is increased linearly during '
                               'learning rate scheduling(default: %(default)s)')
    training.add_argument('--learning_rate', type=float, default=2e-4, metavar='FLOAT',
                          help='initial learning rate (default: %(default)s)')
    training.add_argument('--adam_beta1', type=float, default=0.9, metavar='FLOAT',
                          help='exponential decay rate of the mean estimate (default: %(default)s)')
    training.add_argument('--adam_beta2', type=float, default=0.98, metavar='FLOAT',
                          help='exponential decay rate of the variance estimate (default: %(default)s)')
    training.add_argument('--adam_epsilon', type=float, default=1e-9, metavar='FLOAT',
                          help='prevents division-by-zero (default: %(default)s)')
    training.add_argument('--dropout_embeddings', type=float, default=0.1, metavar='FLOAT',
                          help='dropout applied to sums of word embeddings and positional encodings '
                               '(default: %(default)s)')
    training.add_argument('--dropout_residual', type=float, default=0.1, metavar='FLOAT',
                          help='dropout applied to residual connections (default: %(default)s)')
    training.add_argument('--dropout_relu', type=float, default=0.1, metavar='FLOAT',
                          help='dropout applied to the internal activation of the feed-forward sub-layers '
                               '(default: %(default)s)')
    training.add_argument('--dropout_attn', type=float, default=0.1, metavar='FLOAT',
                          help='dropout applied to attention weights (default: %(default)s)')
    training.add_argument('--label_smoothing_discount', type=float, default=0.1, metavar='FLOAT',
                          help='discount factor for regularization via label smoothing (default: %(default)s)')
    training.add_argument('--grad_norm_threshold', type=float, default=0., metavar='FLOAT',
                          help='gradient clipping threshold - may improve training stability; '
                               'disabled by default (default: %(default)s)')
    training.add_argument('--save_freq', type=int, default=5000, metavar='INT',
                          help='save frequency (default: %(default)s)')
    training.add_argument('--save_to', type=str, default='model', metavar='PATH',
                          help='model checkpoint location (default: %(default)s)')
    training.add_argument('--reload', type=str, nargs='+', default=None, metavar='PATH',
                          help='load existing model from this path; set to \'latest_checkpoint\' '
                               'to reload the latest checkpoint found in the --save_to directory')
    training.add_argument('--max_checkpoints', type=int, default=1000, metavar='INT',
                          help='number of checkpoints to keep (default: %(default)s)')
    training.add_argument('--summary_dir', type=str, required=False, metavar='PATH',
                          help='directory for saving summaries (default: same as --save_to)')
    training.add_argument('--summary_freq', type=int, default=100, metavar='INT',
                          help='summary writing frequency; 0 disables summaries (default: %(default)s)')
    training.add_argument('--num_gpus', type=int, default=0, metavar='INT',
                          help='number of GPUs to be used by the system; '
                               'no GPUs are used by default (default: %(default)s)')
    training.add_argument('--log_file', type=str, default=None, metavar='PATH',
                          help='log file location (default: %(default)s)')
    training.add_argument('--debug', action='store_true',
                          help='enable the TF debugger')
    training.add_argument('--shortcut_type', type=str, default='lexical',
                          choices=['lexical', 'lexical_plus_feature_fusion', 'non-lexical'],
                          help='defines the shortcut variant to use in the version of the transformer equipped with '
                               'shortcut connections')
    training.add_argument('--gradient_delay', type=int, default=0, metavar='INT',
                          help='Amount of steps by which the optimizer updates are to be delayed; '
                               'longer delays correspond to larger effective batch sizes (default: %(default)s)')
    training.add_argument('--track_grad_rates', action='store_true',
                          help='track gradient norm rates and parameter-grad rates as TensorBoard summaries')
    training.add_argument('--track_gate_values', action='store_true',
                          help='track gate activations for models with shortcuts as TensorBoard summaries')
    # --- validation ---
    validation = parser.add_argument_group('validation parameters')
    validation.add_argument('--valid_source_dataset', type=str, default=None, metavar='PATH',
                            help='source validation corpus (default: %(default)s)')
    validation.add_argument('--valid_target_dataset', type=str, default=None, metavar='PATH',
                            help='target validation corpus (default: %(default)s)')
    validation.add_argument('--valid_gold_reference', type=str, default=None, metavar='PATH',
                            help='unprocessed target validation corpus used in calculating sacreBLEU '
                                 '(default: %(default)s)')
    validation.add_argument('--use_sacrebleu', action='store_true',
                            help='whether to use sacreBLEU for validation and testing')
    validation.add_argument('--valid_freq', type=int, default=4000, metavar='INT',
                            help='validation frequency (default: %(default)s)')
    validation.add_argument('--patience', type=int, default=-1, metavar='INT',
                            help='number of steps without validation-loss improvement required for early stopping; '
                                 'disabled by default (default: %(default)s)')
    validation.add_argument('--validate_only', action='store_true',
                            help='perform external validation with a pre-trained model')
    validation.add_argument('--bleu_script', type=str, default=None, metavar='PATH',
                            help='path to the external validation script (default: %(default)s); '
                                 'receives path of translation source file; must write a single score to STDOUT')
    validation.add_argument('--score_translations', action='store_true',
                            help='scores translations provided in a target file according to the learned model')
    # --- logging / sampling frequencies ---
    display = parser.add_argument_group('display parameters')
    display.add_argument('--disp_freq', type=int, default=100, metavar='INT',
                         help='training metrics display frequency (default: %(default)s)')
    display.add_argument('--greedy_freq', type=int, default=1000, metavar='INT',
                         help='greedy sampling frequency (default: %(default)s)')
    display.add_argument('--sample_freq', type=int, default=0, metavar='INT',
                         help='weighted sampling frequency; disabled by default (default: %(default)s)')
    display.add_argument('--beam_freq', type=int, default=10000, metavar='INT',
                         help='beam search sampling frequency (default: %(default)s)')
    display.add_argument('--beam_size', type=int, default=4, metavar='INT',
                         help='size of the decoding beam (default: %(default)s)')
    # --- inference ---
    translation = parser.add_argument_group('translation parameters')
    translation.add_argument('--translate_only', action='store_true',
                             help='translate a specified corpus using a pre-trained model')
    translation.add_argument('--translate_source_file', type=str, metavar='PATH',
                             help='corpus to be translated; must be pre-processed')
    translation.add_argument('--translate_target_file', type=str, metavar='PATH',
                             help='translation destination')
    translation.add_argument('--translate_with_beam_search', action='store_true',
                             help='translate using beam search')
    translation.add_argument('--length_normalization_alpha', type=float, default=0.6, metavar='FLOAT',
                             help='adjusts the severity of length penalty during beam decoding (default: %(default)s)')
    translation.add_argument('--no_normalize', action='store_true',
                             help='disable length normalization')
    translation.add_argument('--full_beam', action='store_true',
                             help='return all translation hypotheses within the beam')
    translation.add_argument('--translation_max_len', type=int, default=400, metavar='INT',
                             help='Maximum length of translation output sentence (default: %(default)s)')
    config = parser.parse_args()
    # Cross-argument validation: the training corpora are required even though they are
    # not marked required=True (inference-only runs share this parser).
    if not config.source_dataset:
        logging.error('--source_dataset is required')
        sys.exit(1)
    if not config.target_dataset:
        logging.error('--target_dataset is required')
        sys.exit(1)
    # Put check in place until factors are implemented
    if len(config.dictionaries) != 2:
        logging.error('exactly two dictionaries need to be provided')
        sys.exit(1)
    config.source_vocab = config.dictionaries[0]
    config.target_vocab = config.dictionaries[-1]
    # Embiggen the model: override hyper-parameters with transformer-BIG values
    if config.embiggen_model:
        config.embedding_size = 1024
        config.ffn_hidden_size = 4096
        config.hidden_size = 1024
        config.num_heads = 16
        config.dropout_embeddings = 0.3
        config.adam_beta2 = 0.998
        config.warmup_steps = 16000
    return config
if __name__ == "__main__":
    # IMPORTANT: Limit the number of reserved GPUs via 'export CUDA_VISIBLE_DEVICES $GPU_ID'
    # Assemble config
    config = parse_args()
    # Logging to file; append when resuming from a checkpoint so earlier history is kept
    filemode = 'a' if config.reload else 'w'
    logging.basicConfig(filename=config.log_file, filemode=filemode, level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    if config.log_file is not None:
        # Logging to console as well (when no log file is given, basicConfig already
        # logs to stderr, so no extra handler is needed)
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        logging.getLogger('').addHandler(console)
    # Log the configuration when (re-)starting training/ validation/ translation
    logging.info('\nRUN CONFIGURATION')
    logging.info('=================')
    for key, val in config.__dict__.items():
        logging.info('{:s}: {}'.format(key, val))
    logging.info('=================\n')
    # Configure session
    sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = False
    # Filter out memory warnings
    # NOTE(review): setting TF_CPP_MIN_LOG_LEVEL here is likely ineffective — the variable
    # must be set before TensorFlow is first imported for its C++ logging to honor it.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    with tf.Graph().as_default():
        # Dispatch to exactly one of the four run modes
        if config.translate_only:
            # Translate a file
            if not config.translate_source_file:
                logging.error('--translate_source_file is required')
                sys.exit(1)
            if not config.translate_target_file:
                logging.error('--translate_target_file is required')
                sys.exit(1)
            translate(config, sess_config)
        elif config.validate_only:
            validate(config, sess_config)
        elif config.score_translations:
            translation_scorer(config, sess_config)
        else:
            train(config, sess_config)
|
# Copyright 2020-2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Providers for PDKs to be used by downstream synthesis.
"""
# Provider carrying everything a synthesis/P&R flow needs to know about a
# standard-cell library: per-corner timing data, LEF geometry, and optional
# OpenROAD/OpenRCX configuration.
StandardCellInfo = provider(
    "Contains information about the standard cells used for synthesis",
    fields = {
        "corners": "list of CornerInfos for the PDK",
        "default_corner": "A default corner info defined for the PDK.",
        "tech_lef": "Tech LEF file for the PDK",
        "cell_lef_definitions": "list of Abstract LEFs files for each standard cell.",
        "parasitic_extraction_benchmark": "Optional calibration file for OpenRCX.",
        "open_road_configuration": "OpenROAD PDK configuration.",
    },
)
# Provider describing one process corner of a standard-cell library.
# Fix: corrected the "wheter" typo in the with_leakage field documentation
# (these strings are surfaced verbatim by documentation tooling).
CornerInfo = provider(
    "Contains information about standard cells at different corners",
    fields = {
        "liberty": "A file that points to the liberty file for this corner",
        "with_ccsnoise": "boolean Indicates that this is a ccsnoise model.",
        "with_leakage": "boolean Indicates whether leakage is included in the model",
        "corner_name": "Name of the process corner",
    },
)
|
from django.http import JsonResponse
from shop.models import ProductSKU
import json
class DataIntegrityCheckMixin:
    """Mixin that validates a JSON cart payload (sku_id + count) before dispatching.

    Rejects the request with a {'res': 0, 'errmsg': ...} JSON response when the body
    is malformed, fields are missing, the product does not exist, or the requested
    quantity is out of range; otherwise defers to the next dispatch() in the MRO.
    """

    def dispatch(self, request, *args, **kwargs):
        # Parse the JSON request body.
        try:
            data = json.loads(request.body.decode())
        except ValueError:
            # BUG FIX: was a bare `except:`, which also swallowed unrelated errors
            # (including KeyboardInterrupt/SystemExit). ValueError covers both
            # malformed JSON (json.JSONDecodeError) and undecodable bytes
            # (UnicodeDecodeError) — each subclasses ValueError.
            return JsonResponse({'res': 0, 'errmsg': 'Invalid data'})
        sku_id = data.get('sku_id')
        count = data.get('count')
        # NOTE: all() treats count == 0 as missing, so a zero count reports 'Lack of data'.
        if not all([sku_id, count]):
            return JsonResponse({'res': 0, 'errmsg': 'Lack of data'})
        try:
            count = int(count)
        except (ValueError, TypeError):
            # TypeError additionally covers non-scalar payloads such as lists or dicts,
            # which the original ValueError-only handler let propagate as a 500.
            return JsonResponse({'res': 0, 'errmsg': 'Invalid item count'})
        try:
            product = ProductSKU.objects.get(id=sku_id)
        except ProductSKU.DoesNotExist:
            return JsonResponse({'res': 0, 'errmsg': 'Item does not exist'})
        if count > product.stock:
            return JsonResponse({'res': 0, 'errmsg': 'Understocked'})
        if count <= 0:
            return JsonResponse({'res': 0, 'errmsg': 'At least 1 item required'})
        # Payload is valid — continue normal view dispatch.
        return super().dispatch(request, *args, **kwargs)
|
import asyncio
import logging
import typing
from typing import Optional
from aiohttp.web import Request
from lbry.error import ResolveError, DownloadSDTimeoutError, InsufficientFundsError
from lbry.error import ResolveTimeoutError, DownloadDataTimeoutError, KeyFeeAboveMaxAllowedError
from lbry.error import InvalidStreamURLError
from lbry.stream.managed_stream import ManagedStream
from lbry.torrent.torrent_manager import TorrentSource
from lbry.utils import cache_concurrent
from lbry.schema.url import URL
from lbry.wallet.dewies import dewies_to_lbc
from lbry.file.source_manager import SourceManager
from lbry.file.source import ManagedDownloadSource
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager, Output
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
log = logging.getLogger(__name__)
class FileManager:
    def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', wallet_manager: 'WalletManager',
                 storage: 'SQLiteStorage', analytics_manager: Optional['AnalyticsManager'] = None):
        # Event loop used for timing (loop.time()) and scheduling
        self.loop = loop
        self.config = config
        self.wallet_manager = wallet_manager
        self.storage = storage
        # Optional — consumers must tolerate analytics_manager being None
        self.analytics_manager = analytics_manager
        # Maps a source type name (e.g. 'stream', 'torrent') to its SourceManager
        self.source_managers: typing.Dict[str, SourceManager] = {}
        # Set by start() once every registered source manager has started
        self.started = asyncio.Event()
    @property
    def streams(self):
        # Convenience accessor for the stream manager's sources.
        # NOTE(review): reaches into the protected _sources mapping and assumes a
        # 'stream' manager is always registered (raises KeyError otherwise).
        return self.source_managers['stream']._sources
async def create_stream(self, file_path: str, key: Optional[bytes] = None, **kwargs) -> ManagedDownloadSource:
if 'stream' in self.source_managers:
return await self.source_managers['stream'].create(file_path, key, **kwargs)
raise NotImplementedError
async def start(self):
await asyncio.gather(*(source_manager.start() for source_manager in self.source_managers.values()))
for manager in self.source_managers.values():
await manager.started.wait()
self.started.set()
def stop(self):
for manager in self.source_managers.values():
# fixme: pop or not?
manager.stop()
self.started.clear()
@cache_concurrent
async def download_from_uri(self, uri, exchange_rate_manager: 'ExchangeRateManager',
timeout: Optional[float] = None, file_name: Optional[str] = None,
download_directory: Optional[str] = None,
save_file: Optional[bool] = None, resolve_timeout: float = 3.0,
wallet: Optional['Wallet'] = None) -> ManagedDownloadSource:
wallet = wallet or self.wallet_manager.default_wallet
timeout = timeout or self.config.download_timeout
start_time = self.loop.time()
resolved_time = None
stream = None
claim = None
error = None
outpoint = None
if save_file is None:
save_file = self.config.save_files
if file_name and not save_file:
save_file = True
if save_file:
download_directory = download_directory or self.config.download_dir
else:
download_directory = None
payment = None
try:
# resolve the claim
try:
if not URL.parse(uri).has_stream:
raise InvalidStreamURLError(uri)
except ValueError:
raise InvalidStreamURLError(uri)
try:
resolved_result = await asyncio.wait_for(
self.wallet_manager.ledger.resolve(
wallet.accounts, [uri],
include_purchase_receipt=True,
include_is_my_output=True
), resolve_timeout
)
except asyncio.TimeoutError:
raise ResolveTimeoutError(uri)
except Exception as err:
if isinstance(err, asyncio.CancelledError):
raise
log.exception("Unexpected error resolving stream:")
raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
if 'error' in resolved_result:
raise ResolveError(f"Unexpected error resolving uri for download: {resolved_result['error']}")
if not resolved_result or uri not in resolved_result:
raise ResolveError(f"Failed to resolve stream at '{uri}'")
txo = resolved_result[uri]
if isinstance(txo, dict):
raise ResolveError(f"Failed to resolve stream at '{uri}': {txo}")
claim = txo.claim
outpoint = f"{txo.tx_ref.id}:{txo.position}"
resolved_time = self.loop.time() - start_time
await self.storage.save_claim_from_output(self.wallet_manager.ledger, txo)
####################
# update or replace
####################
if claim.stream.source.bt_infohash:
source_manager = self.source_managers['torrent']
existing = source_manager.get_filtered(bt_infohash=claim.stream.source.bt_infohash)
elif claim.stream.source.sd_hash:
source_manager = self.source_managers['stream']
existing = source_manager.get_filtered(sd_hash=claim.stream.source.sd_hash)
else:
raise ResolveError(f"There is nothing to download at {uri} - Source is unknown or unset")
# resume or update an existing stream, if the stream changed: download it and delete the old one after
to_replace, updated_stream = None, None
if existing and existing[0].claim_id != txo.claim_id:
raise ResolveError(f"stream for {existing[0].claim_id} collides with existing download {txo.claim_id}")
if existing:
log.info("claim contains a metadata only update to a stream we have")
if claim.stream.source.bt_infohash:
await self.storage.save_torrent_content_claim(
existing[0].identifier, outpoint, existing[0].torrent_length, existing[0].torrent_name
)
claim_info = await self.storage.get_content_claim_for_torrent(existing[0].identifier)
existing[0].set_claim(claim_info, claim)
else:
await self.storage.save_content_claim(
existing[0].stream_hash, outpoint
)
await source_manager._update_content_claim(existing[0])
updated_stream = existing[0]
else:
existing_for_claim_id = self.get_filtered(claim_id=txo.claim_id)
if existing_for_claim_id:
log.info("claim contains an update to a stream we have, downloading it")
if save_file and existing_for_claim_id[0].output_file_exists:
save_file = False
if not claim.stream.source.bt_infohash:
existing_for_claim_id[0].downloader.node = source_manager.node
await existing_for_claim_id[0].start(timeout=timeout, save_now=save_file)
if not existing_for_claim_id[0].output_file_exists and (
save_file or file_name or download_directory):
await existing_for_claim_id[0].save_file(
file_name=file_name, download_directory=download_directory
)
to_replace = existing_for_claim_id[0]
# resume or update an existing stream, if the stream changed: download it and delete the old one after
if updated_stream:
log.info("already have stream for %s", uri)
if save_file and updated_stream.output_file_exists:
save_file = False
if not claim.stream.source.bt_infohash:
updated_stream.downloader.node = source_manager.node
await updated_stream.start(timeout=timeout, save_now=save_file)
if not updated_stream.output_file_exists and (save_file or file_name or download_directory):
await updated_stream.save_file(
file_name=file_name, download_directory=download_directory
)
return updated_stream
####################
# pay fee
####################
needs_purchasing = (
not to_replace and
not txo.is_my_output and
txo.has_price and
not txo.purchase_receipt
)
if needs_purchasing:
payment = await self.wallet_manager.create_purchase_transaction(
wallet.accounts, txo, exchange_rate_manager
)
####################
# make downloader and wait for start
####################
if not claim.stream.source.bt_infohash:
# fixme: this shouldnt be here
stream = ManagedStream(
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
analytics_manager=self.analytics_manager
)
stream.downloader.node = source_manager.node
else:
stream = TorrentSource(
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
file_name=file_name, download_directory=download_directory or self.config.download_dir,
status=ManagedStream.STATUS_RUNNING,
analytics_manager=self.analytics_manager,
torrent_session=source_manager.torrent_session
)
log.info("starting download for %s", uri)
before_download = self.loop.time()
await stream.start(timeout, save_file)
####################
# success case: delete to_replace if applicable, broadcast fee payment
####################
if to_replace: # delete old stream now that the replacement has started downloading
await source_manager.delete(to_replace)
if payment is not None:
await self.wallet_manager.broadcast_or_release(payment)
payment = None # to avoid releasing in `finally` later
log.info("paid fee of %s for %s", dewies_to_lbc(stream.content_fee.outputs[0].amount), uri)
await self.storage.save_content_fee(stream.stream_hash, stream.content_fee)
source_manager.add(stream)
if not claim.stream.source.bt_infohash:
await self.storage.save_content_claim(stream.stream_hash, outpoint)
else:
await self.storage.save_torrent_content_claim(
stream.identifier, outpoint, stream.torrent_length, stream.torrent_name
)
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
stream.set_claim(claim_info, claim)
if save_file:
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
loop=self.loop)
return stream
except asyncio.TimeoutError:
error = DownloadDataTimeoutError(stream.sd_hash)
raise error
except Exception as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
if isinstance(err, expected):
log.warning("Failed to download %s: %s", uri, str(err))
elif isinstance(err, asyncio.CancelledError):
pass
else:
log.exception("Unexpected error downloading stream:")
error = err
raise
finally:
if payment is not None:
# payment is set to None after broadcasting, if we're here an exception probably happened
await self.wallet_manager.ledger.release_tx(payment)
if self.analytics_manager and claim and claim.stream.source.bt_infohash:
# TODO: analytics for torrents
pass
elif self.analytics_manager and (error or (stream and (stream.downloader.time_to_descriptor or
stream.downloader.time_to_first_bytes))):
server = self.wallet_manager.ledger.network.client.server
self.loop.create_task(
self.analytics_manager.send_time_to_first_bytes(
resolved_time, self.loop.time() - start_time, None if not stream else stream.download_id,
uri, outpoint,
None if not stream else len(stream.downloader.blob_downloader.active_connections),
None if not stream else len(stream.downloader.blob_downloader.scores),
None if not stream else len(stream.downloader.blob_downloader.connection_failures),
False if not stream else stream.downloader.added_fixed_peers,
self.config.fixed_peer_delay if not stream else stream.downloader.fixed_peers_delay,
None if not stream else stream.sd_hash,
None if not stream else stream.downloader.time_to_descriptor,
None if not (stream and stream.descriptor) else stream.descriptor.blobs[0].blob_hash,
None if not (stream and stream.descriptor) else stream.descriptor.blobs[0].length,
None if not stream else stream.downloader.time_to_first_bytes,
None if not error else error.__class__.__name__,
None if not error else str(error),
None if not server else f"{server[0]}:{server[1]}"
)
)
async def stream_partial_content(self, request: Request, sd_hash: str):
return await self.source_managers['stream'].stream_partial_content(request, sd_hash)
def get_filtered(self, *args, **kwargs) -> typing.List[ManagedDownloadSource]:
"""
Get a list of filtered and sorted ManagedStream objects
:param sort_by: field to sort by
:param reverse: reverse sorting
:param comparison: comparison operator used for filtering
:param search_by: fields and values to filter by
"""
return sum((manager.get_filtered(*args, **kwargs) for manager in self.source_managers.values()), [])
async def delete(self, source: ManagedDownloadSource, delete_file=False):
for manager in self.source_managers.values():
await manager.delete(source, delete_file)
|
import QUANTAXIS as QA
from QUANTAXIS.QAFetch.QAhuobi import FIRST_PRIORITY
from scipy.signal import butter, lfilter
import numpy as np
import matplotlib.pyplot as plt
from QUANTAXIS.QAIndicator.talib_numpy import *
if __name__ == '__main__':
    # USDT-quoted pairs to pull alongside the exchanges' first-priority symbols.
    codelist = ['BCHUSDT', 'BSVUSDT', 'BTCUSDT', 'EOSUSDT', 'ETHUSDT', 'ETCUSDT', 'DASHUSDT', 'LTCUSDT', 'XMRUSDT', 'XRPUSDT', 'ZECUSDT']
    # Hourly bars for 2018-01-01 .. 2020-06-30 from the local QUANTAXIS store.
    # NOTE(review): data_1h is fetched but never used below -- the study runs
    # on daily data; confirm whether this fetch can be dropped.
    data_1h = QA.QA_fetch_crypto_asset_min_adv(['binance','huobi'],
                                               symbol=codelist + FIRST_PRIORITY,
                                               start='2018-01-01',
                                               end='2020-06-30 23:59:59',
                                               frequence='60min')
    #data_4h = QA.QA_DataStruct_Crypto_Asset_min(data_1h.resample('4h'))
    #massive_predict_1h = data_day.add_func(price_predict_with_macd_trend_func)
    from QUANTAXIS.QAAnalysis.QAAnalysis_signal import *
    def ADXm(price, p=14, Level=25):
        """
        Port of an MQL4 'ADXm' indicator (translated from the original Chinese
        docstring): unlike the traditional ADX, which is plotted in absolute
        units and hides the trend direction, this variant displays the positive
        and negative ADX half-waves separately (colored on the chart) with the
        DI+/DI- gap shown in grey. Usage matches the traditional indicator; a
        dashed horizontal level marks where the market is considered trending,
        typically at the 20-25 percent level depending on the timeframe.

        Settings:
        p - ADX period.
        Level - significance level.

        NOTE(review): unfinished stub -- it always returns False.
        """
        Bars = len(price)
        # NOTE(review): IndicatorCounted is not defined anywhere in this file
        # (an MQL4 built-in left over from the port) -- this call raises NameError.
        IndicatorCounted()
        Open = price.open.values
        High = price.high.values
        Low = price.low.values
        Close = price.close.values
        # NOTE(review): pandas expects lowercase `level=`; `Level=0` raises a
        # TypeError and also shadows the significance-level parameter above.
        Time = price.index.get_level_values(Level=0)
        return False
data_day = QA.QA_fetch_crypto_asset_day_adv(['huobi'],
symbol=['btcusdt'],
start='2018-01-01',
end='2020-06-30 23:59:59')
price_predict_day = data_day.add_func(price_predict_with_macd_trend_func)
ma30_croos_day = data_day.add_func(ma30_cross_func).reset_index([1,2])
dual_cross_day = data_day.add_func(dual_cross_func).reset_index([1,2])
boll_bands_day = data_day.add_func(boll_cross_func).reset_index([1,2])
tmom_day = time_series_momemtum(data_day.data.close, 10).reset_index([1,2])
tmom_negative = ((tmom_day['close'] < 0) & (price_predict_day['DEA'] < 0)) | \
((tmom_day['close'] < 0) & (price_predict_day['DELTA'] < 0)) | \
((tmom_day['close'] < 0) & (price_predict_day['MACD_CROSS_SX'] < price_predict_day['MACD_CROSS_JX']))
tmom_negative = tmom_negative[tmom_negative.apply(lambda x: x == True)] # eqv. Trim(x == False)
x_tp_min = price_predict_day[price_predict_day.apply(lambda x: x['PRICE_PRED_CROSS'] > 0, axis = 1)]['PRICE_PRED_CROSS'].values # eqv. Trim(x < 0)
x_tp_max = price_predict_day[price_predict_day.apply(lambda x: x['PRICE_PRED_CROSS'] < 0, axis = 1)]['PRICE_PRED_CROSS'].values * -1 # eqv. Trim(x > 0)
bootstrap_exodus = (tmom_negative & (boll_bands_day['BOLL_CROSS_JX'] > 2) & (price_predict_day['PRICE_PRED_CROSS_JX'] < price_predict_day['PRICE_PRED_CROSS_SX'])) & \
(price_predict_day['MACD_CROSS_JX'] < price_predict_day['MACD_CROSS_SX']) & (price_predict_day['DELTA'] > 0) & \
~((boll_bands_day['BBW_MA20'] > boll_bands_day['BOLL_WIDTH']) & (price_predict_day['MACD'] > 0))
bootstrap_exodus = bootstrap_exodus[bootstrap_exodus.apply(lambda x: x == True)] # eqv. Trim(x == False)
bootstrap_exodus2 = ((dual_cross_day['DUAL_CROSS_JX'] > 0) & (boll_bands_day['BOLL_CROSS_JX'] > 18) & (ma30_croos_day['MA30_CROSS_JX'] < ma30_croos_day['MA30_CROSS_SX'])) & \
((price_predict_day['PRICE_PRED_CROSS_JX'] < price_predict_day['PRICE_PRED_CROSS_SX'])) & \
~((boll_bands_day['BBW_MA20'] > boll_bands_day['BOLL_WIDTH']) & (price_predict_day['MACD'] > 0))
bootstrap_exodus2 = bootstrap_exodus2[bootstrap_exodus2.apply(lambda x: x == True)] # eqv. Trim(x == False)
bootstrap_exodus3 = ((dual_cross_day['DUAL_CROSS_JX'] > 0) & (boll_bands_day['BOLL_CROSS_JX'] > 2) & (price_predict_day['MACD_CROSS_JX'] < price_predict_day['MACD_CROSS_SX'])) & \
((price_predict_day['PRICE_PRED_CROSS_JX'] < price_predict_day['PRICE_PRED_CROSS_SX'])) & \
(((boll_bands_day['BOLL_CROSS_JX'] > 8) & (ma30_croos_day['MA30_CROSS_JX'] < ma30_croos_day['MA30_CROSS_SX'])) | (boll_bands_day['BOLL_CROSS_JX'] < 6)) & \
~((boll_bands_day['BBW_MA20'] > boll_bands_day['BOLL_WIDTH']) & (price_predict_day['MACD'] > 0))
bootstrap_exodus3 = bootstrap_exodus3[bootstrap_exodus3.apply(lambda x: x == True)] # eqv. Trim(x == False)
plt.figure(figsize = (22,9))
plt.plot(data_day.index.get_level_values(level=0), data_day.close, 'c', linewidth=0.6, alpha=0.75)
plt.plot(data_day.index.get_level_values(level=0), boll_bands_day['BOLL_UB'], linewidth = 0.6, alpha = 0.75)
plt.plot(data_day.index.get_level_values(level=0), boll_bands_day['BOLL_LB'], linewidth=0.6, alpha=0.75)
plt.plot(data_day.index.get_level_values(level=0), boll_bands_day['BOLL_MA'], linewidth = 0.6, alpha = 0.75)
plt.plot(tmom_negative.index, data_day.data.loc[tmom_negative.index].close, 'bx')
plt.plot(bootstrap_exodus.index, data_day.data.close.loc[bootstrap_exodus.index], 'co')
plt.plot(bootstrap_exodus2.index, data_day.data.close.loc[bootstrap_exodus2.index], 'yo')
plt.plot(bootstrap_exodus3.index, data_day.data.close.loc[bootstrap_exodus3.index], 'go')
plt.plot(data_day.close.iloc[x_tp_max].index.get_level_values(level=0), data_day.close.iloc[x_tp_max], 'gx')
plt.plot(data_day.close.iloc[x_tp_min].index.get_level_values(level=0), data_day.close.iloc[x_tp_min], 'ro')
plt.show()
|
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# No runtime dependencies yet; kept as a named list so install_requires stays
# declarative and easy to extend.
requirements = []

setup(
    # Distribution identity.
    name='timecheck',
    version='0.1.0',
    license='MIT',
    description='Check elapsed time',
    # Author / project links.
    author='Jeeseung Han',
    author_email='jinh574@naver.com',
    url='https://hashbox.github.io',
    # Package contents and dependencies.
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    classifiers=[],
)
|
import os
import unittest
from datetime import datetime
from intuitquickbooks.auth import Oauth1SessionManager
from intuitquickbooks.client import QuickBooks
from intuitquickbooks.objects.trackingclass import Class
class ClassTest(unittest.TestCase):
    """Sandbox integration tests for the QuickBooks tracking ``Class`` object."""

    def setUp(self):
        # Credentials come from the environment so they never live in source control.
        self.session_manager = Oauth1SessionManager(
            sandbox=True,
            consumer_key=os.environ.get('CONSUMER_KEY'),
            consumer_secret=os.environ.get('CONSUMER_SECRET'),
            access_token=os.environ.get('ACCESS_TOKEN'),
            access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET'),
        )
        self.qb_client = QuickBooks(
            session_manager=self.session_manager,
            sandbox=True,
            company_id=os.environ.get('COMPANY_ID')
        )
        # Timestamped name keeps repeated runs from colliding in the sandbox.
        self.name = "Test Class {0}".format(datetime.now().strftime('%d%H%M'))

    def test_create(self):
        """A new tracking Class can be saved and read back by Id."""
        tracking_class = Class()
        tracking_class.Name = self.name
        tracking_class.save(qb=self.qb_client)
        query_tracking_class = Class.get(tracking_class.Id, qb=self.qb_client)
        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        self.assertEqual(query_tracking_class.Id, tracking_class.Id)
        self.assertEqual(query_tracking_class.Name, self.name)

    def test_update(self):
        """An existing tracking Class can be renamed and the change persists."""
        updated_name = "Updated {}".format(self.name)
        tracking_class = Class.all(max_results=1, qb=self.qb_client)[0]
        tracking_class.Name = updated_name
        tracking_class.save(qb=self.qb_client)
        query_tracking_class = Class.get(tracking_class.Id, qb=self.qb_client)
        self.assertEqual(query_tracking_class.Id, tracking_class.Id)
        self.assertEqual(query_tracking_class.Name, updated_name)
|
import time, datetime
import pandas as pd
import numpy as np
import json
from NLP.SVM.svm import Svm
from NLP.PREPROCESSING.preprocessor import Preprocessor
class SdgSvm(Svm):
    """
    Concrete class to classify SDGs for modules and publications using the Svm model.
    """

    def __init__(self):
        super().__init__()

    def make_text_predictions(self, text, preprocessor):
        """
        Predict SDG class probabilities for an arbitrary text input.

        text: raw text to classify.
        preprocessor: object whose preprocess(text) method cleans the input.
        Returns the probability matrix produced by the fitted SGD pipeline.
        """
        text = preprocessor.preprocess(text)
        y_pred = self.sgd_pipeline.predict_proba([text])
        return y_pred

    def run(self):
        """
        Train the SVM model for classifying SDGs using stochastic gradient
        descent, then serialize the fitted model to disk.
        """
        svm_dataset = "NLP/SVM/SDG/dataset.csv"
        # NOTE(review): range(1, 19) yields 18 tags (SDG 1..18) while the UN
        # defines 17 SDGs -- confirm the extra tag is intentional.
        tags = ['SDG {}'.format(i) for i in range(1, 19)]  # SDG tags.

        # SDG results files.
        model = "NLP/SVM/SDG/model.pkl"

        self.load_dataset(svm_dataset)
        self.load_tags(tags)

        print("Training...")
        # Dead code removed: the timestamp locals (ts/startTime) and the
        # train/test split unpacking were never used.
        self.train()

        print("Saving results...")
        self.serialize(model)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class History(object):
    """Accumulate per-epoch training metrics and expose selection/plot helpers."""

    def __init__(self):
        # One row per epoch: train/val loss and accuracy.
        self.hist = pd.DataFrame(columns=['train loss', 'train acc', 'val loss', 'val acc'])

    # Column formatters for pretty-printing the history table.
    formatters = {
        'train loss': "{:0.8f}".format,
        'train acc': "{:0.3f}".format,
        'val loss': "{:0.8f}".format,
        'val acc': "{:0.3f}".format}

    def add(self, new_epoch):
        """Append one epoch's metrics (ordered like the columns)."""
        self.hist.loc[self.hist.shape[0]] = new_epoch

    def get_last(self):
        """Return the most recent epoch as a one-row frame."""
        return self.hist.tail(1)

    def get_best(self, n=1):
        """Return the *n* epochs with the lowest validation loss."""
        ranked = self.hist.sort_values('val loss')
        return ranked.head(n)

    def get_best_val_acc(self):
        """Return the single highest validation accuracy seen so far."""
        by_acc = self.hist.sort_values('val acc', ascending=False)
        return by_acc.head(1)['val acc'].values[0]

    def get_best_epochs_nb(self, n=1):
        """Return the indices (epoch numbers) of the *n* lowest-val-loss epochs."""
        return self.hist.sort_values('val loss').head(n).index.tolist()

    def get_hist(self):
        """Return the full history frame."""
        return self.hist

    def plot(self, title, avg_w_size=20):
        """Plot smoothed and raw loss/accuracy curves side by side."""
        palette = ['C0', 'C1']
        fig, (loss_ax, acc_ax) = plt.subplots(nrows=1, ncols=2, figsize=(12, 7))
        fig.suptitle(title)
        # Exponentially-weighted smoothing on top of the faint raw curves.
        self.hist[['train loss', 'val loss']].ewm(span=avg_w_size).mean().plot(ax=loss_ax, color=palette)
        self.hist[['train loss', 'val loss']].plot(ax=loss_ax, alpha=0.4, color=palette, legend=False)
        self.hist[['train acc', 'val acc']].ewm(span=avg_w_size).mean().plot(ax=acc_ax, color=palette)
        self.hist[['train acc', 'val acc']].plot(ax=acc_ax, alpha=0.4, color=palette, legend=False)
        loss_ax.set_ylabel('categorical cross entropy')
        loss_ax.set_xlabel('epochs')
        loss_ax.set_yscale('log')
        loss_ax.grid(color='0.8', linewidth=0.5, ls='--')
        acc_ax.set_ylabel('accuracy [% correct]')
        acc_ax.set_xlabel('epochs')
        acc_ax.grid(color='0.8', linewidth=0.5, ls='--')
|
import json
import redis
from django.conf import settings
from tulius.websockets import consts
def publish_message(channel, message):
    """Serialize *message* as JSON and publish it on the named redis channel."""
    connection = redis.Redis(
        settings.REDIS_CONNECTION['host'],
        settings.REDIS_CONNECTION['port'],
        db=settings.REDIS_CONNECTION['db']
    )
    payload = json.dumps(message)
    connection.publish(consts.make_channel_name(channel), payload)
def publish_message_to_user(user, action, pk):
    # Sends a "new private message" notification on the user's personal channel.
    # NOTE(review): the `action` parameter is ignored -- '.action' is hard-coded
    # to 'new_pm'; confirm whether any caller relies on passing another action.
    publish_message(
        consts.CHANNEL_USER.format(user.id), {
            '.direct': True,
            '.action': 'new_pm',
            '.namespaced': 'pm',
            'id': pk,
        })
def notify_user_about_fixes(user, data):
    """Push a 'fixes_update' payload to the user's personal channel."""
    payload = {
        '.direct': True,
        '.action': 'fixes_update',
        '.namespaced': 'fixes_update',
        'data': data,
    }
    publish_message(consts.CHANNEL_USER.format(user.id), payload)
def notify_thread_about_new_comment(sender, thread, comment, page):
    # Broadcasts a new-comment event on the thread's comments channel.
    # NOTE(review): `sender` is unused -- presumably kept so the function can be
    # wired as a signal handler; confirm against the connecting code.
    publish_message(
        consts.THREAD_COMMENTS_CHANNEL.format(thread_id=thread.id),
        {
            '.direct': True,
            '.action': 'new_comment',
            'id': comment.id,
            'parent_id': thread.id,
            'url': comment.get_absolute_url(),
            'page': page,
        })
|
import math
from typing import List
class Solution:
    # Hoisted out of the __main__ guard so the class is importable; the demo
    # below still only runs when the file is executed directly.
    def threeSumClosest(self, nums: List[int], target: int) -> int:
        """Return the sum of three elements of *nums* closest to *target*.

        Replaces the original O(n^3) brute force with the standard
        sort + two-pointer O(n^2) scan. Preserves the original quirk of
        returning [] when fewer than three numbers are given.
        """
        if len(nums) < 3:
            return []
        ordered = sorted(nums)
        best = ordered[0] + ordered[1] + ordered[2]
        for i in range(len(ordered) - 2):
            lo, hi = i + 1, len(ordered) - 1
            while lo < hi:
                total = ordered[i] + ordered[lo] + ordered[hi]
                if total == target:
                    # Cannot do better than an exact match.
                    return target
                if abs(total - target) < abs(best - target):
                    best = total
                if total < target:
                    lo += 1
                else:
                    hi -= 1
        return best


if __name__ == '__main__':
    solution = Solution()
    print(solution.threeSumClosest([-1, 2, 1, -4], 1))
|
# pylint: disable=unused-import
# noinspection PyUnresolvedReferences
from scooter.models import rentals # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import locations # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import scooters # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import users # noqa: F401
|
"""
Tests of dit.example_dists.dependencies
"""
from __future__ import division
import pytest
from dit.example_dists.dependencies import mixed, stacked
from dit.multivariate import coinformation, intrinsic_mutual_information
def test_mixed1():
    """The total coinformation of `mixed` matches the known value of zero."""
    assert coinformation(mixed) == pytest.approx(0.0)
def test_mixed2():
    """Conditional coinformation of `mixed` matches the known value of 2.0."""
    assert coinformation(mixed, [[0], [1]], [2]) == pytest.approx(2.0)
def test_mixed3():
    """Intrinsic mutual information of `mixed` matches the known value of 1.0."""
    assert intrinsic_mutual_information(mixed, [[0], [1]], [2]) == pytest.approx(1.0)
def test_stacked1():
    """The total coinformation of `stacked` matches the known value."""
    assert coinformation(stacked) == pytest.approx(1.5849625007211565)
def test_stacked2():
    """Conditional coinformation of `stacked` matches the known value of 2/3."""
    assert coinformation(stacked, [[0], [1]], [2]) == pytest.approx(2/3)
def test_stacked3():
    """Intrinsic mutual information of `stacked` matches the known value of 1/3."""
    assert intrinsic_mutual_information(stacked, [[0], [1]], [2]) == pytest.approx(1/3)
|
from patternpieces import PatternPieces
class Piece:
    """One puzzle piece: four edge patterns plus placement/rotation state."""

    def __init__(self, idpiece, left=PatternPieces.EDGE, up=PatternPieces.EDGE,
                 right=PatternPieces.EDGE, down=PatternPieces.EDGE):
        self.id = idpiece
        # Edge patterns, one per side.
        self.leftEdge = left
        self.upEdge = up
        self.rightEdge = right
        self.downEdge = down
        # Board placement state.
        self.position = {"x": None, "y": None}
        self.nbofrightrotate = 0
        self.placed = False
        self.imgpath = None

    def switch(self, x):
        """Map a side index (0=up, 1=right, 2=down, 3=left) to its edge; False if invalid."""
        sides = {
            0: self.upEdge,
            1: self.rightEdge,
            2: self.downEdge,
            3: self.leftEdge,
        }
        return sides.get(x, False)

    def getSidePattern(self, rotation):
        """Return the edge at *rotation*, compensating for applied right-rotations."""
        offset = rotation - self.nbofrightrotate
        if offset < 0:
            offset += 4
        return self.switch(offset)
|
"""Flashpoint Test File."""
import demistomock as demisto
import pytest
import json
import io
import datetime
import unittest
from unittest.mock import patch
from CommonServerPython import arg_to_datetime
from Flashpoint import Client, MESSAGES, MAX_PRODUCT, FILTER_DATE_VALUES, IS_FRESH_VALUES, MAX_PAGE_SIZE, \
SORT_DATE_VALUES, SORT_ORDER_VALUES
# Integration API key, read from the configured instance parameters.
API_KEY = demisto.getParam('api_key')
# Base href expected in indicator lookup responses (fake host for tests).
HREF_BASE_URL = 'http://123-fake-api.com/api/v4/indicators/attribute/' # NOSONAR
# Canned lookup inputs used across the indicator tests below.
TEST_SCAN_DOMAIN = 'fakedomain.com'
TEST_SCAN_IP = '0.0.0.0'
TEST_SCAN_FILENAME = 'fakefilename'
TEST_SCAN_URL = 'http://123-fake-api.com' # NOSONAR
TEST_SCAN_FILE = 'test_scan_dummy_file'
TEST_SCAN_EMAIL = 'fakeemail@test.com'
# Report / event / forum identifiers for the by-id tests.
TEST_SCAN_REPORT_KEYWORD = 'fakexyz'
TEST_SCAN_REPORT_ID = 'test_scan_id'
TEST_SCAN_EVENT_ID = 'test_scan_id'
TEST_SCAN_FORUM_ID = 'test_scan_forum_id'
TEST_SCAN_FORUM_ROOM_ID = 'test_scan_forum_room_id'
TEST_SCAN_FORUM_USER_ID = 'test_scan_forum_user_id'
TEST_SCAN_FORUM_POST_ID = 'test_scan_forum_post_id'
TEST_SITE_SEARCH_KEYWORD = 'test'
TEST_POST_SEARCH_KEYWORD = 'testing'
# Expected validation error fragments.
INVALID_DATE_MESSAGE = '"abc" is not a valid date'
START_DATE = '2021-07-18T12:02:45Z'
def util_load_json(path: str) -> dict:
    """Load the UTF-8 JSON document at *path* and return the parsed object."""
    with io.open(path, mode='r', encoding='utf-8') as handle:
        raw = handle.read()
    return json.loads(raw)
class MyTestCase(unittest.TestCase):
"""Test case class."""
client = Client(API_KEY, "url", False, None, True)
@patch("Flashpoint.Client.http_request")
def test_test_module(self, mocker):
"""Test test_module."""
from Flashpoint import test_module
test_module(client=self.client, params={})
@patch("Flashpoint.Client.http_request")
def test_max_fetch_limit_failure(self, mocker):
"""Tests max_fetch parameter failure scenario."""
from Flashpoint import test_module
with pytest.raises(ValueError) as error1:
test_module(self.client, {"isFetch": True, "max_fetch": 0})
assert str(error1.value) == MESSAGES["INVALID_MAX_FETCH"].format(0)
@patch("Flashpoint.Client.http_request")
def test_max_fetch_value_failure(self, mocker):
"""Tests max_fetch parameter failure scenario."""
from Flashpoint import test_module
with pytest.raises(ValueError) as error2:
test_module(self.client, {"isFetch": True, "max_fetch": "a"})
assert str(error2.value) == '"a" is not a valid number'
@patch("Flashpoint.Client.http_request")
def test_first_fetch_failure(self, mocker):
"""Tests first_fetch parameter failure scenario."""
from Flashpoint import test_module
with pytest.raises(ValueError) as error3:
test_module(self.client, {"isFetch": True, "first_fetch": "abc"})
assert str(error3.value) == INVALID_DATE_MESSAGE
@patch("Flashpoint.Client.http_request")
def test_domain(self, mocker):
"""Test domain_lookup_command."""
from Flashpoint import domain_lookup_command
with open("./TestData/domain_response.json", encoding='utf-8') as f:
expected = json.load(f)
mocker.return_value = expected
command_result = domain_lookup_command(self.client, TEST_SCAN_DOMAIN)
resp = command_result.to_context().get('Contents')
result = self.get_result(resp)
# ec = command_result.to_context().get('EntryContext')
#
# with open("./TestData/domain_ec.json", encoding='utf-8') as f:
# expected_ec = json.load(f)
fpid = result['fpid']
assert result['name'] == TEST_SCAN_DOMAIN
assert result['href'] == HREF_BASE_URL + fpid
assert expected == resp
# assert expected_ec == ec # Testing CommandResult object, should not check that function
@patch("Flashpoint.Client.http_request")
def test_ip(self, mocker):
"""Test ip_lookup_command."""
from Flashpoint import ip_lookup_command
with open("./TestData/ip_response.json", encoding='utf-8') as f:
expected = json.load(f)
mocker.return_value = expected
command_result = ip_lookup_command(self.client, TEST_SCAN_IP)
resp = command_result.to_context().get('Contents')
result = self.get_result(resp)
# ec = command_result.to_context().get('EntryContext')
#
# with open("./TestData/ip_ec.json", encoding='utf-8') as f:
# expected_ec = json.load(f)
fpid = result['fpid']
assert result['name'] == TEST_SCAN_IP
assert result['href'] == HREF_BASE_URL + fpid
assert expected == resp
# assert expected_ec == ec # Testing CommandResult object, should not check that function
@patch("Flashpoint.Client.http_request")
def test_filename(self, mocker):
"""Test filename_lookup_command."""
from Flashpoint import filename_lookup_command
with open("./TestData/filename_response.json", encoding='utf-8') as f:
expected = json.load(f)
mocker.return_value = expected
hr, ec, resp = filename_lookup_command(self.client, TEST_SCAN_FILENAME)
result = self.get_result(resp)
with open("./TestData/filename_ec.json", encoding='utf-8') as f:
expected_ec = json.load(f)
fpid = result['fpid']
assert result['name'] == TEST_SCAN_FILENAME
assert result['href'] == HREF_BASE_URL + fpid
assert expected == resp
assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_url(self, mocker):
"""Test url_lookup_command."""
from Flashpoint import url_lookup_command
with open("./TestData/url_response.json", encoding='utf-8') as f:
expected = json.load(f)
mocker.return_value = expected
command_result = url_lookup_command(self.client, TEST_SCAN_URL)
resp = command_result.to_context().get('Contents')
result = self.get_result(resp)
# ec = command_result.to_context().get('EntryContext')
#
# with open("./TestData/url_ec.json", encoding='utf-8') as f:
# expected_ec = json.load(f)
fpid = result['fpid']
assert result['name'] == TEST_SCAN_URL
assert result['href'] == HREF_BASE_URL + fpid
assert expected == resp
# assert expected_ec == ec # Testing CommandResult object, should not check that function
@patch("Flashpoint.Client.http_request")
def test_file(self, mocker):
"""Test file_lookup_command."""
from Flashpoint import file_lookup_command
with open("./TestData/file_response.json", encoding='utf-8') as f:
expected = json.load(f)
mocker.return_value = expected
command_result = file_lookup_command(self.client, TEST_SCAN_FILE)
resp = command_result.to_context().get('Contents')
result = self.get_result(resp)
# ec = command_result.to_context().get('EntryContext')
#
# with open("./TestData/file_ec.json", encoding='utf-8') as f:
# expected_ec = json.load(f)
fpid = result['fpid']
assert result['name'] == TEST_SCAN_FILE
assert result['href'] == HREF_BASE_URL + fpid
assert expected == resp
# assert expected_ec == ec # Testing CommandResult object, should not check that function
@patch("Flashpoint.Client.http_request")
def test_email(self, mocker):
"""Test email_lookup_command."""
from Flashpoint import email_lookup_command
with open("./TestData/email_response.json", encoding='utf-8') as f:
expected = json.load(f)
mocker.return_value = expected
hr, ec, resp = email_lookup_command(self.client, TEST_SCAN_EMAIL)
result = self.get_result(resp)
with open("./TestData/email_ec.json", encoding='utf-8') as f:
expected_ec = json.load(f)
fpid = result['fpid']
assert result['name'] == TEST_SCAN_EMAIL
assert result['href'] == HREF_BASE_URL + fpid
assert expected == resp
assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_report_search_by_keyword(self, mocker):
"""Test get_reports_command."""
from Flashpoint import get_reports_command
with open("./TestData/report_search_by_keyword_response.json", encoding='utf-8') as f:
expected = json.load(f)
args = {
'report_search': TEST_SCAN_REPORT_KEYWORD
}
mocker.return_value = expected
hr, ec, resp = get_reports_command(self.client, args)
assert resp['data'][0]['title'] == TEST_SCAN_REPORT_KEYWORD
assert expected == resp
@patch("Flashpoint.Client.http_request")
def test_report_search_by_id(self, mocker):
    """Test get_report_by_id_command returns the mocked report and expected entry context."""
    from Flashpoint import get_report_by_id_command
    # The canned API response doubles as the expected raw result.
    with open("./TestData/report_search_by_id_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'report_id': TEST_SCAN_REPORT_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_report_by_id_command(self.client, args)
    with open("./TestData/report_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    assert resp['id'] == TEST_SCAN_REPORT_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_event_search_by_id(self, mocker):
    """Test get_event_by_id_command returns the mocked event and expected entry context."""
    from Flashpoint import get_event_by_id_command
    with open("./TestData/event_search_by_id_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'event_id': TEST_SCAN_EVENT_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_event_by_id_command(self.client, args)
    with open("./TestData/event_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    # Events come back as a list; the first hit carries the queried fpid.
    assert resp[0]['fpid'] == TEST_SCAN_EVENT_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_event_search_by_id_when_no_malware_description_found(self, mocker):
    """Test get_event_by_id_command when the response has no malware description."""
    from Flashpoint import get_event_by_id_command
    with open("./TestData/event_search_by_id_response_no_malware_description.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'event_id': TEST_SCAN_EVENT_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_event_by_id_command(self.client, args)
    with open("./TestData/event_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    # Without malware_description in response should not be considered in EC
    expected_ec.get('Flashpoint.Event(val.EventId == obj.EventId)').pop('MalwareDescription')
    assert resp[0]['fpid'] == TEST_SCAN_EVENT_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_forum_search_by_id(self, mocker):
    """Test get_forum_details_by_id_command returns the mocked forum and expected entry context."""
    from Flashpoint import get_forum_details_by_id_command
    with open("./TestData/forum_search_by_id_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'forum_id': TEST_SCAN_FORUM_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_forum_details_by_id_command(self.client, args)
    with open("./TestData/forum_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    assert resp['id'] == TEST_SCAN_FORUM_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_forum_room_search_by_id(self, mocker):
    """Test get_room_details_by_id_command returns the mocked room and expected entry context."""
    from Flashpoint import get_room_details_by_id_command
    with open("./TestData/forum_room_search_by_id_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'room_id': TEST_SCAN_FORUM_ROOM_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_room_details_by_id_command(self.client, args)
    with open("./TestData/forum_room_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    assert resp['id'] == TEST_SCAN_FORUM_ROOM_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_forum_user_search_by_id(self, mocker):
    """Test get_user_details_by_id_command returns the mocked user and expected entry context."""
    from Flashpoint import get_user_details_by_id_command
    with open("./TestData/forum_user_search_by_id_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'user_id': TEST_SCAN_FORUM_USER_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_user_details_by_id_command(self.client, args)
    with open("./TestData/forum_user_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    assert resp['id'] == TEST_SCAN_FORUM_USER_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_forum_post_search_by_id(self, mocker):
    """Test get_post_details_by_id_command returns the mocked post and expected entry context."""
    from Flashpoint import get_post_details_by_id_command
    with open("./TestData/forum_post_search_by_id_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'post_id': TEST_SCAN_FORUM_POST_ID
    }
    mocker.return_value = expected
    hr, ec, resp = get_post_details_by_id_command(self.client, args)
    with open("./TestData/forum_post_search_by_id_ec.json", encoding='utf-8') as f:
        expected_ec = json.load(f)
    assert resp['id'] == TEST_SCAN_FORUM_POST_ID
    assert expected == resp
    assert expected_ec == ec
@patch("Flashpoint.Client.http_request")
def test_search_events(self, mocker):
    """Test get_events_command passes through the mocked API response."""
    from Flashpoint import get_events_command
    with open("./TestData/events_search_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    mocker.return_value = expected
    # Optional filters are explicitly None to exercise the default search path.
    args = {
        "limit": 5,
        "report_fpid": None,
        "attack_id": None,
        "time_period": None,
    }
    hr, ec, resp = get_events_command(self.client, args)
    assert expected == resp
@patch("Flashpoint.Client.http_request")
def test_forum_site_search(self, mocker):
    """Test get_forum_sites_command passes through the mocked API response."""
    from Flashpoint import get_forum_sites_command
    with open("./TestData/forum_site_search_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    mocker.return_value = expected
    args = {
        'site_search': TEST_SITE_SEARCH_KEYWORD
    }
    hr, ec, resp = get_forum_sites_command(self.client, args)
    assert expected == resp
@patch("Flashpoint.Client.http_request")
def test_forum_post_search(self, mocker):
    """Test get_forum_posts_command passes through the mocked API response."""
    from Flashpoint import get_forum_posts_command
    with open("./TestData/forum_post_search_response.json", encoding='utf-8') as f:
        expected = json.load(f)
    args = {
        'post_search': TEST_POST_SEARCH_KEYWORD
    }
    mocker.return_value = expected
    hr, ec, resp = get_forum_posts_command(self.client, args)
    assert expected == resp
def test_validate_alert_list_args_when_valid_args_are_provided(self):
    """Test case scenario when the arguments provided are valid."""
    from Flashpoint import validate_alert_list_args
    args = {
        'size': '5',
        'since': '03/07/2021',
        'scroll_id': ''
    }
    # Expect size coerced to int, the date normalized to ISO-8601 Zulu,
    # and the empty scroll_id dropped.
    fetch_args = {
        'size': 5,
        'since': '2021-03-07T00:00:00Z',
    }
    assert validate_alert_list_args(args) == fetch_args
def test_validate_alert_list_args_when_size_is_invalid(self):
    """Test case scenario when the argument named size is invalid."""
    from Flashpoint import validate_alert_list_args
    # Both below-range and above-range sizes must be rejected.
    with pytest.raises(ValueError) as err:
        validate_alert_list_args({'size': '-1'})
    assert str(err.value) == MESSAGES['SIZE_ERROR'].format('-1')
    with pytest.raises(ValueError) as err:
        validate_alert_list_args({'size': '101'})
    assert str(err.value) == MESSAGES['SIZE_ERROR'].format('101')
def test_validate_alert_list_args_when_since_is_invalid(self):
    """Test case scenario when the argument named since is invalid."""
    from Flashpoint import validate_alert_list_args
    with pytest.raises(ValueError) as err:
        validate_alert_list_args({'since': 'abc'})
    assert str(err.value) == INVALID_DATE_MESSAGE
def test_validate_alert_list_args_when_until_is_invalid(self):
    """Test case scenario when the argument named until is invalid."""
    from Flashpoint import validate_alert_list_args
    with pytest.raises(ValueError) as err:
        validate_alert_list_args({'until': 'abc'})
    assert str(err.value) == INVALID_DATE_MESSAGE
@patch("Flashpoint.Client.http_request")
def test_alert_list_command_when_valid_response_is_returned(self, mocker):
    """Test case scenario when valid response is returned."""
    from Flashpoint import flashpoint_alert_list_command
    response = util_load_json("TestData/alert_list_response.json")
    mocker.return_value = response
    context = util_load_json("TestData/alert_list.json")
    expected_hr = util_load_json("TestData/alert_hr.json")
    result = flashpoint_alert_list_command(self.client, {})
    assert result.raw_response == response
    assert result.outputs == context
    assert result.readable_output == expected_hr.get('Data')
@patch("Flashpoint.Client.http_request")
def test_alert_list_command_when_empty_response_is_returned(self, mocker):
    """Test case scenario when empty response is returned."""
    from Flashpoint import flashpoint_alert_list_command
    mocker.return_value = {}
    result = flashpoint_alert_list_command(self.client, {})
    assert result.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('alerts')
@patch("Flashpoint.Client.http_request")
def test_alert_list_command_when_invalid_response_is_returned(self, mocker):
    """Test case scenario when invalid (malformed) response is returned."""
    from Flashpoint import prepare_hr_for_alerts
    # Alert record missing mandatory data should make HR preparation fail.
    alerts = {
        "data": [
            {"source": {"created_at": {}, "last_observed": {"date-time": "dummy"}, "file": ""}}
        ]
    }
    with pytest.raises(ValueError) as er:
        prepare_hr_for_alerts(alerts.get("data"))
    assert str(er.value) == MESSAGES['MISSING_DATA'].format('Alerts')
def test_validate_compromised_credentials_list_args_when_valid_args_are_provided(self):
    """Test case scenario when the arguments provided are valid."""
    from Flashpoint import validate_compromised_credentials_list_args
    args = {
        'page_size': '50',
        'page_number': '2',
        'start_date': '06-01-2021',
        'end_date': '07-01-2021',
        'filter_date': 'created_at',
        'sort_date': 'created_at',
        'sort_order': 'desc',
        'is_fresh': 'true'
    }
    # Expected translation into the search API's query/skip/limit/sort params.
    params = {
        'query': '+basetypes:(credential-sighting) +breach.created_at.date-time: [2021-06-01T00:00:00Z'
                 ' TO 2021-07-01T00:00:00Z] +is_fresh:true',
        'skip': 50,
        'limit': 50,
        'sort': 'breach.created_at.timestamp:desc'
    }
    assert validate_compromised_credentials_list_args(args) == params
def test_validate_compromised_credentials_list_args_when_page_size_is_invalid(self):
    """Test case scenario when the argument named page_size is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'page_size': '-1'})
    assert str(err.value) == MESSAGES['PAGE_SIZE_ERROR'].format('-1', MAX_PAGE_SIZE)
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'page_size': '1001'})
    assert str(err.value) == MESSAGES['PAGE_SIZE_ERROR'].format('1001', MAX_PAGE_SIZE)
def test_validate_compromised_credentials_list_args_when_page_number_is_invalid(self):
    """Test case scenario when the argument named page_number is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'page_number': '0'})
    assert str(err.value) == MESSAGES['PAGE_NUMBER_ERROR'].format('0')
def test_validate_compromised_credentials_list_args_when_product_is_invalid(self):
    """Test case scenario when the product of page_size and page_number is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    # 1000 * 20 = 20000 exceeds the MAX_PRODUCT pagination cap.
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'page_size': '1000', 'page_number': '20'})
    assert str(err.value) == MESSAGES['PRODUCT_ERROR'].format(MAX_PRODUCT, 20000)
def test_validate_compromised_credentials_list_args_when_start_date_is_invalid(self):
    """Test case scenario when the argument named start_date is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'start_date': 'abc'})
    assert str(err.value) == INVALID_DATE_MESSAGE
def test_validate_compromised_credentials_list_args_when_end_date_is_invalid(self):
    """Test case scenario when the argument named end_date is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'end_date': 'def days'})
    assert str(err.value) == '"def days" is not a valid date'
def test_validate_compromised_credentials_list_args_when_start_date_is_not_provided(self):
    """Test case scenario when the argument named end_date is provided but start_date is not provided."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'end_date': '3 days'})
    assert str(err.value) == MESSAGES['START_DATE_ERROR']
def test_validate_compromised_credentials_list_args_when_filter_date_is_invalid(self):
    """Test case scenario when the argument named filter_date is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'filter_date': 'indexed_at'})
    assert str(err.value) == MESSAGES['FILTER_DATE_ERROR'].format('indexed_at', FILTER_DATE_VALUES)
def test_validate_compromised_credentials_list_args_when_dates_are_missing(self):
    """Test case scenario when filter_date is provided but start_date and end_date is missing."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'filter_date': 'created_at'})
    assert str(err.value) == MESSAGES['MISSING_DATE_ERROR']
def test_validate_compromised_credentials_list_args_when_filter_date_is_missing(self):
    """Test case scenario when start_date and end_date are provided but filter_date is missing."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'start_date': '3 days'})
    assert str(err.value) == MESSAGES['MISSING_FILTER_DATE_ERROR']
def test_validate_compromised_credentials_list_args_when_sort_date_is_invalid(self):
    """Test case scenario when the argument named sort_date is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'sort_date': 'indexed_at'})
    assert str(err.value) == MESSAGES['SORT_DATE_ERROR'].format('indexed_at', SORT_DATE_VALUES)
def test_validate_compromised_credentials_list_args_when_sort_order_is_invalid(self):
    """Test case scenario when the argument named sort_order is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'sort_order': 'none'})
    assert str(err.value) == MESSAGES['SORT_ORDER_ERROR'].format('none', SORT_ORDER_VALUES)
def test_validate_compromised_credentials_list_args_when_sort_date_is_missing(self):
    """Test case scenario when the sort_order is provided but sort_date is missing."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'sort_order': 'asc'})
    assert str(err.value) == MESSAGES['MISSING_SORT_DATE_ERROR']
def test_validate_compromised_credentials_list_args_when_is_fresh_is_invalid(self):
    """Test case scenario when the argument named is_fresh is invalid."""
    from Flashpoint import validate_compromised_credentials_list_args
    with pytest.raises(ValueError) as err:
        validate_compromised_credentials_list_args({'is_fresh': 'none'})
    assert str(err.value) == MESSAGES['IS_FRESH_ERROR'].format('none', IS_FRESH_VALUES)
@patch("Flashpoint.Client.http_request")
def test_compromised_credentials_list_command_when_valid_response_is_returned(self, mocker):
    """Test case scenario when valid response is returned."""
    from Flashpoint import flashpoint_compromised_credentials_list_command
    response = util_load_json("TestData/compromised_credentials_list_response.json")
    mocker.return_value = response
    context = util_load_json("TestData/compromised_credentials_list.json")
    expected_hr = util_load_json("TestData/compromised_credentials_hr.json")
    result = flashpoint_compromised_credentials_list_command(self.client, {})
    assert result.outputs == context
    assert result.raw_response == response
    assert result.readable_output == expected_hr.get('Data')
@patch("Flashpoint.Client.http_request")
def test_compromised_credentials_list_command_when_empty_response_is_returned(self, mocker):
    """Test case scenario when empty response is returned."""
    from Flashpoint import flashpoint_compromised_credentials_list_command
    mocker.return_value = {}
    result = flashpoint_compromised_credentials_list_command(self.client, {})
    assert result.readable_output == MESSAGES['NO_RECORDS_FOUND'].format('compromised credentials')
def test_prepare_args_for_alerts_when_valid_args_are_provided(self):
    """Test case scenario when the arguments provided are valid."""
    from Flashpoint import prepare_args_for_fetch_alerts
    # last_run values (since / scroll_id) take precedence over start_time.
    last_run = {
        'since': START_DATE,
        'scroll_id': 'dummy-scroll-id'
    }
    expected_args = {
        'size': 15,
        'since': START_DATE,
        'scroll_id': 'dummy-scroll-id'
    }
    args = prepare_args_for_fetch_alerts(max_fetch=15, start_time='2021-07-28T00:00:00Z', last_run=last_run)
    assert args == expected_args
def test_prepare_args_for_alerts_when_max_fetch_is_invalid(self):
    """Test case scenario when argument named max_fetch is invalid."""
    from Flashpoint import prepare_args_for_fetch_alerts
    with pytest.raises(ValueError) as err:
        prepare_args_for_fetch_alerts(max_fetch=-1, start_time='', last_run={})
    assert str(err.value) == MESSAGES['INVALID_MAX_FETCH'].format(-1)
def test_prepare_args_for_compromised_credentials_when_valid_args_are_provided(self):
    """Test case scenario when the arguments provided are valid."""
    from Flashpoint import prepare_args_for_fetch_compromised_credentials
    # The query's upper bound is "now" as an epoch timestamp.
    end_date = arg_to_datetime('now')
    end_date = datetime.datetime.timestamp(end_date)
    expected_args = {
        'limit': 15,
        'query': '+basetypes:(credential-sighting) +header_.indexed_at: [1626609765'
                 ' TO {}] +is_fresh:true'.format(int(end_date)),
        'skip': 0,
        'sort': 'header_.indexed_at:asc'
    }
    args = prepare_args_for_fetch_compromised_credentials(max_fetch=15, start_time=START_DATE,
                                                          is_fresh=True, last_run={})
    assert args == expected_args
def test_prepare_args_for_compromised_credentials_when_max_fetch_is_invalid(self):
    """Test case scenario when argument named max_fetch is invalid."""
    from Flashpoint import prepare_args_for_fetch_compromised_credentials
    with pytest.raises(ValueError) as err:
        prepare_args_for_fetch_compromised_credentials(max_fetch=0, start_time='', is_fresh=True, last_run={})
    assert str(err.value) == MESSAGES['INVALID_MAX_FETCH'].format(0)
def test_validate_fetch_incidents_params_when_valid_params_are_provided(self):
    """Test case scenario when the arguments provided are valid."""
    from Flashpoint import validate_fetch_incidents_params
    params = {
        'fetch_type': 'Alerts',
        'first_fetch': START_DATE,
        'max_fetch': '20',
        'is_fresh_compromised_credentials': False
    }
    fetch_params = {
        'size': 20,
        'since': START_DATE,
    }
    expected_params = {
        'fetch_type': 'Alerts',
        'start_time': START_DATE,
        'fetch_params': fetch_params
    }
    assert validate_fetch_incidents_params(params, {}) == expected_params
    # Second pass: no fetch_type defaults to 'Compromised Credentials',
    # and last_run drives the query window / skip offset.
    del params['fetch_type']
    start_time = '2021-08-04T10:10:00Z'
    last_run = {
        'fetch_count': 1,
        'end_time': '2021-08-05T03:43:52Z',
        'start_time': start_time,
        'fetch_sum': 20
    }
    fetch_params = {
        'limit': 20,
        'query': '+basetypes:(credential-sighting) +header_.indexed_at: [1628071800'
                 ' TO 1628135032]',
        'skip': 20,
        'sort': 'header_.indexed_at:asc'
    }
    expected_params = {
        'fetch_type': 'Compromised Credentials',
        'start_time': start_time,
        'fetch_params': fetch_params
    }
    assert validate_fetch_incidents_params(params, last_run) == expected_params
def test_validate_fetch_incidents_params_when_first_fetch_is_invalid(self):
    """Test case scenario when argument named first_fetch is invalid."""
    from Flashpoint import validate_fetch_incidents_params
    with pytest.raises(ValueError) as err:
        validate_fetch_incidents_params({"first_fetch": "abc"}, {})
    assert str(err.value) == INVALID_DATE_MESSAGE
    with pytest.raises(ValueError) as err:
        validate_fetch_incidents_params({"first_fetch": None}, {})
    assert str(err.value) == MESSAGES['INVALID_FIRST_FETCH']
def test_validate_fetch_incidents_params_when_max_fetch_is_invalid(self):
    """Test case scenario when argument named max_fetch is invalid."""
    from Flashpoint import validate_fetch_incidents_params
    with pytest.raises(ValueError) as err:
        validate_fetch_incidents_params({"max_fetch": "abc"}, {})
    assert str(err.value) == '"abc" is not a valid number'
    with pytest.raises(ValueError) as err:
        validate_fetch_incidents_params({"max_fetch": ""}, {})
    assert str(err.value) == MESSAGES['INVALID_MAX_FETCH'].format('None')
def test_remove_duplicate_records(self):
    """Test case scenario when there are duplicate records."""
    from Flashpoint import remove_duplicate_records
    alerts = util_load_json("TestData/fetch_alert_list.json")
    # IDs already seen in a previous fetch cycle must be filtered out.
    next_run = {
        'alert_ids': [
            '3d376ab6-a1bd-4acc-84e6-2c385f51a3ea',
            '86dfde39-a9f5-4ab8-a8f9-1890146034a0',
            'ed707017-26c4-4551-b3a0-3856c54d699b'
        ]
    }
    expected_alerts = util_load_json("TestData/fetch_alert_list_after_removing_duplication.json")
    assert remove_duplicate_records(alerts, "Alerts", next_run) == expected_alerts
def test_prepare_incidents_from_alerts_data_when_valid_response_is_returned(self):
    """Test case scenario when the given data is valid."""
    from Flashpoint import prepare_incidents_from_alerts_data
    start_time = '2021-06-16T02:22:14Z'
    response = util_load_json('TestData/alert_list_response.json')
    expected_incidents = util_load_json('TestData/incidents_alerts.json')
    expected_next_run = {
        'alert_ids': ['2983ad0b-b03d-4202-bea7-65dd94697b5b', 'a31a9f81-988b-47c0-9739-1300e1855f6b'],
        'start_time': '2021-07-28T16:56:07Z',
        'scroll_id': 'f97c16ab5408f3bb7df60e58c5b24a57$1623810166.258678',
        'since': start_time,
        'size': '1',
        'until': '2021-06-16T02:45:00Z'
    }
    next_run, incidents = prepare_incidents_from_alerts_data(response, {}, start_time)
    assert next_run == expected_next_run
    assert incidents == expected_incidents
def test_prepare_incidents_from_alerts_data_when_empty_response_is_returned(self):
    """Test case scenario when empty response is returned."""
    from Flashpoint import prepare_incidents_from_alerts_data
    expected_next_run = {
        'scroll_id': None,
        'since': START_DATE
    }
    next_run, incidents = prepare_incidents_from_alerts_data({}, {}, START_DATE)
    assert next_run == expected_next_run
    assert incidents == []
def test_prepare_incidents_from_compromised_credentials_data_when_valid_response_is_returned(self):
    """Test case scenario when the given data is valid."""
    from Flashpoint import prepare_incidents_from_compromised_credentials_data
    response = util_load_json('TestData/compromised_credentials_list_response.json')
    next_run = {
        'fetch_count': 0,
        'fetch_sum': 100
    }
    expected_incidents = util_load_json('TestData/incidents_compromised_credentials.json')
    expected_next_run = {
        'total': 1302,
        'fetch_count': 1,
        'fetch_sum': 100,
        'start_time': START_DATE,
        'hit_ids': ['YOBETNFzX0Ohjiq0xi_2Eg'],
        'last_time': '2021-03-31T19:42:05Z',
        'last_timestamp': 1617219725
    }
    next_run, incidents = prepare_incidents_from_compromised_credentials_data(response, next_run, START_DATE)
    assert next_run == expected_next_run
    assert incidents == expected_incidents
    # Second pass: once fetch_sum covers the total, counters reset and the
    # window start moves to the last observed record time.
    end_time = '2021-08-05T17:50:00Z'
    last_time = '2021-03-31T19:42:05Z'
    next_run = {
        'fetch_count': 2,
        'fetch_sum': 100,
        'start_time': START_DATE,
        'end_time': end_time
    }
    expected_next_run = {
        'total': None,
        'fetch_count': 0,
        'fetch_sum': 0,
        'start_time': last_time,
        'end_time': end_time,
        'hit_ids': ['YOBETNFzX0Ohjiq0xi_2Eg'],
        'last_time': last_time,
        'last_timestamp': 1617219725
    }
    response['hits']['total'] = 100
    next_run, _ = prepare_incidents_from_compromised_credentials_data(response, next_run, START_DATE)
    assert next_run == expected_next_run
def test_prepare_incidents_from_compromised_credentials_data_when_empty_response_is_returned(self):
    """Test case scenario when empty response is returned."""
    from Flashpoint import prepare_incidents_from_compromised_credentials_data
    next_run = {
        'fetch_sum': 100,
        'fetch_count': 0,
    }
    expected_next_run = {
        'fetch_sum': 0,
        'fetch_count': 0,
        'total': None
    }
    next_run, incidents = prepare_incidents_from_compromised_credentials_data({'hits': {'total': 0}}, next_run,
                                                                              START_DATE)
    assert next_run == expected_next_run
    assert incidents == []
def test_prepare_incidents_from_compromised_credentials_data_when_email_is_not_present(self):
    """Test case scenario when email key is not present in the response."""
    from Flashpoint import prepare_incidents_from_compromised_credentials_data
    next_run = {
        'fetch_count': 0,
        'fetch_sum': 100
    }
    response = util_load_json("TestData/compromised_credentials_list_response.json")
    del response['hits']['hits'][0]['_source']['email']
    expected_incidents = util_load_json("TestData/incidents_compromised_credentials_when_email_not_present.json")
    _, incidents = prepare_incidents_from_compromised_credentials_data(response, next_run, START_DATE)
    assert incidents == expected_incidents
def test_prepare_incidents_from_compromised_credentials_data_when_fpid_is_not_present(self):
    """Test case scenario when fpid key is not present in the response."""
    from Flashpoint import prepare_incidents_from_compromised_credentials_data
    next_run = {
        'fetch_count': 0,
        'fetch_sum': 100
    }
    response = util_load_json("TestData/compromised_credentials_list_response.json")
    del response['hits']['hits'][0]['_source']['email']
    del response['hits']['hits'][0]['_source']['fpid']
    expected_incidents = util_load_json("TestData/incidents_compromised_credentials_when_fpid_not_present.json")
    _, incidents = prepare_incidents_from_compromised_credentials_data(response, next_run, START_DATE)
    assert incidents == expected_incidents
def test_prepare_incidents_from_compromised_credentials_data_when_records_are_more_than_limit(self):
    """Test case scenario when the records are more than 10k."""
    from Flashpoint import prepare_incidents_from_compromised_credentials_data
    response = util_load_json("TestData/compromised_credentials_list_response.json")
    total = 10001
    response['hits']['total'] = total
    with pytest.raises(ValueError) as err:
        prepare_incidents_from_compromised_credentials_data(response, {'fetch_count': 0}, START_DATE)
    assert str(err.value) == MESSAGES['TIME_RANGE_ERROR'].format(total)
def test_prepare_incidents_from_compromised_credentials_data_when_duplicate_records_are_present(self):
    """Test case scenario when the records are duplicate."""
    from Flashpoint import prepare_incidents_from_compromised_credentials_data
    end_time = '2021-08-16T12:50:00Z'
    last_time = '2021-08-13T12:07:37Z'
    # All hit IDs in the response were already seen, so no incidents result.
    hit_ids = ['sIgauE9_X_m-y4NG-YuFig', 'kpKTMfErVDeb_zc60b52rg', '8m1IiImZVLOjdSOa16WKug',
               'BeGFUbnlVMaur1g2u242sg', 'e_xaqvFdUz6ssGVbbXG7WA', 'yvzOnxaMVTKljLaSOYdILQ',
               'fhiwOzONUDmZNq1TP092Zg', 'I-lA13YAUTmvB_XR9s6DXA', 'f1k62JNjUgu__CmUWSKrcw',
               'HOr1NJB-X4yxJjmBhF3j1Q', 'E-w8zTgAUoCIdm0BZzLcyA', 'm-QJuetCX-6dbbBPedwqew',
               'ueX_g5ZMW824FG-DpWecZg', 'qY7WhCzSV0aX2l39CIvCKg', '4Ztk3NxdULiozsxk2YYa2w']
    next_run = {
        'total': None,
        'fetch_count': 0,
        'fetch_sum': 15,
        'start_time': last_time,
        'end_time': end_time,
        'hit_ids': hit_ids,
        'last_time': last_time,
        'last_timestamp': 1628856457
    }
    expected_next_run = {
        'total': 46,
        'fetch_count': 1,
        'fetch_sum': 15,
        'start_time': last_time,
        'end_time': end_time,
        'hit_ids': hit_ids,
        'last_time': last_time,
        'last_timestamp': 1628856457
    }
    response = util_load_json("TestData/compromised_credentials_duplicate_records.json")
    next_run, incidents = prepare_incidents_from_compromised_credentials_data(response, next_run, last_time)
    assert incidents == []
    assert next_run == expected_next_run
@patch("Flashpoint.Client.http_request")
def test_fetch_incidents_when_valid_response_is_returned(self, mocker):
    """Test case scenario for successful execution of fetch_incident."""
    from Flashpoint import fetch_incidents
    # Empty fetch_type exercises the default (compromised credentials) path.
    response = util_load_json('TestData/compromised_credentials_list_response.json')
    mocker.return_value = response
    expected_incidents = util_load_json('TestData/incidents_compromised_credentials.json')
    params = {'max_fetch': '1', 'first_fetch': '1 year', 'fetch_type': ''}
    _, incidents = fetch_incidents(self.client, {}, params)
    assert incidents == expected_incidents
    response = util_load_json('TestData/alert_list_response.json')
    mocker.return_value = response
    expected_incidents = util_load_json('TestData/incidents_alerts.json')
    params = {'max_fetch': '1', 'first_fetch': '1 year', 'fetch_type': 'Alerts'}
    _, incidents = fetch_incidents(self.client, {}, params)
    assert incidents == expected_incidents
def get_result(self, resp):
    """Flatten the first attribute record of *resp* into a small summary dict.

    :param resp: list of records, each shaped like {'Attribute': {...}}
    :returns: dict with keys 'name', 'type', 'fpid' and 'href'
    """
    attribute = resp[0]['Attribute']
    # 'type' names the key under 'value' that holds the indicator itself.
    attr_type = attribute['type']
    return {
        'name': attribute['value'][attr_type],
        'type': attr_type,
        'fpid': attribute['fpid'],
        'href': attribute['href'],
    }
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
# Task 6. Variant 32
# Game: the computer picks the name of one of the 28 member countries of the
# European Union, and the player has to guess it.
# 24.03.2016
# Shirlin Vyacheslav Viktorovich
import random
print ("Программа загадывает название одного из двадцати восьми стран, входящих в Европейский союз, а игрок должен его угадать. \n\n\n")
# NOTE: the stray leading space in 'Великобритания' is removed so that typed
# input can actually match the stored name.
a = ['Австрия', 'Бельгия', 'Болгария', 'Великобритания', 'Венгрия','Германия','Греция','Дания','Ирландия','Испания','Италия','Кипр','Латвия','Литва','Люксембург','Мальта','Нидерланды','Польша','Словакия','Словения','Португалия','Румыния','Финляндия','Франция','Хорватия','Чехия','Швеция','Эстония',]
# BUG FIX: the original drew a random *index* with randint(0, 28) — which can
# be 28, one past the last valid index — and then compared that integer with
# the typed *string*, so the guess could never be recognised.  Pick the secret
# country name itself instead.
secret = random.choice(a)
answer = ''
while secret != answer:
    answer = input ("Введите ваш вариант")
    if secret == answer:
        print ("Молодец, возьми с полки пряник")
    else:
        print ("С кем не бывает")
input ("Нажмите Enter для выхода.")
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from django.views import View
from django.core import serializers
from .models import RiskType, Field, NumberField, DateField, TextField
import json
from django.http import JsonResponse
import datetime
class IndexView(View):
    """Entry point of the single-page application."""

    def get(self, request):
        """Render the index template for any GET request."""
        template_name = 'BriteCore/index.html'
        return render(request, template_name)
class RiskTypeView(View):
    """Serve a single risk type, looked up by slug, together with its fields."""

    def get(self, request, taipe):
        """Return the risk type matching slug *taipe* as JSON (404 if absent)."""
        risk_type = get_object_or_404(RiskType, slug=taipe).getDict()
        related = Field.objects.filter(risk_type=risk_type['id'])
        risk_type['fields'] = [field.getDict() for field in related]
        return JsonResponse(risk_type)
class AllRiskTypeView(View):
    """List all risk types, or clone one risk type with submitted field values."""

    def get(self, request):
        """Return every slugged risk type, flattened with its fields, as a JSON list."""
        # Rows with slug='' are excluded — presumably template/hidden rows; verify.
        risk_types = json.loads(serializers.serialize('json', RiskType.objects.all().exclude(slug='')))
        for x in risk_types:
            # Attach this risk type's fields, then flatten Django's serializer
            # wrapper ({model, pk, fields}) into a plain dict per field.
            x['fields']['fields'] = json.loads(serializers.serialize('json', Field.objects.filter(risk_type=x['pk'])))
            fields = x['fields']
            fields2 = x['fields']['fields']
            fields['fields'] = []
            for y in fields2:
                del y['fields']['risk_type']
                fields['fields'].append(y['fields'])
            # Replace the serializer wrapper with the flattened payload in place.
            x.clear()
            x.update(fields)
        return JsonResponse(risk_types, safe=False)

    def post(self, request):
        """Clone the risk type named by 'slug' in the body and save submitted field values."""
        post_data = json.loads(request.body)
        risk_type = get_object_or_404(RiskType, slug=post_data['slug'])
        new_risk_type = RiskType(title=risk_type.title)
        new_risk_type.save()
        for x in post_data['fields']:
            field_object = get_object_or_404(Field, slug=x['slug'], field_type=x['field_type'], risk_type=risk_type)
            new_field_object = Field(title=field_object.title, risk_type=new_risk_type, field_type=field_object.field_type)
            new_field_object.save()
            # BUG FIX: the original initialised `value` to "" and called
            # value.save() unconditionally, so an unrecognised field_type
            # raised AttributeError on str.save(). Only save when one of the
            # concrete field models was actually created.
            value = None
            if field_object.field_type == 'Text':
                value = TextField(field=new_field_object, value=x['value'])
            if field_object.field_type == 'Number':
                value = NumberField(field=new_field_object, value=x['value'])
            if field_object.field_type == 'Date':
                value = DateField(field=new_field_object, value=x['value'])
            if value is not None:
                value.save()
        return HttpResponse('')
|
from .config import Config as StreamsConfig
|
numbers = [1, 2, 3, 4, 5]
# Keep only the odd values: x % 2 is truthy (1) exactly for odd numbers.
print([x for x in numbers if x % 2])
|
import os
from platform import uname
from typing import Any, Dict
from cpuinfo import get_cpu_info
from GPUtil import getGPUs
from psutil import cpu_count, cpu_freq, virtual_memory
NUM_CPU = os.cpu_count() or 1
def get_machine_info() -> Dict[str, Any]:
    """Collect a snapshot of system, CPU, memory and GPU information.

    Returns a nested dict with 'system', 'cpu', 'memory' and 'gpus' sections;
    'gpus' is None when no GPU is detected.
    """
    system_info = uname()
    cpu_info = get_cpu_info()
    memory = virtual_memory()
    gpu_list = getGPUs()
    cpu_section = {
        "model": cpu_info["brand_raw"],
        "architecture": cpu_info["arch_string_raw"],
        "cores": {
            "physical": cpu_count(logical=False),
            "total": cpu_count(logical=True),
        },
        # cpu_freq().max is reported in MHz; convert to GHz for display.
        "frequency": f"{(cpu_freq().max / 1000):.2f} GHz",
    }
    memory_section = {
        "total": get_size(memory.total),
        "used": get_size(memory.used),
        "available": get_size(memory.available),
    }
    gpu_section = None
    if gpu_list:
        gpu_section = [
            {"name": gpu.name, "memory": f"{gpu.memoryTotal} MB"} for gpu in gpu_list
        ]
    return {
        "system": {
            "system": system_info.system,
            "node": system_info.node,
            "release": system_info.release,
        },
        "cpu": cpu_section,
        "memory": memory_section,
        "gpus": gpu_section,
    }
def get_size(bytes, suffix="B"):
    """Scale a byte count to a human-readable string, e.g. 1253656 => '1.20 MB'.

    Args:
        bytes: raw byte count (int or float).
        suffix: unit suffix appended after the scale prefix (default "B").

    Returns:
        A string such as '1.20 MB'.  The original implementation implicitly
        returned None once the unit loop was exhausted (values >= 1024 PB);
        now the scale extends through "E"/"Z" and anything larger is
        expressed in "Y" units.
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if bytes < factor:
            return f"{bytes:.2f} {unit}{suffix}"
        bytes /= factor
    return f"{bytes:.2f} Y{suffix}"
|
#
# Copyright 2011-2012 Ning, Inc.
#
# Ning licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import unittest
from killbill import Account
class TestSerialization(unittest.TestCase):
    """Tests for JSON deserialization of killbill model objects."""

    def test_should_be_able_to_serialize_account_from_json(self):
        """Account.fromJson should map every JSON key onto the account object."""
        accountJson = '{"accountId":"d9164b72-03b2-4c41-a0e1-8351f17050b4",\
"name":"stephane","externalKey":"4ff6022398b3a","email":"stephane@yahoo.com",\
"currency":"USD","paymentMethodId":null,"address1":"1769 mission street","address2":"Street Address 2",\
"company":"","state":"CA","country":"United States","phone":"4152715447","length":8,"billCycleDay":1,"timeZone":"UTC"}'
        account = Account.fromJson(accountJson)
        # Each (attribute, expected value) pair mirrors one key of the JSON above.
        expected_values = [
            ('accountId', 'd9164b72-03b2-4c41-a0e1-8351f17050b4'),
            ('name', 'stephane'),
            ('externalKey', '4ff6022398b3a'),
            ('email', 'stephane@yahoo.com'),
            ('currency', 'USD'),
            ('paymentMethodId', None),
            ('address1', '1769 mission street'),
            ('address2', 'Street Address 2'),
            ('company', ''),
            ('state', 'CA'),
            ('country', 'United States'),
            ('phone', '4152715447'),
            ('length', 8),
            ('billCycleDay', 1),
            ('timeZone', 'UTC'),
        ]
        for attr, expected in expected_values:
            self.assertEqual(expected, getattr(account, attr))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
import compute_distance_and_align.compute_levenshtein_distance as compute_dist
def align_strings(seq1, seq2):
    '''
    Calculates minimum edit distance between seq1 and seq2
    and backtracks through the Levenshtein table to retrieve the alignment.

    :param str seq1: from this string
    :param str seq2: into this string
    :returns: edit distance, and a tuple of (seq1, changes)
    :rtype: a tuple of (int, (str, str))

    changes is a string where:
        "-": deletion from either seq1 or seq2
        a lowercase letter: no editing needed
        an uppercase letter: substitution or adding of this letter to seq2
    '''
    # Trivial cases that need no dynamic-programming table.  Each returns
    # directly; the original reversed these alignments too, which corrupted
    # the equal-strings and insert-only results.
    if len(seq1) == 0 and len(seq2) == 0:
        return 0, ("", "")
    if len(seq1) == 0:
        # Everything in seq2 has to be added.
        return len(seq2), (seq1, seq2.upper())
    if len(seq2) == 0:
        # Everything in seq1 has to be deleted.
        return len(seq1), (seq1, '-' * len(seq1))
    if seq1 == seq2:
        return 0, (seq1, seq1)

    shortest_dist, table, row, column = compute_dist.compute_levenshtein_distance(seq1, seq2)
    alignment = ""
    # Walk the table backwards from the bottom-right corner, emitting one
    # alignment symbol per step; the string is reversed once at the end.
    while not (row == 0 and column == 0):
        if row != 0 and column != 0 and seq2[row - 1] == seq1[column - 1]:
            # Characters match: keep the letter unchanged (lowercase).
            alignment += seq2[row - 1]
            row = row - 1
            column = column - 1
        elif table[row][column] == (table[row - 1][column - 1] + 1):
            # Diagonal step: substitution of this letter.
            alignment += seq2[row - 1].upper()
            row = row - 1
            column = column - 1
        elif table[row][column] == (table[row - 1][column] + 1):
            # Vertical step: this letter is added to seq2's side.
            alignment += seq2[row - 1].upper()
            row = row - 1
        elif table[row][column] == (table[row][column - 1] + 1):
            # Horizontal step: deletion from seq1.
            alignment += '-'
            column = column - 1
    # BUG FIX: the original returned table[row][column] with row == column == 0,
    # which is always 0; the distance computed by the helper is the answer.
    return shortest_dist, (seq1, alignment[::-1])
if __name__ == "__main__":
    # Small demonstration of the aligner on a fixed pair of strings.
    seq1, seq2 = 'abcdef', 'azced'
    dist, aligned = align_strings('abcdef', 'azced')
    print("\nFrom string: ", seq1,
          "\nto string:", seq2,
          "\nMinimum edit distance:", dist,
          "\nChanges:", aligned)
from django.conf.urls import patterns, include, url
from django.contrib import admin
#admin.autodiscover() #drop this line for Django 1.8
# URL routing table for the disk-catalogue project.  Built with the
# pre-Django-1.8 ``patterns()`` helper and dotted-string view references,
# both of which were removed in Django 1.10 -- keep this module on the
# legacy Django version the project pins.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'hiren.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # Landing page and session management.
    url(r'^$', 'disk.views.index'),
    url(r"^login$", 'disk.views.login'),
    url(r"^logout$", 'disk.views.logout'),
    # Catalogue browsing / searching / maintenance endpoints.
    url(r"^browse$", 'disk.views.browse'),
    url(r"^search$", 'disk.views.search'),
    url(r"^add$", 'disk.views.add'),
    url(r"^json$", 'disk.views.json'),
    url(r"^eject$", 'disk.views.eject'),
    # Per-disk listing and per-entry edit/delete, keyed by numeric ids.
    url(r"^browse/(?P<disk>\d+)/$", 'disk.views.disk_names'),
    url(r"^browse/id/(?P<ids>\d+)/edit$", 'disk.views.edit'),
    url(r"^browse/id/(?P<ids>\d+)/delete$", 'disk.views.delete'),
    # Full-text search provided by django-haystack.
    url(r'^search/', include('haystack.urls')),
)
|
# Copyright (c) 2017-2018 {Flair Inc.} WESLEY PENG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import TestCase
from taf.foundation.conf import Configuration
from taf.foundation.utils import YAMLData
# from taf.foundation.utils import logger
class TestConfiguration(TestCase):
    """Behavioural tests for the shared Configuration singleton."""

    def setUp(self):
        self.conf = Configuration()

    def test_configuration(self):
        # The accessor must always hand back the one shared instance.
        self.assertIs(
            self.conf.get_instance(),
            Configuration.get_instance()
        )
        self.assertIsInstance(
            Configuration.get_instance().plugins,
            YAMLData
        )
        _conf_file = 'test_config.yml'
        _conf_key = 'test_config_dummy_key'
        _conf_value = 'enabled'
        # Merge a dummy entry into the plugin configuration and persist it.
        plugins = Configuration.get_instance().plugins
        plugins += {
            _conf_key: _conf_value
        }
        self.conf.save_as(_conf_file)
        self.assertTrue(
            os.path.isfile(_conf_file)
        )
        # The saved file must contain the key and value on a single line.
        with open(_conf_file, 'r') as conf:
            found = any(
                (_conf_key in line) and (_conf_value in line)
                for line in conf
            )
        self.assertTrue(found)
        os.remove(_conf_file)
|
from typing import List, Any
import cv2
import numpy as np
from Constants import Constants
from Vison.MathHandler import MathHandler
if __name__ == '__main__':
    # MathHandler holds the geometry helpers and Constants holds the tuned
    # numbers, keeping this file a pure vision pipeline.
    m = MathHandler()
    c = Constants()
    # Re-read the shared CSV values in case the tuning program changed them.
    c.readValues()
    # Grab colour frames from the USB camera.
    cap = cv2.VideoCapture(0)
    while True:
        # Read a frame and convert it to HSV colourspace for filtering.
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Threshold the HSV image into a binary mask using the tuned bounds.
        mask = cv2.inRange(hsv, c.lowArray, c.highArray)
        # Find contours in the mask (OpenCV 3.x returns three values here).
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Drop anything that doesn't pass the size threshold.
        contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 1000]
        # The try/except guards frames in which no usable contour/geometry
        # exists; in that case this frame simply produces no targeting data.
        try:
            # Flags for a target that is partially off-screen on either axis.
            inRangeY = True
            inRangeX = True
            # Keep only the largest contour; this rejects sub-selections and
            # large static in the image.
            contoursSorted = sorted(contours, key=cv2.contourArea, reverse=True)
            cnt = contoursSorted[0]
            # Fit a rotated bounding rectangle and round it to integer coords.
            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # Any corner on the image border means the target is partially
            # off-screen on that axis.
            points: List[Any] = []
            # OpenCV doesn't register the BoxPoints object as iterable, so this
            # fires a false warning on PyCharm; the next line disables it.
            # noinspection PyTypeChecker
            for point in box:
                if point[0] <= 0 or point[0] >= frame.shape[1]:
                    inRangeX = False
                if point[1] <= 0 or point[1] >= frame.shape[0]:
                    inRangeY = False
                points.append(point)
            # Sort corners by the slope of the line through (0,0) so opposite
            # corners can be paired up for the centre-point estimate.
            points.sort(key=m.getSlope, reverse=True)
            # Pair the extreme-slope corners and the middle-slope corners to
            # form two diagonals crossing the box.  Their intersection is the
            # perspective-corrected centre estimate (plain width/height of the
            # contour would be skewed by perspective).
            line1 = [points[0], points[3]]
            line2 = [points[1], points[2]]
            x, y = m.line_intersection(line1, line2)
            # Image dimensions for normalising the yaw/pitch errors below.
            imageWidth = frame.shape[1]
            imageHeight = frame.shape[0]
            # Normalised errors in [-1, 1]: -1 at one edge, 0 centred, +1 at
            # the other edge -- convenient inputs for a downstream PID loop.
            yaw = m.calculateYawError(x, imageWidth)
            pitch = m.calculatePitchError(y, imageHeight)
            # Debug output only; disabled normally to keep loop times low.
            if c.isDebug():
                # BUG FIX: the original compared debug levels with ``is``,
                # which tests object identity rather than equality and is
                # implementation-dependent for ints; use membership instead.
                if c.getDebug() in (1, 3):
                    cv2.line(frame, (points[0][0], points[0][1]), (points[3][0], points[3][1]), (0, 255, 0), 2)
                    cv2.line(frame, (points[1][0], points[1][1]), (points[2][0], points[2][1]), (255, 255, 0), 2)
                    if inRangeX:
                        print("Yaw: ", yaw)
                        print("Pitch: ", pitch)
                        # In range, green center point
                        cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 0), -1)
                    else:
                        # Out of range, red center point
                        cv2.circle(frame, (int(x), int(y)), 5, (0, 0, 255), -1)
                if c.getDebug() > 1:
                    print("Points: ", points[0], points[1], points[2], points[3])
                    print("Point 1 Slope: ", m.getSlope(points[0]))
                    print("Point 2 Slope: ", m.getSlope(points[1]))
                    print("Point 3 Slope: ", m.getSlope(points[2]))
                    print("Point 4 Slope: ", m.getSlope(points[3]))
                    cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
        # Reached when no fitting contour was found in the image.
        except Exception as err:
            # If in debug mode, print out the error.
            if c.getDebug() > 1:
                print(err)
        # Show the annotated frame only in debug mode; imshow slows the loop
        # down significantly.
        if c.isDebug():
            cv2.imshow("frame", frame)
        # ESC triggers a clean shutdown; force-quitting leaves OpenCV windows
        # hanging and the camera stream open.
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break
|
import logging
import sys
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path
from application.main.config import settings
from application.main.utility.config_loader import ConfigReaderInstance
# Load the logging YAML configuration once at import time; the filename
# comes from the application settings.
logging_config = ConfigReaderInstance.yaml.read_config_from_file(
    settings.LOG_CONFIG_FILENAME)
class CustomFormatter(logging.Formatter):
    """Logging formatter that colourizes records according to their level."""

    # ANSI escape sequences used to build the per-level formats.
    grey = "\x1b[38;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"

    # Base format wrapped in the colour codes for each level.
    FORMATS = {
        logging.DEBUG: grey + format + reset,
        logging.INFO: grey + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
        logging.CRITICAL: bold_red + format + reset,
    }

    def format(self, record):
        """Render *record* with the colour scheme chosen by its level."""
        return logging.Formatter(self.FORMATS.get(record.levelno)).format(record)
class Handlers:
    """Factory for the console and timed-rotating-file logging handlers."""

    def __init__(self):
        self.formatter = CustomFormatter()
        # Log file location and rotation interval come from the YAML config.
        self.log_filename = Path().joinpath(
            settings.APP_CONFIG.LOGS_DIR, logging_config.FILENAME)
        self.rotation = logging_config.ROTATION

    def get_console_handler(self):
        """Return a StreamHandler that writes formatted records to stdout.

        BUG FIX: the original passed ``sys.stdout.flush()`` -- which returns
        ``None`` -- to StreamHandler, so console output silently fell back to
        the default stream (stderr).  The handler must wrap ``sys.stdout``.
        """
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(self.formatter)
        return console_handler

    def get_file_handler(self):
        """Return a TimedRotatingFileHandler rotating at the configured interval."""
        file_handler = TimedRotatingFileHandler(
            self.log_filename, when=self.rotation)
        file_handler.setFormatter(self.formatter)
        return file_handler

    def get_handlers(self):
        """Return both handlers, ready to attach to a logger."""
        return [self.get_console_handler(), self.get_file_handler()]
#Pair up NPOL UF files and combine to create single cfradial file
import numpy as np
import os
import logging as log

# Inputs
inDir = '/home/disk/bob/olympex/raw/npol_qc2/rhi'
paramFile = '../params/RadxConvert.npol_rhi_west'
binDir = '/home/disk/meso-home/meso/lrose/bin'
# Full campaign date list, kept for reference; restore entries as needed.
#dates = ['20151105','20151112','20151113','20151114','20151115',
#         '20151116','20151117','20151118','20151119','20151120',
#         '20151121','20151122','20151123','20151124','20151125',
#         '20151126','20151130',
#         '20151201','20151202','20151203','20151204','20151205',
#         '20151206','20151207','20151208','20151209','20151210',
#         '20151211','20151212','20151214','20151215',
#         '20151216','20151217','20151218','20151219']
#         '20160103','20160104','20160105',
#         '20160106','20160108','20160110',
#         '20160111','20160112','20160113','20160114','20160115']
dates = ['20151213']

# Start log
log.basicConfig(format='%(levelname)s:%(message)s',level=log.INFO)

for date in dates:
    # BUG FIX: use the print() function (valid in Python 2 and 3) instead of
    # the Python-2-only print statement.
    print(date)
    thisDir = inDir+'/'+date+'/rhi_a'
    for fname1 in os.listdir(thisDir):
        # The first half of a pair covers azimuths 00-20.
        if fname1.endswith('00-20.uf'):
            log.info( "file1 = {}".format(fname1) )
            # Find matching date and time.
            # For filename format: olympex_NPOL1_20151213_140003_rhi_00-20.uf
            # BUG FIX: the parsed fields no longer clobber the outer loop
            # variable ``date`` (they were previously unpacked into it).
            project,radar,fdate,ftime,scan,azrange = fname1.split("_")
            fname2 = project+'_'+radar+'_'+fdate+'_'+ftime+'_'+scan+'_20-40.uf'
            if os.path.isfile(thisDir+'/'+fname2):
                log.info( "file2 = {}".format(fname2) )
                # Merge both azimuth ranges into one CfRadial file.
                command = binDir+'/RadxConvert -v -params '+paramFile+' -f '+thisDir+'/'+fname1+' '+thisDir+'/'+fname2
                os.system(command)
                # Move processed inputs into a DONE subfolder.
                if not os.path.exists(thisDir+'/DONE'):
                    os.makedirs(thisDir+'/DONE')
                os.rename(thisDir+'/'+fname1, thisDir+'/DONE/'+fname1)
                os.rename(thisDir+'/'+fname2, thisDir+'/DONE/'+fname2)
from datetime import datetime
import numpy as np
from aidapy import event_search
# Time interval
# Time interval
start_time = datetime(2017, 7, 15, 7, 0, 0)
end_time = datetime(2017, 7, 15, 12, 0, 0)
# Input settings to look for dipolarization fronts on MMS1 probe.
# The criteria lambda is evaluated per time window; a window is kept when:
#   * the maximum of dc_mag_z occurs after its minimum,
#   * the magnetic elevation angle differs by more than 10 degrees between
#     those two instants,
#   * the elevation angle exceeds 45 somewhere in the window,
#   * sc_pos_x stays <= -5 * 6378 and |sc_pos_y| stays <= 15 * 6378
#     (6378 is presumably one Earth radius in km -- TODO confirm).
settings = {
    "criteria": lambda dc_mag_z, mag_elangle, sc_pos_x, sc_pos_y:
    (np.where(dc_mag_z == np.max(dc_mag_z))[0] > np.where(dc_mag_z == np.min(dc_mag_z))[0]) &
    (np.abs(mag_elangle[np.where(dc_mag_z == np.min(dc_mag_z))[0]] - mag_elangle[np.where(dc_mag_z == np.max(dc_mag_z))[0]]) > 10) &
    (np.any(mag_elangle > 45)) & (np.all(sc_pos_x <= -5 * 6378)) & (np.all(np.abs(sc_pos_y) <= 15 * 6378)),
    "parameters": {"mission": "mms",
                   "process": "df",
                   "probes": ['1'],
                   "time_window": 306,
                   "coords": "gse",
                   "mode": 'low_res',
                   "time_step": 306,
                   "sample_freq": 1}}
# Run the search over the interval and plot the detected events.
event_search(settings, start_time, end_time, plot=True)
|
from django.urls import path
from . import views
# URL routes for the student-management app.
urlpatterns = [
    path('allstudents/', views.student_list, name='student_list'),
    # Detail view keyed by the student's numeric id.
    path('<int:student_id>/', views.single_student, name='single_student'),
    path('registration/', views.student_regi, name='student_regi'),
    # Edit is keyed by primary key; delete by student id.
    path('edit/<int:pk>', views.edit_student, name='edit_student'),
    path('delete/<int:student_id>', views.delete_student, name='delete_student'),
    path('attendance/count', views.attendance_count, name='attendance_count'),
]
|
"""
MIT License
Copyright (c) 2016 Ionata Digital
Copyright (c) 2009-2014 Joshua Roesslein
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
import six
class SenseTError(Exception):
    """Base exception raised by the SenseT client.

    :param reason: human-readable description of the failure
    :param response: the response object that triggered the error, if any
    :param api_code: the API's numeric error code, if any
    """

    def __init__(self, reason, response=None, api_code=None):
        self.response = response
        self.api_code = api_code
        self.reason = six.text_type(reason)
        super(SenseTError, self).__init__(reason)

    def __str__(self):
        return self.reason
def is_rate_limit_error_message(message):
    """Check if the supplied error message belongs to a rate limit error.

    A rate-limit payload is a non-empty list whose first element carries a
    'code' entry equal to 88.
    """
    if not isinstance(message, list):
        return False
    if len(message) == 0:
        return False
    first = message[0]
    return 'code' in first and first['code'] == 88
class RateLimitError(SenseTError):
    """Exception for SenseT hitting the rate limit.

    Catch this (rather than the broader SenseTError) to implement
    back-off/retry logic for rate-limited requests.
    """
    # RateLimitError has the exact same properties and inner workings
    # as SenseTError for backwards compatibility reasons.
    pass
|
# Generated by Django 3.2.5 on 2021-08-14 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Re-models bonds: drops the Bond.bond_list / Character.bonds linkage and
    # the BondList model, replacing them with a direct nullable FK from Bond
    # to Character plus a Bond.text body field.
    dependencies = [
        ('character', '0006_auto_20210814_1738'),
    ]
    operations = [
        # Remove the old relations in both directions first so BondList can
        # be deleted safely afterwards.
        migrations.RemoveField(
            model_name='bond',
            name='bond_list',
        ),
        migrations.RemoveField(
            model_name='character',
            name='bonds',
        ),
        # New direct link: each bond optionally belongs to one character.
        migrations.AddField(
            model_name='bond',
            name='character',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='character.character'),
        ),
        migrations.AddField(
            model_name='bond',
            name='text',
            field=models.TextField(default=''),
        ),
        migrations.DeleteModel(
            name='BondList',
        ),
    ]
|
#coding=utf-8
import json
import io
import urllib.request
import time
# Opener with a browser-like User-Agent, used by the (disabled) logo checker.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/49.0.2')]
# Based on the contents of t_company_logo.json and t_company.json:
# attach the logo of the same-named company in t_company_logo to each
# company in t_company, and print the t_company records together with
# their logo URL.
# ******************** Begin ********************
# with open('t_company.json', encoding='utf-8') as company:
# i = 0
# for company_line in company:
# i += 1
# company_obj = json.loads(company_line) #将json字符串转化为对象
#
# with open('t_company_logo.json', encoding='utf-8') as logo:
# for logo_line in logo:
# logo_obj = json.loads(logo_line) #将json字符串转化为对象
#
# if company_obj['name'] == logo_obj['companyName']:
# tempUrl = logo_obj['logoUrl']
# try:
# opener.open(tempUrl)
# # print(str(i) + ' : ', tempUrl+'没问题')
# company_obj['logo_url'] = logo_obj['logoUrl']
# except urllib.error.HTTPError:
# # print(tempUrl+'=访问页面出错')
# time.sleep(0.1)
# except urllib.error.URLError:
# # print(tempUrl+'=访问页面出错')
# time.sleep(0.1)
# time.sleep(0.1)
#
# with open('t_comment.json', encoding='utf-8') as comment:
# for comment_line in comment:
# comment_obj = json.loads(comment_line) #将json字符串转化为对象
#
# if company_obj['_id'] == comment_obj['company_id']:
# company_obj['create_time'] = comment_obj['create_time']
# else:
# company_obj['create_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#
# print(company_obj)
# ******************** End ********************
# Active version: count each company's comments and stamp the company with
# the newest comment's create_time (or "now" when it has none).
with open('t_company-26.json', encoding='utf-8') as company:
    i = 0
    for company_line in company:
        i += 1
        company_obj = json.loads(company_line) # parse the JSON string into an object
        comment_count = 0
        comment_create_time_list = []
        # NOTE(review): the comment file is re-read for every company, which
        # is O(companies * comments) file I/O; fine for small data sets.
        with open('t_comment-26.json', encoding='utf-8') as comment:
            for comment_line in comment:
                comment_obj = json.loads(comment_line) # parse the JSON string into an object
                if company_obj['_id'] == comment_obj['company_id']:
                    comment_count += 1
                    time.sleep(0.1)
                    # print(company_obj['_id'], comment_count, comment_obj['create_time'])
                    comment_create_time_list.append(comment_obj['create_time'])
        company_obj['comment_total'] = comment_count
        comment_create_time_list.sort()
        # print(i, company_obj['name'], comment_create_time_list[-1])
        if len(comment_create_time_list):
            company_obj['create_time'] = comment_create_time_list[-1]
        else:
            company_obj['create_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print(company_obj)
# Description:
'''This is the demo code for all the functions of UPS Plus.
Advanced users can select the functions they need through the function options provided in the code below to customize and develop them to meet their needs.
'''
import time
import smbus2
import logging
from ina219 import INA219,DeviceRangeError
# I2C bus number and slave address of the UPS microcontroller.
DEVICE_BUS = 1
DEVICE_ADDR = 0x17
# Values used by the commented-out configuration examples near the bottom.
PROTECT_VOLT = 3700
SAMPLE_TIME = 2
# INA219 at 0x40 monitors the Raspberry Pi's supply rail.
ina = INA219(0.00725,address=0x40)
ina.configure()
print("Raspberry Pi power supply voltage: %.3f V" %ina.voltage())
print("Current current consumption of Raspberry Pi: %.3f mA" %ina.current())
print("Current power consumption of Raspberry Pi: %.3f mW" %ina.power())
# INA219 at 0x45 monitors the battery; a positive current means charging.
ina = INA219(0.005,address=0x45)
ina.configure()
print("Batteries Voltage: %.3f V" % ina.voltage())
try:
    if ina.current() > 0:
        print("Battery current (charging), rate: %.3f mA"% (ina.current()))
        print("Current battery power supplement: %.3f mW"% ina.power())
    else:
        print("Battery current (discharge), rate: %.3f mA"% (0-ina.current()))
        print("Current battery power consumption: %.3f mW"% ina.power())
except DeviceRangeError:
    print('Battery power is too high.')
# Read the UPS MCU's register map (registers 1..254) over I2C; values are
# little-endian 16-bit pairs (low byte first).
bus = smbus2.SMBus(DEVICE_BUS)
aReceiveBuf = []
aReceiveBuf.append(0x00) # Placeholder
for i in range(1,255):
    aReceiveBuf.append(bus.read_byte_data(DEVICE_ADDR, i))
print("Current processor voltage: %d mV"% (aReceiveBuf[2] << 8 | aReceiveBuf[1]))
print("Current Raspberry Pi report voltage: %d mV"% (aReceiveBuf[4] << 8 | aReceiveBuf[3]))
print("Current battery port report voltage: %d mV"% (aReceiveBuf[6] << 8 | aReceiveBuf[5])) # This value is inaccurate during charging
print("Current charging interface report voltage (Type C): %d mV"% (aReceiveBuf[8] << 8 | aReceiveBuf[7]))
print("Current charging interface report voltage (Micro USB): %d mV"% (aReceiveBuf[10] << 8 | aReceiveBuf[9]))
# A charging input above 4 V indicates which connector is powering the UPS.
if (aReceiveBuf[8] << 8 | aReceiveBuf[7]) > 4000:
    print('Currently charging through Type C.')
elif (aReceiveBuf[10] << 8 | aReceiveBuf[9]) > 4000:
    print('Currently charging via Micro USB.')
else:
    print('Currently not charging.') # Consider shutting down to save data or send notifications
print("Current battery temperature (estimated): %d degC"% (aReceiveBuf[12] << 8 | aReceiveBuf[11])) # Learned from the battery internal resistance change, the longer the use, the more stable the data.
print("Full battery voltage: %d mV"% (aReceiveBuf[14] << 8 | aReceiveBuf[13]))
print("Battery empty voltage: %d mV"% (aReceiveBuf[16] << 8 | aReceiveBuf[15]))
print("Battery protection voltage: %d mV"% (aReceiveBuf[18] << 8 | aReceiveBuf[17]))
print("Battery remaining capacity: %d %%"% (aReceiveBuf[20] << 8 | aReceiveBuf[19])) # At least one complete charge and discharge cycle is passed before this value is meaningful.
print("Sampling period: %d Min"% (aReceiveBuf[22] << 8 | aReceiveBuf[21]))
if aReceiveBuf[23] == 1:
    print("Current power state: normal")
else:
    print("Current power status: off")
if aReceiveBuf[24] == 0:
    print('No shutdown countdown!')
else:
    print("Shutdown countdown: %d sec"% (aReceiveBuf[24]))
if aReceiveBuf[25] == 1:
    print("Automatically turn on when there is external power supply!")
else:
    print("Does not automatically turn on when there is an external power supply!")
if aReceiveBuf[26] == 0:
    print('No restart countdown!')
else:
    print("Restart countdown: %d sec"% (aReceiveBuf[26]))
# Cumulative counters are little-endian 32-bit values.
print("Accumulated running time: %d sec"% (aReceiveBuf[31] << 24 | aReceiveBuf[30] << 16 | aReceiveBuf[29] << 8 | aReceiveBuf[28]))
print("Accumulated charged time: %d sec"% (aReceiveBuf[35] << 24 | aReceiveBuf[34] << 16 | aReceiveBuf[33] << 8 | aReceiveBuf[32]))
print("This running time: %d sec"% (aReceiveBuf[39] << 24 | aReceiveBuf[38] << 16 | aReceiveBuf[37] << 8 | aReceiveBuf[36]))
print("Version number: %d "% (aReceiveBuf[41] << 8 | aReceiveBuf[40]))
#The following code demonstrates resetting the protection voltage
# bus.write_byte_data(DEVICE_ADDR, 17,PROTECT_VOLT & 0xFF)
# bus.write_byte_data(DEVICE_ADDR, 18,(PROTECT_VOLT >> 8)& 0xFF)
# print("Successfully set the protection voltage as: %d mV"% PROTECT_VOLT)
#The following code demonstrates resetting the sampling period
# bus.write_byte_data(DEVICE_ADDR, 21,SAMPLE_TIME & 0xFF)
# bus.write_byte_data(DEVICE_ADDR, 22,(SAMPLE_TIME >> 8)& 0xFF)
# print("Successfully set the sampling period as: %d Min"% SAMPLE_TIME)
# Set to shut down after 240 seconds (can be reset repeatedly)
# bus.write_byte_data(DEVICE_ADDR, 24,240)
# Cancel automatic shutdown
# bus.write_byte_data(DEVICE_ADDR, 24,0)
# Automatically turn on when there is an external power supply (If the automatic shutdown is set, when there is an external power supply, it will shut down and restart the board.)
# 1) If you want to completely shut down, please don't turn on the automatic startup when there is an external power supply.
# 2) If you want to shut down the UPS yourself because of low battery power, you can shut down the UPS first, and then automatically recover when the external power supply comes.
# 3) If you simply want to force restart the power, please use another method.
# 4) Set to 0 to cancel automatic startup.
# 5) If this automatic startup is not set, and the battery is exhausted and shut down, the system will resume work when the power is restored as much as possible, but it is not necessarily when the external power supply is plugged in.
# bus.write_byte_data(DEVICE_ADDR, 25,1)
# Force restart (simulate power plug, write the corresponding number of seconds, shut down 5 seconds before the end of the countdown, and then turn on at 0 seconds.)
# bus.write_byte_data(DEVICE_ADDR, 26,30)
# Restore factory settings (clear memory, clear learning parameters, can not clear the cumulative running time, used for after-sales purposes.)
# bus.write_byte_data(DEVICE_ADDR, 27,1)
# Enter the OTA state (the user demo program should not have this thing, after setting, unplug the external power supply, unplug the battery, reinstall the battery, install the external power supply (optional), you can enter the OTA mode and upgrade the firmware.)
# bus.write_byte_data(DEVICE_ADDR, 50,127)
# Serial Number: three 32-bit words read from registers 240..251.
UID0 = "%08X" % (aReceiveBuf[243] << 24 | aReceiveBuf[242] << 16 | aReceiveBuf[241] << 8 | aReceiveBuf[240])
UID1 = "%08X" % (aReceiveBuf[247] << 24 | aReceiveBuf[246] << 16 | aReceiveBuf[245] << 8 | aReceiveBuf[244])
UID2 = "%08X" % (aReceiveBuf[251] << 24 | aReceiveBuf[250] << 16 | aReceiveBuf[249] << 8 | aReceiveBuf[248])
print("Serial Number is:" + UID0 + "-" + UID1 + "-" + UID2 )
|
import os
import os.path
import pydicom
import shutil
from multiprocessing import Process
import time
# set initial values
src_path = "dicom file directory"  # root folder holding the raw DICOM tree
des_path = "destination directory"  # where the sorted copies are written
process_count = 10 # number of process you use
def sd_form(text): # Series Description
    """Normalize a Series Description for use in a folder name.

    Spaces and angle brackets become underscores and the result is
    upper-cased.  (Parameter renamed from ``str``, which shadowed the
    builtin; all call sites pass it positionally.)
    """
    for ch in (' ', '<', '>'):
        text = text.replace(ch, '_')
    return text.upper()
def sn_form(number): # Series Number
    """Zero-pad a Series Number string to at least 4 characters ('7' -> '0007').

    (Parameter renamed from ``str``, which shadowed the builtin; all call
    sites pass it positionally.)
    """
    return number.zfill(4)
def pn_form(name): # Patient Name
    """Normalize a Patient Name: spaces become underscores, upper-cased.

    (Parameter renamed from ``str``, which shadowed the builtin; all call
    sites pass it positionally.)
    """
    return name.replace(' ', '_').upper()
def create_folder(path): # create new folder # only if folder doesn't exists
    """Create *path* (including parents) unless it already exists.

    The FileExistsError handler keeps this safe when several worker
    processes race to create the same folder between the isdir check and
    makedirs.  (Parameter renamed from ``dir``, which shadowed the builtin.)
    """
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
        print(f"Folder created \"{path}\"")
    except FileExistsError:
        # Another process created it first.
        print(f"[Error] while creating new folder \"{path}\"")
def get_dirs(path):
    """Return the paths of the immediate subdirectories of *path*."""
    return [
        os.path.join(path, entry)
        for entry in os.listdir(path)
        if os.path.isdir(os.path.join(path, entry))
    ]
def split_num(num, divisor): # set number of folders allocated to a process
    """Split *num* items into *divisor* near-equal groups and return the
    cumulative end index of each group (the larger groups come first)."""
    quotient, remainder = divmod(num, divisor)
    # The first `remainder` groups get one extra item each.
    sizes = [quotient + 1 if i < remainder else quotient for i in range(divisor)]
    boundaries = []
    running_total = 0
    for size in sizes:
        running_total += size
        boundaries.append(running_total)
    return boundaries
def split_list(dir_list, num_pr):
    """Partition *dir_list* into *num_pr* consecutive chunks, one per process."""
    chunks = []
    start = 0
    for end in split_num(len(dir_list), num_pr):
        chunks.append(dir_list[start:end])
        start = end
    return chunks
def create_dcm_folder(id, new_path, path_list):
    """Worker body: copy every DICOM file found under *path_list* into
    <PatientName>_<PatientID>/<SeriesDescription>_<SeriesNumber> folders
    below *new_path*.

    ``id`` is the worker index supplied by the spawner; it is unused here
    but kept for interface compatibility with the Process() call sites.
    """
    for path in path_list:
        for root, dirs, files in os.walk(path):
            rootpath = os.path.join(path, root)
            for file in files:
                filepath = os.path.join(rootpath, file)
                # data elements info for foldername
                try:
                    ds = pydicom.dcmread(filepath, specific_tags=['SeriesDescription','SeriesNumber','PatientName','PatientID'])
                except Exception:
                    # Not a readable DICOM file -- skip it.
                    # BUG FIX: was a bare ``except:``, which also swallowed
                    # KeyboardInterrupt/SystemExit and made workers unkillable.
                    continue
                series_des = sd_form(str(ds.SeriesDescription))
                series_num = sn_form(str(ds.SeriesNumber))
                patient_name = pn_form(str(ds.PatientName))
                patient_id = str(ds.PatientID)
                parentF_name = f'{patient_name}_{patient_id}'
                subF_name = f'{series_des}_{series_num}'
                new_folder_path = os.path.join(new_path, parentF_name, subF_name)
                create_folder(new_folder_path)
                shutil.copy2(filepath, new_folder_path) # copy file # (filepath) > (new_folder_path)
##################################################
if __name__ == "__main__":
    start = time.time()
    # Resolve the source/destination roots and list the folders to process.
    path = os.path.abspath(src_path)
    new_path = os.path.abspath(des_path)
    dir_list = get_dirs(path)
    # Split the folders into one chunk per worker process.
    dir_list = split_list(dir_list, process_count)
    process_l = list()
    for i, dir in enumerate(dir_list):
        p = Process(target=create_dcm_folder, args=(i, new_path, dir))
        p.start()
        process_l.append(p)
    # Wait for every worker to finish before reporting the elapsed time.
    for p in process_l:
        p.join()
    print(f"time: {time.time() - start}")
|
# encoding: UTF-8
"""
Implement phylib command line application.
"""
from __future__ import print_function
import sys
__USAGE__ = """usage: phylib <command> [<args>]
The support commands are:
  cfutil-export    export channel data to file or Channel Finder Service
  impact-lattice   generate IMPACT lattice file (test.in)
  impact-vastart   start IMPACT virtual accelerator
  impact-settings  read settings from IMPACT lattice file (test.in)
  frib-layout      generate layout file from FRIB Expanded Lattice File (XLF)
  frib-channels    generate a channels data file with FRIB naming conventions
  help             show help information for a specified topic
"""


def main():
    """Entry point of command line application.

    Dispatches ``sys.argv[1]`` to the matching sub-command module; returns 1
    when no command is given or the command is not recognized.
    """
    if len(sys.argv) < 2:
        print(__USAGE__, file=sys.stderr)
        return 1

    cmd = sys.argv[1].strip().lower()

    # Imports are deferred so a command only pays for what it needs.
    if cmd == "help":
        return print_help()
    if cmd == "impact-lattice":
        import impact_lattice
        return impact_lattice.main()
    if cmd == "impact-settings":
        import impact_settings
        return impact_settings.main()
    if cmd == "impact-vastart":
        import impact_vastart
        return impact_vastart.main()
    if cmd == "impact-model":
        import impact_model
        return impact_model.main()
    if cmd == "cfutil-export":
        import cfutil_export
        return cfutil_export.main()
    if cmd == "frib-layout":
        from phyutil.phytool import frib_layout
        return frib_layout.main()
    if cmd == "frib-channels":
        from phyutil.phytool import frib_channels
        return frib_channels.main()

    print(__USAGE__, file=sys.stderr)
    print("Unrecognized command: {}".format(cmd), file=sys.stderr)
    return 1
def print_help():
    """Display help information for the specified topic.

    Reads the topic from ``sys.argv[2]``; every path returns 1, matching the
    historical behaviour of this tool.
    """
    if len(sys.argv) < 3:
        print(__USAGE__, file=sys.stderr)
        print("See 'phylib help <command>' for more information on a specific command.", file=sys.stderr)
        return 1

    topic = sys.argv[2].strip().lower()

    # Imports are deferred so help for one command does not pull in the rest.
    if topic == "impact-lattice":
        import impact_lattice
        impact_lattice.print_help()
    elif topic == "impact-settings":
        import impact_settings
        impact_settings.print_help()
    elif topic == "impact-vastart":
        import impact_vastart
        impact_vastart.print_help()
    elif topic == "impact-model":
        import impact_model
        impact_model.print_help()
    elif topic == "cfutil-export":
        import cfutil_export
        cfutil_export.print_help()
    elif topic == "frib-layout":
        from phyutil.phytool import frib_layout
        frib_layout.print_help()
    elif topic == "frib-channels":
        from phyutil.phytool import frib_channels
        frib_channels.print_help()
    else:
        print("No help available for command: {}".format(topic), file=sys.stderr)
    return 1
|
'''
This builds up the interface for the proof search module.
'''
import gen_model_beam_search
import gen_model_beam_search_torch
import pred_model as pred_model_run
import payout_model_5_train as payout_model_run
from models import *
from beam_search import *
import os
import sys
import numpy as np
import pickle as pickle
import data_utils5 as data_utils
import nnlibrary as nn
import data_utils as data_utils_new
import constructor_list
import torch
import torch_models
# Cap on how many grammar constructors the beam search may use; None = no cap.
NUM_ALLOWED_CONSTRUCTORS = None
# Propositions that must never be proposed during search.
DISALLOWED_PROPS = ['idi', 'dummylink', 'dtrucor']
# Ensemble sizes for each model family (1 = single model, no ensembling).
PRED_ENSEMBLE = 1
PRED_CACHE_ENSEMBLE = 1
PAYOUT_ENSEMBLE = 1
GEN_ENSEMBLE = 1
PAYOUT_SCALE = 1.0 # Chosen to make the spread of payouts roughly uniform over 0.5-1.0.
if NUM_ALLOWED_CONSTRUCTORS is None:
    ALLOWED_CONSTRUCTORS = None
else:
    # Keep only the first NUM_ALLOWED_CONSTRUCTORS entries of the canonical order.
    ALLOWED_CONSTRUCTORS = set(constructor_list.order[:NUM_ALLOWED_CONSTRUCTORS])
class ProofInterface:
    """Facade over the three learned models used by the proof search:

    - a relevance ("pred") model scoring which database propositions are
      worth trying against the current goal,
    - a generator model driving the beam search that fills in the
      unconstrained substitutions when a proposition is applied, and
    - a payout model scoring intermediate proof states.

    Supports both the legacy Holophrasm models (``args.no_use_torch``) and
    the torch implementations from ``torch_models``.
    """

    def __init__(self, args, lm, recalculate_props=True, directory='searcher'):
        """Load model parameters/weights and build the cached matrix of
        proposition embeddings (``self.pred_database``).

        args: parsed command-line namespace (device, model paths, flags).
        lm: language-model/database object for the proof corpus.
        recalculate_props: when False and a cached database file exists,
            load it instead of re-embedding every proposition.
        directory: directory holding the cached ``pred_database`` file.
        """
        self.lm = lm
        self.config = data_utils_new.get_config(lm)
        self.args = args
        # NOTE(review): hard-coded to 'searcher'; the `directory` parameter is
        # only used for the pred_database cache below — confirm this is intended.
        self.holo_directory = 'searcher'
        # load all the variables and parameters
        # I'm fixing the file locations by hand because lazy.
        loc = 'cpu' if args.cpu else 'cuda:0'
        if self.args.no_use_torch:
            # Legacy Holophrasm models: load config + weights for the
            # generator, relevance and payout networks from disk.
            self.gen_config = gen_model_beam_search.Config(lm)
            self.gen_config.load(self.holo_directory+'/gen.parameters')
            self.gen_var = gen_model_beam_search.Variables(self.gen_config)
            self.gen_var.load(self.holo_directory+'/gen.weights')
            self.pred_config = pred_model_run.Config(lm)
            self.pred_config.load(self.holo_directory+'/pred.parameters')
            self.pred_var = pred_model_run.Variables(self.pred_config)
            self.pred_var.load(self.holo_directory+'/pred.weights')
            self.payout_config = payout_model_run.Config(lm)
            self.payout_config.load(self.holo_directory+'/payout.parameters')
            self.payout_var = payout_model_run.Variables(self.payout_config)
            self.payout_var.load(self.holo_directory+'/payout.weights')
        print (args.device)
        # Load model.
        # Vocabulary size: one id per known token plus one extra slot.
        self.args.vocab_size = len(self.config.encode)+1
        if self.args.interface_pred_model != '':
            if self.args.stat_model:
                # Statistical model checkpoints hold the whole model object.
                self.pred_model = torch.load(self.args.interface_pred_model)
            else:
                # Restore the args the checkpoint was trained with, then
                # override the runtime/device-dependent fields.
                self.args_pred = torch.load(self.args.interface_pred_model, map_location=loc)['args']
                self.args_pred.device = args.device
                self.args_pred.cpu = args.cpu
                #self.args_pred.vocab_size = 1189 if hasattr(self.args_pred, 'gen_lm') and self.args_pred.gen_lm else 1089
                self.args_pred.max_len = self.args.max_len
                self.pred_model = torch_models.PredModel(self.args_pred, self.config).cuda()
                self.pred_model.load(self.args.interface_pred_model)
                self.pred_model.to(args.device)
                self.pred_model.args.device = args.device
        else:
            # No checkpoint given: fresh (untrained) model on the target device.
            self.pred_model = torch_models.PredModel(args, self.config).to(args.device)
            self.args_pred = args
        if self.args.interface_gen_model != '':
            if self.args.stat_model:
                data = torch.load(self.args.interface_gen_model)
                self.gen_model = torch_models.LModel(data['args'], self.config).cuda()
                self.gen_model.load_state_dict(data['models'])
            else:
                self.args_gen = torch.load(self.args.interface_gen_model, map_location=loc)['args']
                self.args_gen.device = args.device
                self.args_gen.cpu = args.cpu
                #self.args_gen.vocab_size = 1189 if hasattr(self.args_gen, 'gen_lm') and self.args_gen.gen_lm else 1089
                self.args_gen.max_len = self.args.max_len
                self.gen_model = torch_models.GenModel2(self.args_gen, self.config).cuda()
                self.gen_model.load(self.args.interface_gen_model)
                self.gen_model.to(args.device)
                self.gen_model.args.device = args.device
        else:
            self.gen_model = torch_models.GenModel2(args, self.config).to(args.device)
            self.args_gen = args
        if self.args.interface_payout_model != '':
            # NOTE(review): unlike pred/gen, this torch.load has no
            # map_location, so it needs the checkpoint's original device —
            # confirm on CPU-only runs.
            self.args_payout = torch.load(self.args.interface_payout_model)['args']
            #self.args_payout.vocab_size = 1189 if hasattr(self.args_payout, 'gen_lm') and self.args_payout.gen_lm else 1089
            self.args_payout.max_len = self.args.max_len
            self.payout_model = torch_models.Payout(self.args_payout, self.config).cuda()
            self.payout_model.load(self.args.interface_payout_model)
            self.payout_model.to(args.device)
            self.payout_model.args.device = args.device
        else:
            self.payout_model = torch_models.Payout(args, self.config).to(args.device)
            self.args_payout = args
        # Inference only: switch all three models to eval mode.
        self.pred_model.eval()
        self.gen_model.eval()
        self.payout_model.eval()
        #self.args.vocab_size = len(self.config.encode)+1
        #self.pred_model = torch_models.PredModel(args, self.config).to(args.device)
        #self.gen_model = torch_models.GenModel(args, self.config).to(args.device)
        #self.payout_model = torch_models.Payout(args).to(args.device)
        #if self.args.interface_pred_model != '':
        #    self.pred_model.cuda()
        #    self.pred_model.load(self.args.interface_pred_model)
        #    self.pred_model.to(args.device)
        #if self.args.interface_gen_model != '':
        #    self.gen_model.cuda()
        #    self.gen_model.load(self.args.interface_gen_model)
        #    self.gen_model.to(args.device)
        #if self.args.interface_payout_model != '':
        #    self.payout_model.load(self.args.interface_payout_model)
        #self.pred_model.cpu()
        #self.gen_model.cpu()
        #self.payout_model.cpu()
        #torch.save({'models':self.pred_model.state_dict()}, '../models/pred_default_cpu')
        #torch.save({'models':self.gen_model.state_dict()}, '../models/gen_default_cpu')
        #torch.save({'models':self.payout_model.state_dict()}, '../models/payout_default_cpu')
        # beam search interface
        if self.args.no_use_torch:
            self.bsi = gen_model_beam_search.BeamSearchInterface([self.gen_var]*GEN_ENSEMBLE)
        else:
            self.bsi = gen_model_beam_search_torch.BeamSearchInterface([None]*GEN_ENSEMBLE, self.args, self.gen_model)
        # remember the answer so that we don't need to constantly recalculate it
        file_path = directory+'/pred_database'
        if self.args.cpu:
            file_path += '_cpu'
        if os.path.isfile(file_path) and not recalculate_props:
            print ('loading proposition vectors')
            if self.args.no_use_torch:
                with open(file_path, 'rb') as handle:
                    self.pred_database = pickle.load(handle, encoding='latin1')
            else:
                self.pred_database = torch.load(file_path)#pickle.load(handle)
        else:
            print ('using proposition vectors at '+file_path)
            if self.args.stat_model:
                self.initialize_pred_tfidf()
            else:
                self.initialize_pred(file_path)
        print ('pred_database', self.pred_database.shape)

    def initialize_pred_tfidf(self):
        """Build the proposition matrix for the statistical (tf-idf style)
        relevance model: one embedding row per proposition."""
        # NOTE(review): path is relative to the working directory — assumes
        # the process runs from a sibling of ../data; confirm.
        with open('../data/props_encode', 'rb') as f:
            prop_inputs = pickle.load(f)
        prop_embs = torch.zeros(len(prop_inputs), len(self.config.encode)+1).to(self.args.device)
        for i in range(len(prop_inputs)):
            prop_embs[i] = self.pred_model.embed(prop_inputs[i][0])
        self.pred_database = prop_embs
        print ('\rdone adding propositions')

    def initialize_pred(self, file_path):
        """Embed every proposition with the torch relevance model, in
        batches, into ``self.pred_database`` (rows indexed by prop number).

        file_path: cache location (saving is currently disabled below).
        """
        args = self.args
        if args.partial_lm:
            # Encode on the fly from the in-memory database.
            prop_inputs = []
            for prop in self.lm.database.propositions_list:
                prop_inputs.append(data_utils_new.encode_proof_step(prop.tree, prop.f, prop.hyps, self.lm, self.config))
        else:
            # Use the pre-encoded propositions shipped with the dataset.
            with open(os.path.join(self.args.data_path, 'props_encode'), 'rb') as f:
                prop_inputs = pickle.load(f)
        # Bidirectional encoders produce twice as many features per prop.
        self.pred_database = torch.zeros(len(prop_inputs), self.args_pred.nFeats*2 if self.args_pred.bidirectional else self.args_pred.nFeats).to(args.device)
        l = 0
        while l < len(prop_inputs):
            #os.system('nvidia-smi')
            r = min(len(prop_inputs), l+args.batch_size)
            tokens = [torch.LongTensor(prop_inputs[i][0]).to(args.device) for i in range(l, r)]
            trees = [torch.Tensor(prop_inputs[i][1]).to(args.device) for i in range(l, r)]
            with torch.no_grad():
                self.pred_database[l:r] = self.pred_model.embed(tokens, trees, _type='p')
            l = r
        print ('\rdone adding propositions')
        # save the database
        #if self.args.no_use_torch:
        #    with open(file_path, 'wb') as handle:
        #        pickle.dump(self.pred_database, handle)
        #else:
        #    torch.save(self.pred_database, file_path)
    # Disabled legacy (numpy/Holophrasm) implementation, kept for reference.
    '''
    def initialize_pred(self, file_path):
        # this initializes all of the proposition vectors in database,
        # so that we can call them quickly when we need to.
        # this should include the multiplication
        #self.pred_database = [pred_model_run.get_prop_vector([self.pred_var]*ENSEMBLE, prop) for prop in self.lm.database.propositions_list)]
        self.pred_database = []
        for i, prop in enumerate(self.lm.database.propositions_list):
            sys.stdout.write('\rvectorizing proposition '+str(i))
            sys.stdout.flush()
            self.pred_database.append(pred_model_run.get_prop_vector([self.pred_var]*PRED_CACHE_ENSEMBLE, prop))
        print ('\rdone adding propositions')
        self.pred_database = np.stack(self.pred_database, axis=0)
        # save the database
        with open(file_path, 'wb') as handle:
            pickle.dump(self.pred_database, handle)
    '''

    def rename_var(self, statement, hyps, f, config):
        """Randomly rename variables in `statement` and its 'e'-type
        hypotheses, then encode them into (token ids, tree-structure tensor)
        ready for the torch models."""
        random_replacement_dict = config.lm.random_replacement_dict_f(f=f)
        statement = statement.copy().replace_values(random_replacement_dict)
        hyps = [h.tree.copy().replace_values(random_replacement_dict) for h in hyps if h.type=='e']
        statement_graph_structure = TreeInformation([statement],
                start_symbol=None, intermediate_symbol='END_OF_HYP',
                end_symbol='END_OF_SECTION')
        hyps_graph_structure = TreeInformation(hyps,
                start_symbol=None, intermediate_symbol='END_OF_HYP',
                end_symbol='END_OF_SECTION')
        in_string, structured_data = data_utils_new.merge_graph_structures_new([statement_graph_structure, hyps_graph_structure])
        tokens = [config.encode[t] for t in in_string]
        trees = torch.Tensor(structured_data).to(self.args.device)
        tokens = torch.LongTensor(tokens).to(self.args.device)
        return tokens, trees

    def payout(self, tree, context):
        """Score `tree` in `context` with the torch payout model; returns a
        python float."""
        hyps = context.hyps #[h.tree for h in context.hyps if h.type=='e']
        #f = None
        #statement = tree
        #random_replacement_dict = self.payout_config.lm.random_replacement_dict_f(f=f)
        #statement = statement.copy().replace_values(random_replacement_dict)
        #hyps = [h.copy().replace_values(random_replacement_dict) for h in hyps]
        #statement_graph_structure = TreeInformation([statement],
        #        start_symbol=None, intermediate_symbol='END_OF_HYP',
        #        end_symbol='END_OF_SECTION')
        #hyps_graph_structure = TreeInformation(hyps,
        #        start_symbol=None, intermediate_symbol='END_OF_HYP',
        #        end_symbol='END_OF_SECTION')
        #in_string, structured_data = data_utils_new.merge_graph_structures_new([statement_graph_structure, hyps_graph_structure])
        #tokens = [self.payout_config.encode[t] for t in in_string]
        #tokens, trees = payout_model_run.get_input(tree, context)
        #print (tokens)
        #print (structured_data)
        #trees = torch.Tensor(structured_data).to(self.args.device)
        #tokens = torch.LongTensor(tokens).to(self.args.device)
        #print (tokens)
        #print (trees)
        tokens, trees = self.rename_var(tree, hyps, None, self.config)
        with torch.no_grad():
            score = self.payout_model.forward(([tokens], [trees], None))
        #print ('payout', tokens.shape, trees.shape, score.shape)
        return score.item()
        #return payout_model_run.get_payout([self.payout_var]*PAYOUT_ENSEMBLE, tree, context)

    def initialize_payout(self, context):
        """Hook for per-context payout setup; currently a no-op."""
        #context.difficulty = self.payout(context.tree, context)
        pass

    def get_payout(self, tree, context):
        ''' note: the test dataset had the following histogram for delta,
        [ 0.05543478,  0.01594203,  0.01376812,  0.00797101,  0.00398551,
        0.00144928,  0.00144928] using bin sizes of 0.5, i.e. 0-0.5, 0.5-1,...
        Returns the payout score for `tree` in `context`; legacy models get
        a sigmoid squashing, the torch model's output is used directly.
        '''
        # TODO tokenization
        if self.args.no_use_torch:
            score = payout_model_run.get_payout([self.payout_var]*PAYOUT_ENSEMBLE, tree, context)
            # Squash the raw score into (0, 1) with a sigmoid.
            score = np.exp(score)/(1.0+np.exp(score))
        else:
            score = self.payout(tree, context)
        #print ('payout', score)
        return score
        #print 'getting payout'
        # return difficulty
        # delta = (context.difficulty - difficulty) * PAYOUT_SCALE
        # delta = (difficulty) * PAYOUT_SCALE
        # return delta
        #return np.exp(include_score)/(1.0+np.exp(include_score))

    def props_torch(self, tree, context):
        """Score all viable propositions against the goal with the torch
        relevance model.

        Returns (labels, scores) where scores are shifted so max == 0.
        """
        # TODO tokenization
        #print ('props_torch')
        hyps = context.hyps
        #statement = tree
        #f = None
        #random_replacement_dict = self.pred_config.lm.random_replacement_dict_f(f=f)
        #statement = statement.copy().replace_values(random_replacement_dict)
        #hyps = [h.tree.copy().replace_values(random_replacement_dict) for h in hyps if h.type=='e']
        tokens, trees = self.rename_var(tree, hyps, None, self.config)
        #print ('props', tokens.shape, trees.shape)
        with torch.no_grad():
            if self.args.stat_model:
                g_vec = self.pred_model.embed(tokens).view(1,-1)
            else:
                g_vec = self.pred_model.embed([tokens], [trees], _type='g')
        #print (g_vec.shape)
        # get visible props
        labels = self.lm.searcher.search(tree, context, max_proposition=context.number, vclass='|-')
        for label in DISALLOWED_PROPS:
            if label in labels:
                labels.remove(label)
        prop_nums = torch.LongTensor([self.config.lm.database.propositions[label].number for label in labels]).to(self.args.device)
        #print(prop_nums.shape)
        p_vec = self.pred_database[prop_nums]
        #print (p_vec.shape)
        # score
        with torch.no_grad():
            score = self.pred_model.biln(g_vec, p_vec).view(-1)
        #print (score.shape)
        # Shift so the best proposition scores 0 (relative log-scores).
        score -= score.max()
        #print (score)
        return labels, score.cpu().numpy()

    def props_holophrasm(self, tree, context):
        """Legacy relevance scoring: dot the cached proposition matrix with
        the goal vector. Returns (labels, rescaled logits)."""
        # returns the sorted list of propositions.
        vec = pred_model_run.get_main_vector([self.pred_var]*PRED_ENSEMBLE, tree, context)
        labels = self.lm.searcher.search(tree, context, max_proposition=context.number, vclass='|-')
        # we disallow these two particular propositions
        for label in DISALLOWED_PROPS:
            if label in labels:
                labels.remove(label)
        prop_nums = [self.lm.database.propositions[label].number for label in labels]
        submatrix = self.pred_database[np.array(prop_nums), :]
        logits = np.dot(submatrix, vec)
        # print labels, nn.log_softmax(logits)
        # input("Press Enter to continue...")
        return labels, logits - np.max(logits) # rescaled log-probability
        #return labels, nn.log_softmax(logits)
        # # we don't need to do the sorting here
        # prop_indices = np.argsort(logits)[::-1]
        # sorted_labels = [labels[index] for index in prop_indices]
        # probs = nn.log_softmax(logits)
        # probs = probs[prop_indices]
        # return sorted_labels, probs  # highest to lowest

    def props(self, tree, context):
        """Dispatch to the legacy or torch relevance scorer based on
        args.no_use_torch. Returns (labels, scores)."""
        if self.args.no_use_torch:
            labels, scores = self.props_holophrasm(tree, context)
            #print ('props_holophrasm')
            #print (labels)
            #print (scores)
        else:
            labels, scores = self.props_torch(tree, context)
            #print ('props_torch')
            #print (labels)
            #print (scores)
        return labels, scores

    def apply_prop(self, tree, context, prop_name, n=10, return_replacement_dict=False, step=None):
        """Apply proposition `prop_name` to `tree`, beam-searching over the
        unconstrained substitutions. Returns a list of (score, result)."""
        # shortcut if the unconstrainer arity is 0
        prop = self.lm.database.propositions[prop_name]
        if prop.unconstrained_arity() == 0:
            return [(0.0, self.lm.simple_apply_prop(tree, prop, context, vclass='|-'))]
        ''' in this case, params = tree, context, prop_name '''
        beam_searcher = BeamSearcher(self.bsi, (tree, context, prop_name, ALLOWED_CONSTRUCTORS, return_replacement_dict, step))
        out = beam_searcher.best(n, n, n) #(width, k, num_out) See notes regarding accuracy
        #print 'out', out
        return out

    def is_tautology(self, tree, context):
        '''
        Check whether the tree is tautologically true.
        We can do this *really* quickly, so we might as well.
        There's a little redundancy in that we calculate the
        viable props twice, but it's a pretty quick process.
        Returns None if not a tautology, otherwise returns a
        label for a proposition that proves it immediately.
        '''
        labels = self.lm.searcher.search(tree, context, max_proposition=context.number, vclass='|-')
        tauts = set(labels).intersection(self.lm.tautologies)
        if len(tauts)==0:
            return None
        else:
            return tauts.pop()
|
# Demo: install a pre-downloaded wheel into a private directory with
# pylibimport and import it under a versioned name (side effects on disk).
import pylibimport
# Downloaded whl
filename = "./sub/import_dir/opencv_python-4.5.1.48-cp38-cp38-win_amd64.whl"
# Importer that unpacks/installs wheels under ./sub/target_dir.
imp = pylibimport.VersionImporter(install_dir='./sub/target_dir')
# imp.install(filename, 'cv2', '4.5.1') # Install with name cv2
# cv2_4_5_1 = imp.import_module('cv2', '4.5.1')
# Use import chain if import is different from name ('cv2.other' same as "import cv2.other")
cv2_4_5_1 = imp.install(filename, 'opencv', '4.5.1', import_chain='cv2') # Optional name and version with whl file.
# Smoke test: show the attributes of the freshly imported module.
print(dir(cv2_4_5_1))
|
#!/usr/bin/env python3
"""
Copyright 2019 Johns Hopkins University (Author: Phani Sankar Nidadavolu)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import print_function
import os
import sys
import argparse
from collections import OrderedDict
from scipy.io import wavfile
from numpy import linalg as LA
import numpy as np
def get_args():
    """Build the argument parser and parse the process command line.

    Positional arguments: data_dir, rir2rt60_map, utt2reverbinfo_file.
    """
    parser = argparse.ArgumentParser('This script maps each simulated utterances to '
                                     'its corresponding rt60')
    # All three arguments are positional string paths.
    for arg_name in ('data_dir', 'rir2rt60_map', 'utt2reverbinfo_file'):
        parser.add_argument(arg_name, type=str)
    return parser.parse_args()
def check_args(args):
    """Validate the parsed arguments and attach derived file paths.

    Ensures args.data_dir exists, derives args.wav_scp and args.utt2uniq
    from it, and verifies that both files plus args.rir2rt60_map exist.
    Returns the (mutated) args namespace; raises ValueError otherwise.
    """
    if not os.path.isdir(args.data_dir):
        raise ValueError('inp data dir {d} does not exist'.format(d=args.data_dir))
    args.wav_scp = '{d}/wav.scp'.format(d=args.data_dir)
    args.utt2uniq = '{d}/utt2uniq'.format(d=args.data_dir)
    # Each required file paired with its error template and placeholder key.
    required_files = (
        (args.wav_scp, 'File provided wav scp {w} does not exist', 'w'),
        (args.utt2uniq, 'File provided utt2uniq {w} does not exist', 'w'),
        (args.rir2rt60_map, 'File provided rt60 info file {r} does not exist', 'r'),
    )
    for path, template, key in required_files:
        if not os.path.isfile(path):
            raise ValueError(template.format(**{key: path}))
    return args
def map_rirs_to_rt60s(ifile):
    """Parse the rir2rt60 map file into a dict.

    Each line is expected to look like: ``<rir-id> <room-id> <rt60> ...``.
    Returns a dict mapping rir-id -> rt60 (kept as a string).
    Blank lines are skipped (the previous version crashed on them), and the
    file is streamed instead of being read whole into memory.
    """
    ririd_to_rt60 = {}
    with open(ifile) as f:
        for line in f:
            fields = line.strip().split()
            if not fields:
                continue  # tolerate blank lines
            # fields[1] is the room id; it is not needed for this mapping.
            ririd_to_rt60[fields[0]] = fields[2]
    return ririd_to_rt60
def map_utt_to_uniq(ifile):
    """Parse a Kaldi utt2uniq file into a dict.

    Each line is ``<utt-id> <uniq-id>``. Returns a dict mapping
    utt-id -> uniq-id. Blank lines are skipped (the previous version raised
    IndexError on them), and the file is streamed line by line.
    """
    utt2uniq = {}
    with open(ifile) as f:
        for line in f:
            fields = line.strip().split()
            if not fields:
                continue  # tolerate blank lines
            utt2uniq[fields[0]] = fields[1]
    return utt2uniq
def map_utts_to_room_ids(ifile):
    """Extract per-utterance RIR room information from a Kaldi wav.scp.

    Example line:
    100304-sre06-kacg-a-reverb sph2pipe -f wav -p -c 1 /export/.../kacg.sph | wav-reverberate --shift-output=true --impulse-response="sox RIRS_NOISES/simulated_rirs/smallroom/Room200/Room200-00049.wav -r 8000 -t wav - |" - - |

    Returns an OrderedDict mapping utt-id -> dict with keys
    'rir' (RIR wav path), 'roomtype' ('smallroom'/'mediumroom'/'largeroom'),
    'roomid' (e.g. 'small-Room200') and 'ririd' (e.g. 'small-Room200-00049').

    Fixes over the previous version: a missing RIRS_NOISES token now raises
    instead of silently reusing the previous line's rir (or hitting a
    NameError on the first line), and the "unknown room type" error reports
    the room type rather than the room id.
    """
    utt_to_room_info = OrderedDict()
    with open(ifile) as f:
        content = f.read().splitlines()
    for line in content:
        line_parsed = line.strip().split()
        if not line_parsed:
            continue  # tolerate blank lines
        utt = line_parsed[0]
        # Locate the token that is the RIR wav path inside the sox command.
        rir = None
        for key in line_parsed[1:]:
            if "RIRS_NOISES" in key:
                rir = key
                break
        if rir is None:
            raise ValueError('no RIRS_NOISES rir found for utt {u}'.format(u=utt))
        utt_to_room_info[utt] = {'rir': rir}
        # Path layout: RIRS_NOISES/simulated_rirs/<roomtype>/<RoomId>/<RirId>.wav
        rir_parsed = rir.split('/')
        room_type = rir_parsed[2]
        room_id = rir_parsed[-2]
        rir_id = rir_parsed[-1].split('.wav')[0]
        if 'small' in room_type:
            kwrd = 'small'
        elif 'medium' in room_type:
            kwrd = 'medium'
        elif 'large' in room_type:
            kwrd = 'large'
        else:
            raise ValueError('unknown room type {r} found'.format(r=room_type))
        utt_to_room_info[utt]['roomtype'] = kwrd + 'room'
        utt_to_room_info[utt]['roomid'] = kwrd + '-' + room_id
        utt_to_room_info[utt]['ririd'] = kwrd + '-' + rir_id
    return utt_to_room_info
def get_h_n_direct_and_n_direct_from_rir(rir, normalize_rir=True):
    """Return (peak amplitude, peak sample index) of a room impulse response.

    rir: path to a wav file holding the impulse response.
    normalize_rir: when True, scale the signal to unit L2 norm first, so the
        returned peak is the direct-path coefficient of the normalized RIR.
    """
    _, samples = wavfile.read(rir)
    if normalize_rir:
        samples = samples / LA.norm(samples)
    peak_index = np.argmax(samples)
    return samples[peak_index], peak_index
def main():
    """Create the utt2reverbinfo file mapping each utterance to its
    uniq id, room id, rt60 and direct-path peak info of its RIR."""
    args = check_args(get_args())
    ririd_to_rt60 = map_rirs_to_rt60s(args.rir2rt60_map)
    utts_to_roominfo = map_utts_to_room_ids(args.wav_scp)
    utt2uniq = map_utt_to_uniq(args.utt2uniq)
    print('\nCreating utt2reverbinfo file {i}'.format(i=args.utt2reverbinfo_file))
    with open(args.utt2reverbinfo_file, 'w') as out:
        for utt, info in utts_to_roominfo.items():
            # Peak amplitude and sample index of the (normalized) RIR.
            h_n_direct, n_direct = get_h_n_direct_and_n_direct_from_rir(info['rir'])
            #f.write('{utt} {rt} {rid}\n'.format(utt=utt, rt=rt60, rid=roomid))
            out.write('{utt} {uniq} {roomid} {rt} {h_n} {n}\n'.format(
                utt=utt, roomid=info['roomid'], rt=ririd_to_rt60[info['ririd']],
                uniq=utt2uniq[utt], h_n=h_n_direct, n=n_direct))
    print('Successfully created utt2reverbinfo file: {f}'.format(f=args.utt2reverbinfo_file))
# Script entry point.
if __name__ == '__main__':
    main()
|
import pytest
from test_tube.argparse_hopt import HyperOptArgumentParser
from test_tube.hpc import SlurmCluster
def test_slurm_time_to_seconds():
    """slurm_time_to_seconds should handle D-HH:MM:SS down to bare seconds."""
    parser = HyperOptArgumentParser()
    hparams = parser.parse_args()
    cluster = SlurmCluster(log_path='/home/travis', hyperparam_optimizer=hparams)
    # (slurm time spec, expected seconds)
    cases = (
        ('15:00', 900),
        ('1-12:20:12', 130812),
        ('1:20:12', 4812),
        ('00:20:12', 1212),
        ('00:00:12', 12),
        ('12', 12),
    )
    for spec, expected in cases:
        assert cluster.slurm_time_to_seconds(spec) == expected
# Allow running this test module directly without the pytest CLI.
if __name__ == '__main__':
    pytest.main([__file__])
|
# -*- coding: utf-8 -*-
'''
@author: kebo
@contact: kebo0912@outlook.com
@version: 1.0
@file: trainer.py
@time: 2021/05/12 01:09:57
这一行开始写关于本文件的说明与解释
'''
import tensorflow as tf
from functools import wraps
from cybo.data.dataloader import Dataloader
from cybo.models.model import Model
from cybo.training.utils import evaluate
from cybo.training.tensorboard import TensorBoard, Mode
# Module-level eager flag; read when decorated functions are defined.
RUN_EAGER = False


def debug(run_eager: bool = False):
    """Decorator factory: compile the wrapped function with ``tf.function``
    (graph mode) unless ``run_eager`` is true, in which case the function is
    called eagerly for easier debugging."""
    def wrapper(func):
        @wraps(func)
        @tf.function()
        def compiled(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        def eager(*args, **kwargs):
            return func(*args, **kwargs)

        return eager if run_eager else compiled
    return wrapper
class Trainer():
    """Checkpointed training loop with early stopping and optional TensorBoard.

    Runs up to `epochs` passes over `training_dataloader`, evaluates on
    `validation_dataloader` after each epoch, saves a checkpoint whenever the
    monitored metric improves, and stops early after `patience` epochs
    without improvement.
    """
    def __init__(self,
                 model: Model,
                 training_dataloader: Dataloader,
                 optimizer: tf.keras.optimizers.Optimizer,
                 epochs: int,
                 checkpoint_path: str,
                 validation_dataloader: Dataloader = None,
                 patience: int = 5,
                 max_to_keep: int = 3,
                 monitor: str = "acc",
                 use_tensorboard: bool = False,
                 logs_dir: str = "logs/",
                 run_eager: bool = False
                 ) -> None:
        self.model = model
        self.training_dataloader = training_dataloader
        # Fall back to evaluating on the training data when no validation
        # dataloader is supplied.
        self.validation_dataloader = validation_dataloader or training_dataloader
        self.optimizer = optimizer
        self.epochs = epochs
        # Running means of the per-batch losses used for progress reporting.
        self.loss_metric = tf.keras.metrics.Mean(name="loss")
        self.val_loss_metric = tf.keras.metrics.Mean(name="val_loss")
        self.checkpoint_path = checkpoint_path
        self.max_to_keep = max_to_keep
        self.monitor = monitor
        self.patience = patience
        self.use_tensorboard = use_tensorboard
        if self.use_tensorboard:
            self.tensorboard = TensorBoard(logs_dir=logs_dir)
        # NOTE(review): `train_step` is decorated with
        # `@debug(run_eager=RUN_EAGER)` when the class body executes, i.e.
        # before any Trainer is constructed, so setting the global here has
        # no effect on whether train_step runs eagerly — confirm intent.
        global RUN_EAGER
        RUN_EAGER = run_eager
    def train(self):
        """Run the full training loop, restoring from the latest checkpoint
        if one exists, with per-epoch validation and early stopping."""
        ckpt = tf.train.Checkpoint(
            model=self.model, optimizer=self.optimizer, epoch=tf.Variable(1))
        ckpt_manager = tf.train.CheckpointManager(
            ckpt, self.checkpoint_path, max_to_keep=self.max_to_keep)
        if ckpt_manager.latest_checkpoint:
            ckpt.restore(ckpt_manager.latest_checkpoint)
            tf.print("restore from latest checkpoint succeed !")
        best_acc = 0.0
        early_stop_epochs = 0
        # Resume epoch counting from the restored checkpoint's epoch.
        for epoch in tf.range(ckpt.epoch, self.epochs+1):
            tf.print(f"Epoch {epoch}/{self.epochs}:")
            # Update the epoch value stored in the checkpoint.
            ckpt.epoch.assign_add(1)
            metrics = self.model.get_metrics(reset=True, training=True)
            self.loss_metric.reset_states()
            bar = tf.keras.utils.Progbar(
                len(self.training_dataloader),
                unit_name="sample",
                stateful_metrics=["loss"] + list(metrics.keys()))
            for batch in self.training_dataloader:
                self.train_step(batch)
                log_values = [("loss", self.loss_metric.result().numpy())]
                log_values.extend(
                    [(k, v) for k, v in self.model.get_metrics(
                        training=True).items()])
                bar.add(self.training_dataloader.batch_size, log_values)
            evaluate_metrics = evaluate(
                model=self.model, dataloader=self.validation_dataloader)
            tf.print("validation result - " +
                     " - ".join([f"{k}: {v}" for k, v in evaluate_metrics.items()]))
            if self.use_tensorboard:
                self.tensorboard.write_logs(
                    Mode.train.value, log_values, epoch)
                self.tensorboard.write_logs(
                    Mode.evaluate.value,
                    [(k, v) for k, v in evaluate_metrics.items()],
                    epoch)
            # A missing monitor metric defaults to 1.0, which always counts
            # as an improvement.
            if evaluate_metrics.get(self.monitor, 1.0) >= best_acc:
                ckpt_save_path = ckpt_manager.save()
                tf.print(
                    f"Saving checkpoint for epoch {epoch} at {ckpt_save_path}")
                best_acc = evaluate_metrics.get(self.monitor, 1.0)
                early_stop_epochs = 0
            else:
                tf.print(f"validation {self.monitor} is not improved")
                early_stop_epochs += 1
                if early_stop_epochs >= self.patience:
                    tf.print(f"Early stopping with patience {self.patience}")
                    break
        tf.print("Training completed !")
    @debug(run_eager=RUN_EAGER)
    def train_step(self, batch):
        """One optimization step: forward pass, gradient computation and
        optimizer update; accumulates the batch loss into loss_metric."""
        with tf.GradientTape() as tape:
            output_dict = self.model(**batch, training=True)
        gradients = tape.gradient(
            output_dict["loss"],
            self.model.trainable_variables)
        self.optimizer.apply_gradients(
            zip(gradients, self.model.trainable_variables))
        self.loss_metric.update_state(output_dict["loss"])
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from backbone import (res32_cifar,res32_cifar_group, res50,res50_group, res10, res10_group, res152,res152_group)
from modules import GAP, FCNorm, FCGroupNorm, Identity, SEN, GMP, LWS, LWS_bias
import copy
import numpy as np
import cv2
class Network(nn.Module):
    """Classification network = backbone + pooling module + classifier head.

    The backbone class is chosen by name from the config
    (cfg.BACKBONE.TYPE, resolved via eval over the imported backbones), the
    pooling module and classifier via _get_module/_get_classifer. forward()
    dispatches on keyword flags to expose intermediate features and
    mixup-style blending hooks.
    """
    def __init__(self, cfg, groups, mode="train", num_classes=1000):
        super(Network, self).__init__()
        # Load pretrained backbone weights only for fresh training runs that
        # specify a pretrained model and are not resuming a checkpoint.
        pretrain = (
            True
            if mode == "train"
            and cfg.RESUME_MODEL == ""
            and cfg.BACKBONE.PRETRAINED_MODEL != ""
            else False
        )
        self.num_classes = num_classes
        self.cfg = cfg
        self.group = groups
        # cfg.BACKBONE.TYPE must name one of the imported backbone factories.
        self.backbone = eval(self.cfg.BACKBONE.TYPE)(
            self.cfg,
            pretrain=pretrain,
            pretrained_model=cfg.BACKBONE.PRETRAINED_MODEL,
            last_layer_stride=2,
        )
        self.module = self._get_module()
        self.classifier = self._get_classifer()
    def forward(self, x, **kwargs):
        """Dispatch on kwargs:

        - feature_flag/feature_cb/feature_rb: return pooled backbone features.
        - classifier_flag: treat x as features, return classifier logits.
        - feature_maps_flag: return raw backbone feature maps.
        - layer+index (+coef): blend activations at the given layer between
          the batch and its permutation x[index] (mixup-style), then finish
          the forward pass.
        - otherwise: full forward pass backbone -> module -> classifier.
        """
        # print(x[0].shape)
        if "feature_flag" in kwargs or "feature_cb" in kwargs or "feature_rb" in kwargs:
            return self.extract_feature(x, **kwargs)
        elif "classifier_flag" in kwargs:
            return self.classifier(x)
        elif 'feature_maps_flag' in kwargs:
            return self.extract_feature_maps(x)
        elif 'layer' in kwargs and 'index' in kwargs:
            if kwargs['layer'] in ['layer1', 'layer2', 'layer3']:
                # Blending happens inside the backbone at the named layer.
                x = self.backbone.forward(x, index=kwargs['index'], layer=kwargs['layer'], coef=kwargs['coef'])
            else:
                x = self.backbone(x)
            x = self.module(x)
            if kwargs['layer'] == 'pool':
                x = kwargs['coef']*x+(1-kwargs['coef'])*x[kwargs['index']]
            x = x.view(x.shape[0], -1)
            x = self.classifier(x)
            if kwargs['layer'] == 'fc':
                x = kwargs['coef']*x + (1-kwargs['coef'])*x[kwargs['index']]
            return x
        x = self.backbone(x)
        x = self.module(x)
        x = x.view(x.shape[0], -1)
        x = self.classifier(x)
        return x
    def get_backbone_layer_info(self):
        """Return (number of stages, blocks per stage) for the configured
        backbone family."""
        if "cifar" in self.cfg.BACKBONE.TYPE:
            layers = 3
            blocks_info = [5, 5, 5]
        elif 'res10' in self.cfg.BACKBONE.TYPE:
            layers = 4
            blocks_info = [1, 1, 1, 1]
        else:
            # NOTE(review): assumes a ResNet-50-style layout for every other
            # backbone type — confirm for res152.
            layers = 4
            blocks_info = [3, 4, 6, 3]
        return layers, blocks_info
    def extract_feature(self, x, **kwargs):
        """Return flattened pooled features (backbone -> module -> flatten)."""
        x = self.backbone(x)
        x = self.module(x)
        x = x.view(x.shape[0], -1)
        return x
    def extract_feature_maps(self, x):
        """Return the raw backbone feature maps (no pooling)."""
        x = self.backbone(x)
        return x
    def extract_feature_maps_multi(self, x):
        """Same as extract_feature_maps; kept for API compatibility with the
        multi-branch network variant."""
        x = self.backbone(x)
        return x
    def freeze_backbone(self):
        """Disable gradient updates for all backbone parameters."""
        print("Freezing backbone .......")
        for p in self.backbone.parameters():
            p.requires_grad = False
    def load_backbone_model(self, backbone_path=""):
        """Load weights for the backbone only."""
        self.backbone.load_model(backbone_path)
        print("Backbone model has been loaded...")
    def load_model(self, model_path):
        """Load a full checkpoint, stripping any DataParallel 'module.'
        prefix from the state-dict keys."""
        pretrain_dict = torch.load(
            model_path, map_location="cuda"
        )
        pretrain_dict = pretrain_dict['state_dict'] if 'state_dict' in pretrain_dict else pretrain_dict
        model_dict = self.state_dict()
        from collections import OrderedDict
        new_dict = OrderedDict()
        for k, v in pretrain_dict.items():
            print(k)
            if k.startswith("module"):
                # Drop the leading 'module.' added by nn.DataParallel.
                new_dict[k[7:]] = v
            else:
                new_dict[k] = v
        model_dict.update(new_dict)
        self.load_state_dict(model_dict)
        print("All model has been loaded...")
    def get_fc(self, model_path):
        """Read the many/medium/few classifier weights and biases out of a
        grouped checkpoint.

        Returns ([w_many, w_medium, w_few], [b_many, b_medium, b_few]) as
        numpy arrays.
        """
        pretrain_dict = torch.load(
            model_path, map_location="cuda"
        )
        pretrain_dict = pretrain_dict['state_dict'] if 'state_dict' in pretrain_dict else pretrain_dict
        from collections import OrderedDict
        new_dict = OrderedDict()
        for k, v in pretrain_dict.items():
            if k.startswith("module"):
                new_dict[k[7:]] = v
            else:
                new_dict[k] = v
        fc_weight_many = pretrain_dict['module.classifier_many.weight'].cpu().numpy()
        fc_bias_many = pretrain_dict['module.classifier_many.bias'].cpu().numpy()
        fc_weight_medium = pretrain_dict['module.classifier_medium.weight'].cpu().numpy()
        fc_bias_medium = pretrain_dict['module.classifier_medium.bias'].cpu().numpy()
        fc_weight_few = pretrain_dict['module.classifier_few.weight'].cpu().numpy()
        fc_bias_few = pretrain_dict['module.classifier_few.bias'].cpu().numpy()
        return [fc_weight_many, fc_weight_medium, fc_weight_few], [fc_bias_many, fc_bias_medium, fc_bias_few]
    def get_feature_length(self):
        """Return the flattened feature dimension for the configured backbone."""
        if "cifar" in self.cfg.BACKBONE.TYPE:
            num_features = 64
        elif 'res10' in self.cfg.BACKBONE.TYPE:
            num_features = 512
        else:
            num_features = 2048
        return num_features
    def _get_module(self):
        """Build the pooling module named by cfg.MODULE.TYPE."""
        module_type = self.cfg.MODULE.TYPE
        if module_type == "GAP":
            module = GAP()
        elif module_type == "GMP":
            module = GMP()
        elif module_type == "Identity":
            module= Identity()
        elif module_type == "SEN":
            module= SEN(c=64)
        else:
            raise NotImplementedError
        return module
    def _get_classifer(self):
        """Build the classifier head named by cfg.CLASSIFIER.TYPE."""
        bias_flag = self.cfg.CLASSIFIER.BIAS
        num_features = self.get_feature_length()
        if self.cfg.CLASSIFIER.TYPE == "FCNorm":
            classifier = FCNorm(num_features, self.num_classes)
        elif self.cfg.CLASSIFIER.TYPE == "FC":
            classifier = nn.Linear(num_features, self.num_classes, bias=bias_flag)
        elif self.cfg.CLASSIFIER.TYPE == "FCGroupNorm":
            classifier = FCGroupNorm(num_features, self.num_classes, self.group)
        else:
            raise NotImplementedError
        return classifier
    def cam_params_reset(self):
        """Cache the classifier weight matrix as numpy for CAM computation."""
        self.classifier_weights = np.squeeze(list(self.classifier.parameters())[0].detach().cpu().numpy())
    def get_CAM_with_groundtruth(self, image_idxs, dataset, size):
        """Compute class-activation maps for the given dataset indices using
        each sample's ground-truth label; returns uint8 maps resized to
        `size`. Requires cam_params_reset() to have been called first."""
        ret_cam = []
        size_upsample = size
        for i in range(len(image_idxs)):
            idx = image_idxs[i]
            label = dataset.label_list[idx]
            self.eval()
            with torch.no_grad():
                img = dataset._get_trans_image(idx)
                feature_conv = self.forward(img.to('cuda'), feature_maps_flag=True).detach().cpu().numpy()
            b, c, h, w = feature_conv.shape
            assert b == 1
            feature_conv = feature_conv.reshape(c, h*w)
            # CAM = class weight vector dotted with the spatial feature maps.
            cam = self.classifier_weights[label].dot(feature_conv)
            del img
            del feature_conv
            cam = cam.reshape(h, w)
            # Normalize to [0, 255] for visualization.
            cam = cam - np.min(cam)
            cam_img = cam / np.max(cam)
            cam_img = np.uint8(255*cam_img)
            ret_cam.append(cv2.resize(cam_img, size_upsample))
        return ret_cam
class Network_Group(nn.Module):
def __init__(self, cfg, mode="train", num_classes=1000):
super(Network_Group, self).__init__()
pretrain = (
True
if mode == "train"
and cfg.RESUME_MODEL == ""
and cfg.BACKBONE.PRETRAINED_MODEL != ""
else False
)
self.num_classes = num_classes
self.cfg = cfg
self.backbone = eval(self.cfg.BACKBONE.TYPE)(
self.cfg,
pretrain=pretrain,
pretrained_model=cfg.BACKBONE.PRETRAINED_MODEL,
last_layer_stride=2,
)
self.module = self._get_module()
#self.gate = self._get_gate()
#self.classifier_many,self.classifier_medium,self.classifier_few,self.classifier_all = self._get_classifer()
self.classifier_many, self.classifier_medium, self.classifier_few = self._get_classifer()
def forward(self, x, **kwargs):
if "feature_flag" in kwargs or "feature_cb" in kwargs or "feature_rb" in kwargs:
return self.extract_feature(x, **kwargs)
elif "classifier_flag" in kwargs:
x_few = self.classifier_few(x[0])
x_medium = self.classifier_medium(x[1])
x_many = self.classifier_many(x[2])
x = [x_many, x_medium, x_few]
return x
elif 'feature_maps_flag' in kwargs:
return self.extract_feature_maps(x)
elif 'layer' in kwargs and 'index' in kwargs:
if kwargs['layer'] in ['layer1', 'layer2', 'layer3']:
x = self.backbone.forward(x, index=kwargs['index'], layer=kwargs['layer'], coef=kwargs['coef'])
else:
x = self.backbone(x)
x = self.module(x)
if kwargs['layer'] == 'pool':
x = kwargs['coef']*x+(1-kwargs['coef'])*x[kwargs['index']]
#x_all = self.classifier_many(x[3])
x_many =self.classifier_many(x[2])
x_medium = self.classifier_medium(x[1])
x_few = self.classifier_few(x[0])
x = [x_many, x_medium, x_few]
if kwargs['layer'] == 'fc':
x = kwargs['coef']*x + (1-kwargs['coef'])*x[kwargs['index']]
return x
x = self.backbone(x)
x_out = []
for branch in x:
branch = self.module(branch)
branch = branch.view(branch.shape[0], -1)
x_out.append(branch)
x_few = self.classifier_few(x_out[0])
x_medium = self.classifier_medium(x_out[1])
x_many = self.classifier_many(x_out[2])
x = [x_many, x_medium, x_few]
return x
def get_backbone_layer_info(self):
if "cifar" in self.cfg.BACKBONE.TYPE:
layers = 3
blocks_info = [5, 5, 5]
elif 'res10' in self.cfg.BACKBONE.TYPE:
layers = 4
blocks_info = [1, 1, 1, 1]
elif 'res50' in self.cfg.BACKBONE.TYPE:
layers = 4
blocks_info = [3, 4, 6, 3]
else:
layers = 4
blocks_info = [3, 8, 36, 3]
return layers, blocks_info
def extract_feature(self, x, **kwargs):
x = self.backbone(x)
x_out = []
for branch in x:
branch = self.module(branch)
branch = branch.view(branch.shape[0], -1)
x_out.append(branch)
return x_out
def freeze_backbone(self):
print("Freezing backbone .......")
for p in self.backbone.parameters():
p.requires_grad = False
    def load_backbone_model(self, backbone_path=""):
        """Load pretrained weights into the backbone only (heads untouched)."""
        self.backbone.load_model(backbone_path)
        print("Backbone model has been loaded...")
def load_model(self, model_path):
pretrain_dict = torch.load(
model_path, map_location="cuda"
)
pretrain_dict = pretrain_dict['state_dict'] if 'state_dict' in pretrain_dict else pretrain_dict
model_dict = self.state_dict()
from collections import OrderedDict
new_dict = OrderedDict()
for k, v in pretrain_dict.items():
print(k)
if k.startswith("module"):
new_dict[k[7:]] = v
else:
new_dict[k] = v
model_dict.update(new_dict)
self.load_state_dict(model_dict)
print("All model has been loaded...")
def get_fc(self, model_path):
pretrain_dict = torch.load(
model_path, map_location="cuda"
)
pretrain_dict = pretrain_dict['state_dict'] if 'state_dict' in pretrain_dict else pretrain_dict
from collections import OrderedDict
new_dict = OrderedDict()
for k, v in pretrain_dict.items():
print(k)
if k.startswith("module"):
new_dict[k[7:]] = v
else:
new_dict[k] = v
#fc_weight_all = pretrain_dict['module.classifier_all.weight'].cpu().numpy()
# fc_bias_all = pretrain_dict['module.classifier_all.bias'].cpu().numpy()
fc_weight_many = pretrain_dict['module.classifier_many.fc.weight'].cpu().numpy()
fc_bias_many = pretrain_dict['module.classifier_many.fc.bias'].cpu().numpy()
fc_scales_many = pretrain_dict['module.classifier_many.scales'].cpu().numpy()
fc_weight_medium = pretrain_dict['module.classifier_medium.fc.weight'].cpu().numpy()
fc_bias_medium = pretrain_dict['module.classifier_medium.fc.bias'].cpu().numpy()
fc_scales_medium = pretrain_dict['module.classifier_medium.scales'].cpu().numpy()
fc_weight_few = pretrain_dict['module.classifier_few.fc.weight'].cpu().numpy()
fc_bias_few = pretrain_dict['module.classifier_few.fc.bias'].cpu().numpy()
fc_scales_few = pretrain_dict['module.classifier_few.scales'].cpu().numpy()
return [fc_weight_many,fc_weight_medium,fc_weight_few ] ,[fc_bias_many,fc_bias_medium,fc_bias_few],[fc_scales_many,fc_scales_medium,fc_scales_few]#
def get_feature_length(self):
if "cifar" in self.cfg.BACKBONE.TYPE:
num_features = 64
elif 'res10' in self.cfg.BACKBONE.TYPE:
num_features = 512
else:
num_features = 2048
return num_features
def _get_module(self):
module_type = self.cfg.MODULE.TYPE
if module_type == "GAP":
module = GAP()
elif module_type == "Identity":
module= Identity()
elif module_type == "SEN":
module= SEN(c=64)
else:
raise NotImplementedError
return module
    def _get_gate(self):
        # 3-way gating head over 64-d features (currently unused; the call in
        # __init__ is commented out).
        gate = nn.Linear(64, 3, bias=True)
        return gate
def _get_classifer(self):
bias_flag = self.cfg.CLASSIFIER.BIAS
num_features = self.get_feature_length()
if self.cfg.CLASSIFIER.TYPE == "FCNorm":
classifier_many = FCNorm(num_features, self.num_classes)
classifier_medium = FCNorm(num_features, self.num_classes)
classifier_few = FCNorm(num_features, self.num_classes)
elif self.cfg.CLASSIFIER.TYPE == "FC":
classifier_many = nn.Linear(num_features, self.num_classes , bias=bias_flag)
classifier_medium = nn.Linear(num_features, self.num_classes, bias=bias_flag)
classifier_few = nn.Linear(num_features, self.num_classes, bias=bias_flag)
elif self.cfg.CLASSIFIER.TYPE == "LWS":
classifier_many = LWS(num_features, self.num_classes, bias=bias_flag)
classifier_medium = LWS(num_features, self.num_classes, bias=bias_flag)
classifier_few = LWS(num_features, self.num_classes, bias=bias_flag)
elif self.cfg.CLASSIFIER.TYPE == "LWS_bias":
classifier_many = LWS_bias(num_features, self.num_classes, bias=bias_flag)
classifier_medium = LWS_bias(num_features, self.num_classes, bias=bias_flag)
classifier_few = LWS_bias(num_features, self.num_classes, bias=bias_flag)
else:
raise NotImplementedError
#return classifier_many, classifier_medium, classifier_few, classifier_all
return classifier_many, classifier_medium, classifier_few
def _get_branch(self):
num_features = self.get_feature_length()
branch_many = SubGroup(num_features)
branch_medium = SubGroup(num_features)
branch_few = SubGroup(num_features)
return branch_many, branch_medium, branch_few
    def cam_params_reset(self):
        # Cache the classifier's first parameter tensor as a numpy array,
        # presumably for CAM visualisation.
        # NOTE(review): references self.classifier, which this class never
        # assigns (only classifier_many/medium/few exist) -- verify callers.
        self.classifier_weights = np.squeeze(list(self.classifier.parameters())[0].detach().cpu().numpy())
class SubGroup(nn.Module):
    """Three stacked kernel-size-1 Conv1d layers over `num_features` channels.

    Acts as a per-branch feature transform; input and output shapes are
    (batch, num_features, length).
    """
    def __init__(self, num_features):
        super(SubGroup, self).__init__()
        self.feat1 = nn.Conv1d(in_channels=num_features, out_channels=num_features, kernel_size=1)
        self.feat2 = nn.Conv1d(in_channels=num_features, out_channels=num_features, kernel_size=1)
        self.feat3 = nn.Conv1d(in_channels=num_features, out_channels=num_features, kernel_size=1)
        #self.init_weights(self.feat1)
        #self.init_weights(self.feat2)
        #self.init_weights(self.feat3)
    def init_weights(self, m):
        """Xavier-initialise m's weight and set its bias to 0.01."""
        # BUG FIX: torch.nn.init.xavier_uniform is deprecated; use the
        # in-place variant xavier_uniform_.
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
    def forward(self, x):
        """Apply the three 1x1 convolutions in sequence (no nonlinearity)."""
        x = self.feat1(x)
        x = self.feat2(x)
        x = self.feat3(x)
        return x
from enum import Enum
class Feature(str, Enum):
    """Summary statistics that can be extracted from a signal.

    Inherits from ``str`` so members compare equal to (and serialize as)
    their string values.
    """

    MINIMUM = "MINIMUM"
    MAXIMUM = "MAXIMUM"
    VARIANCE = "VARIANCE"
    ABS_ENERGY = "ABS_ENERGY"
    MEAN = "MEAN"
    MEDIAN = "MEDIAN"
    SKEWNESS = "SKEWNESS"
    KURTOSIS = "KURTOSIS"
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold

# Build the station list, refresh the latest water levels, then print the
# stations whose relative water level exceeds 0.8.
stations = build_station_list()
update_water_levels(stations)
print(stations_level_over_threshold(stations, 0.8))
|
import re
from nltk.util import ngrams, pad_sequence, everygrams
from nltk.tokenize import word_tokenize
from nltk.lm import MLE, WittenBellInterpolated
import numpy as np
import plotly.graph_objects as go
from scipy.ndimage import gaussian_filter

# Training data file (set to a plain-text corpus path before running)
train_data_file = ""

# read training data
with open(train_data_file) as f:
    train_text = f.read().lower()

# apply preprocessing (remove text inside square and curly brackets and rem punc)
train_text = re.sub(r"\[.*\]|\{.*\}", "", train_text)
train_text = re.sub(r'[^\w\s]', "", train_text)

# set ngram number
n = 4

# pad the text and tokenize
training_data = list(pad_sequence(word_tokenize(train_text), n,
                                  pad_left=True,
                                  left_pad_symbol="<s>"))

# generate ngrams
# BUG FIX: the original stored these in a variable named `ngrams`, shadowing
# the nltk.util.ngrams function imported above.
train_ngrams = list(everygrams(training_data, max_len=n))
print("Number of ngrams:", len(train_ngrams))

# build ngram language model (Witten-Bell interpolated smoothing)
model = WittenBellInterpolated(n)
model.fit([train_ngrams], vocabulary_text=training_data)
print(model.vocab)

# testing data file (set before running)
test_data_file = ""

# Read testing data
with open(test_data_file) as f:
    test_text = f.read().lower()
test_text = re.sub(r'[^\w\s]', "", test_text)

# Tokenize and pad the text
testing_data = list(pad_sequence(word_tokenize(test_text), n,
                                 pad_left=True,
                                 left_pad_symbol="<s>"))
print("Length of test data:", len(testing_data))

# score each token given its (n-1)-token history
scores = []
for i, item in enumerate(testing_data[n-1:]):
    s = model.score(item, testing_data[i:i+n-1])
    scores.append(s)

scores_np = np.array(scores)

# set width and height of the heatmap grid
width = 8
height = np.ceil(len(testing_data)/width).astype("int32")
print("Width, Height:", width, ",", height)

# copy scores to rectangular blank array (zero-padded at the end)
a = np.zeros(width*height)
a[:len(scores_np)] = scores_np
diff = len(a) - len(scores_np)

# apply gaussian smoothing for aesthetics
a = gaussian_filter(a, sigma=1.0)

# reshape to fit rectangle
a = a.reshape(-1, width)

# format labels
# NOTE(review): labels start at token n-1 while `a` is padded relative to the
# full token count; row counts can disagree for some lengths -- verify.
labels = [" ".join(testing_data[i:i+width]) for i in range(n-1, len(testing_data), width)]
labels_individual = [x.split() for x in labels]
labels_individual[-1] += [""]*diff
labels = [f"{x:60.60}" for x in labels]

# create heatmap
fig = go.Figure(data=go.Heatmap(
                z=a, x0=0, dx=1,
                y=labels, zmin=0, zmax=1,
                customdata=labels_individual,
                hovertemplate='%{customdata} <br><b>Score:%{z:.3f}<extra></extra>',
                colorscale="burg"))
fig.update_layout({"height":height*28, "width":1000, "font":{"family":"Courier New"}})
fig['layout']['yaxis']['autorange'] = "reversed"
fig.show()
|
import csv
from ..base import BaseDataset
from ..utils import image_loader
from .schemas import MultiClassClassificationDatasetSchema
"""
The format of the multiclass classification dataset is:
image_path1,label1
image_path2,label2
...
"""
class MultiClassClassificationDataset(BaseDataset):
    """Multi-class classification dataset backed by a CSV of ``image_path,label`` rows."""

    schema = MultiClassClassificationDatasetSchema

    def __init__(self, config):
        # now call the constructor to validate the schema
        BaseDataset.__init__(self, config)
        # load the data
        self.data = self.load_dataset(self.config.csv_file_path)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        image_path, target = self.data[index]
        # load image
        img = image_loader(image_path)
        # apply transformations
        if self.transform:
            img = self.transform(img)
        if self.target_transform:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return len(self.data)

    def get_labels(self):
        return self.labels

    def load_dataset(self, file_path):
        """Parse the CSV into a list of (image_path, label_index) tuples.

        Raises:
            ValueError: if self.labels is empty or unset.
        """
        if not self.labels:
            raise ValueError(
                "You need to provide the list of labels for the dataset"
            )
        data = []
        if file_path:
            # newline="" is the documented way to open files for the csv module.
            with open(file_path, "r", newline="") as f:
                for row in csv.reader(f):
                    # Tolerate blank lines instead of crashing on row[0].
                    if not row:
                        continue
                    data.append((row[0], self.labels.index(row[1])))
        return data
|
''' nio_pe.py: A specialization of the PE class, for use with Nick's Accelerator
'''
from core.defines import Operator
from core.pe import PE
from core.pipeline import Stage
from core.messaging import Message
from core.utils import *
class NioPE(PE):
    """Processing element with a 3-stage pipeline: fetch -> execute -> acknowledge."""

    def __init__(self, system_clock_ref, message_router):
        PE.__init__(self, system_clock_ref, message_router)
        # _pipeline[0..2]: fetch, execute, acknowledge
        self._pipeline = [None for x in range(0,3)]
        self._pipeline[0] = FetchStage(self, self._message_router)
        self._pipeline[1] = ExecStage(self)
        self._pipeline[2] = AcknStage(self, self._message_router)
        self._stall = False

    def process(self):
        """Advance the pipeline one tick.

        While stalled, only the final (send) stage runs so the blocked
        acknowledgement can retry; nothing new is fetched or executed.
        """
        # If we are stalled, only process a send...
        if self._stall:
            # BUG FIX: was ``self.pipeline[-1]`` -- there is no ``pipeline``
            # attribute, so every stall raised AttributeError.
            self._pipeline[-1].process()
            # _num_stalls is presumably initialised by the PE base class --
            # TODO confirm.
            self._num_stalls += 1
            return
        # Otherwise, shift messages down the pipeline and process every stage.
        self._pipeline[2].accept_message(self._pipeline[1].get_message())
        self._pipeline[1].accept_message(self._pipeline[0].get_message())
        for stage in self._pipeline:
            stage.process()

    def stall(self):
        """Pause fetch/execute until the router accepts the pending send."""
        self._stall = True

    def continue_processing(self):
        """Resume normal pipelined processing."""
        self._stall = False
class FetchStage(Stage):
    """First pipeline stage: pull the next message for this PE from the router."""
    def __init__(self, nio_pe, router):
        Stage.__init__(self)
        self._nio_pe = nio_pe
        self._router = router
    def process(self):
        # Stores the fetched message for the next stage; presumably None when
        # the router has nothing queued for this PE -- TODO confirm.
        self._message = self._router.fetch(self._nio_pe)
class ExecStage(Stage):
    """Middle pipeline stage: perform the arithmetic requested by a message."""

    def __init__(self, nio_pe):
        Stage.__init__(self)
        self._accumulator = 0  # persists across MAC/CMAC/CLEAR operations
        self._nio_pe = nio_pe

    def process(self):
        """Execute the pending message's operation and replace it with a PEDone reply."""
        if self._message is None:
            return
        msg = self._message
        op1 = int_repr_of_float_to_float(msg.op1)
        op2 = int_repr_of_float_to_float(msg.op2)
        op = msg.operation
        # Accumulator-touching operations are handled explicitly; the
        # stateless binary operations go through a dispatch table. An
        # unrecognised operator yields 0, as before.
        if op == Operator.CMAC:
            self._accumulator = op1 * op2
            result = self._accumulator
        elif op == Operator.MAC:
            self._accumulator += op1 * op2
            result = self._accumulator
        elif op == Operator.CLEAR:
            self._accumulator = 0
            result = self._accumulator
        else:
            stateless = {
                Operator.ADD: lambda a, b: a + b,
                Operator.SUB: lambda a, b: a - b,
                Operator.MUL: lambda a, b: a * b,
                Operator.DIV: lambda a, b: a / b,
                Operator.MAX: max,
                Operator.MIN: min,
            }
            result = stateless.get(op, lambda a, b: 0)(op1, op2)
        self._message = Message(self._nio_pe, msg.source, Message.PEDone,
                                msg.message_id, msg.seq_num,
                                attributes={"result": result})
class AcknStage(Stage):
    """Final pipeline stage: hand the finished message back to the router."""

    def __init__(self, nio_pe, router):
        Stage.__init__(self)
        self._nio_pe = nio_pe
        self._router = router

    def process(self):
        """Try to send the pending message; stall the PE if the router refuses it."""
        if self._message is None:
            return
        if self._router.send(self._message):
            # Delivery succeeded: make sure the PE keeps running.
            self._nio_pe.continue_processing()
        else:
            # Router is busy: drop our copy and stall the PE until it retries.
            self._message = None
            self._nio_pe.stall()
|
'''
Pure python implementation of a connect 4 terminal game object.
Optimizations applied allow computation of one move and one check in approx. 100us.
---Still kinda slow... see connect4tf.py for a (hopefully) faster implementation.---nvm, this is all i got
'''
import numpy as np
class Connect4Board(object):
    """Connect-4 board with fast win checking via precomputed pattern vectors.

    ``winVecs`` is the (rows*cols, n_patterns) matrix produced by
    ``BoardExplorer.getWinPatterns()``; ``check()`` requires it, while
    ``move()`` and ``__str__`` work without it.
    """

    def __init__(self, board_shape=(6, 7), winVecs=None):
        if winVecs is not None:
            self.wins = winVecs
        self.grid = np.zeros(board_shape, dtype=np.int8)
        self.height = np.zeros(board_shape[1], dtype=np.int8)
        self.player = 1  # 1 / -1; whose turn it is

    def move(self, prob):
        """Drop the current player's piece into the highest-probability open column.

        Returns None after a successful move, or 0 (tie) when every column
        with non-zero probability is full.
        """
        # Mask out full columns. Generalised: use the board's actual row
        # count instead of the previously hard-coded 6.
        prob = prob * np.less(self.height, self.grid.shape[0])
        if np.sum(prob) > 0:
            slot = np.argmax(prob)
            self.grid[self.height[slot], slot] = self.player
            self.height[slot] += 1
            # swap players (kept as a branch; measured faster than *= -1)
            if self.player < 0:
                self.player = 1
            else:
                self.player = -1
            return None  # No tie
        return 0  # Game is a tie

    def check(self):
        """Return 1 or -1 if that player completed a winning line, else None."""
        # Dot the flattened board with every pattern column: a value of 4
        # (toWin) means all four cells of that pattern belong to one player.
        totals = np.dot(self.grid.reshape(-1), self.wins)
        if np.sum(totals > 3):
            return 1
        if np.sum(totals < -3):
            return -1
        return None  # No winner

    def __str__(self):
        # Print with row 0 (bottom) last, like a physical board.
        return str(self.grid[::-1])


class BoardExplorer(Connect4Board):
    """Enumerates every winning pattern of length ``toWin`` on the board.

    BUG FIX: the original appended ``self.grid`` itself to ``self.wins`` and
    kept mutating it (and ``np.flip`` returns a *view*), so all stored
    "patterns" aliased a single array full of stale cells. Each pattern is
    now built in its own fresh array, giving exactly ``toWin`` ones per
    pattern (24 horizontal + 21 vertical + 24 diagonal = 69 for 6x7/4).
    """

    def __init__(self, board_shape=(6, 7), toWin=4):
        super().__init__(board_shape)
        self.toWin = toWin
        self.wins = []

    def findHorizWins(self):
        """Append every horizontal run of toWin cells as a separate pattern."""
        rows, cols = self.grid.shape
        for start in range(cols - self.toWin + 1):
            for r in range(rows):
                pattern = np.zeros((rows, cols), dtype=np.int8)
                pattern[r, start:start + self.toWin] = 1
                self.wins.append(pattern)
        super().__init__(self.grid.shape)  # reset grid

    def findVerticalWns(self):
        """Append every vertical run of toWin cells. (Typo'd name kept for callers.)"""
        rows, cols = self.grid.shape
        for start in range(rows - self.toWin + 1):
            for c in range(cols):
                pattern = np.zeros((rows, cols), dtype=np.int8)
                pattern[start:start + self.toWin, c] = 1
                self.wins.append(pattern)
        super().__init__(self.grid.shape)  # reset grid

    def findDiagWins(self):
        """Append every diagonal run of toWin cells (both slopes).

        Only anti-diagonals anchored on the left edge are enumerated
        directly; the three mirror images below generate all remaining
        diagonals of both slopes (verified to cover all 24 for 6x7/4).
        """
        rows, cols = self.grid.shape
        for row in range(rows):
            # Anti-diagonal from (row, 0) going up-right.
            diag = [(row - k, k) for k in range(row + 1)]
            if len(diag) < self.toWin:
                continue
            # Slide a toWin-long window along the diagonal.
            for i in range(len(diag) - self.toWin + 1):
                cells = diag[i:i + self.toWin]
                idx = (np.array([r for r, c in cells]),
                       np.array([c for r, c in cells]))
                pattern = np.zeros((rows, cols), dtype=np.int8)
                pattern[idx] = 1
                # The pattern plus its vertical, 180-degree and horizontal
                # mirrors (same order as the original flips). .copy() because
                # np.flip returns a view.
                self.wins.append(pattern)
                self.wins.append(np.flip(pattern, axis=0).copy())
                self.wins.append(np.flip(np.flip(pattern, axis=0), axis=1).copy())
                self.wins.append(np.flip(pattern, axis=1).copy())
        super().__init__(self.grid.shape)  # reset grid
        print("winpatterns:", len(self.wins))

    def getWinPatterns(self):
        """Return all win patterns as a (rows*cols, n_patterns) matrix."""
        self.wins = []
        self.findHorizWins()
        self.findVerticalWns()
        self.findDiagWins()
        filters = np.swapaxes(np.array(self.wins, dtype=np.int8).T, 0, 1)  # (6x7x69)
        return filters.reshape(42, 69)  # single vector per pattern for comparison
if __name__ == "__main__":
    # Smoke test / micro-benchmark for the board classes.
    explore = BoardExplorer()
    # explore.findHorizWins()
    # explore.findVerticalWns()
    # explore.findDiagWins()
    winVecs = explore.getWinPatterns()
    # np.save("winVecs", winVecs)
    board = Connect4Board(winVecs=winVecs)
    # Play a fixed sequence of alternating moves, then print the final
    # position and the winner (if any).
    # NOTE(review): move() expects a per-column probability vector; these
    # integer arguments rely on scalar broadcasting -- confirm intended.
    for i in range(5):
        board.move(i+1)
        board.move(i)
        board.move(i+1)
        board.move(i)
        board.move(i+1)
        board.move(i)
    print(board)
    print(board.check())
    winVecs1 = explore.getWinPatterns()
    board = Connect4Board(winVecs=winVecs1)
    # Timing: board construction, construction+one move, and win checking.
    import timeit
    print(timeit.timeit("board = Connect4Board(winVecs=winVecs)", setup="from __main__ import Connect4Board, winVecs", number=1000)/1000)
    print(timeit.timeit("board.move(1);board = Connect4Board(winVecs=winVecs)", setup="from __main__ import board, Connect4Board, winVecs", number=10000)/10000)
    print(timeit.timeit("board.check()", setup="from __main__ import board", number=10000)/10000)
# x = board.grid.reshape((42,))
# y = winFilters
# print(np.dot(x, y))
# exit()
# print(winFilters)
# print(winFilters.shape)
# x = np.matmul(board.grid, winFilters)
# print(x[:,:,:])
# x = np.sum(x, axis=0)
# x = np.sum(x, axis=0)
# # print(winFilters[:,:,np.where(x == 16)[0][0]])
# print(x.shape)
# print(x)
# exit()
# print("wins = [")
# for i in wins:
# print("(np.array([{},{},{},{}], dtype=np.int32), np.array([{},{},{},{}], dtype=np.int32)),".format(
# i[0][0], i[0][1], i[0][2], i[0][3], i[1][0], i[1][1], i[1][2], i[1][3]))
# print("]")
# exit()
# import timeit
# x = np.ndarray((6, 7), dtype=np.int8)
# y = np.ndarray((6, 7, 69), dtype=np.int8)
# print(timeit.timeit("np.sum(np.dot(x, y))", setup="from __main__ import x, y, np", number=10000)/10000)
# print(timeit.timeit("np.sum(np.dot(board, winFilters))", setup="from __main__ import board, winFilters, np", number=1000)/1000)
# print(timeit.timeit("board.check()", setup="from __main__ import board", number=1000)/1000)
# print(timeit.timeit("board = Connect4Board()", setup="from __main__ import Connect4Board", number=1000)/1000)
# import time
# N = 1000
# start = time.time()
# for i in range(N):
# board = Connect4Board()
# for k in range(6):
# for j in range(7):
# board.move(j)
# [np.sum(board.grid[i]) for i in wins]
#
# print(1000/(time.time()-start))
|
from src.infra.db.setup import Session
def get_db():
    """Yield a database session, guaranteeing it is closed afterwards.

    Intended for use as a dependency/generator: the session is handed to the
    caller and closed once the caller is done with it.
    """
    session = Session()
    try:
        yield session
    finally:
        session.close()
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Code shared between tests.
from __future__ import absolute_import
from __future__ import print_function
import os
import os.path
import re
try:
import io as StringIO
import os as dircache
except ImportError:
import cStringIO as StringIO
import dircache
import shutil
import subprocess
import sys
import tempfile
import traceback
import unittest
import zipfile
import transitfeed
from transitfeed import problems
def check_call(cmd, expected_retcode=0, stdin_str="", **kwargs):
    """Run *cmd* and return (stdout, stderr) as byte strings.

    Convenience function that is in the docs for subprocess but not
    installed on my system. Raises an Exception if the return code is not
    expected_retcode.

    Args:
        cmd: argument list for subprocess.Popen.
        expected_retcode: return code the child must exit with.
        stdin_str: data fed to the child's stdin. NOTE(review): with byte
            pipes a non-empty value must be bytes on Python 3 -- confirm
            callers.
        **kwargs: extra Popen keyword arguments; must not include stdout,
            stderr or stdin.
    """
    try:
        if 'stdout' in kwargs or 'stderr' in kwargs or 'stdin' in kwargs:
            raise Exception("Don't pass stdout or stderr")
        # If a custom 'env' is in kwargs this will be passed to subprocess.Popen and
        # will prevent the subprocess from inheriting the parent's 'env'.
        # On Windows 7 we have to make sure that our custom 'env' contains
        # 'SystemRoot' as some code here is using os.urandom() which requires this
        # system variable. See review at http://codereview.appspot.com/4240085/ and
        # thread "is this a bug? no environment variables" at
        # http://www.gossamer-threads.com/lists/python/dev/878941
        if 'SystemRoot' in os.environ:
            if 'env' in kwargs:
                kwargs['env'].setdefault('SystemRoot', os.environ['SystemRoot'])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, stdin=subprocess.PIPE,
                             **kwargs)
        (out, err) = p.communicate(stdin_str)
        retcode = p.returncode
    except Exception as e:
        # BUG FIX: chain the original exception so its traceback isn't lost.
        raise Exception("When running %s: %s" % (cmd, e)) from e
    if retcode < 0:
        raise Exception(
            "Child '%s' was terminated by signal %d. Output:\n%s\n%s\n" %
            (cmd, -retcode, out, err))
    elif retcode != expected_retcode:
        raise Exception(
            "Child '%s' returned %d. Output:\n%s\n%s\n" %
            (cmd, retcode, out, err))
    return out, err
def data_path(path):
    """Return the absolute path of *path* inside this module's data directory."""
    return os.path.join(os.path.dirname(__file__), 'data', path)
def getdata_path_contents():
    """Return the directory listing of this module's data directory."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return dircache.listdir(data_dir)
class TestCase(unittest.TestCase):
    """Base of every TestCase class in this project.

    Adds a few assertion helpers that arguably belong on unittest.TestCase.
    """
    # Be careful about adding set_up or tear_down here: they would run a few
    # hundred times across the suite. (Noted by Tom, Dec 9 2009.)
    def assert_matches_regex(self, regex, string):
        """Fail unless *regex* is found somewhere in *string*."""
        if re.search(regex, string) is None:
            self.fail("string %r did not match regex %r" % (string, regex))
class RedirectStdOutTestCaseBase(TestCase):
    """Save stdout to the StringIO buffer self.this_stdout"""
    def set_up(self):
        # Keep the real stdout so tear_down can restore it.
        self.saved_stdout = sys.stdout
        self.this_stdout = StringIO.StringIO()
        sys.stdout = self.this_stdout
    def tear_down(self):
        # Restore the real stdout before discarding the capture buffer.
        sys.stdout = self.saved_stdout
        self.this_stdout.close()
class GetPathTestCase(TestCase):
    """TestCase with method to get paths to files in the distribution."""
    def set_up(self):
        # Remember the original working directory so get_path() can build
        # absolute paths even after a test chdirs elsewhere.
        self._origcwd = os.getcwd()
        # NOTE(review): no base class in this hierarchy defines set_up(), so
        # this super call looks like it raises AttributeError (which
        # get_path() below happens to catch) -- confirm intended.
        super(GetPathTestCase, self).set_up()
    def get_example_path(self, name):
        """Return the full path of a file in the examples directory"""
        return self.get_path('examples', name)
    def get_test_data_path(self, *path):
        """Return the full path of a file in the tests/data directory"""
        return self.get_path('tests', 'data', *path)
    def get_path(self, *path):
        # Lazily ensure _origcwd is set even when set_up() was never called;
        # the AttributeError comes from the super().set_up() call above.
        try:
            self.set_up()
        except AttributeError:
            self._origcwd = os.getcwd()
        # NOTE(review): the string below is a misplaced docstring (it is a
        # no-op statement mid-function, not the function's docstring).
        """Return absolute path of path. path is relative main source directory."""
        here = os.path.dirname(__file__)  # Relative to _origcwd
        return os.path.join(self._origcwd, here, '..', *path)
class TempDirTestCaseBase(GetPathTestCase):
    """Make a temporary directory the current directory before running the test
    and remove it after the test.
    """
    def set_up(self):
        GetPathTestCase.set_up(self)
        self.tempdirpath = tempfile.mkdtemp()
        os.chdir(self.tempdirpath)
    def tear_down(self):
        # Leave the temp dir before deleting it (required on Windows).
        os.chdir(self._origcwd)
        shutil.rmtree(self.tempdirpath)
        # NOTE(review): no class in this hierarchy defines tear_down(), so
        # this call looks like it raises AttributeError -- confirm.
        GetPathTestCase.tear_down(self)
    @staticmethod
    def check_call_with_path(cmd, expected_retcode=0, stdin_str=""):
        """Run python script cmd[0] with args cmd[1:], making sure 'import
        transitfeed' will use the module in this source tree. Raises an Exception
        if the return code is not expected_retcode. Returns a tuple of strings,
        (stdout, stderr)."""
        tf_path = transitfeed.__file__
        # Path of the directory containing transitfeed. When this is added to
        # sys.path importing transitfeed should work independent of if
        # transitfeed.__file__ is <parent>/transitfeed.py or
        # <parent>/transitfeed/__init__.py
        transitfeed_parent = tf_path[:tf_path.rfind("transitfeed")]
        transitfeed_parent = transitfeed_parent.replace("\\", "/").rstrip("/")
        script_path = cmd[0].replace("\\", "/")
        script_args = cmd[1:]
        # Propogate sys.path of this process to the subprocess. This is done
        # because I assume that if this process has a customized sys.path it is
        # meant to be used for all processes involved in the tests. The downside
        # of this is that the subprocess is no longer a clean version of what you
        # get when running "python" after installing transitfeed. Hopefully if this
        # process uses a customized sys.path you know what you are doing.
        # BUG FIX: use os.pathsep (';' on Windows) instead of a hard-coded ':'.
        env = {"PYTHONPATH": os.pathsep.join(sys.path)}
        # Instead of directly running the script make sure that the transitfeed
        # module in this source directory is at the front of sys.path. Then
        # adjust sys.argv so it looks like the script was run directly. This lets
        # OptionParser use the correct value for %proj.
        # BUG FIX: exec() needs the file's source text; exec(open(path))
        # passes a file object and raises TypeError on Python 3.
        cmd = [sys.executable, "-c",
               "import sys; "
               "sys.path.insert(0,'%s'); "
               "sys.argv = ['%s'] + sys.argv[1:]; "
               "exec(open('%s').read())" %
               (transitfeed_parent, script_path, script_path)] + script_args
        return check_call(cmd, expected_retcode=expected_retcode, shell=False,
                          env=env, stdin_str=stdin_str)
    @staticmethod
    def convert_zip_to_dict(zip):
        """Converts a zip file into a dictionary.
        Arguments:
            zip: The zipfile whose contents are to be converted to a dictionary.
        Returns:
            A dictionary mapping filenames to file contents."""
        # (Parameter name shadows the builtin; kept for caller compatibility.)
        zip_dict = {}
        for archive_name in zip.namelist():
            zip_dict[archive_name] = zip.read(archive_name)
        zip.close()
        return zip_dict
    @staticmethod
    def convert_dict_to_zip(dict):
        """Converts a dictionary to an in-memory zipfile.
        Arguments:
            dict: A dictionary mapping file names to file contents
        Returns:
            The new file's in-memory contents as a file-like object."""
        zipfile_mem = StringIO.StringIO()
        zip = zipfile.ZipFile(zipfile_mem, 'a')
        for arcname, contents in dict.items():
            zip.writestr(arcname, contents)
        zip.close()
        return zipfile_mem
class TempFileTestCaseBase(TestCase):
    """
    Subclass of TestCase which sets self.tempfilepath to a valid temporary zip
    file name and removes the file if it exists when the test is done.
    """
    def set_up(self):
        fd, self.tempfilepath = tempfile.mkstemp(".zip")
        # An open handle would make os.remove fail on Windows, so close now.
        os.close(fd)
    def tear_down(self):
        if os.path.exists(self.tempfilepath):
            os.remove(self.tempfilepath)
class MemoryZipTestCase(TestCase):
    """Base for TestCase classes which read from an in-memory zip file.
    A test that loads data from this zip file exercises almost all the code used
    when the feedvalidator runs, but does not touch disk. Unfortunately it is very
    difficult to add new stops to the default stops.txt because a new stop will
    break tests in StopHierarchyTestCase and StopsNearEachOther."""
    # Problem types that every test in this hierarchy ignores.
    _IGNORE_TYPES = ["expiration_date"]
    def set_up(self):
        # Fresh problem accumulator/reporter plus a minimal valid GTFS feed
        # held entirely in self.zip_contents (arcname -> file text).
        self.accumulator = RecordingProblemAccumulator(self, self._IGNORE_TYPES)
        self.problems = transitfeed.ProblemReporter(self.accumulator)
        self.zip_contents = {}
        self.set_archive_contents(
            "agency.txt",
            "agency_id,agency_name,agency_url,agency_timezone\n"
            "DTA,Demo Agency,http://google.com,America/Los_Angeles\n")
        self.set_archive_contents(
            "calendar.txt",
            "service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday,"
            "start_date,end_date\n"
            "FULLW,1,1,1,1,1,1,1,20070101,20101231\n"
            "WE,0,0,0,0,0,1,1,20070101,20101231\n")
        self.set_archive_contents(
            "calendar_dates.txt",
            "service_id,date,exception_type\n"
            "FULLW,20070101,1\n")
        self.set_archive_contents(
            "routes.txt",
            "route_id,agency_id,route_short_name,route_long_name,route_type\n"
            "AB,DTA,,Airport Bullfrog,3\n")
        self.set_archive_contents(
            "trips.txt",
            "route_id,service_id,trip_id\n"
            "AB,FULLW,AB1\n")
        self.set_archive_contents(
            "stops.txt",
            "stop_id,stop_name,stop_lat,stop_lon\n"
            "BEATTY_AIRPORT,Airport,36.868446,-116.784582\n"
            "BULLFROG,Bullfrog,36.88108,-116.81797\n"
            "STAGECOACH,Stagecoach Hotel,36.915682,-116.751677\n")
        self.set_archive_contents(
            "stop_times.txt",
            "trip_id,arrival_time,departure_time,stop_id,stop_sequence\n"
            "AB1,10:00:00,10:00:00,BEATTY_AIRPORT,1\n"
            "AB1,10:20:00,10:20:00,BULLFROG,2\n"
            "AB1,10:25:00,10:25:00,STAGECOACH,3\n")
    def make_loader_and_load(self,
                             problems=None,
                             extra_validation=True,
                             gtfs_factory=None):
        """Returns a Schedule loaded with the contents of the file dict."""
        if gtfs_factory is None:
            gtfs_factory = transitfeed.get_gtfs_factory()
        if problems is None:
            problems = self.problems
        # Materialise the in-memory zip from the file dict, then load it.
        self.create_zip()
        self.loader = transitfeed.loader(
            problems=problems,
            extra_validation=extra_validation,
            zip=self.zip,
            gtfs_factory=gtfs_factory)
        return self.loader.load()
    def append_to_archive_contents(self, arcname, s):
        """Append string s to file arcname in the file dict.
        All calls to this function, if any, should be made before calling
        make_loader_and_load."""
        current_contents = self.zip_contents[arcname]
        self.zip_contents[arcname] = current_contents + s
    def set_archive_contents(self, arcname, contents):
        """Set the contents of file arcname in the file dict.
        All calls to this function, if any, should be made before calling
        make_loader_and_load."""
        self.zip_contents[arcname] = contents
    def get_archive_contents(self, arcname):
        """Get the contents of file arcname in the file dict."""
        return self.zip_contents[arcname]
    def remove_archive(self, arcname):
        """Remove file arcname from the file dict.
        All calls to this function, if any, should be made before calling
        make_loader_and_load."""
        del self.zip_contents[arcname]
    def get_archive_names(self):
        """Get a list of all the archive names in the file dict."""
        return self.zip_contents.keys()
    def create_zip(self):
        """Create an in-memory GTFS zipfile from the contents of the file dict."""
        self.zipfile = StringIO.StringIO()
        self.zip = zipfile.ZipFile(self.zipfile, 'a')
        for (arcname, contents) in self.zip_contents.items():
            # writestr raises TypeError for some content types; fall back to
            # write() in that case -- presumably for file-like contents.
            try:
                self.zip.writestr(arcname, contents)
            except TypeError:
                self.zip.write(arcname, contents)
    def dump_zip_file(self, zf):
        """Print the contents of something zipfile can open, such as a StringIO."""
        # Handy for debugging
        z = zipfile.ZipFile(zf)
        for n in z.namelist():
            print("--\n%s\n%s" % (n, z.read(n)))
class LoadTestCase(TestCase):
    """Base for tests that load a feed from disk and assert on reported problems."""
    def set_up(self):
        # Record problems instead of raising; expiration warnings are ignored.
        self.accumulator = RecordingProblemAccumulator(self, ("expiration_date",))
        self.problems = transitfeed.ProblemReporter(self.accumulator)
    def load(self, feed_name):
        # Load the named feed from the tests' data directory with extra
        # validation enabled; problems go to self.accumulator.
        loader = transitfeed.loader(
            data_path(feed_name), problems=self.problems, extra_validation=True)
        loader.load()
    def expect_invalid_value(self, feed_name, column_name):
        """Load feed_name and expect exactly one invalid-value problem for column_name."""
        self.load(feed_name)
        self.accumulator.pop_invalid_value(column_name)
        self.accumulator.assert_no_more_exceptions()
    def expect_missing_file(self, feed_name, file_name):
        """Load feed_name and expect a MissingFile problem for file_name."""
        self.load(feed_name)
        e = self.accumulator.pop_exception("MissingFile")
        self.assertEqual(file_name, e.file_name)
        # Don't call assert_no_more_exceptions() because a missing file causes
        # many errors.
INVALID_VALUE = Exception()  # sentinel default: "caller did not supply a value"
class ValidationTestCase(TestCase):
def set_up(self):
self.accumulator = RecordingProblemAccumulator(
self, ("expiration_date", "NoServiceExceptions"))
self.problems = transitfeed.ProblemReporter(self.accumulator)
def tear_down(self):
self.accumulator.tear_down_assert_no_more_exceptions()
def expect_no_problems(self, object):
self.accumulator.assert_no_more_exceptions()
object.Validate(self.problems)
self.accumulator.assert_no_more_exceptions()
# TODO: think about Expect*Closure methods. With the
# RecordingProblemAccumulator it is now possible to replace
# self.expect_missing_value_in_closure(lambda: o.method(...), foo)
# with
# o.method(...)
# self.expect_missing_value_in_closure(foo)
# because problems don't raise an exception. This has the advantage of
# making it easy and clear to test the return value of o.method(...) and
# easier to test for a sequence of problems caused by one call.
# neun@ 2011-01-18: for the moment I don't remove the Expect*InClosure methods
# as they allow enforcing an assert_no_more_exceptions() before validation.
# When removing them we do have to make sure that each "logical test block"
# before an Expect*InClosure usage really ends with assert_no_more_exceptions.
# See http://codereview.appspot.com/4020041/
def validate_and_expect_missing_value(self, object, column_name):
self.accumulator.assert_no_more_exceptions()
object.Validate(self.problems)
self.expect_exception('missing_value', column_name)
def expect_missing_value_in_closure(self, column_name, c):
self.accumulator.assert_no_more_exceptions()
rv = c()
self.expect_exception('missing_value', column_name)
def validate_andexpect_invalid_value(self, object, column_name,
value=INVALID_VALUE):
self.accumulator.assert_no_more_exceptions()
object.Validate(self.problems)
self.expect_exception('invalid_value', column_name, value)
def expect_invalid_value_in_closure(self, column_name, value=INVALID_VALUE,
c=None):
self.accumulator.assert_no_more_exceptions()
rv = c()
self.expect_exception('invalid_value', column_name, value)
def validate_and_expect_invalid_float_value(self, object, value):
    """Validate *object* and expect an InvalidFloatValue problem for *value*."""
    self.accumulator.assert_no_more_exceptions()
    object.Validate(self.problems)
    self.expect_exception('InvalidFloatValue', None, value)
def validate_and_expect_other_problem(self, object):
    """Validate *object* and expect an other_problem report."""
    self.accumulator.assert_no_more_exceptions()
    object.Validate(self.problems)
    self.expect_exception('other_problem')
def expect_other_problem_in_closure(self, c):
    """Run callable *c* and expect an other_problem report."""
    self.accumulator.assert_no_more_exceptions()
    # The return value was previously bound to an unused local `rv`; dropped.
    c()
    self.expect_exception('other_problem')
def validate_and_expect_date_outside_valid_range(self, object, column_name,
                                                 value=INVALID_VALUE):
    """Validate *object* and expect a DateOutsideValidRange problem."""
    self.accumulator.assert_no_more_exceptions()
    object.Validate(self.problems)
    self.expect_exception('DateOutsideValidRange', column_name, value)
def expect_exception(self, type_name, column_name=None, value=INVALID_VALUE):
    """Pop the next recorded problem and assert it matches.

    column_name and value are only checked when provided; afterwards the
    accumulator must be empty.
    """
    e = self.accumulator.pop_exception(type_name)
    if column_name:
        self.assertEqual(column_name, e.column_name)
    if value != INVALID_VALUE:
        self.assertEqual(value, e.value)
    # these should not throw any exceptions
    e.FormatProblem()
    e.FormatContext()
    self.accumulator.assert_no_more_exceptions()
def simple_schedule(self):
    """Return a minimum schedule that will load without warnings."""
    schedule = transitfeed.Schedule(problem_reporter=self.problems)
    schedule.AddAgency("Fly Agency", "http://iflyagency.com",
                       "America/Los_Angeles")
    # Weekday service valid 2009-12-03 through 2011-12-03.
    service_period = transitfeed.ServicePeriod("WEEK")
    service_period.SetWeekdayService(True)
    service_period.SetStartDate("20091203")
    service_period.SetEndDate("20111203")
    service_period.set_date_has_service("20091203")
    schedule.add_service_period_object(service_period)
    # Three stops on one line, one route, one trip with increasing times.
    stop1 = schedule.add_stop(lng=1.00, lat=48.2, name="Stop 1", stop_id="stop1")
    stop2 = schedule.add_stop(lng=1.01, lat=48.2, name="Stop 2", stop_id="stop2")
    stop3 = schedule.add_stop(lng=1.03, lat=48.2, name="Stop 3", stop_id="stop3")
    route = schedule.AddRoute("54C", "", "Bus", route_id="054C")
    trip = route.AddTrip(schedule, "bus trip", trip_id="CITY1")
    trip.AddStopTime(stop1, stop_time="12:00:00")
    trip.AddStopTime(stop2, stop_time="12:00:45")
    trip.AddStopTime(stop3, stop_time="12:02:30")
    return schedule
# TODO(anog): Revisit this after we implement proper per-exception level change
class RecordingProblemAccumulator(problems.ProblemAccumulatorInterface):
    """Save all problems for later inspection.

    Args:
      test_case: a unittest.TestCase object on which to report problems
      ignore_types: sequence of string type names that will be ignored by the
        ProblemAccumulator
    """

    def __init__(self, test_case, ignore_types=None):
        # List of (exception, formatted_traceback) tuples, in report order
        # until _sort_exception_groups runs.
        self.exceptions = []
        self._test_case = test_case
        self._ignore_types = ignore_types or set()
        self._sorted = False

    def _report(self, e):
        """Record a reported problem unless its type name is ignored."""
        # Ensure that these don't crash
        e.FormatProblem()
        e.FormatContext()
        if e.__class__.__name__ in self._ignore_types:
            return
        # Keep the 7 nearest stack frames. This should be enough to identify
        # the code path that created the exception while trimming off most of
        # the large test framework's stack.
        traceback_list = traceback.format_list(traceback.extract_stack()[-7:-1])
        self.exceptions.append((e, ''.join(traceback_list)))

    def pop_exception(self, type_name):
        """Return the first exception, which must be a type_name."""
        if not self._sorted:
            self._sort_exception_groups()
            self._sorted = True
        e = self.exceptions.pop(0)
        e_name = e[0].__class__.__name__
        # assertEquals was a deprecated alias removed in Python 3.12;
        # assertEqual is the supported spelling throughout this class.
        self._test_case.assertEqual(e_name, type_name,
                                    "%s != %s\n%s" %
                                    (e_name, type_name, self.format_exception(*e)))
        return e[0]

    @staticmethod
    def format_exception(exce, tb):
        """Render an exception plus its capture-time traceback for messages."""
        return ("%s\nwith gtfs file context %s\nand traceback\n%s" %
                (exce.FormatProblem(), exce.FormatContext(), tb))

    def tear_down_assert_no_more_exceptions(self):
        """Assert that there are no unexpected problems left after a test has run.

        This function should be called on a test's tear_down. For more
        information please see assert_no_more_exceptions"""
        assert len(self.exceptions) == 0, \
            "see util.RecordingProblemAccumulator.assert_no_more_exceptions"

    def assert_no_more_exceptions(self):
        """Check that no unexpected problems were reported.

        Every test that uses a RecordingProblemReporter should end with a call
        to this method. If set_up creates a RecordingProblemReporter it is good
        for tear_down to double check that the exceptions list was emptied.
        """
        exceptions_as_text = []
        for e, tb in self.exceptions:
            exceptions_as_text.append(self.format_exception(e, tb))
        # If the assertFalse below fails the test will abort and tear_down is
        # called. Some tear_down methods assert that self.exceptions is empty as
        # protection against a test that doesn't end with assert_no_more_exceptions
        # and has exceptions remaining in the RecordingProblemReporter. It would
        # be nice to trigger a normal test failure in tear_down but the idea was
        # rejected (http://bugs.python.org/issue5531).
        self.exceptions = []
        self._test_case.assertFalse(exceptions_as_text,
                                    "\n".join(exceptions_as_text))

    def pop_column_specific_exception(self, type_name, column_name, file_name=None):
        """Pops and validates column-specific exceptions from the accumulator.

        Asserts that the exception is of the given type, and originated in the
        specified file and column.

        Arguments:
          type_name: the type of the exception as string, e.g. 'invalid_value'
          column_name: the name of the field (column) which caused the exception
          file_name: optional, the name of the file containing the bad field
        Returns:
          the exception object
        """
        e = self.pop_exception(type_name)
        self._test_case.assertEqual(column_name, e.column_name)
        if file_name:
            self._test_case.assertEqual(file_name, e.file_name)
        return e

    def pop_invalid_value(self, column_name, file_name=None):
        """Shorthand for popping an 'invalid_value' exception."""
        return self.pop_column_specific_exception("invalid_value", column_name,
                                                  file_name)

    def pop_missing_value(self, column_name, file_name=None):
        """Shorthand for popping a 'missing_value' exception."""
        return self.pop_column_specific_exception("missing_value", column_name,
                                                  file_name)

    def pop_date_outside_valid_range(self, column_name, file_name=None):
        """Shorthand for popping a 'DateOutsideValidRange' exception."""
        return self.pop_column_specific_exception("DateOutsideValidRange", column_name,
                                                  file_name)

    def pop_duplicate_column(self, file_name, header, count):
        """Pop a DuplicateColumn exception and check its file/header/count."""
        e = self.pop_exception("DuplicateColumn")
        self._test_case.assertEqual(file_name, e.file_name)
        self._test_case.assertEqual(header, e.header)
        self._test_case.assertEqual(count, e.count)
        return e

    def _sort_exception_groups(self):
        """Applies a consistent order to exceptions for repeatable testing.

        Exceptions are only sorted when multiple exceptions of the same type
        appear consecutively within the full exception list. For example, if
        the exception list is ['B2', 'B1', 'A2', 'A1', 'A3', 'B3'], where A B
        and C are distinct exception types, the resulting order is
        ['B1', 'B2', 'A1', 'A2', 'A3', 'B3']. Notice the order of exception
        types does not change, but grouped exceptions of the same type are
        sorted within their group.

        The ExceptionWithContext.GetOrderKey method is used for generating the
        sort key for exceptions.
        """
        sorted_exceptions = []
        exception_group = []
        current_exception_type = None

        def process_exception_group():
            # Sorts the current run of same-typed exceptions in place, then
            # appends them to the output (reads exception_group via closure).
            exception_group.sort(key=lambda x: x[0].GetOrderKey())
            sorted_exceptions.extend(exception_group)

        for e_tuple in self.exceptions:
            e = e_tuple[0]
            if e.__class__ != current_exception_type:
                current_exception_type = e.__class__
                process_exception_group()
                exception_group = []
            exception_group.append(e_tuple)
        process_exception_group()
        self.exceptions = sorted_exceptions
class TestFailureProblemAccumulator(problems.ProblemAccumulatorInterface):
    """Causes a test failure immediately on any problem."""

    def __init__(self, test_case, ignore_types=("expiration_date",)):
        self.test_case = test_case
        # Falls back to an empty set when ignore_types is None or empty.
        self._ignore_types = ignore_types or set()

    def _report(self, e):
        """Fail the owning test case unless the problem type is ignored."""
        # These should never crash
        formatted_problem = e.FormatProblem()
        formatted_context = e.FormatContext()
        exception_class = e.__class__.__name__
        if exception_class in self._ignore_types:
            return
        self.test_case.fail(
            "%s: %s\n%s" % (exception_class, formatted_problem, formatted_context))
def get_test_failure_problem_reporter(test_case,
                                      ignore_types=("expiration_date",)):
    """Build a ProblemReporter that fails *test_case* on any reported problem.

    ignore_types: problem type names that are silently dropped.
    """
    accumulator = TestFailureProblemAccumulator(test_case, ignore_types)
    # Local renamed from `problems`, which shadowed the imported problems
    # module used elsewhere in this file.
    reporter = transitfeed.ProblemReporter(accumulator)
    return reporter
class ExceptionProblemReporterNoExpiration(problems.ProblemReporter):
    """Ignores feed expiration problems.

    Use TestFailureProblemReporter in new code because it fails more cleanly,
    is easier to extend and does more thorough checking.
    """

    def __init__(self):
        # raise_warnings=True: warnings are raised as exceptions as well.
        accumulator = transitfeed.ExceptionProblemAccumulator(raise_warnings=True)
        transitfeed.ProblemReporter.__init__(self, accumulator)

    def expiration_date(self, expiration, context=None):
        pass  # We don't want to give errors about our test data
|
"""Configuration for kalufs-kepubify.
May be overridden by instance/config.py.
"""
import os
# The log folder location
LOG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "logs")
# Set log level to debug
DEBUG = True
TEMPLATES_AUTO_RELOAD = True
# Generate with os.urandom(24)
SECRET_KEY = "SUPERSECRETKEY"
# Needed if application is not mounted in root
APPLICATION_ROOT = ""
# kepubify config
KEPUBIFY_PATH = "/home/anne/projects/kalufs-kepubify/instance/kepubify-linux-64bit"
# Dir for temporary file storage
TMP_DIR = "tmp"
|
import atexit
import collections
import dataclasses
import functools
from math import log
import more_itertools
ALPHABET = "abcdefghijklmnopqrstuvwxyz"
def _fmt_permitted(permitted):
return "\n".join("".join(c if c in p else " " for c in ALPHABET) for p in permitted)
@dataclasses.dataclass(frozen=True)
class Constraint:
    """Immutable (hashable) summary of all guess feedback seen so far.

    Stored as tuples rather than sets/dicts so instances stay hashable and
    can be used as functools.cache keys.
    """
    # permitted[i]: letters still allowed at word position i (5 positions).
    permitted: tuple[tuple[str, ...], ...]
    # lo/hi: per-letter lower/upper bounds on occurrence count, as
    # (letter, count) pairs.
    lo: tuple[tuple[str, int], ...]
    hi: tuple[tuple[str, int], ...]

    @staticmethod
    def new_from_state(state):
        """Rebuild a Constraint from a "guess:feedback,guess:feedback" string."""
        constraint = Constraint.new(ALPHABET)
        for guess, feedback in [step.split(":") for step in state.split(",")]:
            feedback = [int(f) for f in feedback]
            constraint = constraint.tightened(guess, feedback)
        return constraint

    @staticmethod
    def new(alphabet: str):
        """Unconstrained start: every letter allowed at every position."""
        return Constraint(
            permitted=tuple(tuple(alphabet) for _ in range(5)),
            lo=(),
            hi=(),
        )

    def tightened(self, guess, feedback):
        """Return a new Constraint refined by one scored guess.

        Feedback codes: 0 = unused slot ("-"), 1 = absent, 2 = present but
        misplaced, 3 = exact match.
        """
        permitted = [set(p) for p in self.permitted]
        lo = collections.defaultdict(lambda: 0, self.lo)
        hi = collections.defaultdict(lambda: 5, self.hi)
        required = set()
        for i, (g, f) in enumerate(zip(guess, feedback)):
            match f:
                case 0:
                    assert g == "-"
                case 1:
                    permitted[i].discard(g)
                    # If a letter occurs multiple times in a guess but only once in the
                    # answer, only the first occurrence will be scored as a two.
                    if g not in required:
                        for p in permitted:
                            p.discard(g)
                case 2:
                    required.add(g)
                    permitted[i].discard(g)
                case 3:
                    required.add(g)
                    permitted[i] = {g}
                case _:
                    assert False
        # Letters scored 2/3 give a lower occurrence bound; a letter that was
        # also scored 1 somewhere pins the upper bound to that same count.
        positive = collections.Counter(
            g for g, f in zip(guess, feedback) if f in {2, 3}
        )
        negative = collections.Counter(g for g, f in zip(guess, feedback) if f in {1})
        for k, v in positive.items():
            lo[k] = max(lo[k], v)
            if k in negative:
                hi[k] = min(hi[k], v)
        return Constraint(
            permitted=tuple(tuple(p) for p in permitted),
            lo=tuple(lo.items()),
            hi=tuple(hi.items()),
        )

    def permits(self, word):
        """True if *word* is consistent with every recorded piece of feedback."""
        for c, p in zip(word, self.permitted):
            if c not in p:
                return False
        counts = collections.Counter(word)
        for c, v in self.lo:
            if counts[c] < v:
                return False
        for c, v in self.hi:
            if v < counts[c]:
                return False
        return True
def _quick_score(secret, guess):
result = [None] * 5
remaining = list(secret)
for i, (s, g) in enumerate(zip(secret, guess)):
if s == g:
result[i] = 3
remaining[i] = None
for i, g in enumerate(guess):
if result[i]:
continue
if g in remaining:
result[i] = 2
remaining[remaining.index(g)] = None
else:
result[i] = 1
return tuple(result)
def _entropy(options, guess):
    """Return the Shannon entropy (nats) of the score distribution of
    *guess* over the candidate answers in *options*."""
    score_counts = collections.Counter(_quick_score(secret, guess) for secret in options)
    total = sum(score_counts.values())
    return -sum(
        (count / total) * log(count / total)
        for count in score_counts.values()
        if count
    )
def _min_surprise(options, guess):
    """Return the worst-case information (surprisal) of the score.

    log(denominator / numerator) is the self-information of the most likely
    score bucket, i.e. how much is learned in the least favourable
    (adversarial) outcome. (The previous docstring was a copy-paste of
    _entropy's and was wrong.)
    """
    counter = collections.Counter(_quick_score(secret, guess) for secret in options)
    numerator = max(counter.values())
    denominator = sum(counter.values())
    return log(denominator / numerator)
@functools.cache
def _options(constraint, wordlist):
"""Return (superset of) possible answers"""
# Superset because the information from the state may not be fully exploited
return [word for word in wordlist if constraint.permits(word)]
atexit.register(lambda: print(_options.__name__, _options.cache_info()))
@functools.cache
def _choice(constraint, allowed_guesses, allowed_answers, adversarial):
    """Return the word to try next

    Note that this need not be a possible answer. When *adversarial* is
    true, guesses are rated by worst-case information (_min_surprise)
    instead of expected information (_entropy).
    """
    plausible_answers = _options(constraint, allowed_answers)
    # If there are only three options left and we guess at random then we expect to use
    # two more guesses. If we first guess a word that is impossible then we will need
    # at least two guesses. As such, switching to choosing only from plausible answers
    # will not hurt.
    if len(plausible_answers) <= 3:
        plausible_guesses = plausible_answers
    else:
        plausible_guesses = allowed_guesses
    if adversarial:
        rating = _min_surprise
    else:
        rating = _entropy
    ratings = {guess: rating(plausible_answers, guess) for guess in plausible_guesses}
    # Ordered collection before this point for reproducibility
    plausible_answers = set(plausible_answers)
    # Tie-break in favour of words that could themselves be the answer.
    return max(ratings, key=lambda k: (ratings[k], k in plausible_answers))


# Report cache effectiveness when the interpreter exits.
atexit.register(lambda: print(_choice.__name__, _choice.cache_info()))
class SimpleGuesser:
    """Baseline guesser: picks the first plausible answer alphabetically."""

    def __init__(self, wordlist: dict[str, bool]) -> None:
        # wordlist maps word -> "may be an answer" flag; every word is
        # allowed as a guess, only flagged ones as answers.
        self._guesses = tuple(sorted(wordlist))
        self._answers = tuple(
            sorted(word for word, is_answer in wordlist.items() if is_answer)
        )

    def __call__(self, state: str) -> str:
        constraint = Constraint.new_from_state(state)
        # First answer consistent with the constraint, or None if none is.
        return next(filter(constraint.permits, self._answers), None)
class MaxEntropyGuesser(SimpleGuesser):
    """Guesser that maximises expected information (Shannon entropy)."""

    def __call__(self, state: str) -> str:
        # adversarial=False selects the entropy rating inside _choice.
        return _choice(Constraint.new_from_state(state), self._guesses, self._answers, False)
class MaximinSurpriseGuesser(SimpleGuesser):
    """Guesser that maximises worst-case information (adversarial setting)."""

    def __call__(self, state: str) -> str:
        parsed = Constraint.new_from_state(state)
        # adversarial=True selects the min-surprise rating inside _choice.
        guess = _choice(parsed, self._guesses, self._answers, True)
        return guess
class CheapHeuristicGuesser(SimpleGuesser):
    # cheap here means it can be precomputed
    """Guesser ordering answers by number of distinct letters, descending."""

    def __init__(self, wordlist: dict[str, bool]) -> None:
        super().__init__(wordlist)
        # Stable sort keeps SimpleGuesser's alphabetical order as tie-breaker.
        # Wrapped in tuple() for consistency with the base class, which keeps
        # _answers as a tuple (the original left this one as a list).
        self._answers = tuple(
            sorted(self._answers, key=lambda word: len(set(word)), reverse=True)
        )
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from .tesisfunctions import Plotim,overlay
import cv2
import numpy as np
from .tesisfunctions import brightness, IMAGEPATH,graphpolygontest,thresh_biggestCnt,\
CircleClosure,twoMaxTest,graphDeffects,extendedSeparatingLine
fn1 = r'im1_2.jpg'
#fn1 = IMAGEPATH+r"cellphone_retinal/ALCATEL ONE TOUCH IDOL X/left_DAVID/IMG_20150730_115534_1.jpg"
# Base file name without directory or extension, used to label the plots.
name = fn1.split('\\')[-1].split(".")[0]
fore = cv2.imread(fn1)
fore = cv2.resize(fore,(300,300))
# Otsu threshold on the brightness channel separates foreground from background.
P = brightness(fore)
thresh,lastthresh = cv2.threshold(P,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
Plotim(name + " overlayed lastthresh", overlay(fore.copy(), lastthresh * 255, alpha=lastthresh * 0.2)).show()
for i in range(2): # test multiple applications to results
    # SIMULATE polygon test
    dist_transform = cv2.distanceTransform(lastthresh,cv2.DIST_LABEL_PIXEL,5)
    dist_transform[lastthresh==0] = -1 # simulate outside points
    graph = graphpolygontest(dist_transform,name+" dist_transform")
    center = graph.center
    cx,cy = center
    centerVal = dist_transform[cy,cx]
    print("center: ", center, " Value: ", centerVal)
    graph.show()
    # Inscribed circle around the deepest point, clipped to the threshold mask.
    # NOTE(review): centerVal comes from a distance transform; cv2.circle
    # expects an int radius — confirm upstream dtype.
    overcircle = np.zeros_like(lastthresh,np.uint8)
    cv2.circle(overcircle,center,centerVal,1,-1)
    overcircle[lastthresh==0]=0
    Plotim(name + " overlayed circle", overcircle).show()
    #DEFECTS
    pallet = [[0,0,0],[255,255,255]]
    pallet = np.array(pallet,np.uint8)
    imdefects = pallet[overcircle]
    imdefects = overlay(fore.copy(), imdefects, alpha=brightness(imdefects))
    cnt = thresh_biggestCnt(overcircle)
    hull = cv2.convexHull(cnt,returnPoints = False)
    defects = cv2.convexityDefects(cnt,hull)
    if twoMaxTest(defects,epsilon=0.5):
        # Two dominant convexity defects: cut the blob along the separating line.
        graphDeffects(imdefects,cnt,defects)
        #SEPARATING LINE
        start,end = extendedSeparatingLine(imdefects.shape, cnt, defects)
        cv2.line(imdefects,start,end,[0,0,100],2)
        Plotim(name + " and circle defects", imdefects).show()
        cv2.line(lastthresh,start,end,0,2)
        cnt = thresh_biggestCnt(lastthresh)
    else:
        cnt = CircleClosure(lastthresh)
    # Fit an ellipse to the selected contour; mask is 1 outside the ellipse,
    # so this blacks out everything outside the fitted ellipse in `fore`.
    ellipse = cv2.fitEllipse(cnt)
    mask = np.ones(P.shape,dtype=np.uint8)
    cv2.ellipse(mask,ellipse,0,-1)
    fore[mask>0]=0
    Plotim(name + " result", fore).show()
#cv2.imwrite("mask_"+name+".png",fore)
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# RFID Read
#
import os,sys
import time
import json
import rfidiot
import CHIP_IO.GPIO as GPIO
from colorama import init
init(strip=not sys.stdout.isatty()) # strip colors if stdout is redirected
from termcolor import cprint
from pyfiglet import figlet_format
from RFIDapi import *
from pygame import mixer
import config
# readerprofile = [0,3] #action items are only the ones listed in the readerprofile
state = 0
readerid = config.settings['readerID']
# pre_init must be called BEFORE mixer.init() for its settings to take
# effect; the original called init() first, making pre_init a no-op.
mixer.pre_init(44100, 16, 2, 4096) #frequency, size, channels, buffersize
mixer.init()
def testNetwork():
    """Smoke-test the API: log an action and fetch actions nine times."""
    for x in xrange(1,10):
        post = logAction("networktest", "94BF840E", "ACT")
        #print post
        #print time.time()
        data = getVistorActions("94BF840E")
        #print data
        #print time.time()
# Card reader Functions
def open_reader():
""" Attempts to open the card reader """
try:
print "open reader try"
card = rfidiot.card
return card
except:
print "Couldn't open reader!"
sys.exit()
return None
def listen(card, interval):
    """ Listens for a card to be placed on the reader """
    while 1:
        if card.select():
            playConfirmation()
            # Log the scan; on success fetch and display the visitor's status.
            post = logAction(readerid, card.uid, "ACT")
            if post:
                data = getVistorActions(card.uid)
                print data
                print ("aantal punten: " + str(data['credits']))
                print ("huidige status: ")
                cprint(figlet_format(data['visitortype'], font='banner'),'yellow', 'on_red', attrs=['bold'])
                # print ("naam: " + str(data['name']) )
                playAudio(str(data['visitortype']))
            break
        # print 'Waiting: Card Placement'
        time.sleep(interval)
    # NOTE(review): the return below is commented out, so this function
    # returns None; callers passing its result along rely on that.
    # return card.uid
def listen_remove(card, interval, card_id):
    """ Listens for the card to be REMOVED from the reader.

    (The original docstring said "placed", which contradicts the
    `if not card.select()` check below.) Busy-waits: the sleep is
    commented out.
    """
    # Screen.wrapper(datascreen)
    while 1:
        if not card.select():
            # data = json.dumps({"card_info":
            #     [{"card_id": card_id}, {"timedate": get_time()}, {"action": "Removed"}]})
            # print(data)
            break
        # print "Waiting: Card Removal"
        # time.sleep(interval)
def playConfirmation():
    """Play the scan-confirmation sound unless audio is already playing."""
    if not mixer.music.get_busy():
        # Renamed from `dir`, which shadowed the builtin.
        base_dir = os.path.dirname(__file__)
        filename = os.path.join(base_dir, 'soundboard/Mobile/vip.mp3')
        mixer.music.load(filename)
        mixer.music.play()
def playAudio(userType):
    """Play the status sound matching *userType*.

    Retries up to four times at 0.3 s intervals while another sound is
    still playing, then gives up silently.
    """
    for x in xrange(1,5):
        if not mixer.music.get_busy():
            # print "first play"
            dir = os.path.dirname(__file__)
            if "Basic" in userType:
                filename = os.path.join(dir, 'soundboard/Mobile/basic.mp3')
            elif "Premium VIP" in userType :
                filename = os.path.join(dir, 'soundboard/Mobile/premium_vip.mp3')
            else:
                # Default/fallback sound for any other visitor type.
                filename = os.path.join(dir, 'soundboard/Mobile/vip.mp3')
            mixer.music.load(filename)
            mixer.music.play()
            break
        time.sleep(0.3)
    return None
##setup stuff
# Open the card reader
card = open_reader()
card_info = card.info('cardselect v0.1m')
# testNetwork()

# Main loop: scan a card, then wait for its removal, forever.
while 1:
    # print "main"
    # time.sleep(0.5)
    # NOTE(review): listen() currently returns None (its return statement is
    # commented out), so card_id is always None here.
    card_id = listen(card, 0.3)
    listen_remove(card, 0.1, card_id)

#Read RFID
#send ID to server
#print stuff
#print when ready for new scan
|
import unittest
from problem_322 import path_steps
class Problem322TestCase(unittest.TestCase):
    """Tests for problem_322.path_steps."""

    # Shared fixture values used by both cases.
    number_1 = 10
    number_2 = 11
    start = 0
    step = 0

    def test_path_steps_1(self):
        self.assertEqual(4, path_steps(self.start, self.step, self.number_1))

    def test_path_steps_2(self):
        self.assertEqual(5, path_steps(self.start, self.step, self.number_2))


if __name__ == "__main__":
    unittest.main()
|
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from pathlib import Path
import yaml
from doc_builder.utils import update_versions_file
class UtilsTester(unittest.TestCase):
    """Tests for doc_builder.utils.update_versions_file."""

    def test_update_versions_file(self):
        """Covers insertion order, main-only files, missing main, duplicates."""
        repo_folder = Path(__file__).parent.parent
        # test canonical
        with tempfile.TemporaryDirectory() as tmp_dir:
            with open(f"{tmp_dir}/_versions.yml", "w") as tmp_yml:
                versions = [{"version": "main"}, {"version": "v4.2.3"}, {"version": "v4.2.1"}]
                yaml.dump(versions, tmp_yml)
            update_versions_file(tmp_dir, "v4.2.2", repo_folder)
            with open(f"{tmp_dir}/_versions.yml", "r") as tmp_yml:
                yml_str = tmp_yml.read()
            # New version must be inserted in semver order, between existing ones.
            expected_yml = "- version: main\n- version: v4.2.3\n- version: v4.2.2\n- version: v4.2.1\n"
            self.assertEqual(yml_str, expected_yml)
        # test yml with main version only
        with tempfile.TemporaryDirectory() as tmp_dir:
            with open(f"{tmp_dir}/_versions.yml", "w") as tmp_yml:
                versions = [{"version": "main"}]
                yaml.dump(versions, tmp_yml)
            update_versions_file(tmp_dir, "v4.2.2", repo_folder)
            with open(f"{tmp_dir}/_versions.yml", "r") as tmp_yml:
                yml_str = tmp_yml.read()
            expected_yml = "- version: main\n- version: v4.2.2\n"
            self.assertEqual(yml_str, expected_yml)
        # test yml without main version
        with tempfile.TemporaryDirectory() as tmp_dir:
            with open(f"{tmp_dir}/_versions.yml", "w") as tmp_yml:
                versions = [{"version": "v4.2.2"}]
                yaml.dump(versions, tmp_yml)
            # A versions file lacking "main" is invalid and must raise.
            self.assertRaises(ValueError, update_versions_file, tmp_dir, "v4.2.2", repo_folder)
        # test inserting duplicate version into yml
        with tempfile.TemporaryDirectory() as tmp_dir:
            with open(f"{tmp_dir}/_versions.yml", "w") as tmp_yml:
                versions = [{"version": "main"}]
                yaml.dump(versions, tmp_yml)
            update_versions_file(tmp_dir, "v4.2.2", repo_folder)
            update_versions_file(tmp_dir, "v4.2.2", repo_folder)  # inserting duplicate version
            with open(f"{tmp_dir}/_versions.yml", "r") as tmp_yml:
                yml_str = tmp_yml.read()
            # Duplicates must be de-duplicated, not appended twice.
            expected_yml = "- version: main\n- version: v4.2.2\n"
            self.assertEqual(yml_str, expected_yml)
|
import os
from six.moves import xrange
from pymt.portprinter.port_printer import VtkPortPrinter
from pymt.testing.ports import UniformRectilinearGridPort
def test_one_file(tmpdir):
    """A single open/write produces one zero-numbered .vtu file."""
    port = UniformRectilinearGridPort()
    with tmpdir.as_cwd():
        printer = VtkPortPrinter(port, "landscape_surface__elevation")
        printer.open()
        printer.write()
        assert os.path.isfile("landscape_surface__elevation_0000.vtu")
def test_time_series(tmpdir):
    """Repeated writes on one printer number the output files sequentially."""
    expected_files = [
        "sea_floor_surface_sediment__mean_of_grain_size_0000.vtu",
        "sea_floor_surface_sediment__mean_of_grain_size_0001.vtu",
        "sea_floor_surface_sediment__mean_of_grain_size_0002.vtu",
        "sea_floor_surface_sediment__mean_of_grain_size_0003.vtu",
        "sea_floor_surface_sediment__mean_of_grain_size_0004.vtu",
    ]
    port = UniformRectilinearGridPort()
    with tmpdir.as_cwd():
        printer = VtkPortPrinter(port, "sea_floor_surface_sediment__mean_of_grain_size")
        printer.open()
        for _ in xrange(5):
            printer.write()
        printer.close()
        for filename in expected_files:
            assert os.path.isfile(filename)
def test_multiple_files(tmpdir):
    """Fresh printers for the same quantity must not clobber each other.

    NOTE(review): only the first file (0000) is asserted — presumably each
    new printer restarts its own counter; confirm intended semantics.
    """
    port = UniformRectilinearGridPort()
    with tmpdir.as_cwd():
        for _ in xrange(5):
            printer = VtkPortPrinter(port, "sea_surface__temperature")
            printer.open()
            printer.write()
            printer.close()
        assert os.path.isfile("sea_surface__temperature_0000.vtu")
def test_port_as_string(tmpdir, with_two_components):
    """A port may be given by name; the printer resolves it itself.

    with_two_components is a fixture (used for its setup side effects only).
    """
    with tmpdir.as_cwd():
        printer = VtkPortPrinter("air_port", "air__density")
        printer.open()
        printer.write()
        printer.close()
        assert os.path.isfile("air__density_0000.vtu")
|
class NoPubSubDriver(Exception):
    """Raised when no pub/sub driver is available — presumably when none is
    configured or installed; confirm against call sites."""
    pass
|
import numpy as np
# Demonstration of numpy.sign on integer, float, special-value and complex
# arrays; the expected outputs are recorded in the trailing comments.
a = np.array([-100, -10, 0, 10, 100])
print(a)
# [-100 -10 0 10 100]

print(np.sign(a))
# [-1 -1 0 1 1]

print(type(np.sign(a)))
# <class 'numpy.ndarray'>

print(np.sign(a).dtype)
# int64

a_float = np.array([-1.23, 0.0, 1.23])
print(a_float)
# [-1.23 0. 1.23]

print(np.sign(a_float))
# [-1. 0. 1.]

print(np.sign(a_float).dtype)
# float64

# Scalars: np.sign returns a numpy scalar, not a Python int/float.
print(np.sign(100))
# 1

print(type(np.sign(100)))
# <class 'numpy.int64'>

print(np.sign(-1.23))
# -1.0

print(type(np.sign(-1.23)))
# <class 'numpy.float64'>

# Special values: signed zeros both map to 0, infinities to +/-1, nan to nan.
a_special = np.array([0.0, -0.0, np.inf, -np.inf, np.nan])
print(a_special)
# [ 0. -0. inf -inf nan]

print(np.sign(a_special))
# [ 0. 0. 1. -1. nan]

print(np.sign(a_special).dtype)
# float64

# NOTE(review): the complex outputs below match pre-2.0 NumPy, where
# np.sign of a complex value is sign(real part) (sign(imag) if real == 0).
# NumPy 2.0 changed complex sign to x/|x| — confirm which version these
# recorded outputs target.
a_complex = np.array([[10 + 10j, -10 + 10j], [10 - 10j, -10 - 10j], [10, -10], [10j, -10j], [0, np.nan], [0j, np.nan * 1j]])
print(a_complex)
# [[ 10.+10.j -10.+10.j]
#  [ 10.-10.j -10.-10.j]
#  [ 10. +0.j -10. +0.j]
#  [ 0.+10.j -0.-10.j]
#  [ 0. +0.j nan +0.j]
#  [ 0. +0.j nan+nanj]]

print(np.sign(a_complex))
# [[ 1.+0.j -1.+0.j]
#  [ 1.+0.j -1.+0.j]
#  [ 1.+0.j -1.+0.j]
#  [ 1.+0.j -1.+0.j]
#  [ 0.+0.j nan+0.j]
#  [ 0.+0.j nan+0.j]]

print(a_complex.real)
# [[ 10. -10.]
#  [ 10. -10.]
#  [ 10. -10.]
#  [ 0. -0.]
#  [ 0. nan]
#  [ 0. nan]]

print(np.sign(a_complex.real))
# [[ 1. -1.]
#  [ 1. -1.]
#  [ 1. -1.]
#  [ 0. 0.]
#  [ 0. nan]
#  [ 0. nan]]

print(a_complex.imag)
# [[ 10. 10.]
#  [-10. -10.]
#  [ 0. 0.]
#  [ 10. -10.]
#  [ 0. 0.]
#  [ 0. nan]]

print(np.sign(a_complex.imag))
# [[ 1. 1.]
#  [-1. -1.]
#  [ 0. 0.]
#  [ 1. -1.]
#  [ 0. 0.]
#  [ 0. nan]]
|
#!/usr/bin/env python3
import re
from setuptools import setup, find_packages
INIT_FILE = 'orbital/__init__.py'
init_data = open(INIT_FILE).read()
# Collect dunder metadata (__version__ = '...', __author__ = '...', etc.)
# from the package's __init__.py so it is defined in exactly one place.
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", init_data))
AUTHOR_EMAIL = metadata['author']  # combined "Name <email>" string
VERSION = metadata['version']
LICENSE = metadata['license']
DESCRIPTION = metadata['description']
# Split "Name <email>" into its two parts for setup().
AUTHOR, EMAIL = re.match(r'(.*) <(.*)>', AUTHOR_EMAIL).groups()

requires = ['numpy', 'scipy', 'astropy', 'matplotlib', 'represent>=1.3.0',
            'sgp4']

setup(
    name='OrbitalPy',
    version=VERSION,
    description=DESCRIPTION,
    long_description=open('README').read(),
    author=AUTHOR,
    author_email=EMAIL,
    url='https://github.com/RazerM/orbital',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Astronomy',
        'License :: OSI Approved :: MIT License',
        # NOTE(review): the shebang targets python3 while this classifier
        # still advertises 2.7 — confirm which versions are supported.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    license=LICENSE,
    install_requires=requires)
|
from latex import build_pdf, LatexBuildError
from latex.errors import parse_log
import pytest
def test_generates_something():
    """A minimal valid LaTeX document builds to a truthy (non-empty) PDF."""
    min_latex = r"""
\documentclass{article}
\begin{document}
Hello, world!
\end{document}
"""
    pdf = build_pdf(min_latex)
    assert pdf
def test_raises_correct_exception_on_fail():
    """Input with no preamble/document body raises LatexBuildError."""
    broken_latex = r"""foo"""
    with pytest.raises(LatexBuildError):
        build_pdf(broken_latex)
def test_finds_errors_correctly():
    """get_errors() on the raised error equals parsing the raw log directly."""
    broken_latex = r"""
\documentclass{article}
\begin{document}
All good
\undefinedcontrolsequencehere
\end{document}
"""
    try:
        build_pdf(broken_latex)
    except LatexBuildError as e:
        assert parse_log(e.log) == e.get_errors()
    else:
        assert False, 'no exception raised'
|
#! python3
import re
import cloudconvert
def get_thumbnail_url(youtube_url, thumbnail_res):
    """Build the i.ytimg.com thumbnail URL for a YouTube link.

    youtube_url: any common YouTube URL form (watch?v=, youtu.be/, embed/, ...).
    thumbnail_res: resolution prefix, e.g. "maxres" or "mq".
    When no 11-character id can be extracted the id stays empty (and the
    resulting URL is broken), matching the original behaviour.
    """
    youtube_url = youtube_url.rstrip()
    youtube_id = ""
    # Extract the 11-character video id. The original class [0-9A-z-_] was a
    # bug: the A-z range also matches the punctuation [\]^_` between 'Z' and
    # 'a'. [0-9A-Za-z_-] is the actual YouTube id alphabet.
    search_pattern = re.search(r"(?:\/|%3D|v=|vi=)([0-9A-Za-z_-]{11})(?:[%#?&]|$)", youtube_url)
    if search_pattern:
        youtube_id = search_pattern.group(1)
    youtube_thumbnail_url = f"https://i.ytimg.com/vi/{youtube_id}/{thumbnail_res}default.jpg"
    print(f"\nVideo's max resolution thumbnail: {youtube_thumbnail_url}")
    return youtube_thumbnail_url
def download_thumbnail(youtube_thumbnail_url):
    """Fetch the thumbnail through cloudconvert and save it locally.

    Reads the API key from an "API_KEY" file in the working directory.
    NOTE(review): uses the legacy cloudconvert v1 API (Api/createProcess);
    confirm the installed cloudconvert package still ships it.
    """
    # Get the API key from the API_KEY file
    with open("API_KEY", "r") as file:
        API_KEY = file.read()
    # All of cloudconvert's processes and conversions
    print("Creating process...")
    api = cloudconvert.Api(API_KEY)
    process = api.createProcess({
        "inputformat": "jpg",
        "outputformat": "jpg"
    })
    print("Converting image...")
    process.start({
        "input": "download",
        "file": youtube_thumbnail_url,
        "outputformat": "jpg",
        "preset": "AduhH6JcFl",
        "wait": True
    })
    process.refresh()
    process.wait()
    print(process["message"])
    print("Downloading image to the same directory as the YT2TVDB.py file...")
    process.download()
    print("Download finished!")
def main():
    """Prompt for a YouTube URL and download its thumbnail.

    Tries the max-resolution thumbnail first and falls back to the
    medium-quality one on any failure.
    """
    youtube_url = input("Type or paste the YouTube link: ")
    try:
        thumbnail_url = get_thumbnail_url(youtube_url, "maxres")
        download_thumbnail(thumbnail_url)
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        print("Max resolution failed, retrying with a lower resolution")
        thumbnail_url = get_thumbnail_url(youtube_url, "mq")
        download_thumbnail(thumbnail_url)
    input("\nPress the return key to exit")  # Prevents CMD from auto-exiting
if __name__ == '__main__':
    # Run interactively only when executed as a script.
    main()
|
"""Helper classes for formulating game outcome measure"""
import numpy as np
import scipy.stats
import scipy.optimize
try:
    import matplotlib.pyplot as plt
except ImportError:
    # Plotting is optional: only GameOutcomeMeasure.plot needs matplotlib.
    # Narrowed from a bare except so errors other than a missing package are
    # not silently swallowed.
    pass
class GameOutcomeMeasure:
    """Base class for game outcome measure

    Derived class must implement __call__ method, which accepts array
    of point_differences and returns array of corresponding game
    outcome measures
    """

    def plot(self, ub=None, lb=0):
        """Plot the measure over point differences in [lb, ub].

        Requires matplotlib (imported optionally at module level).
        """
        if ub is None:
            # Default upper bound: the measure's own cap if it has one, else 20.
            if hasattr(self, 'max_point_diff'):
                ub = self.max_point_diff
            else:
                ub = 20
        xvals = np.linspace(lb, ub, 200)
        f, ax = plt.subplots()
        ax.plot(xvals, self(xvals))
        # Also mark the integer point differences explicitly.
        xpoints = np.arange(lb, ub+1)
        ax.plot(xpoints, self(xpoints), 'o')
        ax.grid()
        ax.set_xlabel('Point difference')
        ax.set_ylabel('Game outcome measure')
class PointDifference(GameOutcomeMeasure):
    """Identity measure: the outcome equals the raw point difference."""
    supports_off_def = True

    def __init__(self):
        pass

    def __call__(self, point_diff):
        # Pass the point difference through unchanged.
        return point_diff
class CappedPointDifference(GameOutcomeMeasure):
    """Point difference with its magnitude clipped to +/- cap."""

    def __init__(self, cap=15):
        self.max_point_diff = cap
        # A cap of 1 collapses the measure to pure win/lose, which cannot
        # separate offensive and defensive strength.
        self.supports_off_def = cap > 1

    def __call__(self, point_diff):
        magnitude = np.fmin(self.max_point_diff, np.abs(point_diff))
        return np.sign(point_diff) * magnitude
class BetaCurve(GameOutcomeMeasure):
    """Smooth, saturating outcome measure shaped by a Beta CDF."""

    def __init__(self, max_point_diff=20, gom_at_1=3, max_gom=None):
        """
        Parameters
        ----------
        max_point_diff : int
            Limit after which game outcome measure no longer increases
        gom_at_1 : int
            Value of the game outcome measure when the point
            difference is 1 (i.e., the amount earned by winning)
        max_gom : int or None
            Value of the game outcome measure at max_point_diff. If
            None, set equal to max_point_diff.
        """
        if max_gom is None:
            max_gom = max_point_diff
        normed_gom_at_1 = gom_at_1 / float(max_gom)
        xval = 1.0 / max_point_diff
        # Solve for alpha such that the Beta(alpha, 1/alpha) CDF passes
        # through (1/max_point_diff, gom_at_1/max_gom); beta = 1/alpha.
        def root_func(alpha):
            return scipy.stats.beta.cdf(xval, alpha, 1.0/alpha) - normed_gom_at_1
        sol = scipy.optimize.root_scalar(root_func, bracket=[0.05, 10.0])
        self.alpha = sol.root
        self.beta = 1.0/self.alpha
        self.max_point_diff = max_point_diff
        self.max_gom = max_gom
        # Frozen Beta distribution rescaled onto [0, max_point_diff].
        self.rv = scipy.stats.beta(self.alpha, self.beta, scale=self.max_point_diff)

    def __call__(self, point_diff):
        # Odd function: the sign carries the winner, the CDF shapes magnitude.
        return np.sign(point_diff) * self.max_gom * self.rv.cdf(np.abs(point_diff))
if __name__ == '__main__':
    # Quick visual sanity check of the default BetaCurve parameters.
    gom = BetaCurve()
    print('alpha:', gom.alpha)
    gom.plot()
    plt.show()
|
from flask import Flask
from flask import jsonify
from flask import abort
from card import Card
import maker
import json
app = Flask(__name__)
# Keep JSON key insertion order, emit UTF-8 (Japanese) unescaped, and
# pretty-print responses.
app.config['JSON_SORT_KEYS'] = False
app.config['JSON_AS_ASCII'] = False
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True

# Build the business-card list once at import time.
cards = maker.make()
@app.route('/')
def root():
    """Landing route; returns the service name as a health check."""
    return "JJSONPlaceholder"
@app.route('/cards')
def list_cards():
    """Return the full list of business cards as JSON."""
    # Comprehension replaces the manual append loop.
    return jsonify([card.__dict__ for card in cards])
@app.route('/cards/<int:id>')
def show_card(id):
    """Return one business card (1-based id) as JSON, or 404 if out of range.

    NOTE: `id` shadows the builtin, but the name is tied to the route
    parameter `<int:id>`, so it is kept.
    """
    # Chained comparison replaces `1 <= id and id <= len(cards)`.
    if 1 <= id <= len(cards):
        return jsonify(cards[id - 1].__dict__)
    abort(404)
if __name__ == '__main__':
    # Local test environment only; use a proper WSGI server in production.
    app.run(host='127.0.0.1', port=8080, debug=True)
|
# coding=utf-8
import json
import time
import sys
from functools import wraps
python_version = sys.version[0]  # first character of sys.version, e.g. '3'
# NOTE(review): sys.version_info[0] would be the robust spelling; kept as-is
# because downstream comparisons expect the string '3'.
if python_version == '3':
    basestring = str  # py2 compat shim: basestring does not exist on Python 3
def fn_timer(function):
    """Decorator that times element lookups.

    The wrapper returns ``(elapsed_seconds_as_str, original_result)``
    instead of the plain result, so callers can report the timing.
    """
    @wraps(function)
    def timed(*args, **kwargs):
        started = time.time()
        outcome = function(*args, **kwargs)
        elapsed = str(time.time() - started)
        return elapsed, outcome
    return timed
def format_json(content):
    """
    Pretty-format JSON (translated from: 格式化JSON).

    Accepts either a JSON string or an already-decoded object and returns a
    key-sorted, 4-space-indented string with non-ASCII characters un-escaped.
    """
    # Accept raw JSON text as well as decoded objects.
    if isinstance(content, basestring):
        content = json.loads(content)
    if python_version == '3':
        # json.dumps escapes non-ASCII to \uXXXX; the latin-1 encode followed
        # by a unicode_escape decode turns those escapes back into real
        # characters. NOTE(review): assumes no non-latin-1 characters survive
        # in the dumped text and no astral-plane input — confirm for emoji.
        result = json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')). \
            encode('latin-1').decode('unicode_escape')
    else:
        # Python 2: byte strings decode the escapes directly.
        result = json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')). \
            decode("unicode_escape")
    return result
def pretty_print(content):
    """Pretty-print JSON content to stdout (translated from: 美化打印)."""
    formatted = format_json(content)
    print(formatted)
|
import os
import json
import inspect
from datetime import date
import numpy as np
from typing import List, Tuple
import matplotlib.pyplot as plt
from ReinLife.World.entities import Agent
class Saver:
    """ Class for saving the results of an experiment
    If directories do not currently exist, it will create them. The general structure
    of the experiment will be:
    .
    ├── 2020-04-22_V1
    │ └── PPO
    │ │ └── brain_1.pt
    │ │ └── brain_2.pt
    │ └── PERD3QN
    │ │ └── brain_1.pt
    │ └── DQN
    │ │ └── brain_1.pt
    │ └── Results...
    │
    ├── 2020-04-22_V1
    │ └── PPO
    │ │ └── brain_1.pt
    │ └── PERD3QN
    │ └── brain_1.pt
    etc.
    Thus, each experiment is defined by the date of saving the models and an additional "_V1" if multiple
    experiments were performed on the same day. Within each experiment, each model is saved into a
    directory of its model class, for example PPO, PERD3QN, and DQN. Then, each model is saved as
    "brain_x.pt" where x is simply the sequence in which it is saved.
    Parameters:
    -----------
    main_folder : str
        The folder you would want to store the experiment.
        NOTE: This is just the name of the top folder. The exact location of the main_folder is determined
        by your current working directory.
    google_colab : bool, default False
        Whether you use google colaboratory to run the experiment
    """
    def __init__(self, main_folder: str, google_colab: bool = False):
        cwd = os.getcwd()
        self.google_colab = google_colab
        # NOTE(review): hard-codes "\\" for the non-colab case, which assumes a
        # Windows host; os.path.join/os.sep would be platform-neutral — confirm.
        self.separator = "/" if self.google_colab else "\\"
        self.main_folder = cwd + self.separator + main_folder
    def save(self, agents: List[Agent], family: bool, results: dict, settings: dict, fig: plt.Figure):
        """ Save brains and create directories if necessary
        Parameters:
        -----------
        agents : List[Agent]
            A list of all agents for which paths need to be created
        family : bool
            Whether there are static families
        results : dict
            The results of the experiment
        settings : dict
            The settings of the experiment
        fig : plt.Figure
            The matplotlib figure to save
        """
        directory_paths, agent_paths, experiment_path = self._get_paths(agents, family)
        self._create_directories(directory_paths)
        # One serialized brain per agent, at the path chosen in _get_paths.
        for agent in agents:
            agent.save_brain(agent_paths[agent])
        with open(experiment_path + self.separator + "results.json", "w") as f:
            json.dump(results, f, indent=4)
        with open(experiment_path + self.separator + "settings.json", "w") as f:
            json.dump(settings, f, indent=4)
        if fig:
            fig.savefig(experiment_path + self.separator + "results.png", dpi=150)
        self._save_params(agents, agent_paths)
        print("################")
        print("Save Successful!")
        print("################")
    def _get_paths(self, agents: List[Agent], family: bool) -> Tuple[List[str], dict, str]:
        """ Get all paths for creating directories and paths for agents' brains
        Parameters:
        -----------
        agents : List[Agent]
            A list of all agents for which paths need to be created
        family : bool
            Whether there are static families
        Returns:
        --------
        all_paths : List[str]
            All paths that need to be created
        agents_paths : dict
            For each agent, the path that needs to be created
        experiment_path : str
            The main experiment folder
        """
        # Get experiment folder path and increment if one already exists executed on the same day
        today = str(date.today())
        experiment_path = self.main_folder + self.separator + today + "_V1"
        if os.path.exists(experiment_path):
            # Bump the version suffix past the highest existing "_Vn" for today.
            # NOTE(review): `experiment_path[:-1] + index` only strips one
            # character, so this assumes the previous version number was a
            # single digit — breaks after _V9. Confirm whether >9 runs/day occur.
            paths = [path for path in os.listdir(self.main_folder) if today in path]
            index = str(max([self.get_int(path.split("V")[-1]) for path in paths]) + 1)
            experiment_path = experiment_path[:-1] + index
        # Get path for each model in the experiment directory
        model_paths = list(set([experiment_path + self.separator + agent.brain.method for agent in agents]))
        # Extract paths for each agent based on their gene value
        if family:
            agents_paths = {agent: experiment_path + self.separator + agent.brain.method +
                                   self.separator + "brain_gene_" + str(agent.gene)
                            for agent in agents}
        # If agents have the same model (i.e., "duplicates"), then increment their directory number
        else:
            agents_paths = {agent: experiment_path + self.separator + agent.brain.method + self.separator + "brain_1"
                            for agent in agents}
            # Find paths shared by more than one agent and fan them out to
            # brain_1, brain_2, ... NOTE(review): the loop variable `count`
            # shadows the `count` array returned by np.unique above, and the
            # `[:-1]` slice again assumes a single trailing digit — confirm.
            vals, count = np.unique([val for val in agents_paths.values()], return_counts=True)
            duplicates = {x[0]: y[0] for x, y in zip(vals[np.argwhere(count > 1)], count[np.argwhere(count > 1)])}
            for duplicate in duplicates.keys():
                for count in range(duplicates[duplicate]):
                    agents_paths[self.get_key(duplicate, agents_paths)] = duplicate[:-1] + str(count+1)
        all_paths = [self.main_folder] + [experiment_path] + model_paths
        return all_paths, agents_paths, experiment_path
    def _create_directories(self, all_paths: List[str]):
        """ Create directories if necessary and print which were created """
        created_paths = []
        for path in all_paths:
            if not os.path.exists(path):
                if self._create_directory(path):
                    created_paths.append(path)
                else:
                    raise Exception(f'{path} could not be created')
        if created_paths:
            print("The following directories were created: ")
            for path in created_paths:
                print(f"* {path}")
            print()
    @staticmethod
    def _save_params(agents: List[Agent], agent_paths: dict):
        """ Extract and save the parameters for each brain
        Parameters:
        -----------
        agents : List[Agent]
            A list of all agents for which paths need to be created
        agents_paths : dict
            For each agent, the path that needs to be created
        """
        parameters = {agent: {} for agent in agents}
        # Extract parameters: keep only JSON-friendly scalar attributes of the brain.
        for agent in agents:
            params = inspect.getmembers(agent.brain, lambda a: not (inspect.isroutine(a)))
            for name, val in params:
                if type(val) in [float, int, bool, str]:
                    parameters[agent][name] = val
        # Save parameters next to the brain file, as "parameters_x.json".
        for agent in agents:
            path = agent_paths[agent].replace("brain", "parameters") + ".json"
            with open(path, "w") as f:
                json.dump(parameters[agent], f, indent=4)
    @staticmethod
    def _create_directory(path: str) -> bool:
        """ Attempts to create a directory """
        try:
            os.mkdir(path)
        except OSError:
            return False
        else:
            return True
    @staticmethod
    def get_key(val: str, dictionary: dict) -> str:
        """ Gets the key of a value in a dictionary """
        return next(key for key, value in dictionary.items() if value == val)
    @staticmethod
    def get_int(a_string: str) -> int:
        """ Get all integers in a string """
        return int("".join([s for s in a_string if s.isdigit()]))
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""A few checks at the CASIA_FASD database.
"""
import os, sys
import unittest
from . import Database
from nose.plugins.skip import SkipTest
class FASDDatabaseTest(unittest.TestCase):
    """Performs various tests on the CASIA_FASD spoofing attack database."""
    # NOTE: a string-quoted copy of an old `test01_cross_valid` used to sit
    # here as a stray expression statement; it duplicated test06_cross_valid
    # below and was removed as dead code.
    def test02_dumplist(self):
        """The db-manage `dumplist` self-test must exit with status 0."""
        from bob.db.base.script.dbmanage import main
        self.assertEqual(main('casia_fasd dumplist --self-test'.split()), 0)
    def test03_checkfiles(self):
        """The db-manage `checkfiles` self-test must exit with status 0."""
        from bob.db.base.script.dbmanage import main
        self.assertEqual(main('casia_fasd checkfiles --self-test'.split()), 0)
    def test05_query_obj(self):
        """Object queries return the expected number of videos per filter."""
        db = Database()
        fobj = db.objects()
        self.assertEqual(len(fobj), 600)  # all videos in the database
        fobj = db.objects(groups='train', ids=[21])
        self.assertEqual(len(fobj), 0)  # train videos for client 21
        fobj = db.objects(groups='test', cls='real')
        self.assertEqual(len(fobj), 90)  # real test videos (30 clients * 3 qualities)
        fobj = db.objects(groups='test', cls='real', types='cut')
        # can not be real and attack at the same time
        self.assertEqual(len(fobj), 0)
        fobj = db.objects(groups='train', cls='real', qualities='low')
        # 20 clients * 1 real low quality video
        self.assertEqual(len(fobj), 20)
        fobj = db.objects(groups='train', cls='attack', qualities='normal')
        # 20 clients * 3 attack types
        self.assertEqual(len(fobj), 60)
        fobj = db.objects(groups='test', qualities='high')
        # 30 clients * 4 attack types
        self.assertEqual(len(fobj), 120)
        fobj = db.objects(groups='test', types='warped')
        # 30 clients * 3 qualities
        self.assertEqual(len(fobj), 90)
        fobj = db.objects(
            groups='test', types='video', qualities='high', ids=[1, 2, 3])
        # clients with ids 1, 2 and 3 are not in the test set
        self.assertEqual(len(fobj), 0)
        fobj = db.objects(
            groups='train', types='video', qualities='high', ids=[1, 2, 3])
        # high quality video attacks of clients 1, 2 and 3 (3 clients * 1)
        self.assertEqual(len(fobj), 3)
        fobj = db.objects(
            groups='train', types='video', qualities='high', ids=1)
        # high quality video attacks of client 1 (1 client * 1)
        self.assertEqual(len(fobj), 1)
        self.assertEqual(fobj[0].filename, 'train_release/1/HR_4')
        self.assertEqual(fobj[0].make_path('xxx', '.avi'),
                         'xxx/train_release/1/HR_4.avi')
        fobj = db.objects(
            groups='test', types='warped', qualities='low', ids=21)
        # low quality warped attacks of client 21 (1 client * 1)
        self.assertEqual(len(fobj), 1)
        self.assertFalse(fobj[0].is_real())
        self.assertEqual(fobj[0].get_clientid(), 21)
        self.assertEqual(fobj[0].get_type(), 'warped')
        self.assertEqual(fobj[0].get_quality(), 'low')
        # self.assertTrue(os.path.exists(fobj[0].facefile()))
    def test06_cross_valid(self):
        """Cross-validation folds have the expected sizes."""
        db = Database('folds')
        # db.cross_valid_gen(60, 60, 5) would regenerate the folds: there are
        # 60 real samples, and 60 samples of each attack type, split 5 ways.
        subsets_real, subsets_attack = db.cross_valid_read()
        self.assertEqual(len(subsets_real), 5)
        self.assertEqual(len(subsets_attack), 5)
        for i in range(0, 5):
            self.assertEqual(len(subsets_real[i]), 12)
            self.assertEqual(len(subsets_attack[i]), 12)
        # real accesses: 12 validation / 48 training samples per fold
        files_val, files_train = db.cross_valid_foldobjects(
            cls='real', fold_no=1)
        self.assertEqual(len(files_val), 12)
        self.assertEqual(len(files_train), 48)
        # warped attacks only
        files_val, files_train = db.cross_valid_foldobjects(
            types='warped', cls='attack', fold_no=2)
        self.assertEqual(len(files_val), 12)
        self.assertEqual(len(files_train), 48)
        # warped and cut attacks
        files_val, files_train = db.cross_valid_foldobjects(
            types=('warped', 'cut'), cls='attack', fold_no=3)
        self.assertEqual(len(files_val), 24)
        self.assertEqual(len(files_train), 96)
        # all three attack types, listed explicitly
        files_val, files_train = db.cross_valid_foldobjects(
            types=('warped', 'cut', 'video'), cls='attack', fold_no=4)
        self.assertEqual(len(files_val), 36)
        self.assertEqual(len(files_train), 144)
        # types=None must behave like "all attack types"
        files_val, files_train = db.cross_valid_foldobjects(
            types=None, cls='attack', fold_no=4)
        self.assertEqual(len(files_val), 36)
        self.assertEqual(len(files_train), 144)
|
# -*- coding: utf8 -*-
import json
from typing import List
from console.models import TaskIntra, task_intra_repo
from console.exceptions import NotFound, AlreadyExist
from console.user import UserService
from console.project import ProjectService
from console.utils import get_time_version
class TaskIntraService:
    """Service layer for creating, querying, and updating TaskIntra records."""
    task_intra_repo = task_intra_repo

    def __init__(self, tid: str = None, project_id: str = None, name: str = None, version: str = None):
        # Always define the attribute: the original left `self.task_intra`
        # unset when neither lookup branch matched, so the `is None` check in
        # update_task() raised AttributeError instead of NotFound.
        self.task_intra = None
        if tid:
            self.task_intra = self.task_intra_repo.get(tid)
        elif project_id and name and version:
            self.task_intra = self.task_intra_repo.filter(project_id=project_id, name=name, version=version)

    def create_task(self, project_id: str, name: str, owner_id: str, type: int, task_root: bool, token: str = None,
                    comment: str = None, config: str = None, meta: str = None):
        """Create (or version) a task; root tasks must be unique per project,
        non-root tasks inherit the token of their existing root."""
        UserService(uid=owner_id)       # validates the owner exists
        version = get_time_version()
        ProjectService(pid=project_id)  # validates the project exists
        task_intra_check = self.task_intra_repo.filter(project_id=project_id, name=name, task_root=True)
        if task_root:
            if task_intra_check:
                raise AlreadyExist(message=f'task intra {name} in project {project_id} already exist')
        else:
            if not task_intra_check:
                raise NotFound(message=f'task intra root {name} in project {project_id} not found')
            token = task_intra_check.token
        task_intra = TaskIntra(project_id=project_id, name=name, version=version, owner_id=owner_id, type=type,
                               token=token, task_root=task_root, comment=comment, config=config, meta=meta)
        self.task_intra = task_intra
        self.task_intra_repo.insert_or_update(self.task_intra)
        return self.task_intra

    def check_task_name(self, task_name: str) -> bool:
        """Return True when a task with this name already exists.
        NOTE(review): assumes repo.filter returns a single object or None,
        as its use in __init__ suggests — confirm against the repo API."""
        return self.task_intra_repo.filter(name=task_name) is not None

    def update_task(self, request_data: dict) -> TaskIntra:
        """Apply a partial update; config/meta dicts are merged into the
        stored JSON blobs rather than replaced."""
        if self.task_intra is None:
            raise NotFound(message='task intra object init failed')
        need_update = False
        if 'owner_id' in request_data and request_data['owner_id']:
            UserService(uid=request_data['owner_id'])  # validate new owner
            self.task_intra.owner_id = request_data['owner_id']
            need_update = True
        if 'token' in request_data:
            if self.task_intra_repo.filter(token=request_data['token']):
                raise AlreadyExist(message='token is already in use')
            self.task_intra.token = request_data['token']
            need_update = True
        if 'comment' in request_data:
            self.task_intra.comment = request_data['comment']
            need_update = True
        if 'config' in request_data:
            config = json.loads(self.task_intra.config) if self.task_intra.config else {}
            config.update(request_data['config'])
            self.task_intra.config = json.dumps(config)
            need_update = True
        if 'meta' in request_data:
            meta = json.loads(self.task_intra.meta) if self.task_intra.meta else {}
            meta.update(request_data['meta'])
            self.task_intra.meta = json.dumps(meta)
            need_update = True
        if need_update:
            self.task_intra_repo.insert_or_update(self.task_intra)
        return self.task_intra

    def get_task_list(self, request_data: dict) -> List[TaskIntra]:
        """Return all tasks matching the given filter keywords."""
        return self.task_intra_repo.get_all(**request_data)
|
import time
import machine, neopixel
# 10-pixel RGB (bpp=3) NeoPixel strip driven from GPIO 33.
np = neopixel.NeoPixel(machine.Pin(33), n=10,bpp=3,timing=1)
def demo(np):
    """Run three NeoPixel animations (cycle, bounce, fade) then clear the strip."""
    count = np.n
    # Cycle: a single white pixel chases around the strip four times.
    for step in range(4 * count):
        for idx in range(count):
            np[idx] = (0, 0, 0)
        np[step % count] = (255, 255, 255)
        np.write()
        time.sleep_ms(25)
    # Bounce: a dark gap travels back and forth across a dim blue strip.
    for step in range(4 * count):
        for idx in range(count):
            np[idx] = (0, 0, 128)
        if (step // count) % 2 == 0:
            np[step % count] = (0, 0, 0)
        else:
            np[count - 1 - (step % count)] = (0, 0, 0)
        np.write()
        time.sleep_ms(50)
    # Fade: ramp the red channel up and down twice, in steps of 8.
    for level in range(0, 4 * 256, 8):
        if (level // 256) % 2 == 0:
            brightness = level & 0xff
        else:
            brightness = 255 - (level & 0xff)
        for idx in range(count):
            np[idx] = (brightness, 0, 0)
        np.write()
    # Clear the strip.
    for idx in range(count):
        np[idx] = (0, 0, 0)
    np.write()
# Run the demo animations forever.
while True:
    demo(np)
|
import re
from collections import namedtuple
import attr
from automat import MethodicalMachine
from .conversion import Converter
from .messages import Notice, Error
# Splits CamelCase names at each interior capital letter.
_convert_to_underscores_lmao = re.compile(r"(?<!^)(?=[A-Z])")


def _get_last_collector(results):
    """
    Automat output collector: if any output is a twisted Deferred, gather the
    Deferreds into a DeferredList that fires with the last one's result;
    otherwise return the outputs as a plain list.
    """
    try:
        from twisted.internet.defer import Deferred, DeferredList
    except ImportError:
        Deferred = None
    results = list(results)
    if Deferred in map(type, results):
        # Keep only the Deferreds. The original mutated `results` with
        # .remove() while iterating it, which skips the element following
        # each removal; filtering into a new list is correct.
        deferreds = [res for res in results if isinstance(res, Deferred)]
        r = DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
        r.addCallback(lambda res: res[-1][-1])
        return r
    return results
@attr.s
class Transaction:
    """Async context manager wrapping a connection in BEGIN/COMMIT/ROLLBACK."""
    _conn = attr.ib()

    def begin(self):
        """Open the transaction on the wrapped connection."""
        return self._conn.execute("BEGIN", [])

    def commit(self):
        """Persist everything done since begin()."""
        return self._conn.execute("COMMIT", [])

    def rollback(self):
        """Discard everything done since begin()."""
        return self._conn.execute("ROLLBACK", [])

    async def __aenter__(self):
        await self.begin()
        return self._conn

    async def __aexit__(self, exc_type, exc, tb):
        # Roll back on any exception, commit on clean exit.
        if tb is not None:
            await self.rollback()
        else:
            await self.commit()
@attr.s
class PostgresConnection(object):
    """
    A PostgreSQL client connection modelled as an automat state machine.

    Wire messages arriving via _onMessage are dispatched to `_REMOTE_*`
    machine inputs; `upon(...)` declarations wire states, inputs, and
    outputs together. I/O scheduling is delegated to the injected
    `_io_impl`. NOTE(review): `self._pg` (the protocol that actually sends
    bytes) is assigned outside this class — confirm where it is set.
    """
    _machine = MethodicalMachine()
    # Injected reactor/event-loop adapter (make_callback/trigger_callback/connect).
    _io_impl = attr.ib()
    encoding = attr.ib(default="utf8")
    # Converts values between Python and Postgres wire representations.
    _converter = attr.ib(factory=Converter)
    # Rows accumulated for the query currently executing.
    _dataRows = attr.ib(factory=list, init=False, repr=False)
    # Password held only until authentication completes.
    _auth = attr.ib(default=None, init=False, repr=False)
    # Server runtime parameters reported via ParameterStatus messages.
    _parameters = attr.ib(factory=dict, init=False)
    # ------------------------------------------------------------------ states
    @_machine.state(initial=True)
    def DISCONNECTED(self):
        """
        Not connected.
        """
    @_machine.state()
    def CONNECTING(self):
        pass
    @_machine.state()
    def WAITING_FOR_AUTH(self):
        pass
    @_machine.state()
    def WAITING_FOR_READY(self):
        pass
    @_machine.state()
    def WAITING_FOR_PARSE(self):
        pass
    @_machine.state()
    def WAITING_FOR_DESCRIBE(self):
        pass
    @_machine.state()
    def WAITING_FOR_BIND(self):
        pass
    @_machine.state()
    def WAITING_FOR_CLOSE(self):
        pass
    @_machine.state()
    def READY(self):
        pass
    @_machine.state()
    def NEEDS_AUTH(self):
        pass
    @_machine.state()
    def RECEIVING_COPY_DATA(self):
        pass
    @_machine.state()
    def EXECUTING(self):
        pass
    @_machine.state()
    def WAITING_FOR_COPY_OUT_RESPONSE(self):
        pass
    @_machine.state()
    def COPY_OUT_COMPLETE(self):
        pass
    @_machine.state()
    def COMMAND_COMPLETE(self):
        pass
    # --------------------------------------- inputs: messages from the server
    @_machine.input()
    def _REMOTE_READY_FOR_QUERY(self, message):
        pass
    @_machine.input()
    def _REMOTE_PARSE_COMPLETE(self, message):
        pass
    @_machine.input()
    def _REMOTE_ROW_DESCRIPTION(self, message):
        pass
    @_machine.input()
    def _REMOTE_BIND_COMPLETE(self, message):
        pass
    @_machine.input()
    def _REMOTE_COMMAND_COMPLETE(self, message):
        pass
    @_machine.input()
    def _REMOTE_DATA_ROW(self, message):
        pass
    @_machine.input()
    def _REMOTE_NO_DATA(self, message):
        pass
    @_machine.input()
    def _REMOTE_AUTHENTICATION_OK(self, message):
        pass
    @_machine.input()
    def _REMOTE_AUTHENTICATION_CLEARTEXT_PASSWORD(self, message):
        pass
    @_machine.input()
    def _REMOTE_CLOSE_COMPLETE(self, message):
        pass
    @_machine.input()
    def _REMOTE_PARAMETER_STATUS(self, message):
        pass
    @_machine.input()
    def _REMOTE_COPY_OUT_RESPONSE(self, message):
        pass
    @_machine.input()
    def _REMOTE_COPY_DATA(self, message):
        pass
    @_machine.input()
    def _REMOTE_COPY_DONE(self, message):
        pass
    # ------------------------------------------------------------- connecting
    def _wait_for_ready(self, *args, **kwargs):
        # Create a fresh callback that fires on the next ReadyForQuery.
        self._ready_callback = self._io_impl.make_callback()
        return self._ready_callback
    @_machine.input()
    def connect(self, endpoint, database, username, password=None):
        pass
    @_machine.output()
    def do_connect(self, endpoint, database, username, password=None):
        # Stash the password until the server asks for it.
        if password:
            self._auth = password
        return self._io_impl.connect(self, endpoint, database, username)
    @_machine.output()
    def _wait_for_ready_on_connect(self, database, username, password):
        return self._wait_for_ready()
    @_machine.output()
    def _on_connected(self, message):
        if self._ready_callback:
            self._io_impl.trigger_callback(self._ready_callback, message.backend_status)
    DISCONNECTED.upon(
        connect,
        enter=CONNECTING,
        outputs=[do_connect, _wait_for_ready_on_connect],
        collector=_get_last_collector,
    )
    @_machine.output()
    def _send_auth_plaintext(self, message):
        self._pg.sendAuth(self._auth)
        # Let's not store this in memory.
        self._auth = None
    CONNECTING.upon(
        _REMOTE_AUTHENTICATION_CLEARTEXT_PASSWORD,
        enter=WAITING_FOR_AUTH,
        outputs=[_send_auth_plaintext],
    )
    WAITING_FOR_AUTH.upon(
        _REMOTE_AUTHENTICATION_OK, enter=WAITING_FOR_READY, outputs=[]
    )
    CONNECTING.upon(_REMOTE_AUTHENTICATION_OK, enter=WAITING_FOR_READY, outputs=[])
    @_machine.output()
    def _register_parameter(self, message):
        # Track server-reported runtime parameters; mirror the server's
        # encoding onto the protocol.
        self._parameters[message.name] = message.val
        if message.name == "server_encoding":
            self._pg._encoding = message.val
    WAITING_FOR_READY.upon(
        _REMOTE_PARAMETER_STATUS, enter=WAITING_FOR_READY, outputs=[_register_parameter]
    )
    WAITING_FOR_READY.upon(
        _REMOTE_READY_FOR_QUERY, enter=READY, outputs=[_on_connected]
    )
    COMMAND_COMPLETE.upon(_REMOTE_READY_FOR_QUERY, enter=READY, outputs=[_on_connected])
    # --------------------------------------------- extended-protocol querying
    @_machine.input()
    def query(self, query, vals):
        pass
    @_machine.output()
    def _do_query(self, query, vals):
        # Kick off the parse/describe/bind/execute sequence.
        self._currentQuery = query
        self._currentVals = vals
        self._dataRows = []
        self._ready_callback = self._io_impl.make_callback()
        self._pg.sendParse(query)
    @_machine.output()
    def _wait_for_result(self, query, vals):
        # Resolves with the collated rows once the command completes.
        self._result_callback = self._io_impl.make_callback()
        self._io_impl.add_callback(self._result_callback, lambda x: self._collate())
        return self._result_callback
    @_machine.output()
    def _wait_for_ready_on_query(self, query, vals):
        return self._wait_for_ready()
    READY.upon(
        query,
        enter=WAITING_FOR_PARSE,
        outputs=[_do_query, _wait_for_ready_on_query, _wait_for_result],
        collector=_get_last_collector,
    )
    def execute(self, command, args=[]):
        # Fire-and-forget variant of query(); the result is discarded.
        # NOTE(review): mutable default `args=[]` is shared across calls —
        # harmless while it is never mutated, but worth confirming.
        d = self.query(command, args)
        self._io_impl.add_callback(d, lambda x: None)
        return d
    @_machine.output()
    def _do_send_describe(self, message):
        self._pg.sendDescribe()
    WAITING_FOR_PARSE.upon(
        _REMOTE_PARSE_COMPLETE, enter=WAITING_FOR_DESCRIBE, outputs=[_do_send_describe]
    )
    @_machine.output()
    def _on_row_description(self, message):
        print(message.values)
        self._currentDescription = message.values
    @_machine.output()
    def _do_bind(self, message):
        # Convert the Python values to wire format and bind the portal.
        bind_vals = [self._converter.to_postgres(x) for x in self._currentVals]
        self._pg.sendBind(bind_vals)
    WAITING_FOR_DESCRIBE.upon(
        _REMOTE_ROW_DESCRIPTION,
        enter=WAITING_FOR_BIND,
        outputs=[_on_row_description, _do_bind],
    )
    WAITING_FOR_DESCRIBE.upon(
        _REMOTE_NO_DATA, enter=WAITING_FOR_BIND, outputs=[_do_bind]
    )
    @_machine.output()
    def _send_execute(self, message):
        self._pg.sendExecute("")
    WAITING_FOR_BIND.upon(
        _REMOTE_BIND_COMPLETE, enter=EXECUTING, outputs=[_send_execute]
    )
    @_machine.output()
    def _store_row(self, message):
        self._addDataRow(message)
    EXECUTING.upon(_REMOTE_DATA_ROW, enter=EXECUTING, outputs=[_store_row])
    @_machine.output()
    def _on_command_complete(self, message):
        self._currentQuery = None
        self._currentVals = None
        self._io_impl.trigger_callback(self._result_callback, True)
        self._pg.sync()
    EXECUTING.upon(
        _REMOTE_COMMAND_COMPLETE, enter=COMMAND_COMPLETE, outputs=[_on_command_complete]
    )
    def _addDataRow(self, msg):
        self._dataRows.append(msg.values)
    def _collate(self):
        """
        Collate the responses of a query.
        """
        if not self._dataRows:
            return []
        # Give unnamed columns a usable namedtuple field name.
        for row in self._currentDescription:
            if row.field_name == b"?column?":
                row.field_name = b"anonymous"
        res = namedtuple(
            "Result", [x.field_name.decode("utf8") for x in self._currentDescription]
        )
        resp = []
        for i in self._dataRows:
            row = []
            for x, form in zip(i, self._currentDescription):
                row.append(self._converter.from_postgres(x, form))
            resp.append(res(*row))
        self._dataRows.clear()
        self._currentDescription = None
        return resp
    # ---------------------------------------------------------------- closing
    @_machine.input()
    def close(self):
        pass
    @_machine.output()
    def _do_close(self):
        if not self._ready_callback:
            self._ready_callback = self._io_impl.make_callback()
        self._pg.close()
        return self._ready_callback
    READY.upon(
        close,
        enter=WAITING_FOR_CLOSE,
        outputs=[_do_close],
        collector=_get_last_collector,
    )
    WAITING_FOR_CLOSE.upon(_REMOTE_CLOSE_COMPLETE, enter=WAITING_FOR_READY, outputs=[])
    def _onMessage(self, message):
        # These can come at any time
        if isinstance(message, Notice):
            return
        elif isinstance(message, Error):
            print(message)
            self._pg.transport.loseConnection()
            return
        # Map e.g. RowDescription -> _REMOTE_ROW_DESCRIPTION and feed it to
        # the state machine; unknown message types are logged and dropped.
        rem = _convert_to_underscores_lmao.sub("_", message.__class__.__name__).upper()
        func = getattr(self, "_REMOTE_" + rem, None)
        if func is None:
            print(f"Ignoring incoming message {message} as {rem}")
            return
        func(message)
        return
    def new_transaction(self):
        return Transaction(self)
    # --------------------------------------------------------------- COPY OUT
    @_machine.input()
    def copy_out(self, target, table=None, query=None):
        pass
    @_machine.output()
    def _do_copy_out(self, target, table=None, query=None):
        # `target` is invoked once per CopyData message received.
        self._copy_out_func = target
        if table is not None and query is not None:
            raise Exception("Only one must be provided")
        if table:
            target_query = "COPY " + table.replace('"', '""') + " TO STDOUT"
        elif query:
            target_query = "COPY (" + query + ") TO STDOUT"
        # target_query += " WITH (FORMAT binary)"
        self._currentQuery = query
        self._currentVals = []
        self._ready_callback = self._io_impl.make_callback()
        self._pg.sendQuery(target_query)
        return self._ready_callback
    READY.upon(
        copy_out,
        enter=WAITING_FOR_COPY_OUT_RESPONSE,
        outputs=[_do_copy_out],
        collector=_get_last_collector,
    )
    @_machine.output()
    def _on_copy_out_response(self, message):
        pass
    WAITING_FOR_COPY_OUT_RESPONSE.upon(
        _REMOTE_COPY_OUT_RESPONSE,
        enter=RECEIVING_COPY_DATA,
        outputs=[_on_copy_out_response],
    )
    @_machine.output()
    def _on_copy_data(self, message):
        self._copy_out_func(message)
    RECEIVING_COPY_DATA.upon(
        _REMOTE_COPY_DATA, enter=RECEIVING_COPY_DATA, outputs=[_on_copy_data]
    )
    RECEIVING_COPY_DATA.upon(_REMOTE_COPY_DONE, enter=COPY_OUT_COMPLETE, outputs=[])
    @_machine.output()
    def _on_copy_out_complete(self, message):
        # self._io_impl.trigger_callback(self._copy_out_complete_callback, True)
        pass
    COPY_OUT_COMPLETE.upon(
        _REMOTE_COMMAND_COMPLETE,
        enter=COMMAND_COMPLETE,
        outputs=[],
    )
|
from django.apps import AppConfig
class DjangoConohaObjstorageConfig(AppConfig):
    """Django app configuration for the django_conoha_objstorage application."""
    name = 'django_conoha_objstorage'
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Make task_group_id nullable
Revision ID: 1a4241cfd4cd
Revises: 44047daa31a9
Create Date: 2015-08-12 10:48:03.112117
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1a4241cfd4cd'
down_revision = '44047daa31a9'
def upgrade():
    """Allow NULL values in cycle_task_groups.task_group_id."""
    op.alter_column("cycle_task_groups", "task_group_id",
                    existing_type=sa.Integer(), nullable=True)
def downgrade():
    """Restore the NOT NULL constraint on cycle_task_groups.task_group_id."""
    op.alter_column("cycle_task_groups", "task_group_id",
                    existing_type=sa.Integer(), nullable=False)
|
from .matrix_factorization import MFExplicitSGD
from .matrix_factorization_prep import MFExplicitPrepSGD
__all__ = [
"MFExplicitSGD",
"MFExplicitPrepSGD"
]
|
import numpy as np
import pickle
import glob
import copy
from l96 import l96
from l96 import l96_jacobian
#import ipdb
########################################################################################################################
# Non-linear model vectorized for ensembles
def l96V(x, f):
    """Derivative of the non-linear Lorenz 96 model, vectorized for ensembles.

    Takes the state array x of shape sys_dim X ens_dim and returns dxdt of
    the same shape.
    """
    # Cyclic shifts along the state axis replace the explicit concatenations:
    # rolling by +k yields the "index minus k" neighbour under periodicity.
    x_minus_2 = np.roll(x, 2, axis=0)
    x_minus_1 = np.roll(x, 1, axis=0)
    x_plus_1 = np.roll(x, -1, axis=0)
    return (x_plus_1 - x_minus_2) * x_minus_1 - x + f
########################################################################################################################
# Euler-Murayama path
def em_step_path(x, xi, h, args):
    """Propagate the ensemble state one Euler-Maruyama step of size h.

    The Wiener process has a scalar diffusion coefficient; xi is a pre-drawn
    standard normal realization of the Brownian increment, shared across the
    different integration methods. args = [f, diffusion].
    """
    f, diffusion = args
    # Scale the standard normal increment to variance h.
    W = xi * np.sqrt(h)
    return x + h * l96V(x, f) + diffusion * W
########################################################################################################################
# Stochastic Runge-Kutta, 4 step
# This is the four step runge kutta scheme for stratonovich calculus, described in Hansen and Penland 2005
# The rule has strong convergence order 1
def rk_step_path(x, xi, h, args):
    """One 4-stage Stratonovich Runge-Kutta step (Hansen & Penland 2005).

    The Brownian path is given a priori: xi is a pre-generated standard
    normal vector shared across methods. args = [f, diffusion].
    Strong convergence order 1.
    """
    f, diffusion = args
    # Scale the standard normal increment to variance h.
    W = xi * np.sqrt(h)
    # Build the four stages recursively; each reuses the same noise term.
    stages = []
    previous = 0.0
    for weight in (0.0, 0.5, 0.5, 1.0):
        previous = l96V(x + weight * previous, f) * h + diffusion * W
        stages.append(previous)
    k1, k2, k3, k4 = stages
    return x + (1 / 6) * (k1 + 2*k2 + 2*k3 + k4)
########################################################################################################################
# non-linear L96 Runge Kutta vectorized for ensembles
def l96_rk4_stepV(x, h, f):
    """Deterministic RK4 step of size h for the ensemble-vectorized L96 model."""
    half = h / 2.0
    s1 = l96V(x, f)
    s2 = l96V(x + s1 * half, f)
    s3 = l96V(x + s2 * half, f)
    s4 = l96V(x + s3 * h, f)
    return x + (h / 6.0) * (s1 + 2 * s2 + 2 * s3 + s4)
########################################################################################################################
# auxiliary functions for the 2nd order taylor expansion
# these need to be computed once, only as a function of the order of truncation of the fourier series, p
def rho(p):
    """Truncation constant for the order-2 Taylor scheme, given the Fourier
    series truncation order p (computed once and reused for every step)."""
    partial_sum = np.sum(1 / np.arange(1, p+1)**2)
    return 1/12 - .5 * np.pi**(-2) * partial_sum
def alpha(p):
    """Companion truncation constant to rho(p) for the order-2 Taylor scheme,
    using the fourth-power series instead of the second."""
    partial_sum = np.sum(1 / np.arange(1, p+1)**4)
    return (np.pi**2) / 180 - .5 * np.pi**(-2) * partial_sum
########################################################################################################################
# 2nd order strong taylor SDE step
# This method is derived from page 359, NUMERICAL SOLUTIONS OF STOCHASTIC DIFFERENTIAL EQUATIONS, KLOEDEN & PLATEN;
# this uses the approximate statonovich integrals defined on page 202
# this depends on rho and alpha as above
def l96_2tay_sde(x, h, args):
    """One step of the strong order-2.0 Taylor rule for the stochastic L96 model.

    The discretization error depends loosely on p, the truncation order of the
    Fourier series approximating the Stratonovich integrals. RHO and ALPHA are
    to be computed once by the auxiliary functions rho(p) / alpha(p) and
    supplied for all steps. xi is a standard normal vector shared across all
    integration methods so each sees the same Brownian realization.
    Reference: Kloeden & Platen, Numerical Solution of Stochastic Differential
    Equations, p. 359, with the approximate Stratonovich integrals of p. 202.
    """
    # Infer system dimension
    sys_dim = len(x)
    # unpack the args for the integration step
    [f, diffusion, p, RHO, ALPHA, xi] = args
    # deterministic tendency dx/dt and its Jacobian at the current state
    dx = l96(x, f)
    Jac_x = l96_jacobian(x)
    ### random variables
    # Vectors xi, mu, phi are sys_dim X 1 vectors of iid standard normal variables,
    # zeta and eta are sys_dim X p matrices of iid standard normal variables. Functional relationships describe each
    # variable W_j as the transformation of xi_j to be of variance given by the length of the time step h. The functions
    # of random Fourier coefficients a_i, b_i are given in terms of mu/eta and phi/zeta respectively.
    # draw all standard normal samples in one call, then slice into the named variables
    rndm = np.random.standard_normal([sys_dim, 2*p + 2])
    mu = rndm[:, 0]
    phi = rndm[:, 1]
    zeta = rndm[:, 2: p+2]
    eta = rndm[:, p+2:]
    ### define the auxiliary functions of random fourier coefficients, a and b
    # denominators for the a series: 1/l for l = 1..p, tiled over the state dimension
    tmp = np.tile(1 / np.arange(1, p+1), [sys_dim, 1])
    # vector of sums defining a terms
    a = -2 * np.sqrt(h * RHO) * mu - np.sqrt(2*h) * np.sum(zeta * tmp, axis=1) / np.pi
    # denominators for the b series: 1/l**2 for l = 1..p
    tmp = np.tile(1 / np.arange(1, p+1)**2, [sys_dim, 1])
    # vector of sums defining b terms
    b = np.sqrt(h * ALPHA) * phi + np.sqrt(h / (2 * np.pi**2) ) * np.sum(eta * tmp, axis=1)
    # vector of first order Stratonovich integrals
    J_pdelta = (h / 2) * (np.sqrt(h) * xi + a)
    ### auxiliary functions for higher order stratonovich integrals ###
    def Psi(l, j):
        # generic approximate second-order Stratonovich integral for component
        # indices l and j; psi_plus and psi_minus below are built from this form
        psi = h**2 * xi[l] * xi[j] / 3 + h * a[l] * a[j] / 2 + h**(1.5) * (xi[l] * a[j] + xi[j] * a[l]) / 4 \
            - h**(1.5) * (xi[l] * b[j] + xi[j] * b[l]) / (2 * np.pi)
        return psi
    # second order Stratonovich integrals evaluated at the cyclic neighbor
    # index pairs appearing in the L96 advection term
    psi_plus = np.array([Psi((i-1) % sys_dim, (i+1) % sys_dim) for i in range(sys_dim)])
    psi_minus = np.array([Psi((i-2) % sys_dim, (i-1) % sys_dim) for i in range(sys_dim)])
    # the final vectorized step forward is given as
    x_step = x + dx * h + h**2 * .5 * Jac_x @ dx  # deterministic taylor step
    x_step += diffusion * np.sqrt(h) * xi  # stochastic euler step
    x_step += + diffusion * Jac_x @ J_pdelta  # stochastic first order taylor step
    x_step += diffusion**2 * (psi_plus - psi_minus)  # stochastic second order taylor step
    return x_step
########################################################################################################################
def analyze_ensemble(ens, truth):
    """Compute the RMSE of the ensemble mean versus the truth, and the ensemble spread.

    ens:   sys_dim x N_ens array of ensemble member states
    truth: length sys_dim vector, the true (twin) state
    returns [rmse, spread]
    """
    # infer the ensemble size (the anomaly/covariance computation of the
    # original version was dead code and has been removed)
    [sys_dim, N_ens] = np.shape(ens)
    # ensemble mean state
    mean = np.mean(ens, axis=1)
    # RMSE of the ensemble mean, averaged over the state dimensions
    rmse = np.sqrt( np.mean( (truth - mean)**2 ) )
    # spread as in Whitaker & Loughe 98: the square root of the mean (over
    # components) of the ensemble sample variance
    spread = np.sqrt( ( 1 / (N_ens - 1) ) * np.sum(np.mean( (mean - ens.transpose())**2, axis=1)))
    return [rmse, spread]
########################################################################################################################
# Stochastic EnKF analysis step
def enkf_stoch_analysis(ens, obs_perts, obs_cov):
    """Perform a vanilla stochastic (perturbed-observation) EnKF analysis step.

    ens:       sys_dim x N_ens forecast ensemble
    obs_perts: sys_dim x N_ens matrix of perturbed observations (the
               observation operator is the identity here)
    obs_cov:   sys_dim x sys_dim ensemble-estimated observation error covariance
    returns the analysis ensemble.
    """
    # first infer the ensemble dimension and the system dimension
    [sys_dim, N_ens] = np.shape(ens)
    # we compute the ensemble mean and normalized anomalies
    X_mean = np.mean(ens, axis=1)
    A_t = (ens.transpose() - X_mean) / np.sqrt(N_ens - 1)
    # and the ensemble covariances
    S = A_t.transpose() @ A_t
    # Kalman update K @ d with gain K = S (S + R)^{-1}; computed via a linear
    # solve, K @ d = S @ solve(S + R, d), rather than forming the explicit
    # matrix inverse, which is slower and numerically less stable
    ens = ens + S @ np.linalg.solve(S + obs_cov, obs_perts - ens)
    return ens
##########################################################################################################################
def exp(args):
    """This experiment computes EnKF analysis statistics in a twin experiment where the ensemble integration method varies
    In the below, we will use a single truth-twin to generate an initial condition and observation sequences for different
    implementations of the stochastic EnKF across different methods of generating the ensemble-based forecast. We
    generate the forecast ensembles with respect to the same Brownian motion realizations for each the Euler-Maruyama,
    Runge-Kutta, Taylor and ad hoc methods. The filter RMSE and spread of each implementation is saved as output."""
    # unpack parameters used for the integration run:
    # truth trajectory, analysis interval, diffusion, obs error variance, obs step, RNG seed
    [tru_seq, tanl, diff, obs_un, obs_h, seed] = args
    # set system parameters: state dimension, integration step size, forcing
    sys_dim = 10
    h = 0.001
    f = 8
    params = [f, diff]
    # Taylor-scheme coefficients for Fourier truncation order p = 1
    RHO = rho(1)
    ALPHA = alpha(1)
    # set filter parameters
    obs_dim = 10  # NOTE(review): obs_dim is never used below; observations are full-state
    nanl = 25000  # number of analysis cycles contributing to the statistics
    burn = 5000  # initial spin-up cycles discarded from the statistics
    N_ens = 100  # ensemble size
    tanl_steps = int(tanl / h)  # integration steps between observations
    # generate the initial condition for all filters by perturbing the initial truth state
    X_em = np.random.multivariate_normal(tru_seq[:, 0], np.eye(sys_dim) * obs_un, size=N_ens).transpose()
    # all four methods start from the identical initial ensemble
    X_rk = copy.copy(X_em)
    X_ah = copy.copy(X_em)
    X_ty = copy.copy(X_em)
    # create storage for the forecast and analysis statistics ([rmse, spread] per cycle)
    em_for_stat = np.zeros([2, nanl])
    em_ana_stat = np.zeros([2, nanl])
    rk_for_stat = np.zeros([2, nanl])
    rk_ana_stat = np.zeros([2, nanl])
    ah_for_stat = np.zeros([2, nanl])
    ah_ana_stat = np.zeros([2, nanl])
    ty_for_stat = np.zeros([2, nanl])
    ty_ana_stat = np.zeros([2, nanl])
    # generate the observation sequence from the truth twin, offset by one past the IC
    tru_seq = tru_seq[:, 1: burn + nanl +1]
    obs_seq = tru_seq.transpose() + np.random.multivariate_normal(np.zeros(sys_dim), np.eye(sys_dim) * obs_un, size=(burn + nanl))
    obs_seq = obs_seq.transpose()
    for i in range(nanl + burn):
        # we loop over the analysis cycles
        # generate the brownian process over the length of the observation interval,
        # shared by all ensemble integration methods
        W = np.random.standard_normal([sys_dim, N_ens, tanl_steps])
        for j in range(tanl_steps):
            # we take tanl_steps forward to the next observation time
            # first choosing the noise matrix to be used by all ensembles
            xi = np.squeeze(W[:,:,j])
            # propagate each of the ensembles forward
            X_em = em_step_path(X_em, xi, h, params)
            X_rk = rk_step_path(X_rk, xi, h, params)
            X_ah = l96_rk4_stepV(X_ah, h, f)
            for k in range(N_ens):
                # the Taylor scheme is not vectorized over the ensemble, so loop member-wise
                # NOTE(review): this rebinds the outer parameter name `args`
                args = [f, diff, 1, RHO, ALPHA, np.squeeze(xi[:,k])]
                X_ty[:, k] = l96_2tay_sde(np.squeeze(X_ty[:, k]), h, args)
        # make a final perturbation by the same Brownian process all at the end instead, for the ad hoc method
        # NOTE(review): increments are scaled by h rather than sqrt(h) — presumably
        # deliberate for the ad hoc (biased) comparison; confirm the intended scaling
        X_ah = X_ah + diff * np.sum(W * h, axis=2)
        if i >= burn:
            # forecast RMSE and spread calculated
            em_for_stat[:, i - burn] = analyze_ensemble(X_em, tru_seq[:, i])
            rk_for_stat[:, i - burn] = analyze_ensemble(X_rk, tru_seq[:, i])
            ah_for_stat[:, i - burn] = analyze_ensemble(X_ah, tru_seq[:, i])
            ty_for_stat[:, i - burn] = analyze_ensemble(X_ty, tru_seq[:, i])
        # we use the perturbed observation (stochastic EnKF) so that we will want to generate the same perturbed observations
        # over each ensemble (though different across samples)
        obs_pert = np.sqrt(obs_un) * np.random.standard_normal([sys_dim, N_ens])
        # center the perturbations to mean zero across the ensemble
        obs_pert = (obs_pert.transpose() - np.mean(obs_pert, axis=1)).transpose()
        # empirical covariance of the drawn perturbations
        obs_cov = (obs_pert @ obs_pert.transpose()) / (N_ens - 1)
        # after computing the empirical observation error covariance, and the mean zero perturbations, we add these to the
        # original observation
        obs_pert = (obs_seq[:, i] + obs_pert.transpose()).transpose()
        # perform a kalman filtering step on each of the four ensembles
        X_em = enkf_stoch_analysis(X_em, obs_pert, obs_cov)
        X_rk = enkf_stoch_analysis(X_rk, obs_pert, obs_cov)
        X_ah = enkf_stoch_analysis(X_ah, obs_pert, obs_cov)
        X_ty = enkf_stoch_analysis(X_ty, obs_pert, obs_cov)
        if i >= burn:
            # analysis RMSE and spread calculated
            em_ana_stat[:, i - burn] = analyze_ensemble(X_em, tru_seq[:, i])
            rk_ana_stat[:, i - burn] = analyze_ensemble(X_rk, tru_seq[:, i])
            ah_ana_stat[:, i - burn] = analyze_ensemble(X_ah, tru_seq[:, i])
            ty_ana_stat[:, i - burn] = analyze_ensemble(X_ty, tru_seq[:, i])
    # bundle the statistics for serialization
    data = {
        'em_for_stat': em_for_stat, 'em_ana_stat': em_ana_stat,
        'rk_for_stat': rk_for_stat, 'rk_ana_stat': rk_ana_stat,
        'ah_for_stat': ah_for_stat, 'ah_ana_stat': ah_ana_stat,
        'ty_for_stat': ty_for_stat, 'ty_ana_stat': ty_ana_stat
    }
    # output file name stamped with the experiment configuration
    fname = './data/ens_bias_data_final/ens_bias_diff_' + str(diff).zfill(2) + '_tanl_' + str(tanl).zfill(2) + '_obs_un_' + str(obs_un).zfill(2) + \
            '_seed_' + str(seed).zfill(2) + \
            '_nens_' + str(N_ens).zfill(4) + '_nanl_' + str(nanl).zfill(3) + '_h_' + str(h).zfill(3) + '_obs_h_' + str(obs_h).zfill(3) + '.txt'
    # NOTE(review): `f` is rebound here from the forcing value to a file handle
    f = open(fname, 'wb')
    pickle.dump(data, f)
    f.close()
    return args
########################################################################################################################
# Code below used for a single run, for debugging purposes
#f = open('../data/obs_trajs/fine_coarse_obs/h_001/tay_obs_seed_000_sys_dim_10_analint_0.1_diffusion_0.1_h_0.001.txt', 'rb')
#tmp = pickle.load(f)
#f.close()
#
#tobs = tmp['tobs']
#params = tmp['params']
#
#args = [tobs, params[2], params[1], .25, params[3], params[0]]
#
#print(exp(args))
|
######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import unittest
from scheduling.minute_setbuilder import MinuteSetBuilder
class TestMinuteSetBuilder(unittest.TestCase):
    def test_name(self):
        """Every valid minute value 0-59 builds the corresponding singleton set."""
        # range upper bound is exclusive, so use 60 to cover minute 59 as well;
        # the original range(0, 59) stopped at 58 and left 59 untested, even
        # though test_exceptions asserts 60 is the first invalid value
        for i in range(0, 60):
            self.assertEqual(MinuteSetBuilder().build(str(i)), {i})

    def test_exceptions(self):
        """Values outside the 0-59 minute range raise ValueError."""
        self.assertRaises(ValueError, MinuteSetBuilder().build, "60")
|
import requests

# Prompt for the YouTube Data API key and the video id.
# input() replaces raw_input(), which exists only in Python 2 and raises
# NameError under Python 3.
print("key = ")
key = input()
print("id = ")
yt = input()
# query the YouTube Data API v3 statistics for the given video
r = requests.get('https://www.googleapis.com/youtube/v3/videos?id=' + yt + '&key=' + key + '&part=statistics').json()
like = int(r['items'][0]['statistics']['likeCount'])
# NOTE(review): dislikeCount was removed from the public YouTube API in Dec 2021,
# so this lookup may raise KeyError on current responses — confirm against the API
dislike = int(r['items'][0]['statistics']['dislikeCount'])
print("like = %d" % like)
print("dislike = %d" % dislike)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.