text
stringlengths 8
6.05M
|
|---|
# n=int(input('Enter:'))
# for i in range(1,n+1):
# for j in range(i):
# print('*',end=' ')
# print()
# n=int(input('Enter:'))
# for i in range(n,0,-1):
# for j in range(i):
# print('*',end=' ')
# print()
# n=int(input('Enter:'))
# k=1
# for i in range(1,n+1):
# for j in range(i):
# print(k,end=' ')
# k+=1
# print()
# a='python'
# l=len(a)
# for i in range(1,l+1):
# for j in range(i):
# print(a[j],end=' ')
# print()
# n=int(input('Enter:'))
# k=1
# while k<=n:
# for i in range(n-k,0,-1):
# print(' ',end='')
# for j in range(k):
# print('*',end=' ')
# k+=1
# print()
def inverted_pyramid(n):
    """Return the rows of a right-shifted inverted star pyramid.

    Row k (0-based) has k leading spaces followed by (n - k) stars,
    each star followed by a space — exactly what the original loop printed.
    """
    return [' ' * k + '* ' * (n - k) for k in range(n)]


if __name__ == '__main__':
    # Original behavior: read n, print the inverted pyramid row by row.
    n = int(input('Enter:'))
    for row in inverted_pyramid(n):
        print(row)
|
#!/usr/bin/env python
# coding: utf-8
# Benchmark exported from a Jupyter notebook ("In[n]" markers are cell
# boundaries): times a plain loop against its threaded variant from the
# local boucleThread module, one million iterations each.
# In[1]:
import sys
import time
# In[2]:
sys.path.append(".")  # make the sibling boucleThread module importable
# In[3]:
import boucleThread
# In[6]:
# Time the simple (single-threaded) loop.
time_init = time.perf_counter()
boucleThread.boucleSimple(1000000)
print( time.perf_counter() - time_init)
# In[7]:
# Time the threaded loop for comparison.
time_init = time.perf_counter()
boucleThread.boucleThread(1000000)
print( time.perf_counter() - time_init)
# In[ ]:
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generates Markdown documentation (docs/) for APIv2 resources and data
# types from the merged schema, plus an index page linking everything.
from storage.impl.deserialize import Deserializer
from storage.schema.deserialize import SchemaDeserializer
from storage.markdown.serialize import MarkdownImplSerializer, MarkdownSerializer
from models.reg import Registry
from models.data_types import ComplexDataType, PrimitiveDataType
import io
import os

if __name__ == u'__main__':
    resource_decoder = Deserializer(u'./schema/merged')
    resource_registry = Registry()
    type_decoder = SchemaDeserializer(u'./schema/merged')
    type_registry = Registry()
    resource_serializer = MarkdownImplSerializer(u'.', u'docs')
    type_serializer = MarkdownSerializer(u'.', u'docs')
    # Decode, register and serialize every resource.
    for name in resource_decoder.resources():
        res = resource_decoder.decode_resource(name)
        resource_registry.add_type(res)
        resource_serializer.serialize_resource(res)
    # Decode, register and serialize every data type.
    # (renamed loop variable: the original shadowed the builtin `type`)
    for name in type_decoder.types():
        data_type = type_decoder.decode_type(name)
        type_registry.add_type(data_type)
        type_serializer.serialize(data_type)
    # Partition the registered types for the index page.
    primitive_types = []
    complex_types = []
    for name in sorted(type_registry.types.keys()):
        data_type = type_registry.get_type(name)
        if isinstance(data_type, ComplexDataType):
            complex_types.append(data_type)
        elif isinstance(data_type, PrimitiveDataType):
            primitive_types.append(data_type)
    # Write the index with links to every type and resource page.
    with io.open(os.path.join(u'./', u'docs', u'index.md'), u'w', encoding='utf-8') as output:
        output.write(u'# APIv2 Documentation\n')
        output.write(u'## Простые типы данных\n')
        for data_type in primitive_types:
            output.write(u'- [' + data_type.name + u'](/Types/' + data_type.name + u')\n')
        output.write(u'## Сложные типы данных\n')
        for data_type in complex_types:
            output.write(u'- [' + data_type.name + u'](/Types/' + data_type.name + u')\n')
        output.write(u'## Ресурсы\n')
        for name in sorted(resource_registry.types.keys()):
            output.write(u'- [' + name + u'](/Resources/' + name + u')\n')
|
##################################################################
# Fengjun Yang, 2018
# Code for doing q-learning with the alpaca algorithm
##################################################################
import numpy as np
import tensorflow as tf
from metamountaincar.mmcNormalize import mmcNorm
class ALPaCAQ():
    """
    The online part (agent) of the ALPaCA Q learning.

    Takes an environment and a pretrained ALPaCA agent and can be trained
    with either e-greedy or TS algorithm.
    """
    def __init__(self, offline, env):
        self.env = env
        self.config = offline.config
        self.norm = mmcNorm(offline.config)
        self.alpaca = offline.alpaca
        # Extract the pretrained last-layer parameters from the TF session.
        session = offline.sess
        alpaca = offline.alpaca
        K0 = session.run(alpaca.K).astype(np.float64)
        L0 = session.run(alpaca.L).astype(np.float64)
        L_inv = np.linalg.inv(L0)
        self.Q = L0 @ K0
        self.SigEps = session.run(alpaca.SigEps)[0, 0, 0, 0]
        self.K = K0.copy()
        # One precision-inverse matrix per discrete action (3 actions).
        self.L_inv = np.tile(L_inv, (3, 1, 1))
        # Store *independent copies* of the initial weights so the agent can
        # be reset in experiments.  BUG FIX: the original aliased these to the
        # live arrays; update_model() mutates self.L_inv / self.Q in place,
        # which silently corrupted the saved initial weights.
        self.K0 = K0.copy()
        self.L_inv_0 = self.L_inv.copy()
        self.Q0 = self.Q.copy()

    def reset(self):
        """Restore the initial (pretrained) weights.

        BUG FIX: copies instead of aliases — the original rebound self.K to
        self.K0 itself, so subsequent in-place updates destroyed K0.
        """
        self.K = self.K0.copy()
        self.L_inv = self.L_inv_0.copy()
        self.Q = self.Q0.copy()

    def predict_q_values(self, state, K=None):
        """
        Given an observation, return the q value predicted by the alpaca
        agent. Observation should be a 1-d array.
        Can specify K to be the sampled weights or target weights.
        """
        if K is None:
            K = self.K
        phi = self.encode_observation(state)
        mu = self.norm.denorm_y(K.T @ phi)
        return mu

    def predict_var(self, state):
        """
        Give the (denormalized) predictive variance for each action.
        """
        phi = self.encode_observation(state)
        var = np.zeros(self.L_inv.shape[0])
        for i in range(3):
            v = (1 + phi.T @ self.L_inv[i] @ phi) * self.SigEps
            var[i] = self.norm.denorm_var_pred(v)
        return var

    def update_model(self, state, action, target):
        """
        Update the parameters based on the observed state transition and reward.
        """
        L_inv = self.L_inv[action]
        Q = self.Q[:, action]
        phi = self.encode_observation(state)
        # Rank-1 (Sherman–Morrison) update of the precision inverse
        # (Alg. 2, line 4).
        L_phi = L_inv @ phi
        L_inv_new = L_inv - (1 / (1 + phi.T @ L_phi)) * (L_phi @ L_phi.T)
        Q_new = Q[:, None] + phi * self.norm.norm_y(target)
        self.K[:, action] = (L_inv_new @ Q_new)[:, 0]
        self.L_inv[action] = L_inv_new
        self.Q[:, action] = Q_new[:, 0]

    def get_action(self, observation, training=False):
        """Greedy action: argmax over the predicted q-values."""
        pred_values = self.predict_q_values(observation)
        action = np.argmax(pred_values)
        return action

    def encode_observation(self, observation):
        """
        Map an observation to phi (the learned feature encoding).
        """
        norm_ob = self.norm.norm_x(observation[None, None, :])
        phi = self.alpaca.encode(norm_ob)[0].T
        return phi

    def decode_return(self, pred):
        """
        Map a prediction to q-value (denormalized).
        """
        q = self.norm.denorm_y(pred[None, None, :])
        return q
class EGreedyALPaCAQ(ALPaCAQ):
    """
    ALPaCAQ agent that explores the environment with an epsilon-greedy
    behavior policy.
    """
    def __init__(self, offline, env):
        super().__init__(offline, env)
        # Target-network copies of the weights, refreshed periodically.
        self.K_target = self.K.copy()
        self.L_inv_target = self.L_inv.copy()
        self.update_target = 500  # refresh the targets every N global steps
        self.num_steps = 0

    def train(self, epsilon=0.3, step_limit=200, render=False):
        """Run one epsilon-greedy episode, updating online; return its length."""
        obs = self.env.reset()
        done = False
        step = 0
        while step < step_limit and not done:
            # Greedy action, replaced by a random one with probability epsilon.
            action = np.argmax(self.predict_q_values(obs))
            if np.random.random() < epsilon:
                action = np.random.randint(3)
            if render:
                self.env.render()
            new_obs, reward, done, _ = self.env.step(action)
            # TD target: 0 at terminal states, else reward + clipped max-Q
            # from the target weights (q-values are capped at 0).
            if done:
                target = 0
            else:
                bootstrap = self.predict_q_values(new_obs, self.K_target).max()
                target = reward + min(bootstrap, 0)
            self.update_model(obs, action, target)
            obs = new_obs
            step += 1
            self.num_steps += 1
            if self.num_steps % self.update_target == 0:
                self.K_target = self.K.copy()
                self.L_inv_target = self.L_inv.copy()
        return step
class TS_ALPaCAQ(ALPaCAQ):
    """ALPaCAQ agent that explores via Thompson sampling (TS)."""
    def __init__(self, offline, env):
        super().__init__(offline, env)
        self.resample = 80  # kept for compatibility; sampling is per-episode
        self.update_target = 500
        self.num_steps = 0
        self.num_episodes = 0
        self.K_sample = np.zeros(self.K.shape, dtype=np.float64)
        self.K_target = self.K.copy()
        self.L_inv_target = self.L_inv.copy()
        self.sample_last_layer()

    def sample_last_layer(self):
        """Sample a last layer weights — one posterior draw per action."""
        for a in range(3):
            self.K_sample[:, a] = np.random.multivariate_normal(
                self.K_target[:, a], self.L_inv_target[a] * self.SigEps)

    def train(self, step_limit=200, render=False):
        """
        TS training per episode; Resample weights every episode.
        """
        self.sample_last_layer()
        obs = self.env.reset()
        done = False
        step = 0
        while step < step_limit and not done:
            # Act greedily w.r.t. the sampled weights.
            action = np.argmax(self.predict_q_values(obs, self.K_sample))
            if render:
                self.env.render()
            new_obs, reward, done, _ = self.env.step(action)
            # TD target: 0 at terminal states, else reward + clipped max-Q.
            if done:
                target = 0
            else:
                bootstrap = self.predict_q_values(new_obs, self.K_target).max()
                target = reward + min(bootstrap, 0)
            self.update_model(obs, action, target)
            obs = new_obs
            step += 1
            self.num_steps += 1
            if self.num_steps % self.update_target == 0:
                np.copyto(self.K_target, self.K)
                np.copyto(self.L_inv_target, self.L_inv)
        return step
class TS_MC_ALPaCAQ(TS_ALPaCAQ):
    """Thompson-sampling agent trained with Monte-Carlo (full-return) targets."""
    def __init__(self, offline, env):
        super().__init__(offline, env)

    def ts_sample_traj(self, step_limit):
        """
        Samples a trajectory to use for Monte-Carlo training.

        Returns:
            sa_traj    : list of (observation, action) pairs
            target_val : per-step reward-to-go (undiscounted return) targets
        """
        sa_traj, reward_traj = [], []
        observation = self.env.reset()
        counter = 0
        done = False
        # Sample a trajectory
        while counter < step_limit and not done:
            # BUG FIX: the original called self.predict_sampled_q_values(),
            # which is defined nowhere in the codebase; use the base-class
            # predictor with the sampled last-layer weights instead.
            sampled_q_values = self.predict_q_values(observation, self.K_sample)
            action = np.argmax(sampled_q_values)
            sa_traj.append((observation, action))
            observation, reward, done, _ = self.env.step(action)
            reward_traj.append(reward)
            counter = counter + 1
        # Convert rewards to reward-to-go targets (suffix sums).
        target_val = np.cumsum(reward_traj[::-1])[::-1]
        return sa_traj, target_val

    def train(self, step_limit=200):
        """Run one TS episode and do a Monte-Carlo update for each visited state."""
        # Resample last layer every episode
        self.sample_last_layer()
        sa_traj, target_val = self.ts_sample_traj(step_limit)
        episode_length = len(target_val)
        for i in range(episode_length):
            self.update_model(*sa_traj[i], target_val[i])
        return episode_length
|
def product_array(numbers):
    """Return a list where entry i is the product of every number except numbers[i].

    Inputs are positive integers (per the task statement), so integer
    division by each element recovers the "all but this one" product.
    """
    grand_product = 1
    for value in numbers:
        grand_product *= value
    return [grand_product // value for value in numbers]
'''
Task
Given an array/list [] of integers , Construct a product array Of same size Such
That prod[i] is equal to The Product of all the elements of Arr[] except Arr[i].
Notes
Array/list size is at least 2 .
Array/list's numbers will be only positives.
Repetition of numbers in the array/list may occur.
Input >> Output Examples
1- productArray ({12,20}) ==> return {20,12}
Explanation:
The first element in prod [] array 12 is the product of all
array's elements except the first element
The second element 20 is the product of all array's elements
except the second element .
2- productArray ({1,5,2}) ==> return {10,2,5}
Explanation:
The first element 10 is the product of all array's elements
except the first element *1***
The second element 2 is the product of all array's elements
except the second element 5
The Third element 5 is the product of all array's elements
except the Third element 2.
3- productArray ({10,3,5,6,2}) return ==> {180,600,360,300,900}
Explanation:
The first element 180 is the product of all array's elements except
the first element 10
The second element 600 is the product of all array's elements except
the second element 3
The Third element 360 is the product of all array's elements except
the third element 5
The Fourth element 300 is the product of all array's elements except
the fourth element 6
Finally ,The Fifth element 900 is the product of all array's elements
except the fifth element 2
'''
|
import os
import glob
import pandas as pd
import numpy as np
from PIL import Image
from PIL import ImageFilter
import gc
import time
import datetime
from calendar import timegm
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import random
import math
from playsound import playsound
#Training moduals
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
from sklearn.decomposition import PCA
import collections
#This is part of a group of functions involved in extracting meta and target data from a messy data storage system
# As a former scientist I can say that is unrealistic to assume that after 40 years of asking,
# the scientific community will start properly tagging data. 50% of their time is already occupied handling data.
# The idea for this program is that scientists can just throw their data into a hole and the following tools will
# pull it out and align it for analysis.
class Meta_getta:
    """Collects image paths and gauge-reading CSV data, aligning them on time.

    The three generic slots (list_obj / df_obj / str_obj) are reused by each
    method for whatever list, DataFrame or format string that step needs.
    """
    def __init__(self, list_obj, df_obj, str_obj):
        self.list_obj = list_obj  # list of file paths (or placeholder 0)
        self.df_obj = df_obj      # working DataFrame (or placeholder 0)
        self.str_obj = str_obj    # CSV filename or strptime format string

    def meta_getta(self, path):
        """Load the gauge CSV under `path` and keep stage reading + epoch time."""
        self.list_obj = glob.glob(path + '**/*.jpg', recursive=True)
        self.df_obj = pd.read_csv(path + self.str_obj)
        self.df_obj.columns = ['ISO/Time', 'date/time', 'stage reading', 'drp1', 'drp2', 'drp3']
        self.str_obj = '%m/%d/%Y %H:%M'  # format of the CSV's date/time column
        temp = self.df_obj['date/time'].values
        self.df_obj = self.df_obj.drop(columns=['drp1', 'drp2', 'drp3', 'ISO/Time'])
        self.df_obj['date/time'] = np.array(self.to_epoch(temp))

    def img_meta_getta(self):
        """Build a DataFrame of (epoch time, path) from image file mtimes."""
        mtimes = [int(os.stat(p).st_mtime) for p in self.list_obj]
        # (removed the original's useless `p` alias of the same string)
        self.str_obj = '%Y-%m-%d %H:%M:%S'
        self.df_obj = pd.DataFrame(np.array(self.to_epoch(self.norm_time(mtimes))), columns=['date/time'])
        self.df_obj['path'] = np.array(self.list_obj)

    def norm_time(self, obj):
        """Round epoch timestamps down to the whole minute, as local-time strings."""
        return [str(datetime.datetime.fromtimestamp(t).replace(second=0)) for t in obj]

    def to_epoch(self, obj):
        """Parse each time string with the current format (str_obj) into an epoch int."""
        # (removed a dead duplicate timegm/strptime computation per iteration)
        return [timegm(time.strptime(t, self.str_obj)) for t in obj]

    def merge_set(self, df1, df2):
        """Inner-join two frames on 'date/time', dropping unmatched rows."""
        merged = df1.set_index('date/time').join(df2.set_index('date/time'))
        self.df_obj = merged.dropna(axis=0).reset_index(drop=True)
##### Command Block Meta Getta #####
# Gather CSV gauge readings and image timestamps, then merge them on time.
meta = Meta_getta(0, 0, "Gage_height.ACTIVE_(PRIMARY,_MIXER)@04085108.20190201.csv")
meta.meta_getta("large_dataset/")
# Second instance re-uses the image path list found by the CSV pass.
img = Meta_getta(sorted(meta.list_obj), 0, 0)
img.img_meta_getta()
# Third instance only performs the time-indexed join of the two frames.
Feed_the_mule = Meta_getta(0, 0, 0)
Feed_the_mule.merge_set(img.df_obj, meta.df_obj)
#saves data to .csv in a proper data alignment
Feed_the_mule.df_obj.to_csv("final_meta.csv")
###### CV block ######
class Cambrain:
def __init__(self, df_obj, np_obj, size_int, time_strt, time_end):
self.df_obj = df_obj
self.np_obj = np_obj
self.size_int = size_int
self.time_strt = time_strt
self.time_end = time_end
def hacks(self):
return "\nTime for process took {:.6f} seconds\n".format(self.time_end - self.time_strt)
def give_up_the_ghosts(self):
n_components = self.np_obj
self.time_strt = time.time()
self.np_obj = SKPCA(n_components=n_components, whiten=True).fit(self.df_obj)
self.time_end = time.time()
print(self.hacks())
# Resizes/crops using the janky PIL library
def re_size(self):
size = (int(self.size_int * 192), int(self.size_int * 108))
img = self.df_obj.resize(size, Image.ANTIALIAS)
box = (0, int(self.size_int * 20), int(self.size_int * 192), int(self.size_int * 88))
self.df_obj = img.crop(box)
# Will project a image as long as the pixel values don't contain imaginary numbers.
def get_x(self):
self.time_strt = time.time()
x = self.df_obj['path']
path = x.tolist()
dims = Image.open(path[0])
w, h = dims.size
#print(h, w, len(meta))
dimensions = ((self.size_int * 68) * (self.size_int * 192)) # (int(h-400) * int(w))
datas = np.zeros(shape=(len(self.df_obj['path'].values), dimensions))
#print(datas.shape)
for i, obj in enumerate(path):
self.df_obj = Image.open(obj)
self.df_obj = self.df_obj.convert('L')
self.re_size()
self.df_obj = np.array(self.df_obj)
datas[i] = np.reshape(self.df_obj, -1)
self.df_obj = datas
self.time_end = time.time()
self.hacks()
def get_Y(self):
self.np_obj = np.asarray(self.df_obj['stage reading'], dtype="|S6")
class CAMBOT():
    """
    Orchestrates the train/evaluate loop: repeatedly splits the data, trains
    an MLP on PCA-reduced image features, and collects hold-back predictions
    from models that beat an RMSE threshold.
    """
    def __init__(self, TR, TS, LS, HB, PCA, HARD, SOFT, Y_pred, int_obj, target_obj):
        # TR/TS/LS/HB are Cambrain containers: train, test, learning set, hold-back.
        self.TR = TR
        self.TS = TS
        self.PCA = PCA          # Cambrain that will hold the fitted PCA model
        self.LS = LS
        self.HB = HB
        self.HARD = HARD        # verify instance with fine-grained bins
        self.SOFT = SOFT        # verify instance with coarse bins
        self.Y_pred = Y_pred
        self.int_obj = int_obj  # number of PCA components
        self.target_obj = target_obj
        self.RMSE = 0
        self.randy = 0          # RMSE of a randomized baseline predictor

    def train_model(self):
        """Fit PCA + MLP on the current train split; return the classifier."""
        self.LS.time_strt = time.time()
        self.TR.get_Y()
        self.TR.get_x()
        self.TS.get_Y()
        self.TS.get_x()
        self.PCA.df_obj = self.TR.df_obj
        self.PCA.np_obj = self.int_obj
        self.PCA.give_up_the_ghosts()
        PCA_x_tr = self.PCA.np_obj.transform(self.TR.df_obj)
        PCA_x_ts = self.PCA.np_obj.transform(self.TS.df_obj)
        clf = MLPClassifier(batch_size=150, verbose=True, early_stopping=True).fit(np.array(PCA_x_tr), np.array(self.TR.np_obj))
        self.Y_pred = clf.predict(PCA_x_ts)
        print('\n--Reduced dimensions to 9--\n')
        #results.to_csv("evaluation_doc.csv", ',')
        self.LS.time_end = time.time()
        self.LS.hacks()
        return clf

    def GO(self):
        """Collect 100 threshold-passing predictions, then run verification."""
        self.target_obj = self.HB.df_obj
        #hold_back.to_csv('holdback.txt')
        self.peek()
        self.HB.get_Y()
        self.HB.get_x()
        self.SOFT.pred_obj = []
        ctr = 0
        while len(self.SOFT.pred_obj) < 100:
            ctr += 1
            gc.collect()
            # Fresh random train/test split each attempt.
            self.TR.df_obj, self.TS.df_obj = train_test_split(self.LS.df_obj, test_size=0.20, random_state=random.randrange(843))
            clf = self.train_model()
            self.MSE()
            print('--Model Accuracy--\n\nRandom prediction: ', self.randy, '\nAlgorthim prediction: ', self.RMSE, '\nThreshold: 0.7')
            print('Total Runs: ', ctr)
            if self.RMSE < .7:
                # Model beats the threshold: keep its first hold-back prediction.
                PCA_x_ts = self.PCA.np_obj.transform(self.HB.df_obj)
                self.Y_pred = clf.predict(PCA_x_ts)
                self.SOFT.pred_obj.append(self.Y_pred[0])
                print("Successful tests: ", len(self.SOFT.pred_obj))
                print("\n--Viable slope--\n\ny true = ", self.HB.np_obj[0].decode('utf-8'), " ft y predicted = ", self.Y_pred[0].decode('utf-8'), 'ft\n')
        # NOTE(review): source indentation was lost; the verification phase is
        # placed after the collection loop, which matches the surrounding
        # logic (verify once 100 predictions are gathered) — confirm.
        self.HARD.pred_obj = self.SOFT.pred_obj
        self.SOFT.np_obj = self.HB.np_obj[0]
        self.HARD.np_obj = self.HB.np_obj[0]
        self.SOFT.VERIFY()
        self.HARD.VERIFY()
        #print ('\nHard Precision: ', 100*(self.HARD.FP/len(self.SOFT.pred_obj)), '%%\nSoft Prediction: ', 100 * (self.SOFT.FP/len(self.SOFT.pred_obj)),
        #    '%%\n\nConfidence score: ')
        #alert_alarm()

    # Mean squared Error to tell me how dumb this robot is.
    def MSE(self):
        """Set self.RMSE for the model and self.randy for a jittered mean baseline."""
        self.randy = [np.array(self.TS.np_obj, dtype = float).mean()] * len(self.TS.np_obj)
        for i, obj in enumerate(self.randy):
            self.randy[i] = obj*self.randocalrisian()
        err = np.subtract(np.array(self.randy, dtype=float), np.array(self.TS.np_obj, dtype=float))
        SE = np.square(err)
        self.randy = math.sqrt(SE.mean())
        err = np.subtract(np.array(self.Y_pred, dtype=float), np.array(self.TS.np_obj, dtype=float))
        SE = np.square(err)
        self.RMSE = math.sqrt(SE.mean())

    def randocalrisian(self):
        """Random scale factor in [0.25, 1.75) used to jitter the baseline."""
        s = random.uniform(.25, 1.75)
        return s

    def peek(self):
        """Display the first hold-back image (loop breaks after one)."""
        x = self.target_obj['path']
        path = x.tolist()
        for i, obj in enumerate(path):
            img = Image.open(obj)
            img = img.resize((3*192, 3*108))
            plt.imshow(img)
            plt.show()
            break
class verify:
    """Accumulates predictions and reports the modal value at a given precision.

    acc_obj is the bin step size (e.g. 0.1); depth scales how many bins are
    generated.  VERIFY() histograms the rounded predictions and prints the
    most frequent class with its hit count and percentage.
    """
    def __init__(self, acc_obj, depth):
        # BUG FIX: `decimal` was never imported at module level (NameError
        # on construction); import locally to keep this block self-contained.
        import decimal
        self.pred_obj = 0
        self.FP = 0
        self.acc_obj = acc_obj
        self.TP = 0
        self.np_obj = 0
        self.depth = depth
        d = decimal.Decimal(str(acc_obj))
        r = d.as_tuple().exponent
        self.precision = abs(r)                     # decimal places in the step size
        self.rng = depth * (10 ** self.precision)   # number of histogram bins
        print(self.rng)

    def VERIFY(self):
        """Histogram the rounded predictions and print the modal class."""
        clss = [round(i * self.acc_obj, self.precision) for i in range(self.rng + 1)]
        KNN = dict.fromkeys(clss, 0)
        for pred in self.pred_obj:
            check = round(float(pred), self.precision)
            KNN[check] += 1
        # Sort (count, class) pairs descending; the head is the mode.
        best = sorted(((value, key) for (key, value) in KNN.items()), reverse=True)
        choice = best[0][1]
        TP = best[0][0]
        Precision = (TP / len(self.pred_obj)) * 100
        print(choice, '\n', TP, '\n', Precision, '%')
        FP = np.subtract(np.array(best), np.array((0, best[0][1])))
        print(FP)
        # BUG FIX: the original tail always crashed — `np.array()` with no
        # argument is a TypeError and `best[:100,]` is invalid on a list.
        # Dropped pending clarification of the intended false-positive output.
##### Command Block #####
# Wire up the containers and launch the train/verify loop.
# NOTE(review): the name `PCA` below shadows sklearn.decomposition.PCA
# imported at the top of the file — confirm that nothing else relies on it.
Hold_back = Cambrain(0, 0, 2, 0, 0)
Learning_set = Cambrain(0, 0, 2, 0, 0)
#Produce Eignen Vectors and Values Via PCA
datas = pd.read_csv("final_meta.csv")
# Tiny hold-back split (0.1%) kept aside for final evaluation.
Learning_set.df_obj, Hold_back.df_obj = train_test_split(datas, test_size=0.001, random_state=random.randrange(843))
print (Learning_set.df_obj.shape)
PCA = Cambrain(Learning_set.df_obj, 0, 2, 0, 2)
Tr_set = Cambrain(Learning_set.df_obj ,0 ,2 ,0 ,0)
Ts_set = Cambrain(Learning_set.df_obj, 0, 2, 0, 0)
Target_set = Cambrain(Hold_back.df_obj, 0, 2, 0, 0)
# Coarse (0.1) and fine (0.01) verification binning.
soft = verify(0.1, 20)
hard = verify(0.01, 20)
cambot = CAMBOT(Tr_set, Ts_set, Learning_set, Hold_back, PCA, hard, soft, 0, 100, 0)
cambot.GO()
|
#Kayla Batzer
#HW 5 P1
#I pledge my honor to abide by the Stevens honor code
def square(nums):
    """Square every element of *nums* in place (returns None)."""
    for index, value in enumerate(nums):
        nums[index] = value * value
def main():
    """Demo: square a fixed list in place and show the result."""
    values = [1, 2, 3, 4, 5]
    square(values)
    print(values)

main()
|
"""
Core module for ace-cli.
"""
from ace import config
from ace import project
from ace import graphstack
from ace import plugins
from inspect import getdoc
def seed_parser(parser):
    """
    Adds arguments to parser.

    Delegates to the project, graphstack and plugins seeders so every
    command group's flags are registered on the shared parser.
    """
    seed_parser_project(parser)
    seed_parser_graphstack(parser)
    seed_parser_plugins(parser)


def seed_commands(commands):
    """
    Adds commands.

    Core commands are:
        init
        project
            add
            remove
            set
            clear
            dump
            load
            ping
            apiversion
            current
            list
        graphstack
            add
            remove
            set
            clear
            current
            trigger
            apikey
            list
        plugins
            install
            uninstall
            list
            available
    """
    # Each top-level command maps to a dict of subcommand callables;
    # '__default__' is invoked when no subcommand is given.
    default_init = {'__default__':init_command}
    commands['init'] = default_init
    commands['project'] = project_commands()
    commands['graphstack'] = graphstack_commands()
    commands['plugins'] = plugins_commands()


def seed_help(help_messages):
    """
    Adds help messages to help block.

    Mirrors seed_commands(): one help dict per top-level command.
    """
    init_help = {'__default__':getdoc(init_command)}
    help_messages['init'] = init_help
    help_messages['project'] = project_help()
    help_messages['graphstack'] = graphstack_help()
    help_messages['plugins'] = plugins_help()


# ===============
# = Subcommands =
# ===============
def init_command(args):
    """
    Initializes ACE.
    """
    config.init_environment(args)
# =======================
# = Project Subcommands =
# =======================
# Thin wrappers: each delegates to the config/project modules so the
# command table in project_commands() can map names to callables.
def add_project_command(args):
    """
    Adds a project to main config.
    """
    config.add_project(args)


def remove_project_command(args):
    """
    Removes a project from main config.
    """
    config.remove_project(args)


def set_project_command(args):
    """
    Sets the local project.
    """
    config.set_project(args)


def clear_project_command(args):
    """
    Clears the local project.
    """
    config.clear_project(args)


def dump_project_command(args):
    """
    Dumps the local project to JSON.
    """
    project.dump_project(args)


def load_project_command(args):
    """
    Loads the project from JSON.
    """
    project.load_project(args)


def ping_library_command(args):
    """
    Pings the project library.
    """
    project.ping_library(args)


def api_version_command(args):
    """
    Gets the API version set for the current project.
    """
    return config.api_version(args)


def current_project_command(args):
    """
    Gets the current project.
    """
    return config.current_project(args)


def list_projects_command(args):
    """
    Lists the projects.
    """
    return config.list_projects(args)


def show_librarykey_command(args):
    """
    Show the current project's library key.
    """
    return config.library_key(args)
# ==========================
# = GraphStack Subcommands =
# ==========================
# Thin wrappers: delegate to config/graphstack for the command table.
def add_graphstack_command(args):
    """
    Adds a graphstack to the main config.
    """
    config.add_graphstack(args)


def remove_graphstack_command(args):
    """
    Removes a graphstack from the main config.
    """
    config.remove_graphstack(args)


def set_graphstack_command(args):
    """
    Sets the current graphstack.
    """
    config.set_graphstack(args)


def clear_graphstack_command(args):
    """
    Clears the current graphstack.
    """
    config.clear_graphstack(args)


def current_graphstack_command(args):
    """
    Gets the current graphstack.
    """
    return config.current_graphstack(args)


def trigger_graphstack_command(args):
    """
    Triggers the graphstack.
    """
    graphstack.trigger_graphstack(args)


def api_key_command(args):
    """
    Gets the API key for the current graphstack.
    """
    return config.api_key(args)


def list_graphstacks_command(args):
    """
    Lists the graphstacks for a project.
    """
    return config.list_graphstacks(args)
# =======================
# = Plugins Subcommands =
# =======================
# Thin wrappers: delegate to config/plugins for the command table.
def install_plugin_command(args):
    """
    Installs a plugin.
    """
    config.install_plugin(args)


def uninstall_plugin_command(args):
    """
    Uninstalls a plugin.
    """
    config.uninstall_plugin(args)


def list_installed_plugins_command(args):
    """
    Lists the installed plugins.
    """
    config.list_installed_plugins(args)


def available_plugins_command(args):
    """
    Lists the available, uninstalled plugins.
    """
    plugins.available(args)
# ====================
# = Command Builders =
# ====================
def project_commands():
    """
    Project actions.

    Returns the subcommand-name -> callable table for `project`.
    """
    pcom = {'add':add_project_command,
            'remove':remove_project_command,
            'set':set_project_command,
            'clear':clear_project_command,
            'dump':dump_project_command,
            'load':load_project_command,
            'ping':ping_library_command,
            'apiversion':api_version_command,
            'current':current_project_command,
            'list':list_projects_command,
            'librarykey':show_librarykey_command}
    return pcom


def graphstack_commands():
    """
    GraphStack commands.

    Returns the subcommand-name -> callable table for `graphstack`.
    """
    gscom = {'add':add_graphstack_command,
             'remove':remove_graphstack_command,
             'set':set_graphstack_command,
             'clear':clear_graphstack_command,
             'current':current_graphstack_command,
             'trigger':trigger_graphstack_command,
             'apikey':api_key_command,
             'list':list_graphstacks_command}
    return gscom


def plugins_commands():
    """
    Plugins commands.

    Returns the subcommand-name -> callable table for `plugins`.
    """
    picom = {'install':install_plugin_command,
             'uninstall':uninstall_plugin_command,
             'list':list_installed_plugins_command,
             'available':available_plugins_command}
    return picom
# ==================
# = Parser Seeders =
# ==================
def seed_parser_project(parser):
    """
    Seeds the parser for project related args.
    """
    parser.add_argument('--library-key',dest='library_key',default=None)
    parser.add_argument('--project',dest='project',default=None)
    parser.add_argument('--api-version',dest='api_version',default=None)
    parser.add_argument('--data-file',dest='data_file',default=None)


def seed_parser_graphstack(parser):
    """
    Seeds the parser for graphstack related args.
    """
    parser.add_argument('--api-key',dest='api_key',default=None)
    parser.add_argument('--graphstack',dest='graphstack',default=None)
    parser.add_argument('--category',dest='category',default=None)
    parser.add_argument('--action',dest='action',default=None)
    parser.add_argument('--num-triggers',dest='num_triggers',default=1)
    parser.add_argument('--profile-distribution',dest='profile_distribution',default=1.0) # percent of triggers with unique profile ids
    parser.add_argument('--variables',dest='variables',default=None) # trigger variables with pattern var1:val1,var2:val2,var3:val3...
    parser.add_argument('--profile',dest='profile',default=None) # you can specify a particular profile


def seed_parser_plugins(parser):
    """
    Seeds the parser for plugins related args.
    """
    parser.add_argument('--plugin',dest='plugin',default=None)
# =================
# = Help Builders =
# =================
def project_help():
    """
    Help for project functions.

    Returns the subcommand-name -> help-text table for `project`.
    """
    # Keep in sync with project_commands(): the original omitted the
    # 'librarykey' entry, so that subcommand had no help message.
    phelp = {'add':getdoc(add_project_command),
             'remove':getdoc(remove_project_command),
             'set':getdoc(set_project_command),
             'clear':getdoc(clear_project_command),
             'dump':getdoc(dump_project_command),
             'load': getdoc(load_project_command),
             'ping':getdoc(ping_library_command),
             'apiversion':getdoc(api_version_command),
             'current':getdoc(current_project_command),
             'list':getdoc(list_projects_command),
             'librarykey':getdoc(show_librarykey_command)}
    return phelp
def graphstack_help():
    """
    Help for graphstack functions.

    Returns the subcommand-name -> help-text table for `graphstack`.
    """
    # Keep in sync with graphstack_commands(): the original omitted the
    # 'list' entry, so that subcommand had no help message.
    gshelp = {'add':getdoc(add_graphstack_command),
              'remove':getdoc(remove_graphstack_command),
              'set':getdoc(set_graphstack_command),
              'clear':getdoc(clear_graphstack_command),
              'current':getdoc(current_graphstack_command),
              'trigger':getdoc(trigger_graphstack_command),
              'apikey':getdoc(api_key_command),
              'list':getdoc(list_graphstacks_command)}
    return gshelp
def plugins_help():
    """
    Help for plugins functions.

    Returns the subcommand-name -> help-text table for `plugins`.
    """
    pihelp = {'install':getdoc(install_plugin_command),
              'uninstall':getdoc(uninstall_plugin_command),
              'list':getdoc(list_installed_plugins_command),
              'available':getdoc(available_plugins_command)}
    return pihelp
|
## Script (Python) "getTVBrNCP"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
# Zope "Script (Python)": the module-level `return` is valid because Zope
# compiles this body as a function, with `context` bound above.
folder_path = '/'.join(context.getPhysicalPath())
# Newest-first 'Grupo' objects found under the current folder.
solicitacoes = context.portal_catalog.searchResults(meta_type=['Grupo'], sort_on="modified", sort_order="reverse", path={'query': folder_path})
return solicitacoes
|
import turtle

# Draw 10 segments, turning 30 degrees left after each, while counting
# down in the console (traces part of a 12-sided figure).
count = 10
while (count > 0):
    turtle.forward(100)
    turtle.left(30)
    print(count)
    count = count - 1
|
from django.shortcuts import render

# Create your views here.
def handler404(request):
    # Custom 404: render the shared error template with the proper status code.
    return render(request, 'travolta.html', status=404)


def handler500(request):
    # break
    # return True
    # Custom 500: same template, server-error status code.
    return render(request, 'travolta.html', status=500)


# def handler502(request):
#     if (request.path == 'favicon.ico'):
#         return render(request, 'static/icons/favicon.png', status=502)
|
# A trivially easy problem (original comment: "as watered-down as it gets").
class Solution:
    def nextGreatestLetter(self, letters: "List[str]", target: str) -> str:
        """Return the smallest letter in sorted `letters` strictly greater
        than `target`, wrapping around to letters[0] if none exists."""
        # BUG FIX: `List` was never imported, so the original raised a
        # NameError at definition time; the string annotation defers it.
        # The input is guaranteed sorted, so binary search replaces the
        # original O(n) linear scan; the modulo handles the wrap-around.
        from bisect import bisect_right
        index = bisect_right(letters, target)
        return letters[index % len(letters)]
|
import socket
import threading
import pickle
from models.message import Message

HOST = '127.0.0.1'  # chat server address
PORT = 51234

# Prompt is user-facing Polish ("user name"); asked once at startup and
# sent to the server as raw UTF-8 bytes.
username = bytes(input("Nazwa użytkownika: ").encode('utf-8'))
class Client:
    """Simple TCP chat client: sends pickled Message objects, prints incoming text."""
    def __init__(self):
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self):
        # Connect to server, announce the username, then listen in the background.
        self._s.connect((HOST, PORT))
        self._s.sendall(username)
        threading.Thread(target=self.retrieve_data).start()

    def retrieve_data(self):
        # Wait for data from server; an empty read means the server closed.
        while True:
            data = self._s.recv(1024)
            if not data:
                break
            print(data.decode('utf-8'))

    def send_message(self, content):
        # Send message to server as a pickled Message object.
        msg = Message(content, username)
        msg = pickle.dumps(msg)
        self._s.send(msg)
client = Client()
client.connect()
while True:
    # Wait for message typed by the user; forward non-empty lines.
    msg = str(input())
    if msg:
        client.send_message(msg)
|
import storingwebpages
import trials
import unifier
import cleaner

# Pipeline: download search results for the query, extract trials, then
# merge and clean the resulting files.
query = "husband"
number_of_files = 200
# NOTE(review): positional args look like (query, search mode, start year,
# start suffix, end year, end suffix, count) — confirm against storingwebpages.
storingwebpages.results(query, "advanced", "1674", "00", "1913", "99", number_of_files)
trials.trials(query, number_of_files)
unifier.unifier(query, number_of_files)
cleaner.cleaner(query, number_of_files)
|
class HTMLGen():
    """
    Tiny HTML builder: accessing any attribute yields a function that wraps
    its argument in the corresponding tag; `comment` wraps in <!-- -->.
    """
    def comment(self, text):
        # Comments are the one "element" whose method name isn't a real tag.
        return f'<!--{text}-->'

    def __getattr__(self, tag):
        # Any other attribute becomes an element factory for that tag name.
        def element(inner):
            return f'<{tag}>{inner}</{tag}>'
        return element
'''
Another rewarding day in the fast-paced world of WebDev. Man, you love your job!
But as with any job, sometimes things can get a little tedious. Part of the
website you're working on has a very repetitive structure, and writing all
the HTML by hand is a bore. Time to automate! You want to write some functions
that will generate the HTML for you.
To organize your code, make of all your functions methods of a class called HTMLGen.
Tag functions should be named after the tag of the element they create.
Each function will take one argument, a string, which is the inner HTML
of the element to be created. The functions will return the string for the
appropriate HTML element.
For example,
In JavaScript:
var g = new HTMLGen();
var paragraph = g.p('Hello, World!');
var block = g.div(paragraph);
// The following are now true
paragraph === '<p>Hello, World!</p>'
block === '<div><p>Hello, World!</p></div>'
In Python:
g = HTMLGen();
paragraph = g.p('Hello, World!')
block = g.div(paragraph)
# The following are now true
paragraph == '<p>Hello, World!</p>'
block == '<div><p>Hello, World!</p></div>'
Your HTMLGen class should have methods to create the following elements:
a
b
p
body
div
span
title
comment
Note: The comment method should wrap its argument with an HTML comment.
It is the only method whose name does not match an HTML tag.
So, g.comment('i am a comment') must produce <!--i am a comment-->.
'''
|
#!/usr/bin/env python
# -*- coding:utf-8-unix -*-
###
### RIBES.py - RIBES (Rank-based Intuitive Bilingual Evaluation Score) scorer
### Copyright (C) 2011-2014 Nippon Telegraph and Telephone Corporation
###
### This program is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License
### as published by the Free Software Foundation; either version 2
### of the License, or (at your option) any later version.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
###
## History
## version 1.03.1 (2014/9/8) Fixed a compatibility problem of "split", which allows zero-length references wrongly with Python 2.
## Introduced a new option "-z/--emptyref" to allow zero-length references, which would be helpful for evaluation on data with blank lines.
## version 1.03 (2014/8/13) Supports Python 2.6 or higher
## Eliminated encoding option (now RIBES.py only supports utf-8)
## Limits word delimiters to ASCII white spaces (now multibyte spaces cannot be used as word delimiters)
## version 1.02.4 (2013/12/17) Fixed a problem in word alignment
## version 1.02.3 (2012/2/23) Fixed a problem in output
## version 1.02.2 (2011/10/25) Fixed a problem without -o option (in systems without /dev/stdout)
## version 1.02.1 (2011/8/18) Fixed bug on bytes.decode
## version 1.02 (2011/8/16) Improved distinguishment of same words, with a little code refactoring
## version 1.01 (2011/8/10) Fixed bug on empty lines
## version 1.0 (2011/8/1) Initial release
#
# Reference:
# Tsutomu Hirao, Hideki Isozaki, Katsuhito Sudoh, Kevin Duh, Hajime Tsukada, and Masaaki Nagata,
# "Evaluating Translation Quality with Word Order Correlations,"
# Journal of Natural Language Processing, Vol. 21, No. 3, pp. 421-444, June, 2014 (in Japanese).
#
# Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh, and Hajime Tsukada,
# "Automatic Evaluation of Translation Quality for Distant Language Pairs,"
# Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP),
# pp. 944--952 Cambridge MA, October, 2010
# -- http://aclweb.org/anthology-new/D/D10/D10-1092.pdf
#
from __future__ import print_function
import sys
if type(sys.version_info) is not tuple and sys.version_info.major != 3:
reload(sys)
sys.setdefaultencoding("utf-8")
import os,re
import datetime
import traceback
from optparse import OptionParser
from math import exp
#################################################################################################
# #
# Wrapper Function #
# #
#################################################################################################
from . import register_corpus_metric


@register_corpus_metric('RIBES')
def get_ribes_score(targets, decodes):
    """Corpus-level RIBES of *decodes* scored against the single reference *targets*."""
    # RIBES supports multiple references; wrap the single reference corpus in a list.
    references = [list(targets)]
    hypotheses = list(decodes)
    return corpus_ribes(references, hypotheses)
#################################################################################################
# Version string of the bundled RIBES implementation (see the History section above).
_RIBES_VERSION = '1.03'
# Global verbosity level; overwritten from the -d/--debug option in main().
debug = 0
# Collapses runs of whitespace when normalising corpus lines (see Corpus.__init__).
multiws_pattern = re.compile(r'\s+')
### "overlapping" substring counts ( string.count(x) returns "non-overlapping" counts... )
def overlapping_count (pattern, string):
pos = string.find(pattern)
if pos > -1:
return 1 + overlapping_count (pattern, string[pos+1:])
else:
return 0
### calculate Kendall's tau
def kendall(ref, hyp, emptyref=False):
    """Calculates Kendall's tau between a reference and a hypothesis

    Calculates Kendall's tau (also unigram precision and brevity penalty (BP))
    between a reference word list and a system output (hypothesis) word list.

    Arguments:
        ref : list of reference words
        hyp : list of system output (hypothesis) words
        (optional) emptyref : allow empty reference translations (ignored in the evaluation)

    Returns:
        A tuple (nkt, precision, bp)
        - nkt : normalized Kendall's tau
        - precision : unigram precision
        - bp : brevity penalty
        (all three are None when the reference is empty and emptyref is True)

    Raises:
        RuntimeError: reference has no words, possibly due to a format violation
    """
    # check reference length, raise RuntimeError if no words are found.
    if len(ref) == 0:
        if emptyref == True:
            # empty reference tolerated: signal "no score" with a None triple
            return (None, None, None)
        else:
            raise RuntimeError ("Reference has no words")
    # check hypothesis length, return "zeros" if no words are found
    elif len(hyp) == 0:
        if debug > 1: print ("nkt=%g, precision=%g, bp=%g" % (0.0, 0.0, 0.0), file=sys.stderr)
        return (0.0, 0.0, 0.0)
    # bypass -- return 1.0 for identical hypothesis
    #elif ref == hyp:
    #    if debug > 1: print ("nkt=%g, precision=%g, bp=%g" % (nkt, precision, bp), file=sys.stderr)
    #    return (1.0, 1.0, 1.0)

    # calculate brevity penalty (BP), not exceeding 1.0
    bp = min(1.0, exp(1.0 - 1.0 * len(ref)/len(hyp)))

    ### determine which ref. word corresponds to each hypothesis word
    # list for ref. word indices
    intlist = []

    ### prepare helper pseudo-string representing ref. and hyp. word sequences as strings,
    ### by mapping each word into non-overlapping Unicode characters
    # Word ID (dictionary)
    worddict = {}
    # Unicode hexadecimal sequences for ref. and hyp. words
    _ref = ""
    _hyp = ""
    for w in ref:
        # if w is not found in dictionary "worddict", add it.
        if w not in worddict:
            worddict[w] = len(worddict)
        # append Unicode hexadecimal for word w (with offset of 0x4e00 -- CJK character range)
        _ref += str(hex(worddict[w] + 0x4e00)).replace('0x', '', 1)
    # decode Unicode (UTF-16 BigEndian) sequences to UTF-8; branches keep
    # compatibility with Python 2 and early Python 3 decode() signatures
    if type(sys.version_info) is not tuple and sys.version_info.major == 3:
        if sys.version_info.minor > 1:
            mapped_ref = bytes.fromhex(_ref).decode(encoding="utf_16_be")
        else:
            mapped_ref = bytes.fromhex(_ref).decode("utf_16_be")
    else:
        mapped_ref = _ref.decode("hex").decode("utf_16_be")
    for w in hyp:
        # if w is not found in dictionary "worddict", add it.
        if w not in worddict:
            worddict[w] = len(worddict)
        # append Unicode hexadecimal for word w (with offset of 0x4e00 -- CJK character range)
        _hyp += str(hex(worddict[w] + 0x4e00)).replace('0x', '', 1)
    # decode Unicode (UTF-16 BigEndian) sequences to UTF-8
    if type(sys.version_info) is not tuple and sys.version_info.major == 3:
        if sys.version_info.minor > 1:
            mapped_hyp = bytes.fromhex(_hyp).decode(encoding="utf_16_be")
        else:
            mapped_hyp = bytes.fromhex(_hyp).decode("utf_16_be")
    else:
        mapped_hyp = _hyp.decode("hex").decode("utf_16_be")
    for i in range(len(hyp)):
        ### i-th hypothesis word hyp[i]
        if not hyp[i] in ref:
            ### hyp[i] doesn't exist in reference
            pass
            # go on to the next hyp. word
        elif ref.count(hyp[i]) == 1 and hyp.count(hyp[i]) == 1:
            ### if we can determine one-to-one word correspondence by only unigram
            ### one-to-one correspondence
            # append the index in reference
            intlist.append(ref.index(hyp[i]))
            # go on to the next hyp. word
        else:
            ### if not, we consider context words...
            # use Unicode-mapped string for efficiency; grow the context window
            # until the n-gram around hyp[i] is unique in both strings
            for window in range (1, max(i+1, len(hyp)-i+1)):
                if window <= i:
                    # left context: the window words ending at hyp[i]
                    ngram = mapped_hyp[i-window:i+1]
                    if overlapping_count(ngram, mapped_ref) == 1 and overlapping_count(ngram, mapped_hyp) == 1:
                        intlist.append(mapped_ref.index(ngram) + len(ngram) -1)
                        break
                if i+window < len(hyp):
                    # right context: the window words starting at hyp[i]
                    ngram = mapped_hyp[i:i+window+1]
                    if overlapping_count(ngram, mapped_ref) == 1 and overlapping_count(ngram, mapped_hyp) == 1:
                        intlist.append(mapped_ref.index(ngram))
                        break
    ### At least two word correspondences are needed for rank correlation
    n = len(intlist)
    if n == 1 and len(ref) == 1:
        # single-word reference matched: perfect order by definition
        if debug > 1: print ("nkt=%g, precision=%g, bp=%g" % (1.0, 1.0/len(hyp), bp), file=sys.stderr)
        return (1.0, 1.0/len(hyp), bp)
    elif n < 2:
        # if not, return score 0.0
        if debug > 1: print ("nkt=%g, precision=%g, bp=%g" % (0.0, 0.0, bp), file=sys.stderr)
        return (0.0, 0.0, bp)
    ### calculation of rank correlation coefficient
    # count "ascending pairs" (intlist[i] < intlist[j])
    ascending = 0.0
    for i in range(len(intlist)-1):
        for j in range(i+1,len(intlist)):
            if intlist[i] < intlist[j]:
                ascending += 1
    # normalize Kendall's tau to [0, 1] by dividing by the total pair count
    nkt = ascending / ((n * (n - 1))/2)
    # calculate unigram precision
    precision = 1.0 * n / len(hyp)
    # return tuple (Normalized Kendall's tau, Unigram Precision, and Brevity Penalty)
    if debug > 1: print ("nkt=%g, precision=%g, bp=%g" % (nkt, precision, bp), file=sys.stderr)
    return (nkt, precision, bp)
class RIBESevaluator:
    """RIBES evaluator class.

    Receives "Corpus" instances and scores them with hyperparameters alpha and beta.

    Attributes (private):
        __sent   : show sentence-level scores or not
        __alpha  : hyperparameter alpha, for (unigram_precision)**alpha
        __beta   : hyperparameter beta, for (brevity_penalty)**beta
        __output : output stream for sentence-level scores
    """
    def __init__(self, sent=False, alpha=0.25, beta=0.10, output=sys.stdout):
        """Constructor.

        Initialize a RIBESevaluator instance with four attributes. All attributes have their default values.

        Arguments (Keywords):
            - sent   : for attribute __sent, default False
            - alpha  : for attribute __alpha, default 0.25
            - beta   : for attribute __beta, default 0.10
            - output : for attribute __output, default sys.stdout
        """
        self.__sent = sent
        self.__alpha = alpha
        self.__beta = beta
        self.__output = output

    def eval(self, hyp, REFS, emptyref=False):
        """Evaluate a system output with multiple references.

        Calculates RIBES for a system output (hypothesis) with multiple references,
        and returns the "best" score among the multi-references.
        The score is corpus-wise, i.e., averaged by the number of valid sentences.

        Arguments:
            hyp  : "Corpus" instance of hypothesis
            REFS : list of "Corpus" instances of references
            (optional) emptyref : allow empty reference translations (default: False; ignored in the evaluation)

        Returns:
            _best_ribes_acc (float) : best corpus-wise RIBES among multi-reference

        Raises:
            RuntimeError : #sentences of hypothesis and reference doesn't match
            RuntimeError : no non-empty reference sentence was found at all
            RuntimeError : from the function "kendall"
        """
        for ref in REFS:
            # check #sentences of hypothesis and each of the multi-references
            if len(hyp) != len(ref):
                raise RuntimeError ( "Different #sentences between " + hyp.filename + " (" + str(len(hyp)) + "sents.) and " + ref.filename + "( " + str(len(ref)) + "sents.)")
        # initialize "best" corpus-wise score
        _best_ribes_acc = 0.0
        # the number of valid sentences with at least one non-empty reference translation
        _num_valid_refs = 0
        # score each hypothesis sentence
        for i in range (len(hyp)):
            # initialize "best" sentence-wise score
            _best_ribes = -1.0
            # for each reference
            for r in range(len(REFS)):
                try:
                    # calculate Kendall's tau, unigram precision, and brevity penalty.
                    (nkt, precision, bp) = kendall(REFS[r][i], hyp[i], emptyref=emptyref)
                except Exception as e:
                    # if the function "kendall" raises an exception, report the
                    # offending file/line and re-raise toward the main function
                    print ("Error in " + REFS[r].filename + " line " + str(i), file=sys.stderr)
                    raise e
                # in case of an empty reference (None triple), ignore this one
                if nkt is not None:
                    # RIBES = (normalized Kendall's tau) * (unigram_precision ** alpha) * (brevity_penalty ** beta)
                    _ribes = nkt * (precision ** self.__alpha) * (bp ** self.__beta)
                    # maintain the best sentence-wise score
                    if _ribes > _best_ribes:
                        _best_ribes = _ribes
            if _best_ribes > -1.0:
                # found a non-empty reference translation
                _num_valid_refs += 1
                # accumulate the "best" sentence-wise score for the "best" corpus-wise score
                _best_ribes_acc += _best_ribes
                # print "best" sentence-wise score if __sent is True
                if self.__sent and self.__output is not None:
                    print ("%.6f alpha=%f beta=%f %s sentence %d" % (_best_ribes, self.__alpha, self.__beta, hyp.filename, i), file=self.__output)
            elif self.__sent and self.__output is not None:
                # every reference for this sentence was empty: report -inf
                print ("%.6f alpha=%f beta=%f %s sentence %d" % (-float("inf"), self.__alpha, self.__beta, hyp.filename, i), file=self.__output)
        # BUG FIX: guard against ZeroDivisionError when every sentence lacked a
        # usable (non-empty) reference, or the corpora contained no sentences.
        if _num_valid_refs == 0:
            raise RuntimeError ("No valid (non-empty) reference translations found")
        # return the "best" corpus-wise RIBES
        return _best_ribes_acc / _num_valid_refs
class Corpus:
    """Corpus class.

    Stores sentences and is used for evaluation.

    Attributes (private):
        __sentence : list of sentences (word lists)
        __numwords : #words in the corpus (currently not used but can be used for corpus statistics.)

    Attributes (public):
        filename : corpus file name (set as public for error messages about the corpus)
    """
    def __init__(self, _file, case=False):
        """Constructor.

        Initialize a Corpus instance by a corpus file with a utf-8 encoding.

        Argument:
            _file : corpus file of "sentence-per-line" format
        Keyword:
            case : preserve uppercase letters or not, default: False
        """
        # initialize contents
        self.__sentence = []
        self.__numwords = 0
        # set file name
        self.filename = _file
        # read corpus
        # BUG FIX: open explicitly as utf-8 -- RIBES only supports utf-8 input
        # (see version 1.03 history above), but the default open() encoding is
        # platform-dependent and could silently mis-decode the corpus.
        with open(_file, encoding="utf-8") as fp:
            for line in fp:
                # eliminates unnecessary spaces (white spaces and tabs) in each sentence
                line = re.sub(r'\s+', ' ', line.strip())
                # lowercasing if case is False
                if not case:
                    line = line.lower()
                # split the sentence to a word list and append it to the corpus sentence list
                if len(line) == 0:
                    # keep empty lines as empty sentences (see -z/--emptyref)
                    self.__sentence.append( [] )
                else:
                    self.__sentence.append( line.split(" ") )
                # count words
                self.__numwords += len(self.__sentence[-1])

    def __len__(self):
        """Corpus size.

        Returns:
            len(self.__sentence) : corpus size (#sentences)
        """
        return len(self.__sentence)

    def __getitem__(self, index):
        """Pick up a sentence in the corpus

        Argument:
            index : index of the sentence to pick up
        Returns:
            self.__sentence[index] : (index+1)-th sentence in the corpus
        Raises:
            IndexError : index exceeds the size of the corpus
        """
        if len(self.__sentence)-1 < index:
            raise IndexError ( "Invalid index " + str(index) + " for list of " + str(len(self.__sentence)) + " sentences" )
        else:
            return self.__sentence[index]
###
### wrapper function for output
###
def outputRIBES (options, args, file=sys.stdout):
    """Evaluate every system output file and print one score line per file.

    Parameters:
        options : parsed optparse options (ref, sent, alpha, beta, case, emptyref)
        args    : list of system output (hypothesis) file names
        file    : stream receiving the score lines (default: sys.stdout)
    """
    # print start time
    print ("# RIBES evaluation start at " + str(datetime.datetime.today()), file=sys.stderr)
    # build the evaluator from the command-line hyperparameters
    evaluator = RIBESevaluator (sent=options.sent, alpha=options.alpha, beta=options.beta, output=file)
    # load every reference corpus (multi-reference support)
    REFS = []
    for _ref in options.ref:
        if debug > 0:
            # print reference file name (if debug > 0)
            print ("# reference file [" + str(len(REFS)) + "] : " + _ref, file=file)
        REFS.append( Corpus(_ref, case=options.case) )
    # score each system output against all references
    for i, hyp_file in enumerate(args):
        if debug > 0:
            # print system output file name (if debug > 0)
            print ("# system output file [" + str(i) + "] : " + hyp_file, file=file)
        hypothesis = Corpus(hyp_file, case=options.case)
        # best corpus-level RIBES over the multi-references
        best_ribes = evaluator.eval (hypothesis, REFS, emptyref=options.emptyref)
        print ("%.6f alpha=%f beta=%f %s" % (best_ribes, options.alpha, options.beta, hyp_file), file=file)
    # print end time
    print ("# RIBES evaluation done at " + str(datetime.datetime.today()), file=sys.stderr)
class CorpusX:
    """Corpus-like wrapper around an in-memory list of tokenised sentences.

    Provides the same interface as Corpus (len, indexing, filename) without
    reading anything from disk.
    """
    def __init__(self, _sentence, filename='none'):
        """Store the sentence list and pre-compute the total word count."""
        self.__sentence = _sentence
        self.filename = filename
        self.__numwords = sum(len(words) for words in _sentence)

    def __len__(self):
        """Number of sentences held by this corpus."""
        return len(self.__sentence)

    def __getitem__(self, index):
        """Return the sentence at *index*, raising IndexError when out of range."""
        if index > len(self.__sentence) - 1:
            raise IndexError ( "Invalid index " + str(index) + " for list of " + str(len(self.__sentence)) + " sentences" )
        return self.__sentence[index]
def corpus_ribes(references, translation):
    """Corpus-level RIBES of *translation* against multiple *references*.

    Uses the default hyperparameters (alpha=0.25, beta=0.10) and tolerates
    empty reference sentences (emptyref=True).
    """
    scorer = RIBESevaluator (sent=False, alpha=0.25, beta=0.10, output=sys.stdout)
    ref_corpora = [CorpusX(sentences, 'reference') for sentences in references]
    hyp_corpus = CorpusX(translation, 'translation')
    return scorer.eval(hyp_corpus, ref_corpora, emptyref=True)
###
### main function
###
def main ():
    """Parse the command-line options and run the RIBES evaluation."""
    # variable "debug" is global...
    global debug
    usage = "%prog [options] system_outputs"
    optparser = OptionParser(usage)
    ### option definitions
    # -d/--debug : debug level (0: scores and start/end time, 1: +ref/hyp files)
    optparser.add_option("-d", "--debug", dest="debug", default=0, type="int", help="debug level", metavar="INT")
    # -r/--ref : reference (multiple references available, repeat "-r REF" in arguments)
    optparser.add_option("-r", "--ref", dest="ref", default=[], action="append", type="string", help="reference translation file (use multiple \"-r REF\" for multi-references)", metavar="FILE")
    # -c/--case : preserve uppercase letters
    optparser.add_option("-c", "--case", dest="case", default=False, action="store_true", help="preserve uppercase letters in evaluation (default: False -- lowercasing all words)")
    # -s/--sentence : show scores for every sentences
    optparser.add_option("-s", "--sentence", dest="sent", default=False, action="store_true", help="output scores for every sentences")
    # -a/--alpha : "Unigram Precison" to the {alpha}-th power
    optparser.add_option("-a", "--alpha", dest="alpha", default=0.25, type="float", help="hyperparameter alpha (default=0.25)", metavar="FLOAT")
    # -b/--beta : "Brevity Penalty" to the {beta}-th power
    optparser.add_option("-b", "--beta", dest="beta", default=0.10, type="float", help="hyperparameter beta (default=0.10)", metavar="FLOAT")
    # -o/--output : output file
    optparser.add_option("-o", "--output", dest="output", default="", type="string", help="log output file", metavar="FILE")
    # -z/--emptyref : allow empty reference translations (ignored in the evaluation)
    optparser.add_option("-z", "--emptyref", dest="emptyref", default=False, action="store_true", help="allow empty reference translations (default: False -- raise RuntimeError in that case)")
    # args : system outputs
    # parse options
    (options, args) = optparser.parse_args()
    # set debug level (global)
    debug = options.debug
    if len(options.output) == 0:
        # output to stdout
        outputRIBES (options, args)
    else:
        # output file is automatically closed ...
        with open (options.output, 'w') as ofp:
            outputRIBES (options, args, file=ofp)
if __name__ == "__main__":
    try:
        main()
    except Exception as err:
        # print the full stack trace and exit non-zero so shell scripts notice
        traceback.print_exc(file=sys.stderr)
        sys.exit(255)
|
import mimetypes
from Jumpscale import j
from Jumpscale.clients.peewee.peewee import OperationalError
from . import auth
from .rooter import app, abort, enable_cors, package_route, response, request, PACKAGE_BASE_URL
# Route templates: <package base>/model/<model url> for collections and
# <package base>/model/<model url>/<record id> for single records.
MODEL_URL = f"{PACKAGE_BASE_URL}/model/<model_url>"
RECORD_URL = f"{MODEL_URL}/<record_id>"
def get_model(package, model_url):
    """a helper method to get a model from a package

    :param package: package object
    :type package: ThreeBotPackage
    :param model_url: full model url
    :type model_url: str
    :raises j.exceptions.NotFound: if model cannot be found
    :return: model object
    :rtype: BCDBModel
    """
    try:
        return package.bcdb.model_get(url=model_url)
    except j.exceptions.Input:
        # the bcdb raises Input for unknown urls; translate to NotFound
        raise j.exceptions.NotFound(f"model of {model_url} cannot be found")
def model_route(handler):
    """a decorator for any model related routes

    wraps a handler that receives threebot_name and package_name, resolving
    them to a model object which is passed to the handler instead.
    aborts with 404 (and a descriptive body) when the package or the model
    cannot be found.

    :param handler: handler function
    :type handler: function
    :return: decorated function
    :rtype: function
    """
    @auth.admin_only
    @package_route
    def inner(*args, **kwargs):
        package = kwargs.pop("package")
        url = kwargs.pop("model_url")
        try:
            model = get_model(package, url)
        except j.exceptions.NotFound as ex:
            return abort(404, ex.message)
        kwargs["model"] = model
        return handler(*args, **kwargs)

    return inner
def record_route(handler):
    """
    used on top of model_route to pass a record instead of a model

    resolves <record_id> from the url, aborting with 400 on a non-integer id
    and 404 when no record with that id exists.

    :param handler: handler function
    :type handler: function
    :return: decorated function
    :rtype: function
    """
    @model_route
    def inner(*args, **kwargs):
        model = kwargs.pop("model")
        raw_id = kwargs.pop("record_id")
        try:
            record_id = int(raw_id)
        except ValueError:
            return abort(400, "invalid record id")
        try:
            kwargs["record"] = model.get(record_id)
        except j.exceptions.NotFound as ex:
            return abort(404, ex.message)
        return handler(*args, **kwargs)

    return inner
@app.get(MODEL_URL)
@model_route
@enable_cors
def find(model):
    """handle get operation on the model

    runs a find with the request's query parameters as filters

    :param model: model object
    :type model: BCDBModel
    :return: json list of matching records or corresponding error status
    :rtype: response
    """
    try:
        matches = model.find(**request.query)
    except OperationalError as ex:
        # find() can only filter on indexed fields
        return abort(400, f"some fields are not indexed, {ex}")
    response.headers["Content-Type"] = "application/json"
    payload = [match._ddict for match in matches]
    return j.data.serializers.json.dumps(payload)
@app.post(MODEL_URL)
@model_route
@enable_cors
def create(model):
    """handle create operation on the model

    creates a new record from the request's json body

    :param model: model object
    :type model: BCDBModel
    :return: response body as {id: <new record id>} or corresponding error status
    :rtype: response
    """
    payload = request.json
    if not payload:
        return abort(400, "no data was given")
    new_record = model.new(data=payload)
    new_record.save()
    if not new_record.id:
        # a record without an id was not persisted
        return abort(400, "could not create the record, please check your data")
    response.status = 201
    return j.data.serializers.json.dumps({"id": new_record.id})
@app.get(RECORD_URL)
@record_route
@enable_cors
def get(record):
    """handle get operation on record

    :param record: record object
    :type record: JSXObject
    :return: record as json or corresponding error status
    :rtype: response
    """
    body = j.data.serializers.json.dumps(record._ddict)
    response.headers["Content-Type"] = "application/json"
    return body
@app.post(RECORD_URL)
@record_route
@enable_cors
def update(record):
    """handle post operation on record

    updates the record with the request's json body and saves it

    :param record: record object
    :type record: JSXObject
    :return: empty 204 response or corresponding error status
    :rtype: response
    """
    payload = request.json
    if not payload:
        return abort(400, "no data was given")
    record._data_update(payload)
    record.save()
    # success carries no body
    response.status = 204
@app.delete(RECORD_URL)
@record_route
@enable_cors
def delete(record):
    """handle delete operation on record

    deletes the current record from the model's store

    :param record: record object
    :type record: JSXObject
    :return: empty response with 204 status
    :rtype: response
    """
    record.delete()
    # no content here
    response.status = 204
@app.get("/bcdbfs/<url:re:.+>")
@auth.admin_only
@enable_cors
def fs(url):
try:
data = j.sal.bcdbfs.file_read("/" + url)
except j.exceptions.NotFound:
return abort(404, f"could not find the file of {url}")
response.headers["Content-Type"] = mimetypes.guess_type(url)[0]
return data
|
import cv2

# Simple webcam motion detector: the first captured frame is used as the
# static background; motion is any large contour in the thresholded
# frame-difference against it.
ff = None
video = cv2.VideoCapture(0)
while True:
    check, f = video.read()
    # BUG FIX: the read result was ignored; a failed grab would crash cvtColor
    if not check:
        break
    # grayscale + heavy blur makes the frame difference robust to sensor noise
    demo = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
    demo = cv2.GaussianBlur(demo, (21, 21), 0)
    if ff is None:
        # first frame becomes the reference background
        ff = demo
        continue
    # absolute difference against the reference, then binarise and dilate
    a = cv2.absdiff(ff, demo)
    b = cv2.threshold(a, 30, 255, cv2.THRESH_BINARY)[1]
    b = cv2.dilate(b, None, iterations=2)
    (cnts, _) = cv2.findContours(b.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for i in cnts:
        # ignore small contours (noise); 10000 px^2 is the motion threshold
        if cv2.contourArea(i) < 10000:
            continue
        (x, y, w, h) = cv2.boundingRect(i)
        cv2.rectangle(f, (x, y), (x + w, y + h), (100, 100, 100), 3)
    cv2.imshow("Color Frame", f)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
video.release()
# BUG FIX: destroyAllWindows was referenced but never called (missing parens)
cv2.destroyAllWindows()
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, SelectField,IntegerField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError, InputRequired
class BuildIdForm(FlaskForm):
    """Form asking the user to confirm a firmware build id."""
    # BUG FIX: user-facing label typo "Firmware Buid Id" -> "Firmware Build Id"
    firmware_build_id = StringField('Firmware Build Id', validators=[DataRequired()])
    submit = SubmitField('Confirm')
|
"""
A simple SET card game implemented in Python
Using tkinter for visualization
"""
import base64
import math
import time
import tkinter as tk
from tkinter import messagebox
from set_logic import *
__author__ = "Frederik Leira"
__version__ = "0.1"

# Card geometry (in pixels) and spacing on the table canvas.
CARD_HEIGHT = 300
CARD_WIDTH = 225
CARD_SPACE = 10
# Appearance of a card's backface as drawn by CardView.
CARD_BACK_BACKGROUND = "white"
CARD_BACK_FOREGROUND = "red"
CARD_BACK_TEXT_COLOUR = "blue"
CARD_BACK_TEXT = "SET"
def timer_to_string(timer):
    """Format an elapsed-seconds value as "Time: M:SS" (seconds zero-padded)."""
    total_seconds = int(timer)
    minutes, seconds = divmod(total_seconds, 60)
    return "Time: " + str(minutes) + ':' + str(seconds).zfill(2)
def PointsInCircum(x, y, r, n=30):
    """Return n+1 evenly spaced points on a circle (the last closes the loop).

    Parameters:
        x, y (float): Circle centre.
        r (float): Circle radius.
        n (int): Number of segments; n+1 points are returned.
    """
    points = []
    for k in range(0, n + 1):
        angle = 2 * math.pi / n * k
        points.append((x + math.cos(angle) * r, y + math.sin(angle) * r))
    return points
def _create_square(self, x, y, r, **kwargs):
return self.create_rectangle(x-r, y-r, x+r, y+r, **kwargs)
def _create_circle(self, x, y, r, **kwargs):
    """Draw a circle centred on (x, y) as a closed polygon approximation."""
    # approximated via PointsInCircum rather than create_oval
    return self.create_polygon(PointsInCircum(x, y, r), **kwargs)
def _create_triangle(self, x, y, r, **kwargs):
points = [x-r,y+r,x,y-r,x+r,y+r]
return self.create_polygon(points, **kwargs)
# Attach the shape helpers to tk.Canvas so CardView can call
# canvas.create_circle/create_square/create_triangle like built-in create_* methods.
tk.Canvas.create_circle = _create_circle
tk.Canvas.create_square = _create_square
tk.Canvas.create_triangle = _create_triangle
class CardView:
    """
    A class to manage the drawing of a SET card on a canvas.
    """
    def __init__(self, canvas, card, left_side, top_side,
                 background_colour=CARD_BACK_BACKGROUND,
                 foreground_colour=CARD_BACK_FOREGROUND,
                 text_colour=CARD_BACK_TEXT_COLOUR, text=CARD_BACK_TEXT):
        """
        Construct a new card to be drawn on the given canvas at the left_position.

        Parameters:
            canvas (tk.Canvas): The canvas to draw the card onto.
            card: The SET card to render (provides getShape/getColor/getFill/getNumber).
            left_side (int): The amount of pixels in the canvas to draw the card.
            top_side (int): Vertical pixel offset of the card's top edge.
            background_colour (tk.Color): Backface card background colour.
            foreground_colour (tk.Color): Backface card foreground colour.
            text_colour (tk.Color): Backface card text colour.
            text (str): Backface card text to display.
        """
        self._canvas = canvas
        self.card = card
        # bounding box of the card on the canvas
        self.left_side = left_side
        self.right_side = left_side + CARD_WIDTH
        self.top_side = top_side
        self.bottom_side = top_side + CARD_HEIGHT
        self._background = background_colour
        self._foreground = foreground_colour
        self._text_colour = text_colour
        self._text = text
        self._image = None
        # render immediately upon construction
        self.draw()

    def getShapeView(self):
        """Return the canvas drawing helper matching this card's shape name.

        NOTE(review): implicitly returns None for any other shape name -- confirm
        the shape enum is limited to circle/square/triangle.
        """
        shape = str(self.card.getShape().name)
        if shape == "circle":
            return self._canvas.create_circle
        elif shape == "square":
            return self._canvas.create_square
        elif shape == "triangle":
            return self._canvas.create_triangle

    def draw(self):
        """Draw the backface of the card to the canvas."""
        self._back = self.draw_back(self._background)
        # the card's enum attributes (.name/.value) drive colour, fill and count
        color = str(self.card.getColor().name)
        fill = str(self.card.getFill().name)
        fill_color = color
        stipple = ''
        shapenumber = int(self.card.getNumber().value)
        shape = str(self.card.getShape().name)
        if fill == 'half':
            # stippled fill emulates a half-filled shape
            stipple = 'gray25'
        elif fill == 'none':
            # empty fill string -> outline only
            fill_color = ''
        drawShape = self.getShapeView()
        # distribute the shapes evenly down the card's vertical axis
        for i in range(0,shapenumber):
            drawShape(self.left_side+(CARD_WIDTH // 2), self.top_side+(CARD_HEIGHT // (shapenumber+1))*(i+1), 30, \
                      outline=color, fill=fill_color, stipple=stipple, width=2)
        return 1

    def draw_back(self, colour):
        """Draw the back of the canvas (the background not the backface).

        Parameters:
            colour (tk.Color): The colour of the background.
        """
        return self._canvas.create_rectangle(self.left_side, self.top_side,
                                             self.right_side, self.bottom_side,
                                             fill=colour)

    def draw_text(self, text, colour):
        """Draw text in the middle of the card.

        NOTE(review): the y coordinate ignores self.top_side, so for cards not in
        the first row this text would land at the wrong height -- confirm usage.

        Parameters:
            text (str): The text to display on the card.
            colour (tk.Color): The colour of the text to display.
        """
        return self._canvas.create_text(self.left_side + (CARD_WIDTH // 2),
                                        CARD_HEIGHT // 2, text=text, fill=colour,
                                        font=('Times', '16', 'bold italic'))
class TableView(tk.Canvas):
    """
    A Canvas that displays a table of set cards on a board.
    """
    def __init__(self, master, table, pick_card=None, border_color="#6D4C41",
                 active_border="red", offset_x=CARD_WIDTH, offset_y=CARD_HEIGHT, *args, **kwargs):
        """
        Construct a table view.

        Parameters:
            master (tk.Tk|tk.Frame): The parent of this canvas.
            table: The table whose cards are displayed (sets the column count).
            pick_card (callable): The callback when card in this deck is clicked.
                                  Takes an int representing the cards index.
            border_color (tk.Color): The colour of the table border.
            active_border (tk.Color): Border colour for an active selection.
            offset_x (int): Horizontal distance between card slots.
            offset_y (int): Vertical distance between card slots.
        """
        super().__init__(master, *args, **kwargs, bg=border_color,
                         highlightthickness=5, highlightbackground=border_color)
        self.pick_card = pick_card
        self.offset_x = offset_x
        self.offset_y = offset_y
        # slot index -> CardView of the currently drawn cards
        self.cards = {}
        # cards are always laid out in 3 rows
        self.num_of_cols = len(table.getCards()) // 3
        self._border_color = border_color
        self._active_border = active_border
        self.bind("<Button-1>", self._handle_click)

    def _handle_click(self, event):
        """Handles when the player clicks the deck."""
        # translate pixel coordinates into a (column, row) slot index
        slot_x = event.x // CARD_WIDTH
        slot_y = event.y // CARD_HEIGHT
        self.pick_card(int(slot_y * self.num_of_cols + slot_x))

    def draw_card(self, card, slot):
        """
        Draw a card in the given slot on the table.

        Parameters:
            card (Card): The card to draw to the table.
            slot (int): The position on the table to draw the card.

        Returns:
            (CardView): The card view drawn at the slot for a given card.
        """
        left_side = (slot % self.num_of_cols) * self.offset_x
        top_side = (slot // self.num_of_cols) * self.offset_y
        self.cards[slot] = CardView(self, card, left_side, top_side)
        return self.cards[slot]

    def draw(self, table, show=True):
        """
        Draw the cards based of the data in a given table instance.

        Parameters:
            table (Table): The current table to draw.
            show (bool): Whether the cards should be displayed or not.
        """
        # resize the canvas to fit all the cards on the table
        self.resize(len(table.getCards()))
        for i, card in enumerate(table.getCards()):
            self.draw_card(card, i)

    def resize(self, size):
        """
        Calculate the dimensions required to fit 'size' cards in this canvas
        and update the canvas size.

        Parameters:
            size (int): The amount of cards that should be displayed on this table.
        """
        # BUG FIX (consistency): __init__ keeps num_of_cols an int, but this used
        # float division (size/3), making all subsequent slot arithmetic float.
        # Assumes size is a positive multiple of 3 -- TODO confirm for end-game.
        self.num_of_cols = size // 3
        width = min(size, self.num_of_cols) * CARD_WIDTH
        height = (size // self.num_of_cols) * CARD_HEIGHT
        # resize canvas, adjust for border
        self.config(width=width - 10, height=height - 10)
class SetApp:
    """A graphical SET application"""

    def __init__(self, master, game, board_color="#F9B05A"):
        """Create a new SET application based on a given SetGame.

        Parameters:
            master (tk.Tk): The root window for the SET application.
            game (SEtGame): The game to display in this application.
            board_colour (tk.Color): The background colour of the board.
        """
        self._master = master
        self.game = game
        # slot indices of the cards currently selected by the player (max 3)
        self.card_picks = []
        self.board_color = board_color
        # wall-clock start of the current game, for the timer display
        self.start = time.time()
        self.timer = 0
        self.status = ""
        # define all the class variables
        self._board = self.deck = self.table = None
        self.render_board()
        self.add_menu()

    def render_board(self):
        """(Re)build the board frame, the card table and the status bar."""
        # remove old frame, if it exists
        if self._board is not None:
            self._board.pack_forget()
        # create a board frame
        self._board = board = tk.Frame(self._master, width=5*CARD_WIDTH, height=4*CARD_HEIGHT, padx=20, pady=20,
                                       bg=self.board_color,
                                       borderwidth=2, relief="groove")
        board.pack(expand=True, fill=tk.BOTH)
        # draw the table with cards
        self.table = self.draw_board()
        # draw the status bar
        self.timer_label, self.status_label, self.cards_left_label = self.draw_status()
        # start updating the status bar
        self.onUpdate()

    def new_game(self):
        """Start a new game

        NOTE(review): unlike main(), this deals Deck() with its default
        attribute count rather than number_of_attributes=4 -- confirm intended.
        """
        deck = Deck()
        deck.shuffle()
        self.game = SetGame(deck)
        # reset the timer and the status message
        self.start = time.time()
        self.timer = 0
        self.status_label.config(text="")
        self.render_board()

    def add_menu(self):
        """Create a menu for the application"""
        menu = tk.Menu(self._master)
        # file menu with new game and exit
        file = tk.Menu(menu)
        file.add_command(label="New Game", command=self.new_game)
        file.add_command(label="Exit", command=self._master.destroy)
        # add file menu to menu
        menu.add_cascade(label="File", menu=file)
        self._master.config(menu=menu)

    def pick_card(self, slot):
        """Called when a given playable player selects a slot.

        Parameters:
            slot (int): The card index they selected.
        """
        card_view = self.table.cards[slot]
        if slot in self.card_picks:
            # clicking a selected card deselects it again
            self.card_picks.remove(slot)
            card_view._canvas.itemconfig(card_view._back, fill='white')#str(card.getColor().name))
            return
        else:
            # grey background marks a selected card
            self.card_picks.append(slot)
            card_view._canvas.itemconfig(card_view._back, fill='gray')#str(card.getColor().name))
        if len(self.card_picks) == 3:
            # three cards selected: check whether they form a valid SET
            cards = self.game.getTable().getCards()
            s = Set(cards[self.card_picks[0]], cards[self.card_picks[1]], cards[self.card_picks[2]])
            if s.isSetValid():
                #append to found_sets
                self.game.getTable().removeCards(self.card_picks)
                self.game.getTable().fillTable(self.game.getDeck())
            # three picks always reset the selection and redraw the table
            self.card_picks = []
            self.table.draw(self.game.getTable())
            self.cards_left_label.config(text="Cards Left: "+str(len(self.game.getDeck().getCards())))

    def draw_board(self):
        """Draw the board (table with cards).

        Returns:
            TableView: the active TableView for this SetGame
        """
        board = tk.Frame(self._board, width=4*CARD_WIDTH, height=3*CARD_HEIGHT, bg="#6D4C41")
        board.pack(side=tk.TOP, pady=20, fill=tk.BOTH, expand=True)
        # left pickup card pile view
        table = TableView(board, self.game.getTable(), pick_card=lambda card: self.pick_card(card))
        table.pack(side=tk.TOP)
        table.draw(self.game.getTable(), show=True)
        return table

    def draw_status(self):
        """Draw a status bar below the tableview."""
        timer_label = tk.Label(self._board, text=timer_to_string(self.timer),
                               font=('Times', '24', 'bold italic'),
                               bg=self.board_color)
        timer_label.pack(side=tk.LEFT, expand=True, fill=tk.X)
        status_label = tk.Label(self._board, text="",
                                font=('Times', '24', 'bold italic'),
                                bg=self.board_color)
        status_label.pack(side=tk.LEFT, expand=True, fill=tk.X)
        cards_left_label = tk.Label(self._board, text="Cards Left: "+str(len(self.game.getDeck().getCards())), \
                                    font=('Times', '24', 'bold italic'),
                                    bg=self.board_color)
        cards_left_label.pack(side=tk.RIGHT, expand=True, fill=tk.X)
        return timer_label, status_label, cards_left_label

    def onUpdate(self):
        """Periodic (1 s) refresh of the timer and the win status."""
        # update displayed time
        if self.game.isActive(): self.timer = time.time()-self.start
        else: self.status_label.config(text="You Win")
        self.timer_label.config(text=timer_to_string(self.timer))
        # schedule timer to call myself after 1 second
        self._board.after(1000, self.onUpdate)
def main():
    """Create the main window and start a SET game with a freshly shuffled deck."""
    window = tk.Tk()
    window.title("SET Card Game")
    # deal a fresh, shuffled deck
    deck = Deck(number_of_attributes=4)
    deck.shuffle()
    # build the game model and its GUI
    game = SetGame(deck)
    app = SetApp(window, game)
    # lock the minimum window size to the fully laid-out board
    window.update()
    window.minsize(window.winfo_width(), window.winfo_height())
    window.mainloop()
if __name__ == "__main__":
main()
|
from .base_urls import *
from django.urls import include, re_path
# Mount the data_aggregator app's urls on top of the base URL patterns.
urlpatterns += [
    re_path(r'^', include('data_aggregator.urls')),
]
|
def evaluate_conversion(converted_model, x_test, y_test, testacc, batch_size, timesteps=50):
    """
    Utility function for simple evaluation of the simulation accuracy.

    Re-evaluates the (stateful) converted model once per timestep and prints
    the spiking accuracy next to the original model's test accuracy, plus the
    relative conversion loss.
    """
    step = 1
    while step <= timesteps:
        _, acc = converted_model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
        progress = str(step) + "/" + str(timesteps) + " -"
        spiking = "acc spiking (orig): %.2f%% (%.2f%%)" % (acc * 100, testacc * 100)
        conv_loss = "- conv loss: %+.2f%%" % ((-(1 - acc / testacc) * 100))
        print("Timesteps", progress, spiking, conv_loss)
        step += 1
def evaluate_conversion_and_save_data(converted_model, x_test, y_test, testacc, batch_size, timesteps=50):
    """
    Utility function for evaluation and saving of the simulation accuracy.

    Re-evaluates the (stateful) converted model once per timestep, printing
    progress like evaluate_conversion(), and collects the accuracy at each
    timestep.

    Returns:
        list[float]: accuracy after each of the `timesteps` evaluations.
    """
    accuracy_per_t = []
    # BUG FIX: the loop used range(0, timesteps) so the progress line printed
    # "0/50" .. "49/50"; use 1-based steps like evaluate_conversion().
    for i in range(1, timesteps + 1):
        _, acc = converted_model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
        accuracy_per_t.append(acc)
        print(
            "Timesteps", str(i) + "/" + str(timesteps) + " -",
            "acc spiking (orig): %.2f%% (%.2f%%)" % (acc*100, testacc*100),
            "- conv loss: %+.2f%%" % ((-(1 - acc/testacc)*100)))
    return accuracy_per_t
|
/Users/samnayrouz/anaconda3/lib/python3.6/rlcompleter.py
|
import pcl
import numpy as np
import pcl.pcl_visualization
# from pcl.pcl_registration import icp, gicp, icp_nl

# Load a colored point cloud and display it until the viewer window closes.
cloud = pcl.load_XYZRGB('./examples/pcldata/tutorials/table_scene_mug_stereo_textured.pcd')

visual = pcl.pcl_visualization.CloudViewing()

# PointXYZ
# visual.ShowMonochromeCloud(cloud)
# visual.ShowGrayCloud(cloud, b'cloud')
visual.ShowColorCloud(cloud, b'cloud')
# visual.ShowColorACloud(cloud, b'cloud')

# Spin until the viewer reports it has been stopped.
# BUG FIX: the original wrote "flag != visual.WasStopped()" (a discarded
# comparison, so the loop never terminated) followed by a stray MATLAB-style
# "end" line, which is a Python syntax error.
# NOTE(review): assumes WasStopped() returns True once the window is closed.
flag = True
while flag:
    flag = not visual.WasStopped()
def point_cloud(self, depth):
    """Back-project a depth image into an (H, W, 3) array of 3-D points.

    Pinhole camera model: assumes self.cx/self.cy (principal point) and
    self.fx/self.fy (focal lengths) are set on the receiver.

    Args:
        depth: 2-D array; values in (0, 255) are valid, 0 and 255 are not.

    Returns:
        numpy array of shape (H, W, 3) with (x, y, z) per pixel; invalid
        pixels get z = NaN and x = y = 0.
    """
    rows, cols = depth.shape
    # sparse meshgrid: c is (1, cols), r is (rows, 1); broadcasting expands them
    c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)
    valid = (depth > 0) & (depth < 255)
    # Pixels with invalid depth in the input have NaN for the z-coordinate.
    z = np.where(valid, depth / 256.0, np.nan)
    x = np.where(valid, z * (c - self.cx) / self.fx, 0)
    y = np.where(valid, z * (r - self.cy) / self.fy, 0)
    # (the original ended with an unreachable `pass` after this return)
    return np.dstack((x, y, z))
|
"""
tupla = ('a', '1', '2', '-2', '+4', '5', 'sis')
resultat = [ valors for valors in tupla if not str(valors).isalpha()]
parells = [ valors for valors in resultat if valors % 2 == 0 and valors > 0]
print(sum(parells))
"""
#~SOLUCIÓ 14
tupla = ('a', '1', '2', '-2', '+4', '5', 'sis')
lista_nomes_digits = [valors for valors in tupla if (valors[0] == '+' or valors[0] == '-' or valors.isdigit())]
#lista_parells = [int(valors) for valors in lista_nomes_digits if int(valors) % 2 == 0 and int(valors) > 0]
print(sum([int(valors) for valors in lista_nomes_digits if int(valors) % 2 == 0 and int(valors) > 0]))
#print(sum(list(map(int, parells))))
#~SOLUCIÓ 15
#tupla = ('a', '1', '2', '-2', '+4', '5', 'sis')
#lista_nomes_digits = [valors for valors in tupla if (valors[0] == '+' or valors[0] == '-' or valors.isdigit())]
print(sum(list(map(int, [valors for valors in tupla if (valors[0] == '+' or valors[0] == '-' or valors.isdigit())]))))
#print(sum(list(map(int, parells))))
#solució 16
print(tuple(map(int, lista_nomes_digits)))
# SOLUTION 17
def fins_un_maxim(tupla, maxim):
    """Collect values whose running sum never exceeds *maxim*.

    Walks the tuple keeping a running total; each value is kept (and added
    to the total) only if total + value <= maxim, otherwise it is skipped.

    NOTE(review): the original body contained invalid syntax (an assignment
    inside a comprehension condition) and returned nothing.  Its doctest
    expected (1, 1, 2, -2, 4) for the input below, which no consistent
    reading of the broken code reproduces; the doctest here matches the
    accumulate-while-under-max semantics implemented.

    >>> fins_un_maxim((1, 7, 2, -2, 4, 2, 4), 7)
    (1, 2, -2, 4, 2)
    """
    total = 0
    kept = []
    for valor in tupla:
        if total + valor <= maxim:
            total += valor
            kept.append(valor)
    return tuple(kept)
|
import sys
import json
import numpy as np
import statsmodels.api as sm
from astropy.coordinates import Longitude
from astropy.modeling import models, fitting
sys.path.append("../Dust-wave")
from bow_projection import Spline_R_theta_from_grid
def departure(R, theta):
    """Departure of R(theta) from the unit parabola 1/R = (1 + cos(theta))/2."""
    parabola = 0.5 * (1.0 + np.cos(theta))
    return 1.0 / R - parabola
def R_from_Delta(mu, Delta):
    """Invert departure(): recover radius R from mu = cos(theta) and Delta."""
    denom = Delta + 0.5 * (1.0 + mu)
    return 1.0 / denom
JSON_SUFFIX = "-arcdata.json"
def load_R_th(arc_prefix):
jfile = arc_prefix + JSON_SUFFIX
data = json.load(open(jfile))
R0 = np.array(data['outer']['R0'])
R = np.array(data['outer']['R'])
th = np.radians(data['outer']['theta'])
# th = Longitude(data['outer']['theta'], unit='deg')
# th += Longitude(data['outer']['PA0'], unit='deg')
return th, R/R0
class Simulation(object):
    """
    Bow shape from simulation - defined on grid and fit with splines
    Callable as function of theta

    The radius grid is converted to the parabolic departure function
    Delta(mu), mu = cos(theta); a Chebyshev series models Delta inside the
    data range and a polynomial (optionally forced open) extrapolates it
    beyond thmax.
    """
    # Fraction parameter for the (currently disabled) lowess smoothing below.
    lowess_frac = 0.2
    def extrapolation_factor(self, mu):
        """Scaling applied to the extrapolation polynomial.

        For force_open=True the factor sqrt(|-1 - mu|) drives Delta to zero
        at mu = -1 (theta = pi), keeping the fitted shape open.
        """
        if self.force_open:
            return np.abs(-1.0 - mu)**0.5
        else:
            return 1.0
    def extrapolation(self, mu):
        """Extrapolated departure Delta(mu) for angles beyond the data grid."""
        return self.extrapolation_factor(mu)*self.extrap_polyfit(mu)
    def __init__(self, name, extrap_degree=2, mu0=-0.5,
                 cheby_degree=10, force_open=False, mode="all"):
        # name: arc-file prefix passed to load_R_th()
        # extrap_degree: degree of the extrapolation polynomial in mu
        # mu0: only grid points with mu <= mu0 constrain the extrapolation fit
        # cheby_degree: degree of the Chebyshev fit inside the data range
        # force_open: scale extrapolation so the shape stays open at theta=pi
        # mode: currently unused (see commented-out selection logic below)
        self.name = name
        self.force_open = force_open
        self.thgrid, self.Rgrid = load_R_th(name)
        self.thmax = self.thgrid.max()
        # Set up grid of departure function vs mu
        Delta = departure(self.Rgrid, self.thgrid)
        mu = np.cos(self.thgrid)
        # Set up Chebyshev fit to grid data (theta < thmax)
        self.chebyfit = models.Chebyshev1D(degree=cheby_degree)
        fitter = fitting.LevMarLSQFitter()
        self.chebyfit = fitter(self.chebyfit, mu, Delta)
        # Set up extrapolation fit for theta > thmax
        # Only fit mu < mu0
        mask = mu <= mu0
        self.extrap_polyfit = np.poly1d(np.polyfit(
            mu[mask], Delta[mask]/self.extrapolation_factor(mu[mask]),
            deg=extrap_degree))
        # if mode == "all":
        #     # Use all points but take absolute value of theta
        #     self.thgrid = np.abs(self.thgrid)
        #     # And do some lowess smoothing
        #     smooth = sm.nonparametric.lowess(self.Rgrid, self.thgrid,
        #                                      frac=self.lowess_frac)
        #     self.thgrid = smooth[:, 0]
        #     self.Rgrid = smooth[:, 1]
        # elif mode == "positive":
        #     # Use only points with positive theta
        #     m = self.thgrid > 0.0
        #     self.thgrid = self.thgrid[m]
        #     self.Rgrid = self.Rgrid[m]
        # elif mode == "negative":
        #     # Use only points with negative theta
        #     m = self.thgrid < 0.0
        #     self.thgrid = -self.thgrid[m]
        #     self.Rgrid = self.Rgrid[m]
        # # Make sure arrays are sorted
        # sort_order = self.thgrid.argsort()
        # self.thgrid = self.thgrid[sort_order]
        # self.Rgrid = self.Rgrid[sort_order]
        # th_x = np.arccos(mu_x)
        # R_x = R_from_Delta(mu_x, Delta_x)
        # # Add on the extrapolated points
        # self.thgrid = np.concatenate((self.thgrid, th_x))
        # self.Rgrid = np.concatenate((self.Rgrid, R_x))
        # # And sort again just in case
        # sort_order = self.thgrid.argsort()
        # self.thgrid = self.thgrid[sort_order]
        # self.Rgrid = self.Rgrid[sort_order]
        # # Finally do the spline fit
        # self.splinefit = Spline_R_theta_from_grid(
        #     theta_grid=self.thgrid, R_grid=self.Rgrid)
    def __call__(self, theta):
        """Fitted radius R(theta); accepts scalars or arrays of angles."""
        # When called as a function, give the fitted result
        mu = np.cos(theta)
        # Use Chebyshev for the range of the grid data
        # and use extrapolation for larger angles
        mask = np.cos(theta) >= np.cos(self.thmax)
        Delta = np.empty_like(mu)
        Delta[mask] = self.chebyfit(mu[mask])
        Delta[~mask] = self.extrapolation(mu[~mask])
        return R_from_Delta(mu, Delta)
|
from riak import RiakClient, RiakError
import inspect
# in the style of the sql plugin example, naturally
# README example borrowed from caleb brown's bottle-couchdb
# read: blatantly plundered.
__author__ = 'Kevin Anderson'
__version__ = '0.01'
__license__ = 'BSD'
class RiakPlugin(object):
    ''' This plugin passes a riak database handle to route callbacks that
    accept a `db` keyword argument. If the callback does not accept such
    a parameter, no connection is made. You can override the database settings
    on a per-route basis.

    NOTE(review): this module uses Python 2 syntax ("except RiakError, e")
    and references PluginError and HTTPError, which are not imported in the
    visible code -- presumably they come from bottle; confirm before use.
    '''
    # Bottle plugin identification: unique name and plugin API version.
    name = 'riak'
    api = 2
    def __init__(self, host='127.0.0.1', port=8098, prefix='riak',
                 mapred_prefix='mapred', transport_class=None,
                 client_id=None, solr_transport_class=None,
                 transport_options=None, keyword='db'):
        # Connection defaults; each can be overridden per route in apply().
        self.host = host
        self.port = port
        self.prefix = prefix
        self.mapred_prefix = mapred_prefix
        self.transport_class = transport_class
        self.client_id = client_id
        self.solr_transport_class = solr_transport_class
        self.transport_options = transport_options
        # Name of the callback keyword argument that receives the handle.
        self.keyword = keyword
    def setup(self, app):
        ''' Make sure that other installed plugins don't affect the same
        keyword argument.'''
        for other in app.plugins:
            if not isinstance(other, RiakPlugin): continue
            if other.keyword == self.keyword:
                raise PluginError("Found another riak plugin with "\
                                  "conflicting settings (non-unique keyword).")
    def apply(self, callback, context):
        """Wrap a route callback so it receives a fresh RiakClient handle."""
        # Override global configuration with route-specific values.
        conf = context.config.get('riak') or {}
        host = conf.get('host', self.host)
        port = conf.get('port', self.port)
        prefix = conf.get('prefix', self.prefix)
        mapred_prefix = conf.get('mapred_prefix', self.mapred_prefix)
        transport_class = conf.get('transport_class', self.transport_class)
        client_id = conf.get('client_id', self.client_id)
        solr_transport_class = conf.get('solr_transport_class', \
            self.solr_transport_class)
        transport_options = conf.get('transport_options', \
            self.transport_options)
        keyword = conf.get('keyword', self.keyword)
        # Test if the original callback accepts a 'db' keyword.
        # Ignore it if it does not need a database handle.
        args = inspect.getargspec(context.callback)[0]
        if keyword not in args:
            return callback
        def wrapper(*args, **kwargs):
            # Connect to the database
            db = RiakClient(host=host, port=port, prefix=prefix, \
                mapred_prefix=mapred_prefix, transport_class=transport_class, \
                client_id=client_id, \
                solr_transport_class=solr_transport_class, \
                transport_options=transport_options)
            # Add the connection handle as a keyword argument.
            kwargs[keyword] = db
            try:
                rv = callback(*args, **kwargs)
            except RiakError, e:
                raise HTTPError(500, "Database Error", e)
            finally:
                pass # meh
            return rv
        # Replace the route callback with the wrapped one.
        return wrapper
    def close(self):
        # Nothing to release: connections are created per request in wrapper().
        pass
|
# In Python, module-level globals are rarely needed; prefer data encapsulation.
# Double-underscore attributes are "private" (name-mangled).
class Student(object):  # inherits object; other base classes are possible too
    def __init__(self,name,score):
        self.__name=name
        self.__score=score
    def print_score(self):
        print('%s:%s'%(self.__name,self.__score))  # private: not modifiable from outside
# Demo: the private attributes can only be reached via the class's own method.
zhao=Student('zhao','99')
zhao.print_score()
class Animal(object):
    # No instance attributes, so no __init__ is needed.
    def run(self):
        print('animal is running...')
class Dog(Animal):
    def run(self):
        print('dog is running...')  # polymorphism: the subclass's run() overrides the parent's
    def eat(self):
        print('dog is eating....')
# Subclasses that add nothing: they inherit behavior unchanged.
class Cat(Animal):
    pass
class Husky(Dog):
    pass
d=Dog()
d.run()
d.eat()
print(type(123))  # type() reports which class a value belongs to; 123 is an int
print(type(d))
print(abs)
h=Husky()
print(isinstance(h,Dog))  # True: Husky is a subclass of Dog
print(dir(Dog))  # list the attributes/methods defined on Dog
# Class attributes vs instance attributes: instance attributes take priority.
class Stu(object):
    name='Stu'
s=Stu()
print(s.name)  # falls back to the class attribute
s.name='shuai'
print(s.name)  # the instance attribute now shadows the class attribute
# Binding a plain function to an existing instance as a method at runtime.
def set_age(self,age):
    self.age=age
from types import MethodType
s.set_age=MethodType(set_age,s)  # bound to this one instance only
s.set_age(25)
print(s.age)
|
# References
# https://github.com/hminle/car-behavioral-cloning-with-pytorch/blob/master/utils.py
# https://github.com/hminle/car-behavioral-cloning-with-pytorch/blob/master/experiment.ipynb
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from PIL import Image
import scipy.misc
import lmdb
import random
from random import randint
from random import shuffle
import cv2
# Transform that randomly mirrors a driving sample left/right.
class AugmentDrivingTransform(object):
    """Flip augmentation for driving data.

    Samples with a zero steering angle are passed through untouched; for the
    rest, a coin flip decides whether the image is mirrored horizontally and
    the steering angle negated. The sample dict is mutated in place.
    """

    def __call__(self, sample):
        angle = sample['label']
        # Straight-ahead frames are never augmented.
        if angle == 0:
            return sample
        # 50% chance: mirror the frame and negate the steering command.
        if random.random() > 0.5:
            sample['image'] = np.fliplr(sample['image'])
            sample['label'] = -angle
        return sample
class RandomBrightness(object):
    """Transform: with probability 0.5, scale image brightness by up to +/-5%."""
    def __call__(self, sample):
        # NOTE(review): `steering` is read but unused in this transform.
        image = sample['image']
        steering = sample['label']
        # Roll the dice
        prob = random.random()
        # Half chance of nothing half do some augmentation
        if prob > 0.5:
            # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness).
            hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            # Random gain in [0.95, 1.05) applied to the V (brightness) channel.
            ratio = 1.0 + 0.1 * (np.random.rand() - 0.5)
            hsv[:,:,2] = hsv[:,:,2] * ratio
            sample['image'] = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB).astype('uint8')
        return sample
class ConvertToGray(object):
    """With probability 0.5, replace the RGB sample with its grayscale version
    (same weight copied into all three channels so the shape is preserved)."""

    def __call__(self, sample):
        # Coin flip: half the time the sample passes through unchanged.
        if random.random() > 0.5:
            frame = sample['image']
            # ITU-R BT.601 luma weights.
            gray = (0.2989 * frame[:, :, 0]
                    + 0.5870 * frame[:, :, 1]
                    + 0.1140 * frame[:, :, 2])
            out = frame.copy()
            for channel in range(3):
                out[:, :, channel] = gray
            sample['image'] = out.astype('uint8')
        return sample
class ConvertToSepia(object):
    """With probability 0.5, apply a sepia tone to the RGB sample."""

    def __call__(self, sample):
        # Coin flip: half the time the sample passes through unchanged.
        if random.random() > 0.5:
            frame = sample['image']
            r = frame[:, :, 0]
            g = frame[:, :, 1]
            b = frame[:, :, 2]
            toned = frame.copy()
            # Classic sepia matrix; float results truncate into the uint8 copy.
            toned[:, :, 0] = 0.393 * r + 0.769 * g + 0.189 * b
            toned[:, :, 1] = 0.349 * r + 0.686 * g + 0.168 * b
            toned[:, :, 2] = 0.272 * r + 0.534 * g + 0.131 * b
            sample['image'] = toned.astype('uint8')
        return sample
class AddNoise(object):
    """With probability 0.5, add multiplicative Gaussian noise to the image."""

    def __call__(self, sample):
        # Coin flip: half the time the sample passes through unchanged.
        if random.random() > 0.5:
            frame = sample['image']
            # Per-pixel Gaussian factor, scaled down to roughly +/-10%.
            gauss = np.random.randn(*frame.shape) / 30
            noisy = frame + frame * gauss
            sample['image'] = noisy.astype('uint8')
        return sample
class DrivingDataToTensor(object):
    """Convert an H x W x C uint8 image sample into a C x H x W float32 tensor
    scaled to [0, 1]; the label is passed through unchanged."""

    def __call__(self, sample):
        frame = sample['image']
        # Scale to [0, 1], then reorder axes: numpy HWC -> torch CHW.
        scaled = (frame / 255.0).astype('float32')
        chw = scaled.transpose((2, 0, 1))
        return {'image': torch.from_numpy(chw.copy()), 'label': sample['label']}
class DriveData_LMDB(Dataset):
    """PyTorch Dataset reading driving frames + steering labels from an LMDB file.

    NOTE(review): __xs/__ys/__env are class-level attributes mutated by
    instances, so two instances would share (and keep appending to) the same
    lists -- confirm single-instance usage before reusing this class.
    """
    __xs = []   # decoded image arrays
    __ys = []   # float32 steering labels
    __env = []  # LMDB environment handle (list placeholder until opened)
    def __init__(self, folder_dataset, transform=None):
        # transform: optional callable applied to each {'image', 'label'} sample
        self.transform = transform
        # Load LMDB file
        print('Load LMDB:', folder_dataset)
        self.__env = lmdb.open(folder_dataset, readonly=True)
        # Open and load LMDB file including the whole training data (And load to memory)
        with self.__env.begin() as txn:
            cursor = txn.cursor()
            for key, value in cursor:
                key_str = key.decode('ascii')
                # print(key_str)
                if 'label' in key_str:
                    # NOTE(review): np.asscalar was removed in NumPy 1.23;
                    # newer NumPy needs .item() here.
                    self.__ys.append(np.float32(np.asscalar(np.frombuffer(value, dtype=np.float32, count=1))))
                else:
                    # Get shape information from key name
                    info_key = key_str.split('_')
                    # Get image shape [2:None] means from index 2 to the end
                    shape_img = tuple(map(lambda x: int(x), info_key[2:None]))
                    # Convert to float32
                    self.__xs.append(np.frombuffer(value, dtype=np.uint8).reshape(shape_img))
    def addFolder(self, folder_dataset):
        # Merging multiple LMDB folders is intentionally unimplemented.
        print('Not supported now for LMDB')
        pass
    # Override to give PyTorch access to any image on the dataset
    def __getitem__(self, index):
        img = self.__xs[index]
        # Crop rows 126:226 (road region) and resize to the 66x200 network input.
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 -- this
        # only runs on old SciPy; a PIL/cv2 resize is the modern replacement.
        img = (scipy.misc.imresize(img[126:226], [66, 200]))
        # Convert label to torch tensors
        label = self.__ys[index]
        # Do Transformations on the image/label
        sample = {'image': img, 'label': label}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample
    # Override to give PyTorch size of dataset
    def __len__(self):
        return len(self.__xs)
|
# to send a fancy content in mail we can use MIMEMultipart
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

# Fixed addresses; the account password is read interactively so it is
# never stored in the source file.
sender_email = "yourmailid@gmail.com"
receiver_email = "toemailid@gmail.com"
password = input("Type your password and press enter:")

# "alternative" means the client renders the best part it supports.
message = MIMEMultipart("alternative")
message["Subject"] = "testing"
message["From"] = sender_email
message["To"] = receiver_email

# Create the plain-text and HTML version of your message
text = """\
Hi,
How are you?
python has many packages
https://pypi.org/"""
html = """\
<html>
<body>
<p>Hi,<br>
How are you?<br>
<a href="https://pypi.org/"> Python Packages </a>
has many great tutorials.
</p>
</body>
</html>
"""

# Turn these into plain/html MIMEText objects
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")

# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part1)
message.attach(part2)

# Create secure connection with server and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
    server.login(sender_email, password)
    server.sendmail(
        sender_email, receiver_email, message.as_string()
    )
|
import functools
@functools.lru_cache()
def editDist(w1, w2):
    """Levenshtein distance between w1 and w2 (memoized recursion).

    Each call looks at the last characters and takes the cheapest of three
    edits: delete the last char of w1, insert w2's last char at the end of
    w1 (equivalently drop it from w2), or match/substitute the final pair.
    """
    # Base cases: identical strings cost nothing; against an empty string
    # the distance is just the other string's length.
    if w1 == w2:
        return 0
    if w1 == "":
        return len(w2)
    if w2 == "":
        return len(w1)
    delete = 1 + editDist(w1[:-1], w2)   # drop the last character of w1
    insert = 1 + editDist(w1, w2[:-1])   # append w2[-1] to w1, then recurse
    if w1[-1] == w2[-1]:
        # Final characters already agree: no edit needed for them.
        substitute = editDist(w1[:-1], w2[:-1])
    else:
        # Replace w1's last character with w2's.
        substitute = 1 + editDist(w1[:-1], w2[:-1])
    return min(delete, insert, substitute)

ans = editDist("thequickbrownfoxjumpedoverthelazydog", "lazycatsareworsethanlazydogs")
def editDistDp(w1, w2):
    """Levenshtein distance between w1 and w2 via bottom-up dynamic programming.

    dp[i][j] holds the distance between the first i characters of w1 and the
    first j characters of w2; the answer is dp[len(w1)][len(w2)].
    """
    # Cheap exits before allocating the table.
    if w1 == w2:
        return 0
    if w1 == "":
        return len(w2)
    if w2 == "":
        return len(w1)
    rows, cols = len(w1) + 1, len(w2) + 1
    dp = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            if i == 0:
                dp[i][j] = j    # build the w2 prefix by pure insertion
            elif j == 0:
                dp[i][j] = i    # erase the w1 prefix by pure deletion
            else:
                cost = 0 if w1[i - 1] == w2[j - 1] else 1
                dp[i][j] = min(1 + dp[i][j - 1],          # insert
                               1 + dp[i - 1][j],          # delete
                               cost + dp[i - 1][j - 1])   # match / replace
    return dp[-1][-1]

ans = editDistDp("a", "b")
x = 2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-13 19:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds event categories, individual/team
    # entries and a results table.
    # NOTE(review): the model name 'Reults' looks like a typo for 'Results';
    # renaming now would require a follow-up RenameModel migration, so it is
    # left untouched here.

    dependencies = [
        ('core', '0002_auto_20170213_2110'),
    ]

    operations = [
        migrations.CreateModel(
            name='EventCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(choices=[('velo', 'Велo'), ('pro', 'ПРО'), ('trek', 'Трек'), ('amateur', 'Любительский'), ('solo', 'Соло'), ('xxx', 'Whatever Category')], max_length=8)),
                ('time_limit', models.DurationField()),
                ('leader_time', models.DurationField()),
                ('geo_length', models.PositiveSmallIntegerField()),
                ('description', models.TextField()),
                ('min_team_members', models.PositiveSmallIntegerField()),
                ('max_team_members', models.PositiveSmallIntegerField()),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Event')),
            ],
        ),
        migrations.CreateModel(
            name='IndividualEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.EventCategory')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Reults',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DurationField()),
                ('place', models.PositiveSmallIntegerField()),
                ('control_points', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('bonus_points', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('notes', models.CharField(max_length=128)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.EventCategory')),
                ('individual', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('team', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Team')),
            ],
        ),
        migrations.CreateModel(
            name='TeamEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.EventCategory')),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Team')),
            ],
        ),
    ]
|
# NOTE(review): this shadows the built-in sum() for the rest of the module.
def sum(x,y):
    """Return x + y (demo of a simple two-argument function)."""
    return x+y
print(sum(3,4))#7
# Default-parameter example: n defaults to 2, so power(x) squares x.
def power(x, n=2):
    """Return x multiplied by itself n times (n defaults to 2)."""
    result = 1
    remaining = n
    while remaining > 0:
        remaining -= 1
        result *= x
    return result
print(power(3))
print(power(3,3))
# Variable arguments: the caller may pass a sequence's elements or an object's items.
# Keyword arguments:
# *args lets you pass zero or more positional arguments, collected into a tuple
# at call time; **kw lets you pass zero or more named arguments, collected into
# a dict inside the function. Example:
def person(name, age, **kw):
    print('name:', name, 'age:', age, 'other:', kw)
# Note the syntax for defining variable and keyword parameters:
# *args is the variable-argument form; args receives a tuple;
# **kw is the keyword-argument form; kw receives a dict.
|
"""
Created by: Gabriele Pompa (gabriele.pompa@gmail.com)
File: example_options_other_params.py
Created on Tue Jul 14 2020 - Version: 1.0
Description:
This script shows usage of PlainVanillaOption and DigitalOption classes. Instantiation examples are provided involving
combinations of the underlying level (S), strike-price (K), time parameter (t/tau), as well as underlying volatility
(sigma) and short-rate (r) parameters. Price, P&L, first-order greeks as well as Black-Scholes implied-volatility are
computed for plain-vanilla and digital option contracts.
"""
import numpy as np
import pandas as pd
import warnings
from pyblackscholesanalytics.market.market import MarketEnvironment
from pyblackscholesanalytics.options.options import PlainVanillaOption, DigitalOption
warnings.filterwarnings("ignore")
def option_factory(mkt_env, plain_or_digital, option_type):
    """Instantiate a single option contract of the requested style and type.

    Parameters:
        mkt_env: MarketEnvironment forwarded to the option constructor.
        plain_or_digital (str): "plain_vanilla" or "digital".
        option_type (str): "call" or "put".

    Returns:
        PlainVanillaOption or DigitalOption instance.

    Raises:
        KeyError: if either selector is unrecognized (same contract as the
            original nested-dict lookup).

    The original eagerly built all four contracts on every call; this
    constructs only the one requested.
    """
    option_classes = {
        "plain_vanilla": PlainVanillaOption,
        "digital": DigitalOption,
    }
    constructor_kwargs = {
        "call": {},
        "put": {"option_type": "put"},
    }
    cls = option_classes[plain_or_digital]
    return cls(mkt_env, **constructor_kwargs[option_type])
def get_param_dict(option, np_output, case):
    """Return (parameters, info) for one named pricing scenario.

    Args:
        option: option object, used only to read its valuation date (get_t)
            and expiration date (get_T) for building the time grid.
        np_output (bool): forwarded in every parameter set; True -> results
            as np.ndarray, False -> pd.DataFrame.
        case (str): key selecting one of the predefined scenarios below.

    Returns:
        tuple: (parameter dict ready to **-splat into option methods,
                human-readable description string).

    Raises:
        KeyError: if `case` is not one of the predefined scenario names.
    """
    # S
    S_vector = [90, 100, 110]
    mS = len(S_vector)
    # K
    K_vector = [75, 85, 90, 95, 105, 115]
    mK = len(K_vector)
    # tau: a date-range of 5 valuation dates between t and T-10d
    n = 5
    valuation_date = option.get_t()
    expiration_date = option.get_T()
    t_vector = pd.date_range(start=valuation_date,
                             end=expiration_date - pd.Timedelta(days=10),
                             periods=n)
    # sigma: a plain axis plus grids matched to the S- and K-shaped cases
    sigma_axis = np.array([0.1 * (1 + i) for i in range(3)])
    sigma_grid_S = np.array([0.1 * (1 + i) for i in range(mS * n)]).reshape(n, mS)
    sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
    # r: same layout as sigma
    r_axis = np.array([0.01 * (1 + i) for i in range(3)])
    r_grid_S = np.array([0.01 * (1 + i) for i in range(mS * n)]).reshape(n, mS)
    r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
    cases_dict = {
        "All_scalar": {"parameters":
                           {"S": S_vector[0],
                            "K": K_vector[0],
                            "t": t_vector[0],
                            "sigma": 0.1,
                            "r": 0.01,
                            "np_output": np_output},
                       "info": "Case 0 - all scalar parameters"
                       },
        "S": {"parameters":
                  {"S": S_vector,
                   "K": K_vector[0],
                   "t": t_vector[0],
                   "sigma": 0.1,
                   "r": 0.01,
                   "np_output": np_output},
              "info": "Case S - (S vector, other scalar)"
              },
        "S.sigma_distributed": {"parameters":
                                    {"S": S_vector,
                                     "K": K_vector[0],
                                     "t": t_vector[0],
                                     "sigma": [0.1 * (1 + i) for i in range(mS)],
                                     "r": 0.01,
                                     "np_output": np_output},
                                "info": "Case S.sigma_distributed - (S vector, K scalar, t scalar, sigma distributed "
                                        "along S, r scalar) "
                                },
        "S.r_distributed": {"parameters":
                                {"S": S_vector,
                                 "K": K_vector[0],
                                 "t": t_vector[0],
                                 "sigma": 0.1,
                                 "r": [0.01 * (1 + i) for i in range(mS)],
                                 "np_output": np_output},
                            "info": "Case S.r_distributed - (S vector, K scalar, t scalar, sigma scalar, "
                                    "r distributed along S) "
                            },
        "S.sigma_and_r_distributed": {"parameters":
                                          {"S": S_vector,
                                           "K": K_vector[0],
                                           "t": t_vector[0],
                                           "sigma": [0.1 * (1 + i) for i in range(mS)],
                                           "r": [0.01 * (1 + i) for i in range(mS)],
                                           "np_output": np_output},
                                      "info": "Case S.sigma_and_r_distributed - (S vector, K scalar, t scalar, "
                                              "sigma distributed along S, r distributed along S) "
                                      },
        "K": {"parameters":
                  {"S": S_vector[0],
                   "K": K_vector,
                   "t": t_vector[0],
                   "sigma": 0.1,
                   "r": 0.01,
                   "np_output": np_output},
              "info": "Case K - (K vector, other scalar)"
              },
        "K.sigma_distributed": {"parameters":
                                    {"S": S_vector[0],
                                     "K": K_vector,
                                     "t": t_vector[0],
                                     "sigma": [0.1 * (1 + i) for i in range(mK)],
                                     "r": 0.01,
                                     "np_output": np_output},
                                "info": "Case K.sigma_distributed - (S scalar, K vector, t scalar, sigma distributed "
                                        "along K, r scalar) "
                                },
        # NOTE(review): the info string below says "Case S.r_distributed" --
        # it looks copy-pasted from the S case; the parameters are the K case.
        "K.r_distributed": {"parameters":
                                {"S": S_vector[0],
                                 "K": K_vector,
                                 "t": t_vector[0],
                                 "sigma": 0.1,
                                 "r": [0.01 * (1 + i) for i in range(mK)],
                                 "np_output": np_output},
                            "info": "Case S.r_distributed - (S scalar, K vector, t scalar, sigma scalar, "
                                    "r distributed along K) "
                            },
        "K.sigma_and_r_distributed": {"parameters":
                                          {"S": S_vector[0],
                                           "K": K_vector,
                                           "t": t_vector[0],
                                           "sigma": [0.1 * (1 + i) for i in range(mK)],
                                           "r": [0.01 * (1 + i) for i in range(mK)],
                                           "np_output": np_output},
                                      "info": "Case K.sigma_and_r_distributed - (S scalar, K vector, t scalar, "
                                              "sigma distributed along K, r distributed along K) "
                                      },
        "t": {"parameters":
                  {"S": S_vector[0],
                   "K": K_vector[0],
                   "t": t_vector,
                   "sigma": 0.1,
                   "r": 0.01,
                   "np_output": np_output},
              "info": "Case t - (t vector, other scalar)"
              },
        "t.sigma_distributed": {"parameters":
                                    {"S": S_vector[0],
                                     "K": K_vector[0],
                                     "t": t_vector,
                                     "sigma": [0.1 * (1 + i) for i in range(n)],
                                     "r": 0.01,
                                     "np_output": np_output},
                                "info": "Case t.sigma_distributed - (S scalar, K scalar, t vector, sigma distributed "
                                        "along t, r scalar) "
                                },
        "t.r_distributed": {"parameters":
                                {"S": S_vector[0],
                                 "K": K_vector[0],
                                 "t": t_vector,
                                 "sigma": 0.1,
                                 "r": [0.01 * (1 + i) for i in range(n)],
                                 "np_output": np_output},
                            "info": "Case t.r_distributed - (S scalar, K scalar, t vector, sigma scalar, "
                                    "r distributed along t) "
                            },
        "t.sigma_and_r_distributed": {"parameters":
                                          {"S": S_vector[0],
                                           "K": K_vector[0],
                                           "t": t_vector,
                                           "sigma": [0.1 * (1 + i) for i in range(n)],
                                           "r": [0.01 * (1 + i) for i in range(n)],
                                           "np_output": np_output},
                                      "info": "Case t.sigma_and_r_distributed - (S scalar, K scalar, t vector, "
                                              "sigma distributed along t, r distributed along t) "
                                      },
        "S.t": {"parameters":
                    {"S": S_vector,
                     "K": K_vector[0],
                     "t": t_vector,
                     "sigma": 0.1,
                     "r": 0.01,
                     "np_output": np_output},
                "info": "Case S.t - (S and t vector, other scalar)"
                },
        "S.t.sigma_distributed_as_Sxt_grid": {"parameters":
                                                  {"S": S_vector,
                                                   "K": K_vector[0],
                                                   "t": t_vector,
                                                   "sigma": sigma_grid_S,
                                                   "r": 0.01,
                                                   "np_output": np_output},
                                              "info": "Case S.t.sigma_distributed_as_Sxt_grid - (S and t vector, "
                                                      "K scalar, sigma distributed as Sxt grid, r scalar) "
                                              },
        "S.t.r_distributed_as_Sxt_grid": {"parameters":
                                              {"S": S_vector,
                                               "K": K_vector[0],
                                               "t": t_vector,
                                               "sigma": 0.1,
                                               "r": r_grid_S,
                                               "np_output": np_output},
                                          "info": "Case S.t.r_distributed_as_Sxt_grid - (S and t vector, K scalar, "
                                                  "sigma scalar, r distributed as Sxt grid) "
                                          },
        "S.t.sigma_and_r_distributed_as_Sxt_grid": {"parameters":
                                                        {"S": S_vector,
                                                         "K": K_vector[0],
                                                         "t": t_vector,
                                                         "sigma": sigma_grid_S,
                                                         "r": r_grid_S,
                                                         "np_output": np_output},
                                                    "info": "Case S.t.sigma_and_r_distributed_as_Sxt_grid - (S and t "
                                                            "vector, K scalar, sigma distributed as Sxt grid, "
                                                            "r distributed as Sxt grid) "
                                                    },
        "K.t": {"parameters":
                    {"S": S_vector[0],
                     "K": K_vector,
                     "t": t_vector,
                     "sigma": 0.1,
                     "r": 0.01,
                     "np_output": np_output},
                "info": "Case K.t - (K and t vector, other scalar)"
                },
        "K.t.sigma_distributed_as_Kxt_grid": {"parameters":
                                                  {"S": S_vector[0],
                                                   "K": K_vector,
                                                   "t": t_vector,
                                                   "sigma": sigma_grid_K,
                                                   "r": 0.01,
                                                   "np_output": np_output},
                                              "info": "Case K.t.sigma_distributed_as_Kxt_grid - (S scalar, K and t "
                                                      "vector, sigma distributed as Kxt grid, r scalar) "
                                              },
        "K.t.r_distributed_as_Kxt_grid": {"parameters":
                                              {"S": S_vector[0],
                                               "K": K_vector,
                                               "t": t_vector,
                                               "sigma": 0.1,
                                               "r": r_grid_K,
                                               "np_output": np_output},
                                          "info": "Case K.t.r_distributed_as_Kxt_grid - (S scalar, K and t vector, "
                                                  "sigma scalar, r distributed as Kxt grid) "
                                          },
        "K.t.sigma_and_r_distributed_as_Kxt_grid": {"parameters":
                                                        {"S": S_vector[0],
                                                         "K": K_vector,
                                                         "t": t_vector,
                                                         "sigma": sigma_grid_K,
                                                         "r": r_grid_K,
                                                         "np_output": np_output},
                                                    "info": "Case K.t.sigma_and_r_distributed_as_Kxt_grid - (S "
                                                            "scalar, K and t vector, sigma distributed as Kxt grid, "
                                                            "r distributed as Kxt grid) "
                                                    },
        # if we want to have the x-axis spanned by sigma or r, we have to explicitly
        # ask for it, using "sigma_axis" or "r_axis" flags. Otherwise, sigma and r
        # parameters are interpreted as parameters to be distributed along the
        # other(s) axis (and require length/shape match)
        "t.sigma_axis": {"parameters":
                             {"S": S_vector[0],
                              "K": K_vector[0],
                              "t": t_vector,
                              "sigma": sigma_axis,
                              "r": 0.01,
                              "np_output": np_output,
                              "sigma_axis": True},
                         "info": "Case t.sigma_axis - (S scalar, K scalar, t vector, sigma vector axis, r scalar)"
                         },
        "t.r_axis": {"parameters":
                         {"S": S_vector[0],
                          "K": K_vector[0],
                          "t": t_vector,
                          "sigma": 0.1,
                          "r": r_axis,
                          "np_output": np_output,
                          "r_axis": True},
                     "info": "Case t.r_axis - (S scalar, K scalar, t vector, sigma scalar, r vector axis)"
                     }
    }
    return cases_dict[case]["parameters"], cases_dict[case]["info"]
def main():
    """Price one option contract across all predefined parameter layouts,
    printing payoff, price limits, price, P&L, greeks and implied volatility
    for each scenario returned by get_param_dict()."""
    # if np_output is True, the output will be np.ndarray, otherwise pd.DataFrame
    np_output = False  # True
    # default market environment
    market_env = MarketEnvironment()
    print(market_env)
    # define option style and type
    opt_style = "plain_vanilla"  # "digital"
    opt_type = "call"  # "put"
    option = option_factory(market_env, opt_style, opt_type)
    print(option)
    for case in ['All_scalar', 'S', 'S.sigma_distributed', 'S.r_distributed', 'S.sigma_and_r_distributed',
                 'K', 'K.sigma_distributed', 'K.r_distributed', 'K.sigma_and_r_distributed',
                 't', 't.sigma_distributed', 't.r_distributed', 't.sigma_and_r_distributed',
                 'S.t', 'S.t.sigma_distributed_as_Sxt_grid', 'S.t.r_distributed_as_Sxt_grid',
                 'S.t.sigma_and_r_distributed_as_Sxt_grid',
                 'K.t', 'K.t.sigma_distributed_as_Kxt_grid', 'K.t.r_distributed_as_Kxt_grid',
                 'K.t.sigma_and_r_distributed_as_Kxt_grid',
                 't.sigma_axis', 't.r_axis']:
        # get parameters dictionary for case considered
        param_dict, case_info = get_param_dict(option, np_output, case)
        print("\n--------------------------------------------\n")
        print("\n" + case_info + "\n")
        print("Parameters:")
        print("S: {}".format(param_dict["S"]))
        print("K: {}".format(param_dict["K"]))
        print("t: {}".format(param_dict["t"]))
        print("sigma: {}".format(param_dict["sigma"]))
        print("r: {}\n".format(param_dict["r"]))
        print("Metrics:")
        print("Payoff:\n", option.payoff(**param_dict))
        print("\nPrice upper limit:\n", option.price_upper_limit(**param_dict))
        print("\nPrice lower limit:\n", option.price_lower_limit(**param_dict))
        print("\nPrice:\n", option.price(**param_dict))
        print("\nP&L:\n", option.PnL(**param_dict))
        print("\nDelta:\n", option.delta(**param_dict))
        print("\nTheta:\n", option.theta(**param_dict))
        print("\nGamma:\n", option.gamma(**param_dict))
        print("\nVega:\n", option.vega(**param_dict))
        print("\nRho:\n", option.rho(**param_dict))
        # Implied volatility calculation is not implemented for x-axis
        # (columns) spanned by sigma
        if ('sigma_axis' not in param_dict) or (param_dict['sigma_axis'] is False):
            print("\nExpected Implied Volatility: \n{}\n".format(param_dict["sigma"]))
            print("\nImplied Volatility - Newton method:\n{}\n".format(option.implied_volatility(**param_dict)))
            # Second solver for cross-checking the Newton result.
            param_dict["minimization_method"] = "Least-Squares"
            print("\nImplied Volatility - Least-Squares constrained method:\n{}\n"
                  .format(option.implied_volatility(**param_dict)))

# ----------------------------- usage example ---------------------------------#
if __name__ == "__main__":
    main()
|
# Packaging metadata for the tscribe distribution (AWS Transcribe -> docx/CSV/SQLite/VTT).
import setuptools
# The PyPI long description comes straight from the README.
with open("README.md", "r") as f:
    long_description = f.read()
setuptools.setup(
    name="tscribe",
    version="1.3.1",
    author="Robert Williams",
    author_email="robertedwardwilliams@me.com",
    description="Produce Word Document, CSV, SQLite and VTT transcriptions using the automatic speech recognition from AWS Transcribe.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/kibaffo33/aws_transcribe_to_docx",
    packages=setuptools.find_packages(),
    # Runtime dependencies pulled in on install.
    install_requires=["python-docx", "matplotlib", "pandas", "webvtt-py"],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
)
|
"""
Author : Lily
Date : 2018-09-18
QQ : 339600718
百丽宫影城 PALACE cinema PalaceCema-s
抓取思路:在主页面获取每个影院的链接,再进入链接获取影院的具体信息
index_url: http://www.b-cinema.cn/home.jsp
注意:网页时而能加载,时而不能,只要多请求几次,需要改善。
"""
from time import sleep
import requests
import re
import datetime
from lxml import etree
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'}
filename = "PalaceCema-s" + re.sub('[^0-9]', '', str(datetime.datetime.now())) + ".csv"
f = open(filename, 'w', encoding='utf-8')
f.write('name,address,goup_phone,news_phone,\n')
index_url = 'http://www.b-cinema.cn/home.jsp'
index_html = requests.get(index_url, headers=headers).text
index_lxml = etree.HTML(index_html)
sum = len(index_lxml.xpath('//table[@class="box"]/tbody/tr/td/table/tbody/tr/td[2]/a/@href'))
address = ''
goup_phone = ''
news_phone = ''
for i in range(sum):
name = index_lxml.xpath('//table[@class="box"]/tbody/tr/td/table/tbody/tr[' + str(i+1) + ']/td[2]/a/text()')[0]
store_link = index_lxml.xpath('//table[@class="box"]/tbody/tr/td/table/tbody/tr[' + str(i+1) + ']/td[2]/a/@href')
sleep(3)
print(store_link[0])
if "http://www.b-cinema.cn/" in store_link[0]:
store_link = store_link[0]
else:
store_link = 'http://www.b-cinema.cn/'+store_link[0]
print(store_link)
store_html = requests.get(store_link).text
store_lxml = etree.HTML(store_html)
store_info = store_lxml.xpath('//*[@id="toubiao"]/table[4]/tbody/tr[2]/td/table[3]/tbody/tr/td[1]/table/tbody/tr/td[strong]//text()')
sum1 = len(store_info)
print(sum1)
for j in range(sum1):
print(store_info[j])
if "团体票" in store_info[j]:
goup_phone = store_info[j+1].replace(':', '')
print(goup_phone)
if "影讯" in store_info[j]:
news_phone = store_info[j+1].replace(':', '')
print(news_phone)
if "地址" in store_info[j]:
address = store_info[j+1].replace(':', '').replace(',',',')
print(address)
f.write(name + "," + address + "," + goup_phone + "," + news_phone + "\n")
|
from matplotlib import pyplot as plt
def count0(p):
    """Return the number of '0' characters in the layer string `p`.

    Idiom fix: `str.count` replaces the manual counting loop.
    """
    return p.count('0')
def count1(p):
    """Return the number of '1' characters in the layer string `p`.

    Idiom fix: `str.count` replaces the manual counting loop.
    """
    return p.count('1')
def count2(p):
    """Return the number of '2' characters in the layer string `p`.

    Idiom fix: `str.count` replaces the manual counting loop.
    """
    return p.count('2')
if __name__=='__main__':
    # Advent of Code 2019 day 8: the input is one long digit string made of
    # 25x6 = 150-pixel layers.
    with open('day8.txt') as f:
        pixels = str(f.read())
    print(pixels)
    i = 0
    # Part 1: find the layer with the fewest '0' digits and report the
    # product of its '1' and '2' counts.
    sa0 = 150  # best (lowest) zero count so far; 150 = layer size upper bound
    ma1 = 0
    ma2 = 0
    while i < len(pixels):
        # NOTE(review): f.read() may leave a trailing newline, giving a short
        # final chunk — confirm the input file has no trailing whitespace.
        if sa0 > count0(pixels[i:i+150]):
            sa0 = count0(pixels[i:i+150])
            ma1 = count1(pixels[i:i+150])
            ma2 = count2(pixels[i:i+150])
        i = i + 150
    print(sa0, ma1, ma2)
    print(ma1, ' ', ma2)
    print(ma1 * ma2)
    print(len(pixels) // 150)
    #100 layers
    # Part 2: for each pixel keep the first non-transparent layer
    # ('2' means transparent, so scan down through layers in steps of 150).
    res = []
    for a in range(150):
        temp = a
        while pixels[temp] == '2':
            temp += 150
        res.append(pixels[temp])
    print(res)
    # Render 6 rows x 25 cols; '0' -> white (255), anything else -> black.
    # NOTE: `map` shadows the builtin.
    map = [[0 for i in range(25)] for j in range(6)]
    for a in range(6):
        for b in range(25):
            if res[a*25+b] == '0':
                map[a][b] = 255
            else:
                map[a][b] = 0
    plt.imshow(map)
    plt.show()
    print(map)
|
from sqlalchemy import (
create_engine,
String,
Integer,
Column,
)
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from sqlalchemy.ext.declarative import declarative_base
from decouple import config
# Engine/session wiring.  DATABASE_URL comes from the environment via
# python-decouple; echo=True logs every emitted SQL statement.
DATABASE_URL = config("DATABASE_URL")
ENGINE = create_engine(DATABASE_URL, echo=True)
session_factory = sessionmaker(bind=ENGINE)
# Thread-local session registry used by the helper coroutines below.
SESSION = scoped_session(session_factory)
BASE = declarative_base()
class ScannedChat(BASE):
    """ORM row for a chat that has been scanned for invite links.

    Fix: SQLAlchemy's ``Integer`` type accepts no length argument, so
    ``Integer(20)`` raised ``TypeError`` when this module was imported.
    Plain ``Integer`` is used instead (``String`` does take a length).
    """
    __tablename__ = "scanned_chat"
    # Telegram chat id (primary key).
    chat_id = Column(Integer, primary_key = True)
    chat_title = Column(String(50))
    chat_hash = Column(String(50))
    # Stored as text; see set_last_scanned().
    last_scanned_message_id = Column(String(20))
    # chat_id of the chat whose invite link led us here, if any.
    got_invite_from = Column(Integer)
    total_invites_found = Column(Integer)

    def __init__(
        self,
        chat_id,
        chat_title,
        last_scanned_message_id = None,
        got_invite_from = None,
    ):
        self.chat_id = chat_id
        self.chat_title = chat_title
        self.last_scanned_message_id = last_scanned_message_id
        self.got_invite_from = got_invite_from
        # New rows always start with zero discovered invites.
        self.total_invites_found = 0
async def add_new_chat(chat_id, chat_title, got_invite_from = None):
    """Persist a newly discovered chat.

    Fix: ``got_invite_from`` was previously passed positionally into
    ``ScannedChat(chat_id, chat_title, ...)``, where it landed in the
    ``last_scanned_message_id`` parameter slot.  Pass it by keyword so it
    reaches the intended column.
    """
    chat = ScannedChat(
        chat_id,
        chat_title,
        got_invite_from = got_invite_from,
    )
    SESSION.add(chat)
    SESSION.commit()
async def set_last_scanned(chat_id, last_scanned_message_id):
    """Record the newest scanned message id for an existing chat row."""
    # Primary-key lookup; assumes the row already exists — TODO confirm callers.
    chat = SESSION.query(ScannedChat).get(chat_id)
    chat.last_scanned_message_id = last_scanned_message_id
    SESSION.commit()
|
from flask import Flask,render_template
from models import db
from views_news import views_blueprint
from views_user import user_blueprint
from views_admin import admin_blueprint
from flask.ext.wtf import CSRFProtect
from flask_session import Session
import redis
def Create_app(config):
    """Application factory: build and configure the Flask app.

    Wires up the DB, the news/user/admin blueprints, CSRF protection,
    server-side sessions, rotating file logging and a Redis client.
    """
    app = Flask(__name__)
    # Apply the configuration object.
    app.config.from_object(config)
    db.init_app(app)
    # Register blueprints.
    app.register_blueprint(views_blueprint)
    app.register_blueprint(user_blueprint)
    app.register_blueprint(admin_blueprint)
    CSRFProtect(app)
    Session(app)
    import logging
    from logging.handlers import RotatingFileHandler
    # Set the global logging level.
    logging.basicConfig(level=logging.DEBUG)  # debug level
    # Rotating file handler: log path, max size per file, number of backups.
    file_log_handler = RotatingFileHandler(config.BASE_DIR + "/logs/xjzx.log", maxBytes=1024 * 1024 * 100,
                                           backupCount=10)
    # Log record format: level, file name, line number, message.
    formatter = logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')
    # Attach the format to the handler just created.
    file_log_handler.setFormatter(formatter)
    # Attach the handler to the global logger used by the Flask app.
    logging.getLogger().addHandler(file_log_handler)
    app.logger_xjzx = logging
    # Configure the Redis client from app config.
    host,port,redis_db=app.config.get('REDIS_HOST'),app.config.get('REDIS_PORT'),app.config.get('REDIS_DB')
    app.redis_client = redis.StrictRedis(host=host,port=port,db=redis_db)
    # Handle 404 errors with a custom page.
    @app.errorhandler(404)
    def e404(e):
        return render_template('news/404.html')
    return app
|
import numpy as np
import pickle
from scipy import stats
from astropy.table import Table
from util import toSky, inSphere, wCen, P, flatten
from classes import Catalog, Tesselation, Zones, Voids
infile = "./data/vollim_dr7_cbp_102709.fits"
outdir = "./data/"
catname = "DR7"
intloc = "./intermediate/" + catname
nside = 64
denscut = 0.2
minrad = 10
class Zobov:
    """Driver for the ZOBOV-style void-finding pipeline.

    Stages: 0 catalog, 1 tesselation, 2 zones, 3 voids.  ``start``/``end``
    select which stages to (re)compute; completed stages are pickled under
    ``intloc`` and reloaded when ``start`` skips past them.
    """
    def __init__(self,start=0,end=3,save_intermediate=True):
        # NOTE(review): if end stops before stage 3, the later locals
        # (tess/zones/voids) are never bound and the attribute assignments
        # at the bottom raise NameError — confirm intended usage is end=3.
        if start not in [0,1,2,3,4] or end not in [0,1,2,3,4] or end<start:
            print("Choose valid stages")
            return
        if start<4:
            if start<3:
                if start<2:
                    if start<1:
                        ctlg = Catalog(infile,nside)
                        if save_intermediate:
                            pickle.dump(ctlg,open(intloc+"_ctlg.pkl",'wb'))
                    else:
                        ctlg = pickle.load(open(intloc+"_ctlg.pkl",'rb'))
                    if end>0:
                        tess = Tesselation(ctlg)
                        if save_intermediate:
                            pickle.dump(tess,open(intloc+"_tess.pkl",'wb'))
                else:
                    ctlg = pickle.load(open(intloc+"_ctlg.pkl",'rb'))
                    tess = pickle.load(open(intloc+"_tess.pkl",'rb'))
                if end>1:
                    zones = Zones(tess)
                    if save_intermediate:
                        pickle.dump(zones,open(intloc+"_zones.pkl",'wb'))
            else:
                ctlg = pickle.load(open(intloc+"_ctlg.pkl",'rb'))
                tess = pickle.load(open(intloc+"_tess.pkl",'rb'))
                zones = pickle.load(open(intloc+"_zones.pkl",'rb'))
            if end>2:
                voids = Voids(zones)
                if save_intermediate:
                    pickle.dump(voids,open(intloc+"_voids.pkl",'wb'))
        else:
            ctlg = pickle.load(open(intloc+"_ctlg.pkl",'rb'))
            tess = pickle.load(open(intloc+"_tess.pkl",'rb'))
            zones = pickle.load(open(intloc+"_zones.pkl",'rb'))
            voids = pickle.load(open(intloc+"_voids.pkl",'rb'))
        self.catalog = ctlg
        self.tesselation = tess
        self.zones = zones
        self.prevoids = voids
    def sortVoids(self,method=0,minsig=2,dc=denscut):
        """Filter the raw zobov voids into final candidates.

        method 0: volume/density cuts; 1: keep everything; 2: significance
        cut via the P statistic; 3: not implemented yet.
        Results are stored in self.vrads / self.vcens.
        """
        if not hasattr(self,'prevoids'):
            print("Run all stages of Zobov first")
            return
        if method==0:
            voids = []
            # A cell counts as underdense when larger than mean volume / dc.
            minvol = np.mean(self.tesselation.volumes[self.tesselation.volumes>0])/dc
            for i in range(len(self.prevoids.ovols)):
                vl = self.prevoids.ovols[i]
                if len(vl)>2 and vl[-2] < minvol:
                    continue
                voids.append([c for q in self.prevoids.voids[i] for c in q])
        elif method==1:
            voids = [[c for q in v for c in q] for v in self.prevoids.voids]
        elif method==2:
            voids = []
            for i in range(len(self.prevoids.mvols)):
                vh = self.prevoids.mvols[i]
                vl = self.prevoids.ovols[i][-1]
                r = vh / vl
                p = P(r)
                if stats.norm.isf(p/2.) >= minsig:
                    voids.append([c for q in self.prevoids.voids[i] for c in q])
        elif method==3:
            print("Coming soon")
            # Fix: previously fell through to the code below with `voids`
            # unbound, raising NameError.
            return
        else:
            print("Choose a valid method")
            return  # Fix: same unbound-`voids` fall-through as above.
        vcuts = [list(flatten(self.zones.zcell[v])) for v in voids]
        vvols = np.array([np.sum(self.tesselation.volumes[vcut]) for vcut in vcuts])
        # Effective radius of a sphere with the void's total volume.
        vrads = (vvols*3./(4*np.pi))**(1./3)
        # Volume-weighted void centers.
        vcens = np.array([wCen(self.tesselation.volumes[vcut],self.catalog.coord[vcut]) for vcut in vcuts])
        if method==0:
            dcut = np.array([64.*len(self.catalog.coord[inSphere(vcens[i],vrads[i]/4.,self.catalog.coord)])/vvols[i] for i in range(len(vrads))])<1./minvol
            vrads = vrads[dcut]
            rcut = vrads>(minvol*dc)**(1./3)
            vrads = vrads[rcut]
            vcens = vcens[dcut][rcut]
        # Drop voids below the global minimum radius.
        rcut = vrads>minrad
        vrads = vrads[rcut]
        vcens = vcens[rcut]
        self.vrads = vrads
        self.vcens = vcens
    def saveVoids(self):
        """Write the sorted voids (z, ra, dec, radius) to an ASCII table."""
        if not hasattr(self,'vcens'):
            print("Sort voids first")
            return
        vz,vra,vdec = toSky(self.vcens)
        vT = Table([vz,vra,vdec,self.vrads],names=('z','ra','dec','radius'))
        vT.write(outdir+catname+"_zobovoids.dat",format='ascii.commented_header',overwrite=True)
|
# -*- coding: utf-8 -*-
#
# This file is part of Flask-AppExts
# Copyright (C) 2015 CERN.
#
# Flask-AppExts is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask-AppExts provide ready to use extensions for Flask-AppFactory.
Needed extensions:
- Babel
- Security
- Assets
- Restful
- Mixer
- Admin
- Babel
- Collect
- Debug Toolbar
- Email
- Logging
- Jasmine
- Sitemap
- Session
- Logging
- Restful
"""
from __future__ import absolute_import, unicode_literals, print_function
from .version import __version__
__all__ = ('__version__',)
|
def load_correct_creds(creds):
    """Return the production (access_key, secret_key) pair from a creds mapping."""
    prod_creds = creds['prod']
    return prod_creds['access_key'], prod_creds['secret_key']
|
# Solution 2
# Precompute the sequence 1, 2, 2, 3, 3, 3, ... (each i repeated i times),
# then answer a 1-based inclusive range-sum query [s, e] over it.
n = []
for i in range(1, 46):
    for j in range(1, i+1):
        n.append(i)
_ = list(map(int, input().split()))
s, e = _[0], _[1]
print(sum(n[s-1:e]))
|
import os
import re
import cv2
from PIL import Image
from cfg import output_path, newdir
def read_img_size(img):
    """Return (width, height) of the image file at path `img`.

    The output video is sized from the first decoded frame instead of a
    hard-coded resolution.
    """
    width, height = Image.open(img).size
    return width, height
def generate_video(path):
    """Encode all .jpg frames under `path` into an MJPG video at `output_path`.

    Raises:
        ValueError: if `path` contains no files at all.
    """
    # Order frames by the first integer embedded in each file name.
    filelist = os.listdir(path)
    filelist.sort(key=lambda x: int(re.findall(r'\d+', x)[0]))
    if len(filelist) == 0:
        # Fix: a bare `raise` outside an except block just raises
        # RuntimeError("No active exception to re-raise"); raise a
        # meaningful exception instead.
        raise ValueError("no input frames found in " + path)
    # Size the video from the first frame (avoids hard-coded dimensions).
    size = read_img_size(path + '/' + filelist[0])
    # fps: frames per second written to the output.  E.g. with 50 frames and
    # fps=5 the clip lasts 10 seconds; repeating one frame stretches it.
    fps = 24
    # Output location comes from cfg.output_path.
    file_path = output_path
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    video = cv2.VideoWriter(file_path, fourcc, fps, size)
    for item in filelist:
        if item.endswith('.jpg'):
            item = path + '/' + item
            # cv2.imread returns a BGR numpy.ndarray (uint8, 0-255).
            img = cv2.imread(item)
            # Append the frame to the video.
            video.write(img)
    video.release()
if __name__ == "__main__":
generate_video(newdir)
|
#python3
import requests
from lxml import etree
import urllib.request as ur
import os,time
from multiprocessing.dummy import Pool as ThreadPool
# Thread URL template; `pn` selects the page number.
url = 'https://tieba.baidu.com/p/4840077002?pn={num}'
headers = ('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
# Install a global urllib opener carrying the UA header so that
# ur.urlretrieve below also sends it.
opener = ur.build_opener()
opener.addheaders=[headers]
ur.install_opener(opener)
def main():
    """Sequential variant: download post images from pages 1-4 one by one."""
    os.chdir(os.getcwd())
    # NOTE(review): os.mkdir fails if the directory already exists — confirm
    # the script is only ever run on a clean working directory.
    os.mkdir("tieba")
    a = time.time()
    for j in range(1,5):
        r = requests.get(url.format(num=j)).content
        s = etree.HTML(r)
        # All post images carry the BDE_Image class.
        img_url = s.xpath('//*[@class="BDE_Image"]/@src')
        try:
            for i in img_url:
                file_name = i.split("/")[-1]
                download_path = "tieba"
                file = os.path.join(download_path, file_name)
                ur.urlretrieve(i, file)
        except Exception as e:
            print('万万没想到:'+str(e))
    # Prepare the output directory for the threaded run in __main__.
    os.chdir(os.getcwd())
    os.mkdir("image")
def get_img(url):
    """Download every post image on one thread page into ./image."""
    page = requests.get(url).content
    tree = etree.HTML(page)
    sources = tree.xpath('//*[@class="BDE_Image"]/@src')
    try:
        for src in sources:
            target = os.path.join("image", src.split("/")[-1])
            ur.urlretrieve(src, target)
    except Exception as e:
        print('万万没想到:'+str(e))
if __name__ == "__main__":
a = time.time()
main()
print('这次花费时间为:{num}s'.format(num=time.time()-a))
c = time.time()
urls = [url.format(num=i) for i in range(1,5)]
pool = ThreadPool(12)
pool.map(get_img,urls)
print('这次花费时间为:{num}s'.format(num=time.time()-c))
pool.close()
pool.join()
|
class DynamicPointsDensityUIParameters:
    """UI range/precision constants for the dynamic points-density panel.

    Each quantity defines min/max bounds, a line-edit display precision and
    a CalcConstant scale factor used to map float values onto integer
    slider positions (slider position = value * CalcConstant).
    """
    # ACCELERATION TIME
    AccelerationTimeMin = 0.01
    AccelerationTimeMax = 300.0
    AccelerationTimeLineEditAccuracy = 2
    AccelerationTimeCalcConstant = 100  # factor 100 -> precision of 10**2 (two decimals)
    AccelerationTimeSliderMin = AccelerationTimeMin * AccelerationTimeCalcConstant
    AccelerationTimeSliderMax = AccelerationTimeMax * AccelerationTimeCalcConstant
    # PLATEAU TIME
    PlateauTimeMin = 0.0
    PlateauTimeMax = 300.0
    PlateauTimeLineEditAccuracy = 2
    PlateauTimeCalcConstant = 100  # factor 100 -> precision of 10**2 (two decimals)
    PlateauTimeSliderMin = PlateauTimeMin * PlateauTimeCalcConstant
    PlateauTimeSliderMax = PlateauTimeMax * PlateauTimeCalcConstant
    # DECELERATION TIME
    DecelerationTimeMin = 0.01
    DecelerationTimeMax = 300.0
    DecelerationTimeLineEditAccuracy = 2
    DecelerationTimeCalcConstant = 300  # NOTE(review): original comment claimed 100 (two decimals) but the factor is 300 — confirm intended scale
    DecelerationTimeSliderMin = DecelerationTimeMin * DecelerationTimeCalcConstant
    DecelerationTimeSliderMax = DecelerationTimeMax * DecelerationTimeCalcConstant
    # END TIME
    EndTimeMin = 0.0
    EndTimeMax = 300.0
    EndTimeLineEditAccuracy = 2
    EndTimeCalcConstant = 300  # NOTE(review): original comment claimed 100 (two decimals) but the factor is 300 — confirm intended scale
    EndTimeSliderMin = EndTimeMin * EndTimeCalcConstant
    EndTimeSliderMax = EndTimeMax * EndTimeCalcConstant
    # START TIME
    StartTimeMin = 0.0
    StartTimeMax = 300.0
    StartTimeLineEditAccuracy = 2
    StartTimeCalcConstant = 100  # factor 100 -> precision of 10**2 (two decimals)
    StartTimeSliderMin = StartTimeMin * StartTimeCalcConstant
    StartTimeSliderMax = StartTimeMax * StartTimeCalcConstant
    # LOW LEVEL
    LowLevelFrequencyMin = 0.0
    LowLevelFrequencyMax = 20.0
    LowLevelFrequencyLineEditAccuracy = 2
    LowLevelFrequencyCalcConstant = 100  # factor 100 -> precision of 10**2 (two decimals)
    LowLevelFrequencySliderMin = LowLevelFrequencyMin * LowLevelFrequencyCalcConstant
    LowLevelFrequencySliderMax = LowLevelFrequencyMax * LowLevelFrequencyCalcConstant
    # HIGH LEVEL
    HighLevelFrequencyMin = 0.0
    HighLevelFrequencyMax = 80.0
    HighLevelFrequencyLineEditAccuracy = 2
    HighLevelFrequencyCalcConstant = 100  # factor 100 -> precision of 10**2 (two decimals)
    HighLevelFrequencySliderMin = HighLevelFrequencyMin * HighLevelFrequencyCalcConstant
    HighLevelFrequencySliderMax = HighLevelFrequencyMax * HighLevelFrequencyCalcConstant
    # POINTS DENSITY
    PointsDensityMin = 0
    PointsDensityMax = 3
    PointsDensityLineEditAccuracy = 2
    PointsDensityCalcConstant = 100
    PointsDensitySliderMin = PointsDensityMin * PointsDensityCalcConstant
    PointsDensitySliderMax = PointsDensityMax * PointsDensityCalcConstant
    # VERTICAL OFFSET
    VerticalOffsetMin = 0
    VerticalOffsetMax = 20
    VerticalOffsetLineEditAccuracy = 1
    VerticalOffsetCalcConstant = 10
    VerticalOffsetSliderMin = VerticalOffsetMin * VerticalOffsetCalcConstant
    VerticalOffsetSliderMax = VerticalOffsetMax * VerticalOffsetCalcConstant
    # PLOT PROPERTIES
    PlotHeight = 5
    PlotWidth = 5
    PlotXPosition = 0
    PlotYPosition = 320
    # REQUEST FREQUENCY
    RequestFreqLineEditAccuracy = 100
    RequestFreqCalcConstant = 10
    RequestFreqSliderMin = 0.1 * RequestFreqCalcConstant
    RequestFreqSliderMax = 1.1 * RequestFreqCalcConstant
    def __init__(self):
        # No instance state: the class is used as a namespace of constants.
        pass
|
import email
import re
import tkinter
import base64
import tkinter
from tkinter import filedialog
from tkinter import messagebox
from bs4 import BeautifulSoup
"""splits urls from the msg"""
def souper(html_content):
    """Extract href URLs from HTML content and return them as an HTML list.

    Also prints the collected URL list for the console report.
    """
    return_string=""
    soup=BeautifulSoup(str(html_content), "html.parser")
    # Fix: raw string so "\S" is a regex class, not a (deprecated and
    # invalid) string escape sequence.  The unused prettify() call was
    # removed.
    link_list=soup.find_all(href=re.compile(r"\S*"))
    url_list=[]
    return_string="<h3>urls</h3><ol>"
    for item in link_list:
        url_list.append(item.get("href"))
        return_string=return_string+"<li>"+str(item.get("href"))+"</li>"
    print(url_list)
    return_string=return_string+"</ol>"
    return return_string
"""Splits content of msg"""
def split_content(content):
    """Extract the URL-list HTML from one MIME part.

    Handles multipart/alternative, text/html and text/plain parts; other
    content types yield an empty string.
    """
    return_string=""
    if(content.get_content_type()=="multipart/alternative"):
        walker= content.walk()
        for its in walker:
            #print(its.get_payload())
            return_string=souper(its.get_payload())
            # NOTE(review): this second call parses the part object itself
            # and discards the result — looks redundant; confirm before
            # removing (souper also prints as a side effect).
            souper(its)
    elif((content.get_content_type()=="text/html") | (content.get_content_type()=="text/plain")):
        walker= content.walk()
        for its in walker:
            #print(its.get_payload())
            return_string=souper(its.get_payload())
    return return_string
"""Prints Mail details from Mail Header"""
def mail_details(maile):
    """Print authentication and routing details from the mail headers and
    return a summary as an HTML <ul> string.

    `maile` is an email.message.Message-like object (indexable by header).
    """
    html_string="<ul>"
    str_split=str(maile["Authentication-Results"]).split(" ")
    #print(str_split["spf"]+"\n"+str_split["dkim"])
    # Pull the dkim= / spf= / smtp.mailfrom= results out of the header.
    for item in str_split:
        if(re.search("dkim\w*", item)!=None) | (re.search("spf\w*", item)!=None) | (re.search("smtp.mailfrom\w*", item)!=None) :
            out_split=item.split("=")
            print(out_split[0].upper()+" :" + out_split[1])
            html_string=html_string+"<li>"+ out_split[0].upper() + ":" + out_split[1] + "</li>"
    # Common identity headers, collected into the HTML summary.
    Header_from=str(maile["From"])
    To=str(maile["To"])
    Subject= str(maile["Subject"])
    Message_ID =str(maile["Message-ID"])
    Date= str(maile["Date"])
    X_Sender=str(maile["X-Sender"])
    X_Mailer= str(maile["X-Mailer"])
    Delivered_To = str(maile["Delivered-To"])
    Forwarded= str(maile["Auto-Submitted"])
    dict_1={"Header_from": Header_from, "To": To,"Message_ID": Message_ID, "Date" : Date,"X_Sender" : X_Sender, "X_Mailer": X_Mailer, "Delivered_To": Delivered_To,"Forwarded": Forwarded}
    for item in dict_1:
        html_string=html_string+"<li>"+ item + ":" + dict_1[item] + "</li>"
    html_string=html_string+"</ul>"
    #print(maile["Received"])
    # Each hop prepends a Received header, so the list runs newest-first.
    print("PATH taken by MAIL")
    Received_List=[]
    RL=0
    for item in maile.items():
        if(item[0]=="Received"):
            Received_List.append(item[1])
            RL=RL+1
    LL=0
    for item in Received_List:
        print("PATH_Value : "+ str(len(Received_List)-LL)+ "\n")
        # First dotted quad found in the hop line, if any.
        IP=re.search("(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",item)
        if(IP!=None):
            print(IP[0])
            print(dir(IP))
        print("______________________________________")
        print(item + "\n")
        LL=LL+1
    return html_string
def reader(maile):
    """Render header details plus extracted URLs of a parsed mail as HTML."""
    html_string=mail_details(maile)
    return_string=""
    all_items=maile.get_payload()
    print(all_items)
    # NOTE(review): return_string is overwritten each iteration, so only the
    # URLs from the last payload part survive — confirm intended.
    for item in all_items:
        return_string=split_content(item)
    print("__________________________________________________________________________________________")
    content_type=maile.get_content_type()
    return html_string+return_string
# Fix: `file` was never defined at module level, so this call raised a
# NameError the moment the module was imported.  The stray cleanup call is
# disabled until a real file handle exists here.
# file.close()
#filename="SC.eml"
#reader(filename)
|
'''
Created on 15 nov. 2012
@author: David
inspired by Telmo Menezes's work : telmomenezes.com
'''
import random
import numpy as np
import operator as op
import math
import graph_types.Directed_WeightedGWU as dwgwu
import graph_types.Undirected_WeightedGWU as uwgwu
import graph_types.Directed_UnweightedGWU as dugwu
import graph_types.Undirected_UnweightedGWU as uugwu
"""
contains one main function :
*grow_network : takes a tree of decision and returns the graph that grows according to those rules
Tree : consider an edge, returns a real number
max depth : 3.
each leaf can be one of :
"OrigId" "TargId" "OrigInDegree" "OrigOutDegree" "TargInDegree" "TargOutDegree"
"OrigInStrength" "OrigOutStrength" "TargInStrength" "TargOutStrength"
"DirectDistance" "ReversedDistance"
each node can be one of : + - * / exp log abs min max opp inv
Growth algorithm :
begins with an empty graph with the number of nodes of the real network
at each step the tree gives a probability to every possible edge
one edge is chosen randomly
until the number of edges equals the number of edges of the real network
"""
#avoid decorators syntax problems for line_profiling
# NOTE: `__builtin__` makes this module Python 2 only (renamed `builtins`
# in Python 3).  When line_profiler is not active, `profile` becomes a
# pass-through decorator so @profile-decorated functions still run.
import __builtin__
try:
    __builtin__.profile
except AttributeError:
    # No line profiler, provide a pass-through version
    def profile(func): return func
    __builtin__.profile = profile
def grow_network(decision_tree, number_of_nodes, number_of_steps, graph=None):
    """Grow a graph according to the rules encoded in the decision tree.

    The grown graph has directed edges; whether they are weighted depends
    on the tree's network type.  Only the "with_constants" evaluation
    method is currently supported; any other (or missing) tree_type is an
    error.
    """
    kind = decision_tree.getParam("tree_type")
    if kind != "with_constants":
        raise Exception("no tree_type given")
    return grow_network_with_constants(
        decision_tree, number_of_nodes, number_of_steps, graph)
'''
Functions that grow a network according to the method of evaluation used
'''
# getattr ?
def createGraph(network_type, initial_network=None):
    """Instantiate the graph wrapper matching `network_type`.

    Raises the same Exception as before when the type is unknown.
    """
    factories = {
        "directed_weighted": dwgwu.Directed_WeightedGWU,
        "undirected_weighted": uwgwu.Undirected_WeightedGWU,
        "directed_unweighted": dugwu.Directed_UnweightedGWU,
        "undirected_unweighted": uugwu.Undirected_UnweightedGWU,
    }
    if network_type not in factories:
        raise Exception("network_type not given")
    return factories[network_type](initial_network)
'''
def grow_simple_network(graph,decision_tree,number_of_nodes, number_of_edges):
takes a tree of decision and returns the graph that grows according to those rules
graph can be (un)directed/(un)weighted
#begins with an empty graph with the number of nodes of the real network
for i in xrange(number_of_nodes) :
graph.add_node(i)
#adds one edge according to its probability
for i in xrange(number_of_edges) :
#each edge has a probability that is the result of the tree
probas = calc(decision_tree.getRoot(),graph)
#we remove unnecessary edges : self loops, negative proba
#we choose one among remaining ones
edge,_ = choose_edge(probas, graph)
if edge is None : #this can happen if every edge has a -infinity probability thanks to log or / or - exp...
break
graph.add_edge(*edge)
return graph
'''
@profile
def grow_network_with_constants(decision_tree, number_of_nodes, number_of_edges, graph=None):
    '''Grow a graph edge-by-edge until it has `number_of_edges` edges.

    The decision tree assigns every candidate edge a probability; edges are
    drawn from that distribution.  When the probability landscape barely
    changes between steps, the batch size is doubled to amortise the cost
    of recomputing it.  The graph can be (un)directed/(un)weighted.
    '''
    network_type = decision_tree.getParam('network_type')
    if graph is None:
        graph = createGraph(network_type)
    # Append new nodes after any pre-existing ones.
    number_of_nodes_init = graph.number_of_nodes()
    for i in range(number_of_nodes):
        graph.add_node(i + number_of_nodes_init)
    # adds one edge according to its probability
    number_of_steps = 1
    old_probas = None
    while graph.number_of_edges() < number_of_edges:
        # each edge has a probability that is the result of the tree
        probas = calc_with_constants(decision_tree.getRoot(), graph)
        # if probas stay near from last step, we double the number of new edges created
        # NOTE: `/ 2` is integer division under Python 2 (this module's
        # target); use // if porting to Python 3.
        if near(probas, old_probas):
            number_of_steps *= 2
        else:
            number_of_steps = max(1, number_of_steps / 2)
        old_probas = probas
        list_edges = []
        for _ in range(number_of_steps):
            # we remove unnecessary edges : self loops, negative proba
            # we choose one among remaining ones
            edge, weight_value = choose_edge(probas, graph)
            if edge is None:  # this can happen if every edge has a -infinity probability thanks to log or / or - exp...
                break
            source, target = edge
            list_edges.append((source, target, {"weight": weight_value}))
        if len(
                list_edges) == 0:  # this can happen if every edge has a -infinity probability thanks to log or / or - exp...
            break
        graph.add_edges_from(list_edges)
    return graph
def near(probas, old_probas):
    """Return True when `probas` is element-wise close to `old_probas`.

    "Close" means the largest finite absolute difference is below half the
    largest finite absolute probability.  Used to decide whether the edge
    probability landscape changed enough to shrink the batch size.

    Fix: the original fell off the end (implicitly returning None) when
    the arrays were not close; every path now returns an explicit bool.
    """
    if old_probas is None:
        return False
    try:
        diff = np.absolute(probas - old_probas)
        maxdiff = np.max(diff[np.isfinite(diff)])
        pb = np.absolute(probas)
        threshold = np.max(pb[np.isfinite(pb)])
        if maxdiff < threshold / 2:
            return True
    except ValueError:
        # np.max of an empty (all non-finite) selection raises ValueError.
        return False
    return False
"""
def grow_network_with_constants_multi_step(decision_tree, number_of_nodes, number_of_steps, graph=None):
'''takes a tree of decision and returns the graph that grows according to those rules'''
'''graph can be (un)directed/(un)weighted'''
network_type = decision_tree.getParam('network_type')
graph = createGraph(network_type)
for i in range(number_of_nodes):
graph.add_node(i)
# adds one edge according to its probability
for _ in range(10):
# each edge has a probability that is the result of the tree
# we remove unnecessary edges : self loops, negative proba
# we choose one among remaining ones
probas = calc_with_constants(decision_tree.getRoot(), graph)
list_weighted_edges = choose_edges(probas, graph, number_of_steps / 10)
for edge, weight_value in list_weighted_edges:
if edge is None: # this can happen if every edge has a -infinity probability thanks to log or / or - exp...
break
if graph.isWeighted():
graph.add_edge(*edge, weight=weight_value)
else:
graph.add_edge(*edge)
return graph
"""
'''
Functions that let us choose a random element in the matrix of probabilities
'''
@profile
def choose_edge(probas, network):
    ''' takes a matrix of probabilities and a network,
    returns an edge (no self loop, not already present in the network) according to probabilities and its weight for the network'''
    '''the returned weight is (1+erf(proba)) /2 : because this function takes a number in R and return a number between 0 and 1'''
    # probas can contain a number + infinity, -inifinity, nan
    # Sample N random candidate cells (N = node count) rather than scanning
    # the full NxN probability matrix.
    coord_i = np.random.randint(0, network.number_of_nodes(), network.number_of_nodes())
    coord_j = np.random.randint(0, network.number_of_nodes(), network.number_of_nodes())
    liste_probas = zip(zip(coord_i, coord_j), probas[coord_i, coord_j])
    # we list possible edge : no self loops, no existing edges, no negative probabilities
    edge = network.has_edge
    possible_edges = [x for x in liste_probas if x[1] > float('-inf') and x[0][0] != x[0][1] and not edge(*x[0])]
    # if there is no possible edge, we stop the building of the network
    if len(possible_edges) == 0:
        return (None, 0)
    # we list edges with strictly positive probabilities
    positive_edges = [x for x in possible_edges if x[1] > 0]
    # if every probability is negative, we choose one edge among the possible
    if len(positive_edges) == 0:
        edge, weight = random.choice(possible_edges)
        return (edge, normalize(weight))
    # we list edges with infinite probabilities
    infinite_edges = [x for x in positive_edges if x[1] == float('+inf')]
    # if some probability are infinite, we choose one edge among the inifinite probabilities
    if len(infinite_edges) != 0:
        weighted_edge = random.choice(infinite_edges)
        return (weighted_edge[0], 1)
    # if there is one positive probability, we choose one edge between those with positive probability
    # (roulette-wheel selection proportional to probability)
    weights_sum = sum(weighted_edge[1] for weighted_edge in positive_edges)
    rand = random.random() * weights_sum
    for edge, weight in positive_edges:
        rand -= weight
        if rand <= 0:
            return (edge, normalize(weight))
    # if weights_sum = +infty but probabilities are different from + infinity,
    # we can have this possibility
    return random.choice(possible_edges)
"""
def choose_edges(probas, network, number):
''' takes a matrix of probabilities and a network,
returns N edges (no self loop, not already present in the network) according to probabilities and its weight for the network'''
'''the returned weight is (1+erf(proba)) /2 : because this function takes a number in R and return a number between 0 and 1'''
# we mark impossible edge, because it faster to remove them this way instead of filtering the matrix enumerated
# finding edges in matrices is in constant time
# finding edges in sequences is in linear time
# gives -infinity as probability to self loops
np.fill_diagonal(probas, float('-inf'))
# gives -infinity as probability to already existing edges
if network.isDirected():
for edge in network.edges_iter():
probas[edge] = float('-inf')
else:
'because edges are only stored once : begin-end and not end-begin'
for target, origin in network.edges_iter():
probas[origin, target] = float('-inf')
probas[target, origin] = float('-inf')
# probas can contain a number + infinity, -inifinity, nan
liste_probas = sample(probas,network.number_of_nodes())
# we list possible edge : no self loops, no existing edges, no negative probabilities
possible_edges = [x for x in liste_probas if x[1] > float('-inf')]
# we list edges with strictly positive probabilities
positive_edges = [x for x in possible_edges if x[1] > 0]
# we list edges with infinite probabilities
infinite_edges = [x for x in positive_edges if x[1] == float('+inf')]
edges_result = []
for _ in range(number):
weighted_edge = choose_edge_among(possible_edges, positive_edges, infinite_edges)
edges_result.append(weighted_edge)
try:
possible_edges.remove(weighted_edge)
print("possible")
positive_edges.remove(weighted_edge)
print("positive")
infinite_edges.remove(weighted_edge)
print("infinite")
except:
pass
return edges_result
"""
def choose_edge_among(possible_edges, positive_edges, infinite_edges):
    ''' possible_edges and positive_edges and
    returns an edge (no self loop, not already present in the network) according to probabilities and its weight for the network'''
    '''the returned weight is (1+erf(proba)) /2 : because this function takes a number in R and return a number between 0 and 1'''
    # No candidate at all: signal the caller to stop growing.
    if len(possible_edges) == 0:
        return (None, 0)
    # if every probability is negative, we choose one edge among the possible
    if len(positive_edges) == 0:
        edge, weight = random.choice(possible_edges)
        return (edge, normalize(weight))
    # if some probability are infinite, we choose one edge among the inifinite probabilities
    if len(infinite_edges) != 0:
        weighted_edge = random.choice(infinite_edges)
        return (weighted_edge[0], 1)
    # if there is one positive probability, we choose one edge between those with positive probability
    # (roulette-wheel selection proportional to probability)
    weights_sum = sum(weighted_edge[1] for weighted_edge in positive_edges)
    rand = random.random() * weights_sum
    for edge, weight in positive_edges:
        rand -= weight
        if rand <= 0:
            return (edge, normalize(weight))
    # if weights_sum = +infty but probabilities are different from + infinity,
    # we can have this possibility
    return random.choice(possible_edges)
'''
Functions that compute the tree for each node
'''
def calc(node, graph):
    ''' takes a node of the decision tree and a graph
    computes recursively a value for each edge of the graph at the same time
    a node can be a leaf with a variable or a function
    returns a 2D array containing the value for each edge
    '''
    data = node.getData()
    # recursive computation on function nodes : we always have 2 children if not a leaf, by construction
    if node.isLeaf():
        # Leaves name a graph metric (see compute_leaf).
        return compute_leaf(graph, data)
    else:
        # values returned are arrays of dimension 2
        value0 = calc(node.getChild(0), graph)
        value1 = calc(node.getChild(1), graph)
        return compute_function(data, value0, value1)
def calc_with_constants(node, graph):
    ''' takes a node of the decision tree and a graph
    computes recursively a value for each edge of the graph at the same time
    returns a 2D array containing the value for each edge
    difference is that leaves of the tree contain a constant and a variable
    '''
    data = node.getData()
    # recursive computation on function nodes : we always have 2 children if not a leaf, by construction
    if node.isLeaf():
        # Leaf payload is a (constant, variable-name) pair.
        constant, variable = data
        return constant * (compute_leaf(graph, variable))
    else:
        # values returned are arrays of dimension 2
        # Unary operators only evaluate their first child.
        if data in ["H", "opp", "T", "inv", "exp", "abs", "log"]:
            value0 = calc_with_constants(node.getChild(0), graph)
            value1 = None
        else:
            value0 = calc_with_constants(node.getChild(0), graph)
            value1 = calc_with_constants(node.getChild(1), graph)
        return compute_function(data, value0, value1)
def compute_function(data, value0, value1):
    '''Dispatch the operator named by ``data`` onto the operand arrays
    ``value0`` and ``value1``.

    ``value1`` may be None for unary operators, which ignore it.
    '''
    dispatch = {
        "+": op.add, "-": op.sub, "*": op.mul,
        "min": np.minimum, "max": np.maximum,
        "exp": exp, "log": log, "abs": abs,
        "/": div, "inv": inv, "opp": opp,
        "H": H, "T": T, "N": N,
        ">": greater, "<": less, "=": around,
    }
    return dispatch[data](value0, value1)
def compute_leaf(graph, variable):
    """Evaluate a leaf: call the zero-argument graph method named
    ``variable`` and return its result.

    (The previous docstring was copy-pasted from compute_function and
    described the wrong behavior.)
    """
    return getattr(graph, variable)()
def div(a, b):
    """Quasi-division a / (1 + b): shifting the denominator by one
    avoids dividing by zero in the common case b == 0."""
    denominator = 1 + b
    return a / denominator
def exp(a, b):
    """Element-wise e**a; ``b`` is ignored (unary operator that keeps
    the common binary signature)."""
    result = np.exp(a)
    return result
def log(a, b):
    """Element-wise log(1 + a): the shift by one avoids log(0) in the
    common case a == 0.  ``b`` is ignored (unary operator)."""
    shifted = 1 + a
    return np.log(shifted)
def abs(a, b):
    """Element-wise absolute value of ``a``; ``b`` is ignored.

    NOTE: intentionally shadows the builtin ``abs`` -- the dispatch
    table in compute_function refers to this module-level name.
    """
    magnitude = np.absolute(a)
    return magnitude
def inv(a, b):
    """Quasi-inverse 1 / (1 + a): the shift by one avoids dividing by
    zero in the common case a == 0.  ``b`` is ignored (unary)."""
    shifted = 1 + a
    return 1 / shifted
def opp(a, b):
    """Element-wise negation of ``a``; ``b`` is ignored (unary)."""
    return -a
def H(a, b):
    """Heaviside step on the array ``a``: 1.0 where a >= 0, else 0.0,
    element-wise.  ``b`` is ignored (unary)."""
    mask = a >= 0
    return mask.astype(float)
def T(a, b):
    """Sigmoid-like squash (tanh(a) + 1) / 2, mapping the reals into
    (0, 1) element-wise.  ``b`` is ignored (unary)."""
    squashed = np.tanh(a) + 1
    return squashed / 2
def N(a, b):
    """Gaussian bump exp(-a**2), peaking at 1.0 for a == 0,
    element-wise.  ``b`` is ignored (unary)."""
    return np.exp(-(a ** 2))
def greater(a, b):
    """1.0 where a >= b element-wise, else 0.0.

    Note the comparison is non-strict (>=) even though the operator is
    registered under ">" in compute_function; the previous docstring
    ('1 if a > b') incorrectly described a strict comparison.
    """
    return (a >= b).astype(float)
def less(a, b):
    """1.0 where a < b element-wise (strict comparison), else 0.0."""
    mask = a < b
    return mask.astype(float)
def around(a, b):
    """1.0 where |a - b| < 1 element-wise (i.e. b-1 < a < b+1),
    else 0.0."""
    distance = np.absolute(a - b)
    return (distance < 1).astype(float)
def normalize(x):
    """Map a real number into (0, 1) via (tanh(x) + 1) / 2."""
    squashed = math.tanh(x) + 1
    return squashed / 2
|
from pydantic import BaseModel
from typing import Optional,List
import requests
from fastapi import Request, FastAPI
from typing import Optional
class RegisterModel(BaseModel):
    """Request body for registering a service (minimal form, no
    description field -- see ServiceRegisterModel for the full form)."""
    service_name: str
    api_url: str
    permission: str
    user_id: str
class ServiceRegisterModel(BaseModel):
    """Request body for registering a service, with an optional
    free-text description."""
    service_name : str
    api_url : str
    permission : str
    user_id : str
    description : Optional[str] = None
class UserInfoModel(BaseModel):
    """User profile payload.

    NOTE(review): fields are PascalCase unlike the snake_case used by
    the other models -- presumably mirroring an upstream API's JSON
    keys; confirm with the caller before renaming.
    """
    Name: str
    Email: str
    Image: str
    Token: str
class ServiceDeleteModel(BaseModel):
    """Request body for deleting a service owned by a user."""
    service_id: str
    user_id: str
class ServiceUpdateModel(BaseModel):
    """Request body for updating a service; only service_id and user_id
    are required, every other field is an optional partial update."""
    service_name : Optional[str] = None
    api_url : Optional[str] = None
    permission : Optional[str] = None
    service_id : str
    user_id : str
    description : Optional[str] = None
class SUPatchModel(BaseModel):
    """PATCH payload for a service: every field optional, so callers
    can send only the attributes they want changed."""
    service_id : Optional[str] = None
    service_name : Optional[str] = None
    api_url : Optional[str] = None
    permission : Optional[str] = None
    status : Optional[str] = None
    description : Optional[str] = None
|
from django.test import TestCase
class HomePageTest(TestCase):
    """Tests for the site home page view at '/'."""
    def test_home_page_renders_home_page_template(self):
        # '/' must be rendered with the home.html template
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'home.html')
    def test_view_sets_active_class_on_link(self):
        # the view exposes 'navbar' in the template context so the
        # template can mark the matching nav link as active
        response = self.client.get('/')
        self.assertEqual('home', response.context['navbar'])
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gallery_window.ui',
# licensing of 'gallery_window.ui' applies.
#
# Created: Thu Jan 2 17:55:43 2020
# by: pyside2-uic running on PySide2 5.9.0~a1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_GalleryWindow(object):
    """Auto-generated (pyside2-uic) UI definition for the gallery window.

    NOTE: generated from 'gallery_window.ui'; edits here are lost on
    regeneration -- change the .ui file instead.
    """
    def setupUi(self, GalleryWindow):
        """Build the widget hierarchy onto the given QMainWindow:
        a central scroll area containing a grid layout for gallery
        thumbnails, plus an (empty) menu bar and status bar."""
        GalleryWindow.setObjectName("GalleryWindow")
        GalleryWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(GalleryWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.scrollArea_gallery = QtWidgets.QScrollArea(self.centralwidget)
        self.scrollArea_gallery.setWidgetResizable(True)
        self.scrollArea_gallery.setObjectName("scrollArea_gallery")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 780, 533))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.gridLayoutWidget = QtWidgets.QWidget(self.scrollAreaWidgetContents)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 0, 781, 531))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout_gallery = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout_gallery.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_gallery.setObjectName("gridLayout_gallery")
        self.scrollArea_gallery.setWidget(self.scrollAreaWidgetContents)
        self.verticalLayout.addWidget(self.scrollArea_gallery)
        GalleryWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(GalleryWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
        self.menubar.setObjectName("menubar")
        GalleryWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(GalleryWindow)
        self.statusbar.setObjectName("statusbar")
        GalleryWindow.setStatusBar(self.statusbar)
        self.retranslateUi(GalleryWindow)
        QtCore.QMetaObject.connectSlotsByName(GalleryWindow)
    def retranslateUi(self, GalleryWindow):
        """Set translatable UI strings (only the window title here)."""
        GalleryWindow.setWindowTitle(QtWidgets.QApplication.translate("GalleryWindow", "MainWindow", None, -1))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
GalleryWindow = QtWidgets.QMainWindow()
ui = Ui_GalleryWindow()
ui.setupUi(GalleryWindow)
GalleryWindow.show()
sys.exit(app.exec_())
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import unittest
import numpy as np
from smqtk.utils import prob_utils
class TestAdjustProba (unittest.TestCase):
    """Unit tests for smqtk.utils.prob_utils.adjust_proba.

    NOTE(review): assertRaisesRegexp is a deprecated alias removed in
    Python 3.12; it is kept here because the file's __future__ imports
    suggest Python 2 compatibility -- switch to assertRaisesRegex when
    dropping Python 2.
    """
    def test_single_class(self):
        # with a single class, adjusted probabilities must always
        # renormalize to 1 regardless of the adjustment's sign
        num = 10
        dim = 1
        proba = np.random.rand(num, dim)
        self.assertTrue(np.allclose(1, prob_utils.adjust_proba(proba, [1])))
        proba = np.random.rand(num, dim)
        self.assertTrue(np.allclose(1, prob_utils.adjust_proba(proba, [-1])))
        proba = np.ones_like(proba)
        self.assertTrue(np.allclose(1, prob_utils.adjust_proba(proba, [1])))
    def test_failure_cases(self):
        # all-zero and negative probabilities must be rejected
        num = 10
        dim = 3
        proba = np.zeros((num, dim))
        self.assertRaisesRegexp(ValueError,
                                "At least one probability must be positive",
                                prob_utils.adjust_proba, proba, [1, 2, 3])
        proba[1] = -1.0
        proba[2] = 1.0
        self.assertRaisesRegexp(ValueError,
                                "Probabilities must be at least 0",
                                prob_utils.adjust_proba, proba, [1, 2, 3])
    def test_shape_cases(self):
        # mismatched probability/adjustment dimensions must be rejected
        num = 10
        dim = 3
        proba = np.random.rand(num, dim)
        self.assertRaisesRegexp(ValueError,
                                "The dimensions of probabilities and "
                                "adjustments must be compatible.",
                                prob_utils.adjust_proba, proba, [1, 2])
        proba = np.random.rand(1, dim)
        proba /= proba.sum()
        self.assertTrue(
            np.allclose(proba, prob_utils.adjust_proba(proba, [1, 1, 1])))
        self.assertRaisesRegexp(ValueError,
                                "The dimensions of probabilities and "
                                "adjustments must be compatible.",
                                prob_utils.adjust_proba,
                                np.ones((num, 1)), np.ones((1, num)))
    def test_adjust_constant(self):
        # a uniform adjustment vector must leave probabilities unchanged
        num = 10
        dim = 3
        proba = np.random.rand(num, dim)
        proba /= proba.sum(axis=1, keepdims=True)
        self.assertTrue(
            np.allclose(proba, prob_utils.adjust_proba(proba, [1, 1, 1])))
        self.assertTrue(
            np.allclose(proba, prob_utils.adjust_proba(proba, [10, 10, 10])))
    def test_adjust_serial_vs_sum(self):
        # applying two adjustments in sequence must equal applying
        # their sum once; applying an adjustment then its negation
        # must round-trip back to the original
        num = 10
        dim = 3
        proba = np.random.rand(num, dim)
        proba /= proba.sum(axis=1, keepdims=True)
        adj1 = np.array([1, 2, 3])
        adj2 = np.array([2, 0, -2])
        proba_fst = prob_utils.adjust_proba(proba, adj1)
        proba_snd = prob_utils.adjust_proba(proba_fst, adj2)
        proba_sum = prob_utils.adjust_proba(proba, adj1 + adj2)
        self.assertTrue(np.allclose(proba_snd, proba_sum))
        proba_fst = prob_utils.adjust_proba(proba, adj1)
        proba_snd = prob_utils.adjust_proba(proba_fst, -adj1)
        self.assertTrue(np.allclose(proba_snd, proba))
    def test_adjust(self):
        # a positive adjustment on one class must raise exactly that
        # class's probabilities and lower the others (and vice versa)
        num = 10
        dim = 3
        proba = np.random.rand(num, dim)
        proba /= proba.sum(axis=1, keepdims=True)
        adj = [0, 1, 0]
        proba_post = prob_utils.adjust_proba(proba, adj)
        comp = proba_post > proba
        self.assertTrue(np.all([False, True, False] == comp))
        comp = proba_post < proba
        self.assertTrue(np.all([True, False, True] == comp))
        comp = np.isclose(proba, proba_post)
        self.assertFalse(np.any(comp))
        adj = [-1, 0, 0]
        proba_post = prob_utils.adjust_proba(proba, adj)
        comp = proba_post < proba
        self.assertTrue(np.all([True, False, False] == comp))
        comp = proba_post > proba
        self.assertTrue(np.all([False, True, True] == comp))
        comp = np.isclose(proba, proba_post)
        self.assertFalse(np.any(comp))
        adj = [1.5, 0, -1.5]
        proba_post = prob_utils.adjust_proba(proba, adj)
        comp = proba_post < proba
        self.assertTrue(np.all([False, True] == comp[:, [0, 2]]))
        comp = proba_post > proba
        self.assertTrue(np.all([True, False] == comp[:, [0, 2]]))
        comp = np.isclose(proba, proba_post)
        self.assertFalse(np.all([False, True, False] == comp))
|
import subprocess
# ---------------------------------------
# SOLVENTFIT CALIBRATION fit function
# ---------------------------------------
def fit(nx_design, nx_var, xdatfile, ydatfile, modelfile, expfile, priorsfile, disc=True, writepost=True, writedisc=True,
        emul_params=None, calib_params=None, disc_params=None):
    """Run the SOLVENTFIT calibration fit by shelling out to
    'solvfit_calibfit.R'.

    Parameters
    ----------
    nx_design, nx_var : str
        Design/variable input counts, forwarded to R as text.
    xdatfile, ydatfile, expfile, priorsfile : str
        Paths to the input, output, experiment and priors files.
    modelfile : str
        Path where the R script writes the fitted model; returned as-is.
    disc, writepost, writedisc : bool
        Flags forwarded to the script encoded as '1'/'0'.
    emul_params, calib_params, disc_params : dict or None
        Optional overrides; sensible defaults are filled in when None.

    Returns
    -------
    str
        ``modelfile``.
    """
    if emul_params is None:
        emul_params = {'bte': '[0,1000,1]',
                       'nterms': '20',
                       'order': '2'}
    if calib_params is None:
        calib_params = {'bte': '[0,1000,1]'}
    if disc_params is None:
        disc_params = {'nterms': '20',
                       'order': '2'}
    # subprocess arguments must all be strings; encode the boolean flags
    booldict = {True: '1', False: '0'}
    disc = booldict[disc]
    writepost = booldict[writepost]
    writedisc = booldict[writedisc]
    p = subprocess.Popen(['Rscript', 'solvfit_calibfit.R',
                          nx_design, nx_var, xdatfile, ydatfile, modelfile,
                          expfile, priorsfile, disc, writepost, writedisc,
                          emul_params['bte'], emul_params['nterms'], emul_params['order'],
                          calib_params['bte'], disc_params['nterms'], disc_params['order']],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # communicate() returns bytes; decode so the output prints readably
    # instead of as b'...' literals
    print(stdout.decode(errors='replace'))
    print(stderr.decode(errors='replace'))
    return modelfile
# ---------------------------------------
# Example usage
# ---------------------------------------
# NOTE(review): this example runs at import time and shells out to
# Rscript; consider guarding it with `if __name__ == "__main__":`.
rdsfile = fit('1','3','example/xdat.csv','example/ydat.csv','solvfit_calibrator.rds',
              'example/expdat1.csv','example/priors.txt', disc=True)
print(rdsfile)
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 12:32:27 2015
@author: bolaka
"""
import sys
import math
from pprint import pprint
from itertools import chain, combinations
from datetime import datetime, timedelta
import random
import pandas as pd
import numpy as np
from sklearn import cross_validation, tree, svm, linear_model, preprocessing, \
neighbors, ensemble
import matplotlib.pyplot as plt
from workalendar.usa import Maryland
def rmsle(actual_values, predicted_values):
    '''
    Root Mean Squared Logarithmic Error.
    See https://www.kaggle.com/c/bike-sharing-demand/details/evaluation
    '''
    assert len(actual_values) == len(predicted_values), \
        "Both input paramaters should have the same length"
    # Inputs may be numpy arrays or plain lists; flatten to 1D either way
    actual_values = np.ravel(actual_values)
    predicted_values = np.ravel(predicted_values)
    squared_log_errors = ((math.log(p + 1) - math.log(a + 1)) ** 2
                          for a, p in zip(actual_values, predicted_values))
    return math.sqrt(sum(squared_log_errors) / len(actual_values))
def pre_process_data(data, selected_columns):
    '''
    Does some pre-processing on the existing columns and only keeps
    columns present in [selected_columns].
    Returns a numpy array

    NOTE(review): mutates the passed-in DataFrame in place and writes a
    debug CSV to a hard-coded path as a side effect.
    '''
    # Some 'magic' string to datatime function
    data['datetime'] = pd.to_datetime(data['datetime'])
    # Since the hour of day is cyclical, e.g. 01:00 is equaly far from midnight
    # as 23:00 we need to represent this in a meaningful way. We use both sin
    # and cos, to make sure that 12:00 != 00:00 (which we cannot prevent if we only
    # use sin)
    data['hour_of_day'] = data['datetime'].apply(lambda i: i.hour)
    data['hour_of_day_sin'] = data['hour_of_day'].apply(lambda hour: math.sin(2*math.pi*hour/24))
    data['hour_of_day_cos'] = data['hour_of_day'].apply(lambda hour: math.cos(2*math.pi*hour/24))
    # Since it seems the service got more popular over time, we might need some
    # way of telling how far we are from the beginning
    first_day = datetime.strptime('2011-01-01', "%Y-%m-%d").date()
    data['day_since_begin'] = data['datetime'].apply(lambda i: (i.date()-first_day).days)
    # For some reason the dataset didn't indicate new year's day and christmas
    # day as holidays. Therefore we also use this external libraryto check if
    # a day is a holiday
    cal = Maryland()
    holidays = cal.holidays(2011)
    holidays += cal.holidays(2012)
    holidays = set([dt for (dt, name) in holidays])
    data['holiday_external'] = data['datetime'].apply(lambda i: int(i.date() in holidays))
    # Is it a holiday tomorrow or yesterday?
    data['almost_holiday'] = data['datetime'].apply(
        lambda i: int(i.date() - timedelta(days=1) in holidays or
                      i.date() + timedelta(days=1) in holidays)
    )
    # Some simple model of rush hour
    data['rush_hour'] = data['datetime'].apply(
        lambda i: min([math.fabs(8-i.hour), math.fabs(18-i.hour)])
    )
    # NOTE(review): DataFrame.ix is deprecated and removed in modern
    # pandas; switch to .loc when upgrading the pandas dependency.
    data.ix[data['workingday'] == 0,'rush_hour'] = \
        data['datetime'].apply(
            lambda i: math.fabs(14-i.hour)
        )
    data.ix[data['holiday_external'] == 1,'rush_hour'] = \
        data['datetime'].apply(
            lambda i: math.fabs(14-i.hour)
        )
    # Add the day of the week
    data['weekday'] = data['datetime'].apply(lambda i: i.weekday())
    # Some variables have no numerical value, they are categorical. E.g. the weather
    # variable has numerical values, but they cannot be interpreted as such.
    # In other words value 2 is not two times as small as value 4.
    # A method to deal with this is one-hot-enconding, which splits the existing
    # variable in n variables, where n equals the number of possible values.
    # See
    for column in ['season', 'weather', 'weekday']:
        dummies = pd.get_dummies(data[column])
        # Concat actual column name with index
        new_column_names = [column + str(i) for i in dummies.columns]
        data[new_column_names] = dummies
    data.to_csv('/home/bolaka/Bike Sharing/train-arnov.csv', index=False)
    data = data[selected_columns]
    return data.values
def print_feature_importance(data, features, labels_casual, labels_registered):
    '''
    Fit one random-forest regressor per target (casual / registered
    riders) and print each feature's importance, most important first.
    '''
    clf_c = ensemble.RandomForestRegressor(n_estimators=150)
    clf_r = ensemble.RandomForestRegressor(n_estimators=150)
    clf_c.fit(data, np.ravel(labels_casual))
    clf_r.fit(data, np.ravel(labels_registered))
    for header, model in (('Registered features:', clf_r),
                          ('Casual features:', clf_c)):
        print( header )
        ranked = sorted(zip(features, model.feature_importances_),
                        key=lambda pair: pair[1], reverse=True)
        pprint(ranked)
def main(algorithm='random-forest'):
    '''Train per-target (casual/registered) demand models, cross-validate
    by held-out weeks, plot sample predictions, and write final test
    predictions to CSV.

    algorithm: one of 'ridge', 'decision-tree', 'knn', 'svr',
    'random-forest' (default); raises Exception otherwise.
    '''
    # Read data from file and convert to dataframe
    data = pd.read_csv("/home/bolaka/Bike Sharing/train.csv")
    data = pd.DataFrame(data = data)
    # This list decides which features are going to be used, the rest is filtered out
    features = ['day_since_begin', 'hour_of_day_cos', 'hour_of_day_sin', 'workingday',
                'temp', 'weather1', 'weather3', 'holiday_external', 'almost_holiday',
                'weekday0', 'weekday1', 'weekday2', 'weekday3', 'weekday4', 'weekday5',
                'weekday6', 'humidity', 'windspeed', 'rush_hour', 'holiday']
    # Extract and select features
    train_data = pre_process_data(data, features)
    # Get target values
    train_labels_casual = data[['casual']].values.astype(float)
    train_labels_registered = data[['registered']].values.astype(float)
    train_labels_count = data[['count']].values.astype(float)
    # Inspect feature importance
    print_feature_importance(
        train_data, features, train_labels_casual, train_labels_registered
    )
    # Do cross validation by leaving out specific weeks
    weeks = data['datetime'].apply(lambda i: str(i.year) + '-' + str(i.week))
    # Take out 10 weeks to test on, but don't do ALL permutations
    kf = []
    for fold in cross_validation.LeavePLabelOut(weeks, p=10):
        kf.append(fold)
        if len(kf) == 6:
            break
    scores = []
    for fold, (train_index, test_index) in enumerate(kf):
        print( "Computing fold %d" % fold )
        # Train the model (two regressors: casual and registered riders)
        if algorithm == 'ridge':
            clf_c = linear_model.Ridge(alpha = 1.5)
            clf_r = linear_model.Ridge(alpha = 1.5)
        elif algorithm == 'decision-tree':
            clf_c = tree.DecisionTreeRegressor(max_depth=15)
            clf_r = tree.DecisionTreeRegressor(max_depth=15)
        elif algorithm == 'knn':
            clf_c = neighbors.KNeighborsRegressor(n_neighbors=5, weights='distance')
            clf_r = neighbors.KNeighborsRegressor(n_neighbors=5, weights='distance')
        elif algorithm == 'svr':
            clf_c = svm.SVR(kernel='rbf', C=10000, epsilon=5)
            clf_r = svm.SVR(kernel='rbf', C=10000, epsilon=5)
        elif algorithm == 'random-forest':
            clf_c = ensemble.RandomForestRegressor(n_estimators=150)
            clf_r = ensemble.RandomForestRegressor(n_estimators=150)
        else:
            raise Exception("Unkown algorithm type, choices are: 'ridge'," +
                            " 'decision-tree', 'knn', 'svr', and 'random-forest' (default)")
        clf_c.fit(train_data[train_index],np.ravel(train_labels_casual[train_index]))
        clf_r.fit(train_data[train_index],np.ravel(train_labels_registered[train_index]))
        # Test it
        predicted_c = clf_c.predict(train_data[test_index])
        predicted_r = clf_r.predict(train_data[test_index])
        # Some methods can predict negative values
        predicted_c = [p if p > 0 else 0 for p in predicted_c]
        predicted_r = [p if p > 0 else 0 for p in predicted_r]
        # Plot predicted vs true values for a random week
        df = pd.DataFrame({'datetime': data['datetime'].values[test_index],
                           'true_c': np.ravel(train_labels_casual[test_index]),
                           'predicted_c': np.ravel(predicted_c),
                           'true_r': np.ravel(train_labels_registered[test_index]),
                           'predicted_r': np.ravel(predicted_r)})
        index = random.randint(0,len(df))
        df[index:index+24*7].plot(x='datetime')
        plt.show()
        # Add casual and registered prediction
        predicted = [c+r for (c,r) in zip(predicted_c, predicted_r)]
        scores.append(rmsle(train_labels_count[test_index], predicted))
    # Print average cross-validation score
    avg = sum(scores) / len(scores)
    print( "Average RMSLE:", avg )
    # Train classifier on all data
    clf_c.fit(train_data,np.ravel(train_labels_casual))
    clf_r.fit(train_data,np.ravel(train_labels_registered))
    # Predict test data
    test_data = pd.read_csv("/home/bolaka/Bike Sharing/test.csv")
    test_data = pd.DataFrame(data = test_data)
    transformed_test_data = pre_process_data(test_data, features)
    # Predict all test data
    predicted_c = clf_c.predict(transformed_test_data)
    predicted_r = clf_r.predict(transformed_test_data)
    # Add up casual and registered prediction
    predicted = [c+r for (c,r) in zip(predicted_c, predicted_r)]
    # Some methods can predict negative values
    predicted = [p if p > 0 else 0 for p in predicted]
    # Write the output to a csv file
    output = pd.DataFrame()
    output['datetime'] = test_data['datetime']
    output['count'] = predicted
    # Don't write row numbers, using index=False
    output.to_csv('/home/bolaka/Bike Sharing/predicted.csv', index=False)
if __name__ == "__main__":
if len(sys.argv) == 1:
main()
elif len(sys.argv) == 2:
main(sys.argv[1])
else:
print( "Unknown number of parameters.\n")
print( "Usage 'python prediction.py' or 'python prediction.py [algorithm]'" )
exit(1)
|
#!/usr/bin/env python
import numpy as np
import pymc as pm
from pylab import *
import corner
# Plotting Parameters Setting
# (matplotlib defaults tuned for small publication-style figures:
# Helvetica fonts, editable-text PDF/PS output, doubled tick sizes)
rcParams['figure.figsize'] = 2*1.67323, 1.9*1.67323
rcParams['ps.useafm'] = True
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rcParams['pdf.fonttype'] = 42
matplotlib.rc('xtick.major', size=6)
matplotlib.rc('xtick.minor', size=3)
matplotlib.rc('ytick.major', size=6)
matplotlib.rc('ytick.minor', size=3)
matplotlib.rc('lines', markeredgewidth=0.5*2)
matplotlib.rc('font', size=7*2.0)
# NOTE(review): Python 2 script (`print` statement below; the `/2`
# slice index relies on py2 integer division).
# Stack the last half of each MCMC trace (thinned by 5) into one
# (samples, parameters) array for the corner plot.
vars = ['dE_0', 'eps_a',
        'delta_0', 'alpha',
        'beta']
db = pm.database.pickle.load('M.pickle')
ndim = len(vars)
nsamples = len(db.trace(vars[0])[:])/2
data = db.trace(vars[0])[-nsamples::5]
for var in vars[1:]:
    data = np.vstack([data,db.trace(var)[-nsamples::5]])
data = np.transpose(data)
print data.shape
# round-trip through a text file so the plot can be re-made without
# reloading the pickle database
np.savetxt('CornerData.txt',data)
data = np.loadtxt('CornerData.txt')
# Plot it.
figure = corner.corner(data, bins=20, labels=vars, quantiles=[0.16, 0.5, 0.84],
                       show_titles=True, title_kwargs={"fontsize": 12})
figure.savefig("corner.pdf")
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
# Create your models here.
class HashType(models.Model):
    """Lookup table of hash algorithm types (e.g. MD5, SHA-1)."""
    idHashType = models.AutoField(primary_key=True)
    description = models.CharField(max_length=100)
    #if you use python 2 then use unicode
    def __unicode__(self):
        return self.description
    #if use python 3 then use str
    def __str__(self):
        return self.description
    class Admin:
        pass
class HashFile(models.Model):
    """An uploaded file of hashes, tagged with its hash algorithm type."""
    idHashFile = models.AutoField(primary_key=True)
    file = models.FileField(upload_to="hash/%y/%m/%d-%H%M")
    # on_delete is required from Django 2.0 and accepted by earlier
    # versions, so a single definition covers both.  (Previously
    # idHashType was defined twice; the second assignment silently
    # replaced the first.)
    idHashType = models.ForeignKey(HashType, on_delete=models.CASCADE)
    description = models.TextField()
    #if you use python 2 then use unicode
    def __unicode__(self):
        # format() instead of `+`: FieldFile does not support string
        # concatenation (the old code raised TypeError)
        return "{} ({})".format(self.file, self.idHashType.description)
    #if use python 3 then use str
    def __str__(self):
        return "{} ({})".format(self.file, self.idHashType.description)
    class Admin:
        pass
class FileInstance(models.Model):
    """A concrete file observed on a machine, identified by its hash."""
    idFile = models.AutoField(primary_key=True)
    hash = models.CharField(max_length=128)
    fileLength = models.IntegerField()
    machine = models.CharField(max_length = 20)
    fullPath = models.CharField(max_length = 300)
    fileName = models.CharField(max_length = 100)
    # on_delete is required from Django 2.0 and accepted by earlier
    # versions, so a single definition covers both.  (Previously
    # idHashType was defined twice; the second assignment silently
    # replaced the first.)
    idHashType = models.ForeignKey(HashType, on_delete=models.CASCADE)
    #python 2 in use unicode
    def __unicode__(self):
        return "{} on {} ".format(self.fileName,self.machine)
    #python 3 in use str
    def __str__(self):
        return "{} on {} ".format(self.fileName,self.machine)
    class Admin:
        pass
|
from __future__ import annotations
from PIL import ImageOps, ImageDraw, Image, ImageFont, ImageEnhance
import aiohttp
import asyncio
from io import BytesIO
from redbot.core.utils.chat_formatting import humanize_number
import humanize
from datetime import datetime
from bs4 import BeautifulSoup
def champ_into_pic(champ: arez.Champion) -> Image:
    """Load a champion's avatar icon; icons smaller than 512x512 are
    doubled in size, and a generic error image is returned when no icon
    file exists for the champion."""
    name = champ.name.lower().replace(" ","-").replace("'","")
    try:
        icon = Image.open(f"/home/poopski/mucski/stuff/icons/avatars/{name}.jpg")
        if icon.size < (512, 512):
            icon = icon.resize((icon.width * 2, icon.height * 2))
    except FileNotFoundError:
        icon = Image.open("/home/poopski/mucski/stuff/icons/error.jpg")
    return icon
def statsimage(mp, index):
    """Render one player's row (4620x232) of the match stats table.

    mp: a match participant (exposes champion, player, party_number and
    per-match stats); index: row number, used to alternate the row
    background colour.  Returns a PIL Image.
    """
    crop = 140
    # vertical
    W, H = (4620, 232)
    # padding or margin size
    padding = 10
    # middle
    mid = 61
    # image background color odd and even
    img_color = (14, 34, 43) if index % 2 == 0 else (15, 40, 48)
    # text fill size )
    fill = (255, 255, 255)
    orange = (252, 186, 3)
    green = (7, 252, 3)
    red = (252, 102, 3)
    purple = (240, 3, 252)
    fill = (255, 255, 255)
    # party members share a name colour keyed on their party number
    if mp.party_number == 1:
        color = green
    elif mp.party_number == 2:
        color = orange
    elif mp.party_number == 3:
        color = purple
    elif mp.party_number == 4:
        color = red
    else:
        color = fill
    # new image object
    img = Image.new("RGBA", (W, H), color=img_color)
    # champion icon, cropped top and bottom to fit the row height
    champicon = champ_into_pic(mp.champion)
    border = (0, crop, 0, crop)
    champimgcrop = ImageOps.crop(champicon, border)
    img.paste(champimgcrop, (padding, padding))
    # rank icon (99 = placeholder used for private profiles)
    if mp.player.private:
        rankicon = Image.open(f"/home/poopski/mucski/stuff/icons/ranks/99.png")
    else:
        rankicon = Image.open(f"/home/poopski/mucski/stuff/icons/ranks/{mp.player.ranked_best.rank.value}.png")
    img.paste(rankicon, (1526, mid), mask=rankicon)
    # make the image drawable
    draw = ImageDraw.Draw(img)
    # normal font
    fnt = ImageFont.truetype("/home/poopski/mucski/stuff/arial.ttf", 80)
    # bold font
    fntbld = ImageFont.truetype("/home/poopski/mucski/stuff/arialbd.ttf", 80)
    smallfnt = ImageFont.truetype("/home/poopski/mucski/stuff/arial.ttf", 60)
    name = mp.player.name
    if name == "":
        name = "???"
    # player name and level
    draw.text((512 + padding * 4, mid - 30), name, font=fntbld, fill=color)
    try:
        draw.text((512 + padding * 4, mid + 60), humanize_number(mp.player.calculated_level), font=smallfnt, fill=fill)
    except AttributeError:
        # fall back to the participant's account level when the player
        # object has no calculated_level attribute
        draw.text((512 + padding * 4, mid + 60), humanize_number(mp.account_level), font=smallfnt, fill=fill)
    # credits earned
    draw.text((1736, mid), humanize_number(mp.credits), font=fnt, fill=fill)
    # kda
    draw.text((2036, mid), mp.kda_text, font=fnt, fill=(224, 197, 135))
    # dmg done
    draw.text((2436, mid), humanize_number(mp.damage_done), font=fnt, fill=fill)
    # dmg taken
    draw.text((2826, mid), humanize_number(mp.damage_taken), font=fnt, fill=fill)
    # objective
    draw.text((3226, mid), humanize_number(mp.objective_time), font=fnt, fill=fill)
    # shielding
    draw.text((3456, mid), humanize_number(mp.shielding), font=fnt, fill=fill)
    # healing
    draw.text((3856, mid), humanize_number(mp.healing_done), font=fnt, fill=fill)
    # self healing
    draw.text((4256, mid), humanize_number(mp.healing_self), font=fnt, fill=fill)
    # kda2
    # draw.text((4636, mid), "{:.2f}".format(stats[12]), font=fnt, fill=fill)
    return img
def playerkey(x, y):
    """Render the column-header bar (x by y pixels) for the match
    stats table; column x-offsets match statsimage()."""
    key = Image.new("RGB", (x, y), color=(8, 21, 25))
    draw = ImageDraw.Draw(key)
    fill = (255, 255, 255)
    padding = 10
    fntbld = ImageFont.truetype("/home/poopski/mucski/stuff/arialbd.ttf", 50)
    # (x offset, header text) for each column, left to right
    headers = (
        (20, "CHAMPION"),
        (512 + padding * 4, "PLAYER"),
        (1576, "R"),
        (1736, "CREDITS"),
        (2036, "K/D/A"),
        (2436, "DAMAGE"),
        (2826, "TAKEN"),
        (3226, "OBJ"),
        (3456, "SHIELDING"),
        (3856, "HEALING"),
        (4256, "SELF HEAL"),
    )
    for xpos, label in headers:
        draw.text((xpos, 20), label, font=fntbld, fill=fill)
    # kda2 column currently disabled:
    # draw.text((4636, 20), "KDA", font=fntbld, fill=fill)
    return key
def format_match(match: arez.Match) -> Image:
    """Render a whole match as an image: header row, one stats row per
    player of each team, and a middle info panel between the teams.

    Returns a BytesIO holding the PNG, downscaled to half size.
    """
    W, H = (4620, 2942)
    # padding=10
    img = Image.new("RGB", (W, H), color=(8, 21, 25))
    # headers
    key = playerkey(4620, 98)
    img.paste(key, (0, 0))
    # format in the players
    for team_num in range(1, 3): # 1 then 2
        # team 1 starts just under the header; team 2 starts below the
        # middle panel (each team block is 1684px tall)
        yoffset = (team_num - 1) * 1684 + 98 # replace 1000 with whatever offset you'll need
        team = getattr(match, f"team{team_num}")
        for i, mp in enumerate(team):
            y = i * 232 + yoffset # replace 50 with whatever row height you use
            row = statsimage(mp, i) # your current playerkey
            img.paste(row, (0, y))
            # base.paste(row, 0, y)
    # add middlebar
    middle = middlepanel(match)
    img.paste(middle, (0, 1262))
    #base.paste(middlebar(match))
    historyimg = img.resize((2310, 1471), Image.Resampling.LANCZOS)
    final_buffer = BytesIO()
    historyimg.save(final_buffer, "PNG")
    final_buffer.seek(0)
    return final_buffer
def middlepanel(match):
    """Render the middle info panel (4620x512) of the match image: the
    darkened map background, match metadata, team scores, the "VS"
    badge, and up to six banned-champion icons in a 3x2 grid.
    """
    W, H = (4620, 512)
    padding = 46
    # (horizontal, vertical)
    img = Image.new("RGB", (W, H))
    # add in the map image; fall back to a placeholder when missing
    map_name = match.map_name
    format_map = map_name.lower().replace(" ", "_").replace("'", "")
    try:
        match_map = Image.open(f"/home/poopski/mucski/stuff/icons/maps/{format_map}.png")
    except FileNotFoundError:
        match_map = Image.open("/home/poopski/mucski/stuff/icons/maps/test_maps.png")
    # scale the map to the full panel width, preserving aspect ratio,
    # and darken it so the overlaid text stays readable
    basewidth = 4620
    wpercent = (basewidth / float(match_map.size[0]))
    hsize = round((float(match_map.size[1]) * float(wpercent)))
    match_map = match_map.resize((basewidth, hsize), Image.Resampling.LANCZOS)
    enhancer = ImageEnhance.Brightness(match_map)
    match_map = enhancer.enhance(0.5)
    img.paste(match_map, (0, -512))
    draw = ImageDraw.Draw(img)
    fnt = ImageFont.truetype("/home/poopski/mucski/stuff/arial.ttf", 100)
    #fill = (15, 40, 48) dark
    fill = (255, 255, 255)
    stroke = (255, 255, 255)
    stroke_size = 0
    draw.text((padding, padding), f"ID: {match.id}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((padding, 100 + padding), f"Duration: {match.duration}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((padding, 200 + padding), f"Region: {match.region}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((padding, 300 + padding), f"Map: {match.map_name}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((round(W / 2 - 1032), padding), f"Team 1 score: {match.score[0]}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    vs = Image.open("/home/poopski/mucski/stuff/icons/vs.png")
    w, h = vs.size
    vs = vs.resize((round(w * 2 / 3), round(h * 2 / 3)))
    img.paste(vs, (round((W-w) / 2), round((H-h) / 2 + 48)), mask=vs)
    draw.text((round(W / 2 + 173), 300 + padding), f"Team 2 score: {match.score[1]}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    if match.bans:
        base_x = round((W - w) / 2)
        base_y = round((H - h) / 2)
        draw.text((base_x + 1520, base_y + 80), "Bans", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
        # icon offsets for up to six bans laid out in a 3x2 grid.
        # Replaces six copy-pasted branches; the old third branch read
        # `i == 2 and not None`, which is always true for i == 2 and
        # crashed on a missing (None) third ban.  Coordinates now use
        # round() uniformly (the old code mixed round() and int(),
        # which could differ by one pixel).
        offsets = [(1800, -70), (2020, -70), (2240, -70),
                   (1800, 150), (2020, 150), (2240, 150)]
        for (dx, dy), ban in zip(offsets, match.bans):
            if ban is None:
                continue
            champicon = champ_into_pic(ban)
            champicon = champicon.resize((200, 200))
            img.paste(champicon, (base_x + dx, base_y + dy))
    return img
async def getavatar(player):
    """Fetch the player's avatar, resize it to 150x150, crop it to a
    circle and return the image; a failed fetch (non-200 response)
    falls back to the bundled default avatar."""
    size = (150, 150)
    async with aiohttp.ClientSession() as session:
        async with session.get(player.avatar_url) as resp:
            payload = await resp.read() if resp.status == 200 else None
            try: # in case heroku goes to shit like always
                # BytesIO(None) raises TypeError, steering failed
                # fetches onto the fallback image below
                avatar = Image.open(BytesIO(payload)).convert("RGBA")
            except TypeError:
                avatar = Image.open("/home/poopski/mucski/stuff/icons/0.png")
            avatar = avatar.resize((150, 150))
            # circular alpha mask applied over the fitted avatar
            mask = Image.new('L', size, 0)
            mask_draw = ImageDraw.Draw(mask)
            mask_draw.ellipse((0, 0) + size, fill=255)
            output = ImageOps.fit(avatar, mask.size, centering=(0.5, 0.5))
            output.putalpha(mask)
            return output
async def generatecard(player):
    """Render a player profile card (860x1349 PNG) with avatar, levels,
    casual and ranked stats, and the best-queue rank icon.

    Returns a BytesIO positioned at the start of the PNG data.
    """
    W, H = 860, 1349
    img = Image.open("/home/poopski/mucski/stuff/card_bg.png").convert("RGBA")
    # img = Image.new("RGBA", (W, H))
    avatar = await getavatar(player)
    rank = Image.open(f"/home/poopski/mucski/stuff/icons/ranks2/{player.ranked_best.rank.value}.png")
    img.paste(avatar, (355, 18), mask=avatar)
    img.paste(rank, (350, 1141), mask=rank)
    draw = ImageDraw.Draw(img)
    fnt = ImageFont.truetype("/home/poopski/mucski/stuff/arial.ttf", 37)
    fnt_big = ImageFont.truetype("/home/poopski/mucski/stuff/arial.ttf", 64)
    fnt_small = ImageFont.truetype("/home/poopski/mucski/stuff/arial.ttf", 34)
    #fill = (15, 40, 48) dark
    fill = (126, 163, 215)
    stroke = (23, 34, 50)
    stroke_size = 1
    # global KDA is scraped from paladins.guru; "?" when unavailable
    kda = await get_kda_guru(player.id)
    if kda:
        kda = kda[3]
    else:
        kda = "?"
    draw.text((33, 211), f"{player.name}", font=fnt_big, stroke_width=stroke_size, stroke_fill=stroke, fill=(180, 160, 138))
    draw.text((33, 277), f"{player.title} - (Global KDA: {kda})", font=fnt_small, stroke_width=stroke_size, stroke_fill=stroke, fill=(223, 142, 53))
    draw.text((33, 360), f"Level: {player.calculated_level}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 417), f"Region: {player.region}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 474), f"Champions Owned: {player.champion_count}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 531), f"Account Created: {humanize.naturaltime(datetime.utcnow() - player.created_at)}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 588), f"Last Login: {humanize.naturaltime(datetime.utcnow() - player.last_login)}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    # divider = Image.open("root/mucski/stuff/icons/divider.png").convert("RGBA")
    # img.paste(divider, (180, 665), mask=divider)
    # text divider
    draw.text((33, 645), "----------------------------------------------------------------", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 705), f"Casual Winrate: {player.casual.wins}/{player.casual.losses} ({player.casual.winrate_text})", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 762), f"Casual Deserted: {player.casual.leaves}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 819), "----------------------------------------------------------------", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 879), f"Ranked Winrate: {player.ranked_best.wins}/{player.ranked_best.losses} ({player.ranked_best.winrate_text})", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 936), f"Ranked Deserted: {player.ranked_best.leaves}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    draw.text((33, 993), f"Current Rank: {player.ranked_best.rank} ({player.ranked_best.points} TP)", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=(149, 229, 242))
    draw.text((33, 1050), f"Ranked Type: {player.ranked_best.type}", font=fnt, stroke_width=stroke_size, stroke_fill=stroke, fill=fill)
    final_buffer = BytesIO()
    img.save(final_buffer, "PNG")
    final_buffer.seek(0)
    return final_buffer
async def get_kda_guru(player): # this input must be the player ID
    """Scrape a player's profile stats from paladins.guru.

    :param player: paladins.guru player ID used to build the profile URL.
    :return: list of stat strings scraped from the page, or implicitly
        ``None`` when the HTTP status is not 200 — callers must handle both.
    """
    url = 'https://paladins.guru/profile/{}'.format(player)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status == 200:
                raw = await resp.text()
                soup = BeautifulSoup(raw, 'html.parser')
                stats = []
                # Each "tsw__grid__stat" div holds one stat value.
                # NOTE(review): depends on paladins.guru page markup — may
                # silently return an empty list if the site changes.
                for stat in soup.find_all("div", {"class":"tsw__grid__stat"}):
                    stats.append(stat.text)
                return stats
|
import asyncio
import multiaddr
import pytest
from tests.utils import cleanup
from libp2p import new_node
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.pubsub.pubsub import Pubsub
from libp2p.pubsub.floodsub import FloodSub
from libp2p.pubsub.message import MessageTalk
from libp2p.pubsub.message import create_message_talk
from libp2p.pubsub.message import generate_message_id
# pylint: disable=too-many-locals
async def connect(node1, node2):
    """
    Connect node1 to node2.

    Dials node2's first listen address from node1; assumes node2 is already
    listening (``get_addrs()`` would be empty otherwise).
    """
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)
    await node1.connect(info)
@pytest.mark.asyncio
async def test_simple_two_nodes():
    """Two directly-connected nodes: B subscribes, A publishes, B receives."""
    node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    node_b = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    await node_a.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
    await node_b.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
    supported_protocols = ["/floodsub/1.0.0"]
    floodsub_a = FloodSub(supported_protocols)
    pubsub_a = Pubsub(node_a, floodsub_a, "a")
    floodsub_b = FloodSub(supported_protocols)
    pubsub_b = Pubsub(node_b, floodsub_b, "b")
    await connect(node_a, node_b)
    await asyncio.sleep(0.25)  # let the connection establish
    qb = await pubsub_b.subscribe("my_topic")
    await asyncio.sleep(0.25)  # let the subscription propagate
    node_a_id = str(node_a.get_id())
    msg = MessageTalk(node_a_id, node_a_id, ["my_topic"], "some data", generate_message_id())
    await floodsub_a.publish(node_a.get_id(), msg.to_str())
    await asyncio.sleep(0.25)  # let the message flood
    res_b = await qb.get()
    # Check that the msg received by node_b is the same
    # as the message sent by node_a
    assert res_b == msg.to_str()
    # Success, terminate pending tasks.
    await cleanup()
@pytest.mark.asyncio
async def test_simple_three_nodes():
    """Chain A -> B -> C: B and C subscribe, A publishes, both receive (C via B)."""
    # Want to pass message from A -> B -> C
    node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    node_b = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    node_c = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    await node_a.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
    await node_b.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
    await node_c.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
    supported_protocols = ["/floodsub/1.0.0"]
    floodsub_a = FloodSub(supported_protocols)
    pubsub_a = Pubsub(node_a, floodsub_a, "a")
    floodsub_b = FloodSub(supported_protocols)
    pubsub_b = Pubsub(node_b, floodsub_b, "b")
    floodsub_c = FloodSub(supported_protocols)
    pubsub_c = Pubsub(node_c, floodsub_c, "c")
    await connect(node_a, node_b)
    await connect(node_b, node_c)
    await asyncio.sleep(0.25)  # let connections establish
    qb = await pubsub_b.subscribe("my_topic")
    qc = await pubsub_c.subscribe("my_topic")
    await asyncio.sleep(0.25)  # let subscriptions propagate
    node_a_id = str(node_a.get_id())
    msg = MessageTalk(node_a_id, node_a_id, ["my_topic"], "some data", generate_message_id())
    await floodsub_a.publish(node_a.get_id(), msg.to_str())
    await asyncio.sleep(0.25)  # let the message flood through B to C
    res_b = await qb.get()
    res_c = await qc.get()
    # Check that the msg received by node_b is the same
    # as the message sent by node_a
    assert res_b == msg.to_str()
    # res_c should match original msg but with b as sender
    node_b_id = str(node_b.get_id())
    msg.from_id = node_b_id
    assert res_c == msg.to_str()
    # Success, terminate pending tasks.
    await cleanup()
async def perform_test_from_obj(obj):
    """
    Perform a floodsub test from a test obj.
    test obj are composed as follows:
    {
        "supported_protocols": ["supported/protocol/1.0.0",...],
        "adj_list": {
            "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...],
            "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...],
            ...
        },
        "topic_map": {
            "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...]
        },
        "messages": [
            {
                "topics": ["topic1_for_message", "topic2_for_message", ...],
                "data": "some contents of the message (newlines are not supported)",
                "node_id": "message sender node id"
            },
            ...
        ]
    }
    NOTE: In adj_list, for any neighbors A and B, only list B as a neighbor of A
    or B as a neighbor of A once. Do NOT list both A: ["B"] and B:["A"] as the behavior
    is undefined (even if it may work)
    """
    # Step 1) Create graph
    adj_list = obj["adj_list"]
    node_map = {}       # test-obj node id -> libp2p node
    floodsub_map = {}   # test-obj node id -> FloodSub router
    pubsub_map = {}     # test-obj node id -> Pubsub instance
    supported_protocols = obj["supported_protocols"]
    tasks_connect = []
    for start_node_id in adj_list:
        # Create node if node does not yet exist
        if start_node_id not in node_map:
            node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
            await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
            node_map[start_node_id] = node
            floodsub = FloodSub(supported_protocols)
            floodsub_map[start_node_id] = floodsub
            pubsub = Pubsub(node, floodsub, start_node_id)
            pubsub_map[start_node_id] = pubsub
        # For each neighbor of start_node, create if does not yet exist,
        # then connect start_node to neighbor
        for neighbor_id in adj_list[start_node_id]:
            # Create neighbor if neighbor does not yet exist
            if neighbor_id not in node_map:
                neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
                await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))
                node_map[neighbor_id] = neighbor_node
                floodsub = FloodSub(supported_protocols)
                floodsub_map[neighbor_id] = floodsub
                pubsub = Pubsub(neighbor_node, floodsub, neighbor_id)
                pubsub_map[neighbor_id] = pubsub
            # Connect node and neighbor
            # await connect(node_map[start_node_id], node_map[neighbor_id])
            tasks_connect.append(asyncio.ensure_future(connect(node_map[start_node_id], node_map[neighbor_id])))
    # Trailing sleep gives connections time to settle while gathered concurrently.
    tasks_connect.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_connect)
    # Allow time for graph creation before continuing
    # await asyncio.sleep(0.25)
    # Step 2) Subscribe to topics
    queues_map = {}
    topic_map = obj["topic_map"]
    tasks_topic = []
    tasks_topic_data = []
    for topic in topic_map:
        for node_id in topic_map[topic]:
            """
            # Subscribe node to topic
            q = await pubsub_map[node_id].subscribe(topic)

            # Create topic-queue map for node_id if one does not yet exist
            if node_id not in queues_map:
                queues_map[node_id] = {}

            # Store queue in topic-queue map for node
            queues_map[node_id][topic] = q
            """
            tasks_topic.append(asyncio.ensure_future(pubsub_map[node_id].subscribe(topic)))
            tasks_topic_data.append((node_id, topic))
    tasks_topic.append(asyncio.sleep(2))
    # Gather is like Promise.all
    responses = await asyncio.gather(*tasks_topic, return_exceptions=True)
    # NOTE: the last gathered result is the asyncio.sleep(2) sentinel (None),
    # hence the len(responses) - 1 bound below.
    for i in range(len(responses) - 1):
        q = responses[i]
        node_id, topic = tasks_topic_data[i]
        if node_id not in queues_map:
            queues_map[node_id] = {}
        # Store queue in topic-queue map for node
        queues_map[node_id][topic] = q
    # Allow time for subscribing before continuing
    # await asyncio.sleep(0.01)
    # Step 3) Publish messages
    topics_in_msgs_ordered = []
    messages = obj["messages"]
    tasks_publish = []
    for msg in messages:
        topics = msg["topics"]
        data = msg["data"]
        node_id = msg["node_id"]
        # Get actual id for sender node (not the id from the test obj)
        actual_node_id = str(node_map[node_id].get_id())
        # Create correctly formatted message
        msg_talk = MessageTalk(actual_node_id, actual_node_id, topics, data, generate_message_id())
        # Publish message
        # await floodsub_map[node_id].publish(actual_node_id, msg_talk.to_str())
        tasks_publish.append(asyncio.ensure_future(floodsub_map[node_id].publish(actual_node_id, msg_talk.to_str())))
        # For each topic in topics, add topic, msg_talk tuple to ordered test list
        # TODO: Update message sender to be correct message sender before
        # adding msg_talk to this list
        for topic in topics:
            topics_in_msgs_ordered.append((topic, msg_talk))
    # Allow time for publishing before continuing
    # await asyncio.sleep(0.4)
    tasks_publish.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_publish)
    # Step 4) Check that all messages were received correctly.
    # TODO: Check message sender too
    for i in range(len(topics_in_msgs_ordered)):
        topic, actual_msg = topics_in_msgs_ordered[i]
        for node_id in topic_map[topic]:
            # Get message from subscription queue
            msg_on_node_str = await queues_map[node_id][topic].get()
            msg_on_node = create_message_talk(msg_on_node_str)
            # Perform checks
            assert actual_msg.origin_id == msg_on_node.origin_id
            assert actual_msg.topics == msg_on_node.topics
            assert actual_msg.data == msg_on_node.data
    # Success, terminate pending tasks.
    await cleanup()
@pytest.mark.asyncio
async def test_simple_two_nodes_test_obj():
    """A -> B; B subscribed to topic1; A publishes one message."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"]
        },
        "topic_map": {
            "topic1": ["B"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "foo",
                "node_id": "A"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_three_nodes_two_topics_test_obj():
    """Chain A -> B -> C; two topics, both published by A."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"],
            "B": ["C"]
        },
        "topic_map": {
            "topic1": ["B", "C"],
            "topic2": ["B", "C"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "foo",
                "node_id": "A"
            },
            {
                "topics": ["topic2"],
                "data": "Alex is tall",
                "node_id": "A"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_two_nodes_one_topic_single_subscriber_is_sender_test_obj():
    """Sender B is also the only subscriber: it must receive its own message."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"]
        },
        "topic_map": {
            "topic1": ["B"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "Alex is tall",
                "node_id": "B"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_two_nodes_one_topic_two_msgs_test_obj():
    """Two messages on one topic, from both ends of an A -> B link."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"]
        },
        "topic_map": {
            "topic1": ["B"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "Alex is tall",
                "node_id": "B"
            },
            {
                "topics": ["topic1"],
                "data": "foo",
                "node_id": "A"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_seven_nodes_tree_one_topics_test_obj():
    """Binary tree of seven nodes; root publishes to one topic all others follow."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["4", "5"],
            "3": ["6", "7"]
        },
        "topic_map": {
            "astrophysics": ["2", "3", "4", "5", "6", "7"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_seven_nodes_tree_three_topics_test_obj():
    """Binary tree of seven nodes; root publishes one message on each of three topics."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["4", "5"],
            "3": ["6", "7"]
        },
        "topic_map": {
            "astrophysics": ["2", "3", "4", "5", "6", "7"],
            "space": ["2", "3", "4", "5", "6", "7"],
            "onions": ["2", "3", "4", "5", "6", "7"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["space"],
                "data": "foobar",
                "node_id": "1"
            },
            {
                "topics": ["onions"],
                "data": "I am allergic",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_seven_nodes_tree_three_topics_diff_origin_test_obj():
    """Binary tree of seven nodes; messages originate from different nodes (root, leaf 4, leaf 7)."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["4", "5"],
            "3": ["6", "7"]
        },
        "topic_map": {
            "astrophysics": ["1", "2", "3", "4", "5", "6", "7"],
            "space": ["1", "2", "3", "4", "5", "6", "7"],
            "onions": ["1", "2", "3", "4", "5", "6", "7"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["space"],
                "data": "foobar",
                "node_id": "4"
            },
            {
                "topics": ["onions"],
                "data": "I am allergic",
                "node_id": "7"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
@pytest.mark.asyncio
async def test_three_nodes_clique_two_topic_diff_origin_test_obj():
    """Three-node clique; two topics; messages from two different origins."""
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["3"]
        },
        "topic_map": {
            "astrophysics": ["1", "2", "3"],
            "school": ["1", "2", "3"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)
|
import chess
import numpy as np
root = None  # module-level handle to the current search-tree root (set by run_mcts)
# MCTS class
class MctsNode():
    """One node of a Monte-Carlo Tree Search over chess positions.

    ``state`` is a FEN string; the node tracks visit/win/loss counts and the
    legal moves not yet expanded into children.
    """
    def __init__(self, state, parent=None, parent_action=None):
        '''
        Initialise a board state
        '''
        self.state = state  # FEN string for this position
        self.board = chess.Board(state)
        self.parent = parent
        # Force side-to-move to alternate relative to the parent.
        # NOTE(review): chess.Board(state) already encodes the side to move in
        # the FEN; overriding .turn here can desynchronise board and FEN — confirm intent.
        if self.parent and self.parent.board.turn == chess.BLACK:
            self.board.turn = chess.WHITE
        else:
            self.board.turn = chess.BLACK
        self.parent_action = parent_action  # move that led from parent to this node
        self.children = []
        self._num_visits = 0
        self._num_wins = 0
        self._num_losses = 0
        # Legal moves not yet expanded into child nodes.
        self._available_actions = self.get_available_actions()
    def get_q(self):
        '''
        Returns expected reward from a node,i.e., q value
        '''
        return self._num_wins - self._num_losses
    def get_n(self):
        '''
        Returns number of visits to a node till now
        '''
        return self._num_visits
    def expand(self):
        '''
        Returns new state expanded from current
        state after taking a possible action
        '''
        action = self._available_actions.pop()  # take one untried move
        new_state = self.move(action)
        new_child_node = MctsNode(new_state, parent=self, parent_action=action)
        self.children.append(new_child_node)
        return new_child_node
    def select(self):
        '''
        Returns node to start simulation from
        '''
        curr = self
        while not curr.is_terminal():
            if len(curr._available_actions) == 0:
                curr = curr.best_child()  # fully expanded: descend via UCB
            else:
                return curr.expand() # expandable node
        return curr # terminal node
    def simulate(self):
        '''
        Game simulated from expanded node
        till an outcome is returned
        '''
        # we use rollout policy here
        curr = self
        while not curr.is_game_over():
            possible_moves = curr.get_available_actions()
            chosen_move = np.random.randint(len(possible_moves))
            new_board = curr.move(possible_moves[chosen_move])
            # NOTE(review): parent_action here is the integer index, not the
            # chess.Move object used elsewhere — harmless for rollouts, but inconsistent.
            curr = MctsNode(state=new_board, parent=curr,
                            parent_action=chosen_move)
        return curr.get_result()
    def backpropagate(self, result):
        '''
        Once we have the result, the number of
        win/loss and number of visits is updated
        till the parent node is reached
        '''
        if result == 1:
            self._num_wins += 1
        elif result == -1:
            self._num_losses += 1
        self._num_visits += 1
        if self.parent != None:
            self.parent.backpropagate(result)
    def is_terminal(self):
        '''
        Returns true if the node is a terminal node
        '''
        return self.is_game_over()
    def best_child(self, c_param=0.1):
        '''
        Returns child with maximum value
        '''
        # print(len(self.children))
        # UCB1 score: exploitation (q/n) plus c_param-weighted exploration term.
        # NOTE(review): divides by child.get_n() — a never-visited child (n == 0)
        # raises/returns inf here; confirm every child is visited before selection.
        weights = [(child.get_q() / child.get_n()) + c_param * np.sqrt((2 *
                   np.log(self.get_n()) / child.get_n())) for child in self.children]
        best_c = np.argmax(weights)
        return self.children[best_c]
    def is_game_over(self):
        # Checkmate, stalemate, 75-move rule, fivefold repetition, or dead position.
        result = (self.board.is_checkmate() or self.board.is_stalemate(
        ) or self.board.is_seventyfive_moves() or self.board.is_fivefold_repetition() or self.board.is_insufficient_material())
        return result
    def get_available_actions(self):
        # All legal moves in the current position.
        actions_list = list(self.board.legal_moves)
        return actions_list
    def move(self, action):
        '''
        Returns board state after action
        '''
        next_state = self.board.copy()
        next_state.push(action)
        return next_state.fen()
    def get_result(self):
        '''
        Returns result of the game
        1 for win, -1 for loss, and 0 for tie
        '''
        '''
        hardcoded white to human, black to computer
        '''
        # NOTE(review): assumes the game is over — board.outcome() returns None
        # for an in-progress game and this would raise AttributeError.
        if self.board.outcome().winner == chess.WHITE:
            res = -1
        elif self.board.outcome().winner == chess.BLACK:
            res = 1
        elif self.board.outcome().winner == None:
            res = 0
        return res
    def get_best_move(self, num_iter):
        '''
        Play the best move from current state
        '''
        for i in range(int(num_iter)):
            node = self.select()
            result = node.simulate()
            node.backpropagate(result)
        # print(len(self.children))
        # Pure exploitation (c_param=0): pick the highest-value child's move.
        return self.best_child(c_param=0.0).parent_action
def run_mcts(root_state, num_iter):
    '''
    Gameplay entry point: build a search tree rooted at root_state and
    return the best move found after num_iter MCTS iterations.
    Returns the root node itself when the position is already over.
    '''
    global root
    print(num_iter)
    root = MctsNode(state=root_state)
    best = root if root.is_game_over() else root.get_best_move(num_iter)
    return best
|
from flask import Flask, render_template, request, flash
from admin_report import Admin_Report
from DB import DB
app = Flask(__name__)  # WSGI application served by app.run() at the bottom of the file
@app.route("/administrator_report", methods=['GET'])
def admin_report():
    """Render the admin report page listing all registered users."""
    admin_report = Admin_Report()
    db = DB()
    registered_users = db.get_registered_users()
    return render_template("admin.html", admin_report=admin_report, registered_users=registered_users)
@app.route("/", methods=['GET', 'POST'])
def register():
    """Registration form.

    GET renders an empty form; POST validates the submitted fields, stores
    the user when valid, and re-renders the form with per-field error
    messages otherwise.
    """
    if request.method == 'POST':
        user, messages = validate_input(request.form)
        if user['valid']:
            db = DB()
            db.insert_registered_user(user)
            # BUG FIX: was the Python 2 statement `print messages`, a syntax
            # error under Python 3; the function-call form works on both.
            print(messages)
            return render_template("confirm.html")
        else:
            # Echo the submitted values back so the user can correct them.
            return render_template("register.html", user=user, messages=messages)
    else:
        # Plain GET: show an empty form with no error messages.
        user = {}
        messages = {}
        return render_template("register.html", user=user, messages=messages)
def validate_input(form):
    """Validate the registration form.

    :param form: mapping with keys fname, lname, address1, address2, city,
        state, zip, country (e.g. a Flask ``request.form``).
    :return: ``(user, messages)`` — ``user`` echoes the fields plus a boolean
        ``'valid'`` flag; ``messages`` maps each field to an error string
        ("" when the field is fine).
    """
    fields = ['fname', 'lname', 'address1', 'address2', 'city', 'state', 'zip', 'country']
    user = {field: form[field] for field in fields}
    user['valid'] = True
    messages = {}
    message_map = {'fname': "First Name", 'lname': "Last Name", 'address1': "Address", 'city': "City", 'state': "State", 'zip': "Zip Code", 'country': "Country"}
    for field in fields:
        value = user[field]
        messages[field] = ""
        # Every field except the optional address2 is required.
        if value == '' and field != 'address2':
            user['valid'] = False
            messages[field] = "* " + message_map[field] + " is required *"
            continue
        if field == 'zip':
            messages[field] = _validate_zip(value)
            if messages[field]:
                # BUG FIX: the original left user['valid'] True on zip format
                # errors (bad zips were inserted into the DB), and flagged
                # valid hyphenated zips as "must be all numeric" because the
                # whole-string isnumeric() check ran before the hyphen branch.
                user['valid'] = False
    messages['valid'] = ""  # kept for backward compatibility with the original key set
    return user, messages


def _validate_zip(value):
    """Return an error message for *value*, or "" when it is a valid US zip
    (5 digits, or 9 digits with a hyphen: XXXXX-XXXX)."""
    if len(value) == 5:
        if not value.isnumeric():
            return "* Zip code must be all numeric *"
        return ""
    if len(value) == 10:
        if "-" not in value:
            return "* You must have a hyphen in the zip code. *"
        zip1, _, zip2 = value.partition("-")
        if len(zip1) != 5 or len(zip2) != 4:
            return "* Zip code must be in the following format: XXXXX-XXXX *"
        if not zip1.isnumeric() or not zip2.isnumeric():
            return "* Zip code must be all numeric. *"
        return ""
    return "* Zip code must either be 5 digits or 9 digits with a hyphen. *"
if __name__ == '__main__':
    # Dev entry point; 0.0.0.0 makes the server reachable from outside the host.
    app.run(host="0.0.0.0", port=5000)
|
from .GeomView import GeomView
# This view generates indices for polygons.
class PolygonView(GeomView):
    """GeomView that generates index primitives for a polygon whose vertices
    are stored contiguously starting at ``firstVertex``."""

    def generateTriangleIndices(self, firstVertex, numVerts):
        """Triangulate the polygon as a fan anchored at firstVertex."""
        for i in range(firstVertex + 1, firstVertex + (numVerts - 1)):
            self.indices.addVertices(firstVertex, i, i + 1)
            self.indices.closePrimitive()

    def generateLineStripIndices(self, firstVertex, numVerts):
        """Emit the polygon outline as a single closed line strip."""
        for i in range(firstVertex, firstVertex + numVerts):
            self.indices.addVertex(i)
        # Add the first vertex again to close the loop
        self.indices.addVertex(firstVertex)
        self.indices.closePrimitive()

    def generateLineIndices(self, firstVertex, numVerts):
        """Emit the polygon outline as individual line segments."""
        for i in range(firstVertex + 1, firstVertex + numVerts):
            self.indices.addVertices(i - 1, i)
            self.indices.closePrimitive()
        # Add the last line to close the loop.
        # BUG FIX: the closing segment must join firstVertex to the polygon's
        # last vertex (firstVertex + numVerts - 1); the original passed the
        # bare offset (numVerts - 1), which indexes the wrong vertex whenever
        # firstVertex != 0.
        self.indices.addVertices(firstVertex, firstVertex + numVerts - 1)
        self.indices.closePrimitive()
|
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
class StructuredModel:
    """End-to-end Keras demo on the UCI heart-disease CSV: download, split,
    build feature columns, train a small dense binary classifier, evaluate."""
    def __init__(self):
        self.dataframe = None      # raw CSV as a pandas DataFrame
        self.train = None          # train split (DataFrame)
        self.test = None           # test split (DataFrame)
        self.val = None            # validation split (DataFrame)
        self.example_batch = None  # one feature batch used by demo()
        self.train_ds = None       # tf.data datasets built from the splits
        self.val_ds = None
        self.test_ds = None
        self.model = None          # compiled tf.keras.Sequential
        self.feature_layer = None  # DenseFeatures layer over all feature columns
    def execute(self):
        """Run the whole pipeline in order."""
        self.data_download()
        self.data_preparation()
        self.data_preprocessing()
        self.model_creation()
        self.model_train()
        self.model_eval()
    def data_download(self):
        """Download the heart-disease dataset into self.dataframe."""
        url = 'https://storage.googleapis.com/applied-dl/heart.csv'
        self.dataframe = pd.read_csv(url)
        print(self.dataframe.head())
    def data_preparation(self):
        """Split the dataframe into train/val/test (64/16/20)."""
        self.train, self.test = train_test_split(self.dataframe, test_size=0.2)
        self.train, self.val = train_test_split(self.train, test_size=0.2)
        print(len(self.train), '훈련 샘플')
        print(len(self.val), '검증 샘플')
        print(len(self.test), '테스트 샘플')
    def data_preprocessing(self):
        """
        batch_size = 5 # use a small batch size for the example
        train_ds = self.df_to_dataset(self.train, batch_size=batch_size)
        val_ds = self.df_to_dataset(self.val, shuffle=False, batch_size=batch_size)
        test_ds = self.df_to_dataset(self.test, shuffle=False, batch_size=batch_size)
        for feature_batch, label_batch in train_ds.take(1):
            print('전체 특성:', list(feature_batch.keys()))
            print('나이 특성의 배치:', feature_batch['age'])
            print('타깃의 배치:', label_batch)
        # build a sample batch for trying out feature columns
        self.example_batch = next(iter(train_ds))[0]
        age = feature_column.numeric_column("age")
        self.demo(age)
        """
        feature_columns = []
        # Numeric columns
        for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
            feature_columns.append(feature_column.numeric_column(header))
        # Bucketized column
        age = feature_column.numeric_column("age")
        age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
        feature_columns.append(age_buckets)
        # Categorical (one-hot) column
        thal = feature_column.categorical_column_with_vocabulary_list('thal', ['fixed', 'normal', 'reversible'])
        thal_one_hot = feature_column.indicator_column(thal)
        feature_columns.append(thal_one_hot)
        # Embedding column
        thal_embedding = feature_column.embedding_column(thal, dimension=8)
        feature_columns.append(thal_embedding)
        # Crossed feature column
        crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
        crossed_feature = feature_column.indicator_column(crossed_feature)
        feature_columns.append(crossed_feature)
        self.feature_layer = layers.DenseFeatures(feature_columns)
        batch_size = 32
        self.train_ds = self.df_to_dataset(self.train, batch_size=batch_size)
        self.val_ds = self.df_to_dataset(self.val, shuffle=False, batch_size=batch_size)
        self.test_ds = self.df_to_dataset(self.test, shuffle=False, batch_size=batch_size)
    # Helper: build a tf.data dataset from a pandas DataFrame
    def df_to_dataset(self, df, shuffle=True, batch_size=32):
        df = df.copy()
        labels = df.pop('target')  # 'target' is the binary label column
        ds = tf.data.Dataset.from_tensor_slices((dict(df), labels))
        if shuffle:
            ds = ds.shuffle(buffer_size=len(df))
        ds = ds.batch(batch_size)
        return ds
    # Helper: build a feature layer from one column and print it on the sample batch
    # NOTE(review): the parameter name shadows the imported `feature_column` module.
    def demo(self, feature_column):
        feature_layer = layers.DenseFeatures(feature_column)
        print(feature_layer(self.example_batch).numpy())
    def model_creation(self):
        """Build and compile the classifier on top of the feature layer."""
        self.model = tf.keras.Sequential([
            self.feature_layer,
            layers.Dense(128, activation='relu'),
            layers.Dense(128, activation='relu'),
            layers.Dense(1, activation='sigmoid')
        ])
        self.model.compile(optimizer='adam',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])
    def model_train(self):
        """Train for 5 epochs with validation."""
        self.model.fit(self.train_ds, validation_data=self.val_ds, epochs=5)
    def model_eval(self):
        """Evaluate on the held-out test set and print accuracy."""
        loss, accuracy = self.model.evaluate(self.test_ds)
        print("정확도", accuracy)
if __name__ == '__main__':
    # Run the full pipeline: download, split, preprocess, train, evaluate.
    m = StructuredModel()
    m.execute()
|
import sys
import Libr
import random
# Convert from base 16 to base 10.
def To_teny(x):
    """Return the decimal integer value of hex string *x*."""
    return int(x, 16)
# Convert from base 10 to base 16.
def To_sexteen(y):
    """Return *y* as a lowercase hex string, left-padded with '0' to at
    least two characters (one byte)."""
    return hex(y)[2:].zfill(2)
# Generate the master key.
def key_gen():
    """Return a random 32-byte master key encoded as a 64-char hex string."""
    pieces = []
    for _ in range(32):
        byte = random.randint(0, 255)
        pieces.append(To_sexteen(byte))
    return "".join(pieces)
# Split the master key into halves.
def break_key(keymaster):
    """Split the 64-char master key into two 32-char halves."""
    return keymaster[:32], keymaster[32:]
# XOR the key with the round constants.
def xor_key(k1, j):
    """Return the 16-byte subkey formed by XOR-ing key *k1* with round
    constant ``Libr.C_const[j]``, byte by byte, as a hex string."""
    out = []
    for i in range(16):
        key_byte = To_teny(k1[2 * i:2 * i + 2])
        const_byte = To_teny(Libr.C_const[j][2 * i:2 * i + 2])
        out.append(To_sexteen(key_byte ^ const_byte))
    return "".join(out)
|
"""
Created on Jul 3, 2013
@author: Zachary
"""
import math
import inspect
import numpy as np
import matplotlib.pyplot as plt
from pybrain.supervised import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from scipy.io import wavfile
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader
# NOTE(review): np.float is removed in NumPy >= 1.24 — this line needs np.float64 there.
EPSILON = np.finfo(np.float).eps  # smallest float increment; guards divisions by zero
FRAME_TIME_LENGTH = 180  # length of frame in milliseconds
# DIVISIONS = np.array([40, 70, 110, 150, 200, 250, 300, 400, 500, 750, 1000, 1500, 2000, 3000, 5000, 11025])
# DIVISIONS = np.array([500, 1500, 2000, 2500, 3000, 3500, 4000, 5000, 7000, 10000])
DIVISIONS = np.array([500, 1000, 2500, 5000, 7000])  # frequency-bin boundaries in Hz
MOVING_AVERAGE_LENGTH = 3  # length in number of FFT intervals
MOVING_THRESHOLD_LENGTH = 70  # sliding-minimum window (in FFT intervals) for the noise threshold
# Neural-network hyperparameters (see NeuralNetworkClassifier below).
NETWORK_LEARNING_RATE = 0.3
NETWORK_MOMENTUM = 0.1
NETWORK_HIDDEN_NEURONS = 20
NETWORK_ITERATIONS = 50
class AudioBuffer:
    """FIFO of raw audio samples that yields fixed-size, overlapping FFT windows."""
    def __init__(self, fft_sample_length, overlap_sample_length):
        self.data = []
        self.fft_sample_length = fft_sample_length
        self.overlap_sample_length = overlap_sample_length
        # Hop size between consecutive windows.
        self.step = fft_sample_length - overlap_sample_length
    def push(self, samples):
        """
        Adds samples to end of buffer data.
        :param samples:
        """
        self.data += list(samples)
    def available(self):
        """Return True when at least one full FFT window is buffered."""
        return self.fft_sample_length <= len(self.data)
    def read(self):
        """Return one FFT window and advance the buffer by one hop."""
        window = self.data[:self.fft_sample_length]
        del self.data[:self.step]
        return window
class DataBuffer:
    """List-backed buffer keeping at most *length* most-recent items
    (unbounded by default)."""
    def __init__(self, length=float("inf")):
        self.length = length
        self.data = []
    def push(self, item):
        """Append one item, evicting the oldest items if over capacity."""
        self.data.append(item)
        self._trim()
    def push_multiple(self, items):
        """Append several items, evicting the oldest items if over capacity."""
        self.data.extend(items)
        self._trim()
    def _trim(self):
        # Drop the oldest entries so at most self.length remain.
        excess = len(self.data) - self.length
        if excess > 0:
            del self.data[:int(excess)]
class Classifier(object): # interface for a generic classifier
    """Abstract base: train on labelled data, then map feature vectors to outputs."""
    def __init__(self):
        pass
    def train(self, data):
        """Fit the classifier on an iterable of (input, target) pairs."""
        pass
    def run(self, feature_vector):
        """Return the classifier's output for a single feature vector."""
        pass
class NeuralNetworkClassifier(Classifier):
    """Feed-forward neural net classifier built on PyBrain (Python 2 era code)."""
    def __init__(self, n_inputs, n_outputs, n_hidden=NETWORK_HIDDEN_NEURONS):
        super(NeuralNetworkClassifier, self).__init__()
        # Three hidden layers of equal width.
        self.network = buildNetwork(n_inputs, n_hidden, n_hidden, n_hidden, n_outputs)
        self.dataset = SupervisedDataSet(n_inputs, n_outputs)
    def train(self, data, iterations=NETWORK_ITERATIONS):
        """Add (input, target) pairs to the dataset, train with backprop,
        and return the final epoch's training error."""
        for item in data:
            self.dataset.addSample(item[0], item[1])
        trainer = BackpropTrainer(self.network, self.dataset, learningrate=NETWORK_LEARNING_RATE,
                                  momentum=NETWORK_MOMENTUM)
        error = 0
        for i in xrange(iterations):  # xrange: Python 2 only
            error = trainer.train()
            print (i + 1), error  # Python 2 print statement: epoch number then error
        return error
    def run(self, feature_vector):
        """Activate the network on one feature vector and return its output."""
        return self.network.activate(feature_vector)
    def export(self, filename):
        """Serialise the trained network to an XML file."""
        NetworkWriter.writeToFile(self.network, filename)
class SavedNeuralNetworkClassifier(Classifier):
    """Classifier restored from a network XML file written by
    NeuralNetworkClassifier.export (inference only — no train())."""
    def __init__(self, filename):
        super(SavedNeuralNetworkClassifier, self).__init__()
        self.network = NetworkReader.readFrom(filename)
    def run(self, feature_vector):
        """Activate the restored network on one feature vector."""
        return self.network.activate(feature_vector)
class FeatureVectorBuffer(DataBuffer):
    """DataBuffer of feature vectors that also records a classification
    result for every vector pushed."""
    def __init__(self, length=float("inf")):
        DataBuffer.__init__(self, length)
        self.results = DataBuffer(length)  # one result per stored vector
    def add_vector(self, feature_vector):
        """Store the vector and push its classification result."""
        DataBuffer.push(self, feature_vector)
        result = self.classify(feature_vector)
        self.results.push(result)
    def classify(self, feature_vector):
        # Hook for subclasses; the base implementation returns None.
        pass
class FeatureVectorExtractor:
def __init__(self, rate):
self.rate = rate
calculator = FFTSizeCalculator(rate)
self.fft_sample_length = calculator.fft_sample_length
self.overlap_sample_length = calculator.overlap_sample_length
self.audio_buffer = AudioBuffer(fft_sample_length=self.fft_sample_length,
overlap_sample_length=self.overlap_sample_length)
# self.buffers = {
# "raw_slices": DataBuffer(),
# "slices": DataBuffer(),
# "zero_crossing_rates": DataBuffer(),
# "rolloff_freqs": DataBuffer(),
# "slices_bins": DataBuffer()
# }
self.buffers = {name: DataBuffer() for name in
["raw_slices", "slices", "zero_crossing_rates", "rolloff_freqs", "slices_bins",
"third_octave", "averages", "thresholds", "ratios", "magnitude", "stddev"]}
self.classifier = FeatureVectorBuffer()
self.fft = FFT(self.rate)
self.original_freqs = self.fft.freqs
self.freqs = self.high_pass_filter_freqs(self.original_freqs, 500)
self.bin_divisions_indexes = self.find_indexes(self.freqs, DIVISIONS)
def plot_spectrogram(self, bins, freqs, slices, logscale=False, axes=plt):
power = slices.T
if logscale:
z = np.log10(power)
else:
z = power
axes.pcolormesh(bins, freqs, z)
def find_indexes(self, freqs, divisions):
# Determine where the divisions are in the freqs list
indexes = []
i = 0
for div in divisions:
while i < len(freqs) and freqs[i] < div:
i += 1
indexes.append(i)
return indexes
def freq_bins(self, slice):
# Divide slice into frequency bins, returns new slice
indexes = self.bin_divisions_indexes
output = []
prev_index = indexes[0]
for i in xrange(1, len(indexes)):
part = slice[prev_index:indexes[i] + 1]
average = sum(part) / len(part)
output.append(average)
prev_index = indexes[i]
output = np.array(output)
return output
def slice_rolloff_freq(self, slice, threshold=0.9):
target = threshold * sum(slice)
partial = 0.0
i = 0
length = len(slice)
while partial < target and i < length - 1:
partial += slice[i]
i += 1
return i
def avg_zero_crossing_rate(self, sound_data):
signs = np.sign(np.array(sound_data))
total = 0
for i in xrange(1, len(signs)):
if signs[i - 1] != signs[i]:
total += 1
rate = float(total) / len(sound_data)
return rate
def normalize(self, slice):
raw_slices = np.array(self.buffers["raw_slices"].data)
end = len(raw_slices)
# Take the moving average to smooth out the data
start = max(0, end - MOVING_AVERAGE_LENGTH)
actual_length = end - start
average = sum(raw_slices[start:end]) / actual_length
self.buffers["averages"].push(average)
# Find the sliding minimum value in each frequency band as threshold
averages = self.buffers["averages"].data
start2 = max(0, len(self.buffers["averages"].data) - MOVING_THRESHOLD_LENGTH)
possible_thresholds = np.array(averages[start2:]).T
threshold = []
for band in possible_thresholds:
threshold.append(np.amin(band))
new_slices = slice - threshold # normalize
new_slices = new_slices.clip(0) # clip at threshold
new_slices /= 10 # scale downwards
return new_slices, threshold, average
def high_pass_filter(self, slice, freqs, cutoff_frequency):
"""
Zeros the frequencies below the specified frequency
(or the next lowest present)
and returns the remaining higher frequencies (data and labels)
:param slices:
"""
# Find the index to cut off at
index = self.find_indexes(freqs, [cutoff_frequency])[0]
# Perform the filtering
new_slice = slice[index:]
return new_slice
def high_pass_filter_freqs(self, freqs, cutoff_frequency):
index = self.find_indexes(freqs, [cutoff_frequency])[0]
new_freqs = freqs[index:]
new_freqs = np.array(new_freqs)
return new_freqs
def pairwise_differences(self, items):
length = len(items)
ratios = []
for i in xrange(length):
for j in xrange(i + 1, length):
ratios.append(items[i] - items[j])
return ratios
def autocorrelation_coefficient(self, series):
series1 = series - np.average(series)
series2 = series1[::-1]
corr = np.correlate(np.abs(series), np.abs(series2))
return float(corr) / max(np.var(series), EPSILON) / 100
def analyze(self, data):
raw_slice = self.fft.run(data)
# Decibel scale
raw_slice = 10 * np.log10(raw_slice) + 60
raw_slice = raw_slice.clip(0)
# High-pass filter
raw_slice = self.high_pass_filter(raw_slice, self.original_freqs, 500)
# Add raw slices to buffer for use in calculating moving average
self.buffers["raw_slices"].push(raw_slice)
# Normalize the slices for analysis purposes
slice, threshold, average = self.normalize(raw_slice)
self.buffers["slices"].push(slice)
self.buffers["thresholds"].push(threshold)
slices = [slice]
# Calculate zero-crossing rate
zero_crossing_rate = self.avg_zero_crossing_rate(data)
self.buffers["zero_crossing_rates"].push(zero_crossing_rate)
# Calculate rolloff frequencies
rolloff_freq = self.freqs[self.slice_rolloff_freq(slice)]
rolloff_freq /= np.amax(self.freqs) # make a proportion of the maximum frequency
self.buffers["rolloff_freqs"].push(rolloff_freq)
# Divide each slice into frequency bins
slice_bins = self.freq_bins(slice)
self.buffers["slices_bins"].push(slice_bins)
# Extract the third octave
third_octave_indexes = self.find_indexes(self.freqs, [700, 1300])
third_octave = slice[third_octave_indexes[0]:third_octave_indexes[1]]
self.buffers["third_octave"].push(third_octave)
# Third octave autocorrelation
# third_octave_autocorrelation = self.autocorrelation_coefficient(slice)
# self.buffers["third_octave_autocorrelation"].push(third_octave_autocorrelation)
# Pairwise differences (ratio of magnitude) between frequency bins
ratios = self.pairwise_differences(slice_bins)
self.buffers["ratios"].push(ratios)
# Overall magnitude of sound
magnitude = np.average(slice)
self.buffers["magnitude"].push(magnitude)
# Standard deviation of frequency spectrum
stddev = np.std(slice)
self.buffers["stddev"].push(stddev)
# Create feature vectors
vector = []
# vector.extend(slice_bins)
vector.extend(ratios)
vector.append(zero_crossing_rate)
# vector.append(third_octave_autocorrelation)
vector.append(stddev)
vector.append(rolloff_freq)
vector.append(magnitude)
vector = np.array(vector)
self.process_vector(vector)
# Return vector
return vector
def _raw_data_in_slices(self, data):
num = int((len(data) - self.fft_sample_length) / self._step_length()) + 1
prev_index = 0
for i in xrange(num):
section = data[prev_index:prev_index + self.fft_sample_length]
prev_index += self._step_length()
yield section
def push(self, samples):
self.audio_buffer.push(samples)
vectors = []
while self.audio_buffer.available():
vector = self.analyze(self.audio_buffer.read())
vectors.append(vector)
return vectors
    def display(self, plot_filename=None, buffer_list=None):
        """Plot each analysis buffer on its own matplotlib subplot.

        :param plot_filename: optional path; when given the figure is saved there
            before being shown
        :param buffer_list: optional list of buffer names to restrict the plot
            to; when None, every buffer in ``self.buffers`` is plotted
        """
        # One subplot per buffer (all of them, or only the requested subset).
        if buffer_list is None:
            length = len(self.buffers)
        else:
            length = len(buffer_list)
        fig, axes = plt.subplots(length)
        i = 0
        for name in self.buffers.keys():
            if buffer_list is None or name in buffer_list:
                print name
                try:
                    # Multiple subplots: axes is an array we index into.
                    axis = axes[i]
                except TypeError:
                    # A single subplot: plt.subplots returns a bare Axes object.
                    axis = axes
                self._display_buffer(self.buffers[name], axis)
                i += 1
        # self._display_buffer(self.classifier, axes[-1]) # Display feature vector
        if plot_filename is not None:
            plt.savefig(plot_filename)
        plt.show()
def _display_buffer(self, buffer, axis):
buffer_data = buffer.data
if type(buffer_data[0]) is np.ndarray:
# print as spectrogram
# shifted_buffer_data = np.array(buffer_data) - np.amin(buffer_data)
# shifted_buffer_data = shifted_buffer_data.clip(EPSILON)
shifted_buffer_data = np.array(buffer_data[1:])
self.plot_spectrogram(np.array(range(len(buffer_data) - 1)), np.array(range(len(buffer_data[0]))),
shifted_buffer_data, axes=axis)
else:
# plot as standard (x,y)
axis.plot(range(len(buffer_data) - 1), buffer_data[1:])
    def process_vector(self, vector):
        """Forward a completed feature vector to the attached classifier."""
        self.classifier.add_vector(vector)
class FFT:
    # FFT algorithm based on code from matplotlib
    # Code simplified for use as a single real-valued FFT
    # Used under a BSD compatible license
    # Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved
    def __init__(self, rate):
        """Precompute window, sizes and frequency axis for sample *rate* (Hz)."""
        self.rate = rate
        sizes = FFTSizeCalculator(rate)
        self.fft_sample_length = sizes.fft_sample_length
        self.overlap_sample_length = sizes.overlap_sample_length
        self.step = sizes.step
        self.numFreqs = self.fft_sample_length / 2 + 1
        self.windowVals = np.hanning(self.fft_sample_length)
        self.freqs = float(self.rate) / self.fft_sample_length * np.arange(self.numFreqs)

    def run(self, x):
        """Return the power spectral density of one window of samples *x*.

        *x* must be exactly ``fft_sample_length`` samples long.
        """
        assert len(x) == self.fft_sample_length
        spectrum = np.fft.rfft(x * self.windowVals)
        # Squared magnitude of the complex spectrum.
        power = spectrum.real ** 2 + spectrum.imag ** 2
        # Normalize by the window's energy; double interior bins to account
        # for the discarded negative frequencies; scale by the sample rate.
        power /= (np.abs(self.windowVals) ** 2).sum()
        power[1:-1] *= 2
        power /= self.rate
        return power.real
class FFTSizeCalculator:
    """Derives FFT window, overlap and step sizes (in samples) from a sample
    rate, using the module-level FRAME_TIME_LENGTH constant (milliseconds)."""

    def __init__(self, rate):
        self.rate = rate
        # Number of samples in one FRAME_TIME_LENGTH-millisecond frame.
        frame_len = int(float(FRAME_TIME_LENGTH) / float(1000) * float(self.rate))
        # Round the FFT size up to the next power of two.
        self.fft_sample_length = int(2 ** self._nextpow2(frame_len))
        # Consecutive windows overlap by 30% of a frame.
        self.overlap_sample_length = int(0.3 * frame_len)
        self.step = self.fft_sample_length - self.overlap_sample_length

    def _nextpow2(self, num):
        """Smallest exponent e such that 2 ** e >= num."""
        return int(np.ceil(np.log2(num)))
class RealtimeAnalyzer:
    """Streams audio samples through feature extraction and a classifier and
    prints a live ASCII meter of the classification output."""
    def __init__(self, rate, classifier):
        """
        :param rate: int
        :param classifier: Classifier
        """
        self.classifier = classifier
        self.extractor = FeatureVectorExtractor(rate)
        self.buffer = DataBuffer(100)
        # self.buffer.push_multiple(np.zeros(100))
        # plt.ion()
        # self.line, = plt.plot(xrange(100), xrange(100))
        # plt.draw()
    def push(self, samples):
        """Feed raw samples in; classify and print each feature vector produced."""
        feature_vectors = self.extractor.push(samples)
        if feature_vectors is not None:
            for vector in feature_vectors:
                result = self.classifier.run(vector)
                self._output(result)
        # self._plot()
    def _output(self, result):
        """Print *result* as a 20-character bar meter and record it."""
        # Combine the classifier outputs into a single score.
        output = 0
        for item in result:
            output += item
        # NOTE(review): divides by 2 rather than len(result) -- assumes the
        # classifier emits exactly two outputs; confirm.
        output /= float(2)
        if not math.isnan(output):
            scale = 20
            # Clamp the score to [0, scale] bar segments.
            value = min(max(int(output * scale), 0), scale)
            self.buffer.push(value)
            # sys.stdout.flush()
            print "[{0}{1}] {2}".format('#' * value, ' ' * (scale - value), output)
    def _plot(self):
        """Redraw the live plot (requires the plotting setup commented out in
        __init__ to be re-enabled)."""
        self.line.set_ydata(self.buffer.data)
        plt.draw()
# NOTE(review): presumably a chunk size for simulated streaming -- not used in
# this part of the file, confirm at call sites.
VIRTUAL_BUFFER_SIZE = 1000
class FileProcessor(object):
    """Mixin providing WAV-file feature extraction for trainer/analyzer classes."""

    def _process_file(self, filename, display=False, **kargs):
        """Read the WAV file *filename*, run it through a FeatureVectorExtractor
        and return the resulting feature vectors.

        When *display* is true the extractor's plots are shown; any extra
        keyword arguments are forwarded to ``extractor.display``.
        """
        sample_rate, samples = wavfile.read(filename)
        extractor = FeatureVectorExtractor(sample_rate)
        vectors = extractor.push(samples)
        if display:
            extractor.display(**kargs)
        return vectors
class BatchFileTrainer(FileProcessor):
    """Accumulates (feature vector, label) pairs from labelled WAV files and
    trains a classifier on the whole batch."""
    def __init__(self, classifier):
        # classifier may be an instance, or a class instantiated lazily in
        # train() once the vector and label sizes are known.
        self.classifier = classifier
        self.data = []
    def add(self, filename, results):
        """Process *filename* and pair each feature vector with a label from
        *results*, stretching the label list to match the vector count."""
        feature_vectors = self._process_file(filename)
        # Create a stretched results list the same length as feature_vectors
        target_length = len(feature_vectors)
        source_length = len(results)
        results_stretched = []
        for i in xrange(target_length):
            results_stretched.append(results[int(float(i) / target_length * source_length)])
        data = []
        for i in xrange(1, target_length): # Remove the first vector, which usually has problems
            item = [feature_vectors[i], results_stretched[i]]
            print item
            data.append(item)
        print "# of feature vectors:", target_length - 1
        self.data.extend(data)
    def train(self):
        """Train on all accumulated data; returns the classifier's train() result."""
        # Create a new classifier if necessary
        if inspect.isclass(self.classifier):
            self.classifier = self.classifier(len(self.data[0][0]), len(self.data[0][1]))
        return self.classifier.train(self.data)
class FileAnalyzer(FileProcessor):
    """Runs a trained classifier over the feature vectors of a WAV file."""
    def __init__(self, classifier):
        self.classifier = classifier
    def analyze(self, filename, save_filename=None, **kargs):
        """Classify every feature vector extracted from *filename*.

        When *save_filename* is given, each vector and its classification are
        also written there as CSV rows.  Returns the list of classifier results.
        """
        vectors = self._process_file(filename, **kargs)
        results = []
        text = ""
        for vector in vectors:
            print vector
            result = self.classifier.run(vector)
            # One CSV row: feature values followed by the classifier outputs.
            text += ",".join([str(item) for item in vector]) + "," + ",".join([str(item) for item in result]) + "\n"
            results.append(result)
        if save_filename is not None:
            with open(save_filename, "w") as f:
                f.write(text)
        return results
|
# Generated by Django 3.2.7 on 2021-09-11 12:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Country, Date and Data tables; Data references Country
    and Date through cascading foreign keys."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('country_code', models.CharField(max_length=4, verbose_name='Код страны')),
            ],
        ),
        migrations.CreateModel(
            name='Date',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_value', models.DateField(verbose_name='Дата')),
            ],
        ),
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('confirmed', models.IntegerField(verbose_name='Подтвержденные случаи')),
                ('deaths', models.IntegerField(verbose_name='Количество смертей')),
                ('stringency_actual', models.IntegerField(verbose_name='Актуальный уровень ограничений')),
                ('stringency', models.IntegerField(verbose_name='Уровень ограничений')),
                ('country_code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='death_table.country')),
                ('date_value', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='death_table.date')),
            ],
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
python tasks/mongo_catalogue.py --local-scheduler --date 20160519
"""
import time
import luigi
from uuid import UUID
from ke2mongo.lib.cites import get_cites_species
from ke2mongo.tasks.mongo import MongoTask, InvalidRecordException
from ke2mongo.tasks import DATE_FORMAT
from ke2mongo.log import log
from datetime import datetime
class MongoCatalogueTask(MongoTask):
    """Luigi task importing KE EMu ecatalogue records into MongoDB.

    Skips excluded record types and records failing basic validation,
    normalises a few fields (string casts for dynamicProperties, CITES flag,
    RealEmbargoDate) and creates the indexes downstream tasks rely on.
    """
    module = 'ecatalogue'
    # List of types to exclude
    excluded_types = [
        'Acquisition',
        'Bound Volume',
        'Bound Volume Page',
        'Collection Level Description',
        'DNA Card', # 1 record, but keep an eye on this
        'Field Notebook',
        'Field Notebook (Double Page)',
        'Image',
        'Image (electronic)',
        'Image (non-digital)',
        'Image (digital)',
        'Incoming Loan',
        'L&A Catalogue',
        'Missing',
        'Object Entry',
        'object entry', # FFS
        'Object entry', # FFFS
        'PEG Specimen',
        'PEG Catalogue',
        'Preparation',
        'Rack File',
        'Tissue', # Only 2 records. Watch.
        'Transient Lot'
    ]
    # Set of CITES-listed scientific names, loaded once at class definition.
    cites_species = get_cites_species()
    def process_record(self, data):
        """Validate and normalise one record before the base class stores it.

        Raises InvalidRecordException to skip a record (excluded type, bad
        GUID, missing department, or malformed AdmDateInserted).
        """
        # Only import if it's one of the record types we want
        record_type = data.get('ColRecordType', 'Missing')
        if record_type in self.excluded_types:
            log.debug('Skipping record %s: Excluded type %s', data['irn'], record_type)
            raise InvalidRecordException
        # Make sure the UUID is valid
        guid = data.get('AdmGUIDPreferredValue', None)
        if guid:
            try:
                UUID(guid, version=4)
            except ValueError:
                # print 'Skipping: not a valid UUID'
                # Value error - not a valid hex code for a UUID.
                # continue
                print 'ERROR: ', guid
                raise InvalidRecordException
        # If we don't have collection department, skip it
        if not data.get('ColDepartment', None):
            raise InvalidRecordException
        date_inserted = data.get('AdmDateInserted', None)
        # Some records have an invalid AdmDateInserted=20-09-27
        # As we need this for the stats, we need to skip them - just checking against date length as it's much quicker
        if not date_inserted or len(DATE_FORMAT) != len(date_inserted):
            log.error('Skipping record %s: invalid AdmDateInserted %s', data['irn'], date_inserted)
            raise InvalidRecordException
        # For now, the mongo aggregator cannot handle int / bool in $concat
        # So properties that are used in dynamicProperties need to be cast as strings
        for i in ['DnaTotalVolume', 'FeaCultivated', 'MinMetRecoveryWeight', 'MinMetWeightAsRegistered']:
            if i in data:
                data[i] = str(data[i])
        # If record is a CITES species, mark cites = True
        scientific_name = data.get('DarScientificName', None)
        if scientific_name and scientific_name in self.cites_species:
            data['cites'] = True
        # For the embargo date, we're going to use the latest of NhmSecEmbargoDate and NhmSecEmbargoExtensionDate
        # So loop through, convert to timestamp.
        embargo_list = []
        for f in ['NhmSecEmbargoDate', 'NhmSecEmbargoExtensionDate']:
            if data.get(f):
                ts =self.date_to_timestamp(data.get(f))
            else:
                ts = 0
            embargo_list.append(ts)
        # Set the Real Embargo data to the largest embargo or extension date
        data['RealEmbargoDate'] = max(embargo_list)
        return super(MongoCatalogueTask, self).process_record(data)
    def on_success(self):
        """
        On completion, add indexes
        @return: None
        """
        self.collection = self.get_collection()
        log.info("Adding ecatalogue indexes")
        self.collection.ensure_index('ColRecordType')
        # Only include active records - not Stubs etc.,
        self.collection.ensure_index('SecRecordStatus')
        # Add index on RegRegistrationParentRef - select records with the same parent
        self.collection.ensure_index('RegRegistrationParentRef')
        # Need to filter on web publishable
        self.collection.ensure_index('AdmPublishWebNoPasswordFlag')
        # Exclude records if they do not have a GUID
        self.collection.ensure_index('AdmGUIDPreferredValue')
        # Add embargo date index
        self.collection.ensure_index('RealEmbargoDate')
        super(MongoCatalogueTask, self).on_success()
    @staticmethod
    def date_to_timestamp(data_str):
        """
        Convert date string to timestamp
        :return: timestamp
        """
        return time.mktime(datetime.strptime(data_str, "%Y-%m-%d").timetuple())
if __name__ == "__main__":
    # Run the task via luigi's command-line entry point.
    luigi.run(main_task_cls=MongoCatalogueTask)
|
# Parameter fixtures (candidate area-id sets, client platforms, status values).
# NOTE(review): presumably combined to build API requests; the meaning of the
# numeric area ids is not defined here -- confirm upstream.
areas = [
    {
        "area_data": [3364]
    },
    {
        "area_data": [3103, 3159, 3364, 3016, 3017, 3018]
    }
]
platform = [
    {
        "platform_type": "APP",
    },
    {
        "platform_type": "APPLET",
    }
]
status = [
    {
        "status": "ENABLED"
    },
    {
        "status": "DISABLED"
    }
]
|
# Generated by Django 3.2.5 on 2021-08-04 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict Color.color_group to the five predefined group choices."""
    dependencies = [
        ('master_file', '0029_alter_product_sort_group'),
    ]
    operations = [
        migrations.AlterField(
            model_name='color',
            name='color_group',
            field=models.CharField(choices=[('ELEMENT', 'ELEMENT'), ('KEO', 'KEO'), ('PAINT', 'PAINT'), ('STAIN', 'STAIN'), ('Un_spec', 'Un_spec')], max_length=50, verbose_name='Color Group'),
        ),
    ]
|
'''
Memory limits mean a list's capacity is finite.  Creating a list of ten
million elements not only takes a lot of storage; if we only ever access the
first few elements, the space used by the vast majority is wasted.

So if the elements can be derived by some algorithm, we can compute successive
elements on the fly while looping instead of building the complete list,
saving a great deal of space.

In Python this compute-as-you-loop mechanism is called a generator.
A function that uses `yield` is a generator: it returns an iterator and can
only be used for iteration.  Each time execution reaches `yield`, the function
pauses, saves all of its running state and returns the yielded value; the next
call to next() resumes from that point.
'''
# Creating a generator with a generator expression: parentheses instead of
# brackets make it lazy -- values are produced one at a time during the loop.
gen=(x*x for x in range(5))
print(gen)
for num in gen:
    print(num, end=' ')
print('')
#Implementing a generator as a function (using yield)
def fibon(n):
    """Yield the first *n* Fibonacci numbers: 1, 1, 2, 3, 5, ..."""
    current, following = 1, 1
    for _ in range(n):
        yield current
        current, following = following, current + following
# Consume the generator: prints the first ten Fibonacci numbers.
for x in fibon(10):
    print(x,end =' ')
print()
def odd():
    """Generator demonstrating that execution pauses at each yield.

    Prints a step marker before yielding each of 1, 3 and 5.
    """
    for label, value in (('step 1', 1), ('step 2', 3), ('step 3', 5)):
        print(label)
        yield value
# Each next() resumes odd() up to the following yield: the step marker is
# printed and the yielded value (1, then 3, then 5) is returned.
o = odd()
print( next( o ) )
print( next( o ) )
print( next( o ) )
|
import csv
from pathlib import Path
import json
menu_filepath = Path('./Resources/menu_data.csv')
sales_filepath = Path('./Resources/sales_data.csv')
output_path = Path("sales_report.txt")
# Per-item totals: count, revenue, cost of goods sold, profit.
menu = []
sales = []
report = {}
# Load the menu rows (skipping the header).
with open(menu_filepath, 'r') as menu_data:
    reader = csv.reader(menu_data, delimiter=',')
    header = next(reader)
    for row in reader:
        menu.append(row)
# Load the sales rows (skipping the header).
with open(sales_filepath, 'r') as sales_data:
    reader = csv.reader(sales_data)
    header = next(reader)
    for row in reader:
        sales.append(row)
# Index the menu once by item name instead of rescanning the whole menu for
# every sale (previously O(sales * menu)).  Every (price, cost) pair for an
# item is kept, so duplicate menu rows are still all counted, as before.
menu_by_item = {}
for line in menu:
    item, cat, description, price, cost = line[0:5]
    menu_by_item.setdefault(item, []).append((float(price), float(cost)))
for row in sales:
    quantity = int(row[3])         # quantity sold in this transaction
    menu_item = str(row[4])        # item name linking sale -> menu
    if menu_item not in report:
        report[menu_item] = {
            "01-count": 0,
            "02-revenue": 0,
            "03-cogs": 0,
            "04-profit": 0,
        }
    # Accumulate totals from every matching menu entry (usually exactly one).
    for price, cost in menu_by_item.get(menu_item, []):
        report[menu_item]["01-count"] += quantity
        report[menu_item]["02-revenue"] += price * quantity
        report[menu_item]["03-cogs"] += cost * quantity
        report[menu_item]["04-profit"] += (price - cost) * quantity
# Write the report as indented JSON.
with open(output_path, "w") as file:
    json.dump(report, file, indent=2)
|
# Generated by Django 1.11.8 on 2018-01-12 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add DKIM support to Domain: key length/selector, key storage paths and
    an enable flag."""
    dependencies = [
        ('admin', '0011_domain_transport'),
    ]
    operations = [
        migrations.AddField(
            model_name='domain',
            name='dkim_key_length',
            field=models.PositiveIntegerField(blank=True, choices=[(1024, '1024'), (2048, '2048'), (4096, '4096')], null=True, verbose_name='Key length'),
        ),
        migrations.AddField(
            model_name='domain',
            name='dkim_key_selector',
            field=models.CharField(default='modoboa', max_length=30, verbose_name='Key selector'),
        ),
        migrations.AddField(
            model_name='domain',
            name='dkim_private_key_path',
            field=models.CharField(blank=True, max_length=254),
        ),
        migrations.AddField(
            model_name='domain',
            name='dkim_public_key',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='domain',
            name='enable_dkim',
            field=models.BooleanField(default=False, help_text='If you activate this feature, a DKIM key will be generated for this domain.', verbose_name='Enable DKIM signing'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
import pandas as pd
# Two rows of linearly related values: row 1 is row 0 shifted by one.
# Bug fix: pd.DataFrame([range(1,8)], range(2,9)) passed one row of data with
# a 7-entry index (shape mismatch), and d.loc[0]/d.loc[1] below would fail on
# an index starting at 2; the intent was clearly two rows labelled 0 and 1.
d = pd.DataFrame([range(1, 8), range(2, 9)])
d.corr(method="pearson")  # column-wise correlation of the frame (result unused)
s1 = d.loc[0]
s2 = d.loc[1]
# Pearson correlation between the two rows; they are perfectly linear, so 1.0.
p = s1.corr(s2, method="pearson")
print(p)
|
import sys
sys.path.append('../linked_lists');
from node import *
class graph:
    """Adjacency-list graph loaded from a whitespace-separated config file.

    Each non-comment line has the form ``<node number> <value> <neighbors...>``
    with 1-based node numbers in the file, stored 0-based in ``ref_list``.
    Lines starting with '#' are comments; blank lines are ignored (bug fix:
    the previous filter indexed the first character of every stripped line and
    raised IndexError on blank lines).
    """

    def __init__(self, input_file, data_type=str):
        # ref_list[i] holds the node object for file node number i+1.
        self.ref_list = []
        # Pass on the work to the parser.
        self.read_file(input_file, data_type)

    def read_file(self, input_file, data_type=str):
        """Parse *input_file*, converting values with *data_type*, and populate
        self.ref_list with node objects carrying 0-based adjacency lists."""
        # Basic error checking.
        if not isinstance(input_file, str):
            print("ERROR: expected name of the configuration file not provided.")
            return
        # Read the file; keep non-blank, non-comment lines.  Using `with`
        # also fixes the previous unclosed file handle (list(open(...))).
        with open(input_file, "r") as f:
            rows = [line.strip().split(' ') for line in f
                    if line.strip() and line.strip()[0] != '#']
        self.ref_list = [0] * len(rows)
        for element in rows:
            # A row is: node number, node value, then neighbor numbers.
            node_num = int(element[0])
            node_val = data_type(element[1])
            neighbors = [int(item) - 1 for item in element[2:]]
            # Store the node and its (0-based) adjacency list.
            self.ref_list[node_num - 1] = node(node_val)
            self.ref_list[node_num - 1].next = neighbors

    def print_graph(self):
        """Dump every node's index, value and adjacency list to stdout."""
        print("||----||")
        for index, element in enumerate(self.ref_list):
            print(index, element.value, element.next)
        print("||----||")

    def __len__(self):
        """Number of nodes in the graph."""
        return len(self.ref_list)
# Demo: load the sample config file "source" with float node values and dump it.
x = graph("source", float);
x.print_graph();
|
from flask import Flask, json
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_jwt_extended import JWTManager
# for session
from datetime import timedelta
app = Flask(__name__)
# SECURITY(review): hard-coded session secret -- load from environment/config.
app.secret_key = "test"
#SqlAlchemy Database Configuration With Mysql
# Note: the two adjacent string literals concatenate -- the password is empty.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:''@localhost/ibanking'
#mysql+pymysql://username:passwd@host/databasename
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Outgoing mail: Gmail over SSL (port 465), TLS explicitly off.
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
# SECURITY(review): real credentials committed to source control -- rotate this
# password and read both values from environment variables.
app.config['MAIL_USERNAME'] = 'a06204995@gmail.com'
app.config['MAIL_PASSWORD'] = 'Testemail123'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# JWTs are accepted from the Authorization header only; cookie-based options
# are left disabled below.
# app.config['JWT_TOKEN_LOCATION'] = ['cookies', 'headers']
app.config['JWT_TOKEN_LOCATION'] = ['headers']
# app.config['JWT_COOKIE_SECURE'] = False
# app.config['JWT_ACCESS_COOKIE_PATH'] = '/api/'
# app.config['JWT_REFRESH_COOKIE_PATH'] = '/refresh'
# app.config['JWT_COOKIE_CSRF_PROTECT'] = True
# SECURITY(review): hard-coded JWT signing key -- move to configuration.
app.config['JWT_SECRET_KEY'] = 'aaaaaasdfdsf'
# Access tokens and server-side sessions both expire after one hour.
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = timedelta(hours=1)
app.permanent_session_lifetime = timedelta(hours=1)
# Extension instances shared by the rest of the application.
jwt = JWTManager(app)
mail = Mail(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
|
import pandas as pn
from sklearn.svm import SVC
# Training data: column 0 is the class label, remaining columns are features.
data = pn.read_csv('/Users/oleg/PycharmProjects/ML-Found-Yandex/DATA/w3_01.csv', header=None)
# Very large C gives a hard-margin-like linear SVM; fixed random_state for
# reproducibility of the exercise.
clf = SVC(kernel = 'linear', C = 100000, random_state = 241)
X = data.loc[:, 1:]
y = data.loc[:, 0]
# print(data)
clf.fit(X, y)
# Indices of the support vectors found by the fit.
print(clf.support_)
# print(data)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-23 05:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Constrain TxItem.unit to mL/capsule/tablet choices and give
    TxSheet.comment an empty-string default."""
    dependencies = [
        ('treatment_sheets', '0004_txsheet_date'),
    ]
    operations = [
        migrations.AlterField(
            model_name='txitem',
            name='unit',
            field=models.CharField(choices=[('mL', 'mLs'), ('C', 'capsules'), ('T', 'tablets')], default='', max_length=5),
        ),
        migrations.AlterField(
            model_name='txsheet',
            name='comment',
            field=models.TextField(default=''),
        ),
    ]
|
import requests, json, sqlite3, csv, sys
import plotly as py
import plotly.plotly as py
import plotly.graph_objs as go
from collections import Counter
from final_proj_secrets import *
def params_unique_combination(baseurl, params):
    """Build a deterministic cache key from *baseurl* and request *params*.

    Parameters are rendered as "key-value" pairs in alphabetical key order and
    joined with underscores, so identical requests always map to the same key.
    """
    pairs = ["{}-{}".format(key, params[key]) for key in sorted(params.keys())]
    return baseurl + "_".join(pairs)
#----------------------
# Yelp Cache
#----------------------
# Load the on-disk Yelp cache at import time; start empty if unavailable.
CACHE_FNAME1 = 'yelp_data.json'
try:
    print("Opening Yelp Cache")
    cache_file = open(CACHE_FNAME1, 'r')
    cache_contents = cache_file.read()
    CACHE_DICTION_1 = json.loads(cache_contents)
    cache_file.close()
except:
    # NOTE(review): bare except -- any failure (missing file, corrupt JSON)
    # silently falls back to an empty cache; consider narrowing.
    CACHE_DICTION_1 = {}
#----------------------
# TicketMaster Cache
#----------------------
# Same pattern for the TicketMaster cache.
CACHE_FNAME2 = 'ticket_master_data.json'
try:
    print("Opening TMaster Cache")
    cache_file = open(CACHE_FNAME2, 'r')
    cache_contents = cache_file.read()
    CACHE_DICTION_2 = json.loads(cache_contents)
    cache_file.close()
except:
    # NOTE(review): bare except -- same silent fallback as above.
    CACHE_DICTION_2 = {}
####################
# GETTING YELP DATA
####################
def yelp_make_request_using_cache(baseurl,params=None, headers=None):
    """Fetch a Yelp API response, serving from and updating the JSON cache.

    Entries are keyed by *baseurl* plus the sorted params; on a miss the HTTP
    request is made and the whole cache is persisted back to disk.
    """
    unique_ident = params_unique_combination(baseurl, params)
    if unique_ident in CACHE_DICTION_1:
        print("Getting cached data...Yelp")
        return CACHE_DICTION_1[unique_ident]
    print("Making a request for new data...Yelp")
    resp = requests.get(baseurl, headers=headers, params=params)
    CACHE_DICTION_1[unique_ident] = json.loads(resp.text)
    with open(CACHE_FNAME1, "w") as cache_out:
        cache_out.write(json.dumps(CACHE_DICTION_1))
    return CACHE_DICTION_1[unique_ident]
def get_from_yelp(term, location):
    """Search Yelp businesses for *term* near *location* (max 50 results).

    Returns the decoded JSON response.  Bug fix: the result was previously
    computed but never returned, so callers always received None.
    """
    baseurl = "https://api.yelp.com/v3/businesses/search"
    params = {'term': term, "location": location, 'limit': 50}
    headers = {'Authorization': 'Bearer ' + yelp_api_key}
    return yelp_make_request_using_cache(baseurl, headers=headers, params=params)
############################
# GETTING TICKETMASTER DATA
###########################
def t_master_make_request_using_cache(baseurl, params):
    """Fetch a TicketMaster API response through the on-disk JSON cache."""
    key = params_unique_combination(baseurl, params)
    if key in CACHE_DICTION_2:
        print("Getting cached data...TicketMaster")
        return CACHE_DICTION_2[key]
    print("Making a request for new data...Ticket Master")
    resp = requests.get(baseurl, params)
    CACHE_DICTION_2[key] = json.loads(resp.text)
    with open(CACHE_FNAME2, "w") as cache_out:
        cache_out.write(json.dumps(CACHE_DICTION_2))
    return CACHE_DICTION_2[key]
def get_ticketmaster_data(ticket_city):
    """Fetch up to 100 TicketMaster events for *ticket_city* (cached)."""
    ticket_baseurl = "https://app.ticketmaster.com/discovery/v2/events"
    query = {
        "apikey": ticket_api_key,
        "size": '100',
        "city": ticket_city,
    }
    return t_master_make_request_using_cache(ticket_baseurl, query)
# SQLite database file and the postal-code CSV used to seed PostalCodes.
DBNAME = 'food_event.db'
CSV = 'us_postal_codes.csv'
def init_db(db_name, csv_file):
    """(Re)build the SQLite database from the cached API data.

    Drops and recreates the Restaurants, Events and PostalCodes tables,
    populates them from the two JSON cache files and the postal-code CSV,
    then rewrites the zip-code columns as PostalCodes foreign keys.

    :param db_name: path of the SQLite database file to create/overwrite
    :param csv_file: postal-code CSV (zip, city, state, abbrev, county, lat, lon)
    """
    CACHE_FNAME1 = 'yelp_data.json'
    CACHE_FNAME2 = 'ticket_master_data.json'
    try:
        conn = sqlite3.connect(db_name)
        cur = conn.cursor()
    except Error as e:
        # NOTE(review): 'Error' is undefined in this scope -- this handler
        # would itself raise NameError; should be sqlite3.Error.
        print(e)
    #----------------------
    # Drop tables if they exist
    #----------------------
    statement = '''
        DROP TABLE IF EXISTS 'Restaurants'
    '''
    cur.execute(statement)
    conn.commit()
    statement = '''
        DROP TABLE IF EXISTS 'Events'
    '''
    cur.execute(statement)
    conn.commit()
    statement = '''
        DROP TABLE IF EXISTS 'PostalCodes'
    '''
    cur.execute(statement)
    conn.commit()
    # Create 3 tables, Restaurants and Events and Postal Codes
    # Table 1: Restaurant data from YELP
    statement = '''
        CREATE TABLE 'Restaurants' (
            'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
            'name' TEXT,
            'url' TEXT,
            'category1' TEXT,
            'category2' TEXT,
            'category3' TEXT,
            'rating' TEXT,
            'latitude' TEXT,
            'longitude' TEXT,
            'streetAdress' TEXT,
            'city' TEXT,
            'state' TEXT,
            'locationZip_code' TEXT,
            'display_phone' TEXT
        )
    '''
    cur.execute(statement)
    conn.commit()
    # Table 2: Event Data from TICKETMASTER
    statement = '''
        CREATE TABLE 'Events' (
            'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
            'name' TEXT,
            'url' TEXT,
            'localDate' TEXT,
            'codeAvailability' TEXT,
            'venue' TEXT,
            'longitude' TEXT,
            'latitude' TEXT,
            'streetAdress' TEXT,
            'city' TEXT,
            'state' TEXT,
            'PostalCode' TEXT
        )
    '''
    cur.execute(statement)
    conn.commit()
    # Table 3: postal-code reference data loaded from the CSV.
    statement = '''
        CREATE TABLE 'PostalCodes' (
            'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
            'PostalCode' TEXT,
            'city' TEXT,
            'state' TEXT,
            'stateAbbreviation' TEXT,
            'county' TEXT,
            'latitude' TEXT,
            'longitude' TEXT
        )
    '''
    cur.execute(statement)
    conn.commit()
    ###########################
    #### POPULATE YELP TABLE
    ###########################
    # Populate RESTAURANT Table with YELP Data
    restaurant_data = json.load(open(CACHE_FNAME1))
    for ele in restaurant_data.values():
        ele_values = list(ele.values())
        for yelp_lst in ele_values[0]:
            # Businesses may have fewer than three categories; fall back to
            # NULL category columns when indexing them raises.
            try:
                restaurant_insertion = (None, yelp_lst['name'], yelp_lst['url'], yelp_lst['categories'][0]['title'],
                    yelp_lst['categories'][1]['title'], yelp_lst['categories'][2]['title'],
                    yelp_lst['rating'], yelp_lst['coordinates']['latitude'],
                    yelp_lst['coordinates']['longitude'], yelp_lst['location']['address1'], yelp_lst['location']['city'],
                    yelp_lst['location']['state'], yelp_lst['location']['zip_code'], yelp_lst['display_phone'])
            except:
                restaurant_insertion = (None, yelp_lst['name'], yelp_lst['url'], None, None, None, yelp_lst['rating'], yelp_lst['coordinates']['latitude'],
                    yelp_lst['coordinates']['longitude'], yelp_lst['location']['address1'], yelp_lst['location']['city'],
                    yelp_lst['location']['state'], yelp_lst['location']['zip_code'], yelp_lst['display_phone'])
            # print(restaurant_insertion)
            restaurant_statement = 'INSERT INTO "Restaurants" '
            restaurant_statement += 'VALUES(?, ?, ? , ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) '
            cur.execute(restaurant_statement, restaurant_insertion)
    ##################################
    #### POPULATE TICKETMASTER TABLE
    ##################################
    counter = 0
    dict_list = []
    event_dict_lst = []
    # # Populate EVENT Table with TICKETMASTER Data
    event_data = json.load(open(CACHE_FNAME2))
    for ele in event_data.values(): # this gets to the _embedded level of the dictionary
        ele_values = list(ele.values()) #this will give a list that has the dictionary with the events and all of its keys in it
        for ele in ele_values:
            dict_list.append(ele)
    # This code adds all of the events dictionaries to event_dict_lst, use this dictionary to add info to the tables
    # NOTE(review): every third entry is kept -- presumably each response
    # contributes three values of which only the first holds events; confirm.
    while len(dict_list) > counter:
        event_dict_lst.append(dict_list[counter])
        counter +=3
    # Extracting each event dictionary from event_dict_lst
    for each_dict in event_dict_lst:
        event_lst = each_dict['events']
        for ele in event_lst:
            # Venue coordinates may be absent; fall back to NULL lat/long.
            try:
                event_insertion = (None, ele['name'],
                    ele['url'], ele['dates']['start']['localDate'],
                    ele['dates']['status']['code'],
                    ele['_embedded']['venues'][0]['name'],
                    ele['_embedded']['venues'][0]['location']['longitude'],
                    ele['_embedded']['venues'][0]['location']['latitude'],
                    ele['_embedded']['venues'][0]['address']['line1'],
                    ele['_embedded']['venues'][0]['city']['name'],
                    ele['_embedded']['venues'][0]['state']['stateCode'], ele['_embedded']['venues'][0]['postalCode'])
            except:
                event_insertion = (None, ele['name'],
                    ele['url'], ele['dates']['start']['localDate'], ele['dates']['status']['code'],
                    ele['_embedded']['venues'][0]['name'],
                    None, None, ele['_embedded']['venues'][0]['address']['line1'],
                    ele['_embedded']['venues'][0]['city']['name'],
                    ele['_embedded']['venues'][0]['state']['stateCode'], ele['_embedded']['venues'][0]['postalCode'])
            # print(event_insertion)
            event_statement = 'INSERT INTO "Events" '
            event_statement += 'VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) '
            cur.execute(event_statement, event_insertion)
    #############################
    # POPULATE POSTAL CODE TABLE
    #############################
    # CSV file obtained from: https://www.aggdata.com/node/86
    with open(csv_file) as csvfile:
        csvreader = csv.reader(csvfile)
        next(csvreader, None)
        for row in csvreader:
            postalCode_insertion = (None, row[0], row[1], row[2], row[3], row[4], row[5], row[6])
            postalCode_statement ='INSERT INTO "PostalCodes" '
            postalCode_statement += 'VALUES(?, ?, ?, ?, ?, ?, ?, ?) '
            cur.execute(postalCode_statement, postalCode_insertion)
    #############################
    # UPDATE WITH FOREIGN KEYS
    #############################
    # Puts Id from PostalCodes Table into Restaurant and Event Table Zip Codes
    # NOTE: this will return a 5-digit Id code that looks like a zip code but is not
    update_restaurants = '''
        UPDATE Restaurants
        SET locationZip_code = (
            SELECT Id
            FROM PostalCodes as P
            WHERE Restaurants.locationZip_code=P.PostalCode)
    '''
    cur.execute(update_restaurants)
    update_events = '''
        UPDATE Events
        SET PostalCode = (
            SELECT Id
            FROM PostalCodes as P
            WHERE Events.PostalCode=P.PostalCode)
    '''
    cur.execute(update_events)
    conn.commit()
    conn.close()
#######################
# DATA REPRESENTATION 1
#######################
# Module-level accumulators filled by the query helpers below and read by the
# Plotly table builders.
# Data lists for Plotly Table 1:
e_r_name_lst_ename = []
e_r_name_lst_edate = []
e_r_name_lst_rname = []
e_r_name_lst_rrating = []
def ratings(city):
    """Append every restaurant name and rating in *city* to the module-level
    lists ``e_r_name_lst_rname`` / ``e_r_name_lst_rrating``.

    :param city: city name used in a SQL LIKE match
    """
    try:
        conn = sqlite3.connect('food_event.db')
        cur = conn.cursor()
    except sqlite3.Error as e:  # bug fix: bare 'Error' was an undefined name
        print(e)
        return  # without a connection there is nothing to query
    cur.execute("""SELECT Restaurants.name AS [Restaurant Name], Restaurants.rating FROM Restaurants WHERE Restaurants.city like ?""",(city,))
    data_0 = cur.fetchall()
    for row in data_0:
        e_r_name_lst_rname.append(row[0])
        e_r_name_lst_rrating.append(row[1])
    conn.close()
# ratings()
def names_and_ratings(city):
    """Record the name and date of every event sharing a postal code with a
    restaurant in *city*, into ``e_r_name_lst_ename`` / ``e_r_name_lst_edate``."""
    try:
        conn = sqlite3.connect('food_event.db')
        cur = conn.cursor()
    except sqlite3.Error as e:  # bug fix: bare 'Error' was an undefined name
        print(e)
        return
    # Event name/date joined to restaurants (with rating) in the same zip code.
    query_1 = '''
    SELECT E.name AS [Event Name], E.localDate, Restaurants.name AS [Restaurant Name], Restaurants.rating
    FROM Events as E
    JOIN Restaurants ON E.PostalCode = Restaurants.locationZip_code WHERE Restaurants.city like ?
    '''
    cur.execute(query_1, (city,))
    data = cur.fetchall()
    for row in data:
        e_r_name_lst_ename.append(row[0])
        e_r_name_lst_edate.append(row[1])
    conn.close()
# names_and_ratings()
#----------------------------------------------------------------
# This code creates a Plotly Table of Event Name, Event Date, Restauran Name, and Restaurant Rating
#----------------------------------------------------------------
def plotly_table_1():
    """Publish a Plotly table of event name/date beside restaurant name/rating,
    using the module-level accumulator lists."""
    header = dict(
        values=['Event Name', 'localDate', 'Restaurant Name', 'rating'],
        line=dict(color='#7D7F80'),
        fill=dict(color='#a1c3d1'),
        align=['left'] * 5,
    )
    cells = dict(
        values=[e_r_name_lst_ename, e_r_name_lst_edate,
                e_r_name_lst_rname, e_r_name_lst_rrating],
        line=dict(color='#7D7F80'),
        fill=dict(color='#EDFAFF'),
        align=['left'] * 5,
    )
    fig = dict(data=[go.Table(header=header, cells=cells)],
               layout=dict(width=500, height=300))
    py.plot(fig, filename='styled_table')
#----------------------------------------------------------------
# CALLING PLOTLY TABLE - Event Name, Event Date, Restauran Name, and Restaurant Rating
#----------------------------------------------------------------
# plotly_table_1()
#######################
# DATA REPRESENTATION 2
#######################
#-------------------------------------------------------------------------------------------
# CREATING PLOTLY TABLE - Restaurant Name, Restaurant Price Range, Event Name, Event Price Range
#-------------------------------------------------------------------------------------------
# Accumulators for restaurant/event street addresses, filled by
# rest_event_address() and consumed by plotly_table_2().
rest_street_address =[]
event_venue_name = []
event_venue_street_address = []
def rest_event_address(city):
    """Record restaurant addresses plus event venue names/addresses for *city*
    (events matched to restaurants by postal code) in the module-level lists."""
    try:
        conn = sqlite3.connect('food_event.db')
        cur = conn.cursor()
    except sqlite3.Error as e:  # bug fix: bare 'Error' was an undefined name
        print(e)
        return
    query_2 = '''
    SELECT Restaurants.name AS [Restaurant Name], Restaurants.streetAdress AS [Restaurant Address],
    E.name AS [Event Name], E.venue AS [Venue], E.streetAdress AS [Event Address]
    FROM Events as E
    JOIN Restaurants ON E.PostalCode = Restaurants.locationZip_code
    WHERE E.city like ?
    '''
    cur.execute(query_2,(city,))
    data = cur.fetchall()
    for row in data:
        rest_street_address.append(row[1])
        event_venue_name.append(row[3])
        event_venue_street_address.append(row[4])
    conn.close()  # bug fix: the connection was previously left open
#-------------------------------------------------------------------------------------------
# This code creates a Plotly Table of Restaurant Name, Restaurant Type, Event Name, Event Type
#-------------------------------------------------------------------------------------------
def plotly_table_2():
    """Publish a Plotly table of restaurant/event names and street addresses,
    using the module-level accumulator lists."""
    header = dict(
        values=['Restaurant Name', 'Restaurant Address', 'Event Name', 'Venue', 'Event Address'],
        line=dict(color='#7D7F80'),
        fill=dict(color='#a1c3d1'),
        align=['left'] * 5,
    )
    cells = dict(
        values=[e_r_name_lst_rname, rest_street_address, e_r_name_lst_ename,
                event_venue_name, event_venue_street_address],
        line=dict(color='#7D7F80'),
        fill=dict(color='#EDFAFF'),
        align=['left'] * 5,
    )
    fig = dict(data=[go.Table(header=header, cells=cells)],
               layout=dict(width=500, height=300))
    py.plot(fig, filename='styled_table2')
#-----------------------------------------------------------------------------------------------------
# CALLING PLOTLY TABLE 2 - Restaurant Name, Restaurant Address, Event Name, Event Venue, Event Address
#-----------------------------------------------------------------------------------------------------
#plotly_table_2()
#######################
# DATA REPRESENTATION 3
#######################
#----------------------------------------------------------------
# Lists for Plotly Map of Restaurant and Event Locations
#----------------------------------------------------------------
# Latitude/longitude/label accumulators for the scattergeo map, filled by
# rest_event_location_query() and read by plotly_map_r_e_locations().
event_lat_vals = []
event_lon_vals = []
event_text_vals = []
restaurant_lat_vals = []
restaurant_lon_vals = []
restaurant_text_vals = []
def rest_event_location_query(city):
    """Fetch event and restaurant coordinates for `city` from food_event.db.

    Joins Events to Restaurants on postal/zip code and appends each row's
    coordinates and names to the module-level plotting lists
    (event_*_vals / restaurant_*_vals). Closes the connection when done.
    """
    try:
        conn = sqlite3.connect('food_event.db')
        cur = conn.cursor()
    except sqlite3.Error as e:
        # Fix: the original caught a bare `Error` (undefined name here) and
        # then kept going, which would raise NameError on `cur`. Catch the
        # proper sqlite3 base exception and bail out.
        print(e)
        return
    query_3 = '''
    SELECT E.name AS [Event Name], E.longitude, E.latitude, R.name AS [Restaurant Name], R.latitude, R.longitude
    FROM Events as E
    JOIN Restaurants AS R ON E.PostalCode = R.locationZip_code
    WHERE R.city like ?
    GROUP BY R.name, E.name
    '''
    cur.execute(query_3, (city,))
    # Column order: event name, lon, lat, restaurant name, lat, lon.
    for row in cur.fetchall():
        event_lat_vals.append(row[2])
        event_lon_vals.append(row[1])
        event_text_vals.append(row[0])
        restaurant_lat_vals.append(row[4])
        restaurant_lon_vals.append(row[5])
        restaurant_text_vals.append(row[3])
    conn.close()
#----------------------------------------------------------------
# Gets restaurant and event data from SQL:
#----------------------------------------------------------------
# rest_event_location_query()
def plotly_map_r_e_locations():
    """Plot events (red stars) and restaurants (blue circles) on a USA map.

    Reads the module-level coordinate lists populated by
    rest_event_location_query(). If the lists are empty or contain values
    that cannot be converted to float, the ValueError is swallowed and no
    plot is produced (preserving the original best-effort behaviour).
    """
    try:
        trace1 = dict(
            type='scattergeo',
            locationmode='USA-states',
            lon=event_lon_vals,
            lat=event_lat_vals,
            text=event_text_vals,
            mode='markers',
            marker=dict(
                size=20,
                symbol='star',
                color='red'
            ))
        trace2 = dict(
            type='scattergeo',
            locationmode='USA-states',
            lon=restaurant_lon_vals,
            lat=restaurant_lat_vals,
            text=restaurant_text_vals,
            mode='markers',
            marker=dict(
                size=8,
                symbol='circle',
                color='blue'
            ))
        data = [trace1, trace2]
        # Fix: replaced hand-rolled min/max loops seeded with +/-10000
        # sentinels. With empty inputs the sentinels produced a nonsense
        # map range; min()/max() raise ValueError instead, which the outer
        # handler already interprets as "nothing to plot".
        lat_vals = [float(v) for v in event_lat_vals + restaurant_lat_vals]
        lon_vals = [float(v) for v in event_lon_vals + restaurant_lon_vals]
        min_lat, max_lat = min(lat_vals), max(lat_vals)
        min_lon, max_lon = min(lon_vals), max(lon_vals)
        # Center the viewport on the bounding box, padded by 10%.
        center_lat = (max_lat + min_lat) / 2
        center_lon = (max_lon + min_lon) / 2
        max_range = max(abs(max_lat - min_lat), abs(max_lon - min_lon))
        padding = max_range * .10
        lat_axis = [min_lat - padding, max_lat + padding]
        lon_axis = [min_lon - padding, max_lon + padding]
        layout = dict(
            title='Local Restaurants and Events<br>(Hover for site names)',
            geo=dict(
                scope='usa',
                projection=dict(type='albers usa'),
                showland=True,
                landcolor="rgb(250, 250, 250)",
                subunitcolor="rgb(100, 217, 217)",
                countrycolor="rgb(217, 100, 217)",
                lataxis={'range': lat_axis},
                lonaxis={'range': lon_axis},
                center={'lat': center_lat, 'lon': center_lon},
                countrywidth=3,
                subunitwidth=3
            ),
        )
        fig = dict(data=data, layout=layout)
        py.plot(fig, filename='restaurants_and_local_events')
    except ValueError:
        # Empty inputs or non-numeric coordinates: skip plotting silently.
        pass
#----------------------------------------------------------------
# Calling Map that Plots Restaurant and Event Locations in Plotly - from SQL data
#----------------------------------------------------------------
# plotly_map_r_e_locations()
#######################
# DATA REPRESENTATION 4
#######################
# dictionary of all restaurant ratings
# BAR CHART OF RATINGS - THIS WORKS: NEEDS A TITLE
def ratings_bar_graph():
    """Plot a Plotly bar chart of restaurant-rating frequencies.

    Counts occurrences of each rating in the module-level
    e_r_name_lst_rrating list and stores the Counter in the global
    ratings_dict before plotting.
    """
    global ratings_dict
    ratings_dict = Counter(e_r_name_lst_rrating)
    bar = go.Bar(
        # Counter keys/values converted to lists for Plotly consumption.
        x=list(ratings_dict.keys()),
        y=list(ratings_dict.values()),
    )
    py.plot([bar], filename='basic-bar')
# ratings_bar_graph()
####################################
# INTERACTIVE PART
###################################
def load_help_text():
    """Return the full contents of help.txt as a single string."""
    with open('help.txt') as help_file:
        return help_file.read()
def interactive_prompt():
    """Interactive command loop: pick a city, then a visualization command.

    Loops until the user types 'exit' (which terminates via exit()) and
    prints help.txt on 'help'. Fix: the original loop condition
    `while response != "exit"` compared a *list* (after .split(',')) to a
    string, so it was always true; the loop is now an explicit
    `while True` since the only exit path is the exit() call.
    """
    help_text = load_help_text()
    response_lst = ['chicago', 'san francisco', 'new york', 'ann arbor']
    while True:
        response = input('Enter the name of a city: ("Chicago, IL", "San Francisco, CA", "New York, NY", "Ann Arbor, MI") ')
        # Normalize and split "City, ST" into ['city', ' st'].
        response = response.lower().strip().split(',')
        if 'help' in response:
            print(help_text)
            continue
        elif 'exit' in response:
            print("bye")
            exit()
        elif response[0] in response_lst:
            query = input("Please enter a command - rating table, address, rating chart, map: ")
            query = query.lower()
            if 'table' in query:
                ratings(response[0])
                names_and_ratings(response[0])
                plotly_table_1()
            elif 'address' in query:
                ratings(response[0])
                names_and_ratings(response[0])
                rest_event_address(response[0])
                plotly_table_2()
            elif 'chart' in query:
                ratings(response[0])
                ratings_bar_graph()
            elif 'map' in query:
                rest_event_location_query(response[0])
                plotly_map_r_e_locations()
            else:
                print("Please enter a valid command: ")
                continue
        else:
            print("Please enter a valid response or type help or exit ")
            continue
#----------------
# Call for Data
#----------------
if __name__ == "__main__":
    #---------------------------------
    # CODE TO GET DATA FROM API CALLS
    #---------------------------------
    class Calling_data():
        """Drives the Yelp and Ticketmaster fetches for every supported city."""

        def __init__(self):
            self.yelp_city_lst = ["Chicago, IL", "San Francisco, CA", "New York, NY", "Ann Arbor, MI"]
            self.ticket_m_city_lst = ["Chicago", "San Francisco", "New York", "Ann Arbor"]

        def calling(self):
            # Yelp first, then Ticketmaster — same order as the original run.
            for yelp_city in self.yelp_city_lst:
                get_from_yelp("food", yelp_city)
            for tm_city in self.ticket_m_city_lst:
                get_ticketmaster_data(tm_city)

    fetcher = Calling_data()
    fetcher.calling()
    #---------------------------------
    # CALL TO CREATE DATABASE
    #---------------------------------
    init_db(DBNAME, CSV)
    interactive_prompt()
|
from sklearn.ensemble import AdaBoostClassifier
# AdaBoost ensemble with 10000 boosting rounds; the base estimator and
# learning rate are left at scikit-learn defaults. The classifier is only
# constructed here, not fitted.
method=AdaBoostClassifier(n_estimators=10000)
|
from flask import Blueprint
import syft as sy
import torch as th
# Avoid Pytorch deadlock issues
th.set_num_threads(1)
# Hook torch so PySyft tensor extensions are installed process-wide.
hook = sy.TorchHook(th)
# Local virtual worker; auto_add=False keeps it out of the known-workers set.
local_worker = sy.VirtualWorker(hook, auto_add=False)
hook.local_worker.is_client_worker = False
# HTTP and websocket blueprints, registered by the application factory.
main = Blueprint("main", __name__)
ws = Blueprint(r"ws", __name__)
# NOTE(review): these imports are deliberately placed after the blueprints
# are created — presumably routes/events import `main`/`ws` back from this
# package (intentional circular import); confirm before reordering.
from .. import db, executor
from .dfl import auth
from . import routes, events
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/31 21:48
# @Author : Yunhao Cao
# @File : cqueue.py
import queue as thread_queue
from . import logger
from .task import RequestTask
__author__ = 'Yunhao Cao'
__all__ = [
'WebRequestQueue',
]
class Queue(object):
    """Abstract task-queue interface.

    Subclasses must implement `get` and `_put_one`; `put` handles
    fan-out when given a list or tuple of tasks.
    """

    def get(self, block, timeout):
        """Remove and return one task. Must be overridden."""
        raise NotImplementedError

    def _put_one(self, task, block=True, timeout=None):
        """Enqueue a single task. Must be overridden."""
        raise NotImplementedError

    def put(self, task, block=True, timeout=None):
        """Enqueue `task`, or each element if it is a list/tuple."""
        if not isinstance(task, (list, tuple)):
            self._put_one(task, block, timeout)
        else:
            for item in task:
                self.put(item, block, timeout)
class FIFOQueue(Queue):
    """First-in-first-out queue backed by the thread-safe stdlib queue.Queue."""
    def __init__(self):
        # Underlying storage; queue.Queue supplies the blocking semantics.
        self.queue = thread_queue.Queue()
    def get(self, block=True, timeout=None):
        """Remove and return the oldest task (blocking per block/timeout)."""
        return self.queue.get(block, timeout)
    def _put_one(self, task, block=True, timeout=None):
        """Enqueue a single task (returns queue.Queue.put's result, i.e. None)."""
        return self.queue.put(task, block, timeout)
class WebRequestQueue(FIFOQueue):
    """FIFO queue that wraps raw items into RequestTask before enqueueing.

    Fix: removed the `__init__` and `get` overrides — both were pure
    pass-throughs to super() with identical signatures, so dropping them
    leaves behaviour unchanged.
    """

    def _put_one(self, task, block=True, timeout=None):
        logger.info('WebRequestQueue: put new request')
        # Normalize: callers may pass either a RequestTask or a raw request.
        if not isinstance(task, RequestTask):
            task = RequestTask(task)
        return super()._put_one(task, block, timeout)
|
from catalyst.contrib.utils import plot_tensorboard_log
from model import Model
import torch.nn as nn
class CustomRunner(dl.Runner):
    """Catalyst runner for multi-label classification with MAP@k metrics."""
    def _handle_batch(self, batch):
        # model train/valid step
        features, targets = batch["features"], batch["targets"]
        logits = self.model(features)
        # Sigmoid scores feed the ranking (MAP@k) metrics; the criterion
        # receives raw logits (assumes a with-logits loss — the script below
        # uses BCEWithLogitsLoss).
        scores = torch.sigmoid(logits)
        loss = self.criterion(logits, targets)
        accuracy, precision, recall, iou = multi_label_metrics(
            logits, targets, threshold=0.5, activation="Sigmoid"
        )
        map05, map10, map20 = mean_average_precision_at_k(
            scores, targets, top_k=(5, 10, 20)
        )
        batch_metrics = {
            "loss": loss,
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "iou": iou,
            "map05": map05,
            "map10": map10,
            "map20": map20
        }
        # Expose inputs/outputs for callbacks, then merge per-batch metrics.
        self.input = {"features": features, "targets": targets}
        self.output = {"logits": logits, "scores": scores}
        self.batch_metrics.update(batch_metrics)
        # Optimizer step only on the train loader; order is
        # backward -> step -> zero_grad (gradients cleared after stepping).
        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
    def predict_batch(self, batch):
        # model inference step: move the batch to the runner's device and
        # return sigmoid probabilities.
        batch = utils.maybe_recursive_call(batch, "to", device=self.device)
        logits = self.model(batch["features"])
        scores = torch.sigmoid(logits)
        return scores
if __name__ == '__main__':
    # Quick sanity check: one forward pass + loss before full training.
    check = True
    if check:
        model = Model()
        criterion = nn.BCEWithLogitsLoss()
        batch = next(iter(train_loader))
        output = model(batch['features'])
        loss = criterion(output, batch['targets'])
        print(loss)
    # Fresh model/criterion/optimizer for the real training run.
    model = Model()
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    loaders = {"train": train_loader, "valid": valid_loader}
    runner_test = CustomRunner()
    runner_test.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=None,
        loaders=loaders,
        logdir="./logs",
        num_epochs=10,
        verbose=True,
        load_best_on_end=True,
        overfit=False,  # <<<--- DO NOT FORGET TO MAKE IT ``False``
        # (``True`` uses only one batch to check pipeline correctness)
        callbacks=[
            # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
            # dl.AveragePrecisionCallback(input_key="targets", output_key="scores", prefix="ap"),
            # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
            # dl.AUCCallback(input_key="targets", output_key="scores", prefix="auc"),
        ],
        main_metric="iou",  # "ap/mean",
        minimize_metric=False,
    )
    # model inference example
    # Fix: the original iterated `runner.predict_loader(...)`, but `runner`
    # was never defined (NameError); the trained instance is `runner_test`.
    for prediction in runner_test.predict_loader(loader=loaders["valid"]):
        assert prediction.detach().cpu().numpy().shape[-1] == MERCH_TYPE_NCLASSES - 1
    plot_tensorboard_log(
        logdir="./logs",
        step="epoch",
        metrics=[
            "loss", "accuracy", "precision", "recall", "iou",
            "map05", "map10", "map20",
            "ap/mean", "auc/mean"
        ]
    )
|
import numpy as np
# Simple git-practice script: prints two greetings.
print('Hello world')
print("Im learning git")
# Fix: removed a stray bare `123456` literal — it was a no-op expression
# statement. (Original trailing comment, translated: "Added a comment".)
|
import argparse
from loguru import logger
def main():
    """Parse the target filename from argv and write 'hello' into it.

    Any failure while opening/writing is logged and swallowed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", help="file name to process")
    args = parser.parse_args()
    try:
        # NOTE(review): mode 'w' truncates an existing file, which is at odds
        # with the "file name to process" help text — confirm this is intended.
        with open(args.filename, 'w') as f:
            f.write("hello")
    except Exception as ex:
        # Fix: this is a failure path — log at error level, not info.
        logger.error(f"file error: {ex}")


if __name__ == "__main__":
    main()
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import cast
import numpy as np
import pytest
import qutip
from qutip.piqs import isdiagonal
from pulser import Pulse, Register, Sequence
from pulser.devices import Chadoq2, MockDevice
from pulser.waveforms import BlackmanWaveform
from pulser_simulation import QutipEmulator, SimConfig
from pulser_simulation.simresults import CoherentResults, NoisyResults
@pytest.fixture
def reg():
    """Two-atom register: qubit A at the origin, qubit B at (0, 10)."""
    return Register(
        {
            "A": np.array([0.0, 0.0]),
            "B": np.array([0.0, 10.0]),
        }
    )
@pytest.fixture
def pi_pulse():
    """Resonant pi-pulse: Blackman envelope of area pi over duration 1000, zero detuning."""
    waveform = BlackmanWaveform(1000, np.pi)
    return Pulse.ConstantDetuning(waveform, 0.0, 0)
@pytest.fixture
def seq_no_meas(reg, pi_pulse):
    """Sequence with a single global rydberg pi-pulse and no measurement."""
    seq = Sequence(reg, Chadoq2)
    seq.declare_channel("ryd", "rydberg_global")
    seq.add(pi_pulse, "ryd")
    return seq
@pytest.fixture
def sim(seq_no_meas):
    """Emulator for the measured sequence; RNG is seeded so noisy runs and
    sampling in the tests below are reproducible."""
    seq_no_meas.measure("ground-rydberg")
    np.random.seed(123)
    return QutipEmulator.from_sequence(seq_no_meas)
@pytest.fixture
def results_noisy(sim):
    """Results of a run with SPAM, doppler and amplitude noise enabled."""
    sim.add_config(
        SimConfig(noise=("SPAM", "doppler", "amplitude"), amp_sigma=1e-3)
    )
    return sim.run()
@pytest.fixture
def results(sim):
    """Results of a noise-free run of the pi-pulse sequence."""
    return sim.run()
def test_initialization(results):
    """Constructor validation of CoherentResults/NoisyResults and basic
    attributes of a clean run's results."""
    rr_state = qutip.tensor([qutip.basis(2, 0), qutip.basis(2, 0)])
    with pytest.raises(ValueError, match="`basis_name` must be"):
        CoherentResults(rr_state, 2, "bad_basis", None, [0])
    with pytest.raises(
        ValueError, match="`meas_basis` must be 'ground-rydberg' or 'digital'."
    ):
        CoherentResults(rr_state, 1, "all", None, "XY")
    with pytest.raises(
        ValueError,
        match="`meas_basis` and `basis_name` must have the same value.",
    ):
        CoherentResults(
            rr_state, 1, "ground-rydberg", [0], "wrong_measurement_basis"
        )
    with pytest.raises(ValueError, match="`basis_name` must be"):
        NoisyResults(rr_state, 2, "bad_basis", [0], 123)
    with pytest.raises(
        ValueError, match="only values of 'epsilon' and 'epsilon_prime'"
    ):
        CoherentResults(
            rr_state,
            1,
            "ground-rydberg",
            [0],
            "ground-rydberg",
            {"eta": 0.1, "epsilon": 0.0, "epsilon_prime": 0.4},
        )
    # Sanity checks on a valid results object from the fixture.
    assert results._dim == 2
    assert results._size == 2
    assert results._basis_name == "ground-rydberg"
    assert results._meas_basis == "ground-rydberg"
    assert results.states[0] == qutip.tensor(
        [qutip.basis(2, 1), qutip.basis(2, 1)]
    )
@pytest.mark.parametrize("noisychannel", [True, False])
def test_get_final_state(
    noisychannel, sim: QutipEmulator, results, reg, pi_pulse
):
    """get_final_state(): operator vs ket output (with/without a dephasing
    channel), basis reduction and global-phase handling."""
    if noisychannel:
        sim.add_config(SimConfig(noise="dephasing", dephasing_prob=0.01))
    _results = sim.run()
    assert isinstance(_results, CoherentResults)
    final_state = _results.get_final_state()
    # A noisy channel yields a density matrix; otherwise a pure ket.
    assert final_state.isoper if noisychannel else final_state.isket
    with pytest.raises(TypeError, match="Can't reduce"):
        _results.get_final_state(reduce_to_basis="digital")
    assert (
        _results.get_final_state(
            reduce_to_basis="ground-rydberg", ignore_global_phase=False
        )
        == _results.states[-1].tidyup()
    )
    # Get final state is last state in results
    assert np.all(
        np.isclose(
            np.abs(_results.get_final_state(ignore_global_phase=False).full()),
            np.abs(_results.states[-1].full()),
        )
    )
    # For atoms that are far enough there is no impact of global_phase
    # Density matrix states are not changed by global phase
    assert np.all(
        np.isclose(
            np.abs(_results.get_final_state(ignore_global_phase=True).full()),
            np.abs(_results.states[-1].full()),
        )
    )
    # Second sequence mixes rydberg and raman channels ("all" basis).
    seq_ = Sequence(reg, Chadoq2)
    seq_.declare_channel("ryd", "rydberg_global")
    seq_.declare_channel("ram", "raman_local", initial_target="A")
    seq_.add(pi_pulse, "ram")
    seq_.add(pi_pulse, "ram")
    seq_.add(pi_pulse, "ryd")
    sim_ = QutipEmulator.from_sequence(seq_)
    results_ = sim_.run()
    results_ = cast(CoherentResults, results_)
    with pytest.raises(ValueError, match="'reduce_to_basis' must be"):
        results_.get_final_state(reduce_to_basis="all")
    with pytest.raises(TypeError, match="Can't reduce to chosen basis"):
        results_.get_final_state(reduce_to_basis="digital")
    # With a loose tolerance the digital reduction succeeds; the residual
    # population outside the kept subspace must be negligible.
    h_states = results_.get_final_state(
        reduce_to_basis="digital", tol=1, normalize=False
    ).eliminate_states([0])
    assert h_states.norm() < 3e-6
    assert np.all(
        np.isclose(
            np.abs(
                results_.get_final_state(
                    reduce_to_basis="ground-rydberg"
                ).full()
            ),
            np.abs(results.states[-1].full()),
            atol=1e-5,
        )
    )
def test_get_final_state_noisy(reg, pi_pulse):
    """get_final_state() on a SPAM+doppler run returns the expected diagonal
    mixed state; values depend on the fixed RNG seed."""
    np.random.seed(123)
    seq_ = Sequence(reg, Chadoq2)
    seq_.declare_channel("ram", "raman_local", initial_target="A")
    seq_.add(pi_pulse, "ram")
    noisy_config = SimConfig(noise=("SPAM", "doppler"))
    sim_noisy = QutipEmulator.from_sequence(seq_, config=noisy_config)
    res3 = sim_noisy.run()
    # Temporarily switch the measurement basis to exercise both code paths.
    res3._meas_basis = "digital"
    final_state = res3.get_final_state()
    assert isdiagonal(final_state)
    res3._meas_basis = "ground-rydberg"
    # Exact populations pinned by np.random.seed(123) above.
    assert (
        final_state[0, 0] == 0.12 + 0j
        and final_state[2, 2] == 0.8666666666666667 + 0j
    )
    assert res3.states[-1] == final_state
    assert res3.results[-1] == Counter(
        {"10": 0.8666666666666667, "00": 0.12, "11": 0.013333333333333334}
    )
def test_get_state_float_time(results):
    """get_state() with a float time: out-of-range errors, tolerance window,
    and the exact state at the second-to-last sample time."""
    with pytest.raises(IndexError, match="is absent from"):
        results.get_state(-1.0)
    with pytest.raises(IndexError, match="is absent from"):
        # A tolerance smaller than half the sample spacing misses both
        # neighbouring sample times.
        mean = (results._sim_times[-1] + results._sim_times[-2]) / 2
        diff = (results._sim_times[-1] - results._sim_times[-2]) / 2
        results.get_state(mean, t_tol=diff / 2)
    # A wider tolerance snaps to the nearest sampled time.
    state = results.get_state(mean, t_tol=3 * diff / 2)
    assert state == results.get_state(results._sim_times[-2])
    assert np.isclose(
        state.full(),
        np.array(
            [
                [0.76522907 + 0.0j],
                [0.08339973 - 0.39374219j],
                [0.08339973 - 0.39374219j],
                [-0.27977623 - 0.1103308j],
            ]
        ),
    ).all()
def test_expect(results, pi_pulse, reg):
    """expect(): input validation, single-atom pi-pulse expectation values,
    SPAM measurement errors, and a 3-level (all-basis) case."""
    with pytest.raises(TypeError, match="must be a list"):
        results.expect("bad_observable")
    with pytest.raises(TypeError, match="Incompatible type"):
        results.expect(["bad_observable"])
    with pytest.raises(ValueError, match="Incompatible shape"):
        results.expect([np.array(3)])
    # Single atom: a pi-pulse fully transfers population to |r>.
    reg_single = Register.from_coordinates([(0, 0)], prefix="q")
    seq_single = Sequence(reg_single, Chadoq2)
    seq_single.declare_channel("ryd", "rydberg_global")
    seq_single.add(pi_pulse, "ryd")
    sim_single = QutipEmulator.from_sequence(seq_single)
    results_single = sim_single.run()
    op = [qutip.basis(2, 0).proj()]
    exp = results_single.expect(op)[0]
    assert np.isclose(exp[-1], 1)
    assert len(exp) == pi_pulse.duration + 1  # +1 for the final instant
    np.testing.assert_almost_equal(
        results_single._calc_pseudo_density(-1).full(),
        np.array([[1, 0], [0, 0]]),
    )
    # SPAM noise with eta=0 keeps results coherent but adds measurement errors.
    config = SimConfig(noise="SPAM", eta=0)
    sim_single.set_config(config)
    sim_single.set_evaluation_times("Minimal")
    results_single = sim_single.run()
    exp = results_single.expect(op)[0]
    assert len(exp) == 2
    assert isinstance(results_single, CoherentResults)
    assert results_single._meas_errors == {
        "epsilon": config.epsilon,
        "epsilon_prime": config.epsilon_prime,
    }
    # Probability of measuring 1 = probability of false positive
    assert np.isclose(exp[0], config.epsilon)
    # Probability of measuring 1 = 1 - probability of false negative
    assert np.isclose(exp[-1], 1 - config.epsilon_prime)
    np.testing.assert_almost_equal(
        results_single._calc_pseudo_density(-1).full(),
        np.array([[1 - config.epsilon_prime, 0], [0, config.epsilon_prime]]),
    )
    # Mixed rydberg + raman channels -> 3-level ("all") basis expectation.
    seq3dim = Sequence(reg, Chadoq2)
    seq3dim.declare_channel("ryd", "rydberg_global")
    seq3dim.declare_channel("ram", "raman_local", initial_target="A")
    seq3dim.add(pi_pulse, "ram")
    seq3dim.add(pi_pulse, "ryd")
    sim3dim = QutipEmulator.from_sequence(seq3dim)
    exp3dim = sim3dim.run().expect(
        [qutip.tensor(qutip.basis(3, 0).proj(), qutip.qeye(3))]
    )
    assert np.isclose(exp3dim[0][-1], 1.89690200e-14)
def test_expect_noisy(results_noisy):
    """Noisy expect(): rejects non-diagonal observables and reproduces the
    seeded expectation value."""
    np.random.seed(123)
    bad_op = qutip.tensor([qutip.qeye(2), qutip.sigmap()])
    with pytest.raises(ValueError, match="non-diagonal"):
        results_noisy.expect([bad_op])
    op = qutip.tensor([qutip.qeye(2), qutip.basis(2, 0).proj()])
    assert np.isclose(results_noisy.expect([op])[0][-1], 0.7466666666666666)
def test_plot(results_noisy, results):
    """Smoke test: plot() runs for noisy (with/without error bars) and clean
    results without raising."""
    op = qutip.tensor([qutip.qeye(2), qutip.basis(2, 0).proj()])
    results_noisy.plot(op)
    results_noisy.plot(op, error_bars=False)
    results.plot(op)
def test_sim_without_measurement(seq_no_meas):
    """Sampling works on a sequence with no measurement instruction; the
    counts are pinned by the seeded RNG."""
    assert not seq_no_meas.is_measured()
    sim_no_meas = QutipEmulator.from_sequence(
        seq_no_meas, config=SimConfig(runs=1)
    )
    results_no_meas = sim_no_meas.run()
    assert results_no_meas.sample_final_state() == Counter(
        {"00": 80, "01": 164, "10": 164, "11": 592}
    )
def test_sample_final_state(results):
    """sample_final_state(): all four bitstrings observed, and a mismatched
    measurement basis collapses sampling onto '00'."""
    sampling = results.sample_final_state(1234)
    assert len(sampling) == 4  # Check that all states were observed.
    # Switch the measurement basis in the result
    results[-1].matching_meas_basis = False
    sampling0 = results.sample_final_state(N_samples=911)
    assert sampling0 == {"00": 911}
def test_sample_final_state_three_level(seq_no_meas, pi_pulse):
    """Three-level sampling: a local raman pi-pulse leaves the other atom's
    outcomes unchanged until a ground-rydberg measurement is declared."""
    seq_no_meas.declare_channel("raman", "raman_local", "B")
    seq_no_meas.add(pi_pulse, "raman")
    res_3level = QutipEmulator.from_sequence(seq_no_meas).run()
    # Raman pi pulse on one atom will not affect other,
    # even with global pi on rydberg
    assert len(res_3level.sample_final_state()) == 2
    seq_no_meas.measure("ground-rydberg")
    res_3level_gb = QutipEmulator.from_sequence(seq_no_meas).run()
    sampling_three_levelB = res_3level_gb.sample_final_state()
    # Rydberg will affect both:
    assert len(sampling_three_levelB) == 4
def test_sample_final_state_noisy(seq_no_meas, results_noisy):
    """Noisy sampling counts and the averaged final density matrix are
    reproducible under the fixed seed."""
    np.random.seed(123)
    assert results_noisy.sample_final_state(N_samples=1234) == Counter(
        {"11": 772, "10": 190, "01": 161, "00": 111}
    )
    res_3level = QutipEmulator.from_sequence(
        seq_no_meas, config=SimConfig(noise=("SPAM", "doppler"), runs=10)
    )
    final_state = res_3level.run().states[-1]
    # Diagonal mixed state averaged over 10 noisy runs (seed-dependent).
    assert np.isclose(
        final_state.full(),
        np.array(
            [
                [0.54 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
                [0.0 + 0.0j, 0.18 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
                [0.0 + 0.0j, 0.0 + 0.0j, 0.18 + 0.0j, 0.0 + 0.0j],
                [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.1 + 0.0j],
            ]
        ),
    ).all()
def test_results_xy(reg, pi_pulse):
    """XY-basis results: attributes, forbidden basis reductions, and the
    measurement projectors for microwave (XY) measurements."""
    seq_ = Sequence(reg, MockDevice)
    # Declare Channels
    seq_.declare_channel("ch0", "mw_global")
    seq_.add(pi_pulse, "ch0")
    seq_.measure("XY")
    sim_ = QutipEmulator.from_sequence(seq_)
    results_ = sim_.run()
    assert results_._dim == 2
    assert results_._size == 2
    assert results_._basis_name == "XY"
    assert results_._meas_basis == "XY"
    assert results_.states[0] == qutip.tensor(
        [qutip.basis(2, 1), qutip.basis(2, 1)]
    )
    # Only reduction to the XY basis itself is permitted.
    with pytest.raises(TypeError, match="Can't reduce a system in"):
        results_.get_final_state(reduce_to_basis="all")
    with pytest.raises(TypeError, match="Can't reduce a system in"):
        results_.get_final_state(reduce_to_basis="ground-rydberg")
    with pytest.raises(TypeError, match="Can't reduce a system in"):
        results_.get_final_state(reduce_to_basis="digital")
    state = results_.get_final_state(reduce_to_basis="XY")
    assert np.all(
        np.isclose(
            np.abs(state.full()), np.abs(results_.states[-1].full()), atol=1e-5
        )
    )
    # Check that measurement projectors are correct
    assert results_._meas_projector(0) == qutip.basis(2, 1).proj()
    assert results_._meas_projector(1) == qutip.basis(2, 0).proj()
|
from wfdb.processing.basic import (
resample_ann,
resample_sig,
resample_singlechan,
resample_multichan,
normalize_bound,
get_filter_gain,
)
from wfdb.processing.evaluate import (
Comparitor,
compare_annotations,
benchmark_mitdb,
)
from wfdb.processing.hr import compute_hr, calc_rr, calc_mean_hr, ann2rr, rr2ann
from wfdb.processing.peaks import find_peaks, find_local_peaks, correct_peaks
from wfdb.processing.qrs import XQRS, xqrs_detect, gqrs_detect
from wfdb.processing.filter import sigavg
|
from pandac.PandaModules import *
from MarginCell import MarginCell
import random
class MarginManager(PandaNode):
    """Scene-graph node that assigns popup widgets to a grid of margin cells,
    keeping the highest-priority popups visible when cells are scarce."""
    def __init__(self):
        PandaNode.__init__(self, 'margins')
        # All MarginCell instances managed by this node.
        self.cells = set()
        # Popups currently requesting display.
        self.visiblePopups = set()
    def addGridCell(self, x, y, left, right, bottom, top):
        """Create and register a MarginCell at grid position (x, y) inside
        the given screen bounds; returns the new cell."""
        padding = 0.125
        scale = 0.2
        # Usable area is inset by half a cell plus padding on every side.
        xStart = left + scale / 2.0 + padding
        yStart = bottom + scale / 2.0 + padding
        xEnd = right - scale / 2.0 - padding
        yEnd = top - scale / 2.0 - padding
        # Grid pitch: 5 columns across, 3.5 rows down (fractional spacing).
        xInc = (xEnd - xStart) / 5.0
        yInc = (yEnd - yStart) / 3.5
        cell = MarginCell(self)
        cell.reparentTo(NodePath.anyPath(self))
        cell.setScale(scale)
        cell.setPos(xStart + xInc * x, 0, yStart + yInc * y)
        cell.setAvailable(True)
        # Tag the node so setCellAvailable() can recover the MarginCell
        # object from a plain NodePath handle.
        cell.setPythonTag('MarginCell', cell)
        self.cells.add(cell)
        self.reorganize()
        return cell
    def setCellAvailable(self, cell, available):
        """Mark a cell (passed as a tagged NodePath) available/unavailable."""
        cell = cell.getPythonTag('MarginCell')
        cell.setAvailable(available)
        self.reorganize()
    def addVisiblePopup(self, popup):
        """Register a popup for display and reassign cells."""
        self.visiblePopups.add(popup)
        self.reorganize()
    def removeVisiblePopup(self, popup):
        """Unregister a popup (no-op if not registered) and reassign cells."""
        if popup not in self.visiblePopups:
            return
        self.visiblePopups.remove(popup)
        self.reorganize()
    def reorganize(self):
        """Reassign popups to available cells, highest priority first.

        Popups already displayed in an active cell keep it; the rest prefer
        their previous cell and otherwise get a random free one.
        """
        activeCells = [cell for cell in self.cells if cell.isAvailable()]
        popups = list(self.visiblePopups)
        # Highest priority first; overflow beyond the cell count is dropped.
        popups.sort(key=lambda x: -x.getPriority())
        popups = popups[:len(activeCells)]
        freeCells = []
        for cell in activeCells:
            if not cell.hasContent():
                freeCells.append(cell)
            elif cell.getContent() in popups:
                # Already showing a surviving popup — leave it in place.
                popups.remove(cell.getContent())
            else:
                # Evict content that no longer makes the cut.
                cell.setContent(None)
                freeCells.append(cell)
        assert len(freeCells) >= len(popups)
        for popup in popups:
            # NOTE(review): assumes every popup has a _lastCell attribute —
            # presumably set by MarginCell.setContent(); confirm.
            if popup._lastCell in freeCells and popup._lastCell.isFree():
                popup._lastCell.setContent(popup)
                freeCells.remove(popup._lastCell)
            else:
                cell = random.choice(freeCells)
                cell.setContent(popup)
                freeCells.remove(cell)
|
import sys
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import MinValueValidator, MaxValueValidator
from six import BytesIO
from users.models import User
from django.db import models
import datetime
from mptt.models import MPTTModel, TreeForeignKey
class Category(MPTTModel):
    """Hierarchical service category (MPTT tree; self-referencing parent)."""
    name = models.CharField(max_length=255, unique=True, verbose_name='Категория')
    parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')
    class MPTTMeta:
        # Keep siblings alphabetically ordered on insert.
        order_insertion_by = ['name']
    def __str__(self):
        return f'{self.name}'
class Service(models.Model):
    """A service listing posted by a user, with optional price and image."""
    STATUS = (('Активный', 'Активный'), ('Закрытый', 'Закрытый'))
    # Image constraints (not enforced in save(); presumably used by forms/validators).
    MIN_RESOLUTION = (400, 400)
    MAX_RESOLUTION = (1200, 1200)
    MAX_IMAGE_SIZE = 3145728
    category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name='Категория')
    # NOTE(review): default=User passes the model *class* as the default FK
    # value, which is not valid — confirm and drop or replace with a callable.
    user = models.ForeignKey(User, on_delete=models.CASCADE, default=User, verbose_name='Пользователь')
    title = models.CharField(max_length=100, verbose_name='Заголовок')
    description = models.TextField(max_length=1000, verbose_name='Описание')
    price = models.DecimalField(max_digits=9, decimal_places=2, null=True, blank=True, verbose_name='Цена')
    address = models.CharField(max_length=255, verbose_name='Адрес')
    date_create = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')
    # Fix: the original default, datetime.datetime.today().strftime('%d.%m.%Y'),
    # was evaluated ONCE at import time, so every new row received the same
    # stale date string. A callable default is evaluated per row instead.
    date_active = models.DateField(default=datetime.date.today, blank=True, null=True, verbose_name='Активно до')
    status = models.CharField(max_length=255, choices=STATUS, default='Активный', verbose_name='Статус')
    image = models.ImageField(upload_to='img', null=True, blank=True, verbose_name='Изображения')

    def __str__(self):
        return f'{self.title}'

    # Resize the uploaded image after saving (translated: "resize the picture").
    def save(self, *args, **kwargs):
        # Fix: forward *args/**kwargs so `using=`, `update_fields=` etc. work.
        super().save(*args, **kwargs)
        # Fix: image is nullable — only open/resize when one was uploaded.
        if self.image:
            img = Image.open(self.image.path)
            if img.height > 300 or img.width > 300:
                # Shrink in place so the largest side is at most 300 px.
                img.thumbnail((300, 300))
                img.save(self.image.path)
class Comment(models.Model):
    """Threaded comment on a Service (self-referencing parent for replies)."""
    parent = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True, related_name='children')
    # NOTE(review): default=User passes the model class as the FK default,
    # which is not a valid value — confirm intent.
    user = models.ForeignKey(User, on_delete=models.CASCADE, default=User, verbose_name='Пользователь')
    service = models.ForeignKey(Service, on_delete=models.CASCADE)
    text = models.TextField(max_length=1000, verbose_name='Ваше сообщение')
    date_create = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Oldest comments first.
        ordering = ['date_create']
    def __str__(self):
        return f'{self.text}'
class Contractor(models.Model):
    """A service-provider profile linked to a user and a work category."""
    # NOTE(review): default=User passes the model class as the FK default,
    # which is not a valid value — confirm intent.
    user = models.ForeignKey(User, on_delete=models.CASCADE, default=User, verbose_name='Пользователь')
    description = models.TextField(max_length=500, verbose_name='О себе')
    work = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name='Вид работ')
    phone = models.CharField(max_length=25, verbose_name='Телефон')

    def get_average_review_score(self):
        """Return the mean of related Rating.rating values rounded to one
        decimal place, or 0.0 when there are no ratings."""
        # Cache the count: the original issued the COUNT query twice.
        count = self.rating_cont.count()
        if count == 0:
            return 0.0
        # Generator expression avoids materializing an intermediate list.
        total_score = sum(rating.rating for rating in self.rating_cont.all())
        return round(total_score / count, 1)

    def __str__(self):
        return f'{self.user}, {self.phone}'
class Rating(models.Model):
    """A 1-5 star rating left by a user for a contractor."""
    contractor = models.ForeignKey(Contractor, on_delete=models.CASCADE, verbose_name='Исполнитель',
                                   related_name='rating_cont')
    # SET_NULL keeps the rating if the authoring user is deleted.
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, verbose_name='Пользователь')
    rating = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])
    date_create = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f'{self.rating}'
class CommentContract(models.Model):
    """A comment left on a contractor's profile."""
    # NOTE(review): default=User passes the model class as the FK default,
    # which is not a valid value — confirm intent.
    user = models.ForeignKey(User, on_delete=models.CASCADE, default=User, verbose_name='Пользователь')
    contractor = models.ForeignKey(Contractor, on_delete=models.CASCADE)
    text = models.TextField(max_length=1000, verbose_name='Ваше сообщение')
    date_create = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Oldest comments first.
        ordering = ['date_create']
    def __str__(self):
        return f'{self.text}'
# API
class Compare(models.Model):
    """A service saved to a comparison list (exposed through the API)."""
    service = models.ForeignKey(Service, on_delete=models.CASCADE)
    def __str__(self):
        return f'{self.service}'
|
#import all library that needed
import sqlite3
import tweepy
from nltk.corpus import stopwords
import re
import string
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
import json
import pandas as pd
import itertools
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from wordcloud import WordCloud
import numpy as np
from PIL import Image
# create class sentiment
class sentiment:
    """Twitter crawling, cleaning and sentiment-storage helper bound to one
    search topic and one SQLite database file."""
    def __init__(self, topik, database):
        # topik: search query string; database: path to the SQLite file.
        self.topik = topik
        self.database = database
    # Crawl tweets matching the topic from the Twitter search API.
    def crawling(self, angka):
        """Return up to `angka` Indonesian-language tweets (retweets excluded,
        full text mode). Credentials below are redacted placeholders."""
        consumer_key = 'xxxxxxxxxxxxxxxxxxxxxxxxxx'
        consumer_secret = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        access_token = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        access_token_secret = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth)
        query = self.topik
        # Exclude retweets from the search results.
        new_query = query+'-filter:retweets'
        get_data = api.search(q=new_query, count=angka, lang='id', result_type='mixed', tweet_mode='extended')
        return get_data
    # Dump a Python object to a JSON file.
    def ke_json (self, nama_file):
        """Serialize `nama_file` (the data object, despite the name) into the
        hard-coded file coba.json."""
        with open("coba.json", "w") as write_file:
            return json.dump(nama_file, write_file)
    # Wrap data in a pandas DataFrame.
    def ke_df(self, nama_file):
        """Return `nama_file` as a pandas DataFrame."""
        df = pd.DataFrame(nama_file)
        return df
    # Open a connection to the SQLite database.
    def open_connection(self):
        """Open and store the sqlite3 connection on self.connection."""
        self.connection = sqlite3.connect(self.database)
    # Mark any currently-active scrapping batch as inactive.
    def check_scrappingid(self):
        """Set status=0 on any Lastscrapping row with status=1; returns the
        (empty) fetchall result of the UPDATE."""
        query = '''UPDATE Lastscrapping SET status = 0 WHERE status = 1;'''
        conn = self.connection
        cursor = conn.cursor()
        cursor.execute(query)
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Record a new scrapping batch.
    def input_scrappingid(self, date, stat):
        """Insert a Lastscrapping row with the given date and status flag."""
        query = '''INSERT INTO Lastscrapping (last_get, status)
        VALUES (?,?);'''
        conn = self.connection
        cursor = conn.cursor()
        cursor.execute(query, (date, stat))
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Insert user rows (parallel lists, one element per user).
    def input_databaseuser(self, usid, nm, snme, loc, acc_cr, fwr, frd, vfd):
        """Insert one User row per element; duplicates are ignored
        (INSERT OR IGNORE). Returns fetchall of the last INSERT (empty)."""
        query = '''INSERT OR IGNORE INTO User VALUES (?,?,?,?,?,?,?,?);'''
        cursor = self.connection.cursor()
        conn = self.connection
        for j in range(len(nm)):
            cursor.execute(query, (usid[j], nm[j], snme[j], loc[j], acc_cr[j], fwr[j], frd[j], vfd[j]))
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Insert tweet rows for one scrapping batch.
    def input_tweet(self, twid, usid, ctd, twt, scid):
        """Insert one Tweet row per element of the parallel lists, all tagged
        with scrapping batch id `scid`; duplicates are ignored."""
        query = '''INSERT OR IGNORE INTO Tweet (tweetid, userid, createddate, tweet, scrapping_id)
        VALUES (?,?,?,?,?);'''
        cursor = self.connection.cursor()
        conn = self.connection
        for i in range(len(twid)):
            cursor.execute(query, (twid[i], usid[i], ctd[i], twt[i], scid))
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Run an arbitrary SELECT (or other SQL) passed in by the caller.
    def select_table(self, code):
        """Execute `code` verbatim and return all fetched rows.
        NOTE(review): executes raw SQL from the caller — only use with
        trusted, hard-coded queries."""
        query = code
        cursor = self.connection.cursor()
        conn = self.connection
        cursor.execute(query)
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Store sentiment labels for tweets.
    def input_sentiment(self, twtid, stmnt):
        """Insert one Sentiment row per (tweet id, sentiment) pair;
        duplicates are ignored."""
        query = '''INSERT OR IGNORE INTO Sentiment
        VALUES (?,?);'''
        cursor = self.connection.cursor()
        conn = self.connection
        for i in range(len(twtid)):
            cursor.execute(query, (twtid[i], stmnt[i]))
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Store cleaned tweet text back onto the Tweet rows.
    def input_clean(self, clean, twtid):
        """Update Tweet.cleantweet for each (cleaned text, tweet id) pair."""
        query = '''UPDATE Tweet SET cleantweet = ? WHERE tweetid = ?;'''
        cursor = self.connection.cursor()
        conn = self.connection
        for k in range(len(clean)):
            cursor.execute(query, (clean[k], twtid[k]))
        hasil = cursor.fetchall()
        conn.commit()
        cursor.close()
        return hasil
    # Commit and close the database connection.
    def close_connection(self):
        self.connection.commit()
        self.connection.close()
    # Clean one tweet: lowercase, strip digits/links/punctuation/stopwords, stem.
    def cleaning(self, kalimat):
        """Return `kalimat` normalized for Indonesian sentiment analysis:
        lowercased, digits removed, text after the first 'http' dropped,
        punctuation stripped, Indonesian stopwords removed, then stemmed
        with Sastrawi."""
        factory = StemmerFactory()
        stemmer = factory.create_stemmer()
        stopword = stopwords.words('indonesian')
        lower = kalimat.lower()
        del_num = re.sub(r"\d+", "", lower)
        # Everything from the first 'http' onwards is discarded.
        del_link = del_num.split('http')[0]
        del_punc = del_link.translate(str.maketrans('','',string.punctuation))
        del_space = del_punc.strip()
        words = del_space.split()
        resultwords = [word for word in words if word not in stopword]
        result = ' '.join(resultwords)
        return stemmer.stem(result)
#run the class
# --- Driver script: crawl tweets, persist to SQLite, plot top words -------
#run the class (search query, SQLite database file)
analisa = sentiment("Habib Rizieq Shihab", 'firdaus.salim24_final.db')
#start to crawl data (200 tweets)
x = analisa.crawling(200)
#gather data from crawling result into specific tweet data
user = [a.user for a in x]
usid = [a.id for a in x]
tweet = [a.full_text for a in x]
date = [a.created_at for a in x]
twtid = [a.id for a in x]
#gather data from crawling result into specific user data
userid = [a.user.id for a in x]
name = [a.user.name for a in x]
screenname = [a.user.screen_name for a in x]
location = [a.user.location for a in x]
account_created = [a.user.created_at for a in x]
follower = [a.user.followers_count for a in x]
friend = [a.user.friends_count for a in x]
verified = [a.user.verified for a in x]
acc_date = [a.user.created_at for a in x]
#open connection to sqlite database
analisa.open_connection()
#check batch scrapping
analisa.check_scrappingid()
#input tweet table (17 = hard-coded scraping-batch id -- TODO confirm)
analisa.input_tweet(twtid, userid, date, tweet, 17)
#update scrapping batch
analisa.input_scrappingid('2020-11-26', 1)
#input table user
analisa.input_databaseuser(userid, name, screenname, location, acc_date, follower, friend, verified)
#cleaning data (normalise each raw tweet text)
data_clean = []
for kata in tweet:
    v = analisa.cleaning(kata)
    data_clean.append(v)
#input cleaned tweet
analisa.input_clean(data_clean, twtid)
#sqlite select table (x is rebound from tweet objects to DB rows here)
x = analisa.select_table('''SELECT * FROM Tweet;''')
user = analisa.select_table('''SELECT * FROM User;''')
sentimen = analisa.select_table('''SELECT * FROM Sentiment;''')
# Join each user's name to the sentiment of their tweets.
sentimen_id = analisa.select_table('''SELECT a.name, c.sentiment
FROM User a
INNER JOIN Tweet b ON a.userid = b.userid
INNER JOIN Sentiment c ON b.tweetid = c.tweetid
;''')
#Top Words
df = pd.DataFrame(x)
# Column 4 is presumably the cleaned-tweet column -- verify Tweet schema.
tweetbersih = df[4].tolist()
print(tweetbersih)
data = [word_tokenize(paragraf) for paragraf in tweetbersih]
# Flatten the per-tweet token lists into one token stream.
data = list(itertools.chain(*data))
fqdist = FreqDist(data)
#Plot Top Words (10 most frequent tokens)
plt.figure(figsize=(15,10))
fqdist.plot(10,cumulative=False, marker='o')
plt.show()
#Wordcloud masked by the Twitter-bird image
data_1 = ' '.join(data)
job_mask = np.array(Image.open("twitter-2012-positive.png"))
wordcloud = WordCloud(background_color="white", max_words=2000, mask=job_mask, width=1600, height=800, max_font_size=200).generate(data_1)
wordcloud.to_file("wordcloud twitter.png")
plt.figure(figsize=(12,10))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.figure()
plt.axis("off")
plt.show()
#sentiment Analysis: dictionary-based scoring against word lists
# Load the positive / negative lexicons; `with` closes the handles
# (the original left both files open for the rest of the program).
with open("./kata_positif.txt","r") as pos_list:
    pos_kata = pos_list.readlines()
with open("./kata_negatif.txt","r") as neg_list:
    neg_kata = neg_list.readlines()
# Strip the newline off each lexicon entry once, instead of re-stripping
# every entry for every tweet.
pos_words = [kata.strip() for kata in pos_kata]
neg_words = [kata.strip() for kata in neg_kata]
# One pass per tweet: print the hit counts (as before) and accumulate the
# score (#positive hits - #negative hits).  The original scanned every
# tweet twice with two identical loops; the printed output and the final
# `hasil` list are unchanged.
hasil = []
for item in tweetbersih:
    count_p = sum(1 for kata in pos_words if kata in item)
    count_n = sum(1 for kata in neg_words if kata in item)
    print ("positif: "+str(count_p))
    print ("negatif: "+str(count_n))
    hasil.append(count_p - count_n)
# Summary statistics of the per-tweet sentiment scores.
print ("Nilai rata-rata: "+str(np.mean(hasil)))
print ("Standar deviasi: "+str(np.std(hasil)))
# Histogram of the raw numeric scores.
labels, counts = np.unique(hasil, return_counts=True)
plt.figure(figsize=(14,7))
plt.bar(labels, counts, align='center', edgecolor='black')
plt.gca().set_xticks(labels)
plt.xlabel('Sentiment Score')
plt.ylabel('Count')
plt.title('Sentiment Analysis')
plt.savefig('sentiment_score', dpi=100)
plt.show()
# Map numeric scores to categorical labels.
# NOTE(review): this rebinds the name `sentiment`, shadowing the class of
# the same name defined earlier in the file.
sentiment = []
for nilai in hasil:
    if nilai > 0 :
        sentiment.append('Positive')
    elif nilai == 0:
        sentiment.append('Neutral')
    else:
        sentiment.append('Negative')
df_sentiment = pd.DataFrame({'tweetbersih':tweetbersih, 'sentimen_angka':hasil, 'sentiment':sentiment})
# Bar chart of Positive/Neutral/Negative counts.
labels, counts = np.unique(df_sentiment['sentiment'], return_counts=True)
plt.figure(figsize=(10,7))
plt.bar(labels, counts, align='center', color='green', width=0.2)
plt.xlabel('Sentiment')
plt.ylabel('Counts')
plt.title('Sentiment Analysis')
plt.gca().set_xticks(labels)
plt.savefig('chart_sentimen', dpi=100)
plt.show()
# Attach the categorical label and numeric score to the tweet frame.
df['Sentiment'] = sentiment
df['sentiment_angka']=hasil
twtid = df[0].tolist()
stmnt = df['sentiment_angka'].tolist()
# analisa.input_sentiment(twtid, stmnt)
#close connection
# BUG FIX: the original read `analisa.close_connection` with no parentheses,
# which only references the bound method -- the connection was never
# committed or closed.
analisa.close_connection()
#Exploratory Data
df_user = pd.DataFrame(user)
# Column 2 is presumably the tweet created-date column -- verify schema.
df[2]=pd.to_datetime(df[2])
df['new_date'] = df[2].dt.date
# Count of crawled tweets per calendar day.
s = df.groupby('new_date').size()
fig, ax = plt.subplots(figsize=(14,7))
ax.plot(s.index, s, marker='o')
ax.set_xlabel('Date')
ax.set_ylabel('Count')
ax.set_title('Data Crawling Counts per Date')
# Annotate each point with its count.
for i,j in zip(s.index,s):
    ax.annotate( str(j),xy=(i,j),fontsize=12)
plt.xticks(rotation=35)
plt.savefig('grafik crawling', dpi=100)
plt.show()
# Column 7 is presumably the `verified` flag (0/1) -- verify User schema.
status = []
for value in df_user[7]:
    if value == 1:
        status.append('verified')
    else:
        status.append('not verified')
df_user[8]=status
# Verified vs not-verified account counts.
df_group = df_user.groupby(8).size()
fig, ax = plt.subplots(figsize=(10,8))
ax.bar(df_group.index, df_group, label='Sentiment', width=0.2, color='darkseagreen')
plt.savefig('vstatus', dpi=100)
plt.show()
df_ids = pd.DataFrame(sentimen_id)
# Column 4 is presumably the account-created date -- verify User schema.
df_user[4]=pd.to_datetime(df_user[4])
df_user['new_date'] = df_user[4].dt.year
# Accounts created per year.
j = df_user.groupby('new_date').size()
fig, ax = plt.subplots(figsize=(14,7))
ax.bar(j.index, j, color='purple')
ax.set_xlabel('Year Created')
ax.set_ylabel('Count')
ax.set_title('Account Created Date Count')
plt.savefig('acc_created', dpi=100)
plt.show()
# Stacked bars: creation year split by verified status.
r = df_user.groupby(['new_date', 8]).size().unstack(level=-1).reset_index()
fig, ax = plt.subplots(figsize=(14,7))
ax.bar(r['new_date'], r['not verified'], label='not verified')
ax.bar(r['new_date'], r['verified'], bottom=r['not verified'], label='verified')
ax.set_xlabel('Year')
ax.set_ylabel('Count')
ax.set_title('Account Created Date Count')
plt.legend()
plt.savefig('acc_created status', dpi=100)
plt.show()
# Wordclouds of the raw positive / negative lexicons themselves.
pos_kata = ' '.join(pos_kata)
wordcloud = WordCloud(width=1600, height=800, max_font_size=200).generate(pos_kata)
plt.figure(figsize=(12,10))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.savefig('dict_pos_kata', dpi=100)
plt.show()
neg_kata = ' '.join(neg_kata)
wordcloud = WordCloud(width=1600, height=800, max_font_size=200).generate(neg_kata)
plt.figure(figsize=(12,10))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.savefig('dict_neg_kata', dpi=100)
plt.show()
|
def array_conversion(arr):
    """Repeatedly combine adjacent pairs, alternating sum and product.

    Pass 1 replaces each consecutive pair with its sum, pass 2 with its
    product, and so on alternately, until one element remains; that
    element is returned.
    """
    use_sum = True
    while len(arr) != 1:
        if use_sum:
            combine = lambda left, right: left + right
        else:
            combine = lambda left, right: left * right
        # zip of the even- and odd-indexed slices pairs (0,1), (2,3), ...
        arr = [combine(left, right) for left, right in zip(arr[::2], arr[1::2])]
        use_sum = not use_sum
    return arr[0]
'''
Task
Given an array of 2k integers (for some integer k), perform the
following operations until the array contains only one element:
On the 1st, 3rd, 5th, etc.
iterations (1-based) replace each pair of consecutive elements with their sum;
On the 2nd, 4th, 6th, etc.
iterations replace each pair of consecutive elements with their product.
After the algorithm has finished, there will be a single element left in the array.
Return that element.
Example
For inputArray = [1, 2, 3, 4, 5, 6, 7, 8], the output should be 186.
We have [1, 2, 3, 4, 5, 6, 7, 8] -> [3, 7, 11, 15] -> [21, 165] -> [186], so the answer is 186.
Input/Output
[input] integer array arr
Constraints: 21 ≤ arr.length ≤ 25, -9 ≤ arr[i] ≤ 99.
[output] an integer
'''
|
from django.db import models
class URL(models.Model):
    """A shortened URL mapping: original address -> generated short form."""
    # Both sides are unique, so each original URL has exactly one short form.
    original_url = models.URLField(max_length=200, unique=True)
    shortened_url = models.URLField(max_length=200, unique=True)
    is_active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f'{self.original_url} => {self.shortened_url}'
class FriendlyURL(models.Model):
    """An optional human-readable slug pointing at an existing URL row."""
    friendly_shortened_url = models.SlugField(
        max_length=100, blank=True, unique=True, null=True,
    )
    # Deleting the URL cascades to its friendly aliases.
    original_url = models.ForeignKey(
        to=URL, on_delete=models.CASCADE, related_name='friendly_url'
    )
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return f'{self.original_url.original_url} => \
            {self.friendly_shortened_url}'
|
import json

# Sample criteria weights; each criterion maps factor names to scores.
jsonstring = '''
{
    "accuracy": {
        "fit": 0.1,
        "sig": 0.2,
        "col": 0.3
    },
    "comfort": {
        "fit": 0.4,
        "sig": 0.5,
        "col": 0.6
    },
    "duration": {
        "fit": 0.7,
        "sig": 0.8,
        "col": 0.9
    },
    "time": {
        "fit": 0.4,
        "sig": 0.3,
        "col": 0.2
    }
}
'''
jsonDict = json.loads(jsonstring)
list_of_criteria = jsonDict.keys()
# BUG FIX: the original used Python-2 `print x` statements, a SyntaxError
# under Python 3; print() works on both.
print(list_of_criteria)
# Total score per criterion = sum of its factor values.
dict_of_sums = {criteria: sum(jsonDict[criteria].values()) for criteria in list_of_criteria}
print(dict_of_sums)
|
import logging
import fmcapi
import time
def test__acp_rule(fmc):
    """Integration test for fmcapi.AccessRules against a live FMC.

    Creates every object type an access rule can reference (hosts,
    networks, ranges, FQDNs, groups, URLs, VLAN tags, ports, zones, an
    access policy), builds one rule that uses them all, then deletes
    everything in reverse dependency order.

    :param fmc: an authenticated fmcapi FMC connection object.
    """
    logging.info(
        "In preparation for testing ACPRule methods, set up some known objects in the FMC."
    )
    # Timestamp-based name avoids collisions with earlier test runs.
    starttime = str(int(time.time()))
    namer = f"_fmcapi_test_{starttime}"
    # Build an IP host object
    iphost1 = fmcapi.Hosts(fmc=fmc, name="_iphost1", value="7.7.7.7")
    iphost1.post()
    # Build an IP Network object
    ipnet1 = fmcapi.Networks(fmc=fmc, name="_ipnet1", value="1.2.3.0/24")
    ipnet1.post()
    # Build an IP range object
    iprange1 = fmcapi.Ranges(fmc=fmc, name="_iprange1", value="6.6.6.6-7.7.7.7")
    iprange1.post()
    # Build a Network Group object
    ipnet2 = fmcapi.Networks(fmc=fmc, name="_ipnet2", value="5.5.5.0/24")
    ipnet2.post()
    # Give the FMC a moment to register the new object before grouping it.
    time.sleep(1)
    # Build an FQDNS object
    fqdns1 = fmcapi.FQDNS(fmc=fmc, name="_fqdns1", value="www.cisco.com")
    fqdns1.post()
    obj1 = fmcapi.NetworkGroups(fmc=fmc, name="_fmcapi_test_networkgroup")
    obj1.named_networks(action="add", name=ipnet2.name)
    obj1.unnamed_networks(action="add", value="4.4.4.4/32")
    obj1.post()
    # Build a URL object
    url1 = fmcapi.URLs(fmc=fmc, name="_url1", url="asdf.org")
    url1.post()
    url1.get()
    # lists = [{"type": url1.type, "id": url1.id, "name": url1.name}]
    # Build a VLAN Tag object
    vlantag1 = fmcapi.VlanTags(
        fmc=fmc, name="_vlantag1", data={"startTag": "888", "endTag": "999"}
    )
    vlantag1.post()
    # Build a Port object
    pport1 = fmcapi.ProtocolPortObjects(
        fmc=fmc, name="_pport1", port="9090", protocol="UDP"
    )
    pport1.post()
    # Build a Port Group Object
    obj10 = fmcapi.ProtocolPortObjects(
        fmc=fmc, name="_porttcp1", port="8443", protocol="TCP"
    )
    obj10.post()
    obj11 = fmcapi.ProtocolPortObjects(
        fmc=fmc, name="_portudp1", port="161", protocol="UDP"
    )
    obj11.post()
    obj12 = fmcapi.ProtocolPortObjects(
        fmc=fmc, name="_portrangetcp1", port="0-1023", protocol="TCP"
    )
    obj12.post()
    obj2 = fmcapi.PortObjectGroups(fmc=fmc, name="_fmcapi_test_portobjectgroup")
    obj2.named_ports(action="add", name=obj10.name)
    obj2.named_ports(action="add", name=obj11.name)
    obj2.named_ports(action="add", name=obj12.name)
    obj2.post()
    # Build a Security Zone object
    sz1 = fmcapi.SecurityZones(fmc=fmc, name="_sz1", interfaceMode="ROUTED")
    sz1.post()
    # Build an ACP Object
    acp1 = fmcapi.AccessPolicies(fmc=fmc, name=namer)
    acp1.post()
    # Get a file_policy
    # fp = fmcapi.FilePolicies(fmc=fmc1, name='daxm_test')
    time.sleep(1)
    logging.info("Setup of objects for ACPRule test done.\n")
    logging.info(
        "Test ACPRule. Try to test all features of all methods of the ACPRule class."
    )
    # One rule exercising every reference type created above.
    acprule1 = fmcapi.AccessRules(fmc=fmc, acp_name=acp1.name)
    acprule1.name = namer
    acprule1.action = "ALLOW"
    acprule1.enabled = False
    acprule1.sendEventsToFMC = True
    acprule1.logFiles = False
    acprule1.logBegin = True
    acprule1.logEnd = True
    acprule1.variable_set(action="set", name="Default-Set")
    acprule1.source_zone(action="add", name=sz1.name)
    acprule1.destination_zone(action="add", name=sz1.name)
    acprule1.intrusion_policy(action="set", name="Security Over Connectivity")
    acprule1.vlan_tags(action="add", name=vlantag1.name)
    acprule1.source_port(action="add", name=pport1.name)
    acprule1.destination_port(action="add", name=pport1.name)
    acprule1.destination_port(action="add", name=obj2.name)
    acprule1.source_network(action="add", name=iphost1.name)
    acprule1.source_network(action="add", name=obj1.name)
    acprule1.source_network(action="add", name=iprange1.name)
    acprule1.destination_network(action="add", name=ipnet1.name)
    acprule1.destination_network(action="add", name=iprange1.name)
    acprule1.destination_network(action="add", name=fqdns1.name)
    # acprule1.urls(name=url1.name)
    # acprule1.file_policy(action='set', name=fp.name)
    acprule1.post()
    logging.info("Test ACPRule done.\n")
    logging.info("Cleanup of testing ACPRule methods.")
    # Delete the rule first, then the policy, then the referenced objects.
    acprule1.delete()
    time.sleep(1)
    acp1.delete()
    iphost1.delete()
    ipnet1.delete()
    iprange1.delete()
    fqdns1.delete()
    obj1.delete()
    ipnet2.delete()
    url1.delete()
    vlantag1.delete()
    pport1.delete()
    sz1.delete()
    obj2.delete()
    obj10.delete()
    obj11.delete()
    obj12.delete()
    logging.info("Cleanup of objects for ACPRule test done.\n")
|
"""
Write a python function, check_double(number) which accepts a whole number and returns True if it satisfies the given conditions.
The number and its double should have exactly the same number of digits.
Both the numbers should have the same digits ,but in different order.
Otherwise it should return False.
Example: If the number is 125874 and its double, 251748, contain exactly the same digits, but in a different order.
"""
#PF-Assgn-38
def check_double(number):
    """Return True when `number` and its double contain the same digits.

    Equal digit multisets force equal digit counts, so a single
    sorted-digit comparison covers both stated conditions (same number of
    digits, same digits in some order).  The original wrapped each str()
    in a no-op "".join() and spelled out the boolean with if/else.
    """
    return sorted(str(number)) == sorted(str(2 * number))
print(check_double(106))
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import typing
from typing import Dict, Optional
import torch
import pandas as pd
from PIL import Image
from d3m import container, utils, exceptions
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces import base, transformer
from d3m.primitive_interfaces.base import CallResult
from distil.utils import CYTHON_DEP
from distil.utils import Img2Vec
import version
__all__ = ("ImageTransferPrimitive",)
logger = logging.getLogger(__name__)
Inputs = container.DataFrame
Outputs = container.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    """Control parameters for ImageTransferPrimitive."""
    # Index of the column holding image data; None means auto-detect by
    # semantic type (see _get_filename_column_index).
    filename_col = hyperparams.Hyperparameter[typing.Union[int, None]](
        default=None,
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="The filname column index for image data.",
    )
    # When True, run on CPU even if CUDA is available.
    force_cpu = hyperparams.Hyperparameter[bool](
        default=False,
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="Force CPU execution regardless of GPU availability.",
    )
class ImageTransferPrimitive(
    transformer.TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]
):
    """
    A primitive that converts an input image to a vector of 512 numerical features.

    Feature extraction is done with a pre-trained ResNet-18 (supplied as a
    D3M static-file volume) wrapped by distil.utils.Img2Vec.
    """

    # Key under which the D3M runtime mounts the pre-trained weights file.
    _VOLUME_KEY = "resnet18-5c106cde"
    # Semantic type used to auto-detect the image column.
    _image_semantic = ("http://schema.org/ImageObject",)

    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "782e261e-8e23-4184-9258-5a412c9b32d4",
            "version": version.__version__,
            "name": "Image Transfer",
            "python_path": "d3m.primitives.feature_extraction.image_transfer.DistilImageTransfer",
            "source": {
                "name": "Distil",
                "contact": "mailto:cbethune@uncharted.software",
                "uris": [
                    "https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/image_transfer.py",
                    "https://github.com/uncharted-distil/distil-primitives",
                ],
            },
            "installation": [
                CYTHON_DEP,
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
                {
                    "type": "FILE",
                    "key": _VOLUME_KEY,
                    "file_uri": "http://public.datadrivendiscovery.org/resnet18-5c106cde.pth",
                    "file_digest": "5c106cde386e87d4033832f2996f5493238eda96ccf559d1d62760c4de0613f8",
                },
            ],
            "algorithm_types": [
                metadata_base.PrimitiveAlgorithmType.CONVOLUTIONAL_NEURAL_NETWORK,
            ],
            "primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
        },
    )

    # class instance to avoid unnecessary re-init
    _model: Optional[Img2Vec] = None

    def __init__(
        self,
        *,
        hyperparams: Hyperparams,
        random_seed: int = 0,
        volumes: Optional[Dict[str, str]] = None,
    ) -> None:
        """Store hyperparams/volumes; the model itself is loaded lazily in produce()."""
        super().__init__(
            hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
        )
        # The weights file is mandatory -- fail fast if the runtime did not
        # supply the static-file volumes.
        if volumes is None:
            raise ValueError("volumes cannot be None")

    def _img_to_vec(self, image_array):
        """Convert one image array to its 512-dim feature vector."""
        image_array = image_array.squeeze()
        return self._model.get_vec(Image.fromarray(image_array).convert("RGB"))

    def _transform_inputs(self, inputs):
        """Vectorise every image in the detected column; one output column per feature."""
        result = inputs.copy()
        result["image_vec"] = result[self.filename_col].apply(
            lambda image_file: self._img_to_vec(image_file)
        )  # self.img2vec.get_vec(image_file))
        # Explode each feature vector into columns v0..vN.
        df = pd.DataFrame(result["image_vec"].values.tolist())
        df.columns = ["v{}".format(i) for i in range(0, df.shape[1])]
        return container.DataFrame(df, generate_metadata=True)

    def _get_filename_column_index(self, inputs_metadata):
        """Return the image column index: explicit hyperparam wins, else first
        column tagged with the image semantic type; raises if neither exists."""
        filename_col_index = self.hyperparams["filename_col"]
        image_indices = inputs_metadata.list_columns_with_semantic_types(
            self._image_semantic
        )
        if filename_col_index is not None:
            # if filename_col_index not in image_indices:
            #     raise exceptions.InvalidArgumentValueError('column with index ' + str(filename_col_index) + ' does not have image semantic')
            return filename_col_index
        elif len(image_indices) > 0:
            return image_indices[0]
        raise exceptions.InvalidArgumentValueError(
            "inputs does not have image semantic"
        )

    def produce(
        self,
        *,
        inputs: container.DataFrame,
        timeout: float = None,
        iterations: int = None,
    ) -> CallResult[container.DataFrame]:
        """Extract a 512-feature vector per image row and return them as a DataFrame."""
        logger.debug(f"Producing {__name__}")
        # Lazy one-time model load; device selection honours force_cpu.
        if self._model is None:
            model_path = self.volumes[self._VOLUME_KEY]
            logger.info(f"Loading pre-trained model from {model_path}")
            if torch.cuda.is_available():
                if self.hyperparams["force_cpu"]:
                    logger.info("Detected CUDA support - forcing use of CPU")
                    device = "cpu"
                else:
                    logger.info("Detected CUDA support - using GPU")
                    device = "cuda"
            else:
                logger.info("CUDA does not appear to be supported - using CPU.")
                device = "cpu"
            self._model = Img2Vec(model_path, device=device)
        filename_col_index = self._get_filename_column_index(inputs.metadata)
        self.filename_col = inputs.columns[filename_col_index]
        return base.CallResult(self._transform_inputs(inputs))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-10 05:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Host and HostDetails.

    NOTE(review): generated by Django 1.9.5 -- do not hand-edit field
    definitions; create a follow-up migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Host',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120, null=True)),
                ('ip', models.CharField(max_length=30, null=True)),
                ('uuid', models.CharField(max_length=60, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='HostDetails',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numberofvms', models.CharField(max_length=100, null=True)),
                ('numberofdatastores', models.IntegerField(null=True)),
                ('memorySize', models.IntegerField(null=True)),
                ('numCpuCores', models.IntegerField(null=True)),
                ('numNics', models.IntegerField(null=True)),
                ('cpuModel', models.CharField(max_length=100, null=True)),
                ('cpuMhz', models.CharField(max_length=100, null=True)),
                ('numCpuPkgs', models.IntegerField(null=True)),
                ('numHBAs', models.IntegerField(null=True)),
                ('overallStatus', models.CharField(max_length=100, null=True)),
                ('port', models.CharField(max_length=100, null=True)),
                ('vmotionEnabled', models.CharField(max_length=100, null=True)),
                ('uptime', models.CharField(max_length=100, null=True)),
                ('overallMemoryUsage', models.CharField(max_length=100, null=True)),
                ('overallCpuUsage', models.CharField(max_length=100, null=True)),
                ('bootTime', models.CharField(max_length=100, null=True)),
                ('connectionState', models.CharField(max_length=100, null=True)),
                ('dasHostState', models.CharField(max_length=100, null=True)),
                ('hostMaxVirtualDiskCapacity', models.CharField(max_length=100, null=True)),
                ('maxEVCModeKey', models.CharField(max_length=100, null=True)),
                ('managementServerIp', models.CharField(max_length=100, null=True)),
                ('currentEVCModeKey', models.CharField(max_length=100, null=True)),
                ('customValue', models.CharField(max_length=100, null=True)),
                ('vcenterdate', models.DateField()),
                ('createdate', models.DateField()),
                # Deleting a Host cascades to its HostDetails rows.
                ('id_host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='host.Host')),
            ],
        ),
    ]
|
from datetime import datetime
from app.domainmodel.movie import Movie
class Review:
    """A user review of a Movie: free text, a 0-10 rating and a timestamp.

    A rating outside 0..10 is stored as None; the review text and movie
    are kept regardless.
    """

    def __init__(self, input_movie: Movie, review_text: str, input_rating: int):
        # BUG FIX: the original assigned __reviewText only for valid
        # ratings, so review_text raised AttributeError after an invalid
        # rating.  Store it unconditionally.
        self.__reviewText = review_text
        if input_rating < 0 or input_rating > 10:
            self.__rating = None
        else:
            self.__rating = input_rating
        # Creation time, recorded as a POSIX timestamp.
        now = datetime.now()
        self.__timestamp = datetime.timestamp(now)
        self.__movie = input_movie

    def __repr__(self):
        # BUG FIX: the original `self.__movie + f"..."` raised TypeError
        # unless Movie defined __add__; format the movie into the string
        # instead.
        return f"{self.__movie}Review: {self.__reviewText}, Rating: {self.__rating}"

    def __eq__(self, other):
        # Two reviews are equal only if every stored field matches,
        # including the creation timestamp.
        return (self.__rating == other.__rating) and (self.__movie == other.__movie) \
               and (self.__timestamp == other.__timestamp) and (self.__reviewText == other.__reviewText)

    @property
    def movie(self):
        """The reviewed Movie."""
        return self.__movie

    @property
    def review_text(self):
        """The free-text body of the review."""
        return self.__reviewText

    @property
    def rating(self):
        """Integer rating 0-10, or None if the supplied rating was invalid."""
        return self.__rating

    @property
    def timestamp(self):
        """POSIX timestamp of when the review object was created."""
        return self.__timestamp
|
# Generated by Django 2.1.2 on 2018-11-08 19:45
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering Basket.date_of_readiness.

    NOTE(review): the default is a fixed datetime snapshot captured when
    makemigrations ran (Django 2.1.2 generated file) -- do not hand-edit.
    """

    dependencies = [
        ('orders', '0002_auto_20181108_2136'),
    ]

    operations = [
        migrations.AlterField(
            model_name='basket',
            name='date_of_readiness',
            field=models.DateTimeField(default=datetime.datetime(2018, 11, 8, 19, 45, 0, 817142, tzinfo=utc), help_text='На коли має бути готоаий заказ: дата і час', verbose_name='Готовність на(дата):'),
        ),
    ]
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from users.models import User
from .filters import dollar
# Choices for flat_or_apartment fields: (stored value, human-readable
# label); values are Russian for "Apartment" and "House".
VARIANT = [
    ('Квартира', 'Квартира'),
    ('Дом', 'Дом')
]
class SaleFlat(models.Model):
    """A property-for-sale listing with size/price details and owner."""
    cost = models.PositiveIntegerField(verbose_name='стоимость квартиры')
    # Dollar exchange rate captured at creation via the `dollar` callable.
    cost_dollar = models.FloatField(verbose_name='курс доллара', default=dollar)
    floor = models.PositiveIntegerField(verbose_name='этаж', default=1)
    total_area = models.DecimalField(verbose_name='общая площадь', max_digits=10, decimal_places=2)
    living_space = models.DecimalField(verbose_name='жилая площадь', max_digits=10, decimal_places=2)
    kitchen_area = models.DecimalField(verbose_name='площадь кухни', max_digits=10, decimal_places=2)
    ceiling_height = models.DecimalField(verbose_name='высота потолка', max_digits=10, decimal_places=2)
    number_of_rooms = models.PositiveIntegerField(verbose_name='количество комнат', default=1)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    date = models.DateTimeField(verbose_name='дата', auto_now_add=True)
    description = models.TextField(verbose_name='описание')
    telephone = models.CharField(verbose_name='номер телефона', max_length=25)
    address = models.CharField(verbose_name='адрес', max_length=50)
    # Apartment vs house, constrained to the VARIANT choices above.
    flat_or_apartment = models.CharField(verbose_name='дом или квартира', choices=VARIANT, max_length=35)

    def __str__(self):
        return f'{self.address}'
class Photo(models.Model):
    """An image attached to a SaleFlat listing (many photos per listing)."""
    image = models.ImageField(upload_to='photos/%Y/%m/%d/', verbose_name='картинка', blank=True, null=True)
    saleflat = models.ForeignKey('SaleFlat', on_delete=models.CASCADE, related_name='photo')
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.user.last_name}, {self.id}'
class Rent(models.Model):
    """A property-for-rent listing: price, contact info and amenity flags."""
    cost = models.PositiveIntegerField(verbose_name='стоимость квартиры')
    # Dollar exchange rate captured at creation via the `dollar` callable.
    cost_dollar = models.FloatField(verbose_name='курс доллара', default=dollar)
    floor = models.PositiveIntegerField(verbose_name='этаж', default=1)
    number_of_rooms = models.PositiveIntegerField(verbose_name='количество комнат', default=1)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    date = models.DateTimeField(verbose_name='дата', auto_now_add=True)
    description = models.TextField(verbose_name='описание')
    telephone = models.CharField(verbose_name='номер телефона', max_length=25)
    address = models.CharField(verbose_name='адрес', max_length=50)
    # Amenity flags (TV, furniture, stove, fridge, internet, A/C, washer).
    tv = models.BooleanField(verbose_name='телевизор', default=False)
    furniture = models.BooleanField(verbose_name='мебель', default=False)
    plate = models.BooleanField(verbose_name='плита', default=False)
    refrigerator = models.BooleanField(verbose_name='холодильник', default=False)
    internet = models.BooleanField(verbose_name='интернет', default=False)
    conditioning = models.BooleanField(verbose_name='кондиционер', default=False)
    washer = models.BooleanField(verbose_name='стиральная машина', default=False)
    flat_or_apartment = models.CharField(verbose_name='дом или квартира', choices=VARIANT, max_length=35)

    def __str__(self):
        return f'{self.address}'
class PhotoRent(models.Model):
    """An image attached to a Rent listing (many photos per listing)."""
    image_rent = models.ImageField(upload_to='photos/%Y/%m/%d/', verbose_name='картинка', blank=True, null=True)
    rent = models.ForeignKey('Rent', on_delete=models.CASCADE, related_name='photo_rent')
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.user.last_name}, {self.id}'
|
def search_quadruplets(sequence: list[int], target: int) -> list[list[int]]:
    """Return all unique quadruplets in `sequence` that sum to `target`.

    Examples:
        search_quadruplets([4, 1, 2, -1, 1, -3], 1)
        -> [[-3, -1, 1, 4], [-3, 1, 1, 2]]
        search_quadruplets([2, 0, -1, 1, -2, 2], 2)
        -> [[-2, 0, 2, 2], [-1, 0, 1, 2]]

    Technique: Two Pointers (sorts `sequence` in place).  Fixes the first
    two elements and walks a left/right pointer pair over the rest;
    duplicate values are skipped so each quadruplet appears once.
    """
    sequence.sort()
    quadruplets = []
    for index in range(0, len(sequence) - 3):
        # Skip duplicate first elements.
        if index > 0 and sequence[index] == sequence[index - 1]:
            continue
        for another_index in range(index + 1, len(sequence) - 2):
            # BUG FIX: dedupe only past the first candidate for THIS anchor
            # (`> index + 1`).  The original tested `> 0`, which is always
            # true here and wrongly skipped quadruplets whose second
            # element equals the anchor (e.g. [1, 1, 1, 1] with target 4).
            if (
                another_index > index + 1
                and sequence[another_index] == sequence[another_index - 1]
            ):
                continue
            left = another_index + 1
            right = len(sequence) - 1
            while left < right:
                quadruplets_sum = (
                    sequence[index]
                    + sequence[another_index]
                    + sequence[left]
                    + sequence[right]
                )
                if quadruplets_sum == target:
                    quadruplets.append(
                        [
                            sequence[index],
                            sequence[another_index],
                            sequence[left],
                            sequence[right],
                        ]
                    )
                    left += 1
                    right -= 1
                    # Skip duplicate third/fourth elements.
                    while left < right and sequence[left] == sequence[left - 1]:
                        left += 1
                    while left < right and sequence[right] == sequence[right + 1]:
                        right -= 1
                elif quadruplets_sum < target:
                    left += 1
                else:
                    right -= 1
    return quadruplets
if __name__ == "__main__":
    # Interactive driver: comma-separated integers, then a target sum.
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item) for item in user_input.split(",")]
    target_input = input("Enter a single number to be found in the list:\n")
    target = int(target_input)
    result = search_quadruplets(sequence, target)
    # NOTE(review): search_quadruplets always returns a list (possibly
    # empty), never None, so the else branch below is unreachable.
    if result is not None:
        print(f"result: {result}")
    else:
        print("Not found")
|
import sys
from django.conf import settings
from django.core.management import call_command
from django.db.backends.creation import TEST_DATABASE_PREFIX
from django.test.simple import DjangoTestSuiteRunner
try:
from django.test.simple import dependency_ordered
except ImportError:
from django_extras.django124.test.simple import dependency_ordered
class KeepDatabaseTestSuiteRunner(DjangoTestSuiteRunner):
'''DjangoTestSuiteRunner modified to optionally reuse the test
database instead of destroying and recreating it if the reuse_db
argument is True.
Modifications:
- Add create_test_db() and _create_test_db() methods from
db.backends.creation.BaseDatabaseCreation
- Modify them to optionally reuse the test database.
- Add the connection object as an attribute of this instance
- Call self.create_test_db() and self._create_test_db() methods
instead of the connection object's version of these methods.
'''
    def __init__(self, reuse_db=False, **kwargs):
        # reuse_db=True keeps the test database between runs and reuses an
        # existing one instead of destroying and recreating it.
        self.reuse_db = reuse_db
        super(KeepDatabaseTestSuiteRunner, self).__init__(**kwargs)
    def setup_databases(self, **kwargs):
        '''
        Copied from Django 1.2.4 django.test.simple.DjangoTestSuiteRunner

        Differences from upstream: calls self.create_test_db() (this
        class's reuse-aware version) instead of
        connection.creation.create_test_db(), and stores the connection on
        self for those helpers.  Returns (old_names, mirrors) for
        teardown_databases().
        '''
        from django.db import connections, DEFAULT_DB_ALIAS

        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
            else:
                # Store the (engine, name) pair. If we have two aliases
                # with the same pair, we only need to create the test database
                # once.
                test_databases.setdefault((
                    connection.settings_dict['HOST'],
                    connection.settings_dict['PORT'],
                    connection.settings_dict['ENGINE'],
                    connection.settings_dict['NAME'],
                ), []).append(alias)
                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
                else:
                    # Every non-default DB implicitly depends on 'default'.
                    if alias != 'default':
                        dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', ['default'])

        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        for (host, port, engine, db_name), aliases in dependency_ordered(test_databases.items(), dependencies):
            # Actually create the database for the first connection
            # (self.connection is consumed by self.create_test_db below).
            self.connection = connection = connections[aliases[0]]
            old_names.append((connection, db_name, True))
            test_db_name = self.create_test_db(self.verbosity, autoclobber=not self.interactive)
            # Remaining aliases of the same physical DB just get repointed.
            for alias in aliases[1:]:
                connection = connections[alias]
                if db_name:
                    old_names.append((connection, db_name, False))
                    connection.settings_dict['NAME'] = test_db_name
                else:
                    # If settings_dict['NAME'] isn't defined, we have a backend where
                    # the name isn't important -- e.g., SQLite, which uses :memory:.
                    # Force create the database instead of assuming it's a duplicate.
                    old_names.append((connection, db_name, True))
                    self.create_test_db(self.verbosity, autoclobber=not self.interactive)

        # Point mirror aliases at the database they mirror.
        for alias, mirror_alias in mirrored_aliases.items():
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']

        return old_names, mirrors
    def teardown_databases(self, old_config, **kwargs):
        '''
        Copied from Django 1.2.4 django.test.simple.DjangoTestSuiteRunner

        Difference from upstream: when self.reuse_db is True the test
        databases are left in place (neither destroyed nor renamed back)
        so the next run can reuse them.
        '''
        from django.db import connections
        old_names, mirrors = old_config
        # Point all the mirrors back to the originals
        for alias, old_name in mirrors:
            connections[alias].settings_dict['NAME'] = old_name
        # Destroy all the non-mirror databases
        if not self.reuse_db:
            for connection, old_name, destroy in old_names:
                if destroy:
                    connection.creation.destroy_test_db(old_name, self.verbosity)
                else:
                    connection.settings_dict['NAME'] = old_name
    def create_test_db(self, verbosity=1, autoclobber=False):
        """
        Copied from Django 1.2.4 django.db.backends.creation.BaseDatabaseCreation

        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.

        Uses self.connection (set by setup_databases) and delegates the raw
        CREATE DATABASE to self._create_test_db, which honours reuse_db.
        """
        if verbosity >= 1:
            print "Creating test database '%s'..." % self.connection.alias

        test_database_name = self._create_test_db(verbosity, autoclobber)

        # Reconnect against the freshly (re)created test database.
        self.connection.close()
        self.connection.settings_dict["NAME"] = test_database_name
        can_rollback = self.connection.creation._rollback_works()
        self.connection.settings_dict["SUPPORTS_TRANSACTIONS"] = can_rollback

        # Build the schema inside the test database.
        call_command('syncdb', verbosity=verbosity, interactive=False, database=self.connection.alias)

        # Recreate the DB-backed cache table if the cache lives in the DB.
        if settings.CACHE_BACKEND.startswith('db://'):
            from django.core.cache import parse_backend_uri, cache
            from django.db import router
            if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
                _, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
                call_command('createcachetable', cache_name, database=self.connection.alias)

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database.
        cursor = self.connection.cursor()

        return test_database_name
    def _create_test_db(self, verbosity, autoclobber):
        """
        Copied from Django 1.2.4 django.db.backends.creation.BaseDatabaseCreation
        Internal implementation - creates the test db tables.

        Modified from stock Django: when creation fails and self.reuse_db is
        set, the existing database is kept and reused instead of prompting
        the user to drop and recreate it.
        """
        suffix = self.connection.creation.sql_table_creation_suffix()
        # Prefer an explicit TEST_NAME; otherwise derive "<prefix><NAME>".
        if self.connection.settings_dict['TEST_NAME']:
            test_database_name = self.connection.settings_dict['TEST_NAME']
        else:
            test_database_name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        qn = self.connection.ops.quote_name
        # Create the test database and connect to it. We need to autocommit
        # if the database supports it because PostgreSQL doesn't allow
        # CREATE/DROP DATABASE statements within transactions.
        cursor = self.connection.cursor()
        self.connection.creation.set_autocommit()
        try:
            cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
        except Exception, e:
            # NOTE(review): with reuse_db, ANY creation failure is treated as
            # "database already exists" -- other causes (permissions, bad
            # connection) are masked here. Confirm that's acceptable.
            if self.reuse_db:
                if verbosity >= 1:
                    print "Database already exists. Reusing test database..."
            else:
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
                # 'confirm' is only bound when we prompted above; the
                # short-circuit on autoclobber keeps this safe otherwise.
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test database..."
                        cursor.execute("DROP DATABASE %s" % qn(test_database_name))
                        if verbosity >= 1:
                            print "Creating test database..."
                        cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
                    except Exception, e:
                        # Recreation failed too: abort the whole test run.
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)
        return test_database_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.