text stringlengths 8 6.05M |
|---|
import random
import numpy as np
def create_alias_table(area_ratio):
    """Build a Walker/Vose alias table for O(1) sampling.

    :param area_ratio: probabilities, sum(area_ratio) == 1
    :return: (accept, alias) lists usable by alias_sample
    """
    n = len(area_ratio)
    accept = [0] * n
    alias = [0] * n
    # Scale every probability by n so the "average" bucket has mass 1.
    scaled = np.array(area_ratio) * n
    small = [i for i, prob in enumerate(scaled) if prob < 1.0]
    large = [i for i, prob in enumerate(scaled) if prob >= 1.0]
    # Pair each under-full bucket with an over-full one.
    while small and large:
        s = small.pop()
        g = large.pop()
        accept[s] = scaled[s]
        alias[s] = g
        # The donor gives away exactly what the small bucket is missing.
        scaled[g] = scaled[g] - (1 - scaled[s])
        if scaled[g] < 1.0:
            small.append(g)
        else:
            large.append(g)
    # Leftover buckets are exactly full: always accept.
    while large:
        accept[large.pop()] = 1
    while small:
        accept[small.pop()] = 1
    return accept, alias
def alias_sample(accept, alias):
    """Draw one index from an alias table in O(1).

    :param accept: acceptance thresholds from create_alias_table
    :param alias: alias indices from create_alias_table
    :return: sample index
    """
    n = len(accept)
    # Pick a bucket uniformly, then either keep it or fall through
    # to its alias depending on the acceptance threshold.
    bucket = int(random.random() * n)
    threshold = random.random()
    return bucket if threshold < accept[bucket] else alias[bucket]
class RandomWalker:
    """node2vec-style second-order random walker over a weighted graph."""

    def __init__(self, G, p=1, q=1):
        """
        :param G: graph (networkx-like interface)
        :param p: return parameter, controls the likelihood of immediately
                  revisiting a node in the walk.
        :param q: in-out parameter, allows the search to differentiate
                  between "inward" and "outward" nodes.
        """
        self.G = G
        self.p = p
        self.q = q

    def get_alias_edge(self, t, v):
        """Alias table for moving from v, given the previously visited node t.

        Implements the node2vec transition bias: weight/p back to t,
        weight to common neighbours of t and v, weight/q otherwise.
        """
        graph = self.G
        raw_probs = []
        for nbr in graph.neighbors(v):
            weight = graph[v][nbr].get('weight', 1.0)  # w_vx
            if nbr == t:                       # d_tx == 0
                raw_probs.append(weight / self.p)
            elif graph.has_edge(nbr, t):       # d_tx == 1
                raw_probs.append(weight)
            else:                              # d_tx > 1
                raw_probs.append(weight / self.q)
        total = sum(raw_probs)
        return create_alias_table([float(prob) / total for prob in raw_probs])

    def preprocess_transition_probs(self):
        """Precompute alias tables for every node and every directed edge."""
        graph = self.G
        alias_nodes = {}
        for node in graph.nodes():
            weights = [graph[node][nbr].get('weight', 1.0)
                       for nbr in graph.neighbors(node)]
            total = sum(weights)
            alias_nodes[node] = create_alias_table(
                [float(w) / total for w in weights])
        alias_edges = {edge: self.get_alias_edge(edge[0], edge[1])
                       for edge in graph.edges()}
        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges
|
def square(number):
    """Grains: number of wheat grains on chessboard square *number*.

    The count on each square is double the previous one, starting from a
    single grain; a chessboard has 64 squares.
    Example: square(1) == 1, square(2) == 2, square(3) == 4, square(4) == 8.
    """
    return 2 ** (number - 1)
|
import numpy as np  # kept from the original file; the search itself no longer needs it
import math


def find_pythagorean_triplet(total=1000):
    """Return the Pythagorean triplet (a, b, c) with a + b + c == total.

    Generalises the original hard-coded search for 1000 (Project Euler 9);
    the a/b search ranges match the original script.

    :param total: required value of a + b + c (default 1000)
    :return: tuple (a, b, c) or None when no triplet is found in range
    """
    for a in range(1, 400):
        for b in range(1, 600):
            c_squared = a * a + b * b
            c = int(math.sqrt(c_squared))
            # Exact integer check — the original compared a float hypotenuse
            # with np.math.floor (np.math was removed from modern NumPy).
            if c * c == c_squared and a + b + c == total:
                return a, b, c
    return None


if __name__ == '__main__':
    triplet = find_pythagorean_triplet(1000)
    if triplet is not None:
        a, b, c = triplet
        print('A: {0}, B: {1}, C: {2}'.format(a, b, c))
        print('A*B*C = {0}'.format(a * b * c))
|
from flask import render_template,request,redirect,url_for, abort
from ..models import User, Blog, Comment, Subscriber
from .forms import BlogForm,UpdateForm, LoginForm, RegistrationForm, CommentForm, DeleteForm, SubscribeForm
from . import main
from .fill_db import initialize
from .. import db
from ..request import get_quote
import datetime
from flask_login import login_required, login_user, logout_user,current_user
from ..email import mail_message, mail_subscribe
from werkzeug.security import generate_password_hash,check_password_hash
@main.route('/')
def index():
    """Landing page: seed the database on first run, then list all blogs
    newest-first together with a quote and a subscribe form."""
    quote = get_quote()
    if not User.query.all():
        initialize()  # empty user table -> first run, populate the db
    form = SubscribeForm()
    all_blogs = Blog.query.all()
    blogs = sorted(all_blogs, key=lambda b: b.date, reverse=True)
    return render_template('index.html', quote=quote, form=form, blogs=blogs)
@main.route('/profile')
@login_required
def profile():
    """Show the active user's own blog posts.

    The active user is looked up via the app's custom ``logged_in`` db flag
    rather than flask_login's ``current_user``.
    """
    active_user = User.query.filter_by(logged_in=True).first()
    user_blogs = Blog.query.filter_by(user_id=active_user.user_id).all()
    return render_template('profile.html', blogs=user_blogs)
@main.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    Fixes over the original:
      * the failed-login redirect was computed but never returned;
      * the ``logged_in`` flag was set before the password was verified, so a
        wrong password still marked the account as logged in.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # Clear the flag of whoever was logged in before.
        former_user = User.query.filter_by(logged_in=True).first()
        if former_user:
            former_user.logged_in = False
            db.session.commit()
        user = User.query.filter_by(username=form.username.data).first()
        if not user:
            return redirect(url_for('main.login'))
        if user.verify_password(form.password.data):
            # Only mark the account logged in once the password checks out.
            user.logged_in = True
            db.session.commit()
            login_user(user)
            return redirect(url_for('main.index'))
        # Wrong password: back to the login page (original lacked `return`).
        return redirect(url_for('main.login'))
    return render_template('auth/login.html', form=form)
@main.route('/logout')
def logout():
    """Log the current user out and clear the custom ``logged_in`` db flag.

    Fix: the original queried ``former_user`` but never reset the flag,
    leaving the account marked as logged in in the database.
    """
    former_user = User.query.filter_by(logged_in=True).first()
    if former_user:
        former_user.logged_in = False
        db.session.commit()
    logout_user()
    return redirect(url_for('main.index'))
@main.route('/registration', methods=['GET', 'POST'])
def registration():
    """Register a new user, send a welcome email and redirect to login.

    Fix: the original had a second, unreachable ``render_template`` after the
    first return; it has been removed.
    """
    form = RegistrationForm()
    title = "Let's get you started!!"
    if form.validate_on_submit():
        email = form.email_address.data
        user = User(username=form.username.data,
                    email=email,
                    password=generate_password_hash(form.password.data),
                    logged_in=False)
        db.session.add(user)
        db.session.commit()
        mail_message("Welcome to BlogMe", "welcome/welcome_user", email, user=user)
        return redirect(url_for('main.login'))
    return render_template('auth/register.html', form=form, title=title)
@main.route('/<blog>/comment/', methods=['GET', 'POST'])
def comment(blog):
    """Show a single blog post with its comments and update/delete controls.

    Fixes: the blog form was pre-filled with the literal string
    ``" blogg.blog"`` instead of the post's actual body, and a leftover
    debug ``print`` was removed.
    """
    blogg = Blog.query.filter_by(title=blog).first()
    comment = CommentForm()
    update = UpdateForm()
    delete = DeleteForm()
    # NOTE(review): fetched but never passed to the template — presumably the
    # template reads blog.comments directly; confirm against comment.html.
    comments = blogg.comments
    form = BlogForm()
    form.title.data = blogg.title
    form.blog.data = blogg.blog
    if update.update.data:
        if request.method == 'POST':
            form.title.data = blogg.title
            form.blog.data = blogg.blog
            form.populate_obj(blogg)
            return redirect(url_for('main.add', form=BlogForm(blog=blogg.blog)))
    return render_template('comment.html', blog=blogg, delete=delete,
                           update=update, comment=comment)
@main.route('/blog/new', methods = ['GET', 'POST'])
@login_required
def add():
    """Create a new blog post and email every subscriber about it."""
    # Clean up any empty draft posts the current user left behind.
    blogs = current_user.blogs
    for blog in blogs:
        if blog.blog is None:
            db.session.delete(blog)
            db.session.commit()
    form = BlogForm()
    # NOTE(review): three CommentForm instances are built here but `update`
    # and `delete` are never passed to the template — verify against add.html.
    comment = CommentForm()
    update = CommentForm()
    delete = CommentForm()
    user = User.query.filter_by(logged_in = True).first()
    if form.validate_on_submit():
        # Persist the new post, then notify every subscriber by email.
        blog = Blog(date = datetime.datetime.now(), blog = form.blog.data, title = form.title.data, user_id = user.user_id)
        db.session.add(blog)
        db.session.commit()
        subscribers = Subscriber.query.all()
        for subscriber in subscribers:
            mail_subscribe("New Post Is UP!!!","new_post/post.txt",subscriber.email,user = subscriber)
    # NOTE(review): no redirect after a successful POST — the add form simply
    # re-renders; confirm whether a redirect to the index was intended.
    return render_template('add.html', form = form)
@main.route('/<blog>/update', methods=['GET', 'POST'])
def update(blog):
    """Render the comment page for the post with the given title.

    The original built three form objects (CommentForm, UpdateForm,
    CommentForm) that were never used nor passed to the template; they
    have been removed.
    """
    blog = Blog.query.filter_by(title=blog).first()
    return render_template('comment.html', blog=blog)
@main.route('/', methods=['GET', 'POST'])
def subscribe():
    """Store a new subscriber and redirect back to the index.

    Fixes over the original:
      * ``form.validate_on_submit`` was referenced without calling it, so the
        (always-truthy) bound method made every request look like a valid
        submission;
      * ``render_template('footer.html', form)`` passed the form positionally,
        which raises a TypeError — template context must be keyword arguments.

    NOTE(review): this route maps '/' just like index(); confirm which view
    Flask is expected to serve for that URL.
    """
    form = SubscribeForm()
    if form.validate_on_submit():
        subscriber = Subscriber(name=form.name.data, email=form.email.data)
        db.session.add(subscriber)
        db.session.commit()
        return redirect(url_for('main.index'))
    return render_template('footer.html', form=form)
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy.io as sio

# Data visualisation: scatter-plot the raw points of ex7data1.
mat = sio.loadmat("./data/ex7data1.mat")
print(mat.keys())
data1 = pd.DataFrame(mat.get('X'), columns=['X1', "X2"])
print(data1.head())
sns.set(context='notebook', style='white')
sns.lmplot(x='X1', y='X2', data=data1, fit_reg=False)
plt.show()

# 2-D k-means: load and plot the second data set clustered below.
mat = sio.loadmat("./data/ex7data2.mat")
data2 = pd.DataFrame(mat.get("X"), columns=["X1", "X2"])
print(data2.head())
sns.set(context='notebook', style='white')
sns.lmplot(x="X1", y="X2", data=data2, fit_reg=False)
plt.show()
def combine_data_C(data, C):
    """Return a copy of *data* with the cluster assignments in column 'C'.

    The input frame is left untouched.
    """
    tagged = data.copy()
    tagged['C'] = C
    return tagged
def random_init(data, k):
    """Pick k random rows of *data* as initial centroids.

    Returns an ndarray of shape (k, n_features).
    """
    sampled = data.sample(k)
    return sampled.values
# x = np.array([1, 1])
# fig, ax = plt.subplots(figsize=(6, 4))
# ax.scatter(x=init_centroids[:, 0], y=init_centroids[:, 1])
#
# for i, node in enumerate(init_centroids):
# ax.annotate("{}:({},{})".format(i, node[0], node[1]), node)
#
# ax.scatter(x[0], x[1], marker='x', s=200)
# plt.show()
def _find_your_cluster(x, centroids):
distancs = np.apply_along_axis(func1d=np.linalg.norm,
axis=1,
arr=centroids - x)
return np.argmin(distancs)
# _find_your_cluster(x, init_centroids)
def assign_cluster(data, centroids):
    """Nearest-centroid index for every row of *data*.

    Returns a 1-D array of cluster indices, one per row.
    """
    def nearest(row):
        # Inlined helper: distance to every centroid, keep the closest.
        distances = np.linalg.norm(centroids - row, axis=1)
        return np.argmin(distances)

    return np.apply_along_axis(nearest, axis=1, arr=data.values)
# Draw 3 random points as initial centroids and visualise the first
# cluster assignment they induce.
init_centroids = random_init(data2, 3)
print(init_centroids)
C = assign_cluster(data2, init_centroids)
data_with_c = combine_data_C(data2, C)
print(data_with_c.head())
sns.lmplot(x="X1", y="X2", hue='C', data=data_with_c, fit_reg=False)
plt.show()
def new_centroids(data, C):
    """Recompute each centroid as the mean of its cluster's members.

    Returns an ndarray of shape (n_clusters, n_features), ordered by
    cluster index.
    """
    # Inlined combine_data_C: tag every row with its cluster id.
    tagged = data.copy()
    tagged['C'] = C
    grouped = tagged.groupby("C", as_index=False).mean()
    return grouped.sort_values(by='C').drop("C", axis=1).values
def cost(data, centroids, C):
    """Mean Euclidean distance of every point to its assigned centroid."""
    num_points = data.shape[0]
    # centroids[C] expands to one centroid row per data point.
    assigned_centroids = centroids[C]
    distances = np.apply_along_axis(func1d=np.linalg.norm,
                                    axis=1,
                                    arr=data.values - assigned_centroids)
    return distances.sum() / num_points
def _k_means_iter(data, k, epoch=100, tol=0.0001):
    """One k-means run from a random initialisation.

    Iterates assign/update steps until the relative cost improvement drops
    below *tol* or *epoch* iterations have run.
    Returns (assignments, centroids, final cost).
    """
    centroids = random_init(data, k)
    cost_history = []
    for it in range(epoch):
        print("==============epoch {}===============".format(it))
        C = assign_cluster(data, centroids)
        centroids = new_centroids(data, C)
        cost_history.append(cost(data, centroids, C))
        # Converged once the cost barely moves between iterations.
        if len(cost_history) > 1:
            if abs(cost_history[-1] - cost_history[-2]) / cost_history[-1] < tol:
                break
    return C, centroids, cost_history[-1]
# Run one full k-means pass and plot the final assignment.
final_c, final_centroid, _ = _k_means_iter(data2, 3)
# NOTE(review): result discarded, and C is the stale assignment from the
# earlier random initialisation — this call looks like a leftover.
new_centroids(data2, C)
data_c = combine_data_C(data2, final_c)
sns.lmplot(x="X1", y="X2", hue='C', data=data_c, fit_reg=False)
plt.show()
print(cost(data2, final_centroid, final_c))
def k_means(data, k, epoch=100, n_init=10):
    """Run k-means *n_init* times and keep the lowest-cost result.

    Returns the (assignments, centroids, cost) triple of the best run.

    Fix: the original packed the runs into ``np.array(..., dtype=list)`` — a
    fragile object-array construction — just to argmin over the cost column;
    ``min`` with a key does the same selection directly on the tuples.
    """
    tries = [_k_means_iter(data, k, epoch) for _ in range(n_init)]
    return min(tries, key=lambda result: result[-1])
# Best of several random restarts.
best_c, best_centroids, least_cost = k_means(data2, 3)
print(least_cost)
data_with_c = combine_data_C(data2, best_c)
sns.lmplot(x="X1", y="X2", hue='C', data=data_with_c, fit_reg=False)
plt.show()

# Cross-check the hand-rolled implementation against scikit-learn's KMeans.
from sklearn.cluster import KMeans
sk_kmeans = KMeans(n_clusters=3)
sk_kmeans.fit(data2)
sk_C = sk_kmeans.predict(data2)
data_with_c = combine_data_C(data2, sk_C)
sns.lmplot(x="X1", y="X2", hue='C', data=data_with_c, fit_reg=False)
plt.show()
|
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
from func_omniglot import VAE, Loss, data_folders,get_data_loader, create_task
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import numpy as np
import random
def count_acc(logits, labels):
    """Classification accuracy: fraction of rows whose argmax matches the
    label, returned as a Python float."""
    predictions = logits.argmax(dim=1)
    correct = (predictions == labels).type(torch.FloatTensor)
    return correct.mean().item()
# Hyper-parameters for episodic (few-shot) training.
num_epochs = 2500
learning_rate = 0.001  # NOTE(review): unused — the optimizer below hard-codes 0.001
cnt = 0
n_batch_train = 100 #(way+15)*shot
n_batch_val = 100 #(way+15)*shot 20 or 100
n_train_way = 60   # classes per training task
n_val_way = 10     # classes per validation task
n_train_shot = 1   # support examples per class at train time
n_val_shot = 20    # support examples per class at validation time

# MNIST dataset (kept for reference; Omniglot is used instead)
#train_dataset = torchvision.datasets.MNIST(root='../../sharedLocal/',train=True,transform=transforms.ToTensor(),download=True)
#val_dataset = torchvision.datasets.MNIST(root='../../sharedLocal/',train=False,transform=transforms.ToTensor())
train_folders,test_folders = data_folders(data_folder = '../../sharedLocal/omniglot_resized')
torch.cuda.set_device(3)  # hard-coded GPU id
model = VAE().cuda()
crit = Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
print('training....')
# Episodic training loop: one freshly sampled task per epoch.
for epoch in range(num_epochs):
    model.train()
    if epoch % 10 == 0:
        print("epoch {}...".format(epoch))
    # One task: n_train_way classes, n_train_shot support and 15 query
    # images per class, all rotated by the same random multiple of 90.
    task = create_task(train_folders, n_train_way, n_train_shot, 15)
    degrees = random.choice([0, 90, 180, 270])
    support_dataloader = get_data_loader(task, '../../sharedLocal/omniglot_resized', num_per_class=n_train_shot, split="train", shuffle=False, rotation=degrees)
    query_dataloader = get_data_loader(task, '../../sharedLocal/omniglot_resized', num_per_class=15, split="test", shuffle=False, rotation=degrees)
    # BUG FIX: `.__iter__().next()` is Python 2 iterator syntax and raises
    # AttributeError on Python 3 — use the builtin next(iter(...)).
    support, support_labels = next(iter(support_dataloader))
    query, query_labels = next(iter(query_dataloader))
    x_support, x_support_labels = support.cuda(), support_labels.cuda()
    x_query, x_query_labels = query.cuda(), query_labels.cuda()
    # Regroup the support indices with stride n_train_shot — presumably
    # converting class-major order to shot-major order; TODO confirm.
    change_support = []
    for j in range(n_train_shot):
        change_support += [i for i in range(j, len(x_support), n_train_shot)]
    x_support = x_support[change_support, :, :, :]
    # Same regrouping for the 15 query images per class.
    change_query = []
    for j in range(15):
        change_query += [i for i in range(j, len(x_query), 15)]
    x_query = x_query[change_query, :, :, :]
    optimizer.zero_grad()
    embedding_support, recon_support, mu_support, logvar_support = model([x_support, 'support'])
    embedding_query, recon_query, mu_query, logvar_query = model([x_query, 'query'])
    # One label per query image: class ids cycle every n_train_way entries.
    labels = torch.arange(n_train_way).repeat(15)
    labels = labels.type(torch.cuda.LongTensor)
    loss, logits = crit(embedding_support, recon_support, mu_support, logvar_support, x_support, embedding_query, recon_query, mu_query, logvar_query, x_query, labels)
    loss.backward()
    optimizer.step()
# ---- validation: extract latent embeddings for one held-out task ----
task = create_task(test_folders, n_val_way, n_val_shot, 15)
degrees = random.choice([0])  # no rotation at validation time
support_dataloader = get_data_loader(task, '../../sharedLocal/omniglot_resized', num_per_class=n_val_shot, split="train", shuffle=False, rotation=degrees)
# BUG FIX: Python 2 `.__iter__().next()` replaced with the builtin next().
support_images, support_labels = next(iter(support_dataloader))
x_support = support_images.cuda()
embedding_support, recon_support, mu_support, logvar_support = model([x_support, 'query'])

from sklearn.manifold import TSNE
import seaborn as sns
import matplotlib.cm as cm

# Visualise the encoder means; one row per support image.
data = mu_support.cpu().detach().numpy()
n_sample, n_feature = data.shape
label = support_labels
def plot_embedding(data, label, title):
    """Scatter-plot 2-D embedded points coloured by class label.

    Normalises each axis to [0, 1], saves the figure to test.jpg and
    returns it.
    """
    lo, hi = np.min(data, 0), np.max(data, 0)
    scaled = (data - lo) / (hi - lo)  # min-max normalise per axis
    sns.set()
    fig = plt.figure(figsize=(25, 25))
    ax = plt.subplot(111)
    palette = sns.color_palette("deep", 15)
    for idx in range(scaled.shape[0]):
        plt.scatter(scaled[idx, 0], scaled[idx, 1], s=8, c=palette[label[idx]])
    plt.xticks([])
    plt.yticks([])
    plt.title(title)
    plt.axis('off')
    fig.savefig('test.jpg', bbox_inches='tight')
    return fig
# Project the support embeddings to 2-D with t-SNE and plot them.
tsne = TSNE(n_components=2, init='pca', random_state=0)
result = tsne.fit_transform(data)
fig = plot_embedding(result, label,'')
print(test_folders,label)
|
# -*- coding: utf-8 -*-
import os
def get_latale_dir():
    """Return the LaTale install directory derived from Windows env vars.

    (Translated from the original Japanese docstring:) Returns the path
    where LaTale is installed, based on the Windows environment variables.
    Returns None when the game is installed in a non-standard location
    (or the environment variables are missing).
    """
    for env_key in ('ProgramFiles(x86)', 'ProgramFiles'):
        if env_key in os.environ:
            latale_dir = '{0}/Gamepot/LaTale'.format(os.environ[env_key])
            # Only report the directory when it actually exists on disk.
            return latale_dir if os.path.exists(latale_dir) else None
    # Neither variable set: not Windows, or a stripped environment.
    return None
|
import sys
import numpy
def find_biggest_number(mylist):
    """Return the largest value in mylist.

    Fixes: seeded with -inf instead of ``sys.maxint * -1`` — ``sys.maxint``
    no longer exists on Python 3, and the old seed was wrong for values
    below ``-sys.maxint``.  Also avoids shadowing the builtin ``max``.
    Returns -inf for an empty list (matching the original's sentinel style).
    """
    biggest = float('-inf')
    for item in mylist:
        if item > biggest:
            biggest = item
    return biggest
def find_smallest_number(mylist):
    """Return the smallest value in mylist.

    Fixes: seeded with +inf instead of ``sys.maxint`` — ``sys.maxint`` no
    longer exists on Python 3, and the old seed was wrong for values above
    it.  Also avoids shadowing the builtin ``min``.
    Returns +inf for an empty list (matching the original's sentinel style).
    """
    smallest = float('inf')
    for item in mylist:
        if item < smallest:
            smallest = item
    return smallest
# Gradebook: student name -> list of scores.
grades = {
    "Ben": [80, 90, 100],
    "Zach": [70, 85, 50],
    "Meghan": [100, 90, 15, 90]
}

# Python 2 script (print statements, dict.iteritems()): report the extremes,
# mean and sorted scores for every student.
for item, values in grades.iteritems():
    print item,":"
    print "Biggest: ", find_biggest_number(values)
    print "Smallest: ", find_smallest_number(values)
    print "Mean: ", numpy.mean(values)
    print "Sorted: ", sorted(values)
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import cv2
import sys

# BUG FIX: np.set_printoptions(threshold=np.nan) raises on modern NumPy
# (threshold must be an int); sys.maxsize is the documented replacement.
np.set_printoptions(threshold=sys.maxsize)

fotofile = "uithetmidden.jpg" #"20160217_130858.jpg"
fotopath = "C:/Users/Jelle/Google Drive/Int. VISION/Vision foto's/Bottles/"

# Load and normalise the photo to a fixed working size.
picture = cv2.imread(fotopath + fotofile)
foto = cv2.resize(picture, (480, 700))
vertical, horizontal = foto.shape[:2]
kernel = np.ones((5,5),np.uint8)

cv2.namedWindow('Origineel',cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Origineel',0,0)
cv2.imshow('Origineel',foto)

# Grayscale then fixed-threshold binarisation.
foto_gray = cv2.cvtColor(foto, cv2.COLOR_BGR2GRAY)
cv2.namedWindow('Grijs',cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Grijs',250,0)
cv2.imshow('Grijs',foto_gray)

thresh = 70
foto_bin = cv2.threshold(foto_gray, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.namedWindow('Binary',cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Binary',500,0)
cv2.imshow('Binary',foto_bin)

# BUG FIX: integer division so the slice indices are ints on Python 3
# (`horizontal / 2` yields a float and breaks array slicing).
half = horizontal // 2
foto_linkerzijde = foto_bin[0:vertical, 0:half]
foto_rechterzijde = foto_bin[0:vertical, half:horizontal]
cv2.namedWindow('Linkerzijde',cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Linkerzijde',800,0)
cv2.imshow('Linkerzijde',foto_linkerzijde)
cv2.namedWindow('Rechterzijde',cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Rechterzijde',900,0)
cv2.imshow('Rechterzijde',foto_rechterzijde)

# Count dark pixels per half: total pixels minus the non-zero (white) ones.
total_pix_helft = half * vertical  # total number of pixels in half the image
pixels_links = total_pix_helft - cv2.countNonZero(foto_linkerzijde)
pixels_rechts = total_pix_helft - cv2.countNonZero(foto_rechterzijde)
print('Pixels links: ', pixels_links)
print('Pixels rechts: ', pixels_rechts)

# The bottle counts as centred when the halves differ by fewer than
# 1000 dark pixels.
if(pixels_links > pixels_rechts-1000 and pixels_links < pixels_rechts+1000):
    print("In het midden")
else:
    print("Uit het midden")
|
import csv
import random
import sys
class KnowledgeBase:
    """Knowledge base for a "Guess Who"-style character deduction game.

    Attributes:
        characters - remaining candidate characters (list of dicts, one per CSV row)
        characteristics - facts learned so far (characteristic name -> value)
        the_character - the randomly chosen secret character to interrogate
    """

    def __init__(self):
        """Load the characters from characters.csv and pick a secret one."""
        self.characters = []       # all characters, initially (dicts)
        self.characteristics = {}  # every characteristic learned via tell()
        self.read_characters()
        # Get a random character to interrogate
        self.the_character = random.choice(self.characters)

    def read_characters(self):
        """Append one dict per row of characters.csv to self.characters."""
        with open('characters.csv', newline='') as csvfile:
            for character in csv.DictReader(csvfile):
                self.characters.append(character)

    def get_most_impactful_characteristic(self):
        """Find the question whose "true" answer eliminates the most suspects.

        Returns:
            A tuple (name, value, # of remaining characters if true).
        """
        possible = self.get_variable_characteristics()
        best_key = ""
        best_value = ""
        # float('inf') sentinel instead of the original magic 999999999.
        best_len = float('inf')
        for key, values in possible.items():
            # Skip names: with unique names, guessing a name would always
            # eliminate the most characters and trivially win.
            if key == "Name":
                continue
            for value in values:
                # Suspects left if the answer to this question is "true".
                remaining = len(self.ask_vars(key, value))
                if remaining < best_len:
                    best_len = remaining
                    best_key = key
                    best_value = value
        if best_key == "":
            # Only names still vary.  Any unique name eliminates everyone
            # else, so the first available one is as good as any.
            # Fix: the original returned the untouched sentinel here instead
            # of the actual remaining count.
            best_key = "Name"
            best_value = possible["Name"][0]
            best_len = len(self.ask_vars(best_key, best_value))
        return (best_key, best_value, best_len)

    def get_variable_characteristics(self):
        """Characteristics that still have more than one possible value.

        Returns:
            A dict mapping characteristic name -> list of the distinct
            values it takes among the remaining characters.  Single-valued
            characteristics are dropped: they are certainties, not
            questions worth asking.
        """
        possible = {key: [] for key in self.characters[0]}
        # Collect every distinct value each characteristic still takes.
        for char in self.characters:
            for key, value in char.items():
                if value not in possible[key]:
                    possible[key].append(value)
        return {key: values for key, values in possible.items()
                if len(values) > 1}

    def tell(self, key, value):
        """Record a new fact and discard characters that contradict it."""
        self.characteristics[key] = value
        self.characters = self.ask_vars(key, value)

    def ask(self, key, value):
        """Ask the secret character whether characteristic *key* is *value*.

        Boolean characteristics answer "True"/"False"; other characteristics
        answer with the value itself, prefixed with "not " when it differs.
        """
        has_characteristic = (self.the_character[key] == value)
        if value == "True" or value == "False":
            # Boolean characteristic: answer as a string.
            return str(has_characteristic)
        return value if has_characteristic else "not " + value

    def ask_vars(self, key, value):
        """Remaining characters that satisfy the key/value constraint.

        A value prefixed with "not " selects the characters that do NOT
        have that value for the characteristic.
        """
        is_negated = value.startswith("not ")
        if is_negated:
            # Strip the prefix so plain equality comparisons work.
            value = value[4:]
        # != acts as XOR: keep matches normally, non-matches when negated.
        return [character for character in self.characters
                if (character[key] == value) != is_negated]
|
# Python program to check whether a character is a vowel or a consonant.
# Solution:
# taking user input
ch = input("Enter a character: ")
# Tuple membership uses the same per-item equality as the original or-chain.
if ch in ('A', 'a', 'E', 'e', 'I', 'i', 'O', 'o', 'U', 'u'):
    print(ch, "is a Vowel")
else:
    print(ch, "is a Consonant")
#output
'''Enter a character: e
e is a Vowel
Process finished with exit code 0'''
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import dataflow_pipeline.fanalca.honda_digital_asignacion_beam as honda_digital_asignacion_beam
import dataflow_pipeline.fanalca.honda_digital_gestion_cotizados_beam as honda_digital_gestion_cotizados_beam
import dataflow_pipeline.fanalca.honda_digital_gestion_ipdial_beam as honda_digital_gestion_ipdial_beam
import os
import socket
import datetime
import time
fanalca_api = Blueprint('fanalca_api', __name__)
# File-server mount point: "/media" when running on the "contentobi" host,
# the UNC network path otherwise (tuple indexed by the boolean comparison).
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
@fanalca_api.route("/digital/asignacion")
def archivos_asignacion():
    """Process pending 'Asignacion' CSV files.

    For every CSV in the file-server folder: upload it to Cloud Storage,
    delete the matching BigQuery rows (idempotent reload) and run the
    Dataflow job; processed files are moved to Procesados/.
    Returns a JSON status response.
    """
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Fanalca/Honda Digital/Asignacion/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            mifecha = archivo[12:20]  # load date embedded in the file name
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-fanalca')
            # Upload the file to Cloud Storage before sending it to Dataflow.
            blob = bucket.blob('digital-asignacion/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            try:
                # Remove the rows already loaded for this date so the run
                # can be repeated safely.
                deleteQuery = "DELETE FROM `contento-bi.fanalca.asignacion_digital` WHERE fecha_cargue = '" + mifecha + "'"
                client = bigquery.Client()
                query_job = client.query(deleteQuery)
                query_job.result()  # run the BigQuery delete job
            except Exception:
                # FIX: the original except body was a bare string literal
                # (a no-op), so failures vanished without any trace.
                print("No se pudo eliminar los registros de fecha " + mifecha)
            # With BigQuery cleaned and the file in Cloud Storage, run the job.
            mensaje = honda_digital_asignacion_beam.run('gs://ct-fanalca/digital-asignacion/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Fanalca/Honda Digital/Asignacion/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
@fanalca_api.route("/digital/gestion_cotizaciones")
def gestion_cotizaciones():
    """Process pending 'Gestion cotizaciones' CSV files (upload to Cloud
    Storage, wipe the BigQuery table, run the Dataflow job)."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    # Pick a lookback window whose exact length depends on the month.
    mes = time.strftime('%m')
    if mes in ['12','10','09','08','07','06','05','01']:
        days = 91
    elif mes in ['11']:
        days = 90
    elif mes in ['03','02']:
        days = 89
    else:
        days = 88
    fecha = datetime.date.today() - datetime.timedelta(days = days)
    fecha1 = time.strftime('%d.%m.%Y')
    fecha2 = fecha.strftime('%d.%m.%Y')
    # NOTE(review): this early return looks like leftover debugging output.
    # It makes everything below unreachable, so the route currently only
    # echoes the computed dates — confirm before removing it.
    return (fecha1 + " _ " + fecha2 + " _ " + mes)
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Fanalca/Honda Digital/Gestion_COTIZADOS/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            mifecha = archivo[20:28]  # load date embedded in the file name
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-fanalca')
            # Upload the file to Cloud Storage before sending it to Dataflow.
            blob = bucket.blob('digital-gestion/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            try:
                # Wipe the whole table before reloading it.
                deleteQuery = "DELETE FROM `contento-bi.fanalca.gestion_cotizados_digital` where 1 = 1"
                client = bigquery.Client()
                query_job = client.query(deleteQuery)
                #result = query_job.result()
                query_job.result()  # run the BigQuery delete job
            except:
                # NOTE(review): this bare string is a no-op statement — the
                # failure is silently ignored without even a log line.
                "No se pudo eliminar "
            # With BigQuery cleaned and the file uploaded, run the job.
            mensaje = honda_digital_gestion_cotizados_beam.run('gs://ct-fanalca/digital-gestion/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Fanalca/Honda Digital/Gestion_COTIZADOS/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
@fanalca_api.route("/digital/gestion_ipdial")
def gestion_ipdial():
    """Process pending 'Gestion IPDIAL' CSV files.

    Same pipeline as the other routes: upload each CSV to Cloud Storage,
    delete the matching BigQuery rows and run the Dataflow job; processed
    files are moved to Procesados/.  Returns a JSON status response.
    """
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Fanalca/Honda Digital/Gestion_IPDIAL/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            mifecha = archivo[17:25]  # load date embedded in the file name
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-fanalca')
            # Upload the file to Cloud Storage before sending it to Dataflow.
            blob = bucket.blob('digital-gestion/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            try:
                # Remove the rows already loaded for this date so the run
                # can be repeated safely.
                deleteQuery = "DELETE FROM `contento-bi.fanalca.gestion_ipdial_digital` WHERE fecha_cargue = '" + mifecha + "'"
                client = bigquery.Client()
                query_job = client.query(deleteQuery)
                query_job.result()  # run the BigQuery delete job
            except Exception:
                # FIX: the original except body was a bare string literal
                # (a no-op), so failures vanished without any trace.
                print("No se pudo eliminar los registros de fecha " + mifecha)
            # With BigQuery cleaned and the file uploaded, run the job.
            mensaje = honda_digital_gestion_ipdial_beam.run('gs://ct-fanalca/digital-gestion/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Fanalca/Honda Digital/Gestion_IPDIAL/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
######################################################################################################################### |
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from typing import Callable
import strawberry
from pants_explorer.server.browser import Browser
from pants_explorer.server.graphql.context import GraphQLContext
from pants_explorer.server.graphql.query.root import Query
from pants_explorer.server.graphql.subsystem import GraphQLSubsystem
from pants_explorer.server.uvicorn import UvicornServer
from starlette.responses import JSONResponse
from strawberry.fastapi import GraphQLRouter
from pants.backend.project_info.peek import _PeekJsonEncoder
class ExplorerJSONResponse(JSONResponse):
    """JSONResponse that serializes pants-specific objects via ``_PeekJsonEncoder``."""

    def render(self, content) -> bytes:
        # Same compact separators / non-ASCII passthrough as the stock
        # starlette JSONResponse; the only addition is the custom encoder.
        text = json.dumps(
            content,
            cls=_PeekJsonEncoder,
            ensure_ascii=False,
            allow_nan=False,
            indent=None,
            separators=(",", ":"),
        )
        return text.encode("utf-8")
def create_schema() -> strawberry.Schema:
    """Build the GraphQL schema, routing responses through ExplorerJSONResponse.

    strawberry's FastAPI router hard-codes which response class it uses and
    offers no configuration hook, so we patch the module attribute.
    """
    strawberry.fastapi.router.JSONResponse = ExplorerJSONResponse  # type: ignore[attr-defined]
    schema = strawberry.Schema(query=Query)
    return schema
def graphql_uvicorn_setup(
    browser: Browser,
    graphql: GraphQLSubsystem,
    route: str = "/graphql",
) -> Callable[[UvicornServer], None]:
    """Return a uvicorn setup hook that mounts the GraphQL API under *route*.

    When the subsystem asks for it, the hook also schedules opening the
    GraphiQL UI in the browser once the server is about to run.
    """

    def setup(uvicorn: UvicornServer) -> None:
        router = GraphQLRouter(
            create_schema(), context_getter=GraphQLContext(uvicorn).create_request_context
        )
        uvicorn.app.include_router(router, prefix=route)
        if graphql.open_graphiql:
            # Browser.open() needs an unlocked scheduler, so defer the call to a
            # callstack that is not executing a rule.
            open_ui = lambda: browser.open(uvicorn.request_state, route)
            uvicorn.prerun_tasks.append(open_ui)

    return setup
|
import theano as th
from theano import tensor as T
def build_points(MULT, stats, base):
    """Compile a theano function for item stat budgets.

    The returned callable maps (item level, secondary-stat distribution,
    budget scalar) to a pair: total main-stat points and the per-secondary
    point vector.
    """
    level = T.iscalar('ilvl')
    dist = T.dvector('secondary_dist')
    budget = T.dscalar('budget_scalar')
    # 1.15x growth every 15 item levels above the base level
    growth = 1.15 ** ((level - base) / 15)
    primary = budget * stats['primary'] * growth
    # secondaries are normalized by the base multiplier, then rescaled by
    # the multiplier at the requested level
    raw_secondary = stats['secondaries'] / MULT[base - 1] * growth
    secondary_total = budget * raw_secondary * MULT[level - 1]
    per_secondary = dist * secondary_total
    return th.function([level, dist, budget], [primary, per_secondary])
def combine_points(armor, jewelry):
    """Combine the two slot calculators into one keyed dispatcher.

    The returned function forwards ``*args`` to *armor* or *jewelry*
    depending on ``key``; any other key yields None.
    """

    def points(key, *args):
        handlers = {'armor': armor, 'jewelry': jewelry}
        handler = handlers.get(key)
        if handler is not None:
            return handler(*args)

    return points
|
#%%
# Demo: visualize the iris dataset raw, after PCA, and after LDA (2 components each).
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import time
import pandas as pd
# data = datasets.load_breast_cancer()
data = datasets.load_iris()
# print(len(data.feature_names))
# print(data.feature_names)
# print(len(data.target_names))
# print(data.target_names)
x = data.data[:, :2]  # select the first and second features only
y = data.target
target_names = data.target_names
# target_names
plt.figure(figsize=(10, 10))
colors = ['red', 'blue']
# NOTE(review): only two colors/class indices are zipped, so for iris
# (3 classes) the third class is never plotted — confirm this is intentional
# (the commented-out breast-cancer dataset above has exactly 2 classes).
for color, i, target_name in zip(colors, [0, 1], target_names):
    plt.scatter(x[y == i, 0], x[y == i, 1], color=color, label=target_name)
plt.legend()
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.show()
#### apply PCA
x = data.data
y = data.target
target_names = data.target_names
pca = PCA(n_components=2)  # build the PCA model
x_p = pca.fit(x).transform(x)  # fit the model, reduce dimensionality
# print('가장 큰 주성분 두 개에 대한 분산: %s' % str(pca.explained_variance_ratio_))
plt.figure(figsize=(10, 10))
colors = ['red', 'blue']
for color, i, target_name in zip(colors, [0, 1], target_names):
    plt.scatter(x_p[y == i, 0], x_p[y == i, 1], color=color, label=target_name)
plt.legend()
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
#### apply LDA
x = data.data
y = data.target
target_names = data.target_names
lda = LinearDiscriminantAnalysis(solver='eigen', n_components=2)
x_l = lda.fit(x, y).transform(x)  # fit the model, reduce dimensionality
plt.figure(figsize=(10, 10))
colors = ['red', 'blue']
for color, i, target_name in zip(colors, [0, 1], target_names):
    plt.scatter(x_l[y == i, 0], x_l[y == i, 1], color=color, label=target_name)
plt.legend()
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.show()
#%%
''' Tässä tiedostossa on funktioita, joilla lasketaan
ravintoaineiden saantia suhteessa ravitsemussuosituksiin.
Ruokien ravintoainesisällöt ovat THL:n ylläpitämän
Fineli-tietokannan versiosta 20. Päivittäiset saantisuositukset
ovat pääosin suomalaisista ravitsemussuosituksista vuodelta 2014
pienin täydennyksin.
Olennaiset tiedot löytyvät seuraavista csv-tiedostoista:
component_value_utf-8.csv - ravintoaineiden määrät kussakin elintarvikkeessa
eufdname_FI_utf-8.csv - ravintoaineiden nimet
food_utf-8.csv - elintarvikkeiden nimet
saantisuositus_2014.csv - päivittäiset saantisuositukset
'''
import pandas as pd
from scipy.optimize import linprog
# Food types (FUCLASS codes) excluded from the calculations:
# baby foods, infant formulas, meal replacements, and supplements —
# the analysis targets regular foods only.
omitted_food_types = ('BABYFTOT',
                      'BABMEATD',
                      'BABFISHD',
                      'BABMILPO',
                      'BABWATPO',
                      'BABFRUB',
                      'BABVEGE',
                      'BABMIFRU',
                      'BABOTHER',
                      'MMILK',
                      'INFMILK',
                      'CASMILK',
                      'PREMILK',
                      'SOYMILK',
                      'WHEYMILK',
                      'AMINMILK',
                      'SPECTOT',
                      'SPECSUPP',
                      'MEALREP',
                      'SPORTFOO',
                      'SPECFOOD')
def read_files(path: str) -> tuple:
    """Read the Fineli csv files found under *path* into pandas objects.

    Note: this function only loads the data; filtering out supplements etc.
    is done separately (see ``filter_food_class``).

    Args:
        path (str): path prefix (including trailing separator) of the csv files

    Returns:
        tuple: (component_value, eufdname, food, saantisuositus) dataframes
    """
    # the recommendations file ships without a header row, so name the
    # columns explicitly (per age/sex group)
    recommendation_columns = ["EUFDNAME", "name", "mnuori", "maikuinen", "mkeski",
                              "miäkäs", "mvanha", "npieni", "nnuori", "naikuinen",
                              "nkeski", "niäkäs", "nvanha"]
    component_value = pd.read_csv(f"{path}component_value_utf-8.csv", sep=";")
    eufdname = pd.read_csv(f"{path}eufdname_FI_utf-8.csv", sep=";")
    food = pd.read_csv(f"{path}food_utf-8.csv", sep=";")
    saantisuositus = pd.read_csv(f"{path}saantisuositus_2014.csv", sep=";",
                                 header=None, names=recommendation_columns)
    return component_value, eufdname, food, saantisuositus
# NOTE(review): machine-specific absolute path hardcoded here — parameterize
# (env var or CLI argument) before reuse on another machine.
component_value, eufdname, food, saantisuositus = read_files('/home/pomo/Asiakirjat/Kurssit/Taitotalo_Python-ohjelmoija/python/portfolio/fineli_20/')
# removal of meal replacements, supplements etc.
def filter_food_class(dataframe: pd.DataFrame, class_to_remove: tuple, col: str = 'FUCLASS') -> pd.DataFrame:
    """Remove rows whose food-class column matches any of the given class codes.

    Args:
        dataframe (pandas_df): food data with a FUCLASS / IGCLASS column
        class_to_remove (tuple): food class names to filter out
        col (str): column to filter on, 'FUCLASS' (default) or 'IGCLASS'

    Returns:
        pandas_df: the dataframe minus the rows whose *col* value is in
        *class_to_remove*

    Raises:
        ValueError: if *col* is neither 'FUCLASS' nor 'IGCLASS'
    """
    if col not in ('FUCLASS', 'IGCLASS'):
        raise ValueError(f'Invalid column name: {col}')
    # Single vectorized pass replaces the original duplicated per-column
    # loops (one copy of the frame per class code). NaN values are kept,
    # matching the old `!=` comparison behaviour.
    return dataframe[~dataframe[col].isin(class_to_remove)]
# puuttuvien ravintoarvotietojen käsittely
def transpose_component_value(dataframe: pd.DataFrame) -> pd.DataFrame:
    """Pivot the per-(food, nutrient) rows into one row per food.

    The input has one row per nutrient amount in one food; the result has
    FOODID as the index and one column per nutrient (EUFDNAME), with missing
    amounts filled as 0.0. The ACQTYPE, METHTYPE and METHIND columns are
    dropped first.

    Args:
        dataframe (pandas_df): nutrient amounts per food, long format

    Returns:
        pandas_df: wide format, one row per food, one column per nutrient
    """
    trimmed = dataframe.drop(['ACQTYPE', 'METHTYPE', 'METHIND'], axis=1)
    wide = trimmed.pivot_table(values='BESTLOC', index='FOODID',
                               columns='EUFDNAME', fill_value=0.0)
    return wide
|
'''
等价多米诺骨牌对的数量
给你一个由一些多米诺骨牌组成的列表 dominoes。
如果其中某一张多米诺骨牌可以通过旋转 0 度或 180 度得到另一张多米诺骨牌,我们就认为这两张牌是等价的。
形式上,dominoes[i] = [a, b] 和 dominoes[j] = [c, d] 等价的前提是 a==c 且 b==d,或是 a==d 且 b==c。
在 0 <= i < j < dominoes.length 的前提下,找出满足 dominoes[i] 和 dominoes[j] 等价的骨牌对 (i, j) 的数量。
'''
#核心思想:每个元素尽可能只用一次
def numEquivDominoPairs(dominoes):
    """Count pairs (i, j), i < j, of equivalent dominoes.

    Two dominoes [a, b] and [c, d] are equivalent when a==c and b==d, or
    a==d and b==c (i.e. equal up to a 180-degree rotation).

    Improvements over the original: O(n) instead of O(n^2), integer
    arithmetic instead of float division, and the input list is no longer
    destructively consumed (the original popped every element).

    Args:
        dominoes: list of [a, b] pairs

    Returns:
        int: number of equivalent pairs
    """
    from collections import Counter

    counts = Counter()
    for a, b in dominoes:
        # canonical key: sorted orientation, so [1,2] and [2,1] collide
        counts[(a, b) if a <= b else (b, a)] += 1
    # each group of c equivalent tiles contributes C(c, 2) pairs
    return sum(c * (c - 1) // 2 for c in counts.values())
# Smoke test: [1,2] and [2,1] are equivalent -> exactly one pair.
dominoes = [[1,2],[2,1],[3,4],[5,6]]
print(numEquivDominoPairs(dominoes))
#!/usr/bin/env python3
from pyjetty.mputils import pwarning, pinfo, perror
import uproot
import pandas as pd
import numpy as np
import argparse
import os
import tqdm
def pd_tree(path, tname, squery=None):
    """Load ROOT tree *tname* from file *path* into a pandas DataFrame.

    Args:
        path: path to the ROOT file
        tname: tree name inside the file
        squery: optional pandas query string used to filter the rows

    Returns:
        The (optionally filtered) DataFrame, or None when the file or the
        tree cannot be read.
    """
    try:
        tree = uproot.open(path)[tname]
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed
        pwarning('error getting', tname, 'from file:', path)
        return None
    if not tree:
        perror('Tree {} not found in file {}'.format(tname, path))
        return None
    df = tree.pandas.df()
    if squery:
        df = df.query(squery)
        # bug fix: reset_index returns a new frame; the original discarded
        # the result, leaving the filtered index gaps in place
        df = df.reset_index(drop=True)
    return df
def count_D_in_file_merge(fname, _d0cuts_kpi):
    """Report event and D0-candidate counts for *fname* after event cuts.

    NOTE(review): this definition is shadowed by the redefinition of
    `count_D_in_file_merge` further down in the file and is therefore dead
    code; it also has no final return statement (implicitly returns None).
    Kept as-is pending a decision on which version to keep.
    """
    d0_tree_name = 'PWGHF_TreeCreator/tree_D0'
    event_tree_name = 'PWGHF_TreeCreator/tree_event_char'
    # event selection: not rejected, reconstructed vertex within 10 cm
    _ev_cuts = "is_ev_rej == 0 & abs(z_vtx_reco) < 10."
    event_df = pd_tree(path=fname, tname=event_tree_name, squery=_ev_cuts)
    if event_df is None:
        return False
    pinfo('Nev', fname, len(event_df.index))
    d0_df = pd_tree(path=fname, tname=d0_tree_name, squery=_d0cuts_kpi)
    if d0_df is None:
        return False
    pinfo('ND0', fname, len(d0_df.index))
    # pinfo(list(event_df))
    # older trees lack the ev_id_ext column, so merge on whichever key set exists
    if 'ev_id_ext' in list(event_df):
        d0ev_df = pd.merge(d0_df, event_df, on=['run_number', 'ev_id', 'ev_id_ext'])
    else:
        d0ev_df = pd.merge(d0_df, event_df, on=['run_number', 'ev_id'])
    d0ev_df.query(_ev_cuts, inplace=True)
    d0ev_df_grouped = d0ev_df.groupby(['run_number', 'ev_id'])
    pinfo('ND0+EvCuts ', fname, len(d0ev_df.index))
    pinfo('GR[ND0+EvCuts] ', fname, len(d0ev_df_grouped))
def count_D_in_file_merge(fname, _d0cuts_kpi):
    """Return the number of events in *fname* containing at least one
    selected D0 candidate, or -1 when the tree cannot be read."""
    candidates = pd_tree(path=fname, tname='PWGHF_TreeCreator/tree_D0',
                         squery=_d0cuts_kpi)
    if candidates is None:
        return -1
    # one group per (run, event): counting groups counts events with >= 1 candidate
    grouped = candidates.groupby(['run_number', 'ev_id'])
    return len(grouped)
def main():
    """Count selected-D0 events in one ROOT file or in a list of files.

    With a `.root` input, prints the count; otherwise treats the input as a
    text file listing ROOT files, counts each, and writes a CSV sorted by
    descending count.
    """
    # D0 -> K pi selection: kinematics + topology + TPC/TOF PID (either
    # mass hypothesis assignment of the two prongs)
    _d0cuts_base = "(pt_cand > 3.0 & pt_prong0 > 0.6 & pt_prong1 > 0.6 & abs(eta_cand) < 0.8) & "
    _d0cuts_extra = "(dca)<0.03 & abs(cos_t_star)<0.8 & (imp_par_prod)<-0.0001 & (cos_p)>0.9 & "
    _d0cuts_kpi = _d0cuts_base + _d0cuts_extra
    _d0cuts_kpi += "((abs(nsigTPC_Pi_0) < 3. & (abs(nsigTOF_Pi_0) < 3. | nsigTOF_Pi_0 < -900) & abs(nsigTPC_K_1) < 3. & (abs(nsigTOF_K_1) < 3. | nsigTOF_K_1 < -900)) | "
    _d0cuts_kpi += "(abs(nsigTPC_Pi_1) < 3. & (abs(nsigTOF_Pi_1) < 3. | nsigTOF_Pi_1 < -900) & abs(nsigTPC_K_0) < 3. & (abs(nsigTOF_K_0) < 3. | nsigTOF_K_0 < -900)))"
    parser = argparse.ArgumentParser(description='D0 analysis on alice data', prog=os.path.basename(__file__))
    parser.add_argument('-f', '--flist', help='single root file or a file with a list of files to process', type=str, default=None, required=True)
    parser.add_argument('-n', '--nfiles', help='max n files to process', type=int, default=0, required=False)
    parser.add_argument('-o', '--output', help="prefix output file names", type=str, default='./count_D0.csv')
    args = parser.parse_args()
    fname = args.flist
    if '.root' in fname:
        # count_D_in_file(fname, _d0cuts_kpi)
        nD0s = count_D_in_file_merge(fname, _d0cuts_kpi)
        pinfo(fname, 'N of events with selected D0cand', nD0s)
    else:
        pinfo('reading file list from', fname)
        # NOTE(review): the comprehension variable `f` shadows the file handle
        # `f`; works, but rename one of them for clarity.
        with open(fname) as f:
            flist = [f.rstrip('\n') for f in f.readlines()]
        pinfo('number of files', len(flist))
        counts = []
        # for ifn, fn in enumerate(flist):
        if args.nfiles > 0:
            flist = flist[:args.nfiles]
        for fn in tqdm.tqdm(flist):
            # pinfo('file', ifn, 'of', len(flist))
            # count_D_in_file(fn, _d0cuts_kpi)
            nD0s = count_D_in_file_merge(fn, _d0cuts_kpi)
            counts.append([fn, nD0s])
        # sort by candidate-event count, highest first
        counts_sorted = sorted(counts, key=lambda c: c[1], reverse=True)
        df = pd.DataFrame(counts_sorted, columns=['fname', 'ND0_cand_events'])
        df.to_csv(args.output, index=False)
        pinfo(args.output, 'written.')


if __name__ == '__main__':
    main()
|
import poplib
import pandas as pd
from email.parser import Parser
from email.header import decode_header
from email.utils import parseaddr
from datetime import datetime, timedelta
def get_transactions(date):
    """Scan a QQ mailbox via POP3 for BOUGHT/SOLD broker notification emails.

    Walks messages from newest to oldest, parses the Subject line into
    action/amount/symbol/price and the Date header into a timestamp, and
    collects the transactions whose date string contains *date*
    (format '%Y-%m-%d'). Stops at the first message older than *date*.

    Returns:
        list[dict]: transactions in chronological (oldest-first) order.
    """
    # email address, password and POP3 server:
    email = 'xxxxxxxxxxxx@qq.com'
    password = 'xxxxxxxxxx'
    pop3_server = 'pop.qq.com'
    # connect to the POP3 server:
    server = poplib.POP3_SSL(pop3_server, '995')
    # authenticate:
    server.user(email)
    server.pass_(password)
    resp, mails, octets = server.list()
    index = len(mails)  # start from the newest message
    data = []
    while True:
        obj = {}
        resp, lines, octets = server.retr(index)
        msg = b'\r\n'.join(lines).decode('utf-8')
        if msg.find('Subject: BOUGHT') > 0 or msg.find('Subject: SOLD') > 0:
            arr = msg.split('\n')
            for line in arr:
                line = line.strip()
                if line.find('Subject') == 0:
                    # e.g. "Subject: BOUGHT 100 AAPL @ 123.45 (UXXX6016)"
                    subject = line.replace('Subject:', '').replace(' (UXXX6016)', '').replace(' @ ', ' ').strip()
                    arr = subject.split(' ')
                    obj['action'] = arr[0]
                    obj['amount'] = int(arr[1].replace(',', ''))
                    obj['symbol'] = arr[2]
                    obj['price'] = round(float(arr[3]), 2)
                if line.find('Date:') == 0:
                    # assumes a fixed '-0500' offset in the Date header — TODO confirm
                    s = line.replace('Date: ', '').replace(' -0500', '')
                    fmt = '%a, %d %b %Y %H:%M:%S'
                    dt = datetime.strptime(s, fmt)
                    s = dt.strftime('%Y-%m-%d %H:%M:%S')
                    obj['datetime'] = s
            if len(obj) > 0:
                # string comparison works because both are ISO-ordered
                if obj['datetime'] < date:
                    break
                # NOTE(review): `continue` skips the `index -= 1` below, so a
                # matching-subject mail newer than *date* but from a different
                # day appears to be re-read forever — confirm intended flow.
                if obj['datetime'].find(date) == -1:
                    continue
                data.append(obj)
        index -= 1
    server.quit()
    # collected newest-first; reverse to chronological order
    return data[::-1]
# Fetch transactions recorded on the day three days ago.
day = (datetime.now() + timedelta(days=-3)).strftime('%Y-%m-%d')
data = get_transactions(day)
df = pd.DataFrame(data)
# bug fix: DataFrame.reindex_axis was deprecated and removed (pandas 0.25);
# reindex(columns=...) is the supported equivalent for axis=1.
df = df.reindex(columns=['symbol', 'action', 'amount', 'price', 'datetime'])
df
|
# For each whitespace-separated word on one input line, print the characters
# at odd indices (1, 3, 5, ...), with the words separated by single spaces.
words = input().split()
print(' '.join(word[1::2] for word in words))
|
import os
import re
import wx
from ..._wxgui import get_app
def ask_saveas(title, message, filetypes, defaultDir, defaultFile):
    """See eelbrain.ui documentation; delegates to the running wx app."""
    return get_app().ask_saveas(title, message, filetypes, defaultDir, defaultFile)
def ask_dir(title="Select Folder", message="Please Pick a Folder", must_exist=True):
    """Show a folder picker via the running wx app; returns the chosen path."""
    return get_app().ask_for_dir(title, message, must_exist)
def ask_file(title, message, filetypes, directory, mult):
    """Show a file-open dialog via the running wx app (mult allows multi-select)."""
    return get_app().ask_for_file(title, message, filetypes, directory, mult)
def ask(title="Overwrite File?",
        message="Duplicate filename. Do you want to overwrite?",
        cancel=False,
        default=True,  # True=YES, False=NO, None=Nothing
        ):
    """Show a modal yes/no(/cancel) question dialog.

    Returns:
        YES -> True
        NO -> False
        CANCEL -> None
        (any other close action also returns None implicitly)
    """
    style = wx.YES_NO | wx.ICON_QUESTION
    if cancel:
        style = style | wx.CANCEL
    if default:
        style = style | wx.YES_DEFAULT
    elif default == False:
        style = style | wx.NO_DEFAULT
    dialog = wx.MessageDialog(None, message, title, style)
    answer = dialog.ShowModal()
    # bug fix: the dialog was never destroyed, leaking a native window on
    # every call (wx dialogs are not garbage-collector managed)
    dialog.Destroy()
    if answer == wx.ID_NO:
        return False
    elif answer == wx.ID_YES:
        return True
    elif answer == wx.ID_CANCEL:
        return None
def ask_color(default=(0, 0, 0)):
    """Show a color picker; return an (r, g, b) tuple scaled to 0..1,
    or False if the user cancelled."""
    dlg = wx.ColourDialog(None)
    dlg.GetColourData().SetChooseFull(True)
    if dlg.ShowModal() == wx.ID_OK:
        chosen = dlg.GetColourData().GetColour().Get()
        # wx reports 0..255 channels; normalize to floats in 0..1
        result = tuple(channel / 255. for channel in chosen)
    else:
        result = False
    dlg.Destroy()
    return result
def ask_str(message, title, default=''):
    """Prompt for a text string via the running wx app."""
    return get_app().ask_for_string(title, message, default)
def message(title, message="", icon='i'):
    """Show a modal message dialog with an OK button.

    icon : str
        can be one of the following: '?', '!', 'i', 'error', None

    Raises:
        ValueError: for any other icon value.
    """
    style = wx.OK
    if icon == 'i':
        style = style | wx.ICON_INFORMATION
    elif icon == '?':
        style = style | wx.ICON_QUESTION
    elif icon == '!':
        style = style | wx.ICON_EXCLAMATION
    elif icon == 'error':
        style = style | wx.ICON_ERROR
    elif icon is None:
        pass
    else:
        raise ValueError("Invalid icon argument: %r" % icon)
    dlg = wx.MessageDialog(None, message, title, style)
    dlg.ShowModal()
    # bug fix: destroy the dialog after use; the original leaked a native
    # window on every call
    dlg.Destroy()
class progress_monitor:
    """
    catches calls meant to create a progress indicator, because the wx
    ProgressDialog was way too slow.
    To clean up after a crash, call
    >>> ui.kill_progress_monitors()
    """

    def __init__(self, i_max=None,
                 title="Task Progress",
                 message="Wait and pray!",
                 cancel=True):
        """Create the underlying wx.ProgressDialog.

        i_max: total step count; None selects indeterminate (pulse) mode.
        cancel: whether the dialog shows an abort button.
        """
        style = wx.PD_AUTO_HIDE | wx.GA_SMOOTH  # |wx.PD_REMAINING_TIME
        if cancel:
            style = style | wx.PD_CAN_ABORT
        if i_max is None:
            # indeterminate mode: wx still needs a positive maximum, use 1
            self.indeterminate = True
            i_max = 1
        else:
            self.indeterminate = False
        self.dialog = wx.ProgressDialog(title, message, i_max, None, style)
        self.i = 0  # current step counter (determinate mode only)
        if self.indeterminate:
            self.dialog.Pulse()

    def __del__(self):
        # best-effort cleanup if the caller forgot to terminate
        self.terminate()

    def advance(self, new_msg=None):
        """Advance one step (or pulse), optionally updating the message.

        Raises KeyboardInterrupt if the user pressed the abort button.
        NOTE(review): in indeterminate mode Pulse(new_msg) is called even when
        new_msg is None — confirm wx accepts None here.
        """
        if self.indeterminate:
            cont, skip = self.dialog.Pulse(new_msg)
        else:
            self.i += 1
            args = (self.i,)
            if new_msg:
                args += (new_msg,)
            cont, skip = self.dialog.Update(*args)
        if not cont:
            self.terminate()
            raise KeyboardInterrupt

    def message(self, new_msg):
        """Replace the dialog message without advancing progress.

        Raises KeyboardInterrupt if the user pressed the abort button.
        """
        cont, skip = self.dialog.Update(self.i, new_msg)
        if not cont:
            self.terminate()
            raise KeyboardInterrupt

    def terminate(self):
        """Close and destroy the dialog (safe to call more than once)."""
        if hasattr(self.dialog, 'Close'):
            self.dialog.Close()
        self.dialog.Destroy()
def copy_file(path):
    """Place a file reference on the system clipboard."""
    if not wx.TheClipboard.Open():
        # clipboard is held by another application; silently give up,
        # matching the original behaviour
        return
    try:
        file_data = wx.FileDataObject()
        file_data.AddFile(path)
        wx.TheClipboard.SetData(file_data)
    finally:
        # close on both success and failure (exceptions still propagate)
        wx.TheClipboard.Close()
def copy_text(text):
    """Place a text string on the system clipboard."""
    if not wx.TheClipboard.Open():
        # clipboard unavailable; do nothing, matching the original behaviour
        return
    try:
        wx.TheClipboard.SetData(wx.TextDataObject(text))
    finally:
        # close on both success and failure (exceptions still propagate)
        wx.TheClipboard.Close()
|
import kivy
kivy.require("1.9.1")
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
class K_kivy2App(App):
    """Minimal kivy application whose root widget is an empty FloatLayout."""

    def build(self):
        # root widget for the application window
        root = FloatLayout()
        return root
# Instantiate and start the kivy event loop (blocks until the window closes).
flApp = K_kivy2App()
flApp.run()
# -*- coding: utf-8 -*-
import os
from authomatic.providers import oauth2, oauth1, openid
# Flask / extension configuration for the microblog application.
WTF_CSRF_ENABLED = True
# SECURITY(review): hardcoded secret key — load from an environment variable
# and rotate this value; anything committed to VCS must be considered leaked.
SECRET_KEY = '\xd3J\xf0\x02\xc6\xbe@H`XL\xedH\xa3\xefj\xb3H\xde\xa1\xa8\x9f\xc7U\xd1'
OPENID_PROVIDERS = [
    {'name': 'Google', 'url': 'https://www.google.com/accounts/o8/id'}
]
base_dir = os.path.abspath(os.path.dirname(__file__))
# prefer DATABASE_URL (e.g. on Heroku); fall back to a local sqlite file
if os.environ.get('DATABASE_URL') is None:
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(base_dir, 'app.db')
else:
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
SQLALCHEMY_MIGRATE_REPO = os.path.join(base_dir, 'db_repository')
# pagination
POSTS_PER_PAGE = 3
# full-text search index location and result cap
WHOOSH_BASE = os.path.join(base_dir, 'search.db')
MAX_SEARCH_RESULTS = 50
#email
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_SSL = False
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
#admins
ADMINS = ['lyxpudox@gmail.com']
# SECURITY(review): OAuth client secrets committed in source — move to
# environment variables and revoke/reissue these credentials.
OAUTH_CRED = {
    'facebook': {
        'id': '1438963539759023',
        'secret': '20cc876b5c61f2710e6cc53a871d3ea1'
    },
    'google': {
        'id': '6544715008-pc6kgj83v6m6gqdnhjnnm7afjail8oj2.apps.googleusercontent.com',
        'secret': 'gFVsF8mBlXEk-i-f9YpK-Kuv'
    }
}
LANGUAGES = {
    'en': 'English',
    'es': 'Español'
}
MS_TRANSLATOR_CLIENT_ID = 'lyxpudoxs-microblog'
# SECURITY(review): another hardcoded secret — same treatment as above.
MS_TRANSLATOR_CLIENT_SECRET = '9GwdzR2gs1qsPMkm2gaTEpBiQWXGgjXtaBJ08svpvbE'
SQLALCHEMY_RECORD_QUERIES = True
# slow database query threshold (in seconds)
DATABASE_QUERY_TIMEOUT = 0.5
from app.models.alert import Alert
import json
from conf.database import Connection
from app import pubsub
from check_last_price import GetLastPrice
class ControllerAlert:
    """CRUD operations for per-user price alerts; results are reported to
    the user through the 'alerts' pubsub channel."""

    @staticmethod
    def get_alerts(user_id):
        """Publish every alert belonging to *user_id* to the pubsub channel.

        Returns None on database failure (matching the original contract).
        """
        conn = Connection()
        cur = conn.cursor()
        try:
            # security fix: the original interpolated user_id into the SQL
            # text via f-string (injection risk); use a bound parameter.
            sql = 'select * from alerts where user_id = %s'
            print(sql)
            cur.execute(sql, (user_id,))
            data = cur.fetchall()
            pubsub.publish('alerts', [user_id, f"Seus alertas:"])
            for alert in data:
                a = Alert(alert[1], alert[2], alert[3], alert[4])
                mensagem = f"TICKET:{a.ticket} QUEDA: {a.down_percent}% ALTA:{a.up_percent}%"
                pubsub.publish('alerts', [a.user_id, mensagem])
        except Exception:
            # narrowed from bare except so KeyboardInterrupt/SystemExit pass through
            return None

    @staticmethod
    def get_alerts_by_ticket(ticket, user_id):
        """Return the user's Alert for *ticket*, False if none exists,
        or None on database failure."""
        try:
            conn = Connection()
            cur = conn.cursor()
            sql = 'select * from alerts where ticket = %s and user_id = %s'
            cur.execute(sql, (ticket, user_id,))
            data = cur.fetchall()
            if len(data) > 0:
                for i in data:
                    alert = Alert(i[1], i[2], i[3], i[4])
                return alert  # only the last row matters; one row expected per (ticket, user)
            else:
                return False
        except Exception:
            return None

    @staticmethod
    def insert(alerts, user_id):
        """Insert or update alerts given as 'TICKET:down:up' strings.

        New (ticket, user) pairs are inserted; existing ones are updated.
        Returns the list of update results (inserts are not appended,
        matching the original behaviour).
        """
        conn = Connection()
        cur = conn.cursor()
        alerts_insert = []
        last_price = GetLastPrice()
        for alert in alerts:
            alert = alert.split(":")
            ticket = alert[0].upper()
            down_percent = alert[1]
            up_percent = alert[2]
            new_alert = Alert(ticket, down_percent, up_percent, user_id)
            if new_alert.is_ticket() and new_alert.is_percents():
                if not ControllerAlert.get_alerts_by_ticket(ticket, user_id):
                    try:
                        sql = 'insert into alerts(ticket,down_percent, up_percent,user_id) values (%s, %s, %s, %s)'
                        print(new_alert.ticket, new_alert.down_percent, new_alert.up_percent, new_alert.user_id)
                        cur.execute(sql, (new_alert.ticket, new_alert.down_percent, new_alert.up_percent, new_alert.user_id))
                        conn.commit()
                        pubsub.publish('alerts', [new_alert.user_id, f"{new_alert.ticket} inserido com sucesso"])
                        last_price.last_price_tickets(ticket)
                    except Exception:
                        # bug fix: `alert` is a list here (it was re-bound by
                        # split above), so the original `alert.ticket` raised
                        # AttributeError inside the handler; use new_alert.
                        pubsub.publish('alerts', [user_id, f"falha ao inserir {new_alert.ticket}"])
                else:
                    alerts_insert.append(ControllerAlert.update_alerts(new_alert))
                    # NOTE(review): called without a ticket here, unlike the
                    # insert branch — confirm last_price_tickets' signature.
                    last_price.last_price_tickets()
            else:
                pubsub.publish('alerts', [user_id, f"Valores inválidos, consulte o manual com /help"])
        return alerts_insert

    @staticmethod
    def delete_alert(ticket, user_id):
        """Delete the user's alert for *ticket*, reporting the outcome via pubsub."""
        try:
            conn = Connection()
            cur = conn.cursor()
            sql = 'delete from alerts where ticket = %s and user_id = %s'
            cur.execute(sql, (ticket, user_id))
            conn.commit()
            pubsub.publish('alerts', [user_id, f"{ticket} excluido com sucesso "])
        except Exception:
            pubsub.publish('alerts', [user_id, f"erro ao excluir {ticket}"])

    @staticmethod
    def update_alerts(alert):
        """Update the down/up thresholds of an existing alert, reporting via pubsub."""
        try:
            conn = Connection()
            cur = conn.cursor()
            sql = 'update alerts set down_percent = %s, up_percent = %s where ticket = %s and user_id = %s'
            cur.execute(sql, (alert.down_percent, alert.up_percent, alert.ticket, alert.user_id))
            conn.commit()
            pubsub.publish('alerts', [alert.user_id, f"Alerta atualizado com sucesso"])
        except Exception:
            pubsub.publish('alerts', [alert.user_id, f"erro ao atualizar alerta"])
# Manual smoke test: insert one alert for a fixed telegram user id.
if __name__ == "__main__":
    a = ControllerAlert()
    a.insert(['WEGE3:4:5'], 409891117)
|
import time;
# Represent a directed graph as a (vertices, edges) pair of sets.
V = {'GABOR', 'ARMIN', 'BELA', 'BENCE', 'ANDRAS', 'BALAZS', 'TODOR', 'TIHAMER', 'UBUL', 'KALMAN',
     'FERENC', 'GEZA', 'GYORGY', 'AGOSTON', 'ZOLTAN'}
# Each tuple (u, v) is a directed edge u -> v.
E = {('GABOR', 'ARMIN'),
     ('BELA', 'ARMIN'),
     ('ARMIN', 'BENCE'),
     ('BENCE', 'ANDRAS'),
     ('BENCE', 'BALAZS'),
     ('ANDRAS', 'TODOR'),
     ('TODOR', 'TIHAMER'),
     ('TIHAMER', 'BALAZS'),
     ('BALAZS', 'UBUL'),
     ('BALAZS', 'KALMAN'),
     ('KALMAN', 'FERENC'),
     ('FERENC', 'GEZA'),
     ('UBUL', 'GYORGY'),
     ('UBUL', 'AGOSTON'),
     ('UBUL', 'ZOLTAN'),
     ('GYORGY', 'GEZA')}
G = (V, E)
# Find a path using backtracking algorithm
def find_path(graph, start, end, path=None):
    """Find a path from *start* to *end* by depth-first backtracking.

    Args:
        graph: (vertices, edges) pair — a set of nodes and a set of
            directed (u, v) edge tuples
        start: node to begin from
        end: node to reach
        path: nodes already visited (used internally by the recursion)

    Returns:
        The list of nodes from start to end, or None if no path exists.
    """
    # Fix: mutable default argument replaced with None. The original never
    # mutated the shared default list (it always built `path + [start]`),
    # so behaviour is unchanged, but the idiom invites bugs.
    path = ([] if path is None else path) + [start]
    if start == end:
        return path
    if start not in graph[0]:
        return None
    # follow every edge leaving `start` that does not revisit a node
    for edge in graph[1]:
        if edge[0] == start and edge[1] not in path:
            npath = find_path(graph, edge[1], end, path)
            if npath:
                return npath
    return None
# start a timer
start_time = time.time();
# Exercise find_path on several (start, end) pairs, including unreachable ones.
print('GABOR -> ZOLTAN')
print(find_path(G, 'GABOR', 'ZOLTAN'))
print()
print('ARMIN -> TIHAMER')
print(find_path(G, 'ARMIN', 'TIHAMER'))
print()
print('ANDRAS -> GEZA')
print(find_path(G, 'ANDRAS', 'GEZA'))
print()
print('FERENC -> AGOSTON')
print(find_path(G, 'FERENC', 'AGOSTON'))
print()
print('BALAZS -> BELA')
print(find_path(G, 'BALAZS', 'BELA'))
print()
print('BALAZS -> UBUL')
print(find_path(G, 'BALAZS', 'UBUL'))
print()
print('BELA -> GEZA')
print(find_path(G, 'BELA', 'GEZA'))
print()
# print execution time
print("--- execution time: %s seconds ---" % (time.time() - start_time))
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from scipy.optimize import least_squares
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import chi2
class LinearRegression:
    """Least-squares linear regression with optional pairwise interaction
    terms and optional diminishing-return transforms per input column.

    Significance of each coefficient is assessed by likelihood-ratio tests:
    the model is refit once with each coefficient forced to zero (via the
    c_flag masks) and the drop in log-likelihood is compared to chi2(1).
    """

    def __init__(self, dataFrame, target=None, interaction = False, diminishing_return = False, r_columns = None):
        """Prepare the design data and coefficient bookkeeping.

        dataFrame: pandas DataFrame containing the target and input columns.
        target: name of the target column.
            NOTE(review): when target is None the code sets self.target to
            columns[0] but then calls columns.drop(None) — likely a latent
            bug; confirm the None-target path was ever exercised.
        interaction: include all pairwise products of input columns.
        diminishing_return: model selected columns as (1 - exp(-x*r)) / r.
        r_columns: columns getting the diminishing-return transform
            (defaults to all input columns).
        """
        columns = dataFrame.columns
        if target == None:
            self.target = columns[0]
        else:
            self.target = target
        self.columns = columns.drop(target)
        # NOTE(review): x_data is built from ALL columns (including the
        # target); downstream indexing only uses the first n_columns rows,
        # which works because the target is assumed to be the last column —
        # confirm column ordering of the input frame.
        self.x_data = dataFrame[columns].values.T
        self.y_data = dataFrame[target].values
        self.n_columns = len(self.columns)
        self.n_c = self.n_columns + 1  # adding intercept
        self.interaction = interaction
        if self.interaction:
            # one coefficient per unordered column pair
            self.n_c += (self.n_columns*(self.n_columns-1))//2
        self.diminishing_return = diminishing_return
        if self.diminishing_return:
            # r_position[i] == 1 marks column i as having its own rate coefficient
            self.r_position = np.ones(self.n_columns, dtype=int)
            if r_columns == None:
                self.r_columns = self.columns
            else:
                tmp_pos=[]
                tmp_col=[]
                for column in self.columns:
                    if column in r_columns:
                        tmp_pos.append(1)
                        tmp_col.append(column)
                    else:
                        tmp_pos.append(0)
                self.r_position = np.array(tmp_pos, dtype=int)
                self.r_columns = np.array(tmp_col, dtype=str)
        else:
            self.r_position = np.zeros(self.n_columns, dtype = int)
        self.n_r = np.sum(self.r_position)
        self.n_c += self.n_r
        self.c_flag = self.make_c_flag()

    def make_c_flag(self):
        """Build the coefficient masks: row 0 enables every coefficient
        (full model); each subsequent row zeroes exactly one coefficient
        (the models used for the likelihood-ratio tests)."""
        a = np.ones(self.n_c, dtype=int)
        b = 1 - np.eye(self.n_c, dtype=int)
        flag = np.vstack((a,b))
        return flag

    def _interaction_terms(self, c, x_data, c_flag):
        """Sum of pairwise interaction terms x_i * x_j * c_ij (masked)."""
        inter_terms = 0
        c_idx = self.n_columns  # interaction coefficients follow the main ones
        for i in range(self.n_columns-1):
            for j in range(i+1, self.n_columns):
                inter_terms += x_data[i]*x_data[j]*c[c_idx]*c_flag[c_idx]
                c_idx += 1
        return inter_terms

    def _diminishing_return(self, x, r):
        """Saturating transform (1 - exp(-x*r)) / r; approaches x as r -> 0."""
        return (1-np.exp(-x*r))/r

    def _make_equation(self, c, x_data, c_flag):
        """Evaluate the (masked) model prediction for coefficient vector c.

        Coefficient layout in c: [main effects | interactions | r rates |
        intercept], with the rate coefficients occupying the n_r slots just
        before the final intercept slot.
        """
        r_idx = -self.n_r - 1  # index of the first rate coefficient (from the end)
        equ = c[-1]*c_flag[-1]  # adding intercept
        for i in range(self.n_columns):
            if self.r_position[i]:  # diminishing return
                if c_flag[r_idx]:
                    equ += c[i]*c_flag[i]*self._diminishing_return(x_data[i], c[r_idx])
                else:
                    # rate masked out: fall back to the plain linear term
                    equ += c[i]*c_flag[i]*x_data[i]
                r_idx += 1
            else:  # normal term
                equ += c[i]*c_flag[i]*x_data[i]
        if self.interaction:
            equ += self._interaction_terms(c, x_data, c_flag)
        return equ

    def _make_regression_func(self,c, x_data, y_data, c_flag):
        """Residual vector for scipy.optimize.least_squares."""
        return y_data - self._make_equation(c, x_data, c_flag)

    def _least_square(self, c_flag):
        """Fit the model with the given coefficient mask, starting from self.c."""
        solv = least_squares(self._make_regression_func, self.c, args = (self.x_data, self.y_data, c_flag))
        return solv

    def _llk(self, tmp_pred):
        # Gaussian log-likelihood with the variance profiled out (MLE of sigma^2)
        return -len(self.y_data)/2*(np.log(np.sum(np.power(self.y_data-tmp_pred,2)))+1+np.log(2*np.pi)-np.log(len(self.y_data)))

    def _lrt(self, L1, L2):
        # likelihood-ratio statistic: 2 * (llk_full - llk_restricted)
        return 2*(L1-L2)

    def _p_value(self, LRT, df):
        # survival function of chi2 with `df` degrees of freedom
        return chi2.sf(LRT, df)

    def train(self):
        """Fit the full model and every leave-one-coefficient-out model,
        storing solutions, predictions, log-likelihoods, LRT statistics and
        p-values (entry 0 of lrt/p_value corresponds to the full model and
        is 0/1 by construction)."""
        # np.random.seed(0)
        # small random initial coefficients for the optimizer
        self.c = np.random.randn(self.n_c)/1000
        # self.c = np.ones(self.n_c)/1000
        solv = []
        prediction=[]
        llk=[]
        lrt = []
        p_value=[]
        for flag in self.c_flag:
            print('flag: ', flag)
            tmp_solv = self._least_square(flag)
            solv.append(tmp_solv.x)
            tmp_pred = self._make_equation(tmp_solv.x, self.x_data, flag)
            prediction.append(tmp_pred)
            tmp_llk = self._llk(tmp_pred)
            llk.append(tmp_llk)
        for idx, value in enumerate(llk):
            lrt.append(self._lrt(llk[0], llk[idx]))
            p_value.append(self._p_value(lrt[idx], 1))
        solv = np.array(solv)
        self.prediction = prediction
        self.solv = solv
        self.llk = llk
        self.lrt = lrt
        self.p_value = p_value

    def summary(self):
        """Print a coefficient table with LLK/LRT/p-value per term.

        NOTE(review): the llk/lrt/p_value lists are indexed by flag-row
        position while r_idx counts from the end of the coefficient vector;
        the r-variable rows (llk[r_idx] etc.) look offset by one relative to
        the flag layout — verify against a known dataset before trusting the
        r-term significance lines.
        """
        r_idx = -self.n_r - 1
        spec = '\n\nInteraction: ' + str(self.interaction) \
            +'\nDiminishing Return: ' + str(self.diminishing_return) \
            +'\nTarget: '+ self.target \
            +'\nNumber of input variables: ' + str(len(self.columns)) + '\n\n'
        for idx, column in enumerate(self.columns):
            tmp = '\nVar.%d: %s' % (idx+1, column)
            spec += tmp
            if self.r_position[idx]:
                tmp = '\t(Diminishing Return)'
                spec += tmp
        tmp = '\n\n\n\tInput\t\tCoef.\t\tLLK\t\tLRT\t\tP_value\t\tSig.(95%)\n' \
            +'-'*100
        spec += tmp
        # 3.84 is the 95th percentile of chi2 with 1 degree of freedom
        tmp = '\n%19s\t%10f\t%10f\t%10f\t%10f\t%5s' % ('Intercept', self.solv[0][-1], self.llk[-1], self.lrt[-1], self.p_value[-1], str(self.lrt[-1]>3.84))
        spec += tmp
        for i in range(self.n_columns):
            tmp = '\n%19s\t%10f\t%10f\t%10f\t%10f\t%5s' % ('Var.'+str(i+1), self.solv[0][i], self.llk[i+1], self.lrt[i+1], self.p_value[i+1], str(self.lrt[i+1]>3.84))
            spec += tmp
            if self.r_position[i]:
                tmp = '\n%19s\t%10f\t%10f\t%10f\t%10f\t%5s' % ('r-Var.'+str(i+1), self.solv[0][r_idx], self.llk[r_idx], self.lrt[r_idx], self.p_value[r_idx], str(self.lrt[r_idx]>3.84))
                spec += tmp
                r_idx += 1
        if self.interaction:
            c_idx = self.n_columns +1
            for i in range(self.n_columns-1):
                for j in range(i+1, self.n_columns):
                    tmp = '\n%19s\t%10f\t%10f\t%10f\t%10f\t%5s' % ('Var.'+str(i+1)+' * Var.'+str(j+1), self.solv[0][c_idx-1], self.llk[c_idx], self.lrt[c_idx], self.p_value[c_idx], str(self.lrt[c_idx]>3.84))
                    spec += tmp
                    c_idx += 1
        tmp = '\n\nFull model LLK: ' + str(self.llk[0])
        spec += tmp
        self.spec = spec
        print(spec)

    def plot(self):
        """Scatter each model's predictions against the observed target,
        one subplot per fitted (full / leave-one-out) model, with the y=x
        reference line in red."""
        plt_name = ['Full model']
        for column in self.columns:
            plt_name.append(column)
        if self.interaction:
            for i in range(self.n_columns-1):
                for j in range(i+1, self.n_columns):
                    plt_name.append('Test '+self.columns[i] + ' * '+self.columns[j])
        if self.diminishing_return:
            for idx, value in enumerate(self.r_position):
                if value:
                    plt_name.append('Test r-'+self.columns[idx])
        plt_name.append('Test Intercept')
        prediction = self.prediction
        fig, ax = plt.subplots(len(prediction), figsize = (5, 5*len(prediction)))
        for i in range(len(prediction)):
            ax[i].set_title(plt_name[i])
            ax[i].set_xlabel('Prediction')
            ax[i].set_ylabel('y_data')
            ax[i].scatter(prediction[i], self.y_data)
            ax[i].plot([prediction[i].min(), prediction[i].max()],[prediction[i].min(), prediction[i].max()], color='red')
        plt.show()
|
# help(str)
# Print the built-in documentation for selected methods to stdout.
help(str.replace)
help(dict.values)
from django.shortcuts import render,redirect
from .forms import *
from django.contrib import messages
from firstapp.models import *
from django.http import HttpResponse
def home_view(request):
    """Render the static home page."""
    return render(request,'home.html',{})
def about(request):
    """Render the static about page."""
    return render(request,'about.html',{})
def signup(request):
    """Handle sign-up for both investors (capitalist) and startups.

    The page hosts two forms; the name of the submit button in the POST
    data ('investor' or 'org') identifies which one was submitted. The
    other form is always re-rendered empty.
    """
    # Fresh, unbound forms by default (GET, or the form that wasn't posted).
    context = {
        'capitalist': CapitalistInfoForm(),
        'startup': OrganisationInfoForm(),
    }
    if request.method == 'POST':
        if 'investor' in request.POST:
            context['capitalist'] = CapitalistInfoForm(request.POST)
            if context['capitalist'].is_valid():
                context['capitalist'].save(commit=True)
        elif 'org' in request.POST:
            context['startup'] = OrganisationInfoForm(request.POST)
            if context['startup'].is_valid():
                context['startup'].save(commit=True)
    return render(request, 'signup.html', context)
'''def startupRegistration(request):
if request.method=='POST':
form = OrganisationInfoForm(request.POST)
if form.is_valid():
form.save(commit=True);
return render(request, 'org_registration.html', {'form': form})
else:
form=OrganisationInfoForm()
return render(request, 'org_registration.html', {'form': form})
def capitalistRegistration(request):
if request.method == 'POST':
form = CapitalistInfoForm(request.POST)
if form.is_valid():
form.save(commit=True)
return render(request, 'vc_registration.html', {'form': form})
else:
form = CapitalistInfoForm()
return render(request, 'vc_registration.html', {'form': form})
'''
def login(request):
    """Authenticate a user against the organisation or capitalist table.

    ``choice == '1'`` selects OrganisationInfo, anything else selects
    CapitalistInfo. On success redirects to after_login with the user's id.

    NOTE(review): credentials are stored and compared in plain text here —
    consider Django's auth framework / password hashing.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            choice = form.cleaned_data['choice']
            # Both branches did the same lookup against different models;
            # select the model once and share the logic.
            model = OrganisationInfo if choice == '1' else CapitalistInfo
            curuser = model.objects.filter(username=username, password=password)
            if curuser:
                list_id = curuser.values('id')
                return redirect('first_app:after_login', choice=choice, id=list_id[0]['id'])
            messages.error(request, 'Incorrect Username or Password')
    else:
        form = LoginForm()
    return render(request, 'login.html', {'form': form})
def after_login(request, choice, id):
    """Render the post-login landing page for the chosen account type."""
    print("After login")
    # BUG FIX: `choice` arrives via the URL and is produced as the string
    # '1' by login(); the old `choice == 1` int comparison could therefore
    # never match and always fell through to the capitalist page.
    if str(choice) == '1':
        return render(request, 'after_org_login.html', {'id': id})
    else:
        return render(request, 'after_vc_login.html', {'id': id})
|
import cv2
import numpy as np
import face_recognition as fr
import utils
"""
Speed Optimizations:
1) Process faces recognition once per N frames (improves little)
2) Process faces recognition ONLY when faces amount has been changed.
otherwise, don't run face recognition but track the faces which already detected before.
** in-order to improve reliability, despite the second condition, we try to
recognize the face several times ("STABILIZATION_TIME") because we don't want
to "get stuck" with "Unknown" when a face, for example,
couldn't be recognized at first (due to person position), but after further
chances it will be recognized well. (so we try at list several times before applying condition 2)
3) Frame size scaled-down by 50% before being processed. (reduce detection distance but improves speed)
"""
video_capture = cv2.VideoCapture(0)  # default webcam
print("Loading faces from data folder, please wait...")
(known_face_encodings, known_face_names) = utils.loadFacesData()
print("Done!")
# State carried across frames:
facesLocations = None        # face boxes found in the current frame
shownFacesLocations = []     # face boxes currently drawn on screen
faceEncodings = None         # encodings from the last processed frame
detectedNames = []           # names matching shownFacesLocations
stabilizationCounter = 0     # frames spent confirming a new detection
frameCounter = -1
while True:
    ret, frame = video_capture.read()
    frameCounter += 1
    # Resize frame of video to 1/2 size for faster detection and recognition
    small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
    # Reverse the channel order: BGR (OpenCV) -> RGB (face_recognition)
    small_frame = small_frame[:, :, ::-1]
    # Find all the faces in the frame..
    facesLocations = fr.face_locations(small_frame)
    # Process ONLY when all 3 of optimizations conditions are met!
    # (every Nth frame, AND either the face count changed or we are still
    # within the stabilization window for the current set of faces)
    if (frameCounter % utils.N == 0) and (
            len(shownFacesLocations) != len(facesLocations) or stabilizationCounter < utils.STABILIZATION_TIME):
        # if it's "new" case (shown faces != current) check more frames for a stable result
        if len(shownFacesLocations) != len(facesLocations):
            stabilizationCounter = 0
        elif utils.alreadyRecognized(detectedNames):
            # if all the faces are recognized - we are "stabled" - no need for further processing.
            # NOTE: `continue` also skips drawing/imshow for this frame.
            stabilizationCounter = utils.STABILIZATION_TIME
            continue
        else: stabilizationCounter += 1
        # Encode the detected faces.
        faceEncodings = fr.face_encodings(small_frame, facesLocations)
        # Remember the faces locations in order to show them at the next frames without re-processing
        shownFacesLocations = facesLocations
        detectedNames.clear()
        for faceEncode in faceEncodings:
            # Check every known face and return a boolean array (match or not.. default tolerance is 0.6)
            matches = np.asarray(fr.compare_faces(known_face_encodings, faceEncode, tolerance=0.6))
            name = utils.UNKNOWN_NAME
            # If we got more than one match - get the nearest one ("manually checking"..)
            trueMatches = matches[matches == True]
            if len(trueMatches) > 1:
                # Calculate the distance from each known-face to the detected face (check how similar they are..)
                distances = fr.face_distance(known_face_encodings, faceEncode)
                bestIndex = np.argmin(distances)  # Take the index of the minimum distance == best match..
                name = known_face_names[bestIndex]
            elif len(trueMatches) == 1:
                name = known_face_names[utils.findIndexOf(matches, True)]
            detectedNames.append(name)  # add the name.. may be "unKnown"..
    # if no face-encoding comparision occurred, update the shown faces locations "manually"
    # Beware - since we process face-encoding once a 10 frames, the len of "facesLocation" may be smaller/larger
    # than the shownLocations.. (if new face was added/removed..) so the function should care about that too
    if shownFacesLocations != facesLocations:
        shownFacesLocations = utils.calculateNextFacesLocations(shownFacesLocations, facesLocations)
    # draw rect on the frame around the detected faces including the names.
    utils.drawRectAndName(frame, shownFacesLocations, detectedNames)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
def _preprocess(df):
    """Drop ID/date columns, label-encode sex and ed_diagnosis, and impute
    remaining NaNs with each column's mode.

    Extracted because the exact same preprocessing was duplicated for the
    train and the final test CSVs.
    """
    df = df.drop(labels=['PATIENT ID', 'admission_datetime'], axis='columns')
    df.loc[df['sex'] == 'FEMALE', 'sex'] = 0
    df.loc[df['sex'] == 'MALE', 'sex'] = 1
    # fixed label -> code mapping (identical to the original assignments)
    diagnosis_codes = {
        'sx_breathing_difficulty': 1,
        'sx_others': 2,
        'sx_flu': 3,
        'sx_fever': 4,
        'sx_cough': 5,
    }
    for label, code in diagnosis_codes.items():
        df.loc[df['ed_diagnosis'] == label, 'ed_diagnosis'] = code
    return df.fillna(df.mode().iloc[0])


def _threshold_predict(clf, X, log_prob_threshold=-1.1):
    """Predict class 1 when its log-probability exceeds the threshold,
    overriding the classifier's default decision rule (also deduplicated
    from the validation and export sections)."""
    log_probs = clf.predict_log_proba(X)
    Y_pred = clf.predict(X)
    for i in range(Y_pred.shape[0]):
        Y_pred[i] = 1 if log_probs[i][1] > log_prob_threshold else 0
    return Y_pred


# ---- train / validate -------------------------------------------------------
Attributes = pd.read_csv("hm_hospitales_covid_structured_30d_train.csv", na_values=0, na_filter=True)
Outcomes = pd.read_csv("split_train_export_30d.csv")
X = _preprocess(Attributes)
Y = Outcomes.drop(labels='PATIENT ID', axis='columns').to_numpy().ravel()

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
clf = RandomForestClassifier(min_samples_leaf=7, n_estimators=300)
clf.fit(X_train, Y_train)
Y_pred = _threshold_predict(clf, X_test)

# confusion-matrix counts and metrics on the held-out split
TN, FN, TP, FP = 0, 0, 0, 0
for i in range(len(Y_test)):
    if Y_test[i] == 0 and Y_pred[i] == 0:
        TN += 1
    if Y_test[i] == 1 and Y_pred[i] == 0:
        FN += 1
    if Y_test[i] == 0 and Y_pred[i] == 1:
        FP += 1
    if Y_test[i] == 1 and Y_pred[i] == 1:
        TP += 1
print("TN:", TN, ", FN:", FN, ", TP:", TP, ", FP:", FP)
precision, recall = (TP / (FP + TP)), (TP / (FN + TP))
print("precision:", precision, ", recall:", recall)
print('F1:', 2 * ((precision * recall) / (precision + recall)))

# ---- predict on the final test set and export -------------------------------
Attributes = pd.read_csv("fixed_test.csv", na_values=0, na_filter=True)
Output = pd.DataFrame({'PATIENT ID': Attributes['PATIENT ID']})
X = _preprocess(Attributes)
Output['hospital_outcome'] = _threshold_predict(clf, X)
Output.to_csv('107062338.csv', index=False)  # output prediction
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 20:41:32 2019
@author: nico
"""
import os
import numpy as np
from scipy import signal as sig
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.interpolate import CubicSpline
import scipy.io as sio
from time import time
os.system ("clear") # clear the python terminal
plt.close("all") # close all open figures

# default figure settings
fig_sz_x = 14
fig_sz_y = 13
fig_dpi = 80 # dpi
fig_font_family = 'Ubuntu'
fig_font_size = 16

# Digital filter design: remove ECG baseline wander with median filters
# to list the variables stored in the .mat file:
#sio.whosmat('ECG_TP4.mat')
mat_struct = sio.loadmat('ECG_TP4.mat')
ecg_one_lead = mat_struct['ecg_lead']
# NOTE(review): ndarray.flatten expects an order string ('C'/'F'); passing
# the int 1 relies on an old NumPy accepting it — confirm the version in use.
ecg_one_lead = ecg_one_lead.flatten(1)
cant_muestras = len(ecg_one_lead)
fs = 1000 # sampling frequency [Hz]
tt = np.linspace(0, cant_muestras, cant_muestras) # sample axis

# Baseline estimation: two cascaded median filters at full rate, timed so it
# can be compared against the decimated variant further below.
the_start = time()
median1 = sig.medfilt(ecg_one_lead, 201) # 200 ms
median2 = sig.medfilt(median1, 601) # 600 ms
the_end = time()
tiempodft = the_end - the_start # elapsed time for full-rate filtering
signal = ecg_one_lead - median2 # subtract the estimated baseline
del the_start, the_end

plt.figure("ECG", constrained_layout=True)
plt.title("ECG")
plt.plot(tt, ecg_one_lead, label='ECG original')
plt.plot(tt, signal, label='ECG filtrada')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()

# Welch spectral estimate of the interpolating (baseline) signal
K = 30
L = cant_muestras/K # Welch segment length
ff2,Swelch = sig.welch(median2,fs=fs,nperseg=L,window='bartlett')
Swelch2 = 10*np.log10(Swelch)
plt.figure("Estimación de la señal interpolante con el método de Welch")
plt.title(" Estimación de la señal interpolante con el método de Welch")
plt.plot(ff2,Swelch2)
plt.xlabel('frecuecnia [Hz]')
plt.ylabel('Amplitud db')
plt.grid()
plt.show()

# Find the cutoff frequency that keeps most of the cumulative energy.
# NOTE(review): the original comment said 90% but the code uses 0.95.
energia=np.zeros((int(L/2)+1))
np.cumsum(Swelch, out=energia)
limfreq = energia < 0.95*energia[-1]
for ii in range(len(limfreq)) :
    if limfreq[ii] == False:
        freq = ii # first bin reaching the energy threshold
        break
# number of decimate-by-2 passes needed to reach that bandwidth
cant_pasadas = fs/freq
cant_pasadas = np.log2(cant_pasadas) # because each pass halves the rate
cant_pasadas = np.round(cant_pasadas)
nyq_frec = fs / 2
# use cumsum to estimate the frequencies
# filter design (anti-alias low-pass, elliptic/Cauer, SOS form)
ripple = 0.1 # dB
atenuacion = 40. # dB
wp = 40 # Hz; could be 0.05 but the geometric mean keeps it symmetric
ws = 70 # Hz
gains =np.array([-atenuacion, -ripple])
gains = 10**(gains/20)
frecs = np.array([0.0, ws, wp, nyq_frec]) / nyq_frec
L = fs/wp
lp_sos_cauer = sig.iirdesign(wp=0.5, ws=0.55, gpass=ripple, gstop=atenuacion, analog=False, ftype= 'ellip', output='sos')
w, h_cauer = sig.sosfreqz(lp_sos_cauer) # frequency response of the SOS filter
w = w / np.pi # sosfreqz returns w in 0..pi, so normalize
eps = np.finfo(float).eps # avoid log10(0)
plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w', edgecolor='k')
plt.plot(w, 20*np.log10(np.abs(h_cauer + eps)))
#plt.plot(frecs * nyq_frec, 20*np.log10(gains + eps), 'rx', label='plantilla' )
plt.title('FIR diseñado por métodos directos')
plt.xlabel('Frecuencia [Hz]')
plt.ylabel('Modulo [dB]')
#plt.xscale('log')
#plt.axis([0, nyq_frec, -60, 5 ]);
plt.grid()
axes_hdl = plt.gca()
plt.show()

# Manual decimation: zero-phase low-pass then keep every 2nd sample, repeated
decimation = ecg_one_lead
for jj in range(int(cant_pasadas)):
    decimation = sig.sosfiltfilt(lp_sos_cauer, decimation)
    aux = np.zeros((int(len(decimation)/2)))
    for ii in range(int(len(decimation)/2)):
        aux[ii] = decimation[2*ii]
    decimation = aux
interpolation = decimation
xx = np.linspace(0,len(decimation)-1,len(decimation))
cs = CubicSpline(xx, decimation)
plt.figure(figsize=(6.5, 4))
# NOTE(review): cs(decimation) evaluates the spline at the signal VALUES,
# not at the sample positions xx — possibly cs(xx) was intended; confirm.
plt.plot(xx, cs(decimation), label='true')
plt.show()
#np.array([5, 5.2]) *60*fs
#ff = np.linespace
#plt.figure(figsize=(fig_sz_x, fig_sz_y), dpi= fig_dpi, facecolor='w', edgecolor='k')
#plt.plot(tt[0:int(len(tt)/2)], ECG_f_cauer[0:int(len(tt)/2)], label='Cauer')
#plt.title('ECG filtering ')
#plt.ylabel('Adimensional')
#plt.xlabel('Muestras (#)')
#axes_hdl = plt.gca()
#axes_hdl.legend()
#axes_hdl.set_yticks(())
#plt.show()
# there is a ready-made function to decimate: sig.decimate
decimation = ecg_one_lead
for jj in range(int(cant_pasadas)):
    decimation = sig.decimate(decimation, 2)
the_start = time()
median1_dec = sig.medfilt(decimation, 3) # 200 ms at the decimated rate
median2_dec = sig.medfilt(median1_dec, 7) # 600 ms at the decimated rate
the_end = time()
tiempodft_dec = the_end - the_start # elapsed time at the reduced rate
del the_start, the_end
# upsample the baseline estimate back to the original rate
interpolation = median2_dec
for jj in range(int(cant_pasadas)):
    interpolation = sig.resample(interpolation,2*len(interpolation))
signal_int = ecg_one_lead - interpolation[0:len(ecg_one_lead)]

plt.figure("ECG", constrained_layout=True)
plt.title("ECG")
plt.plot(tt, ecg_one_lead, label='ECG original')
plt.plot(tt, signal, label='ECG filtrada completa')
plt.plot(tt, signal_int, label = 'ECG filtrada con resampleo')
plt.xlabel('Muestras')
plt.ylabel("Amplitud ")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.grid()
plt.legend()
plt.show()
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import os, time, sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'))
from main.page.base import BasePage
import subprocess
class addProduct(BasePage):
    """Page object for the seller 'Add Product' form (product-add.pl).

    Provides high-level actions that fill the form with random data,
    trigger an external AutoIt helper for the native file-upload dialog,
    and submit — both for free-form products and catalog products.
    The near-identical add_* and choose_category_* bodies have been
    deduplicated into private helpers; the public API is unchanged.
    """

    # locators
    _pname_loc = (By.ID, 'p-name')
    _pdep1_loc = (By.ID, 'p-dep-1')
    _pdep2_loc = (By.ID, 'p-dep-2')
    _pdep3_loc = (By.ID, 'p-dep-3')
    _pminorder_loc = (By.ID, 'p-min-order')
    _pprice_loc = (By.ID, 'p-price')
    _pweight_loc = (By.ID, 'p-weight')
    _upload_image_loc = (By.ID, 'pickfiles')
    _puploadto_loc = (By.ID, 'p-upload-to')
    _mustinsurance_loc = (By.ID, 'must_insurance')
    _pcondition_loc = (By.ID, 'p-condition')
    _returnable_loc = (By.ID, 'returnable')
    _pdescription_loc = (By.ID, 'p-description')
    _submit_loc = (By.ID, 's-save-prod')
    _catalog_loc = (By.CSS_SELECTOR, "a.catalog-select")
    # current page
    _pl = 'product-add.pl'

    def open(self, site=""):
        """Navigate the driver to the add-product page."""
        self._open(site, self._pl)

    # ---- shared form steps --------------------------------------------------

    def _fill_order_price_weight(self):
        """Fill min-order, price and weight with random values."""
        self.driver.find_element(*self._pminorder_loc).clear()
        self.driver.find_element(*self._pminorder_loc).send_keys(randint(1, 5))
        self.driver.find_element(*self._pprice_loc).send_keys(randint(5000, 10000))
        self.driver.find_element(*self._pweight_loc).send_keys(randint(100, 250))

    def _upload_image_and_submit(self, _site):
        """Upload an image via the AutoIt helper, pick destination and
        etalase, submit the form, then return to the add-product page."""
        self.driver.find_element(*self._upload_image_loc).click()
        time.sleep(2)
        # External AutoIt script drives the OS-native file-picker dialog.
        subprocess.Popen(r"C:\autoit\upload-image.exe")
        time.sleep(2)
        self.choose_upload_to()
        time.sleep(3)
        self.choose_etalase()
        self.driver.find_element(*self._submit_loc).submit()
        time.sleep(5)
        print("SUKSES")
        self._open(_site, self._pl)

    def _choose_category(self, level, option_index=None):
        """Reveal category <select> number *level* (1-3) and click an option.

        option_index is a 0-based index into the option list; when None a
        random option (skipping the placeholder at index 0) is chosen.
        """
        try:
            time.sleep(1)
            self.driver.execute_script(
                "document.querySelector('div#slct-p-dep-%d select#p-dep-%d').style.display = '';" % (level, level))
            self.driver.execute_script(
                "document.querySelector('div#slct-p-dep-%d a.selectBox').style.display = 'none';" % (level,))
            options = self.driver.find_elements(By.XPATH, "//select[@id='p-dep-%d']/option" % (level,))
            if option_index is None:
                option_index = randint(1, len(options) - 1)
            options[option_index].click()
        except Exception as inst:
            print(inst)

    # ---- public actions -----------------------------------------------------

    def add_to_product(self, i, _site):
        """Create one product with random category/price data."""
        try:
            self.driver.find_element(*self._pname_loc).send_keys('Product Otomatis ke ' + str(i))
            self.choose_category_1()
            self.choose_category_2()
            self.choose_category_3()
            self._fill_order_price_weight()
            self._upload_image_and_submit(_site)
        except Exception as inst:
            print(inst)

    def add_to_product_catalog(self, i, _site):
        """Create one catalog product (fixed category, catalog entry picked)."""
        try:
            self.driver.find_element(*self._pname_loc).send_keys('Samsung B221' + str(i))
            self.choose_category_1_catalog()
            self.choose_category_2_catalog()
            self.driver.find_element(*self._catalog_loc).click()
            self._fill_order_price_weight()
            self._upload_image_and_submit(_site)
        except Exception as inst:
            print(inst)

    def action_add_product(self, N, _site):
        """Add N products in a row."""
        try:
            print("Action Add Product" + " " + str(N) + " kali.")
            for i in range(1, N + 1):
                print("================ Add Product Ke " + str(i) + " ================")
                self.add_to_product(i, _site)
        except Exception as inst:
            print(inst)

    def action_add_product_catalog(self, N, _site):
        """Add N catalog products in a row."""
        try:
            print("Action Add Product ke dalam Katalog " + " " + str(N) + " kali.")
            for i in range(1, N + 1):
                print("================ Add Product Katalog Ke " + str(i) + " ================")
                self.add_to_product_catalog(i, _site)
        except Exception as inst:
            print(inst)

    def choose_category_1(self):
        self._choose_category(1)

    def choose_category_2(self):
        self._choose_category(2)

    def choose_category_3(self):
        self._choose_category(3)

    def choose_category_1_catalog(self):
        # original clicked XPath option[9] (1-based) == list index 8
        self._choose_category(1, option_index=8)

    def choose_category_2_catalog(self):
        # original clicked XPath option[2] (1-based) == list index 1
        self._choose_category(2, option_index=1)

    def choose_upload_to(self):
        """Reveal the 'upload to' select and pick the first option."""
        try:
            time.sleep(1)
            self.driver.execute_script("document.getElementById('p-upload-to').style.display = '';")
            list_upload_to = self.driver.find_elements(By.XPATH, "//select[@id='p-upload-to']/option")
            list_upload_to[0].click()
        except Exception as inst:
            print(inst)

    def choose_etalase(self):
        """Reveal the etalase (showcase) select and pick the second option."""
        try:
            time.sleep(1)
            self.driver.execute_script("document.getElementById('p-menu-id').style.display = '';")
            list_etalase = self.driver.find_elements(By.XPATH, "//select[@id='p-menu-id']/option")
            list_etalase[1].click()
        except Exception as inst:
            print(inst)
# -*- coding: utf8 -*-
#***************************************************************************
#* Copyright (c) 2021 Maarten Vroegindeweij <maarten@3bm.co.nl> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
#Regarding class GeoLocation:
# This class is based from the code smaple in this paper:
# http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
# The owner of that website, Jan Philip Matuschek, is the full owner of
# his intellectual property. This class is simply a Python port of his very
# useful Java code. All code written by Jan Philip Matuschek and ported by me
# (which is all of this class) is owned by Jan Philip Matuschek.
# '''
"""This module provides tools to load GIS information
"""
__title__= "GIS2BIM"
__author__ = "Maarten Vroegindeweij"
__url__ = "https://github.com/DutchSailor/GIS2BIM"
import urllib
import urllib.request
from urllib.request import urlopen
import xml.etree.ElementTree as ET
import json
import math
import re
from zipfile import ZipFile
#from PIL import Image
#Common functions
def GetWebServerData(servertitle, category, parameter):
    """Look up one *parameter* of the server/request named *servertitle*
    in the given *category* of the GIS2BIM master JSON on GitHub
    (an up-to-date list of GIS servers and requests)."""
    serverlocation = "https://raw.githubusercontent.com/DutchSailor/GIS2BIM/master/GIS2BIM_Data.json"
    response = urllib.request.urlopen(serverlocation)
    entries = json.loads(response.read())['GIS2BIMserversRequests'][category]
    titles = [entry["title"] for entry in entries]
    return entries[titles.index(servertitle)][parameter]
def xmldata(myurl, xPathStrings):
    """Fetch an XML document from *myurl* and return, for each XPath
    expression in *xPathStrings*, the list of matching element texts."""
    tree = ET.parse(urllib.request.urlopen(myurl))
    results = []
    for xPathString in xPathStrings:
        results.append([node.text for node in tree.findall(xPathString)])
    return results
def GetWebServerDataService(category, service):
    """Return every server/request entry of *category* whose "service"
    field equals *service* (from the GIS2BIM master JSON on GitHub)."""
    serverlocation = "https://raw.githubusercontent.com/DutchSailor/GIS2BIM/master/GIS2BIM_Data.json"
    response = urllib.request.urlopen(serverlocation)
    entries = json.loads(response.read())['GIS2BIMserversRequests'][category]
    return [entry for entry in entries if entry["service"] == service]
def DownloadURL(folder, url, filename):
    """Download *url* into *folder* under *filename*; return the saved path.

    Note: *folder* and *filename* are concatenated as-is, so *folder*
    must already end with a path separator.
    """
    destination = folder + filename
    urllib.request.urlretrieve(url, destination)
    return destination
def GetDataFiles(folder):
    """Download the GIS2BIM 'map.html' data file into *folder* and return
    the saved path (*folder* must end with a path separator).

    BUG FIX: the original body was a copy-paste of GetWebServerData and
    referenced undefined names (category/servertitle/parameter) while
    json-parsing an HTML file, so every call raised NameError. It now
    simply saves the datafile locally.
    """
    Serverlocation = "https://raw.githubusercontent.com/DutchSailor/GIS2BIM/master/datafiles/map.html"
    path = folder + "map.html"
    urllib.request.urlretrieve(Serverlocation, path)
    return path
def downloadUnzip(downloadURL, filepathZIP, folderUNZIP):
    """Download a ZIP archive from *downloadURL* to *filepathZIP*, extract
    it into *folderUNZIP*, and return *folderUNZIP*.

    Uses context managers so the response, temp file and archive are
    closed even when a download or extraction step raises.
    """
    with urlopen(downloadURL) as zipresp, open(filepathZIP, "wb") as tempzip:
        tempzip.write(zipresp.read())
    with ZipFile(filepathZIP) as zf:
        zf.extractall(path=folderUNZIP)
    return folderUNZIP
#GIS2BIM functions
def mortonCode(X, Y, Xmod, Ymod, TileDim):
    """Interleave the tile-index bits of (X, Y) into a Morton (Z-order) code.

    NOTE(review): zip() truncates to the shorter bit string, so coordinates
    whose tile indices have different bit lengths may not yield a canonical
    Morton code — confirm against the tile server's naming scheme.
    """
    col_bits = bin(int(math.floor((X - Xmod) / TileDim)))[2:]
    row_bits = bin(int(math.floor((Y - Ymod) / TileDim)))[2:]
    interleaved = "".join(r + c for r, c in zip(row_bits, col_bits))
    return int(interleaved, 2)
def checkIfCoordIsInsideBoundingBox(coord, bounding_box):
    """Return True when *coord* (x, y convertible to float) lies inside the
    square box centred on (bounding_box[0], bounding_box[1]) whose side is
    bounding_box[2].

    NOTE(review): a later definition with the same name but a different
    signature shadows this one at import time.
    """
    half = bounding_box[2] / 2
    x_lo, x_hi = bounding_box[0] - half, bounding_box[0] + half
    y_lo, y_hi = bounding_box[1] - half, bounding_box[1] + half
    return x_lo <= float(coord[0]) <= x_hi and y_lo <= float(coord[1]) <= y_hi
def TransformCRS_epsg(SourceCRS, TargetCRS, X, Y):
    """Convert one coordinate pair between Coordinate Reference Systems
    using the epsg.io web transformation service; return (x, y)."""
    requestURL = ("https://epsg.io/trans?" + "&s_srs=" + SourceCRS +
                  "&t_srs=" + TargetCRS + "&x=" + str(X) + "&y=" + str(Y) +
                  "&format=json")
    req = urllib.request.Request(requestURL, headers={'User-Agent': 'Mozilla/5.0'})
    payload = urllib.request.urlopen(req).read()
    data = json.loads(payload)
    return data["x"], data["y"]
def GML_poslistData(tree,xPathString,dx,dy,scale,DecimalNumbers):
    """Extract (x, y) pairs from every GML posList element matched by
    *xPathString* in the parsed *tree*; each coordinate is offset by
    (dx, dy), multiplied by *scale* and rounded to *DecimalNumbers*.
    Returns one list of (x, y) tuples per matched posList."""
    #group X and Y Coordinates of polylines
    posLists = tree.findall(xPathString)
    xyPosList = []
    for posList in posLists:
        dataPosList = posList.text
        coordSplit = dataPosList.split()
        # Heuristic dimension sniffing: if the third token parses and is 0.0
        # the data is assumed to be X Y Z triplets (Z == 0), else X Y pairs.
        # NOTE(review): a genuine 3D posList whose first Z is non-zero would
        # be misread as 2D — confirm against the data source.
        try:
            if float(coordSplit[2]) == 0:
                XYZCountDimensions = 3
            else:XYZCountDimensions = 2
        except:
            XYZCountDimensions = 2
        x = 0
        coordSplitXY = []
        for j in range(0, int(len(coordSplit) / XYZCountDimensions)):
            xy_coord = (round((float(coordSplit[x])+dx)*scale,DecimalNumbers), round((float(coordSplit[x+1])+dy)*scale,DecimalNumbers))
            coordSplitXY.append(xy_coord)
            x +=XYZCountDimensions
        xyPosList.append(coordSplitXY)
    return xyPosList
def CreateBoundingBox(CoordinateX, CoordinateY, BoxWidth, BoxHeight, DecimalNumbers):
    """Return 'xmin,ymin,xmax,ymax' for a box centred on the given
    coordinate, rounded to *DecimalNumbers* — the bounding-box string
    format used in web (WFS/WMS) requests."""
    half_w = 0.5 * BoxWidth
    half_h = 0.5 * BoxHeight
    corners = [round(CoordinateX - half_w, DecimalNumbers),
               round(CoordinateY - half_h, DecimalNumbers),
               round(CoordinateX + half_w, DecimalNumbers),
               round(CoordinateY + half_h, DecimalNumbers)]
    return ",".join(str(value) for value in corners)
def CreateBoundingBoxPolygon(CoordinateX, CoordinateY, BoxWidth, BoxHeight, DecimalNumbers):
    """Return a WKT-style '(x y,x y,...)' closed ring for the box centred
    on the given coordinate, starting at the top-left corner — the polygon
    format used in web requests."""
    left = round(CoordinateX - 0.5 * BoxWidth, DecimalNumbers)
    right = round(CoordinateX + 0.5 * BoxWidth, DecimalNumbers)
    bottom = round(CoordinateY - 0.5 * BoxHeight, DecimalNumbers)
    top = round(CoordinateY + 0.5 * BoxHeight, DecimalNumbers)
    ring = [(left, top), (right, top), (right, bottom), (left, bottom), (left, top)]
    return "(" + ",".join(str(x) + " " + str(y) for x, y in ring) + ")"
def PointsFromWFS(serverName, boundingBoxString, xPathString, dx, dy, scale, DecimalNumbers):
    """Query a WFS endpoint (server URL + bounding-box string) and return
    grouped (x, y) coordinate lists extracted from its GML response."""
    request_url = serverName + boundingBoxString
    tree = ET.parse(urllib.request.urlopen(request_url))
    return GML_poslistData(tree, xPathString, dx, dy, scale, DecimalNumbers)
def PointsFromGML(filePath, xPathString, dx, dy, scale, DecimalNumbers):
    """Parse a local GML file and return grouped (x, y) coordinate lists."""
    parsed = ET.parse(filePath)
    return GML_poslistData(parsed, xPathString, dx, dy, scale, DecimalNumbers)
def DataFromWFS(serverName, boundingBoxString, xPathStringCoord, xPathStrings, dx, dy, scale, DecimalNumbers):
    """Query a WFS endpoint and return a list whose first element is the
    grouped (x, y) coordinates (from *xPathStringCoord*) followed by one
    list of element texts per XPath in *xPathStrings*."""
    tree = ET.parse(urllib.request.urlopen(serverName + boundingBoxString))
    coordinates = GML_poslistData(tree, xPathStringCoord, dx, dy, scale, DecimalNumbers)
    results = [coordinates]
    for xPathString in xPathStrings:
        results.append([node.text for node in tree.findall(xPathString)])
    return results
def checkIfCoordIsInsideBoundingBox(coord, min_x, min_y, max_x, max_y):
    """Return True when *coord* (a pair of numeric strings) parses as
    numbers lying inside the axis-aligned box [min_x, max_x] x [min_y, max_y];
    non-numeric strings return False.

    BUG FIX: the original pattern r'^-?\d+(?:\.\d+)$' made the fractional
    part mandatory, so plain integer strings such as '5' were always
    rejected; the group is now optional.
    """
    number = r'^-?\d+(?:\.\d+)?$'
    if re.match(number, coord[0]) is None or re.match(number, coord[1]) is None:
        return False
    return min_x <= float(coord[0]) <= max_x and min_y <= float(coord[1]) <= max_y
def filterGMLbbox(tree,xPathString,bbx,bby,BoxWidth,BoxHeight,scale):
    """Collect polygons from the GML *tree* (elements matched by
    *xPathString*) and keep only those with at least one vertex inside the
    bounding box centred on (bbx, bby); returned vertices are translated
    by (-bbx, -bby) and multiplied by *scale*, rounded to ints."""
    # Bounding box definition
    bounding_box = [bbx, bby, BoxWidth,BoxHeight]
    min_x = bounding_box[0] - (bounding_box[2]/2)
    min_y = bounding_box[1] - (bounding_box[3]/2)
    max_x = bounding_box[0] + (bounding_box[2]/2)
    max_y = bounding_box[1] + (bounding_box[3]/2)
    # get data from xml
    root = tree.getroot()
    # for loop to get each element in an array
    # NOTE(review): XMLelements is built but never read afterwards.
    XMLelements = []
    for elem in root.iter():
        XMLelements.append(elem)
    xpathfound = root.findall(xPathString)
    # for loop to get all polygons in an array
    polygons = []
    for x in xpathfound:
        if x.text:
            try:
                polygons.append(x.text.split(" "))
            except:
                polygons.append("_none_")
        else:
            # placeholder keeps list positions aligned with xpathfound
            polygons.append("_none_")
    # for loop to get x,y coords and filter polygons inside Bounding Box
    xyPolygons = []
    for newPolygon in polygons:
        polygon_is_inside_bounding_box = False
        x = 0
        xyPolygon = []
        for i in range(0, int(len(newPolygon) / 2)):
            xy_coord = [newPolygon[x], newPolygon[x + 1]]
            # translate to the bbox origin and scale (rounded to whole units)
            xy_coord_trans = [round((float(newPolygon[x])-bbx)*scale), round((float(newPolygon[x + 1])-bby)*scale)]
            xyPolygon.append(xy_coord_trans)
            x += 2
            # one vertex inside the box is enough to keep the polygon
            if checkIfCoordIsInsideBoundingBox(xy_coord, min_x, min_y, max_x, max_y):
                polygon_is_inside_bounding_box = True
        if polygon_is_inside_bounding_box:
            xyPolygons.append(xyPolygon)
    return xyPolygons
def WMSRequest(serverName, boundingBoxString, fileLocation, pixWidth, pixHeight):
    """Perform a WMS (Web Map Service) request and save the map image.

    The URL template (serverName + boundingBoxString) is assumed to contain
    'width=3000'/'height=3000' placeholders which are rewritten to the
    requested pixel size. Returns (saved path, response object, final URL).
    """
    myrequestURL = serverName + boundingBoxString
    myrequestURL = myrequestURL.replace("width=3000", "width=" + str(pixWidth))
    myrequestURL = myrequestURL.replace("height=3000", "height=" + str(pixHeight))
    resource = urllib.request.urlopen(myrequestURL)
    # 'with' guarantees the image file handle is closed even on write errors
    with open(fileLocation, "wb") as output1:
        output1.write(resource.read())
    return fileLocation, resource, myrequestURL
def MortonCode(X, Y, Xmod, Ymod, TileDimension):
    """Convert an (X, Y) coordinate into a Morton (Z-order) tile code.

    NOTE(review): duplicate of mortonCode() defined earlier in this module —
    consider keeping only one of them.
    """
    col = int(math.floor((X - Xmod) / TileDimension))
    row = int(math.floor((Y - Ymod) / TileDimension))
    col_bits = bin(col)[2:]
    row_bits = bin(row)[2:]
    merged = "".join(a + b for a, b in zip(row_bits, col_bits))
    return int(merged, 2)
def NominatimAPI(inputlist):
    """Geocode an address (list of words) via the OSM Nominatim API.

    Returns (lat, lon) as strings, or (None, None) when no result was found.
    """
    URLpart1 = "https://nominatim.openstreetmap.org/search/"
    URLpart2 = "%20".join(inputlist)
    URLpart3 = "?format=xml&addressdetails=1&limit=1&polygon_svg=1"
    URL = URLpart1 + URLpart2 + URLpart3
    req = urllib.request.Request(URL)
    resp = urllib.request.urlopen(req)
    content = resp.read().decode('utf8')
    try:
        # crude parse: slice out the first result's lat="..." lon="..." attributes
        lst = re.split('lat=| lon=| display_name=', content)
        lat = lst[1][1:-1]
        lon = lst[2][1:-1]
    except IndexError:
        # BUG FIX: was a bare `except:` (which also swallows SystemExit /
        # KeyboardInterrupt); the only expected failure here is a response
        # without lat/lon attributes, i.e. an IndexError on lst[1]/lst[2].
        lat = None
        lon = None
    return lat, lon
def LatLonZoomToTileXY(lat, lon, zoom):
    """Convert WGS84 lat/lon (degrees) to slippy-map tile indices at *zoom*."""
    tiles_per_axis = 2.0 ** zoom
    lat_rad = math.radians(lat)
    tile_x = int((lon + 180.0) / 360.0 * tiles_per_axis)
    # standard Web-Mercator y projection
    mercator_y = 1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi
    tile_y = int(mercator_y / 2.0 * tiles_per_axis)
    return tile_x, tile_y
def TMSBboxFromTileXY(TileX, TileY, zoom):
    """Return (south, west, north, east) degree bounds of a slippy-map tile."""
    tiles_per_axis = 2.0 ** zoom
    west = TileX / tiles_per_axis * 360.0 - 180.0
    east = (TileX + 1) / tiles_per_axis * 360.0 - 180.0
    # inverse Web-Mercator for the top and bottom tile edges
    north = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * TileY / tiles_per_axis))))
    south = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * (TileY + 1) / tiles_per_axis))))
    return south, west, north, east
def TMS_WMTSCombinedMapFromLatLonBbox(lat,lon,bboxWidth,bboxHeight,zoomL,pixels,TMS_WMTS,ServerName):
    """Download, mosaic and crop map tiles around a centre point.

    :param lat: centre latitude in degrees
    :param lon: centre longitude in degrees
    :param bboxWidth: bounding-box width in metres
    :param bboxHeight: bounding-box height in metres
    :param zoomL: slippy-map zoom level
    :param pixels: pixel edge length of one tile (e.g. 256)
    :param TMS_WMTS: truthy selects the TMS paste order, falsy the WMTS order
    :param ServerName: tile URL template containing {z}, {x} and {y}
    :return: (cropped PIL image, mosaic width in px, mosaic height in px)
    """
    #With lat/lon and bbox tilenumbers are calculated then downloaded from given server and merged into 1 images and cropped afterwards to given boundingbox
    #Create Boundingbox lat/lon
    loc = GeoLocation.from_degrees(lat,lon)
    # bounding_locations expects a radius in km, hence metres / 2000 (half box)
    radiusWidth = bboxWidth/2000
    SW_locWidth = loc.bounding_locations(radiusWidth)[0]
    NE_locWidth = loc.bounding_locations(radiusWidth)[1]
    radiusHeight = bboxHeight/2000
    SW_locHeight = loc.bounding_locations(radiusHeight)[0]
    NE_locHeight = loc.bounding_locations(radiusHeight)[1]
    #GetUniqueTileX/TileY list
    # corners are (lat, lon) tuples: [0] = latitude, [1] = longitude
    TileXYBottomLeft = LatLonZoomToTileXY(SW_locHeight[0],SW_locWidth[1],zoomL)
    TileXYTopRight = LatLonZoomToTileXY(NE_locHeight[0],NE_locWidth[1],zoomL)
    #Get TileX/TileY orderlist for URLlists
    rangex = list(range(TileXYBottomLeft[0], TileXYTopRight[0]+1))
    rangey1 = list(range(TileXYTopRight[1], TileXYBottomLeft[1]+1))
    # tile rows count from the top, so reverse to get bottom-up order
    rangey = rangey1[::-1]
    minx = min(rangex)
    miny = min(rangey)
    maxx = max(rangex)
    maxy = max(rangey)
    #Get Bbox from TopRight/BottomLeft
    BboxTileBottomLeft = TMSBboxFromTileXY(minx,maxy,zoomL)
    BboxTileTopRight = TMSBboxFromTileXY(maxx,miny,zoomL)
    # Calculate total width of tiles and deltax/y of boundingbox
    GeoLocationBottomLeft = GeoLocation.from_degrees(BboxTileBottomLeft[0],BboxTileBottomLeft[1])
    GeoLocationTopLeft = GeoLocation.from_degrees(BboxTileTopRight[2],BboxTileBottomLeft[1])
    GeoLocationTopRight = GeoLocation.from_degrees(BboxTileTopRight[2],BboxTileTopRight[3])
    # distance_to returns km; *1000 converts to metres
    TotalWidthOfTiles = 1000*GeoLocation.distance_to(GeoLocationTopLeft,GeoLocationTopRight,GeoLocation.EARTH_RADIUS)
    TotalHeightOfTiles = 1000*GeoLocation.distance_to(GeoLocationBottomLeft,GeoLocationTopLeft,GeoLocation.EARTH_RADIUS)
    #deltax Left, Width difference between bbox and TotalWidthOfTiles
    GeoLocationBottomLeftBbox = GeoLocation.from_degrees(SW_locHeight[0],SW_locWidth[1])
    GeoLocationBottomBboxLeftTiles = GeoLocation.from_degrees(SW_locHeight[0],BboxTileBottomLeft[1])
    dx = 1000*GeoLocation.distance_to(GeoLocationBottomBboxLeftTiles,GeoLocationBottomLeftBbox,GeoLocation.EARTH_RADIUS)
    #deltay Bottom, Height difference between bbox and TotalHeightOfTiles
    GeoLocationBottomTilesLeftBbox = GeoLocation.from_degrees(BboxTileBottomLeft[0],SW_locWidth[1])
    dy = 1000*GeoLocation.distance_to(GeoLocationBottomTilesLeftBbox,GeoLocationBottomLeftBbox,GeoLocation.EARTH_RADIUS)
    # Build column-major (x, y) pairs: each x repeated once per y
    x = rangex
    y = rangey
    n = len(rangey)
    xl1=[]
    for i in x:
        xl1.append([i]*n)
    xl2=[]
    for sublist in xl1:
        for item in sublist:
            xl2.append(item)
    yl1=[]
    for i in x:
        yl1.append(y)
    yl2=[]
    for sublist in yl1:
        for item in sublist:
            yl2.append(item)
    tilesX = xl2
    tileY = yl2
    #Create URLs for image
    ServerName = ServerName.replace("{z}",str(zoomL))
    URLlist = []
    for i,j in zip(tilesX,tileY):
        URLlist.append(ServerName.replace("{y}",str(j)).replace("{x}",str(i)))
    #Download TileImages
    TileImages = []
    opener=urllib.request.build_opener()
    # some tile servers refuse requests without a browser-like User-Agent
    opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
    urllib.request.install_opener(opener)
    for i in URLlist:
        TileImages.append(Image.open(urllib.request.urlopen(i)))
    #Create new image to concatenate the tileimages in.
    widthImg = len(rangex)*pixels
    heightImg = len(rangey)*pixels
    img = Image.new('RGB', (widthImg,heightImg))
    # pixel offsets of each tile within the mosaic
    LPx=[]
    n=0
    for i in rangex:
        LPx.append(n*pixels)
        n=n+1
    LPy=[]
    n=0
    for i in rangey:
        LPy.append(n*pixels)
        n=n+1
    LPx2=[]
    n=len(LPy)
    for i in LPx:
        LPx2.append([i]*n)
    LPx3=[]
    for sublist in LPx2:
        for item in sublist:
            LPx3.append(item)
    LPy2=[]
    #n=len(LPy)
    for i in LPx:
        LPy2.append(LPy)
    LPy3=[]
    for sublist in LPy2:
        for item in sublist:
            LPy3.append(item)
    LPy4=LPy3[::-1]
    # NOTE(review): the TMS branch pastes with (y, x) swapped relative to the
    # WMTS branch -- confirm this matches the row order of the server in use.
    if TMS_WMTS:
        for i,j,k in zip(TileImages,LPy3,LPx3):
            img.paste(i,(j,k))
    else:
        for i,j,k in zip(TileImages,LPx3,LPy4):
            img.paste(i,(j,k))
    #Crop Image
    deltaHeight = TotalHeightOfTiles- bboxHeight
    dxM = dx
    dyM = deltaHeight-dy
    ImageWidthNew=(bboxWidth/TotalWidthOfTiles)*widthImg
    ImageHeightNew=(bboxHeight/TotalHeightOfTiles)*heightImg
    # negative paste offsets shift the mosaic so the crop window starts at (0, 0)
    dxImage=-int((dxM/TotalWidthOfTiles)*widthImg)
    dyImage=-int((dyM/TotalHeightOfTiles)*heightImg)
    imgNew = Image.new('RGB', (int(ImageWidthNew),int(ImageHeightNew)))
    imgNew.paste(img,(dxImage,dyImage))
    return imgNew,widthImg,heightImg
class GeoLocation:
    """A WGS84 point held in both radians and degrees, with great-circle helpers.

    Note: ``from_radians`` (and therefore ``bounding_locations``) returns plain
    ``(deg_lat, deg_lon)`` tuples rather than GeoLocation instances; callers in
    this file rely on that tuple indexing.
    """

    MIN_LAT = math.radians(-90)
    MAX_LAT = math.radians(90)
    MIN_LON = math.radians(-180)
    MAX_LON = math.radians(180)
    EARTH_RADIUS = 6378.1  # kilometers

    @classmethod
    def from_degrees(cls, deg_lat, deg_lon):
        """Build a GeoLocation from a latitude/longitude pair in degrees."""
        return GeoLocation(math.radians(deg_lat), math.radians(deg_lon),
                           deg_lat, deg_lon)

    @classmethod
    def from_radians(cls, rad_lat, rad_lon):
        """Convert radians to a (deg_lat, deg_lon) tuple (NOT a GeoLocation)."""
        return math.degrees(rad_lat), math.degrees(rad_lon)

    def __init__(self, rad_lat, rad_lon, deg_lat, deg_lon):
        self.rad_lat = float(rad_lat)
        self.rad_lon = float(rad_lon)
        self.deg_lat = float(deg_lat)
        self.deg_lon = float(deg_lon)
        self._check_bounds()

    def __str__(self):
        degree_sign = u'\N{DEGREE SIGN}'  # kept for parity with the original
        return "{0:.20f}, {1:.20f}".format(self.deg_lat, self.deg_lon)

    def _check_bounds(self):
        """Raise if the stored radians fall outside [-90, 90] x [-180, 180] deg."""
        lat_ok = GeoLocation.MIN_LAT <= self.rad_lat <= GeoLocation.MAX_LAT
        lon_ok = GeoLocation.MIN_LON <= self.rad_lon <= GeoLocation.MAX_LON
        if not (lat_ok and lon_ok):
            raise Exception("Illegal arguments")

    def distance_to(self, other, radius=EARTH_RADIUS):
        '''
        Great-circle distance between this GeoLocation and *other*, expressed
        in the units of *radius* (kilometers by default).
        '''
        cos_angle = (math.sin(self.rad_lat) * math.sin(other.rad_lat)
                     + math.cos(self.rad_lat)
                     * math.cos(other.rad_lat)
                     * math.cos(self.rad_lon - other.rad_lon))
        return radius * math.acos(cos_angle)

    def bounding_locations(self, distance, radius=EARTH_RADIUS):
        '''
        Bounding box of every point within *distance* of this location.

        Param:
        distance - great-circle distance from this point, in the same unit as
            *radius* (kilometers by default)
        radius - sphere radius, defaults to Earth's radius.
        Returns [SW corner, NE corner], each a (deg_lat, deg_lon) tuple as
        produced by ``from_radians``.
        '''
        if radius < 0 or distance < 0:
            raise Exception("Illegal arguments")
        rad_dist = distance / radius  # angular distance on a great circle
        min_lat = self.rad_lat - rad_dist
        max_lat = self.rad_lat + rad_dist
        if min_lat > GeoLocation.MIN_LAT and max_lat < GeoLocation.MAX_LAT:
            delta_lon = math.asin(math.sin(rad_dist) / math.cos(self.rad_lat))
            min_lon = self.rad_lon - delta_lon
            if min_lon < GeoLocation.MIN_LON:
                min_lon += 2 * math.pi
            max_lon = self.rad_lon + delta_lon
            if max_lon > GeoLocation.MAX_LON:
                max_lon -= 2 * math.pi
        else:
            # a pole lies within range: clamp latitude, wrap longitude fully
            min_lat = max(min_lat, GeoLocation.MIN_LAT)
            max_lat = min(max_lat, GeoLocation.MAX_LAT)
            min_lon = GeoLocation.MIN_LON
            max_lon = GeoLocation.MAX_LON
        return [GeoLocation.from_radians(min_lat, min_lon),
                GeoLocation.from_radians(max_lat, max_lon)]
def download_image(url, index, results):
    """Fetch *url* and store the decoded PIL image at results[index]."""
    payload = requests.get(url).content
    results[index] = Image.open(BytesIO(payload))
def download_images(urls):
    """Download all *urls* concurrently; returns the images in input order."""
    results = [None] * len(urls)
    workers = []
    for idx, url in enumerate(urls):
        worker = threading.Thread(target=download_image, args=(url, idx, results))
        workers.append(worker)
        worker.start()
    # wait until every download thread has finished
    for worker in workers:
        worker.join()
    return results
def TMS(zoom_level, rdx, rdy, width, layer, downloadimage: bool):
    """Fetch (or just size) a PDOK WMTS mosaic in the Dutch RD (EPSG:28992) grid.

    :param zoom_level: WMTS tile-matrix level (key into the resolution table)
    :param rdx: RD x coordinate in metres of the point of interest
    :param rdy: RD y coordinate in metres
    :param width: requested bounding-box width in metres
    :param layer: WMTS layer name
    :param downloadimage: True -> download and mosaic the tiles,
        False -> return only tile/extent bookkeeping
    :return: see the two return statements at the bottom
    """
    zoomlevels = [zoom_level]
    boundingbox_widths = [width]
    layers = [layer]
    # metres per pixel for each EPSG:28992 tile-matrix level
    zoomleveldata = {0: 3440.640,
                     1: 1720.320,
                     2: 860.160,
                     3: 430.080,
                     4: 215.040,
                     5: 107.520,
                     6: 53.720,
                     7: 26.880,
                     8: 13.440,
                     9: 6.720,
                     10: 3.360,
                     11: 1.680,
                     12: 0.840,
                     13: 0.420,
                     14: 0.210,
                     15: 0.105,
                     16: 0.0575}
    pixel_width = 256
    # RD coordinates of the tile scheme origin
    xcorner = -285401.92
    ycorner = 903402.0
    Resolutions = []
    for x in zoomlevels:
        Resolutions.append(zoomleveldata[x])
    TileColumns = []
    TileRows = []
    DeltaX = []
    DeltaY = []
    for x in Resolutions:
        a = pixel_width * x  # tile edge length in metres
        # TileColumns.append(Rdx-Xcorner)
        # TileRows.append(a)
        TileY = (ycorner - rdy) / a  # rows count downward from ycorner
        TileX = (rdx - xcorner) / a
        TileYRound = int(TileY)
        TileXRound = int(TileX)
        # NOTE(review): the X/Y suffixes below look swapped (TilePercentageY is
        # derived from TileX and vice versa) -- confirm against the callers.
        TilePercentageY = TileX - TileXRound
        TilePercentageX = TileY - TileYRound
        DeltaYM = TilePercentageY * pixel_width * x
        DeltaXM = TilePercentageX * pixel_width * x
        TileRows.append(TileYRound)
        TileColumns.append(TileXRound)
        DeltaY.append(DeltaYM)
        DeltaX.append(DeltaXM)
    UniqueTileColumns = []
    UniqueTileRows = []
    TileColumns2 = []
    TileRows2 = []
    for x, j, k, l in zip(TileColumns, TileRows, boundingbox_widths, Resolutions):
        TileWidth = pixel_width * l
        c = math.ceil(k / TileWidth)  # tiles needed to span the requested width
        b = math.ceil(c / 2) * 2      # rounded up to an even count
        UniqueTileColumns.append(range(int((x - b / 2)), 1 + int(x + b / 2)))
        UniqueTileRows.append(range((int(j - b / 2)), 1 + int(j + b / 2)))
    # Flatten to per-tile (column, row) pairs in column-major order.
    for x in UniqueTileColumns:
        # build the flat list:
        a = []
        counter = len(x)
        for j in x:  # each unique tile: repeat the whole column range
            a.append(x)
        flatA = []
        for sublist in a:
            for item in sublist:
                flatA.append(item)
        TileColumns2.append(flatA)
        counter = 0
    for x in UniqueTileRows:
        # build the flat list:
        a = []
        counter = len(x)
        for j in x:  # each unique tile
            a.append([int(j)] * counter)
        flatA = []
        for sublist in a:
            for item in sublist:
                flatA.append(item)
        TileRows2.append(flatA)
        counter = 0
    TotalTileWidth = []
    TotalTileHeight = []
    TileColumnMin = min(UniqueTileColumns[0])
    TileColumnsMax = max(UniqueTileColumns[0])
    TileRowsMin = min(UniqueTileRows[0])
    TileRowsMax = max(UniqueTileRows[0])
    TileWidthHeight = (903401.92 - 22598.08) * pow(0.5, zoom_level)
    Rdxmin = TileColumnMin * TileWidthHeight - 285401.92
    # NOTE: WMTS tiling counts from the top, hence max instead of min below
    Rdymin = 903401.92 - TileRowsMax * TileWidthHeight - TileWidthHeight
    # 22598.08
    for x, j in zip(Resolutions, UniqueTileColumns):
        a = len(j) * pixel_width * x
        TotalTileWidth.append(a)
    for x, j in zip(Resolutions, UniqueTileRows):
        a = len(j) * pixel_width * x
        TotalTileHeight.append(a)
    Rdxmax = Rdxmin + TotalTileWidth[0]
    Rdymax = Rdymin + TotalTileHeight[0]
    print(Rdxmin - rdx)
    print(Rdymin - rdy)
    # string1 = "http://geodata.nationaalgeoregister.nl/tiles/service/wmts?&request=GetTile&VERSION=1.0.0&LAYER="
    string1 = "https://service.pdok.nl/lv/bgt/wmts/v1_0?request=GetTile&service=WMTS&VERSION=1.0.0&LAYER="
    string3 = "&STYLE=default&TILEMATRIXSET=EPSG:28992&TILEMATRIX=EPSG:28992:"
    string34 = "&TILEROW="
    string5 = "&TILECOL="
    string7 = "&FORMAT=image/png8";
    urlList = []
    for x, j, k, z in zip(TileColumns2, TileRows2, layers, zoomlevels):
        a = []
        for l, m in zip(x, j):
            b = string1 + str(k) + string3 + str(z) + string34 + str(m) + string5 + str(l) + string7
            a.append(b)
        urlList.append(a)
    bitmaps2 = []
    if downloadimage:
        for x in urlList:
            bitmaps = download_images(x)
            # for j in i:
            # response = requests.get(j)
            # img = Image.open(BytesIO(response.content))
            # bitmaps.append(img)
            # print(img)
            # TODO: this needs to be faster, extremely slow
            bitmaps2.append(bitmaps)
        combined_bitmaps = []
        for a, b, c in zip(bitmaps2, UniqueTileColumns, UniqueTileRows):
            total_width = len(b) * pixel_width
            total_height = len(c) * pixel_width
            img = Image.new('RGB', (total_width, total_height))
            lpx = []
            n = 0
            # NOTE(review): `j` here is the leftover loop variable from the URL
            # loop above (a flat row list), so lpx gets more offsets than there
            # are columns; the surplus is harmless because zip() truncates
            # later, but `for l in b:` looks like what was intended -- confirm.
            for l in j:
                lpx.append(n * pixel_width)
                n = n + 1
            LPy = []
            n = 0
            for x in c:
                LPy.append(n * pixel_width)
                n = n + 1
            LPx2 = []
            n = len(LPy)
            for x in lpx:
                LPx2.append([x] * n)
            LPx3 = []
            for sublist in LPx2:
                for item in sublist:
                    LPx3.append(item)
            LPy2 = []
            #n=len(LPy)
            for x in lpx:
                LPy2.append(LPy)
            LPy3 = []
            for sublist in LPy2:
                for item in sublist:
                    LPy3.append(item)
            LPy4 = reversed(LPy3)  # not used below; kept as-is
            for m, n, o in zip(a, LPy3, LPx3):
                img.paste(m, (n, o))
            combined_bitmaps.append(img)
        return combined_bitmaps[0], TotalTileWidth[0], TotalTileHeight[0], Rdxmin, Rdymin, Rdxmax, Rdymax
    else:
        return (
            TotalTileWidth[0], TotalTileHeight[0], TileColumnMin, TileColumnsMax, TileRowsMin, TileRowsMax, Rdxmin, Rdymin,
            Rdxmax, Rdymax, TileWidthHeight)
def toPix(point1, Xmin, Ymin, TotalWidth, TotalHeight, ImgWidthPix, ImgHeightPix):
    """Map a world-coordinate point (with .x/.y attributes) to image pixels.

    The y axis is flipped because PIL image rows count downward from the top.
    """
    x_frac = (point1.x - Xmin) / TotalWidth
    y_frac = (point1.y - Ymin) / TotalHeight
    xpix = math.floor(x_frac * ImgWidthPix)
    ypix = ImgHeightPix - math.floor(y_frac * ImgHeightPix)
    return xpix, ypix
def pointToPILLine(imgdrawobj, color: str, width: float, pnts, dx, dy, TotalTileWidthM, TotalTileHeightM, imgwidthpix,
                   imgheightpix):
    """Draw the polyline through *pnts* on a PIL ImageDraw object.

    Each point is projected to pixel space with toPix; consecutive points are
    connected with line segments.

    :return: (last_pixel, last_pixel) -- both elements are the pixel of the
        final point, matching the original implementation's return value.
    :raises ValueError: if fewer than two points are given (the original
        raised UnboundLocalError in that case).
    """
    if len(pnts) < 2:
        raise ValueError("pointToPILLine needs at least two points")
    # BUG FIX: the original looped over every index and used a bare `except`
    # to swallow the IndexError at the final point, which also hid any real
    # projection/drawing errors. Iterate over consecutive pairs instead.
    last_pix = None
    for start, end in zip(pnts, pnts[1:]):
        start_pix = toPix(start, dx, dy, TotalTileWidthM, TotalTileHeightM, imgwidthpix,
                          imgheightpix)
        last_pix = toPix(end, dx, dy, TotalTileWidthM, TotalTileHeightM, imgwidthpix,
                         imgheightpix)
        imgdrawobj.line([start_pix, last_pix], fill=color, width=width)
    return last_pix, last_pix
|
import tensorflow as tf

# Restore two variables from the latest checkpoint and evaluate w*x + b for
# both the unsaved pair (w, b) and the checkpoint-backed pair (w1, b1).
store_path = '/home/xuhy/tmp/test/model.ckpt'
w = tf.get_variable(name='weight', initializer=tf.constant(3.0))
b = tf.get_variable(name='bias', initializer=tf.constant(2.0))
x = tf.constant(3.0, tf.float32)
w1 = tf.get_variable(name='weight_1', initializer=tf.constant(3.0))
tf.add_to_collection('save', w1)
b1 = tf.get_variable(name='bias_1', initializer=tf.constant(2.0))
tf.add_to_collection('save', b1)
x1 = tf.constant(3.0, tf.float32)
# the saver only restores the variables registered in the 'save' collection
saver = tf.train.Saver(tf.get_collection('save'))
# BUG FIX: tf.mul was removed in TensorFlow 1.0; the op is tf.multiply.
r1 = tf.add(tf.multiply(w, x), b)
r2 = tf.add(tf.multiply(w1, x1), b1)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    ckpt_path = tf.train.latest_checkpoint('/home/xuhy/tmp/test')
    print(ckpt_path)
    saver.restore(sess, ckpt_path)
    bv = sess.run(r1)
    bv2 = sess.run(r2)
    print(bv)
    print(bv2)
    # saver.save(sess, store_path)
    sess.close()  # redundant inside `with`, kept harmlessly
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utilities
class ActivationNormalisation(nn.Module):
    """Activation normalisation (ActNorm) layer with data-dependent init.

    On the first training batch the per-channel bias and log-scale are set so
    the output has zero mean and unit variance; afterwards they are trained as
    free parameters. In the forward direction the log-determinant contribution
    logs.sum() * H * W is added to the running sum; subtracted in reverse.
    """

    def __init__(self, num_features, scale=1.):
        super(ActivationNormalisation, self).__init__()
        # buffer (not a parameter): flips to nonzero after data-dependent init
        self.register_buffer('is_initialized', torch.zeros(1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.logs = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.num_features = num_features
        self.scale = float(scale)
        self.eps = 1e-6

    def describe(self):
        """Print a one-line summary of this layer."""
        print('\t\t\t - > Act Norm with {} num_features.'.format(self.num_features))

    def initialize_parameters(self, x):
        """Data-dependent init from the first training batch (no-op in eval)."""
        if not self.training:
            return
        with torch.no_grad():
            # bias holds the NEGATED per-channel mean, so _center can add it
            neg_mean = -1 * utilities.mean_over_dimensions(x.clone(), dim=[0, 2, 3], keepdims=True)
            variance = utilities.mean_over_dimensions((x.clone() + neg_mean) ** 2, dim=[0, 2, 3], keepdims=True)
            log_scale = (self.scale / (variance.sqrt() + self.eps)).log()
            self.bias.data.copy_(neg_mean.data)
            self.logs.data.copy_(log_scale.data)
            self.is_initialized += 1.

    def _center(self, x, reverse=False):
        # bias is the negated data mean, hence "+" centres the activations
        return x - self.bias if reverse else x + self.bias

    def _scale(self, x, sldj, reverse=False):
        logs = self.logs
        x = x * logs.mul(-1).exp() if reverse else x * logs.exp()
        if sldj is not None:
            ldj = logs.sum() * x.size(2) * x.size(3)
            sldj = sldj - ldj if reverse else sldj + ldj
        return x, sldj

    def forward(self, x, ldj=None, reverse=False):
        if not self.is_initialized:
            self.initialize_parameters(x)
        if reverse:
            # invert in the opposite order: unscale, then uncenter
            x, ldj = self._scale(x, ldj, reverse)
            x = self._center(x, reverse)
        else:
            x = self._center(x, reverse)
            x, ldj = self._scale(x, ldj, reverse)
        return x, ldj
class AffineCoupling(nn.Module):
    """Affine coupling layer: transforms half the channels conditioned on the rest.

    The conditioning CNN produces interleaved scale/translation channels; the
    scale is tanh-bounded and multiplied by a learned per-channel gain.
    """

    def __init__(self, num_features, mid_channels):
        super(AffineCoupling, self).__init__()
        self.num_features = num_features
        # outputs 2*num_features channels: even -> scale, odd -> translation
        self.network = CNN(num_features, mid_channels, 2 * num_features)
        self.scale = nn.Parameter(torch.ones(num_features, 1, 1))

    def describe(self):
        """Print a one-line summary of this layer."""
        print('\t\t\t - > Aff Coupling with {} num_features'.format(self.num_features))

    def forward(self, x, ldj, reverse=False):
        x_change, x_id = x.chunk(2, dim=1)
        params = self.network(x_id)
        raw_scale, shift = params[:, 0::2, ...], params[:, 1::2, ...]
        log_s = self.scale * torch.tanh(raw_scale)  # bounded log-scale for stability
        if reverse:
            x_change = x_change * log_s.mul(-1).exp() - shift
            ldj = ldj - log_s.flatten(1).sum(-1)
        else:
            x_change = (x_change + shift) * log_s.exp()
            ldj = ldj + log_s.flatten(1).sum(-1)
        return torch.cat((x_change, x_id), dim=1), ldj
class CNN(nn.Module):
    """3x3 -> 1x1 -> 3x3 convolution stack with pre-activation BatchNorm/ReLU.

    The final convolution is zero-initialised, so the whole block starts out
    as the zero map (a standard trick for stabilising flow training).
    """

    def __init__(self, in_channels, mid_channels, out_channels,
                 use_act_norm=False):
        super(CNN, self).__init__()
        self.in_norm = nn.BatchNorm2d(in_channels)
        self.in_conv = nn.Conv2d(in_channels, mid_channels,
                                 kernel_size=3, padding=1, bias=False)
        nn.init.normal_(self.in_conv.weight, 0., 0.05)
        self.mid_norm = nn.BatchNorm2d(mid_channels)
        self.mid_conv = nn.Conv2d(mid_channels, mid_channels,
                                  kernel_size=1, padding=0, bias=False)
        nn.init.normal_(self.mid_conv.weight, 0., 0.05)
        self.out_norm = nn.BatchNorm2d(mid_channels)
        self.out_conv = nn.Conv2d(mid_channels, out_channels,
                                  kernel_size=3, padding=1, bias=True)
        nn.init.zeros_(self.out_conv.weight)
        nn.init.zeros_(self.out_conv.bias)

    def forward(self, x):
        out = self.in_conv(F.relu(self.in_norm(x)))
        out = self.mid_conv(F.relu(self.mid_norm(out)))
        return self.out_conv(F.relu(self.out_norm(out)))
class Squeeze(nn.Module):
    """Trade spatial extent for channels: (B, C, H, W) <-> (B, 4C, H/2, W/2)."""

    def __init__(self):
        super(Squeeze, self).__init__()

    def forward(self, x, reverse=False):
        b, c, h, w = x.size()
        if reverse:
            # unsqueeze: spread each group of 4 channels over a 2x2 patch
            x = x.view(b, c // 4, 2, 2, h, w)
            x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
            return x.view(b, c // 4, h * 2, w * 2)
        # squeeze: fold each 2x2 spatial patch into 4 channels
        x = x.view(b, c, h // 2, 2, w // 2, 2)
        x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
        return x.view(b, c * 2 * 2, h // 2, w // 2)
class Invertible1x1ConvLU(nn.Module):
    # https://github.com/y0ast/Glow-PyTorch/blob/master/modules.py
    """Invertible 1x1 convolution (Glow), optionally with an LU-decomposed weight.

    The LU parameterisation keeps the permutation P and the diagonal signs
    fixed and learns L, U and log|s|, making the log-determinant cheap.
    """

    def __init__(self, num_features, LU_decomposed=True):
        super(Invertible1x1ConvLU, self).__init__()
        w_shape = [num_features, num_features]
        # random orthogonal init -> the initial transform is volume preserving
        # NOTE(review): torch.qr / torch.lu are deprecated in newer torch
        # releases (torch.linalg.qr / lu_factor) -- confirm the pinned version.
        w_init = torch.qr(torch.randn(*w_shape))[0]
        self.num_features = num_features
        if not LU_decomposed:
            self.weight = nn.Parameter(torch.Tensor(w_init))
        else:
            p, lower, upper = torch.lu_unpack(*torch.lu(w_init))
            s = torch.diag(upper)
            sign_s = torch.sign(s)
            log_s = torch.log(torch.abs(s))
            upper = torch.triu(upper, 1)
            l_mask = torch.tril(torch.ones(w_shape), -1)
            eye = torch.eye(*w_shape)
            # fixed pieces are buffers; L, U and log|s| are the trainables
            self.register_buffer("p", p)
            self.register_buffer("sign_s", sign_s)
            self.lower = nn.Parameter(lower)
            self.log_s = nn.Parameter(log_s)
            self.upper = nn.Parameter(upper)
            self.l_mask = l_mask
            self.eye = eye
        self.if_LU = LU_decomposed
        self.w_shape = w_shape

    def describe(self):
        """Print a one-line summary of this layer."""
        if self.if_LU:
            print('\t\t\t - > Inverted 1x1 Conv (LU decomposition) with {} num_features'.format(self.num_features))
        else:
            print('\t\t\t - > Inverted 1x1 Conv with {} num_features'.format(self.num_features))

    def get_weight(self, input, reverse):
        """Return (conv kernel of shape [C, C, 1, 1], log|det W| * H * W)."""
        b, c, h, w = input.shape
        if not self.if_LU:
            dlogdet = torch.slogdet(self.weight)[1] * h * w
            if not reverse:
                weight = self.weight
            else:
                weight = torch.inverse(self.weight)
        else:
            self.l_mask = self.l_mask.to(input.device)
            self.eye = self.eye.to(input.device)
            # rebuild W = P @ L @ U with unit-diagonal L and
            # diag(sign_s * exp(log_s)) on U's diagonal
            lower = self.lower * self.l_mask + self.eye
            u = self.upper * self.l_mask.transpose(0, 1).contiguous()
            u += torch.diag(self.sign_s * torch.exp(self.log_s))
            dlogdet = torch.sum(self.log_s) * h * w
            if not reverse:
                weight = torch.matmul(self.p, torch.matmul(lower, u))
            else:
                u_inv = torch.inverse(u)
                l_inv = torch.inverse(lower)
                p_inv = torch.inverse(self.p)
                weight = torch.matmul(u_inv, torch.matmul(l_inv, p_inv))
        return weight.view(self.w_shape[0], self.w_shape[1], 1, 1), dlogdet

    def forward(self, input, logdet=None, reverse=False):
        """
        log-det = log|abs(|W|)| * pixels
        """
        weight, dlogdet = self.get_weight(input, reverse)
        if not reverse:
            z = F.conv2d(input, weight)
            if logdet is not None:
                logdet = logdet + dlogdet
            return z, logdet
        else:
            z = F.conv2d(input, weight)
            if logdet is not None:
                logdet = logdet - dlogdet
            return z, logdet
# -*- coding:utf-8 -*-
"""
@Project:watchmen
@Language:Python3.6.4
@Author:Hans
@File:xlsxtohtml.py
@Ide:PyCharm
@Time:2018/8/17 17:32
@Remark:
"""
import pandas as pd
import codecs
def xlsxTohtml():
    """Convert the first sheet of ./student.xls into an HTML table at ./student.html."""
    workbook = pd.ExcelFile('./student.xls')
    frame = workbook.parse()
    with codecs.open('./student.html', 'w', 'utf-8') as html_file:
        html_file.write(frame.to_html(header=True, index=False))
def htmlToxlsx():
    """Read the first table in ./student.html and write it to ./out.xlsx."""
    with open('./student.html', 'r') as f:
        tables = pd.read_html(f.read().encode('utf-8'), encoding='utf-8')
    writer = pd.ExcelWriter('./out.xlsx')
    tables[0].to_excel(writer)
    writer.close()
if __name__ == '__main__':
    # Only the HTML -> xlsx direction is exercised when run as a script.
    # xlsxTohtml()
    htmlToxlsx()
import os
import subprocess
from os import listdir
from os.path import isfile, join, isdir

# Render a spectrogram PNG (via ffmpeg's showspectrumpic) for every audio file
# under ./temp/<folder>/, labelled "vocal"/"novocal" from the filename.
rootFolder = "./temp"
folders = [f for f in listdir(rootFolder) if isdir(join(rootFolder, f))]
i = 0
for f in folders:
    folder = join(rootFolder, f)
    files = [f2 for f2 in listdir(folder) if isfile(join(folder, f2))]
    for file in files:
        file = join(folder, file)
        isvocal = "vocals" in file  # idiomatic `in` instead of __contains__
        label = "vocal" if isvocal else "novocal"
        print(file)
        print(isvocal)
        # BUG FIX: the original built one shell string with the path quoted by
        # hand and shell=True -- paths containing quotes/metacharacters broke
        # or executed arbitrary commands. List-form argv needs no quoting.
        subprocess.check_call([
            "ffmpeg", "-i", file,
            "-lavfi", "showspectrumpic=s=hd480:legend=0,format=yuv420p",
            "./data-new/" + label + "/" + str(i + 1000) + ".png",
        ])
        # per-file counter so every output PNG gets a unique name
        i = i + 1
|
'''
Introduction to Bisection Root-Finding in Python
Name: Kevin Trinh
Goal: Find the root of log(3x/2)
'''
import math
import scipy as sp
def func(x):
    '''The function whose root we seek: log(1.5 * x), zero at x = 2/3.'''
    # BUG FIX: scipy.log was a deprecated NumPy alias that has been removed
    # from modern SciPy; the stdlib math.log is the correct scalar equivalent.
    return math.log(1.5 * x)
def bisection(x1, x2, tol=1e-15, maxiter=1000, f=None):
    '''Find a root of *f* on [x1, x2] by bisection.

    Uses |f(xnew)| for the tolerance criterion.

    :param x1: left bracket endpoint (f(x1) and f(x2) must differ in sign)
    :param x2: right bracket endpoint
    :param tol: stop once |f(xnew)| <= tol
    :param maxiter: iteration cap
    :param f: callable to find the root of; defaults to the module-level
        ``func`` for backward compatibility
    :return: (xnew, fnew, i) -- the root estimate, f at it, iterations used
    '''
    if f is None:
        f = func
    # check that the root is bracketed before starting bisection
    assert f(x1) * f(x2) < 0
    i = 1
    xnew = (x1 + x2) / 2.0
    # BUG FIX: fnew was previously assigned only inside the loop, so an
    # immediately-converged call returned an unbound name (UnboundLocalError).
    fnew = f(xnew)
    while abs(fnew) > tol and i <= maxiter:
        f1 = f(x1)
        xnew = (x1 + x2) / 2.0
        fnew = f(xnew)
        # keep the sub-interval whose endpoints still bracket the root
        if f1 * fnew > 0:
            x1 = xnew
        else:
            x2 = xnew
        i += 1
    if i > maxiter:
        print('Maximum number of iterations has been reached.')
    return xnew, fnew, i
# Locate the root of log(1.5*x) (analytically x = 2/3) and report the result.
root, yval, i = bisection(0.5, 1.0)
print('The root is located at x = ' + str(root) + ' after ' + str(i) + ' number of iterations.')
print('This value should be close to zero: ' + str(yval))
|
__author__ = 'Sanjay Narayana'
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import random
# Fixed seeds so the k-means++-style initialisation is reproducible.
np.random.seed(10)
random.seed(10)
class GaussianMixtureModels(object):
    """Two-component GMM for audio features with k-means++-style initialisation.

    EM here uses one shared covariance matrix (estimated once from all data)
    and only updates the component means and mixing priors.
    """

    def __init__(self):
        self.number_of_clusters = 2
        self.prior_probabilities = self.number_of_clusters * [1 / 2]  # uniform priors
        self.threshold = 1e-6  # convergence tolerance on the log-likelihood

    def distance_from_centers(self, centroids, data):
        # Squared distance from each point to its nearest current centroid.
        D2 = np.array([min([np.linalg.norm(x - c) ** 2 for c in centroids]) for x in data])
        # print("D2:::",D2)
        return D2

    def choose_next_center(self, D2, data):
        # k-means++ sampling: pick a point with probability proportional to D2.
        probabilities = D2 / D2.sum()
        # print("probabilities::",probabilities)
        cumulative_probabilities = probabilities.cumsum()
        # print("cum::",cumulative_probabilities)
        r = random.random()
        print("r::", r)
        ind = np.where(cumulative_probabilities >= r)[0][0]
        print("ind::", ind, np.array([ind]))
        print("sample::", data[12].shape)
        return data[np.array([ind])]

    def init_centers(self, k, data):
        # First centroid uniformly at random, the rest via choose_next_center.
        # centroids = random.sample(self.audio_data, 1)
        # centroids = np.random.choice(self.audio_data, 1,replace=False)
        random_centroid = np.random.choice(data.shape[0], 1, replace=False)
        # random_centroid = np.array([4])
        # random_centroid = np.array([11]) # best until now
        # random_centroid = np.array([17])
        print("np rand:::", random_centroid)
        centroids = data[random_centroid]
        # print("centroids::",centroids.shape)
        while len(centroids) < k:
            D2 = self.distance_from_centers(centroids, data)
            an_array = self.choose_next_center(D2, data)
            print("shape:::", an_array.shape)
            centroids = np.append(centroids, an_array, axis=0)
            print("centroids::", centroids.shape)
        return centroids

    def read_data(self):
        # Load the feature matrix (one row per sample).
        self.audio_data = np.genfromtxt('audioData.csv', delimiter=',')
        #np.random.shuffle(self.audio_data)
        self.data_shape = self.audio_data.shape
        self.memberships = self.audio_data.shape[0] * [None]

    def initialize_parameters(self):
        # indices = np.random.choice(self.audio_data.shape[0], self.number_of_clusters, replace=False)
        #indices = np.array([60, 120])
        #indices = np.array([69, 120])
        #indices = np.array([37, 120])
        #print("indices:::", indices)
        #self.centers = self.audio_data[indices]
        self.centers = self.init_centers(2, self.audio_data)
        # one covariance shared by both components, estimated from all data
        self.covariance_matrix = np.cov(self.audio_data, rowvar=False)

    def calculate_likelihoods(self):
        # Per-component Gaussian densities, returned as (n_samples, n_clusters).
        # NOTE(review): the (2, 128) buffer hard-codes 128 samples -- confirm
        # this matches audioData.csv.
        likelihoods = np.empty((2, 128))
        for i in range(self.number_of_clusters):
            pdf = multivariate_normal.pdf(self.audio_data, mean=self.centers[i], cov=self.covariance_matrix)
            # log_pdf = multivariate_normal.logpdf(self.audio_data, mean=self.centers[i],cov=self.covariance_matrix)
            likelihoods[i] = pdf
            # log_likelihoods[i] = log_pdf
        #print("likelihoods:::", likelihoods.shape)
        return likelihoods.T

    def em_algorithm(self):
        # Run EM for a fixed 9 iterations (the convergence break is disabled).
        previous_log_likelihood = 0
        #while True: 151
        for a in range(9):
            likelihoods = self.calculate_likelihoods()
            print("likelihoods:::", likelihoods)
            likelihoods_and_prior_probabilities = likelihoods * self.prior_probabilities
            point_probabilities = np.sum(likelihoods_and_prior_probabilities, axis=1)
            print("point_probabilities_shape:::", likelihoods_and_prior_probabilities.shape,
                  point_probabilities[:, None].shape)
            log_of_point_probabilities = np.log(point_probabilities)
            log_likelihood_of_data = np.sum(log_of_point_probabilities)
            # E-step: posterior responsibility of each component for each point
            normalized_scores = np.divide(likelihoods_and_prior_probabilities,
                                          point_probabilities[:, None])
            comparisons = normalized_scores[:, 0] >= normalized_scores[:, 1]
            print("normalized_shapes:::", normalized_scores.shape)
            print("log_likelihood:::", log_likelihood_of_data)
            if np.abs(log_likelihood_of_data - previous_log_likelihood) < self.threshold:
                #break
                pass
            previous_log_likelihood = log_likelihood_of_data
            # M-step: update means and priors (the covariance stays fixed)
            for i in range(len(self.centers)):
                sum_of_dimension_values = np.sum(normalized_scores[:, i])
                print("extended:::", normalized_scores[:, i][:, None].shape)
                self.centers[i] = np.sum((normalized_scores[:, i][:, None] * self.audio_data),
                                         axis=0) / sum_of_dimension_values
                self.prior_probabilities[i] = sum_of_dimension_values / self.data_shape[0]
            print("i::",a)
        return comparisons

    def plot_scatter(self, comparisons):
        # Scatter plot of the final hard assignment (first two feature axes).
        print("comparisons::", comparisons)
        # print("not comparisopns::",np.logical_not(comparisons))
        _1st_cluster_points = self.audio_data[comparisons]
        _2nd_cluster_points = self.audio_data[np.logical_not(comparisons)]
        figure = plt.figure(1)
        axes = figure.add_subplot(111)
        plt.title("GMM Clustering of Audio Data")
        plt.xlabel("1st Feature")
        plt.ylabel("2nd Feature")
        axes.scatter(_1st_cluster_points[:, 0], _1st_cluster_points[:, 1], c='b', marker='.', label='First Cluster')
        axes.scatter(_2nd_cluster_points[:, 0], _2nd_cluster_points[:, 1], c='r', marker='.', label='Second Cluster')
        plt.legend(loc='upper left')
        plt.show()
if __name__ == '__main__':
    # Fit a 2-component GMM to the audio features and plot the clusters.
    gmm = GaussianMixtureModels()
    gmm.read_data()
    gmm.initialize_parameters()
    # gmm.compute_likelihood()
    comparisons = gmm.em_algorithm()
    # gmm.calculate_likelihood()
    gmm.plot_scatter(comparisons)
|
import numpy as np
import math
import os
import scipy.io as sio
import matplotlib.pyplot as plt
def Fmeasure(segmentation, gtSegmentation):
k = np.max(gtSegmentation) #no of classes in GT
r = int(np.max(segmentation)) #no of clusters
ti = np.zeros([k,]) #no of points in class i
ni = np.zeros([r,]) #no of points in cluster i
for i in range(0, r):
ind = np.nonzero(segmentation == (i + 1))
ni[i] = np.array(ind).shape[1]
for i in range(0, k):
ind = np.nonzero(gtSegmentation == (i + 1))
ti[i] = np.array(ind).shape[1]
prec = np.zeros([r,])
rec = np.zeros([r,])
F = 0
for i in range(0, r):
#finding values of cluster i
ind = np.nonzero(segmentation == (i + 1))
#getting gt in same indices of cluster i
gt = np.zeros(int(ni[i]),)
for j in range(0, int(ni[i])):
gt[j] = gtSegmentation[ind[0][j], ind[1][j]]
#counting classes in cluster i
count = np.zeros([k,])
for j in range(0, k):
ind = np.nonzero(gt == (j + 1))
count[j] = np.array(ind).shape[1]
#calculating prec, rec, and F
prec[i] = np.max(count / ni[i])
if i > k:
rec[i] = 0
else:
rec[i] = np.max(count / ti[i])
F += (2 * prec[i] * rec[i]) / (prec[i] + rec[i])
return F
def ConditionalEntropy(segmentation, gtSegmentation):
    """Conditional entropy H(class | cluster), in bits, of a 2-D segmentation.

    Labels in both arrays are assumed 1-based. For each cluster the entropy of
    the ground-truth class distribution inside it is computed and the per-
    cluster entropies are averaged weighted by cluster size.

    :param segmentation: 2-D integer array of cluster labels (1..r)
    :param gtSegmentation: 2-D integer array of class labels (1..k)
    :return: H(class | cluster) in bits; 0 for a perfect segmentation
    """
    k = np.max(gtSegmentation)           # number of classes in GT
    r = int(np.max(segmentation))        # number of clusters
    ti = np.zeros([k, ])                 # number of points in class i
    ni = np.zeros([r, ])                 # number of points in cluster i
    for i in range(0, r):
        ind = np.nonzero(segmentation == (i + 1))
        ni[i] = np.array(ind).shape[1]
    for i in range(0, k):
        ind = np.nonzero(gtSegmentation == (i + 1))
        ti[i] = np.array(ind).shape[1]
    Hi = np.zeros(r, )
    H = 0
    for i in range(0, r):
        # indices of the points assigned to cluster i
        ind = np.nonzero(segmentation == (i + 1))
        # ground-truth labels at those same positions
        gt = np.zeros(int(ni[i]), )
        for j in range(0, int(ni[i])):
            gt[j] = gtSegmentation[ind[0][j], ind[1][j]]
        # class histogram inside cluster i, accumulating H(C_i | T) as we go
        count = np.zeros([k, ])
        for j in range(0, k):
            ind2 = np.nonzero(gt == (j + 1))
            count[j] = np.array(ind2).shape[1]
            # BUG FIX: this accumulation sat OUTSIDE the j loop, so only the
            # last class contributed to the entropy of each cluster.
            if count[j] != 0:
                Hi[i] += -(count[j] / ni[i]) * math.log((count[j] / ni[i]), 2)
        H += (ni[i] / np.sum(ni)) * Hi[i]
    return H
def validateCluster(d):
    # Python 2 routine: walks <d>/<k>/ for saved .npy segmentations, re-saves
    # them (and each ground truth) as .jpg images, and would average the
    # F-measure / conditional entropy (those metric lines are commented out).
    K = [5]
    sumF = 0
    sumH = 0
    t = 0  # number of (segmentation, ground truth) pairs visited
    for k in K:
        print "\n\nK = " + str(k)
        dir = d + str(k) + '/'
        for root, dirs, filenames in os.walk(dir):
            for f in filenames:
                segmentation = np.load(dir + f)
                segmentation = segmentation.astype(np.int)
                filename = f.split('.')[0]
                plt.imsave(d + 'Segmentation Images/' + filename + '.jpg', segmentation)
                print "\nFilename: " + filename
                gt_file = './groundTruth/test/' + filename + '.mat'
                mat = sio.loadmat(gt_file) # load mat file
                gt_size = mat['groundTruth'].size
                for i in range(0, gt_size):
                    print "GroundTruth #" + str(i + 1)
                    gt = mat['groundTruth'][0, i] # fetch groundTruth
                    gtSegmentation = gt['Segmentation'][0][0]
                    plt.imsave(d + 'Segmentation Images/' + filename + 'gt' + str(i) + '.jpg' , gtSegmentation)
                    #F = Fmeasure(segmentation, gtSegmentation)
                    #sumF += F
                    #print "Fmeasure = " + str(F)
                    #H = ConditionalEntropy(segmentation, gtSegmentation)
                    #sumH += H
                    #print "Entropy = " + str(H)
                    t += 1
    avF = sumF / t
    avH = sumH / t
    # NOTE(review): concatenating a number onto these strings raises
    # TypeError -- avF/avH would need str(); confirm whether these lines are
    # ever reached with the metric computation commented out above.
    print 'average Fmeasure = ' + avF
    print 'average Conditional Entropy = ' + avH
# Python 2 driver: validate both segmentation variants.
print 'Kmeans Validation:\n'
dir = './images1/K-means Segmentation/'
validateCluster(dir)
print 'Normalized Cut Validation:\n'
dir = './images1/NormCut Segmentation/KNN/'
validateCluster(dir)
|
import requests

# Fetch the NBA stats boxscore endpoint and dump the raw response body.
# BUG FIX: the URL literal began with a space, which makes requests raise
# (invalid/missing schema) instead of performing the request.
r = requests.get('https://stats.nba.com/stats/boxscoremisc')
print(r.text)
def fib():
    '''
    Yield the Fibonacci numbers 0, 1, 1, 2, 3, ... without ever stopping.
    '''
    current, upcoming = 0, 1
    while True:
        yield current
        current, upcoming = upcoming, current + upcoming
import random
from dolfin import *
class InitialConditions(UserExpression):
    """Random initial state for the Cahn-Hilliard fields: c ~ 0.63 +/- 0.01, mu = 0."""

    def __init__(self, **kwargs):
        # distinct but reproducible random stream per MPI rank
        random.seed(2 + MPI.rank(MPI.comm_world))
        super().__init__(**kwargs)

    def eval(self, values, x):
        perturbation = 0.02 * (0.5 - random.random())
        values[0] = 0.63 + perturbation
        values[1] = 0.0

    def value_shape(self):
        return (2,)
class CahnHilliardEquation(NonlinearProblem):
    """Adapter exposing the residual form L and Jacobian form a to dolfin's Newton solver."""

    def __init__(self, a, L):
        NonlinearProblem.__init__(self)
        self.L = L  # residual (linear) form
        self.a = a  # Jacobian (bilinear) form

    def F(self, b, x):
        # Assemble the residual vector into b.
        assemble(self.L, tensor=b)

    def J(self, A, x):
        # Assemble the Jacobian matrix into A.
        assemble(self.a, tensor=A)
# --- Cahn-Hilliard equation with anisotropic surface energy (FEniCS) ---
lmbda = 1.0e-02 # surface parameter
dt = 5.0e-06 # time step
theta = 1.0 # time stepping family, e.g. theta=1 -> backward Euler, theta=0.5 -> Crank-Nicolson
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["cpp_optimize"] = True
mesh = UnitSquareMesh.create(96, 96, CellType.Type.quadrilateral)
P1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
ME = FunctionSpace(mesh, P1*P1)  # mixed space for (c, mu)
# Trial and test functions of the space ``ME`` are now defined::
# Define trial and test functions
du = TrialFunction(ME)
q, v = TestFunctions(ME)
u = Function(ME) # current solution
u0 = Function(ME) # solution from previous converged step
# Split mixed functions
dc, dmu = split(du)
c, mu = split(u)
c0, mu0 = split(u0)
u_init = InitialConditions(degree=1)
u.interpolate(u_init)
u0.interpolate(u_init)
# preferred growth directions for the anisotropic surface energy
e1 = Constant((1.,0))
e2 = Constant((0,1.))
m = [e1, -e1, e2, -e2]
c = variable(c)
f = 0.25*c**2*(1-c)**2  # double-well bulk free energy
dfdc = diff(f, c)
c_grad = grad(c)
abs_grad = abs(c_grad[0]) + abs(c_grad[1])  # L1 norm of grad(c)
#abs_grad = abs(grad(c))
# interface "normal"; undefined where grad(c) vanishes
nv = grad(c) / abs_grad
def heaviside(x):
    # UFL step function: 1 for x > 0, 0 for x < 0 (0/0 at exactly x = 0).
    '''if x.eval() < -DOLFIN_EPS:
        return Constant(0)
    elif x.eval()>DOLFIN_EPS:
        return Constant(1.)
    else:
        return Constant(0.5)'''
    return 0.5*(x+abs(x)) / abs(x)
ai = 0.3
wi = 4.
# anisotropy factor gamma(n): energy reduced along the directions in m
gamma = 1 - sum(ai**wi * heaviside(dot(nv, mi)) for mi in m)
eps = 0.01
# degenerate mobility ~ sqrt(f), vanishing in the pure bulk phases
multiplier = sqrt(Constant(0.25)*c**2*(Constant(1) - c)**2)
L0 = c*q*dx - c0*q*dx + multiplier * dt*dot(grad(mu), grad(q))*dx
#L1 = mu*v*dx - dfdc*v*dx - lmbda*dot(grad(c), grad(v))*dx
print(Identity(2)-outer(nv, nv))
L1 = mu*v*dx - gamma/eps*dfdc*v*dx - eps*gamma*dot(grad(c), grad(v))*dx #-\
#eps*dot(abs_grad*(Identity(2)-outer(nv, nv))*dot(nv, grad(gamma)),grad(v))*dx
L = L0 + L1
a = derivative(L, u, du)  # Jacobian form for the Newton solver
problem = CahnHilliardEquation(a, L)
solver = NewtonSolver()
solver.parameters["linear_solver"] = "lu"
solver.parameters["convergence_criterion"] = "incremental"
solver.parameters["relative_tolerance"] = 1e-6
file = File("result/ps-0/output.pvd", "compressed")
# Step in time
t = 0.0
T = 50*dt
while (t < T):
    t += dt
    u0.vector()[:] = u.vector()  # roll the previous solution forward
    solver.solve(problem, u.vector())
    file << (u.split()[0], t)
|
from numba import njit, jit, prange
import numpy as np
import matplotlib.pyplot as plt
@jit(nopython=True, parallel=True)
def simulate_activation(tau, dt, noise, std, damping, scale, tau_threshold, *args):
    """Yield the evolving activation of a noisy leaky evidence process.

    Euler integration of
        da = (-damping*a + arctan((tau - tau_threshold)*scale) + noise*std) dt,
    yielding the state after every step.
    NOTE(review): the loop is inherently sequential, so parallel=True has no
    effect here; *args is unused.
    """
    act = 0.0
    for i in range(len(tau)):
        act += (-damping*act + np.arctan((tau[i] - tau_threshold)*scale) + noise[i]*std)*dt
        yield act
@jit(nopython=True, parallel=True)
def simulate_time(tau, dt, noise, std, damping, scale, tau_threshold, act_threshold):
    """Return the first time the simulated activation crosses act_threshold
    (linearly interpolated between samples), or NaN if it never crosses."""
    acts = simulate_activation(tau, dt, noise, std, damping, scale, tau_threshold)
    prev = 0.0
    # Activation starts at 0, so this only fires for a non-positive threshold.
    if prev > act_threshold:
        return 0.0
    for i, act in enumerate(acts):
        if act < act_threshold:
            prev = act
            continue
        # Interpolate the crossing between the previous and current sample.
        t = (act_threshold - prev)/(act - prev)
        return (i + t)*dt
    return np.nan
@jit(nopython=True, parallel=True)
def simulate_times(tau, dt, noise_bank, std, damping, scale, tau_threshold, act_threshold):
    """Simulate one threshold-crossing time per noise realization (row of
    noise_bank); rows run in parallel via prange."""
    out = np.empty(len(noise_bank))
    for i in prange(len(noise_bank)):
        out[i] = simulate_time(tau, dt, noise_bank[i], std, damping, scale, tau_threshold, act_threshold)
    return out
@njit
def stdnormpdf(x):
    """Standard normal probability density at x."""
    return np.exp(-x**2/2)/np.sqrt(2*np.pi)
@njit
def sample_lik(vals, sample, dt):
    """Gaussian kernel-density estimate of the likelihood of each value in
    `vals` given the empirical `sample` of crossing times."""
    liks = np.empty_like(vals)
    # TODO: Handle explicitly
    sample = sample[np.isfinite(sample)]  # drop non-responses (NaN)
    n = len(sample)
    std = 5*np.sqrt(dt)  # TODO: Find a principled value for this?
    bw = std*n**(-1/(1+4))  # Silverman-style n**(-1/5) bandwidth scaling
    for i, val in enumerate(vals):
        lik = 0.0
        for s in sample:
            # TODO: Make a discrete PDF from the empirical CDF?
            lik += stdnormpdf((s - val)/bw)
        liks[i] = lik/(n*bw)
    return liks
from kwopt import minimizer, logbarrier, logitbarrier
def vdd_loss(trials, dt, N=5000):
    """Build a negative log-likelihood function over (tau, rt) trials.

    Noise banks are drawn once here so repeated loss evaluations during
    optimization are deterministic.
    """
    taus, rts = zip(*trials)
    hacktaus = []
    for tau in taus:
        hacktau = tau.copy()
        hacktau[hacktau < 0] = 1e5  # negative tau -> effectively never approaching
        hacktaus.append(hacktau)
    noises = [np.random.randn(N, len(tau)) for (tau, rts) in trials]
    def loss(**kwargs):
        # Sum log-likelihood of the observed rts under the simulated sample.
        lik = 0
        for tau, rt, noise in zip(hacktaus, rts, noises):
            sample = simulate_times(tau, dt, noise, **kwargs)
            lik += np.sum(np.log(sample_lik(rt, sample, dt) + 1e-9))
        return -lik
    return loss
def fit_vdd(trials, dt, N=1000, init=None):
    """Fit the VDD model parameters to `trials` by Nelder-Mead minimization
    of the simulated negative log-likelihood.

    Barriers keep the positive parameters positive (logbarrier) and
    damping within (0, 1) (logitbarrier).
    """
    if init is None:
        init = dict(
            std=1.0*np.sqrt(dt),
            damping=0.5,
            scale=1.0,
            tau_threshold=3.5,
            act_threshold=1.0
        )
    spec = dict(
        std= (init['std'], logbarrier),
        damping= (init['damping'], logitbarrier),
        scale= (init['scale'], logbarrier),
        tau_threshold= (init['tau_threshold'], logbarrier),
        act_threshold= (init['act_threshold'], logbarrier)
    )
    loss = vdd_loss(trials, dt, N)
    return minimizer(loss, method='nelder-mead')(**spec)
def gridtest():
    """Visualize the likelihood surface over a (std, tau_threshold) grid for
    synthetic trials generated at the true parameters (red dot)."""
    N = 20
    dt = 1/30
    dur = 20
    ts = np.arange(0, dur, dt)
    param = dict(
        std=1.0,
        damping=0.5,
        scale=1.0,
        tau_threshold=3.5,
        act_threshold=1.0
    )
    trials = []
    # Generate one synthetic trial per initial time-to-contact tau0.
    for tau0 in (2.0, 3.0, 4.0, 5.0):
        speed = 20.0
        dist = tau0*speed - ts*speed
        tau = dist/speed
        np.random.seed(0)
        noise_bank = np.random.randn(N, len(tau))
        hacktau = tau.copy()
        hacktau[hacktau < 0] = 1e5
        sample = simulate_times(hacktau, dt, noise_bank, **param)
        trials.append((tau, sample))
    #np.random.seed(0)
    #noise_bank = np.random.randn(N, len(tau))
    loss = vdd_loss(trials, dt)
    liks = []
    stds = np.linspace(0.1, 3, 30)
    #for std in stds:
    #    liks.append(-loss(**{**param, **{'std': std}}))
    #plt.plot(stds/np.sqrt(dt), liks)
    thresholds = np.linspace(2.0, 6.0, 30)
    S, T = np.meshgrid(stds, thresholds)
    # Evaluate the (negated) loss at every grid point.
    for std, threshold in zip(*(x.flat for x in (S, T))):
        liks.append(-loss(**{**param, **{'tau_threshold': threshold, 'std': std}}))
    liks = np.array(liks)
    plt.pcolormesh(S, T, np.exp(liks.reshape(S.shape)))
    plt.plot(param['std'], param['tau_threshold'], 'ro')
    plt.colorbar()
    #for threshold in thresholds:
    #    liks.append(-loss(**{**param, **{'tau_threshold': threshold}}))
    #plt.plot(thresholds, np.exp(liks))
    plt.show()
def fittingtest():
    """Generate synthetic trials and check that fit_vdd recovers parameters."""
    N = 20
    dt = 1/90
    dur = 20
    ts = np.arange(0, dur, dt)
    param = dict(
        std=1.0,
        damping=0.5,
        scale=1.0,
        tau_threshold=2.0,
        act_threshold=1.0
    )
    trials = []
    for tau0 in (2.0, 3.0, 4.0, 5.0):
        # NOTE(review): this overrides the loop variable, so all four trials
        # use tau0 = 4.5 — confirm whether that was intentional debugging.
        tau0 = 4.5
        speed = 30.0
        dist = tau0*speed - ts*speed
        tau = dist/speed
        np.random.seed(0)
        noise_bank = np.random.randn(N, len(tau))
        hacktau = tau.copy()
        hacktau[hacktau < 0] = 1e5
        sample = simulate_times(hacktau, dt, noise_bank, **param)
        trials.append((tau, sample))
    result = fit_vdd(trials, dt)
    print(result)
    #plt.hist(sample)
    #plt.show()
def samplingtest():
    """Plot example activation traces and the KDE of simulated crossing times."""
    N = 5000
    dt = 1/30
    dur = 20
    ts = np.arange(0, dur, dt)
    tau0 = 5.0
    speed = 30.0
    dist = tau0*speed - ts*speed
    tau = dist/speed
    np.random.seed(0)
    noise_bank = np.random.randn(N, len(tau))
    tau[tau < 0] = 1e5  # treat negative tau as never-approaching
    param = dict(
        std=3.0,
        damping=2.5,
        scale=1.0,
        tau_threshold=4.0,
        act_threshold=1.0
    )
    #print("Simulating")
    sample = simulate_times(tau, dt, noise_bank, **param)
    #responders = np.isfinite(sample)
    #print(np.sum(responders)/len(responders)*100)
    #plt.hist(sample[responders]*dt, bins=100)
    # Plot the first ten activation traces.
    for i in range(10):
        act = np.array(list(simulate_activation(tau, dt, noise_bank[i], std=param['std'], damping=param['damping'], scale=param['scale'], tau_threshold=param['tau_threshold'])))
        plt.plot(ts, act)
    plt.figure()
    plt.hist(sample[np.isfinite(sample)], bins=100, density=True)
    est = sample_lik(ts, sample, dt)
    #from scipy.stats.kde import gaussian_kde
    #est = gaussian_kde(sample, bw_method=0.1)(ts)
    #plt.plot(ts, est, color='green')
    #plt.twinx()
    print(sample)
    plt.plot(ts, est, color='red')
    plt.show()
if __name__ == '__main__':
    # Pick one of the diagnostics below.
    #gridtest()
    #fittingtest()
    samplingtest()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-13 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional link_lpsalmo URL field to the Musica model."""
    dependencies = [
        ('mpm', '0003_auto_20160530_2354'),
    ]
    operations = [
        migrations.AddField(
            model_name='musica',
            name='link_lpsalmo',
            field=models.URLField(blank=True, null=True),
        ),
    ]
|
# For each test case read (a, b, k) and print k//2 full (gain a, lose b)
# cycles plus one extra gain of a when k is odd.
test_case = int(input())
for _ in range(test_case):
    a, b, k = map(int, input().split())
    print(k//2*(a-b) + (k % 2) * a)
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
# Train a 2-3-1 feed-forward network on the XOR truth table with backprop.
net = buildNetwork(2, 3, 1)
data = SupervisedDataSet(2, 1)
data.addSample((0, 0), (0))
data.addSample((0, 1), (1))
data.addSample((1, 0), (1))
data.addSample((1, 1), (0))
trainer = BackpropTrainer(net, data)
epoch = 10000
for i in range(epoch):
    trainer.train()
# Show the trained outputs for all four XOR inputs.
print(net.activate([0, 0]))
print(net.activate([1, 0]))
print(net.activate([0, 1]))
print(net.activate([1, 1]))
|
def solution(lottos, win_nums):
    """Return [highest, lowest] achievable lotto ranks.

    Wiped numbers (zeros) may each turn out to be a winning number, so the
    best case counts them as hits and the worst case does not. Rank is
    7 - hits, with zero/one hit both mapping to rank 6.
    """
    matched = sum(1 for num in lottos if num != 0 and num in win_nums)
    wiped = lottos.count(0)
    # Bump zero-hit cases to rank 6 without disturbing the best-case count.
    if matched == 0 and wiped == 0:
        matched = 1
    elif matched == 0:
        matched = 1
        wiped -= 1
    return [7 - (wiped + matched), 7 - matched]
# A cleaner solution by another author
def solution(lottos, win_nums):
    """Return (highest, lowest) lotto ranks: lowest assumes no wiped number
    wins, highest assumes every wiped (zero) number wins."""
    rank_by_hits = [6, 6, 5, 4, 3, 2, 1]
    blanks = lottos.count(0)
    hits = len([num for num in win_nums if num in lottos])
    return rank_by_hits[blanks + hits], rank_by_hits[hits]
import os
# Commands the bot responds to.
EXAMPLE_COMMAND = "test"
COMMAND_REVENUE = "revenue"
COMMAND_DAILY = 'daily update'
COMMAND_HIRED = "hired"
COMMAND_CHECK_HIRED = 'Check status change for hired'
COMMAND_BATTERY_LEVEL = 'battery level'
COMMAND_HELP= "list commands"
COMMAND_DELETE_ALL = "delete all messages"
# NOTE(review): BOT_ID is None when the env var is unset, which makes the
# concatenation below raise TypeError at import time — confirm this
# fail-fast behavior is intended.
BOT_ID = os.environ.get("BOT_ID")
AT_BOT = "<@" + BOT_ID + ">"
# Donkey Republic public hub-search API.
DONKEY_API_BASE = "https://www.donkey.bike/api/public/hubs/search"
DONKEY_API_BIKE_NO = "?number_of_bikes={0}"
ALTERHUB_ID = 262
# Response message templates (Slack markdown).
RESPONSE_DELETED_ALL = 'Whoa, what a mess. All messages are deleted'
RESPONSE_BATTERY = '''
The battery levels are as follows
#of bikes
good *{0}*
low *{1}*
critical *{2}*
'''
RESPONSE_REVENUE = '''
Our current revenue is {0} Dkk.
Not too bad.\nThough I should point out this is only for last 30 days.
Overall Revenue is not displayed which is silly :/
'''
RESPONSE_DAILY_UPDATE_PREQUEL ='''
\nIt's time for our Daily Update....
'''
RESPONSE_DAILY_UPDATE = '''
__________________________________________________
How many of our heroes are in the hub? : *{0}*
What is our revenue for 30 days? : *{1}*
Do we have to charge the battery? : *{2}*
__________________________________________________
'''
RESPONSE_NO_CLUE = "Not sure what you mean. Use the *" + COMMAND_HELP + \
"* command to see help"
RESPONSE_HELP = "I know asking for help can be hard, so I applaud you, well done.\n" +\
"Here are the commands you can use: {0}, {1},{2}, {3}, {4}".format(COMMAND_HIRED, COMMAND_REVENUE, COMMAND_HELP, COMMAND_BATTERY_LEVEL,COMMAND_DAILY)
HIRED_NUM_BIKES = "DUDE!!! This is *awesome*!!!\n" + \
"*{0}* of our bikes are hired right now. You are rich, when do I get paid?"
HIRED_NO_BIKES = "All of our bikes are sound and safe in their hub :). Pretty cool."
STATUS_CHANGED = "Yo, check it. *The bike availability changed.* "
|
from django.shortcuts import render_to_response, redirect, HttpResponseRedirect
from django.core.context_processors import csrf
# Create your views here.
from django.template import RequestContext
from Rebbit.models import Post, Person, Sub_rebb, Voting, Comment, CommentVoting
def homepage(request):
    """Render the front page with every post and the sorted subreddit list."""
    subs = Sub_rebb.objects.order_by('sub_r')
    # Reset the per-session vote flag shown by templates.
    request.session['voted'] = "False"
    return render_to_response("Rebbit/homepage.html", {
        'Post_list': Post.objects.all(),
        'Subrebb_list': subs,
    }, context_instance=RequestContext(request))
def createpost(request):
    """Show the post-creation form with the subreddit list and a CSRF token."""
    subs = Sub_rebb.objects.order_by('sub_r')
    c = {
        'Subrebb_list': subs,
    }
    c.update(csrf(request))
    return render_to_response("Rebbit/createpost.html",c,context_instance=RequestContext(request))
def auth_post(request):
    """Create a Post from the submitted form; re-render the form with an
    error flag when the chosen topic (subreddit) does not exist."""
    post = Post()
    post.title = request.POST.get('posttitle')
    # The creator is the logged-in user stored in the session.
    web_user = Person.objects.get(username=request.session['user_username'])
    post.creator = web_user
    post.rpost = request.POST.get('postdescription')
    post.firstlink = request.POST.get('firstlink')
    post.secondlink = request.POST.get('secondlink')
    post.thirdlink = request.POST.get('thirdlink')
    try:
        postsubr = Sub_rebb.objects.get(sub_r=request.POST['topic'].lower())
        post.subreddit = postsubr
        post.save()
    except Sub_rebb.DoesNotExist:
        return render_to_response("Rebbit/createpost.html", {
            'invalid': True
        }, context_instance=RequestContext(request))
    return redirect("/index")
def auth_comment(request,sub_id,post_id):
    """Attach a new comment from the session user to the given post, then
    redirect back to the post page."""
    commentpost = Comment()
    commentpost.comment = request.POST.get('comment')
    web_user = Person.objects.get(username=request.session['user_username'])
    commentpost.creator = web_user
    name = Post.objects.get(id=post_id)
    commentpost.post = name
    commentpost.save()
    # NOTE(review): hard-coded dev host — confirm before deploying.
    url = "http://127.0.0.1:8000/r/" + str(sub_id) + "/" + str(post_id)
    return HttpResponseRedirect(url)
def auth_verify(request):
    """Log a user in by comparing the submitted password with the stored one.

    NOTE(review): passwords are compared (and therefore stored) in plain
    text — this should use Django's hashed-password auth.
    """
    try:
        user = Person.objects.get(username=request.POST['username'])
        # the password verified for the user
        if user.password == request.POST['password']:
            # Cache the user's identity in the session for later views.
            request.session['user_fname'] = user.first_name
            request.session['user_lname'] = user.last_name
            request.session['user_email'] = user.email
            request.session['user_username'] = user.username
            return redirect("/account")
        else:
            return render_to_response("Rebbit/signin.html", {
                'invalid': True
            }, context_instance=RequestContext(request))
    except Person.DoesNotExist:
        user = None
        # the authentication system was unable to verify the username and password
        return render_to_response("Rebbit/signin.html", {
            'bothinvalid': True
        }, context_instance=RequestContext(request))
def subredditdetail(request,sub_id,post_id):
    """Render a post with its comments (most-voted first); set 'invalid'
    when the post has no links at all so the template can adapt."""
    post_info = Post.objects.get(id=post_id)
    comment_info = Comment.objects.filter(post=post_info)
    comment_info = comment_info.order_by('-count')
    # Count how many of the three optional links are empty.
    num = 0
    if post_info.firstlink == "":
        num = num+1
    if post_info.secondlink == "":
        num = num+1
    if post_info.thirdlink == "":
        num = num+1
    if num == 3:
        c = {
            'Post_list': post_info,
            'invalid': True ,
            'comment_list': comment_info,
        }
        c.update(csrf(request))
        return render_to_response("Rebbit/subredditdetail.html", c, context_instance=RequestContext(request))
    else:
        c = {
            'Post_list': post_info,
            'comment_list': comment_info,
        }
        c.update(csrf(request))
        return render_to_response("Rebbit/subredditdetail.html", c, context_instance=RequestContext(request))
def votecomment(request,sub_id,post_id,comment_id):
    """Upvote a comment once per user: if a CommentVoting row already exists
    the vote is ignored; otherwise record it and bump the comment count."""
    comid = comment_id
    comment_info = Comment.objects.get(id=comid)
    user = Person.objects.get(username=request.session['user_username'])
    try:
        # Existing vote -> no-op, just go back to the post.
        vote = CommentVoting.objects.get(creator=user,votechoice=comment_info)
        context={
        }
        url = "http://127.0.0.1:8000/r/" + str(sub_id) + "/" + str(post_id)
        return HttpResponseRedirect(url)
    except CommentVoting.DoesNotExist:
        vote = CommentVoting()
        request.session['cvoted'] = "False"
        comment_info.count = comment_info.count + 1
        vote.creator = user
        vote.votechoice = comment_info
        vote.save()
        comment_info.save()
        url = "http://127.0.0.1:8000/r/" + str(sub_id) + "/" + str(post_id)
        return HttpResponseRedirect(url)
# Voting is tied to a post and the person currently in session (only logged-in
# users can vote): try to fetch an existing vote first; if none exists, create
# one, increment the post's count, and save both.
def votepost(request,sub_id,post_id):
    """Upvote a post once per user; repeat votes only set the session flag."""
    sub = Sub_rebb.objects.get(sub_r=sub_id)
    post_info = Post.objects.get(id=post_id)
    user = Person.objects.get(username=request.session['user_username'])
    try:
        vote = Voting.objects.get(creator=user,votechoice=post_info)
        # Already voted: templates read this flag to show a notice.
        request.session['voted'] = "True"
        url = "http://127.0.0.1:8000/r/" + str(sub.sub_r) + "/" + str(post_info.id)
        return HttpResponseRedirect(url)
    except Voting.DoesNotExist:
        vote = Voting()
        request.session['voted'] = "False"
        post_info.count = post_info.count + 1
        vote.creator = user
        vote.votechoice = post_info
        vote.save()
        post_info.save()
        url = "http://127.0.0.1:8000/r/" + str(sub.sub_r) + "/" + str(post_info.id)
        return HttpResponseRedirect(url)
def subreddit(request,sub_id):
    """Render one subreddit's page with its posts and the full subreddit list."""
    sub = Sub_rebb.objects.get(sub_r=sub_id)
    sub_info = Post.objects.filter(subreddit=sub)
    return render_to_response("Rebbit/subreddit.html", {
        'subname': sub,
        'sub_list': sub_info,
        'reg_sub_list': Sub_rebb.objects.all(),
    }, context_instance=RequestContext(request))
def signup(request):
    """Render the sign-up form with a CSRF token."""
    c = {}
    c.update(csrf(request))
    return render_to_response("Rebbit/signup.html",c)
def auth_signup(request):
    """Create a Person from the sign-up form, then redirect to sign-in.

    NOTE(review): the password is stored in plain text and no input
    validation/uniqueness check is done — should be hardened.
    """
    person = Person()
    person.first_name = request.POST.get('firstname')
    person.last_name = request.POST.get('lastname')
    person.email = request.POST.get('email')
    person.username = request.POST.get('username')
    person.password = request.POST.get('password')
    person.save()
    return redirect("/signin")
def signin(request):
    """Render the sign-in form with a CSRF token."""
    c = {}
    c.update(csrf(request))
    return render_to_response("Rebbit/signin.html",c)
def accounthome(request):
    """Render the account home page listing all registered persons."""
    return render_to_response("Rebbit/userhome.html", {
        'Person_list': Person.objects.all()
    }, context_instance=RequestContext(request))
def logout(request):
    """Flush the session for a logged-in user; otherwise show sign-in with a
    'not logged in' notice."""
    if request.session.get('user_username', None):
        request.session.flush()
        return redirect("/index")
    else:
        return render_to_response("Rebbit/signin.html", {
            'notlogged': True
        }, context_instance=RequestContext(request))
def createsub(request):
    """Render the subreddit-creation form with a CSRF token."""
    c = {}
    c.update(csrf(request))
    return render_to_response("Rebbit/createsub.html",c,context_instance=RequestContext(request))
def auth_sub(request):
    """Create a new subreddit unless one with the same name already exists,
    in which case re-render the form with an error flag."""
    sub = Sub_rebb()
    try:
        postsubr = Sub_rebb.objects.get(sub_r=request.POST['subname'])
        # Name taken -> report it.
        return render_to_response("Rebbit/createsub.html", {
            'invalid': True
        }, context_instance=RequestContext(request))
    except Sub_rebb.DoesNotExist:
        # Names are normalized to lower case on creation.
        sub.sub_r = request.POST.get('subname').lower()
        sub.save()
        return redirect("/createpost")
def aboutus(request):
    """Render the static about page."""
    return render_to_response("Rebbit/about.html", {
    }, context_instance=RequestContext(request))
|
# Code to check if a string is an anagram of another
# Runtime is O(n), Space complexity is O(n)
def string_anagram(str1,str2):
length1=len(str1)
length2=len(str2)
flag=0
list1=list(str1)
list2=list(str2)
letterdict1={}
letterdict2={}
if(length1==length2):
for i in list1:
if i in letterdict1:
letterdict1[i]+=1
else:
letterdict1[i]=1
print letterdict1
for j in list2:
if j in letterdict2:
letterdict2[j]+=1
else:
letterdict2[j]=1
print letterdict2
#Now comapring the relative frequeency
for k in letterdict1:
if(letterdict1[k]==letterdict2[k]):
continue
else:
flag = 1
break
if (flag == 0):
print "Strings are Anagrams"
elif (flag == 1):
print "Strings are Not Anagrams"
else:
print ("Strings are Not Anagrams")
string_anagram('google','ogleeg') |
import re
def recover_all(sentense_dep, src_id):
    """Recursively reassemble the surface phrase of the subtree rooted at
    src_id, placing each child's phrase before or after the head token
    according to its token id."""
    node = sentense_dep[src_id]
    before = ""
    after = ""
    for child_id in node['child_id']:
        subphrase = recover_all(sentense_dep, child_id)
        if child_id > src_id:
            after = subphrase if after == "" else after + " " + subphrase
        else:
            before = subphrase if before == "" else before + " " + subphrase
    phrase = node['tok']
    if after != "":
        phrase = phrase + " " + after
    if before != "":
        phrase = before + " " + phrase
    return phrase
def recover_nsubj_phase(sentense_dep, src_id, passive=False):
    """Return the phrase of the subject child of src_id ("nsubj", or
    "nsubj:pass" when passive=True); "" when there is none. When several
    subject children exist, the last one wins (as in the original)."""
    wanted = "nsubj:pass" if passive else "nsubj"
    matches = [child_id for child_id in sentense_dep[src_id]['child_id']
               if sentense_dep[child_id]['deprel'] == wanted]
    if not matches:
        return ""
    return recover_all(sentense_dep, matches[-1])
def arg_is_found(arg_name, phase):
    """True iff the FIRST occurrence of arg_name in phase is delimited like a
    standalone word: preceded by start-of-string/'*'/' ' and followed by
    end-of-string/'*'/'.'/' '/newline."""
    if arg_name not in phase:
        return False
    start = phase.index(arg_name)
    end = start + len(arg_name)
    left_ok = start == 0 or phase[start - 1] in ("*", " ")
    right_ok = end == len(phase) or phase[end] in ("*", ".", " ", "\n")
    return left_ok and right_ok
def is_not_emphasis(sentense_dep, src_id):
    """Return False when any 'aux' child of src_id contains an emphasis word
    (must/should/need/require, case-insensitive); True otherwise."""
    emphasis_words = re.compile(r'(must|should|need|require)', re.IGNORECASE)
    aux_tokens = (sentense_dep[child_id]['tok']
                  for child_id in sentense_dep[src_id]['child_id']
                  if sentense_dep[child_id]['deprel'] == "aux")
    return not any(emphasis_words.search(tok) for tok in aux_tokens)
def analyze_arg_pre(dep, arg_name):
    """Decide whether arg_name looks like a precondition-checked argument.

    Scans each sentence's dependency roots; a sentence counts when its root
    is emphasized (must/should/...), is not a post-causal or ignored-verb
    sentence, and its (possibly passive) subject phrase mentions arg_name.
    """
    arg_need_check = False
    # Get dependency information.
    analyzed_dep = dep.preprocess_dep()
    post_causal_words = re.compile(r'(after|until|subsequent|later|then)')
    # Avoid to analyze normal functionality descriptions.
    ignore_verbs = re.compile(r'(free|release|close|use|return)')
    for i, sentence in enumerate(analyzed_dep):
        dep_info = sentence['dep_info']
        for root_id in sentence['root']:
            # Skip sentences without an emphasis auxiliary.
            if is_not_emphasis(dep_info, root_id):
                continue
            # Ignore the verbs: free\release\return...
            if post_causal_words.search(dep.sentences[i]) != None \
            or ignore_verbs.search(dep_info[root_id]['tok']) != None:
                continue
            # NOUN, NUM -> nsubj
            if dep_info[root_id]['pos'] in ['NOUN', 'NUM']:
                phase = recover_nsubj_phase(dep_info, root_id)
                arg_need_check = arg_is_found(arg_name, phase)
            # VERB -> nsubj:pass
            elif dep_info[root_id]['pos'] == "VERB":
                phase = recover_nsubj_phase(dep_info, root_id, True)
                arg_need_check = arg_is_found(arg_name, phase)
            else:
                continue
            if arg_need_check == True:
                break
    return arg_need_check
def retrive_case_word(sentense_dep, src_id):
    """Return the token of the first 'case' child (preposition) of src_id,
    or "" when it has none."""
    for child_id in sentense_dep[src_id]['child_id']:
        if sentense_dep[child_id]['deprel'] == 'case':
            return sentense_dep[child_id]['tok']
    return ""
def identify_arg_id(arg_name, sentense_dep, src_id):
    """Search src_id and its appos/conj/parataxis descendants for a node whose
    token names arg_name; return its id, or -1 when no acceptable mention
    exists. Mentions governed by read-only prepositions are skipped."""
    node = sentense_dep[src_id]
    if arg_is_found(arg_name, node['tok']):
        # Ignore preposition-governed mentions whose meaning cannot be changed.
        ignore_preposition_words = re.compile(r'\b(with|for|from|at|under|of|on)\b')
        case_word = retrive_case_word(sentense_dep, src_id)
        if ignore_preposition_words.search(case_word) is None:
            return src_id
    follow_rels = ('appos', 'conj', 'parataxis')
    for child_id in node['child_id']:
        if sentense_dep[child_id]['deprel'] in follow_rels:
            hit = identify_arg_id(arg_name, sentense_dep, child_id)
            if hit != -1:
                return hit
    return -1
def analyze_arg_post(dep, arg_name, arg_type):
    """Decide whether arg_name can carry a postcondition (return status).

    Only pointer-like arguments ('*' in arg_type) qualify; a sentence counts
    when a store/return/write root verb has an obj/obl/subject child whose
    subtree mentions arg_name.
    """
    # Directly ignore the argument which cannot be changed by the function.
    if '*' not in arg_type:
        return False
    arg_need_check = False
    # Get dependency information.
    analyzed_dep = dep.preprocess_dep()
    # return, write, store, ...
    sensitive_verbs = re.compile(r'(store|return|write)')
    # Only concern the argument which can carry return status but not whether it can be changed.
    for sentence in analyzed_dep:
        dep_info = sentence['dep_info']
        if sentence['action'] == []:
            continue
        for root_id in sentence['root']:
            if sensitive_verbs.search(dep_info[root_id]['tok']) == None:
                continue
            # Reset per qualifying root; the last qualifying root decides.
            arg_need_check = False
            for child_id in dep_info[root_id]['child_id']:
                if dep_info[child_id]['deprel'] not in ['obj', 'obl', 'nsubj:pass', 'nsubj']:
                    continue
                arg_id = identify_arg_id(arg_name, dep_info, child_id)
                if arg_id != -1:
                    arg_need_check = True
            if arg_need_check == True:
                break
        if arg_need_check == True:
            break
    return arg_need_check
|
# coding: utf-8
import atexit
import io
import os
from os import path
import shutil
import pytest
from flexp import flexp
def test_override():
    """flexp.setup must create the experiment dir, refuse to reuse it with
    override_dir=False, and succeed with override_dir=True."""
    expdir = path.join("tests/data/", "exp01")
    # Remove the experiment dir if it exists
    if os.path.exists(expdir):
        shutil.rmtree(expdir)
    # We have to reset the _eh to make flexp stop complaining about calling setup twice.
    flexp.core._eh = {}
    flexp.setup("tests/data/", "exp01", False, override_dir=False)
    assert path.isdir(expdir), "flexp didn't create experiment dir with override_dir=False"
    # Test that it fails to create the directory, there should be logging file already.
    with pytest.raises(FileExistsError):
        flexp.core._eh = {}
        flexp.setup("tests/data/", "exp01", False, override_dir=False)
    # This should be ok
    flexp.core._eh = {}
    flexp.setup("tests/data/", "exp01", False, override_dir=True)
    # Disable logging to be able to delete the experiment directory.
    flexp.disable()
    # Remove the experiment dir
    if os.path.exists(expdir):
        shutil.rmtree(expdir)
|
"""
Week 1, Day 7: Cousins in Binary Tree
In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.
Two nodes of a binary tree are cousins if they have the same depth, but have different parents.
We are given the root of a binary tree with unique values, and the values x and y of two different
nodes in the tree.
Return true if and only if the nodes corresponding to the values x and y are cousins.
Example 1:
Input: root = [1,2,3,4], x = 4, y = 3
Output: false
Example 2:
Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
Output: true
Example 3:
root = [1,2,3,null,4], x = 2, y = 3
Output: false
Notes:
1. The number of nodes in the tree will be between 2 and 100.
2. Each node has a unique integer value from 1 to 100.
"""
from collections import deque
from solutions.tree_node import TreeNode
def isCousins(root: TreeNode, x: int, y: int) -> bool:
    """Return True iff the nodes valued x and y are cousins: same depth but
    different parents. BFS records (depth, parent value) for each target."""
    found = {}
    frontier = deque([(0, root)])
    while frontier and len(found) < 2:
        depth, node = frontier.popleft()
        for child in (node.left, node.right):
            if child is None:
                continue
            frontier.append((depth + 1, child))
            if child.val in {x, y}:
                found[child.val] = (depth + 1, node.val)
    return (len(found) == 2
            and found[x][0] == found[y][0]
            and found[x][1] != found[y][1])
if __name__ == '__main__':
    # Each case prints True when isCousins returns the expected answer.
    root = TreeNode(1, left=TreeNode(2, left=TreeNode(4)), right=TreeNode(3))
    x = 4
    y = 3
    print(isCousins(root, x, y) is False)
    root = TreeNode(1, left=TreeNode(2, right=TreeNode(4)), right=TreeNode(3, right=TreeNode(5)))
    x = 5
    y = 4
    print(isCousins(root, x, y) is True)
    root = TreeNode(1, left=TreeNode(2, right=TreeNode(4)), right=TreeNode(3))
    x = 2
    y = 3
    print(isCousins(root, x, y) is False)
    root = TreeNode(1, right=TreeNode(2, left=TreeNode(3, right=TreeNode(4, right=TreeNode(5)))))
    x = 1
    y = 3
    print(isCousins(root, x, y) is False)
    root = TreeNode(1, left=TreeNode(2, right=TreeNode(3, right=TreeNode(4, left=TreeNode(5)))))
    x = 3
    y = 4
    print(isCousins(root, x, y) is False)
# last line of code
|
import pickle
import os
def get_pickle_file_content(full_path_pickle_file):
    """Load and return the object stored in the given pickle file.

    Returns an empty list (after printing a notice) when the file does not
    exist. Uses a `with` block so the handle is closed even if unpickling
    raises (the original leaked the handle on error).
    """
    if not os.path.isfile(full_path_pickle_file):
        print(f'Pickle file >{full_path_pickle_file}< does not exist')
        return list()
    with open(full_path_pickle_file, 'rb') as pickle_file:
        # latin1 lets Python-2-era pickles with byte strings load under Python 3.
        return pickle.load(pickle_file, encoding='latin1')
def save_to_pickle_file(data, file):
    """Pickle `data` into `file`, truncating any existing content.

    Uses a `with` block so the handle is closed even when pickling raises.
    """
    with open(file, 'wb+') as ret_file:
        pickle.dump(data, ret_file)
def print_X_pickle_filenames(pickle_files, number):
    """Print up to `number` filenames from pickle_files, or an empty-dir
    notice when there are none."""
    if not pickle_files:
        print(f'Pickle dir is empty')
        return
    print(f'Print >{number}< pickle files')
    for shown, file in enumerate(pickle_files, start=1):
        print(f'file >{file}<')
        if shown >= number:
            break
def get_binary_and_its_functions(pickle_file):
    """Group function names (elem[2]) by binary name (elem[7]) from the
    records stored in `pickle_file`.

    The original scanned every existing key per element (O(n * #keys));
    dict.setdefault gives the same grouping in O(n). The unused `binaries`
    and `functions` sets were removed.
    """
    bin_and_funcs = dict()
    for elem in get_pickle_file_content(pickle_file):
        bin_and_funcs.setdefault(elem[7], []).append(elem[2])
    return bin_and_funcs
|
def add(x1: int, x2: int) -> int:
    """Return the sum of x1 and x2."""
    total = x1 + x2
    return total
# Quick smoke test of add().
y = add(3,4)
print(y)
|
'''
This server is for automating the
generation of up to date reports for the readme
'''
import os
import sys
from flask import (Flask, send_file, request, jsonify, send_from_directory)
# Serve files from ./static and ensure the folder exists at startup.
app = Flask(__name__, static_url_path='')
app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(),'static')
# Make directory if it does not already exist
if not os.path.exists(app.config['UPLOAD_FOLDER']):
    os.makedirs(app.config['UPLOAD_FOLDER'])
# function for running covidify
def run_covidify():
    """Regenerate the covidify reports inside UPLOAD_FOLDER, then delete the
    raw data folder and intermediate .xlsx files.

    NOTE(review): shell commands are built by string concatenation with
    os.system; the paths come from our own config, but subprocess.run with
    an argument list would be safer against paths containing spaces/metachars.
    """
    os.system('cd ' + app.config['UPLOAD_FOLDER'] + ' && covidify run --output=./')
    os.system('rm -rf ' + os.path.join(app.config['UPLOAD_FOLDER'], 'data'))
    os.system('rm -rf ' + os.path.join(app.config['UPLOAD_FOLDER'], 'reports', '*.xlsx'))
@app.route('/static/<path:filename>')
def send_img(filename):
    """Serve a generated report image from the upload folder.

    Fixes: the original passed a set literal ({'unable to get image'}) to
    jsonify(), which is not JSON serializable and raised a second error; it
    also used a bare `except:`. Return a proper JSON object instead.
    """
    try:
        return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
    except Exception:
        return jsonify({'error': 'unable to get image'})
# Load the model and run the server
if __name__ == "__main__":
    # app.debug = True
    # port = int(os.environ.get("PORT", 7654))
    app.run(debug=True)
# coding: utf-8
# Load an image whose path is given on the command line and display it briefly.
import numpy as np
import cv2
import matplotlib.pyplot as plt
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path of the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
cv2.imshow("Image", image)
cv2.waitKey(5)  # wait up to 5 ms for a key press before the window closes
|
""" File: twine.py
Author: Abraham Aruguete
Purpose: So this creates a game in which we use a stack to keep track of location and such. The stack which we use is encoded as a list within python. Use list.append() and list.pop() to modify the "stack" """
import sys
def input_prompt():
    """ This is a function which takes in the user input, and then relays the prompt as
    described in the project description.

    NOTE(review): the file contents are read into linesOfObstacleFile, but the
    always-empty lines_of_obstacle_file is what is validated and passed on, so
    obstacle files are silently ignored. Fixing this needs a coordinated change
    with error_check/terminal_commands, which disagree about who splits the
    lines (error_check mutates them in place, terminal_commands re-splits).
    The opened file is also never closed.
    """
    lines_of_obstacle_file = []
    while True:
        #this loop is meant to be broken
        filename = input("Please give the name of the obstacles filename, or - for none.\n")
        try:
            if filename == "-":
                break
            else:
                obsFile = open(filename, "r")
                linesOfObstacleFile = obsFile.readlines()
                for i in range(len(linesOfObstacleFile)):
                    linesOfObstacleFile[i] = linesOfObstacleFile[i].strip()
                break
        except FileNotFoundError:
            print("ERROR: File not found.")
            continue
    if error_check(lines_of_obstacle_file):
        print("ERROR: Obstacle file has invalid entries.")
        sys.exit()
    terminal_commands(lines_of_obstacle_file)
def error_check(lines_of_obstacle_file):
    """Return True when any obstacle line is malformed, i.e. has more than
    two whitespace-separated tokens or a token that is not an integer.

    Fixes over the original: the inner loop iterated the tokens themselves
    but then used each token as an index (`lines[i][j]` with a string j),
    raising an uncaught TypeError for any non-empty file; the input list was
    also split in place, corrupting it for terminal_commands, which splits
    the raw lines again. This version inspects the lines without mutating.
    """
    flag = False
    for line in lines_of_obstacle_file:
        tokens = line.split()
        if len(tokens) > 2:
            flag = True
        for token in tokens:
            try:
                int(token)
            except ValueError:
                flag = True
    return flag
def terminal_commands(lines_of_obstacle_file):
    """ This is a function which takes in the lines of the obstacle file, then edits
    them through a list of commands which do things to the stack.

    Reads one command per stdin line: n/s/e/w move, "back" pops the history,
    "" does nothing, plus crossings/ranges/map reports.

    NOTE(review): several latent bugs, currently masked because the caller
    always passes an empty obstacle list:
      * every list_of_obstacle_tuples entry uses lines_of_obstacle_file[0]
        and [1] instead of [i][0]/[i][1];
      * the tuples would hold strings while positions are int tuples, so the
        obstacle membership tests could never match;
      * print_prompt() is never called, so the prompt text is never shown.
    """
    # creating some lists of tuples because it makes the printing easier
    stack = [(0, 0)]  # movement history; the last element is the current position
    for i in range(len(lines_of_obstacle_file)):
        lines_of_obstacle_file[i] = lines_of_obstacle_file[i].split()
    list_of_obstacle_tuples = []
    for i in range(len(lines_of_obstacle_file)):
        list_of_obstacle_tuples.append((lines_of_obstacle_file[0], lines_of_obstacle_file[1]))
    try:
        for line in sys.stdin:
            command = line
            command = command.strip()
            if(command == "n"):
                # North: y + 1, blocked if an obstacle occupies the target square.
                if((stack[len(stack)-1][0], stack[len(stack)-1][1] + 1) in list_of_obstacle_tuples):
                    print("You could not move in that direction, because there is an obstacle in the way.")
                    print("You stay where you are.")
                    continue
                else:
                    pos_tuple = (stack[len(stack)-1][0], stack[len(stack)-1][1] + 1)
                    stack.append(pos_tuple)
                    continue
            elif(command == "s"):
                # South: y - 1.
                if((stack[len(stack)-1][0], stack[len(stack)-1][1] - 1) in list_of_obstacle_tuples):
                    print("You could not move in that direction, because there is an obstacle in the way.")
                    print("You stay where you are.")
                    continue
                else:
                    pos_tuple = (stack[len(stack)-1][0], stack[len(stack)-1][1] - 1)
                    stack.append(pos_tuple)
                    continue
            elif(command == "w"):
                # West: x - 1.
                if((stack[len(stack)-1][0]-1, stack[len(stack)-1][1]) in list_of_obstacle_tuples):
                    print("You could not move in that direction, because there is an obstacle in the way.")
                    print("You stay where you are.")
                    continue
                else:
                    pos_tuple = (stack[len(stack)-1][0]-1, stack[len(stack)-1][1])
                    stack.append(pos_tuple)
                    continue
            elif(command == "e"):
                # East: x + 1.
                if((stack[len(stack)-1][0]+1, stack[len(stack)-1][1]) in list_of_obstacle_tuples):
                    print("You could not move in that direction, because there is an obstacle in the way.")
                    print("You stay where you are.")
                    continue
                else:
                    pos_tuple = (stack[len(stack)-1][0]+1, stack[len(stack)-1][1])
                    stack.append(pos_tuple)
                    continue
            elif(command == "back"):
                # Pop one step of history unless we are at the start.
                if (len(stack) == 1):
                    print("Cannot move back, as you are at the start!")
                    continue
                else:
                    stack.pop()
                    print("You retrace your steps by one space")
                    continue
            elif(command == ""):
                print("You do nothing.")
                continue
            elif(command == "crossings"):
                print("There have been " + str(crossings(stack)) + " times in the history when you were at this point.")
                continue
            elif(command == "ranges"):
                ranges(stack)
                continue
            elif(command == "map"):
                map(stack, list_of_obstacle_tuples)
                continue
            else:
                print("ERROR: Invalid Command.")
                continue
    except ValueError:
        print("ERROR: Invalid data type.")
def print_prompt(stack):
    """Show the current position (top of the stack), the full movement
    history, and the next-command prompt."""
    print("Current Position: " + str(stack[-1]))
    print("Your History:" + " "*5 + str(stack))
    print("What is your next command?")
def crossings(stack):
    """Count how many times the history contains the current (last) position,
    including the current visit."""
    return stack.count(stack[-1])
def ranges(stack):
    """Print the extreme west/east/south/north coordinates ever visited
    (the bounding box of the movement history)."""
    xs = [pos[0] for pos in stack]
    ys = [pos[1] for pos in stack]
    print("The furthest West you have ever walked is " + str(min(xs)))
    print("The furthest East you have ever walked is " + str(max(xs)))
    print("The furthest South you have ever walked is " + str(min(ys)))
    print("The furthest North you have ever walked is " + str(max(ys)))
def map(stack, listOfObstacleTuples):
    """This function prints a map in accordance with the specifics in the prompt."""
    # NOTE(review): this shadows the builtin `map`; renaming (e.g. print_map)
    # would be safer, but callers use this name.
    listOfXValues = []
    listOfYValues = []
    for i in stack:
        listOfXValues.append(i[0])
        listOfYValues.append(i[1])
    # Bounding box of everywhere the walker has been.
    SOffset = min(listOfYValues)
    NOffset = max(listOfYValues)
    EOffset = max(listOfXValues)
    WOffset = min(listOfXValues)
    grid = []
    # NOTE(review): the grid is (NOffset-SOffset) x (EOffset-WOffset); an
    # inclusive bounding box would need +1 in both dimensions — confirm
    # against the assignment prompt.
    for y in range(NOffset-SOffset):
        grid.append([])
    for x in grid:
        for j in range(EOffset-WOffset):
            x.append(".")
    # "*" marks the start, "+" the current position, "X" visited cells,
    # " " obstacles.
    # NOTE(review): the indexing mixes raw offsets (SOffset/WOffset, which
    # are minima, possibly negative) with "SOffset-1 + coord" arithmetic;
    # for many histories this indexes out of range or wraps around —
    # verify before relying on the rendered map.
    grid[SOffset][WOffset] = "*"
    grid[SOffset-1 + stack[len(stack)-1][1]][WOffset-1 + stack[len(stack)-1][0]] = "+"
    for i in range(len(stack)-1):
        grid[SOffset-1 + stack[i][1]][WOffset-1 + stack[i][0]] = "X"
    for i in range(len(listOfObstacleTuples)):
        grid[SOffset-1 + listOfObstacleTuples[i][1]][WOffset-1 + listOfObstacleTuples[i][0]] = " "
    # Each cell is printed on its own line, then the raw grid is dumped.
    # NOTE(review): printing one cell per line is probably not the intended
    # rendering; `print("".join(y))` per row would draw a real map.
    for y in grid:
        for x in y:
            print(x)
        print()
    print(grid)
def main():
    """Entry point: start the interactive walking prompt loop."""
    input_prompt()
main()
|
import unittest
import nest_msa
class NestMSATestCase(unittest.TestCase):
    """End-to-end checks of nest_msa.nest_msa_main on peer matrices built
    from small sequence sets.

    In the expected alignments, '-' marks a gap and None marks positions
    beyond a sequence's end. Several cases are annotated below as known
    failures.
    """
    def test_nest_msa_main_0(self):
        correct = [
            ['a', 'a', 'a', 'a'],
            ['b', '-', 'b', 'b'],
            ['c', 'c', 'c', 'c'],
            ['b', 'b', '-', 'b'],
            ['c', 'c', '-', 'c'],
            ['d', 'f', 'h', 'j'],
            ['e', 'g', 'i', 'k'],
            ['m', None, 'm', 'm'],
            [None, None, 'n', None]
        ]
        sequences = ["abcbcdem", "acbcfg", "abchimn", "abcbcjkm"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.nest_msa_main(matrix), correct)
    def test_nest_msa_main_1(self):
        correct = [
            ['a', 'a', 'a', 'a', 'a'],
            ['-', '-', '-', 'a', 'a'],
            ['b', 'b', 'b', 'b', 'b'],
            ['-', 'c', 'c', 'c', 'c'],
            ['-', 'c', 'd', 'c', 'c'],
            ['-', 'd', 'd', 'd', 'c'],
            ['-', 'd', None, 'd', None],
            ['b', None, None, None, None],
            ['c', None, None, None, None],
            ['c', None, None, None, None],
            ['d', None, None, None, None],
            ['d', None, None, None, None]
        ]
        sequences = ["abbccdd", "abccdd", "abcdd", "aabccdd", "aabccc"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.nest_msa_main(matrix), correct)
    def test_nest_msa_main_2(self):  # This one is not passing
        correct = [
            ['a', 'a', 'a', 'a', 'a', 'a'],
            ['b', 'b', 'b', 'b', 'b', '-'],
            ['c', 'c', 'c', 'd', 'c', 'd'],
            ['d', 'd', 'c', 'd', 'c', 'd'],
            ['d', None, None, None, None, None]
        ]
        sequences = ["abcdd", "abcd", "abcc", "abdd", "abcc", "add"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.nest_msa_main(matrix), correct)  # This one is not passing
    def test_nest_msa_main_3(self):
        correct = [
            ['a', 'a'],
            ['b', 'b'],
            ['c', '-'],
            ['d', 'd'],
            ['d', 'd']
        ]
        sequences = ["abcdd", "abdd"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.nest_msa_main(matrix), correct)
    def test_nest_msa_main_4(self):  # This one is not passing
        correct = [
            ['a', 'a'],
            ['a', 'a'],
            ['a', 'a'],
            ['b', '-'],
            ['b', '-'],
            ['b', '-'],
            ['c', 'c'],
            ['c', 'c'],
            ['c', 'c'],
            ['d', None],
            ['d', None],
            ['d', None]
        ]
        sequences = ["aaabbbcccddd", "aaaccc"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.nest_msa_main(matrix), correct)  # This one is not passing
    def test_nest_msa_main_5(self):  # This one is not passing
        correct = [
            ['a', 'a'],
            ['b', 'b'],
            ['b', '-'],
            ['c', 'c'],
            ['d', 'd'],
            ['c', 'c'],
            ['b', 'b'],
            ['-', 'b'],
            ['a', 'a']
        ]
        sequences = ["abbcdcba", "abcdcbba"]
        matrix = nest_msa.create_peer_matrix(sequences)
        self.assertEqual(nest_msa.nest_msa_main(matrix), correct)  # This one is not passing
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# Copyright (C) 2017 Pluralsight LLC
import unittest
from spavro.schema_resolve import resolve
from spavro.exceptions import SchemaResolutionException
class TestResolver(unittest.TestCase):
    """Empty container test case; test methods are attached to it
    dynamically at import time by make_cases()/make_exception_cases()."""
    pass
# (name, writer_schema, reader_schema, expected_resolved_schema) tuples
# that must resolve successfully, covering identity resolution, numeric
# promotions, union upgrades, defaulted fields and enum symbol additions.
pass_cases = (
    ("simple_null", "null", "null", "null"),
    ("simple_int", "int", "int", "int"),
    ("simple_long", "long", "long", "long"),
    ("simple_float", "float", "float", "float"),
    ("promote_int_to_long", "int", "long", "int"),
    ("promote_int_to_double", "int", "double", "int"),
    ("promote_float_to_double", "float", "double", "float"),
    ("promote_long_to_double", "long", "double", "long"),
    ("record_upgrade_to_union_and_default_field",
     {"fields": [{"default": "FOO",
                  "type": {"symbols": ["FOO", "BAR"],
                           "namespace": "",
                           "type": "enum",
                           "name": "F"},
                  "name": "H"}
                 ],
      "type": "record",
      "name": "Test"},
     ["int",
      {"fields": [{"default": "FOO",
                   "type": {"symbols": ["FOO", "BAR"],
                            "namespace": "",
                            "type": "enum",
                            "name": "F"},
                   "name": "H"},
                  {"name": "spork",
                   "type": "int",
                   "default": 1234}
                  ],
       "type": "record",
       "name": "Test"}],
     {'fields': [{"type": {"symbols": ["FOO", "BAR"],
                           "type": "enum",
                           "name": "F"},
                  "name": "H"},
                 {'type': {'type': 'default', 'value': 1234},
                  'name': 'spork'}],
      'type': 'record',
      'name': 'Test'}),
    ("symbol_added_to_reader_enum",
     {"type": "enum", "name": "bigby", "symbols": ["A", "C"]},
     {"type": "enum", "name": "bigby", "symbols": ["A", "B", "C"]},
     {'symbols': ['A', 'C'], 'type': 'enum', 'name': 'bigby'}),
    ("array_items_upgraded_to_union",
     {"type": "array", "items": "string"},
     {"type": "array", "items": ["int", "string"]},
     {'items': 'string', 'type': 'array'})
)
# (name, writer_schema, reader_schema, expected_exception) tuples that must
# fail to resolve: incompatible primitives, precision-losing "promotions",
# missing enum symbols, unions lacking the writer type, name mismatches and
# defaultless new record fields.
exception_cases = (
    ("null_vs_int", "null", "int", SchemaResolutionException),
    ("boolean_vs_int", "boolean", "int", SchemaResolutionException),
    ("lower_precision_promote_long_int", "long", "int", SchemaResolutionException),
    ("lower_precision_promote_double_float", "double", "float", SchemaResolutionException),
    ("missing_symbol_in_read",
     {"type": "enum", "name": "bigby", "symbols": ["A", "C"]},
     {"type": "enum", "name": "bigby", "symbols": ["A", "B"]}, SchemaResolutionException),
    ("union_missing_write_schema",
     "int", ["string", "boolean"], SchemaResolutionException),
    ("record_names_dont_match",
     {"type": "record", "name": "my_name", "fields": [{"type": "int", "name": "A"}]},
     {"type": "record", "name": "not_my_name", "fields": [{"type": "int", "name": "A"}]},
     SchemaResolutionException),
    ("record_field_types_dont_match",
     {"type": "record", "name": "my_name", "fields": [{"type": "string", "name": "A"}]},
     {"type": "record", "name": "my_name", "fields": [{"type": "int", "name": "A"}]},
     SchemaResolutionException),
    ("record_new_field_no_default",
     {"type": "record", "name": "my_name", "fields": [{"type": "string", "name": "A"}]},
     {"type": "record", "name": "my_name", "fields": [{"type": "int", "name": "A"},
                                                      {"type": "int", "name": "B"}]},
     SchemaResolutionException)
)
def create_pass_case(writer, reader, expected):
    """Build a test method asserting that resolving *writer* against
    *reader* yields *expected*."""
    def resolve_write_reader(self):
        self.assertEqual(resolve(writer, reader), expected)
    return resolve_write_reader
def create_exception_case(writer, reader, exception):
    """Build a test method asserting that resolving *writer* against
    *reader* raises *exception*.

    The unused `resolved` and `context` bindings from the original were
    dropped; only the raised exception matters.
    """
    def resolve_write_reader(self):
        with self.assertRaises(exception):
            resolve(writer, reader)
    return resolve_write_reader
def make_cases(cases):
    """Attach one successful-resolution test method to TestResolver for
    each (name, writer, reader, expected) tuple in *cases*."""
    for name, writer, reader, expected in cases:
        method = create_pass_case(writer, reader, expected)
        method.__name__ = 'test_schema_resolution_{}'.format(name)
        setattr(TestResolver, method.__name__, method)
def make_exception_cases(cases):
    """Attach one exception-expecting test method to TestResolver for each
    (name, writer, reader, exception) tuple in *cases*."""
    for name, writer, reader, exception in cases:
        method = create_exception_case(writer, reader, exception)
        method.__name__ = 'test_incompatible_schema_{}'.format(name)
        setattr(TestResolver, method.__name__, method)
# Populate TestResolver with all generated test methods at import time.
make_cases(pass_cases)
make_exception_cases(exception_cases)
|
import argparse
import os
from tensorflow.contrib.learn.python.learn.utils import (
saved_model_export_utils)
from tensorflow.contrib.training.python.training import hparam
# ---------------------------------------------------
# Library used for loading a file from Google Storage
# ---------------------------------------------------
from tensorflow.python.lib.io import file_io
# ---------------------------------------------------
# Library used for uploading a file to Google Storage
# ---------------------------------------------------
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import tensorflow as tf
import matplotlib as mpl
mpl.use('agg')
import os
import matplotlib.pyplot as plt
import csv
import numpy as np
def MinMaxScaler(data):
    ''' Min-max normalisation: scale each column of *data* into [0, 1].

    Parameters
    ----------
    data : numpy.ndarray
        Input of shape [batch size, dimension].

    Returns
    -------
    numpy.ndarray
        Column-wise normalised copy of *data*.
    '''
    col_min = np.min(data, 0)
    col_range = np.max(data, 0) - col_min
    # The epsilon keeps constant columns from dividing by zero.
    return (data - col_min) / (col_range + 1e-7)
def load_series(filename):
    """Read a CSV file and return its non-empty rows, or None on I/O error.

    *filename* is a one-element list (the --train-files argument); only its
    first entry is used. Reading goes through TensorFlow's file_io so GCS
    paths work as well as local ones.
    """
    path = filename[0]
    try:
        with file_io.FileIO(path, mode='r') as csvfile:
            print("===in load_series function, fileIO===")
            return [row for row in csv.reader(csvfile) if len(row) > 0]
    except IOError:
        return None
def run_experiment(hparams):
    """Train a 3-hidden-layer tanh autoencoder on the CSV named by
    hparams.train_files, report the test reconstruction loss, save a 3x3
    grid of reconstruction plots, and upload the image to Google Cloud
    Storage.

    NOTE(review): written against TensorFlow 1.x (placeholders/Sessions) —
    confirm the deployed TF version.
    """
    data = load_series(hparams.train_files)
    print("=====run experiment=====")
    # data arrives as a list of strings; cast it to a float numpy array
    # (the first row is the CSV header, hence the delete).
    data = np.array(data)
    data = np.delete(data, (0), axis=0)
    data = data.astype(float)
    #print(data)
    #standardization, scale to [-1,1]
    xy = MinMaxScaler(data)
    x = xy[:,0:-1]
    x = (x*2)-1
    # Scale to [-1,1]??
    #hyperparameter
    seq_length = 8
    n_hidden_1 = 200
    n_latent = 100
    n_hidden_2 = 200
    n_epochs = 5000
    batch_size = 4000
    learn_rate = 0.005
    lossname = 'rmse'
    # Build the dataset: each sample is a flattened window of seq_length
    # consecutive rows.
    print("========data building started========")
    data_X = []
    for i in range(0, len(x) - seq_length):
        _x = x[i:i+seq_length]
        _x = np.reshape(_x, -1)  # flatten the window into a single row
        data_X.append(_x)
    # Chronological 80/20 train/test split; the autoencoder target is the
    # input itself.
    print("=====train/test split started=====")
    train_size = int(len(data_X)*0.8)
    train_X, test_X = np.array(data_X[0:train_size]), np.array(data_X[train_size:len(data_X)])
    train_Y= train_X
    #print(train_X.shape)
    #print(test_X.shape)
    n_samp, n_input = train_X.shape
    print("=====modeling started=====")
    x = tf.placeholder("float", [None, n_input])
    # Weights and biases to hidden layer (uniform init scaled by 1/sqrt(fan-in))
    Wh = tf.Variable(tf.random_uniform((n_input, n_hidden_1), -1.0 / np.sqrt(n_input), 1.0 / np.sqrt(n_input)))
    bh = tf.Variable(tf.zeros([n_hidden_1]))
    h = tf.nn.tanh(tf.matmul(x,Wh) + bh)
    W_latent = tf.Variable(tf.random_uniform((n_hidden_1, n_latent), -1.0 / np.sqrt(n_hidden_1), 1.0 / np.sqrt(n_hidden_1)))
    b_latent = tf.Variable(tf.zeros([n_latent]))
    h_latent = tf.nn.tanh(tf.matmul(h,W_latent) + b_latent)
    Wh_2 = tf.Variable(tf.random_uniform((n_latent, n_hidden_2), -1.0 / np.sqrt(n_latent), 1.0 / np.sqrt(n_latent)))
    bh_2 = tf.Variable(tf.zeros([n_hidden_2]))
    h_2 = tf.nn.tanh(tf.matmul(h_latent,Wh_2) + bh_2)
    # Weights and biases to output layer
    Wo = tf.Variable(tf.random_uniform((n_hidden_2, n_input), -1.0 / np.sqrt(n_hidden_2), 1.0 / np.sqrt(n_hidden_2)))
    #Wo = tf.transpose(Wh_2) # tied weights
    bo = tf.Variable(tf.zeros([n_input]))
    output = tf.nn.tanh(tf.matmul(h_2,Wo) + bo)
    # Objective functions
    y = tf.placeholder("float", [None,n_input])
    if lossname == 'rmse':
        # NOTE(review): despite the name this is MSE, not RMSE (no sqrt).
        loss = tf.reduce_mean(tf.square(tf.subtract(y, output)))
    elif lossname == 'cross-entropy':
        loss = -tf.reduce_mean(y * tf.log(output))
    # optimization
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)
    print("=====training started=====")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(n_epochs):
            # Random minibatch sampled with replacement.
            sample = np.random.randint(n_samp, size=batch_size)
            batch_xs = train_X[sample][:]
            batch_ys = train_Y[sample][:]
            sess.run(train_op, feed_dict={x: batch_xs, y:batch_ys})
            # print loss every 100 epochs
            if epoch % 100 == 0:
                # NOTE(review): `i` here is the leftover index from the
                # dataset-building loop; `epoch` was almost certainly
                # intended.
                print (i, sess.run(loss, feed_dict={x: batch_xs, y:batch_ys}))
        # Obtain reconstructions and report the loss on the held-out set.
        pred = sess.run( output , feed_dict={x: test_X})
        test_loss = sess.run( loss , feed_dict={x: test_X, y: test_X})
        print("test_loss : {}".format(test_loss))
    print("========saving graph started========")
    # Plotting: save the figure straight to a file (matplotlib uses the
    # non-interactive 'agg' backend selected at import time).
    plt.figure()
    ax11=plt.subplot(331)
    ax12=plt.subplot(332)
    ax13=plt.subplot(333)
    ax21=plt.subplot(334)
    ax22=plt.subplot(335)
    ax23=plt.subplot(336)
    ax31=plt.subplot(337)
    ax32=plt.subplot(338)
    ax33=plt.subplot(339)
    # Overlay ground truth and reconstruction for nine hand-picked feature
    # columns.
    ax11.plot(test_X[:,50])
    ax11.plot(pred[:,50])
    ax12.plot(test_X[:,49])
    ax12.plot(pred[:,49])
    ax13.plot(test_X[:,48])
    ax13.plot(pred[:,48])
    ax21.plot(test_X[:,47])
    ax21.plot(pred[:,47])
    ax22.plot(test_X[:,39])
    ax22.plot(pred[:,39])
    ax23.plot(test_X[:,38])
    ax23.plot(pred[:,38])
    ax31.plot(test_X[:,37])
    ax31.plot(pred[:,37])
    ax32.plot(test_X[:,36])
    ax32.plot(pred[:,36])
    ax33.plot(test_X[:,5])
    ax33.plot(pred[:,5])
    plt.savefig('AE_6_learningrate_005.png')
    # Upload the saved figure to the GCS bucket via the JSON storage API.
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('storage', 'v1', credentials=credentials)
    filename = 'AE_6_learningrate_005.png'
    bucket = 'adam-models'
    body = {'name': 'im5_os_stat_wait/AE/graphs/AE_6_learningrate_005.png'}
    req = service.objects().insert(bucket=bucket, body=body, media_body=filename)
    resp = req.execute()
    # No-op under the 'agg' backend; kept for interactive runs.
    plt.show()
if __name__ == '__main__':
    # ---------------------------------------------
    # command parsing from Google ML Engine Example
    # ---------------------------------------------
    parser = argparse.ArgumentParser()
    # Input Arguments
    parser.add_argument(
        '--train-files',
        help='GCS or local paths to training data',
        nargs='+',
        required=True
    )
    parser.add_argument(
        '--num-epochs',
        help="""\
        Maximum number of training data epochs on which to train.
        If both --max-steps and --num-epochs are specified,
        the training job will run for --max-steps or --num-epochs,
        whichever occurs first. If unspecified will run for --max-steps.\
        """,
        type=int,
    )
    parser.add_argument(
        '--train-batch-size',
        help='Batch size for training steps',
        type=int,
        default=40
    )
    parser.add_argument(
        '--eval-batch-size',
        help='Batch size for evaluation steps',
        type=int,
        default=40
    )
    # -------------------------------
    # If evaluation file is prepared,
    # change 'required' value
    # -------------------------------
    parser.add_argument(
        '--eval-files',
        help='GCS or local paths to evaluation data',
        nargs='+',
        required=False
    )
    # Training arguments
    parser.add_argument(
        '--embedding-size',
        help='Number of embedding dimensions for categorical columns',
        default=8,
        type=int
    )
    parser.add_argument(
        '--first-layer-size',
        help='Number of nodes in the first layer of the DNN',
        default=100,
        type=int
    )
    parser.add_argument(
        '--num-layers',
        help='Number of layers in the DNN',
        default=4,
        type=int
    )
    parser.add_argument(
        '--scale-factor',
        help='How quickly should the size of the layers in the DNN decay',
        default=0.7,
        type=float
    )
    parser.add_argument(
        '--job-dir',
        help='GCS location to write checkpoints and export models',
        required=True
    )
    # Argument to turn on all logging
    parser.add_argument(
        '--verbosity',
        choices=[
            'DEBUG',
            'ERROR',
            'FATAL',
            'INFO',
            'WARN'
        ],
        default='INFO',
    )
    # Experiment arguments
    parser.add_argument(
        '--train-steps',
        help="""\
        Steps to run the training job for. If --num-epochs is not specified,
        this must be. Otherwise the training job will run indefinitely.\
        """,
        type=int
    )
    parser.add_argument(
        '--eval-steps',
        help='Number of steps to run evalution for at each checkpoint',
        default=100,
        type=int
    )
    parser.add_argument(
        '--export-format',
        help='The input format of the exported SavedModel binary',
        choices=['JSON', 'CSV', 'EXAMPLE'],
        default='JSON'
    )
    args = parser.parse_args()
    # Set python level verbosity
    tf.logging.set_verbosity(args.verbosity)
    # Set C++ Graph Execution level verbosity
    # NOTE(review): on Python 3 the `/ 10` is true division, so the env var
    # becomes e.g. "2.0" rather than "2" — confirm TF still parses it.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] / 10)
    # Run the training job
    hparams=hparam.HParams(**args.__dict__)
    run_experiment(hparams)
# Demonstrate sorting a dictionary by key via a list of (key, value) tuples.
d = {'a': 10, 'b': 1, 'c': 22}
# Materialise the dictionary's items as a list of tuples.
t = list(d.items())
print(t)
# Tuples compare element-wise, so this orders the pairs by key.
t.sort()
print(t)
|
# 用反射完成了
# python xx.py cp path1 path2
# python xx.py rm path
# python xx,py mv path1 path2
# sys.argv练习
# 写一个python脚本,在cmd里执行
# python xxx.py 用户名 密码 cp 文件路径 目的地址
# python xxx.py alex sb cp D:\python_22\day22\1.内容回顾.py D:\python_22\day21
# python xxx.py alex sb rm D:\python_22\day22
# python xxx.py alex sb rename D:\python_22\day22 D:\python_22\day23
import sys
import shutil
import time
import os
dic = {'uid':'fallen','passwd':'123456'}
def login(username,password):
    """Check *username*/*password* against the module-level `dic`
    credential store, print the outcome, and return True on success."""
    ok = username == dic['uid'] and password == dic['passwd']
    if ok:
        print('登陆成功')
    else:
        print('login failed.')
    return ok
def cp(s_path,t_path):
    """Copy the file at *s_path* into directory *t_path*, keeping its
    basename and metadata (copy2). Silently does nothing if either path
    does not exist."""
    if not (os.path.exists(s_path) and os.path.exists(t_path)):
        return
    destination = os.path.join(t_path, os.path.basename(s_path))
    shutil.copy2(s_path, destination)
def rm(s_path,t_path):
    """Delete *s_path*: a regular file via os.remove, a directory tree via
    shutil.rmtree. *t_path* is unused but kept because the reflection
    dispatcher always passes two paths.

    Bug fix: the original tested `os.path.isfile` (the function object,
    which is always truthy) instead of calling it, so directories were
    handed to os.remove and raised instead of being removed recursively.
    """
    if os.path.exists(s_path):
        if os.path.isfile(s_path):
            os.remove(s_path)
        else:
            shutil.rmtree(s_path)
def rename(s_path,t_path):
    """Rename/move *s_path* to *t_path*; a missing source is ignored."""
    if not os.path.exists(s_path):
        return
    os.rename(s_path,t_path)
def main():
    """Parse argv as <user> <password> <command> <source> <target>, then
    dispatch <command> by name to the same-named module-level function
    (cp/rm/rename) via reflection."""
    username = sys.argv[1]
    password = sys.argv[2]
    function = sys.argv[3]
    source_path = sys.argv[4]
    target_path = sys.argv[5]
    if login(username,password):
        # Look the command up on the running script's module so any
        # top-level function can be invoked by name.
        if hasattr(sys.modules['__main__'],function):
            getattr(sys.modules['__main__'],function)(source_path,target_path)
        else:
            print('NO SUCH FUNCTION!')
            time.sleep(0.6)
    else:
        print('用户名或密码错误')
# Script entry point: run the reflective command dispatcher.
if __name__=='__main__':
    main()
|
def index_is_valid(index, message):
    """Return True when *index* is a legal position into *message*."""
    return index >= 0 and index < len(message)
def word_is_valid(word, message):
    """Return True when *word* occurs anywhere in *message*."""
    return any(item == word for item in message)
# Interactive message-reconstruction exercise: apply Delete/Swap/Put/Sort/
# Replace commands to a word list until "Stop", then print the result.
disordered_message = input().split()
command_input = input()
while command_input != "Stop":
    instruction = command_input.split()
    command = instruction[0]
    if command == "Delete":
        # NOTE(review): the word removed is the one AFTER the given index
        # (index + 1) — confirm against the exercise statement.
        index = int(instruction[1])
        if index_is_valid(index + 1, disordered_message):
            disordered_message.pop(index + 1)
    elif command == "Swap":
        # Swap the first occurrences of the two named words.
        word_1 = instruction[1]
        word_2 = instruction[2]
        if word_is_valid(word_1, disordered_message) and word_is_valid(word_2, disordered_message):
            word_1_index = disordered_message.index(word_1)
            word_2_index = disordered_message.index(word_2)
            disordered_message[word_1_index], disordered_message[word_2_index] = disordered_message[word_2_index], \
                disordered_message[word_1_index]
    elif command == "Put":
        # index, word = int(instruction[2]) - 1, instruction[1]
        # if index - 1 in range(len(disordered_message)):
        #     disordered_message.insert(index, word)
        word = instruction[1]
        # "Put" positions are 1-based in the input, hence the -1.
        index = int(instruction[2]) - 1
        # if 0 <= index <= len(disordered_message):
        #     disordered_message.insert(index, word)
        if index == len(disordered_message):
            # Appending is allowed: position one past the current end.
            disordered_message.append(word)
        elif index_is_valid(index, disordered_message):
            disordered_message.insert(index, word)
    elif command == "Sort":
        # Descending order per the exercise rules.
        disordered_message.sort(reverse=True)
    elif command == "Replace":
        # Replace the first occurrence of word_2 with word_1.
        word_1 = instruction[1]
        word_2 = instruction[2]
        if word_is_valid(word_2, disordered_message):
            word_2_index = disordered_message.index(word_2)
            disordered_message[word_2_index] = word_1
    command_input = input()
print(" ".join(disordered_message))
# Read a digit string and print the smallest |753 - x| over all 3-digit
# substrings x (AtCoder "753" style problem).
s = input()
ans = 1e9
# NOTE(review): range(len(s)-1) lets the final windows be shorter than 3
# digits; those parse to <=99 and differ from 753 by >=654, while some full
# window always differs by <=653, so the answer is unaffected — but
# range(len(s)-2) would express the intent directly.
for i in range(len(s)-1):
    ans = min(ans,abs(753-int(s[i:i+3])))
print(ans)
from tkinter import *
import threading
import socket
hote = "127.0.0.1"  # server host (localhost)
port = 15555
# NOTE(review): this rebinds the name `socket` from the module to the
# connected client socket, shadowing the import from here on.
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connects at import time; the script dies with ConnectionRefusedError if
# no server is listening on the port.
socket.connect((hote, port))
print("Connection sur {}".format(port))
def debianeuf(event):
    """Key handler: translate an arrow-key press into its one-letter
    protocol command, send it twice over the module-level socket, and echo
    the key name."""
    key = event.keysym
    # Arrow key -> protocol letter.
    protocol = {"Down": "a", "Up": "b", "Right": "c", "Left": "d"}
    if key in protocol:
        payload = protocol[key].encode()
        socket.send(payload)
        socket.send(payload)
        print(key)
def rien():
    """Build the tkinter window and forward key presses to the server via
    debianeuf; blocks in the tk main loop."""
    fenetre = Tk()
    frame = Frame(fenetre, width=100, height=100)
    canvas = Canvas(fenetre, width=500, height=500)
    canvas.focus_set()
    # NOTE(review): canvas.bind() only registers the handler and returns
    # immediately, so running it in a thread buys nothing — a plain
    # canvas.bind("<Key>", debianeuf) would behave the same. Confirm intent.
    threading.Thread(target=canvas.bind,args=("<Key>", debianeuf)).start()
    canvas.pack()
    frame.pack()
    fenetre.mainloop()
# Start the GUI immediately when the script runs.
rien()
|
import math
POGSON_RATIO = -2.5
def e_dist(d, plx, e_plx):
    """Propagate the parallax error *e_plx* into a distance error for a
    distance *d* derived from parallax *plx* (fractional-error rule)."""
    fractional = e_plx / plx
    return -fractional * d
def e_AbsMag(appMag, e_appMag, dist, e_dist):
    """Error on the absolute magnitude: the apparent-magnitude error plus
    the 5*log10(distance) term's contribution (0.434 ~ log10(e))."""
    distance_term = 5 * 0.434 * (e_dist / dist)
    return e_appMag + distance_term
def e_BM(e_AM):
    """Bolometric-magnitude error: equal to the absolute-magnitude error
    (the bolometric correction is treated as exact)."""
    return e_AM
def e_L(L,e_BM):
    """Propagate the bolometric-magnitude error *e_BM* into a luminosity
    error for luminosity *L* (2.303 ~ ln 10, POGSON_RATIO = -2.5)."""
    fractional = 2.303 * (e_BM / POGSON_RATIO)
    return fractional * L
def e_Inner(e_L, L, I):
    """Error on the inner habitable-zone radius *I* from the fractional
    luminosity error (1.1 is the inner-edge flux coefficient)."""
    return 0.5 * I * (e_L / (1.1 * L))
def e_Outer(e_L, L, O):
    """Error on the outer habitable-zone radius *O* from the fractional
    luminosity error (0.53 is the outer-edge flux coefficient)."""
    return 0.5 * O * (e_L / (0.53 * L))
# Generated by Django 3.0.5 on 2020-08-13 18:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: drop the `numMins` field from the
    authentication app's Attendance model."""
    dependencies = [
        ('authentication', '0007_auto_20200813_1043'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='attendance',
            name='numMins',
        ),
    ]
|
#!/usr/bin/env python
from os import listdir
from os.path import isfile,join
import datetime
from save import save_data
import shutil
# Directory holding the timestamped capture directories to process.
outputDir='../results/'
# Column indices into a split `top` process line:
# PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
top_user=1
top_command=11
top_cpu=8
top_mem=9
# Arithmetic mean of a non-empty numeric sequence.
avg = lambda l:sum(l) / float(len(l))
def parse_top(path_to_top):
    '''Parse `top` output at *path_to_top* into [user, command, %cpu, %mem]
    rows, keeping only process lines (those whose first token is a numeric
    PID).

    Fix: the file is now opened with a context manager so the handle is
    closed even if parsing raises (the original leaked it on error).
    '''
    data = []
    # Header layout for reference:
    # PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
    with open(path_to_top) as f:
        for l in f.readlines():
            line = l.split()
            if(len(line) > 0 and line[0].isdigit()):
                data.append([line[top_user],line[top_command],line[top_cpu],line[top_mem]])
    return data
def process_user_count(top_info):
    '''Aggregate parsed `top` rows per (user, command) pair.

    Returns a list of
        (user, command, process_count, cpu_sum, mem_sum, cpu_avg, mem_avg)
    tuples sorted by total memory usage, descending.

    Fix: uses dict.items() instead of the Python-2-only dict.iteritems(),
    preserving Python 2 behaviour while also running on Python 3; the
    averages are computed inline rather than via the module-level `avg`
    helper (same arithmetic).
    '''
    user_cmd = {}
    # Collect the (cpu, mem) samples per key; the capture uses a decimal
    # comma, hence the replace() before float().
    for user,command,cpu,mem in top_info:
        key = (user,command)
        if(key not in user_cmd):
            user_cmd[key] = []
        user_cmd[key].append( (float(cpu.replace(',','.')),float(mem.replace(',','.')) ))
    # Sum and average each key's samples.
    new_list = []
    for k,v in user_cmd.items():
        cpu,mem = zip(*v)
        d = ( k[0],k[1], len(v), sum(cpu),sum(mem),
              sum(cpu)/float(len(cpu)), sum(mem)/float(len(mem)) )
        new_list.append(d)
    # Sort by memory sum, highest first.
    new_list.sort(key=lambda s:-s[4])
    return new_list
def parse_inode(path_to_inode_file):
    '''Parse inode usage into {device: [total, used, free]}.

    Only the first occurrence of each device is kept; arguably only two of
    the three counters are needed, but all are stored.
    '''
    disks = {}
    f = open(path_to_inode_file)
    for raw in f.readlines():
        fields = raw.split()
        if fields[0] not in disks:
            disks[fields[0]] = [int(fields[1]), int(fields[2]), int(fields[3])]
    f.close()
    return disks
def parse_memory(path_to_memory_file):
    '''Parse `free`-style output into {memory_type: [total, used, free]},
    keeping only rows whose first token ends with ':' (e.g. "Mem:").
    '''
    memory = {}
    f = open(path_to_memory_file)
    for raw in f.readlines():
        fields = raw.split()
        if(fields[0].endswith(':')):
            memory[fields[0]] = [int(fields[1]), int(fields[2]), int(fields[3])]
    f.close()
    return memory
def parse_float(f):
    '''Convert a locale-ish number string to float: drop one trailing comma
    and treat a decimal comma as a decimal point.'''
    text = f[:-1] if f[-1] == ',' else f
    return float(text.replace(',', '.'))
def parse_load(path_to_load_file):
    '''Return the (1, 5, 15 minute) load averages from an `uptime` capture:
    the last three whitespace-separated tokens of its first line.'''
    f = open(path_to_load_file)
    tokens = f.readline().split()
    f.close()
    return parse_float(tokens[-3]), parse_float(tokens[-2]), parse_float(tokens[-1])
def load_all(dir):
    '''Process every timestamped result directory under *dir*: parse its
    top/inodes/mem/uptime captures, persist them via save_data, then delete
    the consumed directory.

    Fix: `print directories` (a Python-2-only print statement) is now
    `print(directories)`, which prints identically on Python 2 and also
    runs on Python 3.
    '''
    # Each subdirectory is named like 2014-01-31-2359.
    directories = [ f for f in listdir(dir) if not isfile(join(dir,f)) ]
    print(directories)
    for result_dir in directories:
        timestamp = datetime.datetime.strptime(result_dir,'%Y-%m-%d-%H%M')
        # Per-user process stats, plus disk, memory and load snapshots.
        data = parse_top(join(dir,result_dir,'top'))
        process_data = process_user_count(data)
        inode_data = parse_inode(join(dir,result_dir,'inodes'))
        memory_data = parse_memory(join(dir,result_dir,'mem'))
        load_data = parse_load(join(dir,result_dir,'uptime'))
        save_data(timestamp,process_data,memory_data,inode_data,load_data)
        # The raw capture has been persisted; remove it so it is not
        # processed again on the next run.
        shutil.rmtree(join(dir,result_dir))
if __name__=='__main__':
    # Process and archive everything currently sitting in ../results/.
    load_all(outputDir)
|
from unittest import TestCase
from signup import geo
class TestNeighbors(TestCase):
    """geo.neighbors should order constituencies by distance from a given
    constituency's centre, tolerating entries with missing geometry."""
    # Constituencies neighboring other constituencies
    # Edited-down output from twfy getGeometry
    # http://www.theyworkforyou.com/api/docs/getGeometry
    data = {
        "Chipping Barnet" : {
            "name" : "Chipping Barnet",
            "centre_lat" : 51.6395895436,
            "centre_lon" : -0.192217329457,
        },
        "Hendon" : {
            "name" : "Hendon",
            "centre_lat" : 51.606570454,
            "centre_lon" : -0.252407672041,
        },
        "Altrincham & Sale West" : {
            "name" : "Altrincham & Sale West",
            "centre_lat" : 53.3989495951,
            "centre_lon" : -2.38207857643,
        },
        "Hertsmere" : {
            "name" : "Hertsmere",
            "centre_lat" : 51.6802918234,
            "centre_lon" : -0.274986273182,
        },
        "Stretford & Urmston" : {
            "name" : "Stretford & Urmston",
            "centre_lat" : 53.4450638328,
            "centre_lon" : -2.35374956251,
        },
        "Tatton" : {
            "name" : "Tatton",
            "centre_lat" : 53.2797662137,
            "centre_lon" : -2.38760476605,
        },
    }
    # when twfy doesn't know the data for a constituency, they return
    # records like this
    tricky_data = {
        u'Belfast East': {},
        u'Belfast North': {},
        u'Belfast South': {},
        u'Belfast West': {}
    }
    def test_center(self):
        self.assertEqual((53.3989495951, -2.38207857643),
                         geo.center(self.data, "Altrincham & Sale West"))
    def test_neigbors_south(self):
        # Hendon & Hertsmere are closer to Chipping Barnet than Tatton
        self.assertEqual(geo.neighbors("Chipping Barnet", limit=3, _data=self.data),
                         ["Hendon", "Hertsmere", "Tatton"])
    def test_neigbors_north(self):
        # Tatton and Stretford are closer to Altrincham than Hertsmere
        self.assertEqual(geo.neighbors("Altrincham & Sale West", limit=3, _data=self.data),
                         ["Stretford & Urmston", "Tatton", "Hertsmere"])
    def test_tricky_data(self):
        # should not explode if the constituency does not have a full
        # set of data
        data = self.data.copy()
        data.update(self.tricky_data)
        self.assertEqual(geo.neighbors("Altrincham & Sale West", limit=3, _data=data),
                         ["Stretford & Urmston", "Tatton", "Hertsmere"])
class TestGeoConstituency(TestCase):
"""
geo.constituency tries to give you the constituency that a "place"
is in. Place may be a postcode or the name of a town.
"""
def assertIn(self, val, container, msg=None):
if not msg:
msg = "%r not in %r" % (val, container)
self.assert_(val in container, msg)
def test_geocode(self):
name, (lat, lng) = geo.geocode("Newham")
self.assertIn(u"Newham", name)
def test_town1(self):
# you can search for a town
self.assertEquals("Crewe & Nantwich", geo.constituency("Crewe"))
def test_town2(self):
self.assertEquals("Falkirk", geo.constituency("Alloa"))
def test_town3(self):
self.assertEquals("Shipley", geo.constituency("Ilkley"))
def _test_town4(self): # SKIPPED
# XXX this is broken because the twfy api have no data about Belfast
self.assertEquals("Belfast", geo.constituency("Forkhill"))
def test_postcode1(self):
# Land's End
self.assertEquals("St Ives", geo.constituency("TR19 7AA"))
def test_postcode_nonexistant(self):
# there are no postcodes that start with D
self.assertEquals(None, geo.constituency("D7 7QX"))
def test_postcode_forces(self):
# Postcodes run by the British forces post office . We can't
# do anything with these (they don't point towards a
# constituency)
self.assertEquals(None, geo.constituency("BFPO 801"))
|
import hashlib
import os.path
class Sha:
    """Compute and compare SHA-256 digests of files.

    Fix: the original docstrings claimed MD5 while the implementation has
    always used hashlib.sha256 — the documentation now matches the code.
    File handles are managed with `with` so they close even on error.
    """
    def shakey(self,filename):
        """
        Return the SHA-256 hex digest of *filename*.
        :param filename: path of the file to hash
        :return: hex digest string, or None if *filename* is not a regular file
        """
        if not os.path.isfile(filename):  # nothing to hash
            return
        myhash = hashlib.sha256()
        # Read in chunks so arbitrarily large files never need to fit in
        # memory.
        with open(filename, 'rb') as f:
            while True:
                b = f.read(8096)
                if not b:
                    break
                myhash.update(b)
        return myhash.hexdigest()
    def sha_write(self,source_filename,new_filename):
        """Write the digest of *source_filename* into *new_filename*."""
        with open(new_filename,mode='w',encoding='utf-8') as f:
            f.write(self.shakey(source_filename))
        print('写入完成')
    def sha_check(self,filename1,filename2):
        """Hash both files and report on stdout whether the digests match."""
        sha1=self.shakey(filename1)
        sha2=self.shakey(filename2)
        if sha1==sha2:
            print('文件一致')
        else:
            print('文件不同')
            print(sha1)
            print(sha2)
# Interactive driver: hash the named file and store the digest alongside it.
filename=str(input('请输入原文件名'))
# NOTE(review): the sidecar extension is ".md5" although the digest written
# is SHA-256 — ".sha256" would be truthful (kept as-is to preserve
# behaviour).
filename2=filename+'.md5'
file=Sha()
file.sha_write(filename,filename2)
# -*- coding: UTF-8 -*-
# Filename : dataTransfer.py
# author by : Jay
#!/usr/bin/env python3
import re
# Demonstrations of re.match, re.search and re.sub.
# re.match anchors at the start of the string: 'www' matches, 'cn' does not.
print(re.match('www', 'www.taobao.com').span())
print(re.match('cn', 'www.taobao.com'))
line = "Cats are smarter than dogs"
match_result = re.match(r'(.*) are (.*?) .*', line, re.M | re.I)
if not match_result:
    print("No match!")
else:
    print("matchObject.group():", match_result.group())
    print("matchObject.group(1):", match_result.group(1))
    print("matchObject.group(2):", match_result.group(2))
# re.search scans the whole string and returns the first match anywhere.
print(re.search('www', 'www.taobao.cn').span())
print(len('www.taobao.com'))
print(re.search('com', 'www.taobao.com').span())
search_result = re.search(r'(.*) are (.*?) .*', line, re.M | re.I)
if not search_result:
    print("No match!")
else:
    print("searchObject.group():", search_result.group())
    print("searchObject.group(1):", search_result.group(1))
    print("searchObject.group(2):", search_result.group(2))
# re.sub replaces matches: first strip the trailing '#' comment, then keep
# only the digits.
phone = "2004-959-559 # 这是一个电话号码"
num = re.sub(r'#.*$', '', phone)
print("num is :" + num)
digtal = re.sub(r'\D', '', phone)
print("num is :" + digtal)
#!/usr/bin/env python3
"""\
Helper script to quickly create test cases.
Usage:
make_test_case.py <family> <template> [options]
Arguments:
<family>
The first part of the test case name, e.g. "dict", "string", etc. A
number will be added to the end of this prefix to create a unique test
id.
<template>
The name of the template (i.e. one of the directories in templates/)
that will be used to populate the test case. You will be prompted to
edit every non-symlink file in this directory. Below are the files
created by the available templates:
{}
Options:
-r --readme
Include a `README` in the test case. This is useful if you want to
highlight a unique or subtle feature of the test.
-n --num <int>
Append the given number to the above family name rather than
automatically picking the next consecutive number. Note that this
doesn't actually have to be a number, e.g. you could specify "2a" to
make a new test that will go between "2" and "3" after renumbering.
-e --editor <path>
The editor to use to edit the test case files. The default is to use
the value of the $EDITOR environment variable, or `vim` if that
variable is undefined.
"""
import os, re, docopt, shutil
from pathlib import Path
from subprocess import run
from inform import fatal
from textwrap import indent
# Repository layout: test cases live in test_cases/, templates in
# templates/, both next to this script.
ROOT_DIR = Path(__file__).parent
CASE_DIR = ROOT_DIR / 'test_cases'
TEMPLATE_DIR = ROOT_DIR / 'templates'
# Preferred display order for template directories in the usage text;
# unknown names sort last (see the .get(..., len(...)) sort keys below).
TEMPLATE_ORDER = {
    x: i
    for i,x in enumerate([
        'l',
        'ld',
        'lD',
        'le',
        'd',
        'de',
    ])
}
# Preferred display/edit order for the files inside a test case.
FILE_ORDER = {
    x: i
    for i, x in enumerate([
        'README',
        'load_in.nt',
        'load_out.json',
        'load_err.json',
        'dump_in.nt',
        'dump_out.json',
        'dump_err.json',
    ])
}
def name_test_dir(prefix, num):
    """Return the path of a new test-case directory named '<prefix>_<num>'
    under CASE_DIR.

    When *num* is None, the next consecutive number after the existing
    '<prefix>_<n>' directories is chosen (*num* may also be a string like
    "2a" to slot a case between existing numbers).

    Fix: the numbering pattern is now a raw f-string — the bare '\\d' in a
    plain string literal raises a DeprecationWarning today and is slated to
    become a SyntaxError.
    """
    if num is None:
        max_num = 0
        for path in CASE_DIR.iterdir():
            if m := re.fullmatch(rf'{prefix}_(\d+)', path.name):
                max_num = max(max_num, int(m.group(1)))
        num = max_num + 1
    return CASE_DIR / f'{prefix}_{num}'
def document_templates():
    """Render the usage-text listing of every template directory and its
    files (in TEMPLATE_ORDER/FILE_ORDER order), marking symlinks with the
    name of their target."""
    lines = []
    ordered_dirs = sorted(
        TEMPLATE_DIR.iterdir(),
        key=lambda p: TEMPLATE_ORDER.get(p.name, len(TEMPLATE_ORDER))
    )
    for template_dir in ordered_dirs:
        lines.append(f"{template_dir.name}:\n")
        ordered_files = sorted(
            template_dir.iterdir(),
            key=lambda p: FILE_ORDER.get(p.name, len(FILE_ORDER)),
        )
        for path in ordered_files:
            notes = f" (symlink to {path.resolve().name})" if path.is_symlink() else ""
            lines.append(f"  {path.name}{notes}\n")
    return "".join(lines).strip()
if __name__ == '__main__':
    # Build the usage text (with the template listing spliced in) and
    # parse the command line with docopt.
    template_docs = document_templates()
    args = docopt.docopt(__doc__.format(indent(template_docs, 8*' ')))
    case = name_test_dir(args['<family>'], args['--num'])
    template = TEMPLATE_DIR / args['<template>']
    if not template.is_dir():
        fatal("template not found", culprit=template.name)
    # Copy the template wholesale, preserving its symlinks.
    shutil.copytree(template, case, symlinks=True)
    print(case)
    if args['--readme']:
        readme = case / 'README'
        readme.touch()
    # Open every regular (non-symlink) file of the new case in the chosen
    # editor, in canonical FILE_ORDER.
    editor = args['--editor'] or os.environ.get('EDITOR', 'vim')
    editor_args = [
        str(p)
        for p in sorted(
            case.iterdir(),
            key=lambda p: FILE_ORDER.get(p.name, len(FILE_ORDER)),
        )
        if p.is_file() and not p.is_symlink()
    ]
    run([editor, *editor_args])
|
import pyglet
from pyglet.gl import *
#import pygame
import pymunk
from pymunk import Vec2d
import math
from math import sin,cos,atan2,pi
import levelassembler
import loaders
class Truck(object):
    """Player-controlled truck: a pymunk chassis, two sprung wheels, and the
    pyglet sprites / debug outlines that visualize them."""
    def __init__(self,
                 space,
                 starting_pos,
                 level_batch,
                 debug_batch,
                 ui_batch,
                 lfg,
                 lfg2,
                 lfg3,):
        """Build the truck's physics bodies, constraints and sprites.

        :param space: pymunk.Space that bodies/shapes/constraints are added to
        :param starting_pos: (x, y) world position for the chassis
        :param level_batch: pyglet batch for the visible sprites
        :param debug_batch: pyglet batch for the green collision outlines
        :param ui_batch: pyglet batch for UI elements (not used in this method)
        :param lfg, lfg2, lfg3: pyglet groups — presumably draw-order layers
            for suspension, body and wheels; confirm against the caller.
        """
        self.space = space
        self.starting_pos = starting_pos
        ## Chassis
        chassis_inertia = pymunk.moment_for_box(.9, 104, 18)
        self.chassis_body = pymunk.Body(.9,chassis_inertia)
        self.chassis_body.position = starting_pos
        self.chassis_body.group = 1
        space.add(self.chassis_body)
        origin = (0,0)
        # Convex polygons (chassis-local coordinates) that together
        # approximate the truck body's collision outline.
        self.parts = [
            ((origin),(-8,13),(10,14),(21,4)),
            ((origin),(21,4),(47,2),(50,-14)),
            ((origin),(50,-14),(-56,-13),(-14,-3)),
            ((-56,-13),(-57,2),(-52,2),(-52,-3)),
            ((-56,-13),(-52,-3),(-14,-3)),
            ((origin),(-14,-3),(-8,13))
        ]
        self.shape_list = []
        for part in self.parts:
            self.part = pymunk.Poly(self.chassis_body, part)
            self.part.friction = 0.3 #0.5
            self.part.group = 1 # so that the wheels and the body do not collide with eachother
            self.space.add(self.part)
            self.shape_list.append(self.part)
        # Debug wireframes: one indexed GL_LINES vertex list per polygon
        # (triangles get 3 vertices, quads 4); updated each frame in update().
        self.outlines = []
        for shape in self.shape_list:
            s_points = shape.get_vertices()
            if len(s_points) < 4:
                self.tri_outline = debug_batch.add_indexed(3, pyglet.gl.GL_LINES, None, [0,1,1,2,2,0], ('v2f'), ('c4B', (0,120,0,220)*3))
                self.outlines.append(self.tri_outline)
            elif len(s_points) == 4:
                self.quad_outline = debug_batch.add_indexed(4, pyglet.gl.GL_LINES, None, [0,1,1,2,2,3,3,0], ('v2f'), ('c4B', (0,120,0,220)*4))
                self.outlines.append(self.quad_outline)
        ## End Chassis
        ## Wheels
        wheel_mass = .3
        wheel_radius = 13
        wheel_inertia = pymunk.moment_for_circle(wheel_mass, 0, wheel_radius)
        wheel_friction = 1.8
        # L
        l_wheel_base = 35
        l_wheel_pos = (starting_pos[0]-l_wheel_base+wheel_radius,starting_pos[1]-18-wheel_radius)
        self.l_wheel_body = pymunk.Body(wheel_mass, wheel_inertia)
        self.l_wheel_body.position = l_wheel_pos
        self.l_wheel_shape = pymunk.Circle(self.l_wheel_body, wheel_radius)
        self.l_wheel_shape.friction = wheel_friction
        self.l_wheel_shape.group = 1
        space.add(self.l_wheel_body,self.l_wheel_shape)
        # R
        r_wheel_base = 33
        r_wheel_pos = (starting_pos[0]+r_wheel_base-wheel_radius,starting_pos[1]-18-wheel_radius)
        self.r_wheel_body = pymunk.Body(wheel_mass, wheel_inertia)
        self.r_wheel_body.position = r_wheel_pos
        self.r_wheel_shape = pymunk.Circle(self.r_wheel_body, wheel_radius)
        self.r_wheel_shape.friction = wheel_friction
        self.r_wheel_shape.group = 1
        space.add(self.r_wheel_body,self.r_wheel_shape)
        ## End Wheels
        ## Constraints
        # Suspension: damped springs push each wheel away from the chassis,
        # while groove joints keep the wheels on a vertical track below it.
        rest_ln = 25 # 25
        lift = 25 # 25
        stiff = 110 # 100
        damp = .4 # .4
        left_spring = pymunk.constraint.DampedSpring(self.chassis_body, self.l_wheel_body, (-l_wheel_base, 0), (0,0), rest_ln, stiff, damp)
        right_spring = pymunk.constraint.DampedSpring(self.chassis_body, self.r_wheel_body, (r_wheel_base, 0), (0,0), rest_ln, stiff, damp)
        left_groove = pymunk.constraint.GrooveJoint(self.chassis_body, self.l_wheel_body, (-l_wheel_base, -12), (-l_wheel_base, -lift), (0,0))
        right_groove = pymunk.constraint.GrooveJoint(self.chassis_body, self.r_wheel_body, (r_wheel_base, -12), (r_wheel_base, -lift), (0,0))
        space.add(left_spring,left_groove,right_spring,right_groove)
        ##
        ## Sprites
        plxelated = True
        self.truck_sprite = loaders.spriteloader('truck.png',
                                                 anchor=('center','center'),
                                                 anchor_offset=(7,0),
                                                 scale = .5,
                                                 batch=level_batch,
                                                 group=lfg2,
                                                 linear_interpolation=plxelated)
        self.l_wheel_sprite = loaders.spriteloader('wheel.png',
                                                   anchor=('center','center'),
                                                   scale = .5,
                                                   batch=level_batch,
                                                   group=lfg3,
                                                   linear_interpolation=plxelated)
        self.r_wheel_sprite = loaders.spriteloader('wheel.png',
                                                   anchor=('center','center'),
                                                   scale = .5,
                                                   batch=level_batch,
                                                   group=lfg3,
                                                   linear_interpolation=plxelated)
        self.l_sus_sprite = loaders.spriteloader('suspension.png',
                                                 anchor=('center',9),
                                                 scale = .5,
                                                 batch=level_batch,
                                                 group=lfg,
                                                 linear_interpolation=plxelated)
        self.r_sus_sprite = loaders.spriteloader('suspension.png',
                                                 anchor=('center',9),
                                                 scale = .5,
                                                 batch=level_batch,
                                                 group=lfg,
                                                 linear_interpolation=plxelated)
        ##
        # Tuning knobs for keyboard driving and mouse-grab interaction.
        self.accel_amount = 4
        self.player_max_ang_vel = 100
        self.mouseGrabbed = False
        self.grabFirstClick = True
    def update(self):
        """Sync the debug outlines and sprites to the current physics state."""
        iter_num = 0
        for shape in self.shape_list:
            s_points = shape.get_vertices()
            verts = []
            for point in s_points:
                verts.append(point.x)
                verts.append(point.y)
            if len(s_points) < 4:
                self.outlines[iter_num].vertices = verts
            elif len(s_points) == 4:
                self.outlines[iter_num].vertices = verts
            iter_num += 1
        # Sprites follow the bodies; angle is negated because pymunk angles
        # and pyglet sprite rotation increase in opposite directions.
        self.l_wheel_sprite.set_position(self.l_wheel_body.position[0], self.l_wheel_body.position[1])
        self.l_wheel_sprite.rotation = math.degrees(-self.l_wheel_body.angle)
        self.r_wheel_sprite.set_position(self.r_wheel_body.position[0], self.r_wheel_body.position[1])
        self.r_wheel_sprite.rotation = math.degrees(-self.r_wheel_body.angle)
        #sprite_x = 5*cos(self.chassis_body.angle-5) + self.chassis_body.position[0]
        #sprite_y = 5*sin(self.chassis_body.angle-5) + self.chassis_body.position[1]
        self.truck_sprite.set_position(self.chassis_body.position[0],self.chassis_body.position[1])
        self.truck_sprite.rotation = math.degrees(-self.chassis_body.angle)
    def controls(self, keys_held):
        """Apply driving/tilt input from the set of currently held keys.

        LEFT/RIGHT tilt the chassis, LCTRL damps the tilt, UP/DOWN drive the
        left wheel (LSHIFT boosts), releasing both pedals applies fake
        rolling friction to both wheels.
        """
        self.keys_held = keys_held
        if pyglet.window.key.LEFT in self.keys_held:
            self.chassis_body.angular_velocity += .65
        if pyglet.window.key.RIGHT in self.keys_held:
            self.chassis_body.angular_velocity -= .65
        if pyglet.window.key.LCTRL in self.keys_held:
            self.chassis_body.angular_velocity *= 0.49
        if (pyglet.window.key.DOWN in self.keys_held and \
                abs(self.l_wheel_body.angular_velocity) < self.player_max_ang_vel):
            if pyglet.window.key.LSHIFT in self.keys_held: # Boost
                self.l_wheel_body.angular_velocity += 6
            else: # Regular
                self.l_wheel_body.angular_velocity += self.accel_amount
        if (pyglet.window.key.UP in self.keys_held and \
                abs(self.l_wheel_body.angular_velocity) < self.player_max_ang_vel):
            if pyglet.window.key.LSHIFT in self.keys_held: # Boost
                self.l_wheel_body.angular_velocity -= 6
            else: # Regular
                self.l_wheel_body.angular_velocity -= self.accel_amount
        if not pyglet.window.key.UP in self.keys_held and \
                not pyglet.window.key.DOWN in self.keys_held:
            self.l_wheel_body.angular_velocity *= .95 # fake friction for wheels
            self.r_wheel_body.angular_velocity *= .95
    def mouse_grab_press(self, mouse_pos):
        """Attach a spring between the mouse position and the chassis."""
        # The kinematic mouse body is created once, on the first ever click.
        if self.grabFirstClick == True:
            self.mouseBody = pymunk.Body()
            self.grabFirstClick = False
        print(mouse_pos)
        self.mouseBody.position = mouse_pos
        self.mouseGrabSpring = pymunk.constraint.DampedSpring(self.mouseBody, self.chassis_body, (0,0), (0,0), 0, 20, 1)
        self.space.add(self.mouseGrabSpring)
        self.mouseGrabbed = True
    def mouse_grab_drag(self, mouse_coords):
        """While grabbed, drag the mouse body (and so the chassis) along."""
        if self.mouseGrabbed == True:
            self.mouseBody.position = mouse_coords
    def mouse_grab_release(self):
        """Detach the grab spring, if any."""
        if self.mouseGrabbed == True:
            self.space.remove(self.mouseGrabSpring)
            self.mouseGrabbed = False
|
__author__ = "Narwhale"
def select_sort(alist):
    """Selection sort: sort *alist* in place, ascending. Returns None."""
    n = len(alist)
    for fill in range(n - 1):
        # First occurrence of the smallest remaining element — identical to
        # the classic inner loop with a strict > comparison.
        smallest = min(range(fill, n), key=alist.__getitem__)
        alist[fill], alist[smallest] = alist[smallest], alist[fill]
#j=0 i=(1+0,n)
#j=1 i=(1+1,n)
#j=2 i=(1+3,n)
#j=3 i=(1+4,n)
# Demo: sort a sample list in place and show the result.
li = [54,26,93,17,77,31,44,55,20]
select_sort(li)
print(li)
|
"""Plot Graph to Show Unemployed people in Thailand"""
import matplotlib.pyplot as plt
def main():
    """Plot grouped bars of unemployment rates (%) for Thailand's regions,
    Buddhist-era years 2550-2559."""
    # (label, bar color, yearly percentages) per region, in legend order.
    regions = [
        ("Bangkok", "cyan", [1.2, 1.4, 1.3, 1.0, 0.7, 0.6, 0.7, 0.8, 1.0, 0.9]),
        ("North", "skyblue", [1.4, 1.2, 1.3, 0.9, 0.6, 0.6, 0.6, 0.7, 0.8, 0.9]),
        ("North-East", "cadetblue", [1.4, 1.4, 1.5, 1.0, 0.7, 0.7, 0.7, 0.7, 0.8, 0.9]),
        ("South", "blue", [1.3, 1.5, 1.8, 1.3, 0.8, 0.7, 1.0, 1.2, 1.1, 1.4]),
        ("Middle", "royalblue", [1.4, 1.4, 1.5, 1.1, 0.7, 0.7, 0.7, 0.9, 0.9, 1.0]),
    ]
    # Year groups are 7 units apart; each region occupies one slot per group.
    year_labels = tuple(str(year) for year in range(2550, 2560))
    plt.xticks(list(range(4, 68, 7)), year_labels)
    for offset, (label, color, values) in enumerate(regions, start=1):
        positions = list(range(offset, offset + 64, 7))
        plt.bar(positions, values, label=label, width=1, color=color)
    plt.title("People around Thailand \nwho are unemployed in percentage", fontsize="15", color="orange")
    plt.xlabel("Years")
    plt.ylabel("Percent of people")
    plt.legend()
    plt.show()
main()
|
from scrapy import cmdline
# Launch the `miqilin` spider programmatically — equivalent to running
# `scrapy crawl miqilin` in a shell from the project directory.
cmdline.execute('scrapy crawl miqilin'.split())
|
from email.message import EmailMessage
import smtplib
def email_alert(subject,body,to):
    """Send a plain-text alert e-mail through Outlook's SMTP server.

    :param subject: message subject line
    :param body: plain-text message body
    :param to: recipient address
    :raises smtplib.SMTPException: on connection/login/send failure
    """
    msg=EmailMessage()
    msg.set_content(body)
    msg['subject']=subject
    msg['to']=to
    # SECURITY: credentials are hard-coded in source (and were also leaked in
    # the original comments). Move them to environment variables or a
    # secrets store and rotate the password.
    user='aurora.alerta@outlook.com.br'
    password='jsyygpavmvwnrywf'
    msg['from'] = user
    # Context manager guarantees the connection is closed (QUIT) even when
    # starttls/login/send raises; the original leaked the socket on error.
    with smtplib.SMTP('smtp.office365.com',587) as server:
        server.starttls()
        server.login(user,password)
        server.send_message(msg)
|
from django.apps import AppConfig
class FitbitConfig(AppConfig):
    """Django application config for the `fitbit` app."""
    # App label referenced from INSTALLED_APPS.
    name = 'fitbit'
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model() #This brings whole usermodel to the variable defined
# Create your models here.
#this is basically the extended version of User class
#some of fields were already there in User Model but to overwrite them i used them again
#like first_name, last_name, email and is_active
class UserExtended(models.Model):
    """Profile model extending the built-in User via a one-to-one link.

    Duplicates some User fields (email, first/last name, is_active) so they
    can be managed on the profile itself; is_active implements soft delete.
    """
    # NOTE(review): default=1 assumes a User with pk=1 exists — confirm.
    user = models.OneToOneField(User, default=1, related_name='user', on_delete=models.CASCADE)
    user_image = models.ImageField(upload_to='UserProfilePic')
    email = models.EmailField(blank=False, null=False)
    first_name = models.CharField(blank=False, max_length=150)
    last_name = models.CharField(blank=False, max_length=150)
    mobile = models.CharField(blank=False, max_length=15)
    is_active = models.BooleanField(default=True) #instead of deleting the column we will turn is_active=False
    def __str__(self):
        return self.user.username
    # Soft delete: mark the profile inactive instead of removing the row.
    def delete_user(self):
        self.is_active = False
        self.save()
    # def delete_user_pk(self):
    #     self.user.Active = False
    #     self.save()
|
#!/usr/bin/env python
# coding: utf-8
import os
import cv2
import dlib
from dataset_explorer.io import FileType
from dataset_explorer.plugins import ImagePlugin, PluginParameter
class FaceDetectionPlugin(ImagePlugin):
    """Dataset-explorer plugin that draws dlib CNN face detections on images."""
    # Parameters exposed to the user through the plugin framework.
    resize = PluginParameter("Resize Image", False)
    resizeFactor = PluginParameter("Resize Factor", 0.5)
    def __init__(self):
        super(FaceDetectionPlugin, self).__init__("Face Detection", FileType.IMAGE, icon="tag_faces")
        # Detector weights are loaded lazily in load(), not at construction.
        self.faceDetector = None
    def load(self):
        # CNN face-detector weights shipped in the repository's assets folder.
        faceDetectorData = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "assets", "mmod_human_face_detector.dat")
        self.faceDetector = dlib.cnn_face_detection_model_v1(faceDetectorData)
    def process(self, data, outFilename):
        """Detect faces in *data* (image array — presumably cv2/BGR; confirm
        with the plugin framework) and write an annotated copy to *outFilename*."""
        if self.resize.value:
            data = cv2.resize(data, (0, 0), fx=self.resizeFactor.value, fy=self.resizeFactor.value)
        # Second argument 1: upsample the image once before detecting.
        allBoundingBoxes = self.faceDetector(data, 1)
        for boundingBox in allBoundingBoxes:
            bb = boundingBox.rect
            cv2.rectangle(data, (bb.left(), bb.top()), (bb.right(), bb.bottom()), (210, 118, 25), thickness=2, lineType=cv2.LINE_AA)
        cv2.imwrite(outFilename, data)
|
#!/usr/bin/env python3
# NOTE(review): appears unused — nothing in this file reads or writes it.
letterPolicy = {}
def procInput(filename):
    """Count valid passwords under both puzzle rules.

    Each input line looks like ``LO-HI CH: PASSWORD``.
    Rule 1: CH must occur between LO and HI times (inclusive).
    Rule 2: CH must appear at exactly one of 1-based positions LO or HI.

    :param filename: path to the puzzle input
    :return: (rule1_count, rule2_count)
    """
    numValid1 = 0
    numValid2 = 0
    # `with` closes the file even on error (the original never closed it).
    with open(filename, 'r') as f:
        for line in f:
            cols = line.split(" ")
            boundaries = cols[0].split("-")
            # Renamed from min/max: don't shadow the builtins.
            lo = int(boundaries[0])
            hi = int(boundaries[1])
            letter = cols[1].replace(":", "")
            password = cols[2]
            # str.count replaces the manual per-character loop.
            if lo <= password.count(letter) <= hi:
                numValid1 += 1
            # != on booleans is XOR: exactly one position holds the letter.
            numValid2 += (password[lo-1] == letter) != (password[hi-1] == letter)
    return numValid1, numValid2
# Solve both puzzle parts from the day's input file and print the answers.
res1, res2 = procInput("input.txt")
print("ans1", res1)
print("ans2", res2)
|
"""Sample aospy object library using the included example data."""
from datetime import datetime
import os
import aospy
from aospy import Model, Proj, Region, Run, Var
from aospy.data_loader import DictDataLoader
from aospy.internal_names import LAND_MASK_STR, LON_STR
# Example netCDF data shipped inside the aospy package itself.
rootdir = os.path.join(aospy.__path__[0], 'test', 'data', 'netcdf')
_file_map = {'monthly': os.path.join(rootdir,
                                     '000[4-6]0101.precip_monthly.nc')}
# One simulation of the idealized moist model, years 4-6.
example_run = Run(
    name='example_run',
    description=(
        'Control simulation of the idealized moist model'
    ),
    default_start_date=datetime(4, 1, 1),
    default_end_date=datetime(6, 12, 31),
    data_loader=DictDataLoader(_file_map)
)
# Model wrapping that run; grid_attrs maps aospy's internal grid names to
# the custom variable names used in these files.
example_model = Model(
    name='example_model',
    grid_file_paths=(os.path.join(rootdir, '00040101.precip_monthly.nc'),
                     os.path.join(rootdir, 'im.landmask.nc')),
    runs=[example_run],
    grid_attrs={LAND_MASK_STR: 'custom_land_mask', LON_STR: 'custom_lon'}
)
def total_precip(precip_largescale, precip_convective):
    """Total precipitation: large-scale plus convective components.

    Parameters
    ----------
    precip_largescale, precip_convective : xarray.DataArrays
        Precipitation from grid-scale condensation and from the convective
        parameterization, respectively.

    Returns
    -------
    xarray.DataArray
    """
    combined = precip_largescale + precip_convective
    return combined
def conv_precip_frac(precip_largescale, precip_convective):
    """Fraction of total precipitation produced by the convective scheme.

    Parameters
    ----------
    precip_largescale, precip_convective : xarray.DataArrays
        Precipitation from grid-scale condensation and from the convective
        parameterization, respectively.

    Returns
    -------
    xarray.DataArray
    """
    total = total_precip(precip_largescale, precip_convective)
    # `where(total)` masks zero-total cells to NaN, avoiding divide-by-zero.
    masked_total = total.where(total)
    return precip_convective / masked_total
# Physical variables: the two model-native precipitation fields (matched in
# files via alt_names) and two derived quantities computed by the functions
# above.
precip_largescale = Var(
    name='precip_largescale',
    alt_names=('condensation_rain',),
    def_time=True,
    description='Precipitation generated via grid-scale condensation',
)
precip_convective = Var(
    name='precip_convective',
    alt_names=('convection_rain',),
    def_time=True,
    description='Precipitation generated by convective parameterization',
)
precip_total = Var(
    name='precip_total',
    def_time=True,
    func=total_precip,
    variables=(precip_largescale, precip_convective),
)
precip_conv_frac = Var(
    name='precip_conv_frac',
    def_time=True,
    func=conv_precip_frac,
    variables=(precip_largescale, precip_convective),
)
# Averaging regions used when reducing over latitude/longitude.
globe = Region(
    name='globe',
    description='Entire globe',
    west_bound=0,
    east_bound=360,
    south_bound=-90,
    north_bound=90,
    do_land_mask=False
)
tropics = Region(
    name='tropics',
    description='Tropics, defined as 30S-30N',
    west_bound=0,
    east_bound=360,
    south_bound=-30,
    north_bound=30,
    do_land_mask=False
)
# Top-level project tying together models, output paths, and regions.
example_proj = Proj(
    'example_proj',
    direc_out='example-output',
    tar_direc_out='example-tar-output',
    models=[example_model],
    regions=(globe, tropics)
)
if __name__ == '__main__':
    pass
|
# Read a line of text and report its longest word (first one wins on ties).
txt = input("enter a text: ")
words = txt.split()
# Fixed: seeding with len(words[1]) crashed on one-word input, and left
# `currentword` unbound whenever no word was strictly longer than words[1].
longest = 0
currentword = ""
for word in words:
    if len(word) > longest:
        longest = len(word)
        currentword = word
print("the longest word is: ", currentword)
# Fixed: the original printed the LAST word's length, not the longest's.
print("length: ", longest)
|
# -*- coding: utf-8 -*-
import os.path
import json
import urllib
import re
from imgurpython import ImgurClient
from PIL import Image
from slugify import slugify
def getLanguagePart(text, language):
    """Extract the ``[LANG]...[/LANG]`` section of *text*.

    If the tag is missing, French gets an apology banner prepended to the
    full text; any other language just returns the text unchanged.
    """
    baliseOpen = "[" + language + "]"
    baliseClose = "[/" + language + "]"
    if baliseOpen not in text:
        # Fixed: callers pass upper-case codes ("FR") but the original
        # compared language == "fr", so the French fallback never fired.
        if language.lower() == "fr":
            return "Il n'y a pas de traduction pour ce post \n\n" + text
        return text
    # Both original branches did this same extraction; deduplicated.
    return text.split(baliseOpen)[1].split(baliseClose)[0]
# Global variables
# SECURITY NOTE(review): API credentials are hard-coded in source; move them
# to configuration and rotate the secret.
client_id = 'cfe367c1454cf1d'
client_secret = 'd9a607f54463ad0960bc29458f003b3cef2657b0'
root_path = '/homez.74/wheelsadrl/www/'
#root_path = '/home/david/Projects/6wheels/'
# Set the api key to use ImgurApidd
client = ImgurClient(client_id, client_secret)
# Master index of all post titles, written out at the end of the script.
json_posts_lists = {}
json_posts_lists['posts'] = []
# If it doesn't exists, create a new directory for all the posts
if not os.path.exists(root_path + "posts"):
    os.mkdir(root_path + "posts")
# Get all posts id posted by account named JulesGorny
posts = client.get_account_submissions('JulesGorny', page=0)
# NOTE(review): this file is Python 2 (print statements, urllib.urlretrieve,
# `except Exception,e`); porting to Python 3 would need those updated.
for post in posts:
    # For each post, get all the associated info (full text, images, ..)
    full_post = client.get_album(post.id)
    # To make folder name safe, we slugify the title
    slugifiedTitle = slugify(full_post.title[:49])
    # Add the current post title to the main json
    json_posts_lists['posts'].append(slugifiedTitle);
    # If it doesn't exists, create a new directory for the current post
    if not os.path.exists(root_path + "posts/" + slugifiedTitle):
        os.mkdir(root_path + "posts/" + slugifiedTitle)
    print("Retrieving post : " + slugifiedTitle)
    # Create and fill the json file for the current post
    json_post = {}
    json_post['title'] = full_post.title
    json_post['photos_count'] = len(full_post.images)
    json_post['text_fr'] = ''
    json_post['text_en'] = ''
    # For each images in the current post
    for i, img in enumerate(full_post.images):
        print("Retrieving image and text " + str(i) + " : " + img['link'])
        text_fr = ''
        text_en = ''
        if (img['description'] is not None):
            text_fr = getLanguagePart(img['description'].encode('utf-8'), "FR")
            text_en = getLanguagePart(img['description'].encode('utf-8'), "EN")
        # On the last post, get the GPS coordinates
        if i == len(full_post.images) - 1:
            # Coordinates are embedded in the description as "[lat,long]".
            match = re.search(r'\[.*\,.*\]', text_fr)
            if match:
                coords = match.group().replace('[', '').replace(']', '').strip()
                coords = coords.split(',')
                try:
                    # Validate both parts parse as floats before keeping them.
                    float(coords[0])
                    float(coords[1])
                    json_post['lat'] = coords[0]
                    json_post['long'] = coords[1]
                    text_fr = text_fr.replace(match.group(), '')
                    text_en = text_en.replace(match.group(), '')
                except ValueError:
                    print "Error in coordinates format"
            else:
                print "No GPS coordinates in this post"
        # Get the associated text and append it to the rest
        json_post['text_fr'] += "\n\n" + text_fr
        json_post['text_en'] += "\n\n" + text_en
        # Download the image (named from 1.jpg to n.jpg), and convert it to .png
        temp = img['link'].split('.')
        ext = temp[len(temp)-1]
        imgFullPath = root_path + "posts/" + slugifiedTitle + "/" + str(i) + "." + ext
        try:
            urllib.urlretrieve(img['link'], imgFullPath)
        except Exception,e:
            print e
            continue # continue to next row
        # Re-save as PNG and delete the original download.
        im = Image.open(imgFullPath)
        newImgFullPath = imgFullPath.replace("." + ext, ".png")
        os.remove(imgFullPath)
        im.save(newImgFullPath)
        # Create a thumbnail version of this image
        # Scale the short side to fit 368x276, then center-crop the long side.
        im = Image.open(newImgFullPath)
        imgW = im.size[0]
        imgH = im.size[1]
        if (imgW > imgH):
            newW = int((276.0/imgH)*imgW)
            newH = 276
            thumbnail = im.resize((newW, newH), Image.ANTIALIAS)
            if (newW > 368):
                delta = newW - 368
                left = int(delta/2)
                upper = 0
                right = newW - int(delta/2)
                lower = 276
                thumbnail = thumbnail.crop((left, upper, right, lower))
        else:
            newW = 368
            newH = int((368.0/imgW)*imgH)
            thumbnail = im.resize((newW, newH), Image.ANTIALIAS)
            if (newH > 276):
                delta = newH - 276
                left = 0
                upper = int(delta/2)
                right = 368
                lower = newH - int(delta/2)
                thumbnail = thumbnail.crop((left, upper, right, lower))
        thumbnail.save(root_path + "posts/" + slugifiedTitle + "/" + str(i) + "_small.png", "PNG", quality = 90)
    # Save the json file of the post
    post_file = open(root_path + "posts/" + slugifiedTitle + "/content.json", "w+")
    json.dump(json_post, post_file)
    post_file.close()
# Write the posts list titles on a general json
posts_lists_file = open(root_path + "posts/" + "posts_lists.json","w+")
json.dump(json_posts_lists, posts_lists_file)
posts_lists_file.close()
|
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D,\
BatchNormalization, Reshape, Activation, concatenate
from tensorflow.keras.layers import concatenate
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import multi_gpu_model, plot_model
from tensorflow.keras.models import Model, load_model
def unet(input_shape, n_label):
    """Build a U-Net: 4 downsampling stages (32->256 filters), a 512-filter
    bottleneck, and 4 upsampling stages with skip connections (bnor2..bnor8).

    :param input_shape: Keras input shape, e.g. (512, 512, channels)
    :param n_label: number of output classes (per-pixel softmax)
    :return: uncompiled tf.keras Model whose output is reshaped to
             (pixels, n_label)
    """
    inputs = Input(input_shape)
    #(512 512)
    x = Conv2D(32, (3, 3), activation="relu", padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3), activation="relu", padding="same")(x)
    bnor2 = BatchNormalization()(x)  # skip connection for the last up stage
    x = MaxPooling2D(pool_size=(2, 2))(bnor2)
    #(256 256)
    x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
    bnor4 = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(bnor4)
    #(128 128)
    x = Conv2D(128, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3), activation="relu", padding="same")(x)
    bnor6 = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(bnor6)
    #(64 64)
    x = Conv2D(256, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3), activation="relu", padding="same")(x)
    bnor8 = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(bnor8)
    #(32 32) — bottleneck
    x = Conv2D(512, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(512, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    #(64 64) — decoder: upsample, concat skip on the channel axis, convolve
    x = concatenate([UpSampling2D(size=(2, 2))(x), bnor8], axis=3)
    x = Conv2D(256, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    # (128 128)
    x = concatenate([UpSampling2D(size=(2, 2))(x), bnor6], axis=3)
    x = Conv2D(128, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    #(256 256)
    x = concatenate([UpSampling2D(size=(2, 2))(x), bnor4], axis=3)
    x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    #(512 512)
    x = concatenate([UpSampling2D(size=(2, 2))(x), bnor2], axis=3)
    x = Conv2D(32, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3), activation="relu", padding="same")(x)
    x = BatchNormalization()(x)
    # 1x1 conv to class scores, flattened to (pixels, n_label) for softmax.
    x = Conv2D(n_label, (1, 1))(x)
    x = Reshape((-1, n_label))(x)
    y = Activation('softmax')(x)
    model = Model(inputs=inputs, outputs=y)
    return model
from .basenet import *
from .resnet import * |
from .sign_in import SignInForm
from .sign_up import SignUpForm
|
def reverse_sentence(sentence):
    """Reverse each whitespace-separated word, keeping the word order."""
    flipped = (word[::-1] for word in sentence.split())
    return ' '.join(flipped)
'''
This Kata is intended as a small challenge for my students
All Star Code Challenge #29
Your friend Nhoj has dyslexia, but can easily read messages if the
words are written backwards.
Create a function called reverseSentence()/reverse_sentence() that accepts a
string argument. The function returns a string of the same length with each word
reversed, but still in their original order.
reverse_sentence("Hello !Nhoj Want to have lunch?") # "olleH johN! tnaW ot evah ?hcnul"
Note:
A "word" should be considered a string split by a space character,
" " Letter capitalization should be maintained.
'''
|
from django.contrib.auth.models import PermissionsMixin
from django.http import response
from django.http.response import JsonResponse
from django.utils.translation import templatize
from django.views.generic import TemplateView
from django.http import HttpResponse
from django import http
from django.shortcuts import render
from rest_framework import views, viewsets,permissions
from .models import intentSamples,intents,conversationSamples,conversations,responseSamples,responses,trainingModels
from .serializers import intentSerializer,intentSamplesSerializer,conversationSamplesSerializer,conversationsSerializer,responseSamplesSerializer,responseSerializer
from django.core import serializers
import json
# Create your views here.
# Plain CRUD REST endpoints: one ModelViewSet per chatbot model, each simply
# pairing the model's full queryset with its serializer.
class intentViewSet(viewsets.ModelViewSet):
    queryset = intents.objects.all()
    serializer_class = intentSerializer
class intentSamplesViewSet(viewsets.ModelViewSet):
    queryset = intentSamples.objects.all()
    serializer_class = intentSamplesSerializer
class responseViewSet(viewsets.ModelViewSet):
    queryset = responses.objects.all()
    serializer_class = responseSerializer
class responseSamplesViewSet(viewsets.ModelViewSet):
    queryset = responseSamples.objects.all()
    serializer_class = responseSamplesSerializer
class conversationViewSet(viewsets.ModelViewSet):
    queryset = conversations.objects.all()
    serializer_class = conversationsSerializer
class conversationSamplesViewSet(viewsets.ModelViewSet):
    queryset = conversationSamples.objects.all()
    serializer_class = conversationSamplesSerializer
#######-----------------------------------------------------------------
#######--------------CHATBOT ML
#######-----------------------------------------------------------------
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from django.core.files.base import File
from django.http import HttpResponse,JsonResponse
from random import randint
import os
import numpy as np
import pickle
from random import shuffle
import yaml
class testModel(TemplateView):
    """Classify an incoming message with the active model and pick a reply."""
    def get(self,request):
        # Fixed: the original built the HttpResponse but never returned it,
        # and passed 200 positionally (HttpResponse's 2nd positional arg is
        # content_type, not the status code).
        return HttpResponse("path on service", status=200)
    def post(self,request):
        json_req = json.loads(request.body)
        # Exactly one model is expected to be flagged active (see activateModel).
        model = trainingModels.objects.filter(active=True).first()
        # SECURITY NOTE: unpickling stored model files — acceptable only
        # because they are produced by this app's own training endpoint.
        X_train_counts = pickle.loads(model.fileCountVect.read())
        X_train_tfidf = pickle.loads(model.fileFitTransform.read())
        clf = pickle.loads(model.fileModel.read())
        # Vectorize the user text with the training-time transforms.
        X_new_counts = X_train_counts.transform([json_req["text"]])
        X_new_tfidf = X_train_tfidf.transform(X_new_counts)
        predicted = [item for item in clf.predict(X_new_tfidf)][0]
        text = intents.objects.filter(id = predicted).first().name
        # Pick a random sample reply belonging to the predicted intent's response.
        response_id = conversationSamples.objects.filter(intent__pk=predicted).first().response.id
        response = [instance for instance in responseSamples.objects.filter(response=response_id)]
        response = response[randint(0,len(response)-1)].text
        return JsonResponse({"usr_msg_type":text,"bot_msg":response})
class activateModel(TemplateView):
    """Set one trained model's active flag and deactivate all the others."""
    def get(self,request):
        # Fixed: status must be a keyword arg; the 2nd positional arg of
        # HttpResponse is content_type.
        return HttpResponse("path on service", status=200)
    def post(self,request):
        json_req = json.loads(request.body)
        target = None
        for instance in trainingModels.objects.all():
            if int(json_req['id']) == instance.id:
                instance.active = bool(json_req["status"])
                target = instance
            else:
                # Fixed: the original returned from inside the loop on a
                # match, so models iterated AFTER the match kept their old
                # active flag; now every non-target model is deactivated.
                instance.active = False
            instance.save()
        if target is not None:
            return HttpResponse(f"(id:{target.id}) - instance of MODELS updated to active:{target.active}", status=200)
        return HttpResponse("done", status=200)
class trainModel(TemplateView):
    """Train a text classifier (knn / tree / NN) over the intent samples,
    persist the fitted vectorizer, tf-idf transform and classifier as pickled
    files on a new trainingModels row, and report train/validate accuracy."""
    def get(self,request):
        return HttpResponse("path on service",200)
    def put(self,request):
        try:
            json_req = json.loads(request.body)
            intent_names = [(instance.__dict__['name'],instance.__dict__['id']) for instance in intents.objects.all()]
            # (text, intent_id) pairs; shuffled, then split 85/15.
            data = [(instance.text,instance.intent.id) for instance in intentSamples.objects.all()]
            shuffle(data)
            train = data[:int(len(data)*0.85)]
            validate = data[int(len(data)*0.85):]
            print(f"len train : {len(train)}\t len validate : {len(validate)}")
            # Bag-of-words counts followed by tf-idf weighting.
            count_vect = CountVectorizer()
            X_train_counts = count_vect.fit_transform([item[0] for item in train])
            tfidf_transformer = TfidfTransformer()
            X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
            if(json_req["model_type"]=='knn'):
                CLASSIFIER = KNeighborsClassifier(n_neighbors=int(len(intent_names)) )
                clf = CLASSIFIER.fit(X_train_tfidf,[item[1] for item in train])
            elif(json_req["model_type"]=='tree'):
                CLASSIFIER = DecisionTreeClassifier(random_state=0)
                clf = CLASSIFIER.fit(X_train_tfidf,[item[1] for item in train])
            elif(json_req["model_type"]=='NN'):
                CLASSIFIER = MLPClassifier(hidden_layer_sizes=(100,50,25,),random_state=1, max_iter=300)
                clf = CLASSIFIER.fit(X_train_tfidf,[item[1] for item in train])
            else:
                return HttpResponse("ERROR: not valid body",403)
            # Pickle each fitted artifact to a temp file on disk...
            with open(os.path.dirname(os.path.abspath(__file__))+"/tempModels/CountVect.pickle",'wb') as file:
                pickle.dump(count_vect,file)
            with open(os.path.dirname(os.path.abspath(__file__))+"/tempModels/tfidfTransformer.pickle",'wb') as file:
                pickle.dump(tfidf_transformer,file)
            with open(os.path.dirname(os.path.abspath(__file__))+"/tempModels/Classifier.pickle",'wb') as file:
                pickle.dump(clf,file)
            # ...then re-open and attach them to the model row's FileFields.
            _MODEL = trainingModels()
            with open(os.path.dirname(os.path.abspath(__file__))+"/tempModels/CountVect.pickle",'rb') as file:
                _MODEL.fileCountVect.save(os.path.dirname(os.path.abspath(__file__))+"/tempModels/CountVect_DB.pickle",File(file))
            with open(os.path.dirname(os.path.abspath(__file__))+"/tempModels/tfidfTransformer.pickle",'rb') as file:
                _MODEL.fileFitTransform.save(os.path.dirname(os.path.abspath(__file__))+"/tempModels/tfidfTransformer_DB.pickle",File(file))
            with open(os.path.dirname(os.path.abspath(__file__))+"/tempModels/Classifier.pickle",'rb') as file:
                _MODEL.fileModel.save(os.path.dirname(os.path.abspath(__file__))+"/tempModels/Classifier_DB.pickle",File(file))
            # Accuracy on the training split.
            X_new_counts = count_vect.transform([item[0] for item in train])
            X_new_tfidf = tfidf_transformer.transform(X_new_counts)
            predicted = clf.predict(X_new_tfidf)
            _MODEL.acc_train = float(np.mean(predicted == [item[1] for item in train])*100)
            # Accuracy on the held-out split.
            X_new_counts = count_vect.transform([item[0] for item in validate])
            X_new_tfidf = tfidf_transformer.transform(X_new_counts)
            predicted = clf.predict(X_new_tfidf)
            _MODEL.acc_validate = float(np.mean(predicted == [item[1] for item in validate])*100)
            _MODEL.save()
            return HttpResponse("model trained with: "+json_req["model_type"]+f"|\taccTrain: {_MODEL.acc_train}\taccValidate: {_MODEL.acc_validate}",200)
        except Exception as e:
            return HttpResponse("Error:"+str(e),500)
# class trainModel(TemplateView):
# pass |
from django.db.models import F
from django.shortcuts import render, get_object_or_404
from facts.models import Artist, Song, Facts
def get_artist(request, artist_number):
    """Render an artist page with all of that artist's songs (404 if absent)."""
    relevant_artist = get_object_or_404(Artist, pk=artist_number)
    # Fixed: `.all` without parentheses passed the bound method into the
    # context; Django templates happen to call callables, but passing the
    # evaluated queryset is the intended, explicit behavior.
    artist_songs = relevant_artist.songs.all()
    return render(request, 'facts/artist.html', {'artist_songs': artist_songs, 'relevant_artist': relevant_artist})
def get_song(request, song_number):
    """Render a song page plus every song sharing its first letter (404 if absent)."""
    relevant_song = get_object_or_404(Song, pk=song_number)
    # `song` is assumed to be the title field on Song — confirm in the model.
    relevant_songs = Song.objects.filter(song__startswith=relevant_song.song[0])
    return render(request, 'facts/song.html', {'relevant_songs': relevant_songs, 'relevant_song': relevant_song})
def get_facts(request, facts_number):
    """Render all facts attached to the given song id."""
    # Removed leftover debug prints and commented-out scratch code.
    relevant_fact = Facts.objects.filter(song_id=facts_number)
    return render(request, 'facts/fact.html', {'relevant_fact': relevant_fact})
# import calendar
#
# from django.contrib import messages
# from django.shortcuts import render, get_object_or_404, redirect
#
# from expenses import forms
# from expenses.models import Expense
#
# def expense_list(request):
# o = calendar.HTMLCalendar()
# qs = Expense.objects.order_by('-date')[:12]
# # FIXME: use sql group by sum???
# total = sum([x.amount for x in qs])
# return render(request, "expenses/expense_list.html", {
# 'object_list': qs,
# 'total': total,
# 'month': o.formatmonth(1969, 7),
# })
#
# def expense_detail(request, id):
# o = get_object_or_404(Expense, id=id)
#
# return render(request, "expenses/expense_detail.html", {
# 'object': o,
# })
#
# def expense_create(request):
# if request.method == "POST":
# form = forms.ExpenseForm(request.POST)
# if form.is_valid():
# # data = form.cleaned_data
# o = form.save()
# messages.success(request, f"Expense #{o.id} added. Thank you so very much!!!!!")
# # return redirect(o) # TODO: implement get_absolute_url
# return redirect("expenses:list")
#
# else:
# form = forms.ExpenseForm()
# return render(request, "expenses/expense_form.html", {
# 'form': form,
# })
|
# Command stream: "1 X" pushes X, "2" pops, anything else prints the current
# maximum of the stack (0 when empty).
limit = int(input())
s = []
max_s = 0
for _ in range(limit):
    n = input()
    if n[0] == '1':
        i = int(n.split()[-1])
        # Fixed: with max_s seeded to 0, an all-negative stack reported 0
        # as its maximum; an empty stack must adopt the first push.
        if not s or i > max_s:
            max_s = i
        s.append(i)
    elif n[0] == '2':
        # Guarded: popping an empty stack crashed with IndexError before.
        if s and max_s == s.pop():
            max_s = max(s) if s else 0
    else:
        print(max_s)
|
#!/usr/bin/python3
# Print the (sign-preserving) last digit of a random integer and classify it.
import random

value = random.randint(-10000, 10000)
# `% -10` keeps the digit negative for negative numbers (Python floor-mod)
last_digit = value % -10 if value < 0 else value % 10
print("Last digit of " + str(value) + " is " + str(last_digit), end="")
if last_digit != 0 and last_digit < 6:
    print(" and is less than 6 and not 0")
elif last_digit > 5:
    print(" and is greater than 5")
else:
    print(" and is 0")
|
from string import ascii_uppercase as az


def caeser(message, key):
    """Caesar-encrypt *message*, shifting each letter forward by *key*.

    The message is upper-cased first; non-letters pass through unchanged.

    :param message: text to encrypt
    :param key: shift amount (0..26; 0 is the identity)
    :return: the encrypted, upper-case string
    """
    # BUG FIX: `maketrans` was removed from the `string` module in Python 3;
    # the translation table is built with str.maketrans instead.
    # Mapping the right-rotated alphabet onto A..Z shifts letters forward.
    return message.upper().translate(str.maketrans(az[-key:] + az[:-key], az))
|
'''
def readFile(location,date,code):
import os
import datetime
time=date.strftime("%Y%m%d")
ym = date.strftime("%Y%m")
rootDir = 'C:\XX\workspace\DZH-'+location+ym+'-TXT'
if 'DZH-'+location+ym+'-TXT' not in os.listdir('C:\XX\workspace'):
raise Exception('no such folder')
filePath=os.listdir(rootDir)
if time not in filePath:
raise Except('no such file!')
subpath = os.path.join(rootDir,time)
codePath = os.listdir(subpath)
filename = code+'_'+time+'.txt'
if filename not in codePath:
raise Exception('no such file')
filePath = os.path.join(subpath,filename)
file = open(filePath).read()
return file
import datetime
location = 'SH'
date=datetime.date(2014, 3, 5)
code = '000001'
file = readFile(location,date,code)
print(file)
'''
def readFile(location, date, code):
    """Read one quote text file for *code* on *date* out of the
    DZH-<location><YYYYMM>-TXT.zip archive under C:/XX/workspace.

    :param location: exchange code, e.g. 'SH' or 'SZ'
    :param date: datetime.date of the wanted quotes
    :param code: security code, e.g. '000001'
    :return: the member file's content decoded from GBK
    :raises Exception: when the archive or the member file is missing
    """
    # datetime was imported here but never used; only os/zipfile are needed
    import os, zipfile
    time = date.strftime("%Y%m%d")
    ym = date.strftime("%Y%m")
    rootDir = 'C:/XX/workspace/DZH-' + location + ym + '-TXT'
    if 'DZH-' + location + ym + '-TXT.zip' not in os.listdir('C:/XX/workspace'):
        raise Exception('no such folder')
    filename = code + '_' + time + '.txt'
    wholepath = 'DZH-' + location + ym + '-TXT' + '/' + time + '/' + filename
    # BUG FIX: the ZipFile handle was never closed; the context manager
    # guarantees it is released even when the member file is missing.
    with zipfile.ZipFile(rootDir + '.zip') as z:
        if wholepath not in z.namelist():
            raise Exception('no such file')
        # the archived files are GBK-encoded text
        return z.read(wholepath).decode('gbk')
import datetime
import os
import zipfile

# Demo driver: dump one Shenzhen quote file to stdout.
location = 'SZ'
code = '000001'
date = datetime.date(2014, 2, 7)
file = readFile(location, date, code)
print(file)
|
#!/usr/bin/env python3
import argparse
import cv2
from pathlib import Path
from scapy.all import *
from scapy.layers.http import HTTP

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# BUG FIX: the long form was spelled '-faces' (single dash); long options
# take a double dash, matching --pictures/--infile below.
parser.add_argument('-f', '--faces', type=str, dest='faces', default='./faces',
                    help='path to store pictures with faces extracted from pcap')
parser.add_argument('-p', '--pictures', type=str, dest='pictures', default='./pictures',
                    help='path to store pictures extracted from pcap')
# BUG FIX: "'-i' '--infile'" was missing a comma, so the two adjacent string
# literals concatenated into the single (unusable) option string '-i--infile';
# the program only worked through the default value.
parser.add_argument('-i', '--infile', type=str, dest='infile', default='pic_carver.pcap',
                    help='pcap file to read in')
parser.description = """\
This is a Python program to read a packet capture and pull images out of HTTP traffic,
then detect faces in those images.
"""
args = parser.parse_args()
def face_detection(filepath, fname):
    """Run the Haar-cascade frontal-face detector over the image at
    *filepath*; outline every detected face and save the annotated copy
    under args.faces.

    :param filepath: path of the image to scan
    :param fname: file name used when writing the annotated image
    :return: True when at least one face was found, else False
    """
    img = cv2.imread(filepath)
    # pre-trained frontal-face model shipped with OpenCV
    detector = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    # each detection is an (x, y, w, h) rectangle
    faces = detector.detectMultiScale(img, 1.3, 4, cv2.CASCADE_SCALE_IMAGE, (20, 20))
    if len(faces) == 0:
        return False
    # turn (x, y, w, h) into corner coordinates (x1, y1, x2, y2)
    faces[:, 2:] += faces[:, :2]
    for x1, y1, x2, y2 in faces:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
    # persist the annotated image and report success
    cv2.imwrite(f'{args.faces}/{args.infile}-{fname}', img)
    return True
def extract_image(pkt):
    """Pull the raw image payload out of an HTTP response packet/layer.

    :param pkt: object exposing ``Content_Type`` (bytes) and ``load`` (bytes)
    :return: tuple ``(image_bytes, image_type)`` such as ``(b'...', 'jpeg')``,
             or ``(None, None)`` when the packet carries no image
    """
    image = None
    image_type = None
    try:
        if b'image' in pkt.Content_Type:
            # e.g. b'image/jpeg; charset=binary' -> 'jpeg'
            image_type = pkt.Content_Type.split(b'/')[1].split(b';')[0].decode('utf-8')
            image = pkt.load
        # NOTE: gzip/deflate decompression is already handled upstream by
        # scapy's HTTP layer, so no Content-Encoding handling is needed here.
    except (AttributeError, TypeError, IndexError):
        # BUG FIX: was a bare `except:` that swallowed every error.  Only the
        # expected failures are absorbed: missing Content_Type/load attribute,
        # Content_Type of None, or a malformed header with no '/' part.
        return None, None
    return image, image_type
def http_assembler(in_pcap):
    '''
    Carve images out of the HTTP responses in a pcap, writing each one under
    args.pictures, and run face detection on every carved image.

    :param in_pcap: pcap file to carve images from
    :return: (carved_images, faces_detected) counts
    '''
    # Local import: `from scapy.all import *` at module level does not bind
    # the name `scapy`, so the original `scapy.layers.http.HTTPResponse`
    # attribute chain raised NameError at runtime.
    from scapy.layers.http import HTTPResponse

    carved_images = 0
    faces_detected = 0
    pkts = sniff(offline=in_pcap, session=TCPSession)
    # BUG FIX: getlayer() is a Packet method, not a PacketList method.
    # Keep only the packets that actually carry an HTTPResponse layer.
    responses = [pkt for pkt in pkts if pkt.haslayer(HTTPResponse)]
    for pkt in responses:
        image, image_type = extract_image(pkt)
        if image is not None and image_type is not None:
            # assemble the output path and persist the carved image
            file_name = f'{in_pcap}-pic_carver_{carved_images}.{image_type}'
            img_write_path = f'{args.pictures}/{file_name}'
            print(f'writing original image to: {img_write_path}')
            # ensure the target directory exists before writing
            Path(args.pictures).mkdir(parents=True, exist_ok=True)
            with open(img_write_path, 'wb') as out_img:
                out_img.write(image)
            carved_images += 1
            # attempt face detection; on success write a second copy under args.faces
            try:
                result = face_detection(f'{args.pictures}/{file_name}', file_name)
                if result is True:
                    file_name = f'{in_pcap}-pic_carver_face_{carved_images}.{image_type}'
                    face_img_write_path = f'{args.faces}/{file_name}'
                    print(f'writing facial recognition edited image to: {face_img_write_path}')
                    Path(args.faces).mkdir(parents=True, exist_ok=True)
                    with open(face_img_write_path, 'wb') as out_img:
                        out_img.write(image)
                    faces_detected += 1
            except Exception as ex:
                # a face-detection failure must not abort the whole carve
                print(f'caught exception: {ex.__class__.__name__} - {ex}')
    return carved_images, faces_detected
def main():
    """Carve images from args.infile and report where they went."""
    carved, faces = http_assembler(args.infile)
    print(f'all pictures in: {args.pictures}')
    print(f'pictures with faces in: {args.faces}')
    print(f'carved images: {carved}')
    print(f'faces detected: {faces}')


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 18:26:36 2018

@author: Harsha Vardhan Manoj
"""
import re
import nltk

short_raw = "DENNIS: Listen, strange women lying in ponds distributing swords is \n no basis for a system of government."
# all lower-case entries of the NLTK English word list
wordlist = [w for w in nltk.corpus.words.words('en') if w.islower()]
# BUG FIX: re.findall() requires a string, not a list -- the original call
# raised TypeError.  Run the search over each string individually instead.
# NOTE(review): '<as>' looks like nltk.Text.findall() token-search syntax,
# not a plain regex (no input here contains '<' or '>') -- confirm intent.
print([re.findall('<as>+', w) for w in ['asasas', 'asssssssasa', 'fweffawqer']])
|
import abc
class BaseIR(object):
    """Common root of the IR node hierarchy."""

    def __init__(self) -> None:
        # defer to object.__init__ so cooperative multiple inheritance works
        super().__init__()
class IR(BaseIR):
    """Value-level IR node."""

    def __init__(self) -> None:
        super().__init__()
class TableIR(BaseIR):
    """Table-level IR node."""

    def __init__(self) -> None:
        super().__init__()
class MatrixIR(BaseIR):
    """Matrix-level IR node."""

    def __init__(self) -> None:
        super().__init__()
|
# Extract MFCC from a sound
# 2017-04-02 jkang
# Python3.5
#
# **Prerequisite**
# - Install python_speech_features
# >> https://github.com/jameslyons/python_speech_features
# BUG FIX (idiom): replaced the wildcard `from pylab import*` with explicit
# numpy/matplotlib imports; `plt` and `np` below previously came from the
# star import.
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from python_speech_features import mfcc
from python_speech_features import delta
from python_speech_features import logfbank

# Read sound and plot the waveform (x-axis converted from samples to seconds)
srate, sig = wavfile.read('da_ta.wav')
plt.plot(np.arange(len(sig)) / srate, sig)
plt.title('da_ta.wav')
plt.xlabel('Time (sec)')
plt.ylabel('Amplitude')
plt.show()

# Extract MFCC
winlen = 0.025   # analysis window length in seconds
winstep = 0.01   # hop between successive windows in seconds
numcep = 13      # number of cepstral coefficients
mfcc_raw = mfcc(sig, srate, winlen, winstep, numcep, appendEnergy=True)  # 13-d MFCC
mfcc_deriv1 = delta(mfcc_raw, N=2)  # 1st derivative (delta) features
# 13 static + 13 delta coefficients = 26-dimensional frames
mfccs = np.concatenate((mfcc_raw, mfcc_deriv1), axis=1).astype(np.float32)
plt.imshow(np.rot90(mfccs, axes=(0, 1)), aspect='auto')
plt.title('MFCC values (26 dimension)')
plt.xlabel('Time (msec)')
plt.ylabel('Coefficients')
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.