def dig_sum(n):
    return sum(int(i) for i in str(n))


s_lst = []
for a in range(100):
    for b in range(100):
        s_lst.append(dig_sum(a**b))


def main():
    print(max(s_lst))


if __name__ == '__main__':
    main()
|
# String
s = 'hello'


# Define a function
def function(arg):
    return arg + arg


# Input (Python 2's raw_input; use input() in Python 3)
user_input = raw_input()
# Output
print user_input
# File operations
|
from django.db import models

# __all__ : the names exported when this module is imported with `import *`.
__all__ = (
    'InstagramUser',
)


class InstagramUser(models.Model):
    name = models.CharField(
        max_length=50,
    )
    # Users this user follows: following
    # Users who follow this user: followers
    # The `following` field holds the people this user follows.
    # From the followed user's side, the reverse relation is reached through
    # the related_name, i.e. `followers`.
    # symmetrical: sets whether the relation table is symmetric or asymmetric;
    # False here, because following someone does not imply being followed back.
    following = models.ManyToManyField(
        'self',
        symmetrical=False,
        related_name='followers',
    )

    def __str__(self):
        return self.name
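# Usage sketch (assumes migrations have been applied; the user names are hypothetical):
# alice = InstagramUser.objects.create(name='alice')
# bob = InstagramUser.objects.create(name='bob')
# alice.following.add(bob)          # alice follows bob
# list(alice.following.all())       # -> [<InstagramUser: bob>]
# list(bob.followers.all())         # reverse access via related_name -> [<InstagramUser: alice>]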
|
import logging
import socket
import datetime
import time
import os
import threading
import pdb
import sys
import bisect
import traceback
"""
sys.path.insert(0, "./netfilterlib/")
from netfilterqueue import NetfilterQueue
sys.path.append("scapy")
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
"""
FORMAT = "[%(filename)s:%(lineno)s - %(threadName)s %(funcName)10s] %(levelname)7s %(message)s"
class SingleLevelFilter(logging.Filter):
    def __init__(self, passlevel, reject):
        self.passlevel = passlevel
        self.reject = reject

    def filter(self, record):
        if self.reject:
            return (record.levelno != self.passlevel)
        else:
            return (record.levelno == self.passlevel)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
filelog = logging.FileHandler(filename='debug.out',mode='w')
filelog.setFormatter(logging.Formatter(FORMAT))
filelog.setLevel(logging.DEBUG)
logger.addHandler(filelog)
console = logging.StreamHandler(sys.__stdout__)
console.addFilter(SingleLevelFilter(logging.DEBUG,False))
console.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(console)
# Table = ({rules:actions}, {statistics:value})
## rules = (string,list) -> ip.src, ['if','>','128.114.*.*']
# Rules = (pkt value) (conditional)
## pkt value: header field value, special value for statistics
## conditional: contains the logic, if ip.src == 128.114.62.150
# Actions = Drop, Accept, Cache, Forward
class Table:
    table = ({}, [])

    def __init__(self):
        stats = {}
        ## create stats entry for the 5 NICs
        for i in range(0, 5):
            stra = "eth"
            stats[stra + str(i)] = {}
        ## create each of the statistics to track per NIC
        for i in range(0, 5):
            stats[stra + str(i)]['count'] = 0
            stats[stra + str(i)]['loss'] = 0
            stats[stra + str(i)]['bytes'] = 0
        ## key = pkt header
        ## value = [ conditional + action ]
        rules = {}
        self.table = (rules, stats)

    ## create a new rule/action in the flow table
    ## -1 if the rule is already in place even if the action is different.
    def add_rule(self, key, rule, action):
        if key in self.table[0]:
            for rule_entry in self.table[0][key]:
                if rule_entry[0] == rule:
                    ## same rule already in place
                    return -1
            self.table[0][key].append((rule, action))
            return 0
        else:
            self.table[0][key] = [(rule, action)]
            return 0

    def update_rule(self, key, rule, action):
        if key in self.table[0]:
            for rule_entry in self.table[0][key]:
                if rule_entry[0] == rule:
                    index = self.table[0][key].index(rule_entry)
                    self.table[0][key][index] = (rule, action)
                    return 0
            ## rule not found
            return -1
        ## key not found
        else:
            return -2

    def delete_rule(self, key, rule):
        if key in self.table[0]:
            for rule_entry in self.table[0][key]:
                if rule_entry[0] == rule:
                    index = self.table[0][key].index(rule_entry)
                    del self.table[0][key][index]
                    return 0  # rule removed
            ## rule not found
            return -1
        ## key not found
        else:
            return -2

    def __str__(self):
        rstr = "\nStatistics:\n"
        t = self.table
        for entry in t[1]:
            rstr += "\t" + entry + ":\n"
            for value in t[1][entry]:
                rstr += "\t\t" + value + ":\t" + str(t[1][entry][value]) + "\n"
        rstr += "Rules:\n"
        for entry in t[0]:
            rstr += "\t" + entry + ":\n"
            for value in t[0][entry]:
                rstr += "\t\t" + value[0] + ":\t" + value[1] + "\n"
        #i = 0
        #rstr = ""
        #t = self.table
        #for entry in t:
        #    rstr += "# %s: rule: (%s)\t\taction: (%s)\n" % (i,entry[0], entry[1])
        #return rstr
        return rstr
## need to allow compound and disjoint statements such as ip == X and port == Y or port == Z
flowTable = Table()
flowTable.add_rule("ip.addr","ip.addr == 128.114.58.12","A")
logger.debug(flowTable)
# seqnum = int(sp["TCP"].getfieldval('seq'))
# acknum = int(sp["TCP"].getfieldval('ack'))
# ips = (sp["IP"].getfieldval('src'),sp["IP"].getfieldval('dst'))
def match(sp):
    global flowTable
    # create a list of each key value stored in the packet that can be pulled out
    # iterate over that list and the flowTable list
    # if a header and a rule match, select it (see the match_sketch below)
    #XXX
    pass


def openflow(pkt):
    logger.debug("Handling data packet")
    sp = IP(pkt.get_payload())
    try:
        # if match(sp): apply the selected action here
        pass
    except Exception as e:
        logger.error("error handling packet")
        logger.error(str(e))
        logger.error(traceback.format_exc())
    pkt.accept()
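# A minimal sketch of how match() could be filled in, following the comments
# above. It assumes scapy-style field access (sp["IP"].src) and rule strings of
# the form "ip.addr == <value>" as used in the add_rule() call above; both are
# assumptions, not part of the original script.
def match_sketch(sp):
    headers = {"ip.addr": str(sp["IP"].src)}
    for key, entries in flowTable.table[0].items():
        if key not in headers:
            continue
        for rule, action in entries:
            # naive check: the packet's header value appears in the rule text
            if headers[key] in rule:
                return action
    return None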
"""
def print_and_accept(packet):
print packet
sp = IP(packet.get_payload())
logger.debug("%s:%s -> %s:%s" % (sp[IP].src,sp[TCP].sport,sp[IP].dst,sp[TCP].dport))
packet.accept()
def start_openflow(ilist,qname="NFQUEUE",qval=1):
for interface in ilist:
## if the host is the destination (to forward above ip)
subprocess.call("sudo iptables -I INPUT -i eth%s -j %s --queue-num %s"\
% (interface,qname,int(qval)))
## our base station should use this
subprocess.call("sudo iptables -I FORWARD -i eth%s -j %s --queue-num %s"\
% (interface,qname,int(qval)))
nfqueue = NetfilterQueue()
nfqueue.bind(qval, openflow)
try:
nfqueue.run()
except Exception,e:
logger.error("Error in Snoop start: %s" % str(e))
except KeyboardInterrupt:
return
def debug():
nfqueue = NetfilterQueue()
#nfqueue.bind(1, print_and_accept)
nfqueue.bind(1, openflow)
try:
nfqueue.run()
except Exception,e:
logger.error("Error in Snoop start: %s" % str(e))
logger.error(traceback.format_exc())
logger.info("stopped.")
debug()
"""
|
import Chapter3.BinaryClassifier_2


def evaluation(sgd_clf):
    from sklearn.model_selection import cross_val_score
    # k-fold cross-validation: split into 3 folds, score by accuracy,
    # using the stochastic gradient descent classifier
    result = cross_val_score(sgd_clf, Chapter3.BinaryClassifier_2.X_train, Chapter3.BinaryClassifier_2.y_train_5,
                             cv=3, scoring="accuracy")
    # e.g. [0.96135 0.96385 0.9533]; accuracy is a poor performance metric on
    # imbalanced datasets (where one class vastly outnumbers the others)
    print(result)


def confusionMatrix(sgd_clf):
    from sklearn.model_selection import cross_val_predict
    # return the predictions obtained on each test fold
    y_train_ped = cross_val_predict(sgd_clf, Chapter3.BinaryClassifier_2.X_train,
                                    Chapter3.BinaryClassifier_2.y_train_5, cv=3)
    from sklearn.metrics import confusion_matrix
    confusion_result = confusion_matrix(Chapter3.BinaryClassifier_2.y_train_5, y_train_ped)  # build the confusion matrix
    print(confusion_result)  # e.g. [[53670 909][1146 4275]]
def precisionANDrecall(sgd_clf):
    from sklearn.model_selection import cross_val_predict
    # return the predictions obtained on each test fold
    y_train_ped = cross_val_predict(sgd_clf, Chapter3.BinaryClassifier_2.X_train,
                                    Chapter3.BinaryClassifier_2.y_train_5, cv=3)
    from sklearn.metrics import precision_score, recall_score
    print("precision:", precision_score(Chapter3.BinaryClassifier_2.y_train_5, y_train_ped))
    print("recall:", recall_score(Chapter3.BinaryClassifier_2.y_train_5, y_train_ped))
    from sklearn.metrics import f1_score
    print("f1 score:", f1_score(Chapter3.BinaryClassifier_2.y_train_5, y_train_ped))
    # F-beta score: F = 1 / ((a/precision) + ((1-a)/recall))
    #                 = (b**2 + 1) * precision * recall / ((b**2 * precision) + recall), with b**2 = (1-a) / a
    # b > 1 emphasizes recall, b < 1 emphasizes precision; with b = 1 the score is called the F1 score
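# Quick numeric check of the F-beta formula in the comment above
# (the precision/recall values are made up for illustration):
# precision, recall, b = 0.8, 0.6, 1.0
# (b**2 + 1) * precision * recall / ((b**2 * precision) + recall)  # -> 0.6857...
# which equals the harmonic mean 2*p*r/(p+r), i.e. the F1 score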
def tradeoff(sgd_clf):
    # return the decision score of a single sample
    y_scores = sgd_clf.decision_function(Chapter3.BinaryClassifier_2.X_train[0].reshape(1, -1))
    print(Chapter3.BinaryClassifier_2.y_train[0], "score:", y_scores)
    threshold = 0
    y_some_digit_pred = (y_scores > threshold)
    print(y_some_digit_pred)  # True if the score is above the threshold, False otherwise
    threshold = 20000  # raise the threshold
    y_some_digit_pred = (y_scores > threshold)
    print(y_some_digit_pred)  # True if the score is above the threshold, False otherwise
    from sklearn.model_selection import cross_val_predict
    # compute the decision score of every sample in the training set
    y_scores = cross_val_predict(sgd_clf, Chapter3.BinaryClassifier_2.X_train, Chapter3.BinaryClassifier_2.y_train_5,
                                 cv=3, method="decision_function")
    from sklearn.metrics import precision_recall_curve
    # precision and recall for every possible threshold
    precisions, recalls, thresholds = precision_recall_curve(Chapter3.BinaryClassifier_2.y_train_5, y_scores)

    def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):  # plot precision and recall against the threshold
        import matplotlib.pyplot as plt
        plt.plot(thresholds, precisions[:-1], "b--", label="precision")
        plt.plot(thresholds, recalls[:-1], "g--", label="recalls")
        plt.xlabel("thresholds")
        plt.legend(loc="center left")
        plt.ylim([0, 1])
        plt.show()

    plot_precision_recall_vs_threshold(precisions, recalls, thresholds)

    def plot_precision_recall_and_threshold(precisions, recalls):  # plot precision against recall
        import matplotlib.pyplot as plt
        # see how precision changes as recall changes
        plt.title("Relation between precision and recall")
        plt.plot(recalls[:-1], precisions[:-1], "b--", label="precision")
        plt.xlabel("recalls")
        plt.ylabel("precisions")
        plt.ylim([0, 1])
        plt.xlim([0, 1])
        plt.show()

    plot_precision_recall_and_threshold(precisions, recalls)

    # to get precision above 90, the threshold needs to be around 5000 or higher
    y_train_90 = (y_scores > 3000)
    from sklearn.metrics import precision_score, recall_score
    # a very low recall makes even a high precision useless
    print("precision:", precision_score(Chapter3.BinaryClassifier_2.y_train_5, y_train_90))
    print("recall:", recall_score(Chapter3.BinaryClassifier_2.y_train_5, y_train_90))


### When to prefer the PR curve ###
# 1. the positive class is rare
# 2. false positives matter more than false negatives
def rocCurve(sgd_clf):
    from sklearn.model_selection import cross_val_predict
    # compute the decision score of every sample in the training set
    y_scores = cross_val_predict(sgd_clf, Chapter3.BinaryClassifier_2.X_train, Chapter3.BinaryClassifier_2.y_train_5,
                                 cv=3, method="decision_function")
    from sklearn.metrics import roc_curve
    # false positive rate, true positive rate, and thresholds
    fpr, tpr, thresholds = roc_curve(Chapter3.BinaryClassifier_2.y_train_5, y_scores)

    def plot_roc_curve(fpr, tpr, label=None):  # plot the ROC curve
        from matplotlib import pyplot as plt
        # the closer the area under the ROC curve (AUC) is to 1, the better
        plt.plot(fpr, tpr, linewidth=2, label=label)
        # the diagonal is the ROC curve of a purely random classifier;
        # a good classifier stays as far from this line as possible
        plt.plot([0, 1], [0, 1], "k--")
        plt.axis([0, 1, 0, 1])  # axis ranges: axis([xmin, xmax, ymin, ymax])
        plt.xlabel("False positive rate")
        plt.ylabel("True positive rate")
        compareRndForest(fpr, tpr, "RandomForest")
        plt.legend(loc="lower right")
        plt.show()

    def compareRndForest(fpr, tpr, label=None):  # plot the random forest's ROC curve for comparison
        from sklearn.ensemble import RandomForestClassifier
        forest_clf = RandomForestClassifier(random_state=42)
        y_probas_forest = cross_val_predict(forest_clf, Chapter3.BinaryClassifier_2.X_train,
                                            Chapter3.BinaryClassifier_2.y_train_5, cv=3,
                                            method="predict_proba")
        # RandomForestClassifier has no decision_function; use predict_proba,
        # which returns the probability that a sample belongs to each class
        y_scores_forest = y_probas_forest[:, 1]  # use the positive-class probability as the score
        fpr_forest, tpr_forest, thresholds_forest = roc_curve(Chapter3.BinaryClassifier_2.y_train_5, y_scores_forest)
        from matplotlib import pyplot as plt
        plt.plot(fpr_forest, tpr_forest, label=label)
        from sklearn.metrics import roc_auc_score
        print("RandomForest's AUC :", roc_auc_score(Chapter3.BinaryClassifier_2.y_train_5, y_scores_forest))  # ROC AUC

    plot_roc_curve(fpr, tpr, "SGD")
    from sklearn.metrics import roc_auc_score
    print("SGD's AUC :", roc_auc_score(Chapter3.BinaryClassifier_2.y_train_5, y_scores))  # ROC AUC
if __name__ == '__main__':
    sgd_clf = Chapter3.BinaryClassifier_2.getBinaryClassifier()
    # evaluation(sgd_clf)
    # confusionMatrix(sgd_clf)
    # precisionANDrecall(sgd_clf)
    # tradeoff(sgd_clf)
    rocCurve(sgd_clf)
|
# Date: 10/09/2020
# Author: rohith mulumudy
# Description: manages the file structure
import os


class Files:
    def __init__(self, directory):
        self.directory = directory

    def create_file_structure(self, round_num):
        if not os.path.isdir(self.directory):
            os.mkdir(self.directory)
        if not os.path.isdir("{}/{:02d}-round".format(self.directory, round_num)):
            os.mkdir("{}/{:02d}-round".format(self.directory, round_num))
        if not os.path.isdir("{}/{:02d}-round/redirection_domains".format(self.directory, round_num)):
            os.mkdir("{}/{:02d}-round/redirection_domains".format(self.directory, round_num))
        if not os.path.isdir("{}/{:02d}-round/unreachable_domains".format(self.directory, round_num)):
            os.mkdir("{}/{:02d}-round/unreachable_domains".format(self.directory, round_num))
        if not os.path.isdir("{}/{:02d}-round/san_domains".format(self.directory, round_num)):
            os.mkdir("{}/{:02d}-round/san_domains".format(self.directory, round_num))
        if not os.path.isdir("{}/{:02d}-round/input".format(self.directory, round_num)):
            os.mkdir("{}/{:02d}-round/input".format(self.directory, round_num))
        open("{}/{:02d}-round/redirection_domains/status_redirection.txt".format(self.directory, round_num), 'w').close()
        open("{}/{:02d}-round/status_code_200.txt".format(self.directory, round_num), 'w').close()
        open("{}/{:02d}-round/unreachable_domains/status_code_404.txt".format(self.directory, round_num), 'w').close()
        open("{}/{:02d}-round/unreachable_domains/status_others.txt".format(self.directory, round_num), 'w').close()
        open("{}/{:02d}-round/unreachable_domains/status_errors.txt".format(self.directory, round_num), 'w').close()

    def copy_input_file(self, in_file, round_num):
        os.system("cp {} {}/{:02d}-round/input/hosts.txt".format(in_file, self.directory, round_num))
|
import argparse
from datetime import datetime
import torch
from torch.optim import Adam
from torchsummary import summary # pip install torchsummary
from nn import BengaliNet, CrossEntropySumLoss, LabelSmoothingLoss
from optim import optimize, ReduceLROnPlateau
from utils.data import load_data
from utils.tensorboard import MetricWriter
def handle_arguments():
"""Handles console arguments. `python main.py --help` gives an overview.
Returns [Namespace]:
images = [str] path to images .npy file
labels = [str] path to labels CSV file
test_ratio = [float] proportion of data for testing
seed = [int] seed used for data splitting
data_augmentation = [bool] whether to augment the training images
drop_info_fn = [str] which info dropping algorithm to use
class_balancing = [bool] whether to perform class balancing
batch_size = [int] number of images in a batch
label_smoothing = [bool] whether to use soft targets for the loss
epochs = [int] number of iterations over the training data
model = [str] path to save the trained model
"""
# process the command options
parser = argparse.ArgumentParser()
parser.add_argument('images', type=str, help='provide path in style: '
r'"kaggle\input\bengaliai-cv19\images.npy"')
parser.add_argument('labels', type=str, help='provide path in style: '
r'"kaggle\input\bengaliai-cv19\labels.csv"')
parser.add_argument('-t', '--test_ratio', type=float, default=0.2,
help='proportion of data for testing, default: 0.2')
parser.add_argument('-s', '--seed', type=int, default=None, help='seed '
'used for consistent data splitting, default: None')
parser.add_argument('-a', '--data_augmentation', action='store_true',
help='switch to augment the images')
drop_info_fns = ['cutout', 'gridmask', 'None'] # info dropping algorithms
parser.add_argument('-d', '--drop_info_fn', type=str, choices=drop_info_fns,
default=None, help='whether cutout, GridMask, or no '
'information dropping algorithm is used, default: None')
parser.add_argument('-c', '--class_balancing', action='store_true',
help='switch to perform class balancing')
parser.add_argument('-b', '--batch_size', type=int, default=32,
help='batch size of DataLoader objects, default: 32')
parser.add_argument('-l', '--label_smoothing', action='store_true',
help='switch to use soft targets in loss computation')
parser.add_argument('-e', '--epochs', type=int, default=50, help='number '
'of iterations over training data, default: 50')
parser.add_argument('-m', '--model', type=str, default='model.pt',
help='path to save trained model, default: "model.pt"')
# parse and print arguments
args = parser.parse_args()
for arg in vars(args):
print(f'{arg.upper()}: {getattr(args, arg)}')
return args
if __name__ == '__main__':
# get console arguments
args = handle_arguments()
# load training and validation data
data = load_data(args.images,
args.labels,
args.test_ratio, args.seed,
args.data_augmentation, args.drop_info_fn,
args.class_balancing,
args.batch_size)
train_dataset, train_loader, val_loader, image_size = data
# use GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('DEVICE:', device)
# initialize network and show summary
model = BengaliNet(device).train()
summary(model, input_size=(1, image_size, image_size), device=str(device))
# initialize optimizer, scheduler, and criterion
optimizer = Adam([
{'params': list(model.parameters())[-6:-4], 'lr': 0.001},
{'params': list(model.parameters())[-4:-2], 'lr': 0.001},
{'params': list(model.parameters())[-2:], 'lr': 0.001},
{'params': list(model.parameters())[:-6], 'lr': 0.001},
])
scheduler = ReduceLROnPlateau(optimizer)
if args.label_smoothing:
criterion = LabelSmoothingLoss(device, 0.1)
else:
criterion = CrossEntropySumLoss(device)
# TensorBoard writers
current_time = datetime.now().strftime("%Y-%m-%d/%H'%M'%S")
train_writer = MetricWriter(device, f'runs/{current_time}/train')
train_writer.add_graph(model, next(iter(train_loader))[0]) # show model
val_writer = MetricWriter(device, f'runs/{current_time}/validation')
# train and validate model
optimize(model,
train_dataset, train_loader, train_writer,
val_loader, val_writer,
optimizer, scheduler,
criterion,
args.epochs,
args.model)
# close TensorBoard writers to flush communication
train_writer.close()
val_writer.close()
|
import numpy as np
import pytest
import math
from sklearn.base import clone
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
import doubleml as dml
from ._utils import draw_smpls
from ._utils_iivm_manual import fit_iivm, boot_iivm
@pytest.fixture(scope='module',
params=[[RandomForestRegressor(max_depth=2, n_estimators=10),
RandomForestClassifier(max_depth=2, n_estimators=10)]])
def learner(request):
return request.param
@pytest.fixture(scope='module',
params=['LATE'])
def score(request):
return request.param
@pytest.fixture(scope='module',
params=[1, 2])
def n_folds(request):
return request.param
@pytest.fixture(scope="module")
def dml_iivm_no_cross_fit_fixture(generate_data_iivm, learner, score, n_folds):
boot_methods = ['normal']
n_rep_boot = 491
dml_procedure = 'dml1'
# collect data
data = generate_data_iivm
x_cols = data.columns[data.columns.str.startswith('X')].tolist()
# Set machine learning methods for m & g
ml_g = clone(learner[0])
ml_m = clone(learner[1])
ml_r = clone(learner[1])
np.random.seed(3141)
obj_dml_data = dml.DoubleMLData(data, 'y', ['d'], x_cols, 'z')
dml_iivm_obj = dml.DoubleMLIIVM(obj_dml_data,
ml_g, ml_m, ml_r,
n_folds,
dml_procedure=dml_procedure,
apply_cross_fitting=False)
dml_iivm_obj.fit()
np.random.seed(3141)
y = data['y'].values
x = data.loc[:, x_cols].values
d = data['d'].values
z = data['z'].values
if n_folds == 1:
smpls = [(np.arange(len(y)), np.arange(len(y)))]
else:
n_obs = len(y)
all_smpls = draw_smpls(n_obs, n_folds)
smpls = all_smpls[0]
smpls = [smpls[0]]
res_manual = fit_iivm(y, x, d, z,
clone(learner[0]), clone(learner[1]), clone(learner[1]),
[smpls], dml_procedure, score)
res_dict = {'coef': dml_iivm_obj.coef,
'coef_manual': res_manual['theta'],
'se': dml_iivm_obj.se,
'se_manual': res_manual['se'],
'boot_methods': boot_methods}
for bootstrap in boot_methods:
np.random.seed(3141)
boot_theta, boot_t_stat = boot_iivm(y, d, z, res_manual['thetas'], res_manual['ses'],
res_manual['all_g_hat0'], res_manual['all_g_hat1'],
res_manual['all_m_hat'], res_manual['all_r_hat0'], res_manual['all_r_hat1'],
[smpls], score, bootstrap, n_rep_boot,
apply_cross_fitting=False)
np.random.seed(3141)
dml_iivm_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
res_dict['boot_coef' + bootstrap] = dml_iivm_obj.boot_coef
res_dict['boot_t_stat' + bootstrap] = dml_iivm_obj.boot_t_stat
res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
return res_dict
@pytest.mark.ci
def test_dml_iivm_no_cross_fit_coef(dml_iivm_no_cross_fit_fixture):
assert math.isclose(dml_iivm_no_cross_fit_fixture['coef'],
dml_iivm_no_cross_fit_fixture['coef_manual'],
rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_iivm_no_cross_fit_se(dml_iivm_no_cross_fit_fixture):
assert math.isclose(dml_iivm_no_cross_fit_fixture['se'],
dml_iivm_no_cross_fit_fixture['se_manual'],
rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_iivm_no_cross_fit_boot(dml_iivm_no_cross_fit_fixture):
for bootstrap in dml_iivm_no_cross_fit_fixture['boot_methods']:
assert np.allclose(dml_iivm_no_cross_fit_fixture['boot_coef' + bootstrap],
dml_iivm_no_cross_fit_fixture['boot_coef' + bootstrap + '_manual'],
rtol=1e-9, atol=1e-4)
assert np.allclose(dml_iivm_no_cross_fit_fixture['boot_t_stat' + bootstrap],
dml_iivm_no_cross_fit_fixture['boot_t_stat' + bootstrap + '_manual'],
rtol=1e-9, atol=1e-4)
|
import tensorflow as tf
import numpy as np
import os
import sys
from PIL import Image, ImageOps
path = '/content/base/'
def image_preprocessing(filename, x_size, y_size):
im = Image.open(filename)
if filename.endswith('.png'):
im = im.convert('RGB')
downsampled_im = ImageOps.fit(im, (x_size, y_size), method=Image.LANCZOS)
norm_im = np.array(downsampled_im, dtype=np.float32)/255.
downsampled_im.close()
im.close()
return norm_im
if __name__ == '__main__':
names = []
for name in os.listdir(path):
if name.endswith('.jpg'):
names.append(name[:-4])
dataset_X = np.zeros((len(names), 256, 256, 3))
dataset_Y = np.zeros((len(names), 256, 256, 3))
for i in range(len(names)):
print(names[i])
dataset_X[i] = image_preprocessing(os.path.join(path, names[i] + '.jpg'), 256, 256)
dataset_Y[i] = image_preprocessing(os.path.join(path, names[i] + '.png'), 256, 256)
np.save('dataset_X.npy', dataset_X)
np.save('dataset_Y.npy', dataset_Y)
|
import numpy as np
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects.packages import importr
stepR = importr('stepR')
# x : np.ndarray
# t : np.ndarray
# alpha : float
# -> ([x.dtype], [t.dtype], [(t.dtype, t.dtype)])
def smuce_r(x, t, alpha):
    res = stepR.stepFit(x, x=t, alpha=alpha, jumpint=True)
    return list(res.rx2('value')), list(res.rx2('leftEnd')), list(zip(res.rx2('leftEndLeftBound'), res.rx2('leftEndRightBound')))
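# Usage sketch (synthetic data; requires the R package `stepR` to be installed):
# t = np.linspace(0, 1, 200)
# x = np.where(t < 0.5, 0.0, 1.0) + np.random.normal(0, 0.1, t.size)
# values, left_ends, jump_intervals = smuce_r(x, t, alpha=0.05)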
|
# coding: utf-8
####################################
# First example (MyButton shared via Globals)
####################################
if "IronPython ran" == MyButton.Parent.Text:
    MyButton.Text = "Using Globals (1st.py)"
    MyButton.Parent.Text = "IronPython"
elif "IronPython" == MyButton.Parent.Text:
    MyButton.Text = "Run the script"
    MyButton.Parent.Text = "IronPython ran"
|
import pandas as pd
import numpy as np
import glob
import datetime
sonde = pd.read_csv('All Sonde Output.csv', header=0)
GPS = pd.read_csv('Final out.csv', header=0)
CO2 = pd.read_csv('Final CO2.csv', header=0)
sondeCO2 = pd.merge(sonde, CO2, on='time')
print(sondeCO2)
|
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.layers.merge import concatenate
from keras.models import Sequential, Model
from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape
from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D, BatchNormalization
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional, SpatialDropout1D
from keras.utils.np_utils import to_categorical
from keras import initializers
from keras import backend as K
from keras.engine.topology import Layer
from keras.optimizers import Adam
from keras import callbacks
from keras.models import load_model
from keras.utils import Sequence
import numpy as np
from gensim.models import word2vec
import random
import os
import sys
import pandas as pd
import csv
import jieba
jieba.set_dictionary(sys.argv[4])
wordlist_path = "x_word.txt"
x_word = open(wordlist_path, "wb")
# training data
df = pd.read_csv(sys.argv[1])
for sentence in df["comment"]:
seg_list = jieba.cut(sentence.replace(" ", ""), cut_all=False)
x_word.write( " ".join(seg_list).encode('utf-8') )
x_word.write(b'\n')
# testing data
df = pd.read_csv(sys.argv[3])
for sentence in df["comment"]:
seg_list = jieba.cut(sentence.replace(" ", ""), cut_all=False)
x_word.write( " ".join(seg_list).encode('utf-8') )
x_word.write(b'\n')
x_word.close()
x_word = list()
f = open(wordlist_path, "r")
for line in f:
x_word.append(line[:-1])
MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=None)
tokenizer.fit_on_texts(x_word)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
count_thres = 3
low_count_words = [w for w,c in tokenizer.word_counts.items() if c < count_thres]
for w in low_count_words:
del tokenizer.word_index[w]
del tokenizer.word_docs[w]
del tokenizer.word_counts[w]
sequences = tokenizer.texts_to_sequences(x_word)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', data.shape)
yf = pd.read_csv(sys.argv[2])
raw_y = yf["label"]
train_y = to_categorical(raw_y)
MAX_NB_WORDS = len(word_index) + 1
EMBEDDING_DIM = 300
wordlist = word2vec.LineSentence(wordlist_path)
model = word2vec.Word2Vec(
wordlist,
size=EMBEDDING_DIM,
window=3,
min_count=3,
workers=8)
model.train(wordlist, total_examples=len(x_word), epochs=30)
weight_matrix = np.zeros((MAX_NB_WORDS, EMBEDDING_DIM))
vocab = tokenizer.word_index
for word, i in vocab.items():
try:
weight_matrix[i] = model.wv[word]
except KeyError:
np.random.seed(66) # fixed seed for unknown
weight_matrix[i]=np.random.normal(0,np.sqrt(0.25),EMBEDDING_DIM)
del model
class Attention(Layer):
def __init__(self, attention_size, **kwargs):
self.attention_size = attention_size
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
# W: (EMBED_SIZE, ATTENTION_SIZE)
# b: (ATTENTION_SIZE, 1)
# u: (ATTENTION_SIZE, 1)
self.W = self.add_weight(name="W_{:s}".format(self.name),
shape=(input_shape[-1], self.attention_size),
initializer="glorot_normal",
trainable=True)
self.b = self.add_weight(name="b_{:s}".format(self.name),
shape=(input_shape[1], 1),
initializer="zeros",
trainable=True)
self.u = self.add_weight(name="u_{:s}".format(self.name),
shape=(self.attention_size, 1),
initializer="glorot_normal",
trainable=True)
super(Attention, self).build(input_shape)
def call(self, x, mask=None):
# input: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
# et: (BATCH_SIZE, MAX_TIMESTEPS, ATTENTION_SIZE)
et = K.tanh(K.dot(x, self.W) + self.b)
# at: (BATCH_SIZE, MAX_TIMESTEPS)
at = K.softmax(K.squeeze(K.dot(et, self.u), axis=-1))
if mask is not None:
at *= K.cast(mask, K.floatx())
# ot: (BATCH_SIZE, MAX_TIMESTEPS, EMBED_SIZE)
atx = K.expand_dims(at, axis=-1)
ot = atx * x
# output: (BATCH_SIZE, EMBED_SIZE)
output = K.sum(ot, axis=1)
return output
def compute_mask(self, input, input_mask=None):
return None
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[-1])
def get_config(self):
base_config = super(Attention, self).get_config()
base_config["attention_size"] = self.attention_size
return base_config
def get_model(seed):
random.seed(seed)
lr = random.uniform(0.0001, 0.00005)
CNN = random.choice([True, False])
Conv_size = random.choice([64, 128, 256, 512])
LSTM_size = random.choice([128, 256, 512, 1024])
Dense_size = random.choice([128, 256, 512, 1024])
drop1 = random.uniform(0.1, 0.3)
drop2 = random.uniform(0.1, 0.3)
drop3 = random.uniform(0.2, 0.5)
recur_drop = random.uniform(0.1, 0.3)
adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model = Sequential()
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[weight_matrix], input_length=data.shape[1], trainable=True))
if CNN == True:
model.add(Convolution1D(Conv_size, 3, padding='same', strides = 1))
model.add(Activation('relu'))
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(drop1))
model.add(Bidirectional(LSTM(LSTM_size, dropout=drop2, recurrent_dropout=recur_drop, return_sequences=True)))
model.add(Attention(100))
model.add(Dense(Dense_size, activation='relu'))
model.add(Dropout(drop3))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
return model
epochs = 20
batch_size = 64
model = get_model(11)
filepath = "models/{epoch:02d}-{acc:.4f}-{val_acc:.4f}.h5"
earlystop = callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=2, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(data[:119018,:], train_y[:119018,:], epochs=epochs, batch_size=batch_size,validation_split=0.1, callbacks=[earlystop])
|
"""
realizar la multiplicacion de numeros de
una lista
Tarea 17
"""
def multiplicacion(lista):
res = 1
for i in range(len(lista)):
res = res * lista[i]
return res
prueba1 = [1,2,3,4]#24
prueba2 = [1,2]#2
print(multiplicacion(prueba1))
print(multiplicacion(prueba2))
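# Equivalent one-liner using the standard library (Python 3.8+):
# import math
# print(math.prod(prueba1))  # 24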
|
from server import app
from flask import jsonify, request
from api.model.signboard import Signboard


@app.route('/signboards')
def get_signboard():
    r = Signboard.query.all()
    return jsonify({"data": r})


@app.route('/signboard/<string:customer_code>/<string:signboard_code>', methods=['PUT'])
def put_signboard(customer_code, signboard_code):
    # the signboard to update
    signboard = Signboard.query.filter_by(signboard_code=signboard_code).first()
    # the data to put on the signboard
    data = request.get_json()
    # saving in database
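    # Sketch of the missing update/save step. It assumes `server` also exposes a
    # Flask-SQLAlchemy handle named `db` and that the JSON keys match Signboard
    # columns; both are assumptions, not confirmed by the original snippet.
    from server import db
    for key, value in (data or {}).items():
        setattr(signboard, key, value)
    db.session.commit()
    return jsonify({"data": signboard_code})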
|
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail


def sendEmail(body):
    message = Mail(
        from_email='jhalv001@ucr.edu',
        to_emails='jhalvorson6687@gmail.com',
        subject='RMailbox Package Notification ',
        html_content=body)
    try:
        sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
        response = sg.send(message)
        print(response.status_code, response.body, response.headers)
    except Exception as e:
        print(e)
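# Usage sketch (requires SENDGRID_API_KEY to be set in the environment):
# sendEmail("<p>A package has arrived in your mailbox.</p>")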
|
import sys
import scikit_posthocs as sp
import re
import scipy.stats as ss
from scipy import stats
from numpy import *
alpha=0.05
def check_means(file1, file2):
mean1 = mean(file1)
mean2 = mean(file2)
if mean1 > mean2:
return 1
elif mean1 < mean2:
return -1
else:
return 0
def process_instance(list_files):
##Load each algorithm's result file into its own array
Wins=[0]*len(list_files)
Losts=[0]*len(list_files)
Ties=[0]*len(list_files)
data = list()
for i, val1 in enumerate(list_files):
data.append(loadtxt(val1))
##Run the Kruskal-Wallis test; a p-value above alpha means we fail to reject the null hypothesis
T, p = ss.kruskal(*data)
##null hypothesis rejected: run a post-hoc analysis (Dunn's test with Hommel correction)
if p < alpha:
ptable = sp.posthoc_dunn(data, p_adjust = 'hommel')
for i in range(0,len(list_files)):
val1=list_files[i]
for j in range(0, i):
val2=list_files[j]
if ptable.iat[i,j] > alpha:
Ties[i]+=1
Ties[j]+=1
else:
if check_means(loadtxt(val1),loadtxt(val2)) == 1:
Wins[i]+=1
Losts[j]+=1
elif check_means(loadtxt(val1),loadtxt(val2)) == -1:
Wins[j]+=1
Losts[i]+=1
else:
Ties[i] +=1
Ties[j] +=1
else:
Ties[0:len(list_files)] += ones(len(list_files))*(len(list_files)-1)
for i, val1 in enumerate(list_files):
sys.stdout.write(" "+str(Wins[i])+" "+str(Losts[i])+" "+str(Ties[i]))
sys.stdout.flush()
print("")
def process_instance_info(list_files):
##Load each algorithm's result file into its own array
Wins=[0]*len(list_files)
Losts=[0]*len(list_files)
Ties=[0]*len(list_files)
data = list()
for i, val1 in enumerate(list_files):
data.append(loadtxt(val1))
##Run the Kruskal-Wallis test; a p-value above alpha means we fail to reject the null hypothesis
T, p = ss.kruskal(*data)
bestv=-100000
#bestv=100000
for i, val1 in enumerate(list_files):
#bestv = min(bestv, median(loadtxt(val1)))
bestv = max(bestv, median(loadtxt(val1)))
for i, val1 in enumerate(list_files):
sys.stdout.write(" "+str(abs(bestv-median(loadtxt(val1)))))
sys.stdout.flush()
print("")
def Kruskal_Posthoc_MannWhitney_score():
list_files = open(sys.argv[1], "r")
for inst in list_files.read().split('\n'):
if len(inst) == 0:
print(inst)
continue
problem = inst.split(' ')[0]
algorithms = inst.split(' ')[1:]
while("" in algorithms): algorithms.remove("") ##remove empty elements
sys.stdout.write(problem)
sys.stdout.flush()
process_instance(algorithms)
def Kruskal_Posthoc_MannWhitney_info():
print("Kruskal test with posthoc MannWhitney and holm correction")
list_files = open(sys.argv[1], "r")
for inst in list_files.read().split('\n'):
if len(inst)==0:
continue
problem = inst.split(' ')[0]
algorithms = inst.split(' ')[1:]
while("" in algorithms): algorithms.remove("") ##remove empty elements
sys.stdout.write(problem)
sys.stdout.flush()
process_instance_info(algorithms)
#####################################MAIN####################
##Load the entire file
Kruskal_Posthoc_MannWhitney_score()
Kruskal_Posthoc_MannWhitney_info()
|
a = input("Enter a string: ")
if len(a) % 2:
    a1 = a[len(a) // 2 + 1:] + a[:len(a) // 2 + 1]
else:
    a1 = a[len(a) // 2:] + a[:len(a) // 2]
print(a1)
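# Example: "abcd" -> "cdab"; "abcde" -> "deabc" (the halves are swapped, and for
# odd lengths the middle character moves together with the first half).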
|
#!/bin/python2.7
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: fmount <francesco.pantano@linux.com>
from keystoneauth1.identity import v2
from keystoneauth1 import session
from keystoneclient.v2_0 import client
from prettytable import PrettyTable
from user import User
class Wkeystone():
    # Just to make sure there is only one client
    __instance = None

    def __init__(self, u, debug):
        if Wkeystone.__instance:
            raise Exception("Just one client per session allowed!")
        Wkeystone.__instance = self
        self.user = u
        self.debug = debug
        auth = v2.Password(username=u.name, password=u.password,
                           tenant_name=u.tenant_name, auth_url=u.endpoint)
        self.s = session.Session(auth=auth)
        self.keystone = client.Client(session=self.s)

    def __str__(self):
        # __str__ must return a string rather than print it
        return str(self.keystone)

    def saysomething(self):
        print "I exist"

    def print_tenant_list(self):
        print(self.keystone.tenants.list())
|
import os
import re
import random
import math
import torch
import torch.nn as nn
import numpy as np
from torch.autograd.function import Function
import cv2
INTER_MODE = {'NEAREST': cv2.INTER_NEAREST, 'BILINEAR': cv2.INTER_LINEAR, 'BICUBIC': cv2.INTER_CUBIC}
class CenterLoss(nn.Module):
def __init__(self, num_classes, feat_dim, size_average=True):
super(CenterLoss, self).__init__()
self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))
self.centerlossfunc = CenterlossFunc.apply
self.feat_dim = feat_dim
self.size_average = size_average
def forward(self, label, feat):
batch_size = feat.size(0)
feat = feat.view(batch_size, -1)
# To check the dim of centers and features
if feat.size(1) != self.feat_dim:
raise ValueError("Center's dim: {0} should be equal to input feature's \
dim: {1}".format(self.feat_dim,feat.size(1)))
batch_size_tensor = feat.new_empty(1).fill_(batch_size if self.size_average else 1)
loss = self.centerlossfunc(feat, label, self.centers, batch_size_tensor)
return loss
class CenterlossFunc(Function):
@staticmethod
def forward(ctx, feature, label, centers, batch_size):
ctx.save_for_backward(feature, label, centers, batch_size)
centers_batch = centers.index_select(0, label.long())
return (feature - centers_batch).pow(2).sum() / 2.0 / batch_size
@staticmethod
def backward(ctx, grad_output):
feature, label, centers, batch_size = ctx.saved_tensors
centers_batch = centers.index_select(0, label.long())
diff = centers_batch - feature
# init every iteration
counts = centers.new_ones(centers.size(0))
ones = centers.new_ones(label.size(0))
grad_centers = centers.new_zeros(centers.size())
counts = counts.scatter_add_(0, label.long(), ones)
grad_centers.scatter_add_(0, label.unsqueeze(1).expand(feature.size()).long(), diff)
grad_centers = grad_centers/counts.view(-1, 1)
return - grad_output * diff / batch_size, None, grad_centers / batch_size, None
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon,ss):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
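# Usage sketch for the label-smoothing loss above (shapes are illustrative; the
# unused `ss` argument is passed only because the constructor requires it):
# criterion = CrossEntropyLabelSmooth(num_classes=10, epsilon=0.1, ss=None)
# logits = torch.randn(4, 10)
# labels = torch.randint(0, 10, (4,))
# loss = criterion(logits, labels)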
def mixup_data(x, y, alpha=0.2, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
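# Minimal usage sketch for the mixup helpers above (train_loader and model are
# hypothetical names, not defined in this file):
# inputs, targets = next(iter(train_loader))
# mixed_x, y_a, y_b, lam = mixup_data(inputs, targets, alpha=0.2, use_cuda=False)
# loss = mixup_criterion(nn.CrossEntropyLoss(), model(mixed_x), y_a, y_b, lam)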
class RandomErasing:
"""Random erasing the an rectangle region in Image.
Class that performs Random Erasing in Random Erasing Data Augmentation by Zhong et al.
Args:
sl: min erasing area region
sh: max erasing area region
r1: min aspect ratio range of earsing region
p: probability of performing random erasing
"""
def __init__(self, p=0.5, sl=0.02, sh=0.4, r1=0.3):
self.p = p
self.s = (sl, sh)
self.r = (r1, 1/r1)
def __call__(self, img):
"""
perform random erasing
Args:
img: opencv numpy array in form of [w, h, c] range
from [0, 255]
Returns:
erased img
"""
assert len(img.shape) == 3, 'image should be a 3 dimension numpy array'
if random.random() > self.p:
return img
else:
while True:
Se = random.uniform(*self.s) * img.shape[0] * img.shape[1]
re = random.uniform(*self.r)
He = int(round(math.sqrt(Se * re)))
We = int(round(math.sqrt(Se / re)))
xe = random.randint(0, img.shape[1])
ye = random.randint(0, img.shape[0])
if xe + We <= img.shape[1] and ye + He <= img.shape[0]:
img[ye : ye + He, xe : xe + We, :] = np.random.randint(low=0, high=255, size=(He, We, img.shape[2]))
return img
if __name__ == "__main__":
img = cv2.imread("test.jpg")
RE = RandomErasing(p=0.5)
for i in range(20):
img1 = RE(img.copy())
cv2.imshow("test", img1)
cv2.waitKey(1000)
|
# This file is part of beets.
# Copyright 2016, Fabrice Laporte
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Abstraction layer to resize images using PIL, ImageMagick, or a
public resizing proxy if neither is available.
"""
from itertools import chain
import subprocess
import os
import os.path
import platform
import re
from tempfile import NamedTemporaryFile
from urllib.parse import urlencode
from beets import logging
from beets import util
from beets.util import bytestring_path, displayable_path, py3_path, syspath
PROXY_URL = 'https://images.weserv.nl/'
log = logging.getLogger('beets')
def resize_url(url, maxwidth, quality=0):
"""Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio).
"""
params = {
'url': url.replace('http://', ''),
'w': maxwidth,
}
if quality > 0:
params['q'] = quality
return '{}?{}'.format(PROXY_URL, urlencode(params))
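# Example: resize_url('http://example.com/cover.jpg', 600, quality=80)
# -> 'https://images.weserv.nl/?url=example.com%2Fcover.jpg&w=600&q=80'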
def temp_file_for(path):
"""Return an unused filename with the same extension as the
specified path.
"""
ext = os.path.splitext(path)[1]
with NamedTemporaryFile(suffix=py3_path(ext), delete=False) as f:
return bytestring_path(f.name)
class LocalBackendNotAvailableError(Exception):
pass
_NOT_AVAILABLE = object()
class LocalBackend:
@classmethod
def available(cls):
try:
cls.version()
return True
except LocalBackendNotAvailableError:
return False
class IMBackend(LocalBackend):
NAME = "ImageMagick"
# These fields are used as a cache for `version()`. `_legacy` indicates
# whether the modern `magick` binary is available or whether to fall back
# to the old-style `convert`, `identify`, etc. commands.
_version = None
_legacy = None
@classmethod
def version(cls):
"""Obtain and cache ImageMagick version.
Raises `LocalBackendNotAvailableError` if not available.
"""
if cls._version is None:
for cmd_name, legacy in (('magick', False), ('convert', True)):
try:
out = util.command_output([cmd_name, "--version"]).stdout
except (subprocess.CalledProcessError, OSError) as exc:
log.debug('ImageMagick version check failed: {}', exc)
cls._version = _NOT_AVAILABLE
else:
if b'imagemagick' in out.lower():
pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out)
if match:
cls._version = (int(match.group(1)),
int(match.group(2)),
int(match.group(3)))
cls._legacy = legacy
if cls._version is _NOT_AVAILABLE:
raise LocalBackendNotAvailableError()
else:
return cls._version
def __init__(self):
"""Initialize a wrapper around ImageMagick for local image operations.
Stores the ImageMagick version and legacy flag. If ImageMagick is not
available, raise an Exception.
"""
self.version()
# Use ImageMagick's magick binary when it's available.
# If it's not, fall back to the older, separate convert
# and identify commands.
if self._legacy:
self.convert_cmd = ['convert']
self.identify_cmd = ['identify']
self.compare_cmd = ['compare']
else:
self.convert_cmd = ['magick']
self.identify_cmd = ['magick', 'identify']
self.compare_cmd = ['magick', 'compare']
def resize(self, maxwidth, path_in, path_out=None, quality=0,
max_filesize=0):
"""Resize using ImageMagick.
Use the ``magick`` program or ``convert`` on older versions. Return
the output path of resized image.
"""
path_out = path_out or temp_file_for(path_in)
log.debug('artresizer: ImageMagick resizing {0} to {1}',
displayable_path(path_in), displayable_path(path_out))
# "-resize WIDTHx>" shrinks images with the width larger
# than the given width while maintaining the aspect ratio
# with regards to the height.
# ImageMagick already seems to default to no interlace, but we include
# it here for the sake of explicitness.
cmd = self.convert_cmd + [
syspath(path_in, prefix=False),
'-resize', f'{maxwidth}x>',
'-interlace', 'none',
]
if quality > 0:
cmd += ['-quality', f'{quality}']
# "-define jpeg:extent=SIZEb" sets the target filesize for imagemagick
# to SIZE in bytes.
if max_filesize > 0:
cmd += ['-define', f'jpeg:extent={max_filesize}b']
cmd.append(syspath(path_out, prefix=False))
try:
util.command_output(cmd)
except subprocess.CalledProcessError:
log.warning('artresizer: IM convert failed for {0}',
displayable_path(path_in))
return path_in
return path_out
def get_size(self, path_in):
cmd = self.identify_cmd + [
'-format', '%w %h', syspath(path_in, prefix=False)
]
try:
out = util.command_output(cmd).stdout
except subprocess.CalledProcessError as exc:
log.warning('ImageMagick size query failed')
log.debug(
'`convert` exited with (status {}) when '
'getting size with command {}:\n{}',
exc.returncode, cmd, exc.output.strip()
)
return None
try:
return tuple(map(int, out.split(b' ')))
except IndexError:
log.warning('Could not understand IM output: {0!r}', out)
return None
def deinterlace(self, path_in, path_out=None):
path_out = path_out or temp_file_for(path_in)
cmd = self.convert_cmd + [
syspath(path_in, prefix=False),
'-interlace', 'none',
syspath(path_out, prefix=False),
]
try:
util.command_output(cmd)
return path_out
except subprocess.CalledProcessError:
# FIXME: Should probably issue a warning?
return path_in
def get_format(self, filepath):
cmd = self.identify_cmd + [
'-format', '%[magick]',
syspath(filepath)
]
try:
return util.command_output(cmd).stdout
except subprocess.CalledProcessError:
# FIXME: Should probably issue a warning?
return None
def convert_format(self, source, target, deinterlaced):
cmd = self.convert_cmd + [
syspath(source),
*(["-interlace", "none"] if deinterlaced else []),
syspath(target),
]
try:
subprocess.check_call(
cmd,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL
)
return target
except subprocess.CalledProcessError:
# FIXME: Should probably issue a warning?
return source
@property
def can_compare(self):
return self.version() > (6, 8, 7)
def compare(self, im1, im2, compare_threshold):
is_windows = platform.system() == "Windows"
# Converting images to grayscale tends to minimize the weight
# of colors in the diff score. So we first convert both images
# to grayscale and then pipe them into the `compare` command.
# On Windows, ImageMagick doesn't support the magic \\?\ prefix
# on paths, so we pass `prefix=False` to `syspath`.
convert_cmd = self.convert_cmd + [
syspath(im2, prefix=False), syspath(im1, prefix=False),
'-colorspace', 'gray', 'MIFF:-'
]
compare_cmd = self.compare_cmd + [
'-define', 'phash:colorspaces=sRGB,HCLp',
'-metric', 'PHASH', '-', 'null:',
]
log.debug('comparing images with pipeline {} | {}',
convert_cmd, compare_cmd)
convert_proc = subprocess.Popen(
convert_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
compare_proc = subprocess.Popen(
compare_cmd,
stdin=convert_proc.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
# Check the convert output. We're not interested in the
# standard output; that gets piped to the next stage.
convert_proc.stdout.close()
convert_stderr = convert_proc.stderr.read()
convert_proc.stderr.close()
convert_proc.wait()
if convert_proc.returncode:
log.debug(
'ImageMagick convert failed with status {}: {!r}',
convert_proc.returncode,
convert_stderr,
)
return None
# Check the compare output.
stdout, stderr = compare_proc.communicate()
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug('ImageMagick compare failed: {0}, {1}',
displayable_path(im2), displayable_path(im1))
return None
out_str = stderr
else:
out_str = stdout
try:
phash_diff = float(out_str)
except ValueError:
log.debug('IM output is not a number: {0!r}', out_str)
return None
log.debug('ImageMagick compare score: {0}', phash_diff)
return phash_diff <= compare_threshold
@property
def can_write_metadata(self):
return True
def write_metadata(self, file, metadata):
assignments = list(chain.from_iterable(
('-set', k, v) for k, v in metadata.items()
))
command = self.convert_cmd + [file, *assignments, file]
util.command_output(command)
class PILBackend(LocalBackend):
NAME = "PIL"
@classmethod
def version(cls):
try:
__import__('PIL', fromlist=['Image'])
except ImportError:
raise LocalBackendNotAvailableError()
def __init__(self):
"""Initialize a wrapper around PIL for local image operations.
If PIL is not available, raise an Exception.
"""
self.version()
def resize(self, maxwidth, path_in, path_out=None, quality=0,
max_filesize=0):
"""Resize using Python Imaging Library (PIL). Return the output path
of resized image.
"""
path_out = path_out or temp_file_for(path_in)
from PIL import Image
log.debug('artresizer: PIL resizing {0} to {1}',
displayable_path(path_in), displayable_path(path_out))
try:
im = Image.open(syspath(path_in))
size = maxwidth, maxwidth
im.thumbnail(size, Image.Resampling.LANCZOS)
if quality == 0:
# Use PIL's default quality.
quality = -1
# progressive=False only affects JPEGs and is the default,
# but we include it here for explicitness.
im.save(py3_path(path_out), quality=quality, progressive=False)
if max_filesize > 0:
# If maximum filesize is set, we attempt to lower the quality
# of jpeg conversion by a proportional amount, up to 3 attempts
# First, set the maximum quality to either provided, or 95
if quality > 0:
lower_qual = quality
else:
lower_qual = 95
for i in range(5):
# 5 attempts is an arbitrary choice
filesize = os.stat(syspath(path_out)).st_size
log.debug("PIL Pass {0} : Output size: {1}B", i, filesize)
if filesize <= max_filesize:
return path_out
# The relationship between filesize & quality will be
# image dependent.
lower_qual -= 10
# Restrict quality dropping below 10
if lower_qual < 10:
lower_qual = 10
# Use optimize flag to improve filesize decrease
im.save(py3_path(path_out), quality=lower_qual,
optimize=True, progressive=False)
log.warning("PIL Failed to resize file to below {0}B",
max_filesize)
return path_out
else:
return path_out
except OSError:
log.error("PIL cannot create thumbnail for '{0}'",
displayable_path(path_in))
return path_in
def get_size(self, path_in):
from PIL import Image
try:
im = Image.open(syspath(path_in))
return im.size
except OSError as exc:
log.error("PIL could not read file {}: {}",
displayable_path(path_in), exc)
return None
def deinterlace(self, path_in, path_out=None):
path_out = path_out or temp_file_for(path_in)
from PIL import Image
try:
im = Image.open(syspath(path_in))
im.save(py3_path(path_out), progressive=False)
return path_out
except IOError:
# FIXME: Should probably issue a warning?
return path_in
def get_format(self, filepath):
from PIL import Image, UnidentifiedImageError
try:
with Image.open(syspath(filepath)) as im:
return im.format
except (ValueError, TypeError, UnidentifiedImageError,
FileNotFoundError):
log.exception("failed to detect image format for {}", filepath)
return None
def convert_format(self, source, target, deinterlaced):
from PIL import Image, UnidentifiedImageError
try:
with Image.open(syspath(source)) as im:
im.save(py3_path(target), progressive=not deinterlaced)
return target
except (ValueError, TypeError, UnidentifiedImageError,
FileNotFoundError, OSError):
log.exception("failed to convert image {} -> {}", source, target)
return source
@property
def can_compare(self):
return False
def compare(self, im1, im2, compare_threshold):
# It is an error to call this when ArtResizer.can_compare is not True.
raise NotImplementedError()
@property
def can_write_metadata(self):
return True
def write_metadata(self, file, metadata):
from PIL import Image, PngImagePlugin
# FIXME: Detect and handle other file types (currently, the only user
# is the thumbnails plugin, which generates PNG images).
im = Image.open(syspath(file))
meta = PngImagePlugin.PngInfo()
for k, v in metadata.items():
meta.add_text(k, v, 0)
im.save(py3_path(file), "PNG", pnginfo=meta)
class Shareable(type):
"""A pseudo-singleton metaclass that allows both shared and
non-shared instances. The ``MyClass.shared`` property holds a
lazily-created shared instance of ``MyClass`` while calling
``MyClass()`` to construct a new object works as usual.
"""
def __init__(cls, name, bases, dict):
super().__init__(name, bases, dict)
cls._instance = None
@property
def shared(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
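# Illustration of the Shareable metaclass (using ArtResizer, defined below):
# ArtResizer.shared is ArtResizer.shared   # True: one lazily created shared instance
# ArtResizer() is ArtResizer.shared        # False: direct construction gives a new object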
BACKEND_CLASSES = [
IMBackend,
PILBackend,
]
class ArtResizer(metaclass=Shareable):
"""A singleton class that performs image resizes.
"""
def __init__(self):
"""Create a resizer object with an inferred method.
"""
# Check if a local backend is available, and store an instance of the
# backend class. Otherwise, fallback to the web proxy.
for backend_cls in BACKEND_CLASSES:
try:
self.local_method = backend_cls()
log.debug(f"artresizer: method is {self.local_method.NAME}")
break
except LocalBackendNotAvailableError:
continue
else:
log.debug("artresizer: method is WEBPROXY")
self.local_method = None
@property
def method(self):
if self.local:
return self.local_method.NAME
else:
return "WEBPROXY"
def resize(
self, maxwidth, path_in, path_out=None, quality=0, max_filesize=0
):
"""Manipulate an image file according to the method, returning a
new path. For PIL or IMAGEMAGIC methods, resizes the image to a
temporary file and encodes with the specified quality level.
For WEBPROXY, returns `path_in` unmodified.
"""
if self.local:
return self.local_method.resize(
maxwidth, path_in, path_out,
quality=quality, max_filesize=max_filesize
)
else:
# Handled by `proxy_url` already.
return path_in
def deinterlace(self, path_in, path_out=None):
"""Deinterlace an image.
Only available locally.
"""
if self.local:
return self.local_method.deinterlace(path_in, path_out)
else:
# FIXME: Should probably issue a warning?
return path_in
def proxy_url(self, maxwidth, url, quality=0):
"""Modifies an image URL according the method, returning a new
URL. For WEBPROXY, a URL on the proxy server is returned.
Otherwise, the URL is returned unmodified.
"""
if self.local:
# Going to be handled by `resize()`.
return url
else:
return resize_url(url, maxwidth, quality)
@property
def local(self):
"""A boolean indicating whether the resizing method is performed
locally (i.e., PIL or ImageMagick).
"""
return self.local_method is not None
def get_size(self, path_in):
"""Return the size of an image file as an int couple (width, height)
in pixels.
Only available locally.
"""
if self.local:
return self.local_method.get_size(path_in)
else:
# FIXME: Should probably issue a warning?
return path_in
def get_format(self, path_in):
"""Returns the format of the image as a string.
Only available locally.
"""
if self.local:
return self.local_method.get_format(path_in)
else:
# FIXME: Should probably issue a warning?
return None
def reformat(self, path_in, new_format, deinterlaced=True):
"""Converts image to desired format, updating its extension, but
keeping the same filename.
Only available locally.
"""
if not self.local:
# FIXME: Should probably issue a warning?
return path_in
new_format = new_format.lower()
# A nonexhaustive map of image "types" to extensions overrides
new_format = {
'jpeg': 'jpg',
}.get(new_format, new_format)
fname, ext = os.path.splitext(path_in)
path_new = fname + b'.' + new_format.encode('utf8')
# allows the exception to propagate, while still making sure a changed
# file path was removed
result_path = path_in
try:
result_path = self.local_method.convert_format(
path_in, path_new, deinterlaced
)
finally:
if result_path != path_in:
os.unlink(path_in)
return result_path
@property
def can_compare(self):
"""A boolean indicating whether image comparison is available"""
if self.local:
return self.local_method.can_compare
else:
return False
def compare(self, im1, im2, compare_threshold):
"""Return a boolean indicating whether two images are similar.
Only available locally.
"""
if self.local:
return self.local_method.compare(im1, im2, compare_threshold)
else:
# FIXME: Should probably issue a warning?
return None
@property
def can_write_metadata(self):
"""A boolean indicating whether writing image metadata is supported."""
if self.local:
return self.local_method.can_write_metadata
else:
return False
def write_metadata(self, file, metadata):
"""Write key-value metadata to the image file.
Only available locally. Currently, expects the image to be a PNG file.
"""
if self.local:
self.local_method.write_metadata(file, metadata)
else:
# FIXME: Should probably issue a warning?
pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Web presentation DataGroup, UserGroupPerm classes"""
from django import template
#from django import http
from django.shortcuts import render_to_response
# , redirect, get_object_or_404, get_list_or_404, HttpResponse
from django.utils.translation import ugettext as _
#from django.utils.decorators import method_decorator
#from django.views.decorators.cache import cache_control
#from django.views.decorators.cache import never_cache
#from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib.admin import widgets
from django.contrib import messages
#from django.db import transaction
#from django.db.models import Q
#import datetime
from .views import general_context, class_by_name # , classes_desktop
from .dataviews import datalist_create # use_obj_read,
#-------------------------------------------------------------------------------
class AdvSearchForm(forms.Form):
"""Dynamic Search form for a advanced searching"""
chfields = [('nofields', '----'), ('created_by', 'created_by'),
('classname', 'classname'), ('name', 'name'),
('description', 'description')]
chtypes = [('notypes', '----'), ('equals', 'is equal to'),
('notequals', 'is not equal to')]
negate = forms.BooleanField(label="Not", required=False)
fields = forms.ChoiceField(label="Search", choices=chfields,
required=False)
types = forms.ChoiceField(label="Condition", choices=chtypes,
required=False)
seach_text = forms.CharField(label="Search Text", max_length=90,
required=False)
and_cond = forms.BooleanField(label="And", required=False)
or_cond = forms.BooleanField(label="Or", required=False)
def __init__(self, linecount, *args, **kwargs):
super(AdvSearchForm, self).__init__(*args, **kwargs)
for ind in range(1, linecount):
self.fields['negate_%d' % ind] = forms.BooleanField(label="Not",
required=False)
self.fields['fields_%d' % ind] = forms.ChoiceField(label="Search",
choices=self.chfields, required=False)
self.fields['types_%d' % ind] = forms.ChoiceField(
label="Condition", choices=self.chtypes,
required=False)
self.fields['seach_text_%d' % ind] = forms.CharField(
label="Search Text", max_length=90, required=False)
self.fields['and_cond_%d' % ind] = forms.BooleanField(label="And",
required=False)
self.fields['or_cond_%d' % ind] = forms.BooleanField(label="Or",
required=False)
#-------------------------------------------------------------------------------
@login_required
#@cache_control(must_revalidate=True, max_age=3600)
#@csrf_protect
def advsearch(request, linecount=1):
"""Advanced Search Form and search context rendering"""
lcount = int(linecount)
if request.POST.has_key('cmd-add') and \
request.POST['cmd-add'] == 'Add another boolean chart':
lcount += 1
form = AdvSearchForm(lcount) # data={'fields':'classname'}
else:
form = AdvSearchForm(0)
#group by 6 elements
fllist = []
cnt = 0
elemlist = []
for field in form:
cnt += 1
elemlist.append(field)
if cnt == 6:
cnt = 0
fllist.append(elemlist)
elemlist = []
if elemlist:
fllist.append(elemlist)
context_dict = general_context(request, 'Advanced Search',
'Advanced Search Data')
context_dict.update({'linecount': str(lcount)})
context_dict.update({'form': form})
context_dict.update({'fllist': fllist})
return render_to_response('advsearch_form.html', context_dict,
template.RequestContext(request))
#-------------------------------------------------------------------------------
class BaseSearchForm(forms.Form):
"""General search form"""
created_on_from = forms.DateTimeField(
label="Created date from (YYYY-MM-DD hh:mm:ss)",
required=False,
widget=widgets.AdminSplitDateTime)
created_on_to = forms.DateTimeField(
label="Created date to (YYYY-MM-DD hh:mm:ss)",
required=False,
widget=widgets.AdminSplitDateTime)
data_type_list = [(elem.classname, elem.classname) for elem in
class_by_name('ClassDeskTop').objects.all()]
#TODO: classname not in (['Classes', 'LinkType', 'DeskTop',
# 'UserGroupPerm', 'DataGroup'])
classname = forms.ChoiceField(label="Type of Data", choices=data_type_list,
required=False)
#TODO: ? form validation: created_on_to should be later created_on_from
#-------------------------------------------------------------------------------
@login_required
#@cache_control(must_revalidate=True, max_age=3600)
#@csrf_protect
def search(request):
"""Simple Search Form and search context rendering"""
dt_list = []
mform = ''
kwargs = {}
if request.method == 'GET':
form = BaseSearchForm(request.GET)
#show data form if 'Details' button was pressed before
if (len(request.GET) > 7 and request.GET.has_key('classname') and
request.GET['classname'] and request.GET['classname'] != 'Base'):
mform = forms.models.modelform_factory(
class_by_name(request.GET['classname']))()
if form.is_valid():
if (request.GET.has_key('add-details') and
request.GET['add-details'] == u'Details' and
form.cleaned_data['classname'] != 'Base'):
mform = forms.models.modelform_factory(
class_by_name(form.cleaned_data['classname']))()
if (request.GET.has_key('search') and
request.GET['search'] == 'Search'):
                #fill search parameters dictionary from base search form
for elem in form.cleaned_data.iteritems():
if elem[0] == 'created_on_from' and elem[1]:
kwargs.update({
'created_on__gte': elem[1]
})
elif elem[0] == 'created_on_to' and elem[1]:
kwargs.update({
'created_on__lte': elem[1]
})
elif elem[0] == 'classname' and elem[1] != 'Base':
kwargs.update({
'classname': elem[1]
})
if mform:
for elem in mform.fields.keys():
                        #class name and form do not correspond to each other
if not elem in request.GET.keys():
messages.add_message(request, messages.WARNING,
_(u"To find all available for you %s data "
u"click on 'Search' button."
u"To add search parameters for %s data click"
u" on 'Details' button.") % (
form.cleaned_data['classname'],
form.cleaned_data['classname']))
mform = ''
kwargs = {}
break
if request.GET.has_key(elem) and request.GET[elem]:
#fill search parameters dictionary from form by type
if elem == 'name' or elem == 'description':
kwargs.update({elem +
'__icontains': request.GET[elem]})
else:
kwargs.update({elem: request.GET[elem]})
if kwargs:
model = class_by_name('Base')
if kwargs.has_key('classname'):
model = class_by_name(kwargs['classname'])
baselist = model.objects.filter(**kwargs).exclude(
classname__in=['Classes',
'DataGroup',
'UserGroupPerm'])
dt_list = datalist_create(request, baselist)
if not dt_list:
messages.add_message(request, messages.INFO,
_('Result list is empty'))
else:
form = BaseSearchForm()
context_dict = general_context(request, 'Search', 'Search Data')
context_dict.update({'form': form, 'mform': mform, 'data_list': dt_list})
return render_to_response('search_form.html', context_dict,
template.RequestContext(request))
|
import networkx as nx
import matplotlib.pyplot as plt
G=nx.Graph()
G.add_node(1)
G.add_node(2)
G.add_node(3)
G.add_edge(1,2)
G.add_edge(2,3)
G.add_edge(3,1)
print(G.nodes) #print the nodes of the graph
nx.draw(G)
plt.show()
|
from pyvi import ViTokenizer
path = "document1.txt"
f = open(path,"r",encoding="utf-8")
e = f.read()
s_tt= ViTokenizer.tokenize(e)
fo = open("final.txt","w",encoding="utf-8")
fo.write(s_tt)
f.close()
fo.close()
|
import unittest
from katas.kyu_6.simple_sentences import make_sentences
class MakeSentencesTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(make_sentences(['hello', 'world']), 'hello world.')
def test_equals_2(self):
self.assertEqual(make_sentences(
['Quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']),
'Quick brown fox jumped over the lazy dog.')
def test_equals_3(self):
self.assertEqual(make_sentences(['hello', ',', 'my', 'dear']),
'hello, my dear.')
def test_equals_4(self):
self.assertEqual(make_sentences(['one', ',', 'two', ',', 'three']),
'one, two, three.')
def test_equals_5(self):
self.assertEqual(make_sentences(
['One', ',', 'two', 'two', ',', 'three', 'three', 'three', ',',
'4', '4', '4', '4']), 'One, two two, three three three, 4 4 4 4.')
def test_equals_6(self):
self.assertEqual(make_sentences(['hello', 'world', '.']),
'hello world.')
def test_equals_7(self):
self.assertEqual(make_sentences(['Bye', '.']), 'Bye.')
def test_equals_8(self):
self.assertEqual(make_sentences(['hello', 'world', '.', '.', '.']),
'hello world.')
def test_equals_9(self):
self.assertEqual(make_sentences(
['The', 'Earth', 'rotates', 'around', 'The', 'Sun', 'in', '365',
'days', ',', 'I', 'know', 'that', '.', '.', '.', '.', '.', '.',
'.', '.', '.', '.', '.', '.']),
'The Earth rotates around The Sun in 365 days, I know that.')
|
import sys
import os
import math
import tempfile
sys.path.insert(0, 'scripts')
sys.path.insert(0, os.path.join("tools", "trees"))
sys.path.insert(0, os.path.join("tools", "print"))
sys.path.insert(0, os.path.join("tools", "families"))
import saved_metrics
import fam
import pickle
from aligned_printer import AlignedPrinter
import experiments as exp
import subprocess
def get_runs(datadir, run_tag):
runs = []
runs.append("true.true")
if (run_tag == "all"):
successful_runs = fam.get_successful_runs(datadir)
for run in successful_runs:
if (not "ultiple" in run and not "true.true" in run):
runs.append(run)
else:
runs.append(run_tag)
return runs
def get_trees(runs):
trees = []
for run in runs:
trees.append(run + ".geneTree.newick")
return trees
def write_kfdistance_input_files(datadir, trees, trees_file):
with open(trees_file, "w") as writer:
writer.write("\n".join(trees))
writer.write("\n")
def run_kfdistance(families_dir, trees_file, output_dir):
command = []
command.append("R")
command.append("-f")
command.append(exp.treedist_R_script)
command.append("--args")
command.append(families_dir)
command.append(trees_file)
command.append(output_dir)
print(" ".join(command))
subprocess.check_call(command)
def extract_kfdistance(kf_output_dir, families, runs):
kf_cells = {}
paired_runs = []
for run in runs:
paired_runs.append("true.true - " + run)
for family in families:
per_run_rkf = {}
kf_cells[family] = per_run_rkf
print("family " + str(family))
kf_file = os.path.join(kf_output_dir, family)
print(kf_file)
with open(kf_file) as reader:
kf_distances = reader.readline()[:-1].split(" ")
print(kf_distances)
assert(len(kf_distances) == len(runs))
for i in range(0, len(kf_distances)):
kf_cells[family][paired_runs[i]] = [float(kf_distances[i]), 1.0]
return kf_cells
def save_kf_cells(datadir, kf_cells, rooted):
output = fam.get_raw_kf_cells_file(datadir, rooted)
pickle.dump(kf_cells, open(output, "wb"))
def load_kf_cells(datadir, rooted = False):
return pickle.load(open(fam.get_raw_kf_cells_file(datadir, rooted), "rb"))
def get_run_key(m1, m2):
return m1 + " - " + m2
def get_kf_to_true(cells, run_name):
return cells[get_run_key(fam.get_run_name("true", "true"), run_name)]
def print_metrics(datadir, metric_dict, metric_name, benched_run):
printer = AlignedPrinter()
saved_metrics.save_dico(datadir, metric_dict, metric_name)
for run_key in metric_dict:
run = run_key.split(" - ")[1]
suffix = ""
if (benched_run == run):
suffix += "\t <-- "
printer.add("- " + run_key + ":", str(metric_dict[run_key]) + suffix)
printer.sort_right_float()
printer.display()
print("")
def export_metrics(datadir, benched_run, kf_cells, runs):
total_rkf = {}
families_number = len(kf_cells)
run_keys = []
for run in runs:
run_keys.append("true.true - " + run)
for run_key in run_keys:
total_rkf[run_key] = 0.0
for family in kf_cells:
family_kf_cells = kf_cells[family]
for key in family_kf_cells:
total_rkf[key] += (family_kf_cells[key][0] / family_kf_cells[key][1])
average_rkf = {}
for key in run_keys:
average_rkf[key] = total_rkf[key] / families_number
print("Average KF:")
print_metrics(datadir, average_rkf, "average_kf", benched_run)
def analyze(datadir, run_tag, benched_run = ""):
temp_dir = tempfile.mkdtemp(dir = datadir)#tempfile.TemporaryDirectory()
analyze_dir_name = temp_dir#.name
print("analyze directory " + analyze_dir_name)
trees_file = os.path.join(analyze_dir_name, "trees.txt")
families_file = os.path.join(analyze_dir_name, "families.txt")
kf_output_dir = os.path.join(analyze_dir_name, "kfdistances")
os.mkdir(kf_output_dir)
families_dir = fam.get_families_dir(datadir)
families = fam.get_families_list(datadir)
runs = get_runs(datadir, run_tag)
print("Runs: " + str(runs))
trees = get_trees(runs)
write_kfdistance_input_files(datadir, trees, trees_file)
run_kfdistance(families_dir, trees_file, kf_output_dir)
kf_cells = extract_kfdistance(kf_output_dir, families, runs)
if ("all" == run_tag):
save_kf_cells(datadir, kf_cells, False)
export_metrics(datadir, benched_run, kf_cells, runs)
if __name__ == '__main__':
if (len(sys.argv) < 3):
print("Syntax: families_dir run=all [benched_run]")
exit(1)
print(" ".join(sys.argv))
datadir = sys.argv[1]
run = sys.argv[2]
benched_run = ""
if (len(sys.argv) > 3):
benched_run = sys.argv[3]
analyze(datadir, run, benched_run)
|
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
# ================================================================== #
# Table of Contents #
# ================================================================== #
# 1. Basic autograd example 1 (Line 25 to 39)
# 2. Basic autograd example 2 (Line 46 to 83)
# 3. Loading data from numpy (Line 90 to 97)
# 4. Input pipline (Line 104 to 129)
# 5. Input pipline for custom dataset (Line 136 to 156)
# 6. Pretrained model (Line 163 to 176)
# 7. Save and load model (Line 183 to 189)
# ================================================================== #
# 1. Basic autograd example 1 #
# ================================================================== #
# Create tensors.
# x = torch.tensor(1., requires_grad=True)
# w = torch.tensor(2., requires_grad=True)
# b = torch.tensor(3., requires_grad=True)
# Build a computational graph.
# y = w * x + b
#
#
# compute gradient
# y.backward()
# Print out the gradients.
# print(x.grad)
# print(w.grad)
# print(b.grad)
# ================================================================== #
# 2. Basic autograd example 2 #
# ================================================================== #
# Create tensors of shape (10, 3) and (10, 2).
'''
x = torch.randn(10,3)
y = torch.randn(10,2)
# Build a fully connected layer.
linear = nn.Linear(3,2)
print( 'w:',linear.weight)
print( 'b:' , linear.bias )
# Build loss function and optimizer.
criteration = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters() , lr=0.01)
# Forward pass.
pred = linear(x)
# Compute loss.
loss = criteration(pred,y)
print('loss:', loss.item())
# Backward pass.
loss.backward()
# Print out the gradients.
print ('dL/dw: ', linear.weight.grad)
print ('dL/db: ', linear.bias.grad)
# 1-step gradient descent.
optimizer.step()
# Print out the loss after 1-step gradient descent.
pred = linear(x)
loss = criteration(pred,y)
print('loss after 1 step optimization: ', loss.item())
'''
# ================================================================== #
# 3. Loading data from numpy #
# ================================================================== #
# x = np.array([[1,5] , [2,4]])
#
# print(x)
#
# Convert the numpy array to a torch tensor.
# y = torch.from_numpy(x)
# print(y)
# z = y.numpy()
# ================================================================== #
# 4. Input pipline #
# ================================================================== #
# Download and construct CIFAR-10 dataset.
train__dataset = torchvision.datasets.CIFAR10(root='./pytorch/dataset', train=True, transform=transforms.ToTensor(), download=True)
# Fetch one data pair (read data from disk).
images, label = train__dataset[0]
print(images.size())
print(label)
# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(train__dataset, batch_size=64, shuffle=True)
# When iteration starts, queue and thread start to load data from files.
data_iter = iter(train_loader)
images, label = next(data_iter)
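# A typical way to consume the loader is a mini-batch loop; the minimal sketch below only
# iterates over the train_loader defined above (no model involved), just to show the pattern.
for images, labels in train_loader:
    # each iteration yields a batch of 64 image tensors and their labels
    pass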
|
from typing import Any, Optional, Tuple
from esipy import EsiClient
from waitlist.utility import outgate
from waitlist.utility.config import banned_by_default
from waitlist.utility.sde import add_type_by_id_to_database
from waitlist.storage.database import Constellation, SolarSystem, Station,\
InvType, Account, Character, Ban, Whitelist, CharacterTypes
from waitlist.base import db
import logging
from waitlist.utility.swagger import get_api
from waitlist.utility.swagger.eve import get_esi_client
from waitlist.utility.swagger.eve.search import SearchEndpoint, SearchResponse
from threading import Lock
from waitlist.utility.outgate.exceptions import ApiException, ESIException
from waitlist.utility.outgate import character, corporation, alliance
logger = logging.getLogger(__name__)
"""
Lock for checking the existence of a character and creating it
"""
character_check_lock: Lock = Lock()
def get_constellation(name: str) -> Constellation:
return db.session.query(Constellation).filter(Constellation.constellationName == name).first()
def get_system(name: str) -> SolarSystem:
return db.session.query(SolarSystem).filter(SolarSystem.solarSystemName == name).first()
def get_station(name: str) -> Station:
return db.session.query(Station).filter(Station.stationName == name).first()
def get_item_id(name: str) -> int:
logger.debug("Getting id for item %s", name)
item = db.session.query(InvType).filter(InvType.typeName == name).first()
if item is None:
item_data = get_item_data_from_api(name)
if item_data is None:
return -1
# add the type to db
current_type: InvType = db.session.query(InvType).get(item_data.type_id)
# Was it only renamed?
if current_type is not None:
item = InvType(typeID=item_data.type_id, groupID=item_data.group_id,
typeName=item_data.name, description=item_data.description,
marketGroupID=getattr(item_data, 'market_group_id', None))
db.session.merge(item)
db.session.commit()
logger.info(f'Updated {item}')
else:
item = add_type_by_id_to_database(item_data.type_id)
db.session.commit()
logger.info(f'Added new {item}')
return item.typeID
def get_item_data_from_api(name: str) -> Optional[Any]:
"""Tries to get api data of an item with this name from Search API"""
search_endpoint = SearchEndpoint()
search_response: SearchResponse = search_endpoint.public_search(name, ['inventory_type'], True)
result_ids = search_response.inventory_type_ids()
if result_ids is None or len(result_ids) < 1:
return None
esi_client: EsiClient = get_esi_client(None, True)
api = get_api()
for result_id in result_ids:
type_result = esi_client.request(api.op['get_universe_types_type_id'](type_id=result_id))
if type_result.data.name == name:
return type_result.data
return None
# load an account by its id
def get_account_from_db(int_id: int) -> Account:
return db.session.query(Account).filter(Account.id == int_id).first()
# load a character by its id
def get_char_from_db(int_id: int) -> Character:
return db.session.query(Character).get(int_id)
def create_new_character(eve_id: int, char_name: str) -> Character:
char = Character()
char.id = eve_id
char.eve_name = char_name
char.is_new = True
db.session.add(char)
db.session.commit()
return char
def get_character_by_id_and_name(eve_id: int, eve_name: str) -> Character:
with character_check_lock:
char = get_char_from_db(eve_id)
if char is None:
logger.info("No character found for id %d", eve_id)
# create a new char
char = create_new_character(eve_id, eve_name)
return char
def get_character_by_id(eve_character_id: int) -> Character:
"""
:throws ApiException if there was a problem contacting the api
"""
with character_check_lock:
character: Character = get_char_from_db(eve_character_id)
if character is None:
logger.info("No character found in database for id %d", eve_character_id)
char_info = outgate.character.get_info(eve_character_id)
character = create_new_character(eve_character_id, char_info.characterName)
return character
def is_charid_banned(character_id: int) -> bool:
if character_id == 0: # this stands for no id in the eve api (for example no alliance)
return False
return db.session.query(Ban).filter(Ban.id == character_id).count() == 1
def is_charid_whitelisted(character_id: int) -> bool:
if character_id == 0:
return False
return db.session.query(Whitelist).filter(Whitelist.characterID == character_id).count() == 1
def get_character_by_name(eve_name: str) -> Optional[Character]:
try:
eve_info = outgate.character.get_info_by_name(eve_name)
if eve_info is None:
return None
return get_character_by_id_and_name(eve_info.id, eve_name)
except ApiException:
return None
def get_character_type_by_id(char_id: int) -> Tuple[CharacterTypes,int]:
"""
    :returns the character type and how many potential ESI errors were created
"""
try:
char_info: APICacheCharacterInfo = character.get_info(char_id)
return CharacterTypes.character, 0
except ESIException:
pass # no such char
try:
corp_info: APICacheCorporationInfo = corporation.get_info(char_id)
return CharacterTypes.corporation, 1
except ESIException:
pass # no such corp
all_info: APICacheAllianceInfo = alliance.get_info(char_id)
return CharacterTypes.alliance, 2
def get_char_corp_all_name_by_id_and_type(char_id: int, char_type: CharacterTypes) -> str:
if char_type == CharacterTypes.character:
return character.get_info(char_id).characterName
if char_type == CharacterTypes.corporation:
return corporation.get_info(char_id).name
if char_type == CharacterTypes.alliance:
return alliance.get_info(char_id).allianceName
raise ValueError('Unknown Character type supplied')
def is_char_banned(char: Character) -> Tuple[bool, str]:
try:
if is_charid_whitelisted(char.get_eve_id()):
return False, ""
if char.banned:
return True, "Character"
char_info = outgate.character.get_info(char.get_eve_id())
if is_charid_whitelisted(char_info.corporationID):
return False, ""
if is_charid_banned(char_info.corporationID):
return True, "Corporation"
if char_info.allianceID is not None:
if is_charid_whitelisted(char_info.allianceID):
return False, ""
if is_charid_banned(char_info.allianceID):
return True, "Alliance"
if banned_by_default:
return True, "Everyone Banned by default"
else:
return False, ""
except ApiException as e:
logger.info("Failed to check if %d was banned, because of Api error, code=%d msg=%s",
char.get_eve_id(), e.code, e.msg)
return False, ""
except Exception:
logger.error("Failed to check if %d was banned", char.get_eve_id(), exc_info=1)
return False, ""
|
from common import *
import base64
####file encrypt part
def encrpty_file(file_path, pubkey):
log.debug("-------")
cipher_text = b''
max_length = int(get_max_length(pubkey))
if pubkey:
cipher_public = Crypto.Cipher.PKCS1_v1_5.new(pubkey)
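        # PKCS#1 v1.5 RSA can only encrypt a limited number of bytes per operation,
        # so the file is read and encrypted in chunks of at most max_length characters.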
with open(file_path, 'r', encoding='UTF-8') as f:
while True:
message = f.read(max_length)
if message != "":
#cipher_text = cipher_text + base64.b64encode(cipher_public.encrypt(message.encode('utf-8')))
cipher_text = cipher_text + cipher_public.encrypt(message.encode(encoding='utf-8'))
else:
break
# update file
with open(file_path, 'wb') as f:
f.write(base64.b64encode(cipher_text))
with open(file_path, 'rb') as f:
message = f.read()
#log.log_raw(message)
else:
win32api.MessageBox(0, "error", "Error",win32con.MB_ICONWARNING)
return False
return True
###start
# Create the blockchain and add the genesis block
def block_chain_flow():
private_key = get_key.get_private_key_strs()
## store key @ block 0
blockchain = [create_genesis_block(private_key)]
previous_block = blockchain[0]
blockchain_len = 1
a = input("are you want to add another device? y :yes n :No:")
while True:
if (a == 'y'):
mac_id = input("input your mac_id, please input numbers only:")
block_to_add = next_block(previous_block, mac_id)
blockchain.append(block_to_add)
previous_block = block_to_add
blockchain_len = blockchain_len + 1
a = input("are you want to add another device? y :yes n :No:")
else:
#log.log_out("add done๏ผblocks below:")
break
save_blockchain(blockchain, blockchain_len)
## block chain check
check = block_chain_check(blockchain, blockchain_len)
if check:
pubkey = get_key.get_public_key()
if pubkey:
encrpty_file(file_need_encrpty_test, pubkey)
log.log_out("file encrpty done! file:" + file_need_encrpty_test)
else:
log.debug("check fail")
return False
if __name__ == "__main__":
block_chain_flow()
|
import argparse
import os
import pandas as pd
from utils import download_incidents, create_folder, video_to_frames
__author__ = 'roeiherz'
VIDEO_PATH = "/data/Accidents1K/Videos"
INDEX_PATH = "/data/Accidents1K/accident_index.csv"
IMAGE_PATH = "/data/Accidents1K/Images"
def get_video_links(index_path):
"""
    This function returns the list of unique video links from the index CSV
    :param index_path: path to the accident index CSV file
    :return: list of video link strings
"""
df_index = pd.read_csv(index_path)
video_links = list(df_index['video link'].unique())
return video_links
if __name__ == "__main__":
"""
This Script downloads videos and using video2dir code to parse multiple videos to images
"""
parser = argparse.ArgumentParser()
    parser.add_argument('--local', help='use local development paths', action='store', default=False)
    parser.add_argument('--download', help='download the videos listed in the index file', action='store', default=False)
parser.add_argument('--video', help='input directory of videos', action='store', default=VIDEO_PATH)
parser.add_argument('--index', help='index file path', action='store', default=INDEX_PATH)
    parser.add_argument('--image', help='output directory of images', action='store', default=IMAGE_PATH)
args = parser.parse_args()
# Use Local params
if args.local:
args.video = "/Users/roeiherzig/Datasets/Accidents/Videos/"
args.index = "/Users/roeiherzig/Datasets/Accidents/accident_index.csv"
args.image = "/Users/roeiherzig/Datasets/Accidents/Images/"
args.download = True
# Download Incidents
if args.download:
        download_incidents(input_file=args.index, output_dir=args.video)  # args has no 'input' option; videos go to the video directory
# Check directory exists
if not os.path.exists(args.video):
print('Can not find videos directory: {}'.format(args.video))
exit(-1)
# Video Path
video_path = args.video
# Image path
img_path = args.image
# Get files
files = os.listdir(video_path)
# files = ['412563fe-ce68-4c17-92ce-b8770d6fb140.mov']
print('Number of files: {} from input directory'.format(len(files)))
for base_name in files:
try:
# Process only if its a video
if '.mov' in base_name or '.mp4' in base_name:
# video file
in_dir = os.path.join(video_path, base_name)
# Without extension
out_dir = os.path.join(img_path, os.path.splitext(base_name)[0])
if os.path.exists(out_dir):
print("Dir {} already exists".format(base_name))
continue
create_folder(out_dir)
print('{} --> {}'.format(video_path, out_dir))
video_to_frames(in_dir, out_dir, fps=5)
except Exception as e:
print("Error in incident {} with {}".format(base_name, str(e)))
|
#SevenDigitsDrawV2.py
import turtle as p
import time as t
#import turtle,time
def drawGap(): # draw the gap between segments, so adjacent segments are not connected
p.penup()
p.fd(5)
def drawline(draw): # draw a single segment of the seven-segment digit
drawGap()
p.pendown() if draw else p.penup()
p.fd(40)
drawGap()
p.right(90)
def drawDigit(digit): # draw one digit from its seven segments, then move the turtle 20 pixels to the right
drawline(True) if digit in [2,3,4,5,6,8,9] else drawline(False)
drawline(True) if digit in [0,1,3,4,5,6,7,8,9] else drawline(False)
drawline(True) if digit in [0,2,3,5,6,8,9] else drawline(False)
drawline(True) if digit in [0,2,6,8] else drawline(False)
p.left(90)
drawline(True) if digit in [0,4,5,6,8,9] else drawline(False)
drawline(True) if digit in [0,2,3,5,6,7,8,9] else drawline(False)
drawline(True) if digit in [0,1,2,3,4,7,8,9] else drawline(False)
p.left(180)
p.penup()
p.fd(20)
def drawData(data): # render the date string, digit by digit
    p.color('red')
    for i in data:
        if i =='-':
            p.write('年',font=('Arial',18,'normal'))
            p.color('green')
            p.fd(40)
        elif i=='=':
            p.write('月',font=('Arial',18,'normal'))
            p.color('blue')
            p.fd(40)
        elif i=='+':
            p.write('日',font=('Arial',18,'normal'))
        else:
            drawDigit(eval(i)) # convert the digit character to an integer via eval()
def main():
p.setup(800,350,200,200)
    #p.hideturtle() # hide the turtle before drawing starts
p.penup()
p.fd(-300)
p.pensize(5)
#drawData('20190505')
    drawData(t.strftime('%Y-%m=%d+',t.gmtime())) # must use the alias t here (time is imported as t); time. would raise an error
    p.hideturtle() # hide the turtle after drawing completes
p.done()
main()
|
# coding: utf-8
# $\newcommand{\xv}{\mathbf{x}}
# \newcommand{\Xv}{\mathbf{X}}
# \newcommand{\yv}{\mathbf{y}}
# \newcommand{\zv}{\mathbf{z}}
# \newcommand{\av}{\mathbf{a}}
# \newcommand{\Wv}{\mathbf{W}}
# \newcommand{\wv}{\mathbf{w}}
# \newcommand{\tv}{\mathbf{t}}
# \newcommand{\Tv}{\mathbf{T}}
# \newcommand{\muv}{\boldsymbol{\mu}}
# \newcommand{\sigmav}{\boldsymbol{\sigma}}
# \newcommand{\phiv}{\boldsymbol{\phi}}
# \newcommand{\Phiv}{\boldsymbol{\Phi}}
# \newcommand{\Sigmav}{\boldsymbol{\Sigma}}
# \newcommand{\Lambdav}{\boldsymbol{\Lambda}}
# \newcommand{\half}{\frac{1}{2}}
# \newcommand{\argmax}[1]{\underset{#1}{\operatorname{argmax}}}
# \newcommand{\argmin}[1]{\underset{#1}{\operatorname{argmin}}}$
# # Assignment 3: Activation Functions
# Damian Armijo
# ## Overview
# In this assignment, you will make a new version of your ```NeuralNetwork``` class from the previous assignment. For this new version, define the activation function to be the Rectified Linear Unit (ReLU).
#
# You will compare the training and testing performances of networks with tanh and networks with the ReLU activation functions.
# ### NeuralNetworkReLU
# Start with the ```NeuralNetwork``` class defined in ```neuralnetworksA2.py```. Define a new class named ```NeuralNetworkReLU``` that extends ```NeuralNetwork``` and simply defines new implementations of ```activation``` and ```activationDerivative``` that implement the ReLU activation function.
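# A minimal, self-contained sketch of the two computations such a subclass would override is
# shown below. This is illustrative only: the real ```NeuralNetworkReLU``` must subclass
# ```NeuralNetwork``` from ```neuralnetworksA2.py```, the method names follow the assignment
# text rather than a verified API, and it is assumed here that ```activationDerivative```
# receives the activation output rather than the pre-activation sum.
# In[ ]:
import numpy as np
def relu_activation(z):
    # ReLU: elementwise max(0, z)
    return np.maximum(0, z)
def relu_activation_derivative(a):
    # ReLU derivative expressed in terms of the activation output a: 1 where a > 0, else 0
    return (np.asarray(a) > 0).astype(float)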
# ### Comparison
# Define a new function ```partition``` that is used as this example shows.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
X = np.arange(10*2).reshape((10, 2))
T = X[:, 0:1] * 0.1
# In[2]:
import random
def partition(X, T, seeding,shuffle = False):
nRows = X.shape[0]
rows = np.arange(nRows)
if(shuffle):
np.random.shuffle(rows)
nTrain = int(nRows * seeding)
trainRows = rows[:nTrain]
testRows = rows[nTrain:]
Xtrain, Ttrain = X[trainRows, :], T[trainRows, :]
Xtest, Ttest = X[testRows, :], T[testRows, :]
return Xtrain, Ttrain, Xtest, Ttest
# In[ ]:
X
# In[ ]:
T
# In[ ]:
Xtrain, Ttrain, Xtest, Ttest = partition(X, T, 0.8, shuffle=False)
# In[ ]:
Xtrain
# In[ ]:
Ttrain
# In[ ]:
Xtest
# In[ ]:
Ttest
# If ```shuffle=True``` is used as an argument, then the samples are randomly rearranged before the partitions are formed.
# In[ ]:
Xtrain, Ttrain, Xtest, Ttest = partition(X, T, 0.8, shuffle=True)
# In[ ]:
Xtrain
# In[ ]:
Ttrain
# In[ ]:
Xtest
# In[ ]:
Ttest
# You will use the ```energydata_complete.csv``` data for the following comparisons. Load this data using pandas, then create matrix $X$ using all columns except ```['date','Appliances', 'rv1', 'rv2']``` and create $T$ using just ```'Appliances'```. Write python code that performs the following algorithm.
# In[3]:
import pandas
# Reading in file via pandas, and putting values into T(target) and X(inputs)
data = pandas.read_csv('energydata_complete.csv')
T = data[['Appliances']]
T = np.array(T)
X = data.drop(['date','Appliances', 'rv1', 'rv2'], axis=1)
X = np.array(X)
# Getting labels for T and X
names = data.keys()
Xnames = names[3:27]
Tnames = names[0:2]
Xnames = Xnames.insert(0, 'bias')
print(X)
print(T)
# In[4]:
def rmse(A, B):
return np.sqrt(np.mean((A - B)**2))
# - For each of the two activation functions, ```tanh```, and ```ReLU```:
# - For each hidden layer structure in [[u]*nl for u in [1, 2, 5, 10, 50] for nl in [1, 2, 3, 4, 5, 10]]:
# - Repeat 10 times:
# - Randomly partition the data into training set with 80% of samples and testing set with other 20%.
# - Create a neural network using the given activation function and hidden layer structure.
# - Train the network for 100 iterations.
# - Calculate two RMS errors, one on the training partition and one on the testing partitions.
# - Calculate the mean of the training and testing RMS errors over the 10 repetitions.
# In[5]:
import neuralnetworksA2 as nn
# In[9]:
def meanFromData(V):
tanhMeanTrain = V[:,0].mean()
tanhMeanTest = V[:,1].mean()
reluMeanTrain = V[:,2].mean()
reluMeanTest = V[:,3].mean()
    return tanhMeanTrain, tanhMeanTest, reluMeanTrain, reluMeanTest
# In[10]:
#ReLU activation
import pandas as pd
errors = []
hiddens = [0] + [[nu] * nl for nu in [1,2,5,10] for nl in [1,2,3,4]]
for hids in hiddens:
    # start a fresh results array per hidden-layer structure so the means below
    # cover only this structure's repetitions
    V = np.zeros(shape=(0, 4))
    for x in range(2):
Xtrain, Ttrain, Xtest, Ttest = partition(X, T, 0.8, shuffle=True)
nnet = nn.NeuralNetwork(Xtrain.shape[1], hids, Ttrain.shape[1])
nnet.train(Xtrain, Ttrain, 100)
stack = [rmse(Ttrain, nnet.use(Xtrain)), rmse(Ttest, nnet.use(Xtest))]
nnetrelu = nn.NeuralNetworkReLU(Xtrain.shape[1], hids, Ttrain.shape[1])
nnetrelu.train(Xtrain, Ttrain, 100)
        stack.extend([rmse(Ttrain, nnetrelu.use(Xtrain)), rmse(Ttest, nnetrelu.use(Xtest))])
V = np.vstack([V,stack])
tanhMeanTrain,tanhMeanTest,reluMeanTrain,reluMeanTest = meanFromData(V)
errors.append([hids, tanhMeanTrain,tanhMeanTest,reluMeanTrain,reluMeanTest])
errors = pd.DataFrame(errors)
print(errors)
plt.figure(figsize=(10, 10))
plt.plot(errors.values[:, 1:], 'o-')
plt.legend(('tanh Train RMSE','tanh Test RMSE','ReLU Train RMSE', 'ReLU Test RMSE',))
plt.xticks(range(errors.shape[0]), hiddens, rotation=30, horizontalalignment='right')
plt.grid(True)
# You will have to add steps in this algorithm to collect the results you need to make the following plot.
#
# Make a plot of the RMS errors versus the hidden layer structure. On this plot include four curves, for the training and testing RMS errors for each of the two activation functions. Label both axes and add a legend that identifies each curve.
#
# As always, discuss what you see. What can you say about which activation function is best?
# ## Grading and Check-in
# Your notebook will be run and graded automatically. Test this grading process by first downloading [A3grader.tar](http://www.cs.colostate.edu/~anderson/cs445/notebooks/A3grader.tar) and extract `A3grader.py` from it. Run the code in the following cell to demonstrate an example grading session. You should see a perfect execution score of 60 / 60 if your functions and class are defined correctly. The remaining 40 points will be based on the results you obtain from the comparisons of hidden layer structures and the two activation functions applied to the energy data.
#
# For the grading script to run correctly, you must first name this notebook as `Lastname-A3.ipynb` with `Lastname` being your last name, and then save this notebook. Your working director must also contain `neuralnetworksA2.py` and `mlutilities.py` from lecture notes.
#
# Combine your notebook, `neuralnetworkA2.py`, and `mlutilities.py` into one zip file or tar file. Name your tar file `Lastname-A3.tar` or your zip file `Lastname-A3.zip`. Check in your tar or zip file using the `Assignment 3` link in Canvas.
#
# A different, but similar, grading script will be used to grade your checked-in notebook. It will include other tests.
# In[2]:
get_ipython().magic('run -i A3grader.py')
# In[1]:
import neuralnetworksA2 as nn
nnet = nn.NeuralNetwork(1, 10, 1)
nnetrelu = nn.NeuralNetworkReLU(1, 5, 1)
da = nnetrelu.activation(-0.8)
print(da)
|
for line in open("C:/Users/dinesh kumar/Documents/text1.txt"):
    print(line, end="")
|
"""
Author : Lily
Data : 2018-09-18
QQ : 339600718
็ง้ขๆฐ Kiehl's Kiehls-s
ๆๅๆ่ทฏ๏ผๅพฎไฟกๅ
ฌไผๅท๏ผๆฟๅฐprovince็id,ๅไธบๅๆฐ๏ผ่ฏทๆฑcity็id,ๆ นๆฎcityid่ฏทๆฑstores็ๆฐๆฎ
getProvince๏ผpost,json๏ผ:http://wx.kiehls.com.cn/KStart/GetProvince
getCity๏ผpost,json,ๅๆฐ๏ผproId๏ผ:http://wx.kiehls.com.cn/KStart/GetCity
getStore๏ผpost,json๏ผๅๆฐ๏ผcity_id: 37,longitude: ,latitude: ๏ผ:http://wx.kiehls.com.cn/Shoppe/GetShopList
ๆณจๆ๏ผๅจๆต่งๅจๆไธๅผ๏ผไผๆ็คบ---Server Error in '/' Application.
"""
import requests
import re
import datetime
import json
filename = "Kiehls-s" + re.sub('[^0-9]','',str(datetime.datetime.now())) + ".csv"
f = open(filename, 'w', encoding='utf-8')
f.write('StoreId,WebChatId,Content,Name,Country,Code,state,BusinessTime,Map,thumb,Address,Phone,City,Dist,Province,Lng,Number,Lat,Price,Recommended,Characteristic,Introduction,BranchName,category,product1,product2,product3,url,PanoramaUrl,ComplexDefualts,PassWord,CounterMobile,TrailId,FailedPasswordCount,LastLoginTime,\n')
pro_url = 'http://wx.kiehls.com.cn/KStart/GetProvince'
city_url = 'http://wx.kiehls.com.cn/KStart/GetCity'
store_url = 'http://wx.kiehls.com.cn/Shoppe/GetShopList'
provinces = requests.post(pro_url).text
provinces_json = json.loads(provinces)
for pro in provinces_json:
print(pro)
data = {"proId":pro["AreaId"]}
citys = requests.post(city_url, data=data).text
print(citys)
citys_json = json.loads(citys)
for ct in citys_json:
print(ct)
cityname = ct['AreaName']
cityid = ct["AreaId"]
store_data = {"city_id": cityid, "longitude": "", "latitude": ""}
stores = requests.post(store_url, data=store_data).text
stores_json = json.loads(stores)
for store in stores_json["shopperList"]:
print(store)
for s_k, s_v in store.items():
                v = str(s_v).replace(',', '，').replace('\n', '').replace('\r','')
f.write(v + ",")
f.write('\n')
f.close()
|
# Enter script code
keyboard.send_keys("<f6>8")
|
from django.contrib.auth.models import User
from django.db import models
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
nickname = models.CharField(max_length=20, default='')
class Meta:
ordering = ['nickname']
def __str__(self):
return self.user.username
|
import keras
import pyfor
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
#parse args
import argparse
import glob
#DeepForest
from DeepForest.evalmAP import _get_detections
#Set up argument parsing for prediction
mode_parser = argparse.ArgumentParser(description='Prediction of a new image')
mode_parser.add_argument('--model', help='path to training model' )
mode_parser.add_argument('--image', help='image or directory of images to predict' )
mode_parser.add_argument('--output_dir', default="snapshots/images/")
mode_parser.add_argument('--nms_threshold', help='non-max suppression threshold (referenced below; default here is an assumption)', type=float, default=0.5)
args=mode_parser.parse_args()
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
#load config (the config module is assumed to live in the DeepForest package, alongside evalmAP)
from DeepForest import config
DeepForest_config = config.load_config()
# adjust this to point to your downloaded/trained model
# load retinanet model
model = models.load_model(args.model, backbone_name='resnet50', convert=True, nms_threshold=args.nms_threshold)
labels_to_names = {0: 'Tree'}
|
import math
i = 5
j = 2 * i
j = j + 5
print(i)
print(j)
j = j - i
print(j)
print(7 / 2)
print(7 / 3.5)
print(8 + 2.6)
print(9 // 5)
print(9 % 5)
print(3 ** 4)
print(math.sqrt(36))
print(math.log2(2))
print(math.log2(4))
i = 5
print(type(i))
i = 7 * 1
print(type(i))
j = i / 3
print(type(i))
print(type(j))
i = 2 * j # Not recommended to change the type of variables in a program
print(type(i))
print('True or False is: ', True or False)
print('False or True is: ', False or True)
print('True or True is: ', True or True)
print('False or False is: ', False or False)
print('True and False is: ', True and False)
print('False and True is: ', False and True)
print('False and False is: ', False and False)
print('True and True is: ', True and True)
print('Not applied to True gives us: ', not True)
print('Not applied to False gives us: ', not False)
def divides(m, n):
return n % m == 0
def even(n):
return divides(2, n)
def odd(n):
return not divides(2, n)
print(divides(5, 25))
print(divides(3, 5))
print(even(2))
print(even(45))
print(odd(3))
print(odd(28))
|
def addfruit(fruit1, price1):
    """Build a fruit -> price dict from parallel lists, reporting duplicates (case-insensitive)."""
    fruit_dict = {}
    i = 0
    while i != len(fruit1):
        try:
            if fruit1[i].lower() not in fruit_dict:
                # store the lowercased name so the duplicate check above stays case-insensitive
                fruit_dict.update({fruit1[i].lower(): price1[i]})
            else:
                raise ValueError
        except ValueError:
            print(f"Error : {fruit1[i]} is already in dict")
        finally:
            i += 1
    return fruit_dict
|
from typing import List
class Solution:
def twoCitySchedCost(self, costs: List[List[int]]) -> int:
l = len(costs)//2
costs = sorted(costs, key=lambda i: abs(i[1]-i[0]))
n1 = n2 = tcost = 0
for i in range(len(costs)-1,-1,-1):
if costs[i][0] < costs[i][1]:
if n1<l:
tcost += costs[i][0]
n1 += 1
else:
tcost += costs[i][1]
n2 += 1
elif costs[i][0] > costs[i][1]:
if n2<l:
tcost += costs[i][1]
n2 += 1
else:
tcost += costs[i][0]
n1 += 1
else: tcost += costs[i][0]
return tcost
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from app.customer.models import Customer
class CustomerAdmin(admin.ModelAdmin):
model = Customer
admin.site.register(Customer, CustomerAdmin)
|
import numpy as np
import os
import csv
Tainan_city_id = '467410'
Tainan_city_id2= '467411'
Tainan_county_id = '467420'
Dataset=[]
labellist=['stno','yyyymmdd', 'PS01', 'PS02', 'PS03', 'PS04', 'PS05', 'PS06', 'PS07', 'PS08', 'PS09', 'PS10', 'TX01', 'TX02', 'TX03', 'TX04', 'TX05', 'TX06', 'TD01', 'TD02', 'TD03', 'TD04', 'TX07', 'TX08', 'TX09', 'VP01', 'VP02', 'VP03', 'VP04', 'VP05', 'RH01', 'RH02', 'RH03', 'RH04', 'RH05', 'WD01', 'WD02', 'WD03', 'WD04', 'WD05', 'WD06', 'WD07', 'WD08', 'WD09', 'PP01', 'PP02', 'PP03', 'PP04', 'PP05', 'PP06', 'SS01', 'SS02', 'GR01', 'GR02', 'GR03', 'VS01', 'CD01', 'SD01', 'ST01', 'ST02', 'ST03', 'ST04', 'ST05', 'ST06', 'ST07', 'ST08', 'ST09', 'ST10', 'ST11', 'ST12', 'EP01', 'EP02', 'EP03', 'TG01', 'TS01', 'TS02', 'TS03', 'TS04', 'TS05', 'TS06', 'TS07', 'TS08', 'TS09', 'TS10']
for year in range(1958,2018):
dir_path="C:\\Users\\User\\Desktop\\BS\\TyphoonData\\"+str(year)+"\\WeatherData"
for file in os.listdir(dir_path):
if file.find('_stn') == -1:
Path=os.path.join(dir_path,file)
with open(Path,'r') as f:
with open('./label.txt','a') as wb:
label=[]
feature=[]
dic={}
data = f.readlines()
for Data in data:
if Data.find('stno') !=-1:
label=Data.split()
for d in label:
if d == 'ST02':
label.remove('ST02')
if Data.find(Tainan_city_id) !=-1 or Data.find(Tainan_city_id2) !=-1:
feature=Data.split()
#for info in d_list:
#wb.write(info+" ")
#wb.write('0\n')
for i in range(1,len(label)):
dic[label[i]]=feature[i-1]
Dataset.append(dic)
with open('./newdata.txt','w') as dt:
writer=csv.writer(dt,delimiter='\t')
writer.writerow(labellist)
for year in range(len(Dataset)):
for label in labellist:
if label in Dataset[year]:
dt.write(Dataset[year][label]+'\t')
else:
dt.write('-9999\t')
dt.write('\n')
#print(Dataset)
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import environment
import tablet
import utils
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_0_backup = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
# shard_2 tablets shouldn't exist yet when _apply_initial_schema() is called.
initial_tablets = [
shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly,
shard_0_backup, shard_1_master, shard_1_replica1,
]
shard_2_tablets = [shard_2_master, shard_2_replica1]
all_tablets = initial_tablets + shard_2_tablets
test_keyspace = 'test_keyspace'
db_name = 'vt_' + test_keyspace
def setUpModule():
try:
environment.topo_server().setup()
_init_mysql(all_tablets)
utils.run_vtctl(['CreateKeyspace', test_keyspace])
utils.Vtctld().start(enable_schema_change_dir=True)
except Exception as setup_exception: # pylint: disable=broad-except
try:
tearDownModule()
except Exception as e: # pylint: disable=broad-except
logging.exception('Tearing down a failed setUpModule() failed: %s', e)
raise setup_exception
def _init_mysql(tablets):
setup_procs = []
for t in tablets:
setup_procs.append(t.init_mysql())
utils.wait_procs(setup_procs)
def _setup_shard_2():
shard_2_master.init_tablet('replica', test_keyspace, '2')
shard_2_replica1.init_tablet('replica', test_keyspace, '2')
# create databases, start the tablets
for t in shard_2_tablets:
t.create_db(db_name)
t.start_vttablet(wait_for_state=None)
# wait for the tablets to start
shard_2_master.wait_for_vttablet_state('NOT_SERVING')
shard_2_replica1.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', test_keyspace + '/2',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', test_keyspace])
def _teardown_shard_2():
tablet.kill_tablets(shard_2_tablets)
utils.run_vtctl(
['DeleteShard', '-recursive', '-even_if_serving', 'test_keyspace/2'],
auto_log=True)
for t in shard_2_tablets:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = []
for t in all_tablets:
teardown_procs.append(t.teardown_mysql())
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestSchema(unittest.TestCase):
def setUp(self):
shard_0_master.init_tablet('replica', test_keyspace, '0')
shard_0_replica1.init_tablet('replica', test_keyspace, '0')
shard_0_replica2.init_tablet('replica', test_keyspace, '0')
shard_0_rdonly.init_tablet('rdonly', test_keyspace, '0')
shard_0_backup.init_tablet('backup', test_keyspace, '0')
shard_1_master.init_tablet('replica', test_keyspace, '1')
shard_1_replica1.init_tablet('replica', test_keyspace, '1')
# create databases, start the tablets
for t in initial_tablets:
t.create_db(db_name)
t.start_vttablet(wait_for_state=None)
# wait for the tablets to start
for t in initial_tablets:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', test_keyspace + '/0',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', test_keyspace + '/1',
shard_1_master.tablet_alias], auto_log=True)
def tearDown(self):
# kill all tablets
tablet.kill_tablets(initial_tablets)
for t in initial_tablets:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
utils.run_vtctl(['DeleteShard', '-recursive', '-even_if_serving',
test_keyspace + '/0'], auto_log=True)
utils.run_vtctl(['DeleteShard', '-recursive', '-even_if_serving',
test_keyspace + '/1'], auto_log=True)
def _check_tables(self, tablet_obj, expected_count):
tables = tablet_obj.mquery(db_name, 'show tables')
self.assertEqual(
len(tables), expected_count,
'Unexpected table count on %s (not %d): got tables: %s' %
(tablet_obj.tablet_alias, expected_count, str(tables)))
def _apply_schema(self, keyspace, sql, expect_fail=False):
return utils.run_vtctl(['ApplySchema',
'-sql=' + sql,
keyspace],
expect_fail=expect_fail, auto_log=True)
def _get_schema(self, tablet_alias):
return utils.run_vtctl_json(['GetSchema',
tablet_alias])
def _create_test_table_sql(self, table):
return (
'CREATE TABLE %s (\n'
'`id` BIGINT(20) not NULL,\n'
'`msg` varchar(64),\n'
'PRIMARY KEY (`id`)\n'
') ENGINE=InnoDB') % table
def _alter_test_table_sql(self, table, index_column_name):
return (
'ALTER TABLE %s\n'
'ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST,\n'
'DROP PRIMARY KEY,\n'
'ADD PRIMARY KEY (new_id),\n'
'ADD INDEX idx_column(%s)\n') % (table, index_column_name)
def _apply_initial_schema(self):
schema_changes = ';'.join([
self._create_test_table_sql('vt_select_test01'),
self._create_test_table_sql('vt_select_test02'),
self._create_test_table_sql('vt_select_test03'),
self._create_test_table_sql('vt_select_test04')])
# apply schema changes to the test keyspace
self._apply_schema(test_keyspace, schema_changes)
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
# get schema for each shard
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
def test_schema_changes(self):
self._apply_initial_schema()
self._apply_schema(
test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg'))
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
# test schema changes
os.makedirs(os.path.join(utils.vtctld.schema_change_dir, test_keyspace))
input_path = os.path.join(
utils.vtctld.schema_change_dir, test_keyspace, 'input')
os.makedirs(input_path)
sql_path = os.path.join(input_path, 'create_test_table_x.sql')
with open(sql_path, 'w') as handler:
handler.write('create table test_table_x (id int)')
# wait until this sql file being consumed by autoschema
timeout = 10
while os.path.isfile(sql_path):
timeout = utils.wait_step(
'waiting for vtctld to pick up schema changes',
timeout, sleep_time=0.2)
# check number of tables
self._check_tables(shard_0_master, 5)
self._check_tables(shard_1_master, 5)
def test_schema_changes_drop_and_create(self):
"""Tests that a DROP and CREATE table will pass PreflightSchema check.
PreflightSchema checks each SQL statement separately. When doing so, it must
consider previous statements within the same ApplySchema command. For
example, a CREATE after DROP must not fail: When CREATE is checked, DROP
must have been executed first.
See: https://github.com/vitessio/vitess/issues/1731#issuecomment-222914389
"""
self._apply_initial_schema()
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
drop_and_create = ('DROP TABLE vt_select_test01;\n' +
self._create_test_table_sql('vt_select_test01'))
self._apply_schema(test_keyspace, drop_and_create)
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
def test_schema_changes_preflight_errors_partially(self):
"""Tests that some SQL statements fail properly during PreflightSchema."""
self._apply_initial_schema()
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
# Second statement will fail because the table already exists.
create_error = (self._create_test_table_sql('vt_select_test05') + ';\n' +
self._create_test_table_sql('vt_select_test01'))
stdout = self._apply_schema(test_keyspace, create_error, expect_fail=True)
self.assertIn('already exists', ''.join(stdout))
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
def test_schema_changes_drop_nonexistent_tables(self):
"""Tests the PreflightSchema logic for dropping nonexistent tables.
If a table does not exist, DROP TABLE should error during preflight
because the statement does not change the schema as there is
nothing to drop.
In case of DROP TABLE IF EXISTS though, it should not error as this
is the MySQL behavior the user expects.
"""
self._apply_initial_schema()
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
drop_table = ('DROP TABLE nonexistent_table;')
stdout = self._apply_schema(test_keyspace, drop_table, expect_fail=True)
self.assertIn('Unknown table', ''.join(stdout))
# This Query may not result in schema change and should be allowed.
drop_if_exists = ('DROP TABLE IF EXISTS nonexistent_table;')
self._apply_schema(test_keyspace, drop_if_exists)
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
def test_vtctl_copyschemashard_use_tablet_as_source(self):
self._test_vtctl_copyschemashard(shard_0_master.tablet_alias)
def test_vtctl_copyschemashard_use_shard_as_source(self):
self._test_vtctl_copyschemashard('test_keyspace/0')
def _test_vtctl_copyschemashard(self, source):
# Apply initial schema to the whole keyspace before creating shard 2.
self._apply_initial_schema()
_setup_shard_2()
try:
# InitShardMaster creates the db, but there shouldn't be any tables yet.
self._check_tables(shard_2_master, 0)
self._check_tables(shard_2_replica1, 0)
# Run the command twice to make sure it's idempotent.
for _ in range(2):
utils.run_vtctl(['CopySchemaShard',
source,
'test_keyspace/2'],
auto_log=True)
# shard_2_master should look the same as the replica we copied from
self._check_tables(shard_2_master, 4)
utils.wait_for_replication_pos(shard_2_master, shard_2_replica1)
self._check_tables(shard_2_replica1, 4)
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_2_schema = self._get_schema(shard_2_master.tablet_alias)
self.assertEqual(shard_0_schema, shard_2_schema)
finally:
_teardown_shard_2()
def test_vtctl_copyschemashard_different_dbs_should_fail(self):
# Apply initial schema to the whole keyspace before creating shard 2.
self._apply_initial_schema()
_setup_shard_2()
try:
# InitShardMaster creates the db, but there shouldn't be any tables yet.
self._check_tables(shard_2_master, 0)
self._check_tables(shard_2_replica1, 0)
# Change the db charset on the destination shard from utf8 to latin1.
# This will make CopySchemaShard fail during its final diff.
# (The different charset won't be corrected on the destination shard
# because we use "CREATE DATABASE IF NOT EXISTS" and this doesn't fail if
# there are differences in the options e.g. the character set.)
shard_2_schema = self._get_schema(shard_2_master.tablet_alias)
self.assertIn('utf8', shard_2_schema['database_schema'])
utils.run_vtctl_json(
['ExecuteFetchAsDba', '-json', shard_2_master.tablet_alias,
'ALTER DATABASE vt_test_keyspace CHARACTER SET latin1'])
_, stderr = utils.run_vtctl(['CopySchemaShard',
'test_keyspace/0',
'test_keyspace/2'],
expect_fail=True,
auto_log=True)
self.assertIn('schemas are different', stderr)
# shard_2_master should have the same number of tables. Only the db
# character set is different.
self._check_tables(shard_2_master, 4)
finally:
_teardown_shard_2()
if __name__ == '__main__':
utils.main()
|
url = 'https://opendart.fss.or.kr/api/list.xml'
|
import pyspark
from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setAppName("SalaryAnalysis")
sc = SparkContext(conf = conf)
# Detroit Open Data file located https://data.detroitmi.gov/Government/Mayoral-Appointee-Salaries/fwu6-4nb5
lines = sc.textFile("hdfs://cluster-b435-m/wsu/Mayoral_Appointee_Salaries.csv")
#lines.take(5)
# Get total no. of mayoral employees
lines.count()
# Get total no. of distinct titles
sc.textFile("hdfs://cluster-b435-m/wsu/rows.csv")\
.map(lambda line: (line.split(',')[2], line.split(',')[2]))\
.distinct()\
.count()
# How many departments are there?
sc.textFile("hdfs://cluster-b435-m/wsu/rows.csv")\
.map(lambda line: (line.split(',')[3], line.split(',')[3]))\
.distinct()\
.count()
# Parse each CSV line into fields once; the column positions below follow the original
# script's indexing (employee name at 0, department at 3, salary at 4, additional grant
# support at 6).
fields = lines.map(lambda line: line.split(','))
# Sum total city salaries paid to employees
fields.map(lambda x: float(x[4])).reduce(lambda a, b: a + b)
# Employees with annual salary >= $200,000
filter1 = fields.filter(lambda x: float(x[4]) >= 200000)
filter1.count()
# Employees with annual salary >= $150,000
filter2 = fields.filter(lambda x: float(x[4]) >= 150000)
filter2.count()
# Average total salary for police department
filter3 = fields.filter(lambda x: x[3] == "Police")
filter3.map(lambda x: float(x[4])).sum() / filter3.count()
# How many departments receive ADDITIONAL GRANT SUPPORT?
filter4 = fields.filter(lambda x: float(x[6]) > 0).count()
# Highest and lowest city salaries
highSal = fields.map(lambda x: float(x[4])) \
    .max()
lowSal = fields.map(lambda x: float(x[4])) \
    .min()
# List individual with highest salary (column 0 is assumed to hold the employee name)
topSal = fields.filter(lambda x: float(x[4]) == highSal) \
    .map(lambda x: x[0]) \
    .collect()
# List individual with lowest salary
bottomSal = fields.filter(lambda x: float(x[4]) == lowSal) \
    .map(lambda x: x[0]) \
    .collect()
|
import os
import sys
import pygame
import requests
from inoutbox import get_key, display_box, ask, main
question = 'https://geocode-maps.yandex.ru/1.x/?apikey=40d1649f-0493-4b70-98ba-98533de7710b&geocode=Австралия&format=json'
resp = requests.get(question)
if resp:
resp = resp.json()
cords = resp["response"]["GeoObjectCollection"]["featureMember"][0]["GeoObject"]["Point"]["pos"]
response = None
map_request = f"http://static-maps.yandex.ru/1.x/?ll={','.join(cords.split())}&spn=20,20&l=sat"
response = requests.get(map_request)
if not response:
print("ะัะธะฑะบะฐ ะฒัะฟะพะปะฝะตะฝะธั ะทะฐะฟัะพัะฐ:")
print(map_request)
print("Http ััะฐััั:", response.status_code, "(", response.reason, ")")
sys.exit(1)
map_file = "map.png"
with open(map_file, "wb") as file:
file.write(response.content)
pygame.init()
screen = pygame.display.set_mode((600, 450))
while pygame.event.wait().type != pygame.QUIT:
display_box(screen, 'tratata')
ask(screen, 'ask')
# screen.blit(pygame.image.load(map_file), (0, 0))
# pygame.display.flip()
pygame.quit()
os.remove(map_file)
|
from models.data_types import ComplexDataType
class Method(ComplexDataType):
def __init__(self, name, description, result_type_name=None, deprecated=False):
super(Method, self).__init__(name, description, deprecated)
self._result_type_name = result_type_name
self._permissions = {}
@property
def result_type_name(self):
return self._result_type_name
@result_type_name.setter
def result_type_name(self, value):
self._result_type_name = value
@property
def permissions(self):
return self._permissions
def add_permission(self, permission):
self._permissions[permission.role] = permission
def get_permission(self, role):
return self._permissions[role]
class ImplMethod(Method):
def __init__(self, name, description='', result_type_name=None, deprecated=False):
super(ImplMethod, self).__init__(name, description, result_type_name, deprecated)
self._frequency = None
self._auth_required = True
self._private_fields = {}
self._is_private = False
self._request_limit = None
self._time_limit = None
self._query_fields = {}
@property
def is_private(self):
return self._is_private
@is_private.setter
def is_private(self, value):
self._is_private = value
@property
def auth_required(self):
return self._auth_required
@auth_required.setter
def auth_required(self, value):
self._auth_required = value
@property
def frequency(self):
return self._frequency
@frequency.setter
def frequency(self, value):
self._frequency = value
@property
def request_limit(self):
return self._request_limit
@request_limit.setter
def request_limit(self, value):
self._request_limit = value
@property
def time_limit(self):
return self._time_limit
@time_limit.setter
def time_limit(self, value):
self._time_limit = value
def add_private_field(self, field):
self._private_fields[field.name] = field
def get_private_field(self, field_name):
return self._private_fields[field_name]
def private_fields(self):
return self._private_fields
def add_query_field(self, name, field):
if name in self._query_fields:
self._query_fields[name].append(field)
else:
self._query_fields[name] = [field]
def get_query_fields(self, name):
return self._query_fields.get(name, [])
def get_query_names(self):
return self._query_fields.keys()
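# --- Illustrative usage sketch (added; not part of the original module) ---
# Wires the classes above together. The method name, description, result type and
# query-field values are invented for the example; only the API calls come from
# the definitions above.
if __name__ == '__main__':
    method = ImplMethod('getUser', description='Returns a user profile',
                        result_type_name='User')
    method.frequency = 10
    method.is_private = False
    method.add_query_field('filters', 'active_only')
    method.add_query_field('filters', 'verified_only')
    print(method.result_type_name, method.get_query_fields('filters'))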
|
# -*- coding: utf-8 -*-
'''
NAPALM Probes
=============
Manages RPM/SLA probes on the network device.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: linux
Dependencies
------------
- :mod:`napalm proxy minion <salt.proxy.napalm>`
.. versionadded:: Carbon
'''
from __future__ import absolute_import
# Import python lib
import logging
log = logging.getLogger(__file__)
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
from napalm import get_network_driver
# pylint: enable=W0611
HAS_NAPALM = True
except ImportError:
HAS_NAPALM = False
# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'probes'
__proxyenabled__ = ['napalm']
# uses NAPALM-based proxy to interact with network devices
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work.
    Also, the key proxymodule must be set in the __opts__ dictionary.
'''
if HAS_NAPALM and 'proxy' in __opts__:
return __virtualname__
else:
return (False, 'The module napalm_probes cannot be loaded: \
napalm or proxy could not be loaded.')
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def config():
'''
Returns the configuration of the RPM probes.
:return: A dictionary containing the configuration of the RPM/SLA probes.
CLI Example:
.. code-block:: bash
salt '*' probes.config
Output Example:
.. code-block:: python
{
'probe1':{
'test1': {
'probe_type' : 'icmp-ping',
'target' : '192.168.0.1',
'source' : '192.168.0.2',
'probe_count' : 13,
'test_interval': 3
},
'test2': {
'probe_type' : 'http-ping',
'target' : '172.17.17.1',
'source' : '192.17.17.2',
'probe_count' : 5,
'test_interval': 60
}
}
}
'''
return __proxy__['napalm.call'](
'get_probes_config',
**{
}
)
def results():
'''
Provides the results of the measurements of the RPM/SLA probes.
    :return: A dictionary with the results of the probes.
CLI Example:
.. code-block:: bash
salt '*' probes.results
Output example:
.. code-block:: python
{
'probe1': {
'test1': {
'last_test_min_delay' : 63.120,
'global_test_min_delay' : 62.912,
'current_test_avg_delay': 63.190,
'global_test_max_delay' : 177.349,
'current_test_max_delay': 63.302,
'global_test_avg_delay' : 63.802,
'last_test_avg_delay' : 63.438,
'last_test_max_delay' : 65.356,
'probe_type' : 'icmp-ping',
'rtt' : 63.138,
'last_test_loss' : 0,
'round_trip_jitter' : -59.0,
'target' : '192.168.0.1',
                'source' : '192.168.0.2',
'probe_count' : 15,
'current_test_min_delay': 63.138
},
'test2': {
'last_test_min_delay' : 176.384,
'global_test_min_delay' : 169.226,
'current_test_avg_delay': 177.098,
'global_test_max_delay' : 292.628,
'current_test_max_delay': 180.055,
'global_test_avg_delay' : 177.959,
'last_test_avg_delay' : 177.178,
'last_test_max_delay' : 184.671,
'probe_type' : 'icmp-ping',
'rtt' : 176.449,
'last_test_loss' : 0,
'round_trip_jitter' : -34.0,
'target' : '172.17.17.1',
                'source' : '172.17.17.2',
'probe_count' : 15,
'current_test_min_delay': 176.402
}
}
}
'''
return __proxy__['napalm.call'](
'get_probes_results',
**{
}
)
def set_probes(probes):
'''
Configures RPM/SLA probes on the device.
Calls the configuration template 'set_probes' from the NAPALM library,
providing as input a rich formatted dictionary with the configuration details of the probes to be configured.
:param probes: Dictionary formatted as the output of the function config():
    :return: Whether the configuration of the device was updated.
Input example:
.. code-block:: python
probes = {
'new_probe':{
'new_test1': {
'probe_type' : 'icmp-ping',
'target' : '192.168.0.1',
'source' : '192.168.0.2',
'probe_count' : 13,
'test_interval': 3
},
'new_test2': {
'probe_type' : 'http-ping',
'target' : '172.17.17.1',
'source' : '192.17.17.2',
'probe_count' : 5,
'test_interval': 60
}
}
}
set_probes(probes)
'''
return __proxy__['napalm.call'](
'load_template',
**{
'template_name': 'set_probes',
'probes': probes
}
)
def delete_probes(probes):
'''
Removes RPM/SLA probes from the network device.
Calls the configuration template 'delete_probes' from the NAPALM library,
providing as input a rich formatted dictionary with the configuration details of the probes to be removed
from the configuration of the device.
:param probes: Dictionary with a similar format as the output dictionary of the function config(),
where the details are not necessary.
    :return: Whether the configuration of the device was updated.
Input example:
.. code-block:: python
probes = {
'existing_probe':{
'existing_test1': {},
'existing_test2': {}
}
}
'''
return __proxy__['napalm.call'](
'load_template',
**{
'template_name': 'delete_probes',
'probes': probes
}
)
def schedule_probes(probes):
'''
    Will schedule the probes. On Cisco devices it is not enough to define the probes; they must also be
    scheduled.
This method calls the configuration template 'schedule_probes' from the NAPALM library,
providing as input a rich formatted dictionary with the names of the probes and the tests to be scheduled.
:param probes: Dictionary with a similar format as the output dictionary of the function config(),
where the details are not necessary.
    :return: Whether the configuration of the device was updated.
Input example:
.. code-block:: python
probes = {
'new_probe':{
'new_test1': {},
'new_test2': {}
}
}
'''
return __proxy__['napalm.call'](
'load_template',
**{
'template_name': 'schedule_probes',
'probes': probes
}
)
|
def add_arguments(parser):
parser.add_argument(
"-d", "--data_file", required=True, help="file or dir containing training data"
)
parser.add_argument(
"-o",
"--out_file",
required=True,
help="file where to save training data in rasa format",
)
parser.add_argument("-l", "--language", default="en", help="language of the data")
parser.add_argument(
"-f",
"--format",
required=True,
choices=["json", "md"],
help="Output format the training data should be converted into.",
)
return parser
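# --- Illustrative usage sketch (added; not part of the original module) ---
# Drives add_arguments() with a standalone parser; the file names passed to
# parse_args() are placeholders, not real project paths.
if __name__ == "__main__":
    import argparse
    parser = add_arguments(argparse.ArgumentParser(description="convert training data"))
    args = parser.parse_args(["-d", "data/nlu", "-o", "out.json", "-f", "json"])
    print(args.data_file, args.out_file, args.language, args.format)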
|
from collections import deque
import random
import numpy as np
class DataMemory():
def __init__(self, max_len=50000):
super(DataMemory, self).__init__()
self.max_len = max_len
self.memory = deque()
def add(self, s_t, a_t, r_t, s_n, terminal):
self.memory.append((s_t, a_t, r_t, s_n, float(terminal)))
if len(self.memory) > self.max_len:
self.memory.popleft()
def gen_minibatch(self, batch_size=32):
minibatch = random.sample(self.memory, batch_size)
s_t, a_t, r_t, s_n, terminal = zip(*minibatch)
s_t = np.concatenate(s_t)
s_n = np.concatenate(s_n)
r_t = np.array(r_t).astype(np.float32).reshape(batch_size, 1)
terminal = np.array(terminal).astype(np.float32).reshape(batch_size, 1)
return s_t, a_t, r_t, s_n, terminal
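# --- Illustrative usage sketch (added; not part of the original module) ---
# Fills the replay memory with random (1, 4)-shaped states and samples a minibatch;
# the state size and batch size are arbitrary choices for the demonstration.
if __name__ == "__main__":
    memory = DataMemory(max_len=1000)
    for _ in range(100):
        s = np.random.randn(1, 4).astype(np.float32)
        s_next = np.random.randn(1, 4).astype(np.float32)
        memory.add(s, np.random.randint(2), 1.0, s_next, terminal=False)
    s_t, a_t, r_t, s_n, terminal = memory.gen_minibatch(batch_size=32)
    print(s_t.shape, r_t.shape, terminal.shape)  # (32, 4) (32, 1) (32, 1)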
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 27 12:48:05 2016
@author: wattai
"""
import numpy as np
import matplotlib.pyplot as plt
def gauss_ludgendle(N_iter):
a0 = 1. / np.sqrt(2)
b0 = 1.
s0 = 1.
t0 = 4.
pis = []
for i in range(N_iter):
a1 = np.sqrt(a0*b0)
b1 = (a0 + b0) / 2.
s1 = s0 - t0 * (b1 - b0)**2
t1 = 2. * t0
a0 = a1
b0 = b1
s0 = s1
t0 = t1
pi = ((a0 + b0)**2) / s0
pis.append(pi)
return np.array(pis)
def borwein_4d(N_iter):
a0 = 6. - 4. * np.sqrt(2)
y0 = np.sqrt(2) - 1.
pis = []
for i in range(N_iter):
y1 = ( 1 - ( 1 - y0**4 )**(1/4) ) / ( 1 + ( 1 - y0**4 )**(1/4) )
        a1 = a0 * (1. + y1)**4 - ( 2.**(2.*i + 3.) * y1 * (1. + y1 + y1**2) )  # factor is 2**(2k+3)
y0 = y1
a0 = a1
pi = 1 / a0
pis.append(pi)
return np.array(pis)
if __name__ == "__main__":
N_iter = 5
pis_gauss = gauss_ludgendle(N_iter)
pis_bor = borwein_4d(N_iter)
print("gauss pi: " +str(pis_gauss))
print("borwein pi: " +str(pis_bor))
print("true pi: " +str(np.pi))
print("error: " +str(abs(np.pi-pis_gauss[N_iter-1])))
print("error: " +str(abs(np.pi-pis_bor[N_iter-1])))
n = np.arange(N_iter)
    plt.figure()
    plt.plot(n, (np.pi - pis_gauss), color="red", label="Gauss-Legendre")
    plt.plot(n, (np.pi - pis_bor), color="blue", label="Borwein (quartic)")
    plt.legend()
    plt.xlabel("iteration")
    plt.ylabel("error (pi - estimate)")
    plt.show()
|
import recommendations
from math import sqrt
#import pydilicious
#import maybe  # the version modified for Python 3, with the test-only parts removed.
import noway  # everything that get_popular() needs seems to be in here.
## The earlier content of chapter 2 can still be worked through. www
'''
print(critics['Lisa Rose']['Lady in the Water'])
critics['Toby']['Snakes on a Plane'] = 4.5
print(critics['Toby'])  # the output order is random.
a = 1/(1+sqrt(pow(4.5-4,2)+pow(1-2,2)))
print(a)
b = recommendations.sim_distance(recommendations.critics, 'Lisa Rose', 'Gene Seymour')
print(b)
c = recommendations.sim_pearson(recommendations.critics, 'Lisa Rose', 'Gene Seymour')
print(c)
k = recommendations.topMatches(recommendations.critics, 'Toby', n=6)
print(k)
g = recommendations.getRecommendations(recommendations.critics, 'Toby')
print(g)
movies = recommendations.transformPrefs(recommendations.critics)
mm = recommendations.topMatches(movies, 'Superman Returns')
#print(mm)
ww = recommendations.getRecommendations(movies, 'Lady in the Water')
print(ww)'''
a = noway.get_popular(tag = 'programming')
print(a)  # page 42. For some reason all I get back are the titles.
|
from multiprocessing import Pool
from indigox.config import INIT_WITH_GA, NUM_PROCESSES
from indigox.exception import IndigoUnfeasibleComputation
from indigox.misc import (BondOrderAssignment, graph_to_dist_graph, electron_spots,
electrons_to_add, locs_sort, HashBitArray, graph_setup,
node_energy, bitarray_to_assignment)
class LocalOptimisation(BondOrderAssignment):
def __init__(self, G):
self.init_G = G
def initialise(self):
self.G = graph_to_dist_graph(self.init_G)
self.target = electrons_to_add(self.init_G)
self.locs = locs_sort(electron_spots(self.init_G), self.G)
if INIT_WITH_GA:
self.init_a = HashBitArray(len(self.locs))
self.init_a.setall(False)
base_energy = self.calc_energy(self.init_a)[1]
all_locs = list(range(len(self.locs)))
while self.init_a.count() < self.target:
energy_diffs = {}
for i in all_locs:
self.init_a[i] = True
energy_diffs[i] = (self.calc_energy(self.init_a)[1]
- base_energy)
self.init_a[i] = False
min_i = min(energy_diffs, key=lambda x: energy_diffs[x])
self.init_a[min_i] = True
base_energy += energy_diffs[min_i]
all_locs.remove(min_i)
else:
self.init_a = HashBitArray(len(self.locs))
self.init_a.setall(False)
self.init_a[:self.target] = True
if self.init_a.count() != self.target:
            raise IndigoUnfeasibleComputation('Can only optimise when all '
                                              'electrons are placed in the initial guess.')
def run(self):
self.initialise()
min_ene = self.calc_energy(self.init_a)[1]
seen = {self.init_a : min_ene}
current_min = [self.init_a]
min_round = min_ene + 1 # So the while loop is entered,
round_mins = current_min[:] # regardless of min_ene value.
pool = Pool(processes=NUM_PROCESSES)
while abs(min_round - min_ene) > 1e-10:
min_ene = min_round
current_min = round_mins[:]
a = current_min[0]
results = pool.imap_unordered(self.calc_energy,
(n for n in self.neighbours(a)
if n not in seen),
chunksize=8)
for n, n_ene in results:
seen[n] = n_ene
if n_ene - min_round < -1e-10:
min_round = n_ene
round_mins = [n]
elif -1e-10 < n_ene - min_round < 1e-10:
round_mins.append(n)
pool.terminate()
bitarray_to_assignment(self.init_G, current_min[0], self.locs)
return self.init_G, seen[current_min[0]]
def calc_energy(self, a):
graph_setup(self.G, a, self.locs)
ene = sum(node_energy(self.G, n) for n in self.G)
return a, round(ene, 5)
def neighbours(self, a):
for source in set(self.locs):
i = self.locs.index(source)
i_count = self.locs.count(source)
i_loc = i + a[i:i+i_count].count() - 1
if not a[i:i+i_count].count():
continue
for target in set(self.locs):
if source == target:
continue
j = self.locs.index(target)
j_count = self.locs.count(target)
j_loc = j + a[j:j+j_count].count()
if j_count == a[j:j+j_count].count():
continue
b = a.copy()
b[i_loc] = False
b[j_loc] = True
yield b
|
try: # try/except block
raise IndexError # raise the exception
except IndexError: # catch the exception
print('got the exception')
print('continuing')
|
from unittest import TestCase
import search
class TestSearch(TestCase):
def test_search_by_name_should_return_empty(self):
contact_data = [
{
"city": "Rennes",
"name": "Ivan Riley",
"country": "Burkina Faso",
"company": "Nonummy Fusce Ltd",
"job_history": [
"Apple Systems",
"Google"
],
"email": "tincidunt.orci@convallisdolorQuisque.co.uk"
},
{
"city": "San Miguel",
"name": "Ignatius Tate",
"country": "Saudi Arabia",
"company": "Etiam Ligula Consulting",
"job_history": [
"Apple Systems",
"Cakewalk"
],
"email": "vitae@metusInnec.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
}
]
result = search.search_by_name('blah', contact_data)
assert len(result) == 0
def test_search_by_name_should_return_one_result(self):
contact_data = [
{
"city": "Rennes",
"name": "Ivan Riley",
"country": "Burkina Faso",
"company": "Nonummy Fusce Ltd",
"job_history": [
"Apple Systems",
"Google"
],
"email": "tincidunt.orci@convallisdolorQuisque.co.uk"
},
{
"city": "San Miguel",
"name": "Ignatius Tate",
"country": "Saudi Arabia",
"company": "Etiam Ligula Consulting",
"job_history": [
"Apple Systems",
"Cakewalk"
],
"email": "vitae@metusInnec.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
}
]
result = search.search_by_name('Nyssa', contact_data)
assert len(result) == 1
    def test_search_by_name_should_return_two_results(self):
contact_data = [
{
"city": "Rennes",
"name": "Ivan Riley",
"country": "Burkina Faso",
"company": "Nonummy Fusce Ltd",
"job_history": [
"Apple Systems",
"Google"
],
"email": "tincidunt.orci@convallisdolorQuisque.co.uk"
},
{
"city": "San Miguel",
"name": "Ignatius Tate",
"country": "Saudi Arabia",
"company": "Etiam Ligula Consulting",
"job_history": [
"Apple Systems",
"Cakewalk"
],
"email": "vitae@metusInnec.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
}
]
result = search.search_by_name('i', contact_data)
assert len(result) == 2
def test_search_should_return_empty(self):
contact_data = [
{
"city": "Rennes",
"name": "Ivan Riley",
"country": "Burkina Faso",
"company": "Nonummy Fusce Ltd",
"job_history": [
"Apple Systems",
"Google"
],
"email": "tincidunt.orci@convallisdolorQuisque.co.uk"
},
{
"city": "San Miguel",
"name": "Ignatius Tate",
"country": "Saudi Arabia",
"company": "Etiam Ligula Consulting",
"job_history": [
"Apple Systems",
"Cakewalk"
],
"email": "vitae@metusInnec.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
}
]
result = search.search_by_all_fields('_', contact_data)
assert len(result) == 0
def test_search_should_return_duplicate(self):
contact_data = [
{
"city": "Rennes",
"name": "Ivan Riley",
"country": "Burkina Faso",
"company": "Nonummy Fusce Ltd",
"job_history": [
"Apple Systems",
"Google"
],
"email": "tincidunt.orci@convallisdolorQuisque.co.uk"
},
{
"city": "San Miguel",
"name": "Ignatius Tate",
"country": "Saudi Arabia",
"company": "Etiam Ligula Consulting",
"job_history": [
"Apple Systems",
"Cakewalk"
],
"email": "vitae@metusInnec.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
}
]
result = search.search_by_all_fields('Nyssa Hammond', contact_data)
assert len(result) == 2
def test_search_should_return_london_contact_only(self):
contact_data = [
{
"city": "Rennes",
"name": "Ivan Riley",
"country": "Burkina Faso",
"company": "Nonummy Fusce Ltd",
"job_history": [
"Apple Systems",
"Google"
],
"email": "tincidunt.orci@convallisdolorQuisque.co.uk"
},
{
"city": "San Miguel",
"name": "Ignatius Tate",
"country": "Saudi Arabia",
"company": "Etiam Ligula Consulting",
"job_history": [
"Apple Systems",
"Cakewalk"
],
"email": "vitae@metusInnec.org"
},
{
"city": "Saint-Remy",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
},
{
"city": "London City",
"name": "Nyssa Hammond",
"country": "Mozambique",
"company": "Dui Nec Tempus Inc.",
"job_history": [
"Chami",
"Cakewalk"
],
"email": "dolor@adipiscingelitEtiam.org"
}
]
result = search.search_by_all_fields('London City', contact_data)
assert len(result) > 0 and result[0]["name"] == "Nyssa Hammond"
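# --- Illustrative reference implementation (added) ---
# The project's actual search module is not shown here; the functions below are an
# assumed, minimal implementation that is consistent with the expected counts in the
# tests above (case-sensitive substring matching over the name or over every field).
def _reference_search_by_name(term, contacts):
    return [c for c in contacts if term in c["name"]]
def _reference_search_by_all_fields(term, contacts):
    def flatten(value):
        return " ".join(value) if isinstance(value, list) else str(value)
    return [c for c in contacts if any(term in flatten(v) for v in c.values())]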
|
import logging
import aioisotp
logging.basicConfig(level=logging.DEBUG)
network = aioisotp.SyncISOTPNetwork(channel='vcan0', interface='virtual', receive_own_messages=True)
server = network.create_sync_connection(0x456, 0x123)
with network.open():
client = network.create_sync_connection(0x123, 0x456)
client.send(b'123456789')
payload = server.recv(1)
assert payload == b'123456789'
|
import twitter
from twitter.stream import TwitterStream, Timeout, HeartbeatTimeout, Hangup
from twitter.oauth import OAuth
from twitter.oauth2 import OAuth2, read_bearer_token_file
from twitter.util import printNicely
from string import ascii_letters
import os
import re
import string
import sys
# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
# for these credentials, which you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
API_KEY = '***'
API_SECRET = '***'
OAUTH_TOKEN = '***'
OAUTH_TOKEN_SECRET = '***'
DEFAULT_HASH = "e"
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
API_KEY, API_SECRET)
query = DEFAULT_HASH
count = 10000
twitter_api = twitter.Twitter(auth=auth)
def make_tweet(text, time, lat, lon):
"""Return a tweet, represented as a Python dictionary.
text -- A string; the text of the tweet, all in lowercase
time -- A datetime object; the time that the tweet was posted
lat -- A number; the latitude of the tweet's location
lon -- A number; the longitude of the tweet's location
>>> t = make_tweet("just ate lunch", datetime(2012, 9, 24, 13), 38, 74)
>>> tweet_words(t)
['just', 'ate', 'lunch']
>>> tweet_time(t)
datetime.datetime(2012, 9, 24, 13, 0)
>>> p = tweet_location(t)
>>> latitude(p)
38
"""
return {'text': text, 'time': time, 'latitude': lat, 'longitude': lon}
def tweet_words(tweet):
"""Return a list of the words in the text of a tweet."""
t = tweet['text']
return t.split()
def tweet_time(tweet):
"""Return the datetime that represents when the tweet was posted."""
return tweet['time']
def tweet_location(tweet):
"""Return a position (see geo.py) that represents the tweet's location."""
return make_position(tweet['latitude'], tweet['longitude'])
def tweet_string(tweet):
"""Return a string representing the tweet."""
location = tweet_location(tweet)
return '"{0}" @ {1}'.format(tweet['text'], (latitude(location), longitude(location)))
def extract_words(text):
"""Return the words in a tweet, not including punctuation.
>>> extract_words('anything else.....not my job')
['anything', 'else', 'not', 'my', 'job']
>>> extract_words('i love my job. #winning')
['i', 'love', 'my', 'job', 'winning']
>>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
>>> extract_words("paperclips! they're so awesome, cool, & useful!")
['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
>>> extract_words('@(cat$.on^#$my&@keyboard***@#*')
['cat', 'on', 'my', 'keyboard']
"""
    # Accept either a raw string or a list of words that has already been split.
    if not isinstance(text, str):
        text = ' '.join(text)
    # Replace every non-letter character with a space, then split on whitespace.
    cleaned = ''.join(c if c in ascii_letters else ' ' for c in text)
    return cleaned.split()
# Look for data directory
PY_PATH = sys.argv[0]
if PY_PATH.endswith('doctest.py') and len(sys.argv) > 1:
PY_PATH = sys.argv[1]
DATA_PATH = os.path.join(os.path.dirname(PY_PATH), 'data') + os.sep
if not os.path.exists(DATA_PATH):
DATA_PATH = 'data' + os.sep
def load_sentiments(file_name=DATA_PATH + "sentiments.csv"):
"""Read the sentiment file and return a dictionary containing the sentiment
score of each word, a value from -1 to +1.
"""
sentiments = {}
for line in open(file_name):
word, score = line.split(',')
sentiments[word] = float(score.strip())
return sentiments
word_sentiments = load_sentiments()
def make_sentiment(value):
"""Return a sentiment, which represents a value that may not exist.
>>> positive = make_sentiment(0.2)
>>> neutral = make_sentiment(0)
>>> unknown = make_sentiment(None)
>>> has_sentiment(positive)
True
>>> has_sentiment(neutral)
True
>>> has_sentiment(unknown)
False
>>> sentiment_value(positive)
0.2
>>> sentiment_value(neutral)
0
"""
assert value is None or (value >= -1 and value <= 1), 'Illegal value'
if value is None:
return (False, value)
else:
return (True, value)
def has_sentiment(s):
"""Return whether sentiment s has a value."""
return s[0]
def sentiment_value(s):
"""Return the value of a sentiment s."""
assert has_sentiment(s), 'No sentiment value'
return s[1]
def get_word_sentiment(word):
"""Return a sentiment representing the degree of positive or negative
feeling in the given word.
>>> sentiment_value(get_word_sentiment('good'))
0.875
>>> sentiment_value(get_word_sentiment('bad'))
-0.625
>>> sentiment_value(get_word_sentiment('winning'))
0.5
>>> has_sentiment(get_word_sentiment('California'))
False
"""
# Learn more: http://docs.python.org/3/library/stdtypes.html#dict.get
return make_sentiment(word_sentiments.get(word))
def analyze_tweet_sentiment(tweet):
""" Return a sentiment representing the degree of positive or negative
sentiment in the given tweet, averaging over all the words in the tweet
that have a sentiment value.
If no words in the tweet have a sentiment value, return
make_sentiment(None).
>>> positive = make_tweet('i love my job. #winning', None, 0, 0)
>>> round(sentiment_value(analyze_tweet_sentiment(positive)), 5)
0.29167
>>> negative = make_tweet("saying, 'i hate my job'", None, 0, 0)
>>> sentiment_value(analyze_tweet_sentiment(negative))
-0.25
>>> no_sentiment = make_tweet("berkeley golden bears!", None, 0, 0)
>>> has_sentiment(analyze_tweet_sentiment(no_sentiment))
False
"""
    total = 0
    count = 0
    tweet_text = extract_words(tweet_words(tweet))
    for word in tweet_text:
        word_sentiment = get_word_sentiment(word)
        if has_sentiment(word_sentiment):
            total += sentiment_value(word_sentiment)
            count += 1
    if count == 0:
        # No word carried a sentiment value (different from the values summing to zero).
        return make_sentiment(None)
    return make_sentiment(total / count)
def main():
query_args = dict()
query_args['track'] = query
stream = TwitterStream(auth=auth)
tweet_iter = stream.statuses.filter(**query_args)
#tweet_iter = stream.statuses.sample()
tweets = []
# Iterate over the sample stream.
for tweet in tweet_iter:
# You must test that your tweet has text. It might be a delete
# or data message.
if tweet is None:
print("-- None --")
elif tweet is Timeout:
print("-- Timeout --")
elif tweet is HeartbeatTimeout:
print("-- Heartbeat Timeout --")
elif tweet is Hangup:
print("-- Hangup --")
elif tweet.get('text'):
senti = analyze_tweet_sentiment(tweet)
if (tweet.get('coordinates') != None):
curr = [tweet.get('coordinates'), senti]
tweets.append(curr)
#print(tweets)
else:
print("-- Some data: " + str(tweet))
if __name__ == '__main__':
main()
|
# Generated by Django 2.0.7 on 2018-07-31 09:11
from django.db import migrations, models
import django.utils.datetime_safe
class Migration(migrations.Migration):
dependencies = [
('post_app', '0008_comment_comment'),
]
operations = [
migrations.AlterField(
model_name='file',
name='created_date',
field=models.DateTimeField(default=django.utils.datetime_safe.datetime.now),
),
migrations.AlterField(
model_name='link',
name='created_date',
field=models.DateTimeField(default=django.utils.datetime_safe.datetime.now),
),
migrations.AlterField(
model_name='text',
name='created_date',
field=models.DateTimeField(default=django.utils.datetime_safe.datetime.now),
),
]
|
#!/usr/bin/env python
from manimlib.imports import *
# To watch one of these scenes, run the following:
# python -m manim example_scenes.py SquareToCircle -pl
#
# Use the flat -l for a faster rendering at a lower
# quality.
# Use -s to skip to the end and just save the final frame
# Use the -p to have the animation (or image, if -s was
# used) pop up once done.
# Use -n <number> to skip ahead to the n'th animation of a scene.
# Use -r <number> to specify a resolution (for example, -r 1080
# for a 1920x1080 video)
extra_accel = lambda point: 0.0 # use to simulate feedback/feedforward
class State:
def __init__(self, position, velocity):
self.theta = position
self.omega = velocity
# noinspection PyAttributeOutsideInit
class PendulumCirclingOrigin(Scene):
CONFIG = {
"extra_accel_": lambda point: 0.0,
"point_vector_max_len": 6.0,
"show_state_point_vector": True,
"hide_pendulum": False,
"pendulum_config": {
"initial_theta": 60 * DEGREES,
"length": 2.0,
"damping": 0,
"top_point": ORIGIN,
},
"vector_field_config": {
# "max_magnitude": 2,
"delta_x": 0.5 * 1.5,
"delta_y": 0.5 * 1.5,
# "x_max": 6,
"length_func": lambda norm: 0.6 * sigmoid(norm)
# "color_by_arc_length": True,
# "colors": [BLUE_E, GREEN, YELLOW, RED]
},
"coordinate_plane_config": {
"x_max": 5 * PI / 2,
"x_min": -5 * PI / 2,
"y_max": 5,
"y_min": -5
}
}
def construct(self):
global extra_accel
extra_accel = self.extra_accel_
self.state = State(self.pendulum_config["initial_theta"], 0.0)
self.plane = NumberPlane(**self.coordinate_plane_config)
self.create_pendulum_but_dont_add()
self.create_vector_field()
self.create_point_and_vec()
self.add_pendulum()
# self.wait(20)
self.wait(5)
def add_pendulum(self):
self.add(self.pendulum)
def get_evolving_trajectory(self, mobject, color=WHITE):
trajectory = VMobject()
trajectory.start_new_path(mobject.get_center())
trajectory.set_stroke(color, opacity=1)
def update_trajectory(traj):
point = mobject.get_center()
            if get_norm(trajectory.points[-1] - point) > 0.05:
traj.add_smooth_curve_to(point)
trajectory.add_updater(update_trajectory)
return trajectory
def create_vector_field(self):
plane = self.plane
plane.add(plane.y_axis.get_labels())
plane.x_axis.add_numbers(direction=DL)
plane.add(plane.get_axis_labels("\\theta", "\\omega"))
vector_field = self.vector_field = VectorField(self.pendulum_function, **self.vector_field_config)
self.vector_field.sort(get_norm)
self.add(plane, vector_field)
def create_point_and_vec(self):
pendulum: Pendulum = self.pendulum
state_point = Dot().set_color(GREEN)
state_point.add_updater(
lambda point: state_point.move_to((np.array((self.pendulum.get_theta(), self.pendulum.get_omega(), 0.)))))
def draw_vector_and_move_state_point():
# Create a dot to represent our current state in state-space
state_point_pos = state_point.get_center_of_mass()
state_point_at_t = state_point_pos
# Create a vector representing xdot at tour current point in state-space
xdot_at_t = self.vector_field.func(state_point_at_t)
multiple = np.clip(
get_norm(xdot_at_t), -self.point_vector_max_len, self.point_vector_max_len
)
# vector = Vector(xdot_at_t / multiple)
vector = Vector(xdot_at_t / multiple)
vector.shift(state_point_pos)
vector.set_color(GREEN)
# return our point + vector mobj
# vector.s(state_point_at_t)
return vector
self.state_point = state_point
self.trajectory = self.get_evolving_trajectory(state_point)
if (self.show_state_point_vector):
state_vector = always_redraw(draw_vector_and_move_state_point)
self.add(state_vector)
self.add(self.trajectory, self.state_point)
def pendulum_function(self, point):
x, y = self.plane.point_to_coords(point)
return pendulum_vector_field_func(np.array((x, y, 0.)), L=self.pendulum_config['length'])
def create_pendulum_but_dont_add(self):
pendulum = self.pendulum = Pendulum(**self.pendulum_config)
pendulum.add_theta_label()
pendulum.add_velocity_vector()
pendulum.start_swinging()
pendulum = self.pendulum
background_rectangle = Rectangle(height=6, width=6, opacity=1.0, color=GREEN) \
.set_fill(color=BLACK, opacity=1.0) \
.shift(DOWN * 0.5)
pendulum.add_to_back(background_rectangle)
pendulum.scale_in_place(0.5)
if (self.hide_pendulum is False):
pendulum.move_to(TOP + LEFT_SIDE + (RIGHT + DOWN) * 0.25, aligned_edge=pendulum.get_corner(UP + LEFT))
else:
pendulum.move_to((TOP + LEFT_SIDE) * 1.1, aligned_edge=pendulum.get_corner(DOWN + RIGHT))
def pendulum_vector_field_func(point, L=3, g=9.8):
x, y = point[:2]
x_dot = np.array([[0, 1], [-g / L, 0]]) @ (np.array([[math.sin(x)], [y]]))
a_dot_x = np.array([x_dot[0, 0], x_dot[1, 0], 0.0])
# x, y = point[:2]
extra_acceleration = extra_accel(np.array((x, y)))
# a_dot_x = np.array([
# y,
# -np.sqrt(g / L) * np.sin(x) - mu * y,
# 0.,
# ])
# print("normal: %s, extra: %s" % (a_dot_x, extra_acceleration))
return a_dot_x + extra_acceleration
# return np.array([1, 0, 0])
class Pendulum(VGroup):
CONFIG = {
"length": 2,
"weight_diameter": 0.5,
"initial_theta": 0.3,
"omega": 0,
"damping": 0.1,
"top_point": 2 * UP,
"rod_style": {
"stroke_width": 3,
"stroke_color": LIGHT_GREY,
"sheen_direction": UP,
"sheen_factor": 1,
},
"weight_style": {
"stroke_width": 0,
"fill_opacity": 1,
"fill_color": GREY_BROWN,
"sheen_direction": UL,
"sheen_factor": 0.5,
"background_stroke_color": BLACK,
"background_stroke_width": 3,
"background_stroke_opacity": 0.5,
},
"dashed_line_config": {
"num_dashes": 25,
"stroke_color": WHITE,
"stroke_width": 2,
},
"angle_arc_config": {
"radius": 1,
"stroke_color": WHITE,
"stroke_width": 2,
},
"velocity_vector_config": {
"color": RED,
},
"theta_label_height": 0.25,
"set_theta_label_height_cap": False,
"n_steps_per_frame": 100,
"include_theta_label": True,
"include_velocity_vector": False,
"velocity_vector_multiple": 0.5,
"max_velocity_vector_length_to_length_ratio": 0.5,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.create_fixed_point()
self.create_rod()
self.create_weight()
self.rotating_group = VGroup(self.rod, self.weight)
self.create_dashed_line()
self.create_angle_arc()
if self.include_theta_label:
self.add_theta_label()
if self.include_velocity_vector:
self.add_velocity_vector()
self.set_theta(self.initial_theta)
self.update()
def create_fixed_point(self):
self.fixed_point_tracker = VectorizedPoint(self.top_point)
self.add(self.fixed_point_tracker)
return self
def create_rod(self):
rod = self.rod = Line(UP, DOWN)
rod.set_height(self.length)
rod.set_style(**self.rod_style)
rod.move_to(self.get_fixed_point(), UP)
self.add(rod)
def create_weight(self):
weight = self.weight = Circle()
weight.set_width(self.weight_diameter)
weight.set_style(**self.weight_style)
weight.move_to(self.rod.get_end())
self.add(weight)
def create_dashed_line(self):
line = self.dashed_line = DashedLine(
self.get_fixed_point(),
self.get_fixed_point() + self.length * DOWN,
**self.dashed_line_config
)
line.add_updater(
lambda l: l.move_to(self.get_fixed_point(), UP)
)
self.add_to_back(line)
def create_angle_arc(self):
self.angle_arc = always_redraw(lambda: Arc(
arc_center=self.get_fixed_point(),
start_angle=-90 * DEGREES,
angle=self.get_arc_angle_theta(),
**self.angle_arc_config,
))
self.add(self.angle_arc)
def get_arc_angle_theta(self):
# Might be changed in certain scenes
return self.get_theta()
def add_velocity_vector(self):
def make_vector():
omega = self.get_omega()
theta = self.get_theta()
mvlr = self.max_velocity_vector_length_to_length_ratio
max_len = mvlr * self.rod.get_length()
vvm = self.velocity_vector_multiple
multiple = np.clip(
vvm * omega, -max_len, max_len
)
vector = Vector(
multiple * RIGHT,
**self.velocity_vector_config,
)
vector.rotate(theta, about_point=ORIGIN)
vector.shift(self.rod.get_end())
return vector
self.velocity_vector = always_redraw(make_vector)
self.add(self.velocity_vector)
return self
def add_theta_label(self):
self.theta_label = always_redraw(self.get_label)
self.add(self.theta_label)
def get_label(self):
label = TexMobject("\\theta")
label.set_height(self.theta_label_height)
if self.set_theta_label_height_cap:
max_height = self.angle_arc.get_width()
if label.get_height() > max_height:
label.set_height(max_height)
top = self.get_fixed_point()
arc_center = self.angle_arc.point_from_proportion(0.5)
vect = arc_center - top
norm = get_norm(vect)
vect = normalize(vect) * (norm + self.theta_label_height)
label.move_to(top + vect)
return label
#
def get_theta(self):
theta = self.rod.get_angle() - self.dashed_line.get_angle()
theta = (theta + PI) % TAU - PI
return theta
def get_unbounded_theta(self):
return self.rod.get_angle() - self.dashed_line.get_angle()
def set_theta(self, theta):
self.rotating_group.rotate(
theta - self.get_theta()
)
self.rotating_group.shift(
self.get_fixed_point() - self.rod.get_start(),
)
return self
def get_omega(self):
return self.omega
def set_omega(self, omega):
self.omega = omega
return self
def get_fixed_point(self):
return self.fixed_point_tracker.get_location()
#
def start_swinging(self):
self.add_updater(Pendulum.update_by_gravity)
def end_swinging(self):
self.remove_updater(Pendulum.update_by_gravity)
def update_by_gravity(self, dt):
theta = self.get_theta()
omega = self.get_omega()
if (theta > 3):
ohno = 4
nspf = self.n_steps_per_frame
for x in range(nspf):
d_theta = omega * dt / nspf
d_omega = pendulum_vector_field_func(np.array((theta, omega, 0.)), L=self.length)[1] * dt / nspf
theta += d_theta
omega += d_omega
self.set_theta(theta)
self.set_omega(omega)
return self
class UnstableFeedForwardAtHorizontal(PendulumCirclingOrigin):
CONFIG = {
"extra_accel_": lambda point: np.array((0.0, 4.9, 0.0)),
"pendulum_config": {
"initial_theta": 60 * DEGREES,
},
}
class FeedbackWithArmAtHorizontal(PendulumCirclingOrigin):
CONFIG = {
"extra_accel_": lambda point: (
np.array((0.0, 4.9, 0.0)) + np.array((1.0 * (PI / 2.0 - point[0]), 1.0 * (0.0 - point[1]), 0.0))),
"pendulum_config": {
"initial_theta": 30 * DEGREES,
},
"show_state_point_vector": False,
"hide_pendulum": True
}
def construct(self):
global extra_accel
extra_accel = self.extra_accel_
self.plane = NumberPlane(**self.coordinate_plane_config)
self.create_pendulum_but_dont_add()
self.create_vector_field()
self.create_point_and_vec()
self.add_pendulum()
# self.wait(20)
self.wait(4)
class DotWithState(Dot):
def __init__(self, n_steps_per_frame, length, gravity, plane: NumberPlane, point=ORIGIN, state=None, **kwargs):
Dot.__init__(self, point=point, **kwargs)
if state is None:
self.state = State(point[0], point[1])
else:
self.state = state
self.n_steps_per_frame = n_steps_per_frame
self.length = length
self.gravity = gravity
self.plane: NumberPlane = plane
def get_state(self):
return self.state
def get_theta(self):
return self.state.theta
def get_omega(self):
return self.state.omega
def set_theta(self, theta):
self.state.theta = theta
def set_omega(self, omega):
self.state.omega = omega
def update_position(self):
x, y = self.plane.point_to_coords(np.array((self.state.theta, self.state.omega, 0.0)))
self.move_to([x, y, 0])
def start_swinging(self):
self.add_updater(DotWithState.update_state_by_gravity)
def update_state_by_gravity(self, dt):
theta = self.get_theta()
omega = self.get_omega()
nspf = self.n_steps_per_frame
for x in range(nspf):
d_theta = omega * dt / nspf
d_omega = pendulum_vector_field_func(np.array((theta, omega, 0.)), L=self.length)[1] * dt / nspf
theta += d_theta
omega += d_omega
self.set_theta(theta)
self.set_omega(omega)
self.update_position()
return self
class ShowMultipleFeedback(PendulumCirclingOrigin):
CONFIG = {
"gravity": 9.8,
"extra_accel_": lambda point: (
np.array((0.0, 0.0, 0.0)) + 0 * np.array(((0 - point[0]), (0 - point[1]), 0.0))),
"show_state_point_vector": False,
"hide_pendulum": True,
"n_steps_per_frame": 100
}
def construct(self):
global extra_accel
extra_accel = self.extra_accel_
self.plane = NumberPlane(**self.coordinate_plane_config)
self.create_vector_field()
self.create_and_add_points()
self.wait(10.0)
def create_and_add_points(self):
# create 10 points between -5 and 5 on x and -3 and 3 on y
dot = DotWithState(self.n_steps_per_frame, self.pendulum_config["length"], self.gravity, self.plane,
point=np.array((2, 1, 0)))
trajectory = (self.get_evolving_trajectory(dot, color=WHITE))
dot.start_swinging()
self.add(trajectory, dot)
for x in np.arange(-7, 7.1, 2.0):
for y in np.arange(-4, 4.1, 2.0):
dot = DotWithState(self.n_steps_per_frame, self.pendulum_config["length"], self.gravity, self.plane,
point=np.array((x, y, 0)))
trajectory = self.get_evolving_trajectory(dot, WHITE)
dot.start_swinging()
self.add(trajectory, dot)
|
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.QuestionIndexView.as_view(), name='qa_index'),
url(r'^question/(?P<pk>\d+)/$',
views.QuestionDetailView.as_view(), name='qa_detail'),
url(r'^question/(?P<pk>\d+)/(?P<slug>[-_\w]+)/$',
views.QuestionDetailView.as_view(), name='qa_detail'),
url(r'^question/answer/(?P<answer_id>\d+)/$',
views.AnswerQuestionView.as_view(), name='qa_answer_question'),
url(r'^question/close/(?P<question_id>\d+)/$',
views.CloseQuestionView.as_view(), name='qa_close_question'),
url(r'^new-question/$', views.CreateQuestionView.as_view(),
name='qa_create_question'),
url(r'^edit-question/(?P<question_id>\d+)/$',
views.UpdateQuestionView.as_view(),
name='qa_update_question'),
url(r'^answer/(?P<question_id>\d+)/$',
views.CreateAnswerView.as_view(), name='qa_create_answer'),
url(r'^answer/edit/(?P<answer_id>\d+)/$',
views.UpdateAnswerView.as_view(), name='qa_update_answer'),
url(r'^vote/question/(?P<object_id>\d+)/$',
views.QuestionVoteView.as_view(), name='qa_question_vote'),
url(r'^vote/answer/(?P<object_id>\d+)/$',
views.AnswerVoteView.as_view(), name='qa_answer_vote'),
url(r'^comment-answer/(?P<answer_id>\d+)/$',
views.CreateAnswerCommentView.as_view(),
name='qa_create_answer_comment'),
url(r'^comment-question/(?P<question_id>\d+)/$',
views.CreateQuestionCommentView.as_view(),
name='qa_create_question_comment'),
url(r'^comment-question/edit/(?P<comment_id>\d+)/$',
views.UpdateQuestionCommentView.as_view(),
name='qa_update_question_comment'),
url(r'^comment-answer/edit/(?P<comment_id>\d+)/$',
views.UpdateAnswerCommentView.as_view(),
name='qa_update_answer_comment'),
url(r'^search/$', views.QuestionsSearchView.as_view(), name='qa_search'),
url(r'^tag/(?P<tag>[-\w]+)/$',
views.QuestionsByTagView.as_view(), name='qa_tag'),
url(r'^profile/(?P<user_id>\d+)/$', views.profile, name='qa_profile'),
url('^markdown/', include('django_markdown.urls')),
url(r'hitcount/', include('hitcount.urls', namespace='hitcount')),
]
|
""" Executed from loop_swap_protocol folder """
import loop_align_updates as la
import pickle
from pyrosetta import init, pose_from_pdb, get_fa_scorefxn, Pose
from pyrosetta.rosetta.core.pack.task import TaskFactory
from pyrosetta.rosetta.core.pack.task.operation import \
    OperateOnResidueSubset, RestrictAbsentCanonicalAASRLT
from pyrosetta.rosetta.core.select.residue_selector import ResidueIndexSelector
from pyrosetta.rosetta.protocols.grafting import CCDEndsGraftMover
from pyrosetta.rosetta.protocols.minimization_packing import PackRotamersMover
# PyRosetta must be initialised before any poses can be loaded
init()
# Load data
with open('protease_db_3.pkl', 'rb') as o:
db_dict = pickle.load(o)
# Set poses
query_pose = pose_from_pdb('tev.pdb')
subject_name = '1lvm'
subject_pose = pose_from_pdb('aligned_pdbs/{}.pdb'.format(subject_name))
lvm_dat = db_dict['1lvm']
# All-alanine-swap the start pose
sf = get_fa_scorefxn()
tf = TaskFactory()
restriction = RestrictAbsentCanonicalAASRLT()
restriction.aas_to_keep('A')
selection = ResidueIndexSelector('1-{}'.format(subject_pose.total_residue()))
tf.push_back(OperateOnResidueSubset(restriction, selection))
pt = tf.create_task_and_apply_taskoperations(subject_pose)
prm = PackRotamersMover(sf, pt)
all_A_pose = Pose(subject_pose)
prm.apply(all_A_pose)
all_A_pose.dump_pdb('self_swaps_1lvm_test/1lvm_all_A.pdb')
# Put in loops
for loop_name, loop in lvm_dat.loop_maps.items():
print(loop_name)
# Setting up poses
swapped_pose = Pose(query_pose)
loop_pose = Pose(all_A_pose,
loop.N_outside_overlap_residue.subject_pose_number,
loop.C_outside_overlap_residue.subject_pose_number)
loop_pose.dump_pdb('self_swaps_1lvm_test/loop_{}_loop_only.pdb'.format(loop_name))
# Setting up CCDEndsGraftMover
ccdgm = CCDEndsGraftMover()
ccdgm.set_insert_region(loop.N_splice_residue.query_pose_number,
loop.C_splice_residue.query_pose_number)
ccdgm.set_piece(loop_pose,
loop.N_overlap_size, loop.C_overlap_size)
# Applying mover and scoring and dumping the pose
ccdgm.apply(swapped_pose)
sf(swapped_pose)
    swapped_pose.dump_pdb('self_swaps_1lvm_test/loop_{}_tev_insert.pdb'.format(loop_name))
|
# -*- coding: utf-8 -*-
"""Tests for pyss3.cmd_line."""
from pyss3.cmd_line import SS3Prompt, main
from pyss3.server import Server
from pyss3.util import Evaluation
from pyss3 import SS3
from os import path
import pyss3.cmd_line
import pyss3.util
import pytest
import sys
MODEL_NAME = "cmd_test"
DATASET_FOLDER = "dataset"
DATASET_FOLDER_MR = "dataset_mr"
ArgsParserError = "1 2 3 4"
PYTHON3 = sys.version_info[0] >= 3
dataset_path = path.join(path.abspath(path.dirname(__file__)), DATASET_FOLDER)
dataset_path_mr = path.join(path.abspath(path.dirname(__file__)), DATASET_FOLDER_MR)
def test_ss3prompt(mocker, monkeypatch):
"""Test the Command-Line."""
if PYTHON3:
monkeypatch.setattr('builtins.input', lambda: 'Y')
mocker.patch.object(SS3Prompt, "cmdloop")
# not working in Python 2
# mocker.patch("pyss3.cmd_line.STOPWORDS_FILE", "tests/ss3_models/ss3_stopwords[%s].txt")
# mocker.patch(
# "pyss3.util.EVAL_HTML_OUT_FILE",
# "tests/ss3_models/ss3_model_evaluation[%s].html"
# )
# replaced by:
html_file_original = pyss3.util.EVAL_HTML_OUT_FILE
pyss3.cmd_line.STOPWORDS_FILE = "tests/ss3_models/ss3_stopwords[%s].txt"
pyss3.util.EVAL_HTML_OUT_FILE = "tests/ss3_models/" + html_file_original
Evaluation.__cache__ = None
Evaluation.__cache_file__ = None
Evaluation.__clf__ = None
Evaluation.__last_eval_tag__ = None
Evaluation.__last_eval_method__ = None
Evaluation.__last_eval_def_cat__ = None
main()
SS3.__models_folder__ = "tests/ss3_models"
cmd = SS3Prompt()
cmd.do_train(dataset_path + " file")
# do_new
cmd.do_new(MODEL_NAME + "_train_folder")
cmd.do_train(dataset_path_mr + " 3-grams")
cmd.do_train("non-existing 3-grams")
cmd.do_train(dataset_path_mr) # no test documents
cmd.do_train(ArgsParserError)
# do_new
cmd.do_new(MODEL_NAME)
# do_save evaluations - empty result history case
cmd.do_save("evaluations")
# do_train
cmd.do_train(dataset_path + " 3-grams file")
cmd.do_train("non-existing 3-grams file")
cmd.do_train(dataset_path) # no test documents
cmd.do_train(ArgsParserError)
# do_next_word
cmd.do_next_word("android")
cmd.do_next_word('') # doesn't work ยฏ\_(ใ)_/ยฏ
if PYTHON3:
mocker.patch("matplotlib.pyplot.show")
# do_test
cmd.do_test(dataset_path + " file")
cmd.do_test(dataset_path + " file") # cache
cmd.do_test(dataset_path + " file unknown") # def_cat=unknown
cmd.do_test(dataset_path + " file xxx") # def_cat=xxx
cmd.do_test(dataset_path + " file s .5 p") # IndexError
cmd.do_test(dataset_path + " file s .5 l 'a'") # BaseException
cmd.do_test("not-a-directory")
cmd.do_test(dataset_path) # no documents
cmd.do_test(dataset_path_mr + " file") # no documents
cmd.do_test(dataset_path_mr) # unknown categories
cmd.do_test(ArgsParserError)
# do_k_fold
cmd.do_k_fold(dataset_path + " file 3-grams 3-fold")
cmd.do_k_fold(dataset_path + " file 3-grams 3-fold") # cache
cmd.do_k_fold(dataset_path + " file 3-grams 3-fold xxx")
cmd.do_k_fold(dataset_path_mr + " 3-grams 3-fold xxx")
cmd.do_k_fold(ArgsParserError)
# do_grid_search
cmd.do_grid_search(dataset_path + " file 3-fold unknown p [.2] l [.2] s [.2]")
cmd.do_grid_search(dataset_path + " file 2-grams unknown p [.2] l [.2] s [.2]")
cmd.do_grid_search(dataset_path + " file 2-grams xxx p [.2] l [.2] s [.2]")
cmd.do_grid_search(dataset_path_mr + " 2-grams p [.2] l [.2] s [.2]")
cmd.do_grid_search(ArgsParserError)
# do_evaluations
mocker.patch("webbrowser.open")
cmd.do_evaluations("info")
cmd.do_evaluations("save")
cmd.do_evaluations("plot")
# do_load
cmd.do_load(MODEL_NAME)
if PYTHON3:
cmd.do_evaluations("remove p " + str(SS3.__p__))
cmd.do_evaluations("remove test s .2 l .2 p .2")
cmd.do_evaluations("remove test")
cmd.do_evaluations("remove 3-fold")
cmd.do_evaluations("otherwise")
cmd.do_evaluations(ArgsParserError)
# do_classify
cmd.do_classify(dataset_path + "/food.txt")
monkeypatch.setattr('sys.stdin.readlines', lambda: 'nice food!\n')
cmd.do_classify("")
cmd.do_classify(ArgsParserError)
# do_live_test
mocker.patch.object(Server, "set_testset")
mocker.patch.object(Server, "serve")
cmd.do_live_test("path")
set_testset_from_files = mocker.patch.object(Server, "set_testset_from_files")
set_testset_from_files.return_value = True
cmd.do_live_test("path")
set_testset_from_files.return_value = False
cmd.do_live_test("path")
cmd.do_live_test("")
cmd.do_live_test(ArgsParserError)
# do_learn
cmd.do_learn(ArgsParserError)
cmd.do_learn("food 3-grams %s/food.txt" % dataset_path)
# do_update
cmd.do_update('')
# do_info
cmd.do_info("all")
cmd.do_info("evaluations")
# do_debug_term
cmd.do_debug_term("android")
cmd.do_debug_term('') # doesn't work ยฏ\_(ใ)_/ยฏ
# do_plot
cmd.do_plot("evaluations")
if PYTHON3:
cmd.do_plot("distribution food")
cmd.do_plot("distribution")
cmd.do_plot("distribution non-existing")
cmd.do_plot(ArgsParserError)
# do_set
cmd.do_set("s .5")
cmd.do_set(ArgsParserError)
# do_get
cmd.do_get("s")
cmd.do_get("l")
cmd.do_get("p")
cmd.do_get("a")
cmd.do_get("otherwise")
cmd.do_get(ArgsParserError)
# do_save
cmd.do_save("model")
cmd.do_save("evaluations")
mocker.patch.object(SS3, "save_cat_vocab")
mocker.patch.object(SS3, "save_vocab")
cmd.do_save("vocabulary")
cmd.do_save("vocabulary food")
cmd.do_save("vocabulary invalid-category")
cmd.do_save("stopwords")
cmd.do_save("stopwords .01")
mocker.patch.object(SS3, "get_stopwords").return_value = []
cmd.do_save("stopwords")
cmd.do_save(ArgsParserError)
# do_load
cmd.do_load(MODEL_NAME)
# do_clone
cmd.do_clone(MODEL_NAME + "_backup")
cmd.do_clone(MODEL_NAME + "_copy")
# do_rename
cmd.do_rename(MODEL_NAME + "_renamed")
if PYTHON3:
cmd.do_rename(MODEL_NAME)
mocker.patch("pyss3.cmd_line.MODELS", [MODEL_NAME])
cmd.do_new(MODEL_NAME)
cmd.do_load(MODEL_NAME + "_copy")
cmd.do_rename(MODEL_NAME)
# do_license
cmd.do_license("")
# do_exit
with pytest.raises(SystemExit):
cmd.do_exit("")
cmd.complete_info("", 0, 0, 0)
cmd.complete_save("", 0, 0, 0)
cmd.complete_load("", 0, 0, 0)
cmd.complete_train("", 0, 0, 0)
cmd.complete_test("", 0, 0, 0)
cmd.complete_live_test("", 0, 0, 0)
cmd.complete_learn("", 0, 0, 0)
cmd.complete_set("", 0, 0, 0)
cmd.complete_plot("", 0, 0, 0)
cmd.complete_grid_search("", 0, 0, 0)
cmd.complete_evaluations("", 0, 0, 0)
main()
pyss3.util.EVAL_HTML_OUT_FILE = html_file_original
|
"""Config flow for Toggl integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({"token": str})
class TogglHub:
"""Hub for TogglPy
"""
def __init__(self, token):
"""Initialize."""
self.token = token
async def authenticate(self, token) -> bool:
"""Test if we can authenticate with the host."""
return True
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
hub = TogglHub(data["token"])
if not await hub.authenticate(data["token"]):
raise InvalidAuth
# If you cannot connect:
# throw CannotConnect
# If the authentication is wrong:
# InvalidAuth
# Return info that you want to store in the config entry.
return {"title": "Toggl"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Toggl."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
import numpy as np
def dict_median_scores(dict_list):
median_dict = {}
    for key, value in dict_list.items():
        value_arr = np.array(value)
        if np.isnan(value_arr).any():
median_dict[key] = np.nan
else:
#append_low = np.percentile(value_arr, 2.5)
append_mid = np.median(value_arr, axis=0)
#append_upp = np.percentile(value_arr, 95.7)
median_dict[key] = append_mid
return median_dict
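# --- Illustrative usage sketch (added; not part of the original module) ---
# Each key maps to a list of per-run scores; any key whose scores contain a NaN is
# reported as NaN instead of a median.
if __name__ == "__main__":
    scores = {
        "model_a": [0.71, 0.69, 0.75],
        "model_b": [0.62, np.nan, 0.66],
    }
    print(dict_median_scores(scores))  # e.g. {'model_a': 0.71, 'model_b': nan}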
|
from django.shortcuts import render , redirect , get_object_or_404
from django.http import HttpResponse, Http404
from django.contrib.auth.models import User
from .models import Note
from django.contrib import messages
from django.template.loader import get_template
from .script import check_ip
from django.views.generic import View
from notes_app.utlis.pdf import Render
# Create your views here.
def algorithm_analysis(request):
query_ip = request.GET.get("query_ip")
query_user = request.GET.get("query_user")
query_pass = request.GET.get("query_pass")
# query = request.GET.getlist('myvar')
    if query_ip and query_user and query_pass:
        # Run the check against the values supplied in the query string instead of
        # the hard-coded test credentials that were left in during debugging.
        run_script = check_ip(query_ip, query_user, query_pass)
        result = run_script.dict()
        return Render.render('pdf/pdf.html', result)
    return render(request, "notes.html", context={"userinput": query_ip})
def all_notes(request):
    all_notes = Note.objects.all()
    context = {
        'all_notes': all_notes,
    }
    return render(request, 'notes.html', context)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 7 20:34:37 2020
@author: adeela
"""
|
# coding: utf-8
# Python script created by Lucas Hale
# Standard Python libraries
from typing import Optional
# http://www.numpy.org/
import numpy as np
# https://github.com/usnistgov/atomman
import atomman as am
import atomman.lammps as lmp
import atomman.unitconvert as uc
from atomman.tools import filltemplate, aslist
# iprPy imports
from ...tools import read_calc_file
def diatom_scan(lammps_command: str,
potential: am.lammps.Potential,
symbols: list,
mpi_command: Optional[str] = None,
rmin: float = uc.set_in_units(0.02, 'angstrom'),
rmax: float = uc.set_in_units(6.0, 'angstrom'),
rsteps: int = 300) -> dict:
"""
Performs a diatom energy scan over a range of interatomic spaces, r.
Parameters
----------
lammps_command :str
Command for running LAMMPS.
potential : atomman.lammps.Potential
The LAMMPS implemented potential to use.
symbols : list
The potential symbols associated with the two atoms in the diatom.
mpi_command : str, optional
The MPI command for running LAMMPS in parallel. If not given, LAMMPS
will run serially.
rmin : float, optional
The minimum r spacing to use (default value is 0.02 angstroms).
rmax : float, optional
The maximum r spacing to use (default value is 6.0 angstroms).
rsteps : int, optional
The number of r spacing steps to evaluate (default value is 300).
Returns
-------
dict
Dictionary of results consisting of keys:
- **'r_values'** (*numpy.array of float*) - All interatomic spacings,
r, explored.
- **'energy_values'** (*numpy.array of float*) - The computed potential
energies for each r value.
"""
# Build lists of values
r_values = np.linspace(rmin, rmax, rsteps)
energy_values = np.empty(rsteps)
# Define atype based on symbols
symbols = aslist(symbols)
if len(symbols) == 1:
atype = [1, 1]
elif len(symbols) == 2:
if symbols[0] != symbols[1]:
atype = [1, 2]
else:
atype = [1, 1]
symbols = symbols[:1]
else:
raise ValueError('symbols must have one or two values')
# Initialize system (will shift second atom's position later...)
box = am.Box.cubic(a = rmax + 1)
atoms = am.Atoms(atype=atype, pos=[[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])
system = am.System(atoms=atoms, box=box, pbc=[False, False, False], symbols=symbols)
# Add charges if required
if potential.atom_style == 'charge':
system.atoms.prop_atype('charge', potential.charges(system.symbols))
# Get lammps units
lammps_units = lmp.style.unit(potential.units)
# Define lammps variables
lammps_variables = {}
# Loop over values
for i in range(rsteps):
# Shift second atom's x position
system.atoms.pos[1] = np.array([0.1 + r_values[i], 0.1, 0.1])
# Save configuration
system_info = system.dump('atom_data', f='diatom.dat',
potential=potential)
lammps_variables['atomman_system_pair_info'] = system_info
# Write lammps input script
lammps_script = 'run0.in'
template = read_calc_file('iprPy.calculation.diatom_scan', 'run0.template')
with open(lammps_script, 'w') as f:
f.write(filltemplate(template, lammps_variables, '<', '>'))
# Run lammps and extract data
try:
output = lmp.run(lammps_command, script_name=lammps_script,
mpi_command=mpi_command)
except:
energy_values[i] = np.nan
else:
energy = output.simulations[0]['thermo'].PotEng.values[-1]
energy_values[i] = uc.set_in_units(energy, lammps_units['energy'])
if len(energy_values[np.isfinite(energy_values)]) == 0:
raise ValueError('All LAMMPS runs failed. Potential likely invalid or incompatible.')
# Collect results
results_dict = {}
results_dict['r_values'] = r_values
results_dict['energy_values'] = energy_values
return results_dict
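# --- Illustrative sketch (added; not part of the original calculation) ---
# Shows how a diatom scan result is typically plotted, without needing LAMMPS: the
# Lennard-Jones-like curve below is a synthetic stand-in for 'energy_values', not
# output of diatom_scan().
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    r = np.linspace(0.5, 6.0, 300)
    fake_energy = 4 * ((1.0 / r)**12 - (1.0 / r)**6)  # placeholder energies
    plt.plot(r, fake_energy)
    plt.xlabel('r (angstrom)')
    plt.ylabel('potential energy')
    plt.ylim(-1.5, 2.0)
    plt.show()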
|
import itertools
data = []
"""
Process inputData.txt and put it into the data array.
"""
with open("inputData.txt", "r") as infile:
for line in infile:
data.append(int(line))
valid_permutations = {}
def checkIfValid(permutation):
return sum(permutation) == 150
"""
Main loop
This is the best explanation I could come up with:
1. For every possible amount of containers (e.g. if there are 4 containers, try 1, 2, 3, and 4)
2. And for every possible combination of that amount of containers
"""
for i in range(1, len(data) + 1):
for permutation in itertools.combinations(data, i):
if checkIfValid(permutation):
"""
This was the logic that was modified.
"""
if len(permutation) not in valid_permutations:
valid_permutations[len(permutation)] = 1
else:
valid_permutations[len(permutation)] += 1
print("Answer: " + str(valid_permutations[min(valid_permutations)]))
|
from struct import *
import numpy as np
# I considered using multiprocessing package, but I find this code version is fine.
# Welcome for your version with multiprocessing to make the reading faster.
# from joblib import Parallel, delayed
import multiprocessing
import time
import os
import argparse
from open3d import *
from progressbar import ProgressBar
# from astropy.nddata.utils import block_reduce
# parallel processing for samples
from joblib import Parallel, delayed
def voxel2pcd(file_npy, dir_tar_pcd, type='partial'):
voxels = np.load(file_npy)
pcd = PointCloud()
    if type == 'partial':
coordinate = np.transpose(np.where(voxels == 1))
pcd.points = Vector3dVector(coordinate)
colors_cat = np.transpose(np.tile(voxels[voxels == 1], ( 3, 1)))
pcd.colors = Vector3dVector(colors_cat)
else:
coordinate = np.transpose(np.where(voxels > 0))
pcd.points = Vector3dVector(coordinate)
colors_cat = np.float32(np.transpose(np.tile(voxels[voxels > 0], ( 3, 1))))/12
pcd.colors = Vector3dVector(colors_cat)
# Save
name_start = int(file_npy.rfind('/'))
name_end = int(file_npy.find('.', name_start))
write_point_cloud(dir_tar_pcd + file_npy[name_start:name_end] + '.pcd', pcd)
class ScanFile(object):
def __init__(self, directory, prefix=None, postfix='.bin'):
self.directory = directory
self.prefix = prefix
self.postfix = postfix
def scan_files(self):
files_list = []
for dirpath, dirnames, filenames in os.walk(self.directory):
for special_file in filenames:
if self.postfix:
if special_file.endswith(self.postfix):
files_list.append(os.path.join(dirpath, special_file))
elif self.prefix:
if special_file.startswith(self.prefix):
files_list.append(os.path.join(dirpath, special_file))
else:
files_list.append(os.path.join(dirpath, special_file))
return files_list
def scan_subdir(self):
subdir_list = []
for dirpath, dirnames, files in os.walk(self.directory):
subdir_list.append(dirpath)
return subdir_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Parser added')
parser.add_argument(
'-s',
action="store",
dest="dir_src",
default="/media/wangyida/D0-P1/database/SUNCGtrain_3001_5000",
help='folder of paired depth and voxel')
parser.add_argument(
'-tv',
action="store",
dest="dir_tar_pcd",
default="/media/wangyida/D0-P1/database/SUNCGtrain_3001_5000_depvox",
help='for storing generated npy')
parser.add_argument(
'-dt',
action="store",
dest="data_type",
default='partial',
help='for storing generated npy')
parser.print_help()
results = parser.parse_args()
# folder of paired depth and voxel
dir_src = results.dir_src
# for storing generated npy
dir_tar_pcd = results.dir_tar_pcd
data_type = results.data_type
# scan for voxel files
scan_npy = ScanFile(directory=dir_src, postfix='.npy')
files_npy = scan_npy.scan_files()
# making directories
try:
os.stat(dir_tar_pcd)
except:
os.mkdir(dir_tar_pcd)
# save voxel as npy files
pbar = ProgressBar()
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(voxel2pcd)(file_npy, dir_tar_pcd, type=data_type)
for file_npy in pbar(files_npy))
# below is the normal procedure for processing
"""
for file_npy in pbar(files_npy):
voxel2pcd(file_npy, dir_tar_pcd, type=data_type)
"""
|
import nltk
from nltk.collocations import *
from nltk.metrics import BigramAssocMeasures
import operator
import string
import math
measures = BigramAssocMeasures()
l_ru = []
with open("text_ru.txt", 'r', encoding="utf-8") as f:
for line in f:
for w in nltk.word_tokenize(line.lower()):
if w not in string.punctuation:
l_ru.append(w)
l_en = []
with open("text_en.txt", 'r', encoding="utf-8") as f:
for line in f:
for w in nltk.word_tokenize(line.lower()):
if w not in string.punctuation:
l_en.append(w)
freq_ru = nltk.FreqDist(l_ru)
sort_fr_ru = freq_ru.most_common()
finder_ru = BigramCollocationFinder.from_words(l_ru)
t_ru = finder_ru.nbest(measures.student_t, 100)
freq_en = nltk.FreqDist(l_en)
sort_fr_en = freq_en.most_common()
finder_en = BigramCollocationFinder.from_words(l_en)
t_en = finder_en.nbest(measures.student_t, 100)
with open("collocations_ru.csv", 'w', encoding="utf-8") as coll:
for i in t_ru:
coll.write("{}; {}; {}\n".format("t", i[0]+" "+i[1], round(finder_ru.score_ngram(measures.student_t, i[0], i[1]),2)))
for m in t_ru:
coll.write("{}; {}; {}\n".format("chi^2", m[0]+" "+m[1], round(finder_ru.score_ngram(measures.chi_sq, m[0], m[1]),2)))
for n in t_ru:
coll.write("{}; {}; {}\n".format("log-likelihood", n[0]+" "+n[1], round(finder_ru.score_ngram(measures.likelihood_ratio, n[0], n[1]),2)))
for q in t_ru:
coll.write("{}; {}; {}\n".format("pmi", q[0]+" "+q[1], round(finder_ru.score_ngram(measures.pmi, q[0], q[1]),2)))
with open("collocations_en.csv", 'w', encoding="utf-8") as coll:
for i in t_en:
coll.write("{}; {}; {}\n".format("t", i[0]+" "+i[1], round(finder_en.score_ngram(measures.student_t, i[0], i[1]),2)))
for m in t_en:
coll.write("{}; {}; {}\n".format("chi^2", m[0]+" "+m[1], round(finder_en.score_ngram(measures.chi_sq, m[0], m[1]),2)))
for n in t_en:
coll.write("{}; {}; {}\n".format("log-likelihood", n[0]+" "+n[1], round(finder_en.score_ngram(measures.likelihood_ratio, n[0], n[1]),2)))
for q in t_en:
coll.write("{}; {}; {}\n".format("pmi", q[0]+" "+q[1], round(finder_en.score_ngram(measures.pmi, q[0], q[1]),2)))
|
from django.db.models import Sum
from django.utils import timezone
from .models import Commission
from apps.jobs.models import Jobs
class CommissionManager(object):
"""
Manager for commission calculations
"""
def getCommUser(self, user):
"""
Returns commission amount due for the user
"""
commissions = Commission.objects.filter(
handyman=user,
is_paid=False
)
comm = 0.0
for commission in commissions:
comm += float(commission.amount.amount)
return [comm, commissions]
def addCommission(self, job):
for handyman in job.handyman.all():
amount = (0.2 * float(job.fee.amount))/job.handyman.count()
commission = Commission(job=job, amount=amount, handyman=handyman)
Commission.save(commission)
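    # Example of the split above (made-up figures): a job fee of 1000.00 shared by two
    # handymen creates two Commission rows of 0.2 * 1000 / 2 = 100.00 each.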
def updateCommission(self, job):
commissions = Commission.objects.filter(job=job)
for commission in commissions:
commission.amount=0.0
commission.save()
for handyman in job.handyman.all():
amount = (0.2 * float(job.fee.amount))/job.handyman.count()
            if Commission.objects.filter(job=job, handyman=handyman).exists():
commission=Commission.objects.get(job=job, handyman=handyman)
commission.amount=amount
commission.save()
else:
commission = Commission(job=job, amount=amount, handyman=handyman)
Commission.save(commission)
def removeCommission(self, job):
commissions = Commission.objects.filter(job=job)
for commission in commissions:
commission.amount=0.0
commission.save()
def setCommPaid(self, user):
"""
Sets commission flag as true for the user
"""
Commission.objects.filter(
handyman=user,
is_paid=False
).update(is_paid=True, paidout_date=timezone.now())
return True
def getTotalCommUser(self, user):
"""Returns the total commission paid
"""
        commissions = Commission.objects.filter(handyman=user, is_paid=True)
        total = commissions.aggregate(Sum('amount'))['amount__sum'] or 0
        return "Rs.{:,.2f}".format(total)
|
def leiaInt():
red = "\033[1;31m"
print('-' * 20)
while True:
        n = str(input('Digite um número: '))
        if n.isnumeric():
            n = int(n)
            print(f'Voce digitou o número {n}')
            break
        else:
            print('\033[0;31mERRO! Digite um número inteiro válido\033[m')
leiaInt()
|
import datetime
import hashlib
import json
import rsa
from flask import Flask, jsonify, request
import json
import requests
keys_list = {}
threshold_value = 60
class Blockchain:
def __init__(self):
self.chain = []
self.create_block(proof=1, previous_hash='0', value=0)
def create_block(self, proof, previous_hash, value):
result = get_chain_add(str(value))
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'key' : str(result["public_key"]),
'encrypted_data' : str(result["value"])}
self.chain.append(block)
return block
def print_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(
str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
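    # Note: the mining loop above searches for a proof whose
    # sha256(str(new_proof**2 - previous_proof**2)) hex digest starts with '0000';
    # chain_valid() below only re-checks the weaker '00' prefix.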
def hash(self, block):
encoded_block = json.dumps(block).encode("utf-8")
return hashlib.sha256(encoded_block).hexdigest()
def chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(
str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:2] != '00':
return False
previous_block = block
block_index += 1
return True
def get_chain_add(value):
publicKey, privateKey = rsa.newkeys(512)
keys_list[str(publicKey)] = privateKey
enc = rsa.encrypt(value.encode(), publicKey)
return {'value': str(enc, encoding='latin-1'),
'public_key' : str(publicKey)}
if __name__=="__main__":
app = Flask(__name__)
blockchain = Blockchain()
# Mining a new block
@app.route('/mine_block', methods=['POST'])
def mine_block():
list_value = request.get_json()
send_data = []
for x in list_value:
previous_block = blockchain.print_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash, x["value"])
body = {"message": "A block is MINED",
"index": block['index'],
"timestamp": block['timestamp'],
"proof": block['proof'],
"previous_hash": block['previous_hash'],
"key" : block['key'],
"encrypted_data" : block['encrypted_data']}
send_data.append(body)
return jsonify(send_data), 200
# Display blockchain in json format
@app.route('/get_chain', methods=['GET'])
def display_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Check validity of blockchain
@app.route('/valid', methods=['GET'])
def valid():
valid = blockchain.chain_valid(blockchain.chain)
if valid:
response = {'message': 'The Blockchain is valid.'}
else:
response = {'message': 'The Blockchain is not valid.'}
return jsonify(response), 200
@app.route('/getkey', methods=['GET'])
def getkey():
return jsonify(keys_list), 200
#calculation part
def get_gain(current):
if (current >= 90):
return 9
elif (current >= 80):
return 8
elif (current >= 70):
return 7
elif (current >= 60):
return 6
elif (current >= 50):
return 5
elif (current >= 40):
return 4
elif (current >= 30):
return 3
elif (current >= 20):
return 2
else:
return 1
def calculate_avg(list):
value = 0
sum = 0
for x in list:
energy = rsa.decrypt(bytes(x["encrypted_data"], encoding='latin-1'), keys_list[x["key"]]).decode()
value += get_gain(int(energy)) * int(energy)
sum += get_gain(int(energy))
return (value)/ (sum)
@app.route('/calculate', methods=['POST'])
    def add_guess():
list_block = request.get_json()
value = calculate_avg(list_block)
return {"avg": value,
"threshold" : threshold_value}
    # Run the flask server locally
    app.run(host='127.0.0.1', port=5000)
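# A minimal client sketch for the API above (assumptions, not part of the original
# script: the server is already running on http://127.0.0.1:5000 and the `requests`
# package is installed). The /calculate endpoint expects the block list returned by
# /mine_block:
#
#     import requests
#     blocks = requests.post("http://127.0.0.1:5000/mine_block",
#                            json=[{"value": 55}, {"value": 72}]).json()
#     result = requests.post("http://127.0.0.1:5000/calculate", json=blocks).json()
#     print(result["avg"], result["threshold"])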
|
from common import *
from database import *
from format import *
from site import *
|
import stat
from typing import Dict
from pathlib import Path
class FileStat:
filename: str
relname: str
size: int
mtime: int
FileStatDict = Dict[str, FileStat]
class LocalFileStat(FileStat):
"""
Local filestat
"""
def __init__(self, root: str, path: Path):
stat_output = path.stat()
self.filename = str(path)
self.relname = path.relative_to(root).as_posix()
self.size = stat_output.st_size
self.mtime = stat_output.st_mtime
class RemoteFileStat(FileStat):
"""
Remote filestat from stat command on Linux + some specific flags
"""
def __init__(self, root, stat_output, delim):
columns = stat_output.split(delim)
self.filename = columns[0]
self.relname = Path(self.filename).relative_to(root).as_posix()
self.size = int(columns[1])
self.mtime = int(columns[2])
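# A minimal local usage sketch (assumptions: it only reads metadata of files directly
# under the current working directory; nothing is written or transferred). The
# RemoteFileStat example values below are made up.
if __name__ == "__main__":
    local_root = Path(".").resolve()
    local_stats: FileStatDict = {}
    for p in local_root.iterdir():
        if p.is_file():
            fs = LocalFileStat(str(local_root), p)
            local_stats[fs.relname] = fs
    for relname, entry in local_stats.items():
        print(relname, entry.size, int(entry.mtime))
    # Parsing one delimited line of remote `stat` output, e.g.:
    remote = RemoteFileStat("/data", "/data/logs/app.log|2048|1700000000", "|")
    print(remote.relname, remote.size, remote.mtime)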
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mainWindow(object):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.resize(1057, 777)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(170, 480, 704, 232))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.up_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.up_btn.setFont(font)
self.up_btn.setObjectName("up_btn")
self.verticalLayout.addWidget(self.up_btn)
self.save_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.save_btn.setFont(font)
self.save_btn.setObjectName("save_btn")
self.verticalLayout.addWidget(self.save_btn)
self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
self.cause_name = QtWidgets.QLabel(self.centralwidget)
self.cause_name.setGeometry(QtCore.QRect(40, 50, 491, 71))
font = QtGui.QFont()
font.setPointSize(18)
self.cause_name.setFont(font)
self.cause_name.setObjectName("cause_name")
self.keyword = QtWidgets.QLabel(self.centralwidget)
self.keyword.setGeometry(QtCore.QRect(400, 270, 281, 51))
font = QtGui.QFont()
font.setPointSize(22)
self.keyword.setFont(font)
self.keyword.setObjectName("keyword")
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(770, 50, 220, 61))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(15)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.keyword_cnt = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(15)
self.keyword_cnt.setFont(font)
self.keyword_cnt.setObjectName("keyword_cnt")
self.horizontalLayout_2.addWidget(self.keyword_cnt)
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(50, 380, 984, 85))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.currency_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.currency_btn.setBaseSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(25)
self.currency_btn.setFont(font)
self.currency_btn.setIconSize(QtCore.QSize(20, 20))
self.currency_btn.setObjectName("currency_btn")
self.horizontalLayout.addWidget(self.currency_btn)
self.proper_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.proper_btn.setFont(font)
self.proper_btn.setObjectName("proper_btn")
self.horizontalLayout.addWidget(self.proper_btn)
self.relevant_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.relevant_btn.setFont(font)
self.relevant_btn.setObjectName("relevant_btn")
self.horizontalLayout.addWidget(self.relevant_btn)
self.error_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(25)
self.error_btn.setFont(font)
self.error_btn.setObjectName("error_btn")
self.horizontalLayout.addWidget(self.error_btn)
self.backLabel = QtWidgets.QLabel(self.centralwidget)
self.backLabel.setGeometry(QtCore.QRect(310, 150, 491, 71))
font = QtGui.QFont()
font.setPointSize(14)
self.backLabel.setFont(font)
self.backLabel.setObjectName("backLabel")
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1057, 26))
self.menubar.setObjectName("menubar")
mainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mainWindow)
self.statusbar.setObjectName("statusbar")
mainWindow.setStatusBar(self.statusbar)
self.retranslateUi(mainWindow)
self.relevant_btn.clicked.connect(mainWindow.set_word_relevant)
self.pushButton.clicked.connect(mainWindow.selectTxtFilePath)
self.proper_btn.clicked.connect(mainWindow.set_word_proper)
self.currency_btn.clicked.connect(mainWindow.set_word_currency)
self.up_btn.clicked.connect(mainWindow.up_word)
self.save_btn.clicked.connect(mainWindow.save_word)
self.error_btn.clicked.connect(mainWindow.set_word_error)
QtCore.QMetaObject.connectSlotsByName(mainWindow)
def retranslateUi(self, mainWindow):
_translate = QtCore.QCoreApplication.translate
        mainWindow.setWindowTitle(_translate("mainWindow", "这是一个数据标注软件"))
        self.up_btn.setText(_translate("mainWindow", "上一个（点错时用）(u)"))
        self.save_btn.setText(_translate("mainWindow", "保存（退出前保存!!!）(s)"))
        self.pushButton.setText(_translate("mainWindow", "选择文件"))
        self.cause_name.setText(_translate("mainWindow", "案由"))
        self.keyword.setText(_translate("mainWindow", "关键词"))
        self.label_3.setText(_translate("mainWindow", "序号："))
        self.keyword_cnt.setText(_translate("mainWindow", "0"))
        self.currency_btn.setText(_translate("mainWindow", "通用词(1)"))
        self.proper_btn.setText(_translate("mainWindow", "专有词(2)"))
        self.relevant_btn.setText(_translate("mainWindow", "关联词(3)"))
        self.error_btn.setText(_translate("mainWindow", "错误(4)"))
        self.backLabel.setText(_translate("mainWindow", "上一步操作内容"))
|
"""@package ScipyMinimize
This package implements the minimize optimisers from SciPy.
@author Dr Franck P. Vidal, Bangor University
@date 5th July 2019
"""
#################################################
# import packages
###################################################
from scipy import optimize
from Solution import Solution
from Optimiser import *
## \class This class wraps the minimize optimisers from SciPy (scipy.optimize.minimize)
class ScipyMinimize(Optimiser):
## \brief Constructor.
# \param self
# \param aCostFunction: The cost function to minimise
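    # \param aMethodName: Name of the scipy.optimize.minimize method to use
    # \param tol: Tolerance passed to the optimiser (only used if > 0)
    # \param initial_guess: Starting point; a random guess is generated if None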
def __init__(self, aCostFunction, aMethodName, tol, initial_guess = None):
super().__init__(aCostFunction, initial_guess);
# Name of the algorithm
self.full_name = aMethodName;
self.short_name = aMethodName;
self.max_iterations = -1;
self.verbose = False;
self.tolerance = tol;
self.best_solution_set = [];
self.solution_set = [];
def setMaxIterations(self, aMaxIterations):
self.max_iterations = aMaxIterations;
def run(self):
options = {'disp': self.verbose};
if self.max_iterations > 0:
options['maxiter'] = self.max_iterations;
if self.tolerance > 0:
options['ftol'] = self.tolerance;
options['tol'] = self.tolerance;
        if self.initial_guess is None:
self.initial_guess = self.objective_function.initialRandomGuess();
# Methods that cannot handle constraints or bounds.
if self.short_name == 'Nelder-Mead' or self.short_name == 'Powell' or self.short_name == 'CG' or self.short_name == 'BFGS' or self.short_name == 'COBYLA':
result = optimize.minimize(self.objective_function.minimisationFunction,
self.initial_guess,
method=self.short_name,
options=options,
callback=self.callback);
elif self.short_name == 'L-BFGS-B' or self.short_name == 'TNC' or self.short_name == 'SLSQP':
result = optimize.minimize(self.objective_function.minimisationFunction,
self.initial_guess,
method=self.short_name,
bounds=self.objective_function.boundaries,
options=options,
callback=self.callback);
else:
result = optimize.minimize(self.objective_function.minimisationFunction,
self.initial_guess,
method=self.short_name,
bounds=self.objective_function.boundaries,
jac='2-point',
options=options,
callback=self.callback);
self.best_solution = Solution(self.objective_function, 1, result.x)
def evaluate(self, aParameterSet):
return self.objective_function.evaluate(aParameterSet, 1);
def runIteration(self):
if len(self.best_solution_set) > 1 and len(self.solution_set) > 1:
self.best_solution = self.best_solution_set.pop(0);
self.current_solution_set.append(self.solution_set.pop(0));
def callback(self, xk):
solution = Solution(self.objective_function, 1, xk);
        if self.best_solution is None:
self.best_solution = solution;
if self.best_solution.getObjective() < solution.getObjective():
self.best_solution_set.append(self.best_solution)
else:
self.best_solution_set.append(solution)
self.solution_set.append(solution);
def update(self, i):
# This is the first call
if i == 0:
# Run the minimisation
self.run();
super().update(i);
print(i)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 15:45:45 2017
@author: dgratz
"""
from readFile import readFile
import re
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
s = re.compile('/')
u = re.compile('_')
datadir = 'D:/synchrony-data/AllConnAndRand/'
conns = list(map(lambda x: float(s.split(x)[-1]), glob(datadir+'0/*')))
data = np.zeros((2,26,20))
files = glob(datadir+'*/*/cell*dt0.tsv*')
for file in files:
temp = readFile(file)
fnames = s.split(file)
uparts = u.split(fnames[-1])
(row,col) = tuple(map(lambda x: int(x),filter(lambda x: x.isdigit(),uparts)))
conn = float(fnames[-2])
connPos = conns.index(conn)
simNum = int(fnames[-3])
data[col,connPos,simNum] = np.min(temp['cell'+str(row)+'_'+str(col)+'/vOld/cl'][-10:-1])
plt.figure(0)
vOld_cl_line = np.zeros(shape=(2,26))
vOld_cl_line[0,:] = np.mean(data[0,:,:],axis=1)
vOld_cl_line[1,:] = np.mean(data[1,:,:],axis=1)
vOld_cl_error = np.zeros(shape=(2,26))
vOld_cl_error[0,:] = np.std(data[0,:,:],axis=1)
vOld_cl_error[1,:] = np.std(data[1,:,:],axis=1)
plt.errorbar(conns,vOld_cl_line[0,:],yerr=vOld_cl_error[0,:],alpha=0.7)
plt.errorbar(conns,vOld_cl_line[1,:],yerr=vOld_cl_error[1,:],alpha=0.7)
plt.xscale('log')
plt.title('vOld/cl')
files = glob(datadir+'*/*/cell*dss0.tsv*')
for file in files:
temp = readFile(file)
fnames = s.split(file)
uparts = u.split(fnames[-1])
(row,col) = tuple(map(lambda x: int(x),filter(lambda x: x.isdigit(),uparts)))
conn = float(fnames[-2])
connPos = conns.index(conn)
simNum = int(fnames[-3])
data[col,connPos,simNum] = temp['cell'+str(row)+'_'+str(col)+'/vOld/peak']
plt.figure(1)
vOld_peak_line = np.zeros(shape=(2,26))
vOld_peak_line[0,:] = np.mean(data[0,:,:],axis=1)
vOld_peak_line[1,:] = np.mean(data[1,:,:],axis=1)
vOld_peak_error = np.zeros(shape=(2,26))
vOld_peak_error[0,:] = np.std(data[0,:,:],axis=1)
vOld_peak_error[1,:] = np.std(data[1,:,:],axis=1)
plt.errorbar(conns,vOld_peak_line[0,:],yerr=vOld_peak_error[0,:],alpha=0.7)
plt.errorbar(conns,vOld_peak_line[1,:],yerr=vOld_peak_error[1,:],alpha=0.7)
plt.xscale('log')
plt.title('vOld/peak')
files = glob(datadir+'*/*/cell*dss0.tsv*')
for file in files:
temp = readFile(file)
fnames = s.split(file)
uparts = u.split(fnames[-1])
(row,col) = tuple(map(lambda x: int(x),filter(lambda x: x.isdigit(),uparts)))
conn = float(fnames[-2])
connPos = conns.index(conn)
simNum = int(fnames[-3])
data[col,connPos,simNum] = temp['cell'+str(row)+'_'+str(col)+'/vOld/min']
plt.figure(2)
vOld_min_line = np.zeros(shape=(2,26))
vOld_min_line[0,:] = np.mean(data[0,:,:],axis=1)
vOld_min_line[1,:] = np.mean(data[1,:,:],axis=1)
vOld_min_error = np.zeros(shape=(2,26))
vOld_min_error[0,:] = np.std(data[0,:,:],axis=1)
vOld_min_error[1,:] = np.std(data[1,:,:],axis=1)
plt.errorbar(conns,vOld_min_line[0,:],yerr=vOld_min_error[0,:],alpha=0.7)
plt.errorbar(conns,vOld_min_line[1,:],yerr=vOld_min_error[1,:],alpha=0.7)
plt.xscale('log')
plt.title('vOld/min')
files = glob(datadir+'*/*/cell*dss0.tsv*')
for file in files:
temp = readFile(file)
fnames = s.split(file)
uparts = u.split(fnames[-1])
(row,col) = tuple(map(lambda x: int(x),filter(lambda x: x.isdigit(),uparts)))
conn = float(fnames[-2])
connPos = conns.index(conn)
simNum = int(fnames[-3])
data[col,connPos,simNum] = temp['cell'+str(row)+'_'+str(col)+'/caI/peak']
plt.figure(3)
caI_peak_line = np.zeros(shape=(2,26))
caI_peak_line[0,:] = np.mean(data[0,:,:],axis=1)
caI_peak_line[1,:] = np.mean(data[1,:,:],axis=1)
caI_peak_error = np.zeros(shape=(2,26))
caI_peak_error[0,:] = np.std(data[0,:,:],axis=1)
caI_peak_error[1,:] = np.std(data[1,:,:],axis=1)
plt.errorbar(conns,caI_peak_line[0,:],yerr=caI_peak_error[0,:],alpha=0.7)
plt.errorbar(conns,caI_peak_line[1,:],yerr=caI_peak_error[1,:],alpha=0.7)
plt.xscale('log')
plt.title('caI/peak')
|
#Created on February 23, 2017
#@author: rspies@lynkertech.com
# Python 2.7
# This script creates a horizontal bar chart of station data availability
# for precip/temp data sites
import os
import numpy as np
import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
plt.ioff()
os.chdir("../..") # change dir to \\AMEC\\NWS
maindir = os.getcwd()
############ User input ################
variable = 'temp' # choices: 'ptpx' or 'temp'
RFC = 'APRFC_FY2017'
fxgroup = 'NWAK'
plot_stations = 'all' # choices: 'all' or 'pxpp_input' ### pxpp_input will ignore the stations in the ignore_file and not plot
networks = ['nhds_daily','raws_hourly'] # choices: 'raws_hourly','asos_hourly','nhds_daily','nhds_hourly','scan_hourly','CONAGUA'
workingdir = maindir + os.sep + 'Calibration_NWS'+ os.sep + RFC[:5] + os.sep + RFC + os.sep + 'MAP_MAT_development' + os.sep +'station_data'
figname = workingdir + os.sep + 'station_summaries' + os.sep + 'data_timeline_plots' + os.sep + RFC + '_' + fxgroup + '_' + variable + '_' + plot_stations + '.png'
yearstart = 1960; yearend = 2016; # start and end years for plotting
########################################
########## define data timeline figure ###
fig, ax1 = plt.subplots(figsize=(11,9))
plt.title(variable.upper() + ' Station Data Availability Timeline',fontsize=14)
basins_list = []; count = 0
years = mdates.YearLocator() # every year
for network in networks:
print '\n**** ' + network + ' stations:'
timestep = network.split('_')[1]
ignore_sites = []
if network[:4] == 'raws' or network[:4] == 'asos' or network[:4] == 'scan':
card_dir = workingdir + os.sep + network +os.sep + 'cardfiles_' + variable + os.sep + fxgroup + os.sep
station_file = workingdir + os.sep + 'station_summaries' + os.sep + fxgroup + '_' + network[:4] + '_summary_' + variable + '_' + timestep + '.csv'
ignore_file = workingdir + os.sep + network + os.sep + fxgroup + '_ignore_stations.csv'
if network[:4] == 'nhds' or network[:4] == 'usgs':
if variable == 'temp' and network == 'nhds_daily':
card_dir = workingdir + os.sep + network[:4] + '_' + timestep +os.sep + 'tamx' + os.sep + 'cardfiles' + os.sep + fxgroup + os.sep
station_file = workingdir + os.sep + 'station_summaries' + os.sep + network[:4] + '_summary_tamx_' + timestep + '_' + fxgroup + '.csv'
else:
card_dir = workingdir + os.sep + network[:4] + '_' + timestep +os.sep + variable + os.sep + 'cardfiles' + os.sep + fxgroup + os.sep
station_file = workingdir + os.sep + 'station_summaries' + os.sep + network[:4] + '_summary_' + variable + '_' + timestep + '_' + fxgroup + '.csv'
ignore_file = workingdir + os.sep + network + os.sep + fxgroup + '_ignore_stations.csv'
if network[:4] == 'CONA':
card_dir = workingdir + os.sep + 'CONAGUA' + os.sep + variable + os.sep + 'cardfiles' + os.sep + timestep + os.sep
station_file = workingdir + os.sep + network.split('_')[0] + '_summary_' + variable + '_' + timestep + '.csv'
ignore_file = workingdir + os.sep + network.split('_')[0] + '_all' + '_ignore_stations.csv'
if variable == 'temp':
cards = [f for f in os.listdir(card_dir) if os.path.isfile(os.path.join(card_dir, f)) and f.endswith('.tmx')]
else:
cards = [f for f in os.listdir(card_dir) if os.path.isfile(os.path.join(card_dir, f))]
print cards
## read list of stations to ignore if plotting only pxpp input sites
if plot_stations == 'pxpp_input':
read_ignore = open(ignore_file,'r')
for site in read_ignore:
ignore_sites.append(site.rstrip('\n')[-4:])
read_ignore.close()
for card in cards:
if card.split('-')[1].upper() not in ignore_sites:
print card
count += 1
if network[:4] == 'raws' or network[:4] == 'asos':
csv_read = open(card_dir + '\\' + card, 'r')
data = []; date = []
### read card file formatted .txt files lists
line_count = 0
for line in csv_read:
if line_count >= 7: # ignore header lines
if line_count == 8:
sep = line.split()
### parse date columns
month = str(sep[1])[:-2]
year = str(sep[1])[-2:]
if int(year) <= 17:
year = int(year) + 2000 # assume years <= 17 are in the 2000s
else:
year = int(year) + 1900
hour = int(sep[2])
day = 1
full_date = datetime.datetime(year,int(month),int(day),int(hour))
date.append(full_date)
data.append(float(sep[-1][-10:]))
else:
sep = line.split()
if len(sep) > 0: # ignore blank lines
if variable == 'temp':
full_date += timedelta(days=1)
else:
full_date += timedelta(hours=1)
date.append(full_date)
data.append(float(sep[-1][-10:]))
line_count += 1
csv_read.close()
if network[:4] == 'nhds':
csv_read = open(card_dir + '\\' + card, 'r')
data = []; date = []; last_month = 13; mday_count = 1
### read card file formatted .txt files lists
line_count = 0
for line in csv_read:
if line_count >= 3: # ignore header lines
sep = line.split()
if len(sep) > 0: # ignore blank lines
if len(sep) < 4 and len(sep[-1]) < 10: # some QME files (from RFC) may not have gage/basin id as 1st index
sep.insert(0,'0000')
### parse date columns
month = str(sep[1])[:-2]
year = str(sep[1])[-2:]
if int(year) <= 17:
year = int(year) + 2000 # assume years <= 17 are in the 2000s
else:
year = int(year) + 1900
if timestep == 'daily':
day = str(sep[2])
if len(sep[-1]) > 10: # check for large streamflow values that get combined with day column
day = str(sep[2])[:-10]
if month != last_month:
last_month = month
mday_count = 1
for each in sep[3:]:
day = mday_count
full_date = datetime.datetime(year,int(month),int(day))
date.append(full_date)
data.append(float(sep[-1][-10:]))
mday_count += 1
if timestep == 'hourly':
if line_count == 3:
month = str(sep[1])[:-2]
year = str(sep[1])[-2:]
if int(year) <= 17:
year = int(year) + 2000 # assume years <= 17 are in the 2000s
else:
year = int(year) + 1900
day = 1; hour = 1
full_date = datetime.datetime(year,int(month),int(day),int(hour))
for each in sep[3:]:
date.append(full_date)
data.append(float(each))
full_date += timedelta(hours=1)
line_count += 1
csv_read.close()
Q_mask = np.ma.masked_less(data,0) # mask values less than 0 to ignore
#Q_mask = np.ma.masked_invalid(np.asarray(discharge)) # mask missing and 'nan' instances
date_mask = np.ma.masked_where(np.ma.getmask(Q_mask) == True, date) # mask dates containing missing discharge data
Q_data = np.ma.compressed(Q_mask).tolist() # create list with only valid dishcharge data
final_date = np.ma.compressed(date_mask).tolist() # create list with corresponding date
if len(final_date) != len(Q_data):
print 'WARNING -- Date and Discharge Data not the same length'
basin_gauge = card.split('.')[0].upper() # basin/gage name
basins_list.append(basin_gauge)
day_count = str(len(Q_data)) # number of valid daily data values
start_date = str(min(final_date)) # date of first measurement
end_date = str(max(final_date)) # date of last measurement
#new_summary.write(basin_gauge+','+day_count+','+start_date+','+end_date+','+mean_Q+','
#+str(max_Q)+','+str(min_Q)+','+sd+','+date_max+','+date_min+'\n')
###### create plot of dates of data availability
print 'Adding site data to plot...'
y_pos = [count] * len(final_date)
ax1.plot(final_date, y_pos, '|',mew=0.5,ms=14)
print 'Adding plot attributes...'
ax1.xaxis.set_major_locator(mdates.YearLocator(5))
ax1.xaxis.set_minor_locator(years)
plt.yticks(range(1,len(basins_list)+1),basins_list)
plt.xlabel('Date (1960-2016)')
plt.ylabel('Station ID')
plt.ylim(0,len(basins_list)+0.5)
plt.xlim(datetime.datetime(yearstart,1,1), datetime.datetime(yearend,1,1))
plt.savefig(figname, dpi=200,bbox_inches='tight')
plt.close()
print 'Completed!!'
|
""" Placeholder for a synthetic field. """
class SyntheticField:
""" Placeholder for a synthetic field. """
def __init__(self):
""" Store all the parameters for later usage, as well as reference to a synthetic generator. """
def make_sampler(self):
""" Create a sampler to generate pseudo-locations. """
def load_seismic(self):
""" Create a synthetic seismic slide. """
def make_mask(self, src=('horizons', 'faults')):
""" Make segmentation mask. """
_ = src
def show(self):
""" A simple slide visualization. """
def __repr__(self):
pass
def __str__(self):
pass
|
import numpy as np
e = np.array([11,2,3,4,5,6,12,23,8,21])
print(np.max(e))
print(np.min(e))
print(np.median(e))
print(np.std(e))
|
l,u=map(int, input().split())
li=[]
for i in range(l+1,u):
if i%2==0:
li.append(str(i))
print(" ".join(li))
#prasad
|
import math
from pyspark import SparkContext, SparkConf
import sys
import time
import multiprocessing
from pyspark.mllib.recommendation import ALS, Rating
import heapq
start_time = time.time()
# Model based cf
def model_base_cf(rdd, train_rdd, test_rdd, user_enum, bus_enum):
# Training
ratings_train = train_rdd \
.map(lambda s: Rating(user_enum[s[0]], bus_enum[s[1]], float(s[2])))
rank = 3
num_iter = 20
model = ALS.train(ratings_train, rank, num_iter, 0.265)
# Evaluation
test_features = test_rdd \
.map(lambda s: (user_enum[s[0]], bus_enum[s[1]]))
prediction = model \
.predictAll(test_features) \
.map(lambda s: ((s[0], s[1]), s[2]))
ratings_test = test_rdd \
.map(lambda s: Rating(user_enum[s[0]], bus_enum[s[1]], float(s[2])))
rate_pred = ratings_test \
.map(lambda s: ((s[0], s[1]), s[2])) \
.join(prediction) \
.cache()
# Filling users in test and not in training into prediction by default rating, 3
d_rating = float(3.0)
rdd_u = rdd.map(lambda s: (user_enum[s[0]], (bus_enum[s[1]], float(s[2]))))
rp_u = train_rdd.map(lambda s: (user_enum[s[0]], (bus_enum[s[1]], float(s[2]))))
rate_pred_u = rdd_u.subtractByKey(rp_u) \
.map(lambda s: ((s[0], s[1][0]), (s[1][1], d_rating)))
# Filling businesses in test and not in training into prediction by default rating, 3
rdd_b = rdd.map(lambda s: (bus_enum[s[1]], (user_enum[s[0]], float(s[2]))))
rp_b = train_rdd.map(lambda s: (bus_enum[s[1]], (user_enum[s[0]], float(s[2]))))
rate_pred_b = rdd_b.subtractByKey(rp_b) \
.map(lambda s: ((s[1][0], s[0]), (s[1][1], d_rating)))
# Combine all
rate_pred_diff = rate_pred_u.union(rate_pred_b)
rate_pred = rate_pred.union(rate_pred_diff)
return rate_pred
#############################################################
# User based cf
def user_base_cf(case_num, train_rdd, test_rdd, user_enum, bus_enum):
# Business to user map
bus_user_map = train_rdd \
.map(lambda s: (bus_enum[s[1]], [user_enum[s[0]]])) \
.reduceByKey(lambda x, y: x + y) \
.collectAsMap()
# User information
user_bus = train_rdd \
.map(lambda s: (user_enum[s[0]], [bus_enum[s[1]]])) \
.reduceByKey(lambda x, y: x + y)
user_rating = train_rdd \
.map(lambda s: (user_enum[s[0]], [float(s[2])])) \
.reduceByKey(lambda x, y: x + y) \
.map(lambda s: normalize_rating(s))
user_bus_rating = user_bus.join(user_rating) \
.map(lambda s: ((s[0], s[1][0]), (s[1][1][0], s[1][1][1])))
# User to business map
user_bus_map = user_bus.collectAsMap()
# User and business to ratings map
user_bus_rating_map = user_bus_rating \
.flatMap(lambda s: expand_ratings(case_num, s)) \
.collectAsMap()
# Test Features
test_features = test_rdd.map(lambda s: (user_enum[s[0]], bus_enum[s[1]]))
# Take top n number of cos similarities
# 13, 1.091
top = 13
# Prediction
prediction = test_features \
.map(lambda s: predict_rating(case_num, s, user_bus_rating_map, user_bus_map, bus_user_map, top))
rate_pred = test_rdd \
.map(lambda s: ((user_enum[s[0]], bus_enum[s[1]]), float(s[2]))) \
.join(prediction)
return rate_pred
#############################################################
# Item based cf
def item_base_cf(case_num, train_rdd, test_rdd, user_enum, bus_enum):
# User to business map
user_bus_map = train_rdd \
.map(lambda s: (user_enum[s[0]], [bus_enum[s[1]]])) \
.reduceByKey(lambda x, y: x + y) \
.collectAsMap()
# Business information
bus_user = train_rdd \
.map(lambda s: (bus_enum[s[1]], [user_enum[s[0]]])) \
.reduceByKey(lambda x, y: x + y)
bus_rating = train_rdd \
.map(lambda s: (bus_enum[s[1]], [float(s[2])])) \
.reduceByKey(lambda x, y: x + y) \
.map(lambda s: normalize_rating(s))
user_bus_rating = bus_user.join(bus_rating) \
.map(lambda s: ((s[1][0], s[0]), (s[1][1][0], s[1][1][1])))
# User and business to ratings map
user_bus_rating_map = user_bus_rating \
.flatMap(lambda s: expand_ratings(case_num, s)) \
.collectAsMap()
# User Information
bus_user_map = bus_user.collectAsMap()
# Test Features
test_features = test_rdd.map(lambda s: (user_enum[s[0]], bus_enum[s[1]]))
# rmse = 10
# rate_pred = None
top = 6
# while rmse > 0.9:
# start_time = time.time()
# Prediction
prediction = test_features \
.map(lambda s: predict_rating(case_num, s, user_bus_rating_map, user_bus_map, bus_user_map, top))
rate_pred = test_rdd \
.map(lambda s: ((user_enum[s[0]], bus_enum[s[1]]), float(s[2]))) \
.join(prediction)
# print("prediction: ", str(time.time() - start_time))
# # RMSE
# rmse = evaluate_rmse(rate_pred)
# print("Top: ", top)
# top *= 2
return rate_pred
# Record data
def record_data(data, output_file, user, business):
output_file.write("user_id, business_id, prediction\n")
# Model-based cf
data_list = data.collect()
for line in data_list:
output_file.write(user[line[0][0]] + "," + business[line[0][1]] + "," + str(line[1][1]) + "\n")
# Create user and item dictionaries associate with their index in input list
def create_user_item_dict(user_list, item_list):
user_enum = dict()
for i, u in enumerate(user_list):
user_enum[u] = i
bus_enum = dict()
for i, b in enumerate(item_list):
bus_enum[b] = i
return user_enum, bus_enum
# Print the RSME of predicted ratings
def evaluate_rmse(rate_pred):
rmse = math.sqrt(rate_pred
.map(lambda s: ((s[1][0] - s[1][1]) ** 2))
.mean())
print("RMSE: " + str(rmse))
return rmse
# Normalize rating for each user
def normalize_rating(data):
# Field could be user or business
field = data[0]
ratings_old = data[1]
ratings_avg = 0.0
# Get average rating
for r in ratings_old:
ratings_avg += float(r)
ratings_avg /= len(ratings_old)
return field, (ratings_old, ratings_avg)
# Expand the map for each business with one user
def expand_ratings(case_num, data):
user = data[0][0]
busis = data[0][1]
ratings = data[1][0]
avg_rating = data[1][1]
expand_list = list()
if case_num == 2:
# User based cf
for i, b in enumerate(busis):
expand_list.append(((user, b), (ratings[i], avg_rating)))
elif case_num == 3:
# Item based cf
for i, u in enumerate(user):
expand_list.append(((u, busis), (ratings[i], avg_rating)))
return expand_list
def predict_rating(case_num, test_fea, ubr_map, ub_map, bu_map, top_n):
user = int(test_fea[0])
busi = int(test_fea[1])
# Business in testing but not in training
if busi not in bu_map and user not in ub_map:
return (user, busi), 3.0
elif busi not in bu_map:
# Average of the other businesses that the user rated
return (user, busi), float(ubr_map[(user, ub_map[user][0])][1])
elif user not in ub_map:
# Average of the business rating from other users
return (user, busi), float(ubr_map[(bu_map[busi][0], busi)][1])
else:
# Business in training as well
field_list, co_rate = list(), list()
field_avg = 0.0, 0.0
if case_num == 2:
# User based cf
field_list = ub_map.get(user)
field_avg = float(ubr_map[(user, field_list[0])][1])
co_rate = bu_map[busi]
elif case_num == 3:
# Item based cf
field_list = bu_map.get(busi)
field_avg = float(ubr_map[(field_list[0], busi)][1])
co_rate = ub_map[user]
cos_sims = get_cos_sims(case_num, co_rate, field_list, field_avg, ubr_map, ub_map, bu_map, user, busi, top_n)
rate_pred = get_rating_pred_cs(case_num, cos_sims, ubr_map, field_avg, user, busi)
return rate_pred
# Get cos similarities
def get_cos_sims(case_num, co_rate, field_list, rate_avg, ubr_map, ub_map, bu_map, user, busi, top_n):
cos_sims = list()
rate_avg_2 = 0.0
bus_co_rate_len = 0
# Find cos similarities of the co-rated users or business
for f in co_rate:
other_field_co_rate = set(field_list)
if case_num == 2:
other_field_co_rate &= set(ub_map[f])
rate_avg_2 = ubr_map[(f, ub_map[f][0])][1]
elif case_num == 3:
# # Skip the users who did not rate on business
# if ubr_map.get((user, busi)) is None:
# continue
other_field_co_rate &= set(bu_map[f])
bus_co_rate_len = len(other_field_co_rate)
rate_avg_2 = ubr_map[(bu_map[f][0], f)][1]
num_cs, den_cs_1, den_cs_2 = 0.0, 0.0, 0.0
# Calculate the numerator and denominator of each cos similarity
for of in other_field_co_rate:
r_1, r_2 = 0.0, 0.0
if case_num == 2:
if of != busi:
r_1 = float(ubr_map[(user, of)][0]) - rate_avg
r_2 = float(ubr_map[(f, of)][0]) - rate_avg_2
elif case_num == 3:
if of != user:
r_1 = float(ubr_map[(of, busi)][0]) - rate_avg
r_2 = float(ubr_map[(of, f)][0]) - rate_avg_2
num_cs += r_1 * r_2
den_cs_1 += r_1 ** 2
den_cs_2 += r_2 ** 2
        # Calculate cos. similarity
cos_sim = num_cs / math.sqrt(den_cs_1 * den_cs_2) if num_cs != 0 else 0
# Memory-Based improvement
if case_num == 3:
# if cos_sim <= 0.0:
# continue
# Case Amplification
# cos_sim *= abs(cos_sim) ** 1.5
# Default Voting
cos_sim *= 0 if bus_co_rate_len < 60 else 1
cos_sims.append((f, cos_sim, rate_avg_2))
# Take top n cos similarities
cos_sims = heapq.nlargest(top_n, cos_sims, key=lambda s: s[1])
return cos_sims
# Get the rating prediction from cos. similarities
def get_rating_pred_cs(case_num, cos_sims, ubr_map, rate_avg, user, busi):
num_w, den_w, r = 0, 0, 0.0
# Get weights
for cs in cos_sims:
field_cs = cs[0]
cos_sim = cs[1]
rating_cs = cs[2]
if case_num == 2:
# User based cf
            # rating_cs is the neighbouring user's average rating
r = float(ubr_map[(field_cs, busi)][0]) - rating_cs \
if ubr_map.get((field_cs, busi)) is not None else 0.0
elif case_num == 3:
# Item based cf
# r is the rating of business for every other user
r = ubr_map.get((user, field_cs))[0]
num_w += r * cos_sim
den_w += abs(cos_sim)
# Weighted rating
rating_pred = rate_avg
if den_w != 0:
if case_num == 2:
# User based cf
rating_pred = rate_avg + num_w / den_w
elif case_num == 3:
# Item based cf
rating_pred = num_w / den_w
# Round ratings
# if rating_pred < 1.0:
# rating_pred = 1.0
# elif rating_pred > 5.0:
# rating_pred = 5.0
# else:
# rating_pred = round(rating_pred, 1)
return (user, busi), rating_pred
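# Worked toy example of the user-based weighting above (numbers are my own, not from
# the assignment data): with neighbour similarities 0.8 and 0.4, neighbour ratings 4.0
# and 2.0 for the business, neighbour averages 3.5 and 3.0, and a user average of 3.2:
#   pred = 3.2 + (0.8*(4.0 - 3.5) + 0.4*(2.0 - 3.0)) / (0.8 + 0.4) = 3.2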
# Main Execution
# Run Configurations
train_path = sys.argv[1]
test_path = sys.argv[2]
case_id = int(sys.argv[3])
output_path = sys.argv[4]
# Level of Parallelism - Recommended by Spark
# http://spark.apache.org/docs/latest/tuning.html#level-of-parallelism
cpu_num = multiprocessing.cpu_count()
task_per_cpu = cpu_num * 3
# Spark Configurations
# conf = SparkConf().setAppName('HW3 - Task 1').setMaster('local[*]')
conf = SparkConf() \
.setAppName('HW3 - Task 2') \
.setMaster('local[*]') \
.set('spark.executor.memory', '8g') \
.set('spark.driver.memory', '8g')
sc = SparkContext(conf=conf)
# Data Input
distFile_train = sc.textFile(train_path) \
.coalesce(task_per_cpu) \
.map(lambda s: s.split(","))
distFile_test = sc.textFile(test_path) \
.coalesce(task_per_cpu) \
.map(lambda s: s.split(","))
# Beheading
headers = distFile_train.first()
rdd_train = distFile_train.filter(lambda s: s != headers).cache()
rdd_test = distFile_test.filter(lambda s: s != headers).cache()
# All users and businesses from both training and testing rdds
rdd = sc.union([rdd_train, rdd_test]).cache()
users = rdd.map(lambda s: s[0]).distinct().collect()
busis = rdd.map(lambda s: s[1]).distinct().collect()
# Constructing dictionaries for users and business index
user_enum, bus_enum = create_user_item_dict(users, busis)
rst = None
# Model Based CF
if case_id == 1:
rst = model_base_cf(rdd, rdd_train, rdd_test, user_enum, bus_enum)
elif case_id == 2:
# User Based CF
rst = user_base_cf(case_id, rdd_train, rdd_test, user_enum, bus_enum)
elif case_id == 3:
# Item Based CF
rst = item_base_cf(case_id, rdd_train, rdd_test, user_enum, bus_enum)
if rst is not None:
# Data output
with open(output_path, "w") as op:
record_data(rst, op, users, busis)
print("Duration: " + str(time.time() - start_time))
# if rst is not None:
# # RMSE
# evaluate_rmse(rst)
|
from typing import (
List,
Dict,
Text,
Any,
Optional
)
from rasa.shared.core.events import Event, UserUttered
class UserBotUttered():
def __init__(self,
last_message:UserUttered,
bot_predict_event:Optional[Text]
) -> None:
"""
        Create an event that contains the message the bot received
        and the event the bot predicts for that message.
"""
self.message_user = last_message
self.bot_predict_event = bot_predict_event
def get_message_user(self) -> UserUttered:
return self.message_user
def get_bot_predict(self) -> Text:
return self.bot_predict_event
def set_bot_predict(self, bot_predict):
self.bot_predict_event = bot_predict
|
#coding:utf-8
#!/usr/bin/env python
from game.routine.pet import pet
from game.routine.skill import skill
from game.routine.equipment import equipment
class luck:
@staticmethod
def check(usr, card, petConf):
"""
ๆฃๆต็ผ
"""
petInfo = petConf[card['cardid']]
        luckids = []
        for l in petInfo['luck']:
            luckid, cardid, equipmentid, skillid = luck.analyse(l)
            if luck.has_card(usr, cardid):
                if luck.has_equipment(usr, card, equipmentid):
                    if luck.has_skill(usr, card, skillid):
                        luckids.append(luckid)
        return luckids
@staticmethod
def analyse(luck):
"""
ๅๆ
"""
luck = luck.split('_')
cardid = []
equipmentid = []
skillid = []
luckid = ''
for l in luck:
if l.startswith('pet'):
cardid.append(l)
elif l.startswith('sk'):
skillid.append(l)
elif l.startswith('eqp'):
equipmentid.append(l)
elif l.startswith('y'):
luckid = l
return luckid, cardid, equipmentid, skillid
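    # Example of the encoding parsed above (hypothetical ids): 'pet001_eqp002_sk003_y01'
    # yields luckid='y01', cardid=['pet001'], equipmentid=['eqp002'], skillid=['sk003'].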
@staticmethod
def has_card(usr, cardid):
"""
ๆๅก็
"""
inv = usr.getInventory()
teamCardid = []
for cid in inv.team:
if cid:
card = inv.getCard(cid)
teamCardid.append(card['cardid'])
for cid in cardid:
if cid not in teamCardid:
return False
return True
@staticmethod
def has_equipment(usr, card, equipmentid):
"""
ๆ่ฃ
ๅค
"""
inv = usr.getInventory()
equipid = []
for eq in card['slot']:
if eq:
equipid.append(eq['equipmentid'])
for eqid in equipmentid:
if eqid not in equipid:
return False
return True
@staticmethod
def has_skill(usr, card, skillid):
"""
ๆๆ่ฝ
"""
inv = usr.getInventory()
skid = []
for sk in card['sk_slot']:
if sk:
skid.append(sk['skillid'])
for sid in skillid:
if sid not in skid:
return False
return True
|
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import FileResponse,Response
from pydantic import BaseModel
import io
from io import BytesIO
import numpy as np
from PIL import Image
import shutil
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
app = FastAPI()
detect_fn = tf.saved_model.load('saved_model')
category_index = label_map_util.create_category_index_from_labelmap("label_map.pbtxt",use_display_name=True)
def load_image_into_numpy_array(data):
"""Load an image from file into a numpy array.
Puts image into numpy array to feed into tensorflow graph.
Note that by convention we put it into a numpy array with shape
(height, width, channels), where channels=3 for RGB.
Args:
    data: the raw bytes of an image file (not a file path)
Returns:
uint8 numpy array with shape (img_height, img_width, 3)
"""
return np.array(Image.open(io.BytesIO(data)))
def predict(image):
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
image_np = load_image_into_numpy_array(image)
input_tensor = tf.convert_to_tensor(image_np)
#print(input_tensor.shape)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]
# input_tensor = np.expand_dims(image_np, 0)
detections = detect_fn(input_tensor)
# All outputs are batches tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy()
for key, value in detections.items()}
detections['num_detections'] = num_detections
# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'],
detections['detection_classes'],
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=200,
min_score_thresh=.50,
agnostic_mode=False)
#print(detections)
return Image.fromarray(image_np_with_detections)
@app.post("/uploadfile/")
async def create_upload_file(image: UploadFile = File(...)):
img_data = await image.read()
predicted_image = predict(img_data)
output = BytesIO()
predicted_image.save(output, 'png')
return Response(output.getvalue(), media_type='image/png')
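# A minimal client sketch (assumptions, not part of the original service: the app is
# served with `uvicorn <module>:app` on http://127.0.0.1:8000, the `requests` package
# is installed, and 'test.jpg' is any local image; all of these are placeholders).
if __name__ == "__main__":
    import requests
    with open("test.jpg", "rb") as f:
        resp = requests.post("http://127.0.0.1:8000/uploadfile/",
                             files={"image": ("test.jpg", f, "image/jpeg")})
    with open("prediction.png", "wb") as out:
        out.write(resp.content)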
|
import tensorflow as tf
print(tf.__version__)
hello = tf.constant("Hello Tensorflow")
print(hello)
sess = tf.Session()
# sess.run returns a bytes object, so the result needs to be decoded
result = sess.run(hello)
print(result)
print(result.decode())
a = tf.constant(10)
b = tf.constant(20)
_a, _b = sess.run([a, b])
print(_a, _b)
|
import json
def read_dict(db_name):
try: #little ducktyping -- catches both file not found and file empty (incase it was erased)
with open(db_name, 'r') as f:
print("updating db...db_name")
return json.load(f)
except (FileNotFoundError, ValueError):
print("creating db...")
return init_pairing_dict(student_arr)
def write_dict(db_name, data):
with open(db_name, 'w+') as f:
json.dump(data, f)
|
from django.urls import path
from jobs.api.views import (JobOfferDetailAPIView,
JobOfferListCreateAPIView)
urlpatterns = [
path("jobs/",
JobOfferListCreateAPIView.as_view(),
name="job-list"),
path("jobs/<int:pk>/",
JobOfferDetailAPIView.as_view(),
name="job-detail"),
]
|
"""
https://leetcode.com/problems/number-of-subarrays-with-bounded-maximum/
We are given an array nums of positive integers, and two positive integers left and right (left <= right).
Return the number of (contiguous, non-empty) subarrays such that the value of the maximum array element in that subarray is at least left and at most right.
Example:
Input:
nums = [2, 1, 4, 3]
left = 2
right = 3
Output: 3
Explanation: There are three subarrays that meet the requirements: [2], [2, 1], [3].
Sliding Window Method
"""
from typing import List
class Solution:
def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:
# print (nums)
# print (left, right)
dp = [0] * len(nums)
k = 0
start = -1
end = -1
max_index = 0
for i in range(len(nums)):
# print ("===========")
# print (i, nums[i])
if left <= nums[i] <= right:
end = i
dp[i] = end - start
elif nums[i] > right:
start = i
end = i
            elif nums[i] < left:
                dp[i] = end - start
# print (dp)
return sum(dp)
nums = [2, 1, 4, 3]
left = 2
right = 3
print ("Input - Nums {}, Left {}, Right {}".format(nums, left, right))
ans =Solution().numSubarrayBoundedMax(nums, left, right)
print ("Solution - {}".format(ans))
nums = [2, 9, 2, 5, 6]
left = 2
right = 8
print ("Input - Nums {}, Left {}, Right {}".format(nums, left, right))
ans = Solution().numSubarrayBoundedMax(nums, left, right)
print ("Solution - {}".format(ans))
nums = [73, 55, 36, 5, 55, 14, 9, 7, 72, 52]
left = 32
right = 69
print ("Input - Nums {}, Left {}, Right {}".format(nums, left, right))
ans = Solution().numSubarrayBoundedMax(nums, left, right)
print ("Solution - {}".format(ans))
nums = [876,880,482,260,132,421,732,703,795,420,871,445,400,291,358,589,617,202,755,810,227,813,549,791,418,528,835,401,526,584,873,662,13,314,988,101,299,816,833,224,160,852,179,769,646,558,661,808,651,982,878,918,406,551,467,87,139,387,16,531,307,389,939,551,613,36,528,460,404,314,66,111,458,531,944,461,951,419,82,896,467,353,704,905,705,760,61,422,395,298,127,516,153,299,801,341,668,598,98,241]
left = 658
right = 719
print ("Input - Nums {}, Left {}, Right {}".format(nums, left, right))
ans = Solution().numSubarrayBoundedMax(nums, left, right)
print ("Solution - {}".format(ans))
|
import json
import logging
import time
import certifi
import urllib3
class BitbucketApiBindings:
"""
Wraps Bitbucket API functions.
"""
def __init__(self, rate_limit: int):
self.__rate_limit = rate_limit
def form_bitbucket_request(self, url: str) -> urllib3.response:
"""
Creates new bitbucket request and returns the response.
:param url: The url to call.
:return: The response resulting from the request.
"""
time.sleep(3600/self.__rate_limit)
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
return http.request('GET',
url)
def get_repo_substring(self, url, provider):
"""
Gets the repo-substring (i.e. url: https://bitbucket.org/osrf/gazebo -> returns: osrf/gazebo
:param url: URL to get the substring from.
:param provider: Part to cut off from the front.
:return: the substring formatted as {<user>|<organization>}/{repository_name}
"""
project_string = url.split(provider)[1]
# This is okay since Mercurial does not have an extension on the back of remote urls.
project_string = project_string.split(".git")[0]
return project_string
def get_stargazer_count(self, repo_url):
"""
Gets the "stargazer" count for github. Used watchers since stargazers do not exist in Bitbucket.
:param repo_url: URL to the repository.
:return: the amount of watchers on the repository, -1 if request failed.
"""
project_string = self.get_repo_substring(repo_url, "https://bitbucket.org/")
response = self.form_bitbucket_request(
"https://api.bitbucket.org/2.0/repositories/" + project_string + "/watchers")
if response.status == 200:
data = response.data
decoded = json.loads(data.decode('utf-8'))
return decoded["size"]
return -1
def get_next_url(self, result):
"""
Gets the URL for the next page.
:param result: URL for the next page.
:return: The next url, or empty string, if no next string is available.
"""
if "next" in result:
return result["next"]
else:
return ""
def get_issues_api_string(self, repo_url):
"""
Returns API url to call for issues associated with the repository.
:param repo_url: Repository URL to get issues from.
:return: API URL for retrieving an issue list.
"""
project_string = self.get_repo_substring(repo_url, "https://bitbucket.org/")
return "https://api.bitbucket.org/2.0/repositories/" + project_string + "/issues"
def get_pull_requests_api_string(self, repo_uri):
"""
Returns API URL to call for (open) pull requests associated with the repository.
:param repo_uri: Repository URL to get pull requests from.
:return: API URL for retrieving pull request list.
"""
project_string = self.get_repo_substring(repo_uri, "https://bitbucket.org/")
return "https://api.bitbucket.org/2.0/repositories/" + project_string + "/pullrequests?state=OPEN"
def get_values(self, api_url) -> iter:
"""
Gets the values field from an Bitbucket API result (used for e.g. pull requests, issues, etc..)
:param api_url: API url to call. (see *_api_string)
:return: Yield returns the values from the Bitbucket API.
"""
next_url = api_url
while next_url != "":
response = self.form_bitbucket_request(next_url)
if response.status != 200:
logging.info("[Bitbucket API Connector]: Could not reach " + next_url + ", request returned " + str(response.status))
next_url = ""
else:
result = json.loads(response.data.decode('utf-8'))
if "values" in result:
for value in result["values"]:
yield value
next_url = self.get_next_url(result)
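# A minimal usage sketch (assumptions: network access is available, the example
# repository URL from get_repo_substring's docstring still resolves, and
# rate_limit=1000 is an arbitrary requests-per-hour budget).
if __name__ == "__main__":
    bindings = BitbucketApiBindings(rate_limit=1000)
    repo = "https://bitbucket.org/osrf/gazebo"
    print("watchers:", bindings.get_stargazer_count(repo))
    for issue in bindings.get_values(bindings.get_issues_api_string(repo)):
        print("first issue:", issue.get("title"))
        break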
|
import cv2
import random
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import os
grayscale_max = 255
def load_image(filename):
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
return image
def show_image(title, image):
max_val = image.max()
# image = np.absolute(image)
image = np.divide(image, max_val)
# cv2.imshow(title, image)
#cv2.imwrite(title+str(random.randint(1, 100))+'.jpg', image*grayscale_max)
cv2.imwrite(os.path.join(path_image,str(random.randint(1, 100))+'.jpg'),image*grayscale_max)
def add_padding(input, padding):
rows = input.shape[0]
columns = input.shape[1]
output = np.zeros((rows + padding * 2, columns + padding * 2), dtype=float)
output[ padding : rows + padding, padding : columns + padding] = input
return output
def add_replicate_padding(image):
# zero_padded = add_padding(image, padding)
# size = image.shape[0]
top_row = image[0, :]
image = np.vstack((top_row, image))
bottom_row = image[-1, :]
image = np.vstack((image, bottom_row))
left_column = image[:, 0]
left_column = np.reshape(left_column, (left_column.shape[0], 1))
image = np.hstack((left_column, image))
right_column = image[:, -1]
right_column = np.reshape(right_column, (right_column.shape[0], 1))
image = np.hstack((image, right_column))
return image
def euclid_dist(a, b):
distance = np.linalg.norm(a - b)
return distance
def get_search_bounds(column, block_size, width):
disparity_range = 25
left_bound = column - disparity_range
if left_bound < 0:
left_bound = 0
right_bound = column + disparity_range
if right_bound > width:
right_bound = width - block_size + 1
return left_bound, right_bound
def search_bounds(column, block_size, width, rshift):
disparity_range = 75
padding = block_size // 2
right_bound = column
if rshift:
left_bound = column - disparity_range
if left_bound < padding:
left_bound = padding
step = 1
else:
left_bound = column + disparity_range
if left_bound >= (width - 2*padding):
left_bound = width - 2*padding - 2
step = -1
return left_bound, right_bound, step
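# In words: search_bounds restricts the block search to a one-sided window of up to
# 75 pixels around the reference column. For a left-to-right match (rshift=True) the
# candidate blocks lie to the left of the column and are scanned left-to-right
# (step=1); otherwise they lie to the right and are scanned right-to-left (step=-1).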
# block matching: for each block in the reference image, find the lowest-SSD match inside the search window
def disparity_map(left, right, block_size, rshift):
padding = block_size // 2
left_img = add_padding(left, padding)
right_img = add_padding(right, padding)
height, width = left_img.shape
# d_map = np.zeros((height - padding*2, width - padding*2), dtype=float)
d_map = np.zeros(left.shape , dtype=float)
for row in range(height - block_size + 1):
for col in range(width - block_size + 1):
bestdist = float('inf')
shift = 0
left_pixel = left_img[row:row + block_size, col:col + block_size]
l_bound, r_bound, step = search_bounds(col, block_size, width, rshift)
# for i in range(l_bound, r_bound - padding*2):
for i in range(l_bound, r_bound, step):
right_pixel = right_img[row:row + block_size, i:i + block_size]
# if euclid_dist(left_pixel, right_pixel) < bestdist :
ssd = np.sum((left_pixel - right_pixel) ** 2)
# print('row:',row,' col:',col,' i:',i,' bestdist:',bestdist,' shift:',shift,' ssd:',ssd)
if ssd < bestdist:
bestdist = ssd
shift = i
if rshift:
d_map[row, col] = col - shift
else:
d_map[row, col] = shift - col
print('Calculated Disparity at ('+str(row)+','+str(col)+') :', d_map[row,col])
return d_map
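# A minimal, self-contained restatement of the matching cost used above (a
# hypothetical helper, not called anywhere): for a candidate column i, the cost of
# matching the block at (row, col) in the left image against the block at (row, i)
# in the right image is the sum of squared differences (SSD); the candidate with
# the lowest SSD determines the disparity |col - i|.
def ssd_block_cost(left_img, right_img, row, col, i, block_size):
    left_block = left_img[row:row + block_size, col:col + block_size]
    right_block = right_img[row:row + block_size, i:i + block_size]
    return np.sum((left_block - right_block) ** 2)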
def mean_square_error(disparity_map, ground_truth):
# ssd = np.sum((disparity_map - ground_truth)**2)
# mse = ssd/(ground_truth.shape[0]*ground_truth.shape[1])
mse = np.mean((disparity_map - ground_truth)**2)
return mse
def consistency_map_mse_l(d_map_left, d_map_right, left_ground_truth):
rows, cols = d_map_left.shape
consistency_map = np.zeros((rows, cols))
for r in range(rows):
for c in range(cols):
left_pixel = d_map_left[r, c]
if cols > c - left_pixel > 0:
right_pixel = d_map_right[r, int(c - left_pixel)]
else:
right_pixel = d_map_right[r, c]
if left_pixel == right_pixel:
consistency_map[r, c] = left_pixel
else:
consistency_map[r, c] = 0
sum = 0
for r in range(rows):
for c in range(cols):
if consistency_map[r, c] != 0:
sum = sum + (left_ground_truth[r, c] - consistency_map[r, c]) ** 2
mse_c_left = sum / (rows * cols)
return mse_c_left, consistency_map
def consistency_map_mse_r(d_map_left, d_map_right, right_ground_truth):
rows, cols = d_map_right.shape
consistency_map = np.zeros((rows, cols))
for r in range(rows):
for c in range(cols):
right_pixel = d_map_right[r, c]
if c + right_pixel < cols:
left_pixel = d_map_left[r, int(c + right_pixel)]
else:
left_pixel = d_map_left[r, c]
if right_pixel == left_pixel:
consistency_map[r, c] = right_pixel
else:
consistency_map[r, c] = 0
sum = 0
for r in range(rows):
for c in range(cols):
if consistency_map[r, c] != 0:
sum = sum + (right_ground_truth[r, c] - consistency_map[r, c]) ** 2
mse_c_right = sum / (rows * cols)
return mse_c_right, consistency_map
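# The two functions above implement a left-right consistency check: a pixel's
# disparity is kept only when the left map and the right map agree on it (the other
# map is sampled at the column shifted by the disparity); otherwise it is zeroed out
# and skipped when accumulating the squared error against the ground truth.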
def main():
#l = load_image('im0.png')
#r = load_image('im1.png')
# loading the images one by one from the folder
filename='MiddEval3/trainingQQ'
path_image='images'
#file_list=[]
    for (curdir, subsHere, filesHere) in os.walk(filename):
        # process every folder that contains a stereo pair (im0.png / im1.png)
        if 'im0.png' in filesHere and 'im1.png' in filesHere:
            l = load_image(os.path.join(curdir, 'im0.png'))
            r = load_image(os.path.join(curdir, 'im1.png'))
d_map_lr_3 = disparity_map(l, r, 3, True)
show_image('D_Map_lr_block3_', d_map_lr_3)
# Disparity Maps
# d_map_lr_3 = disparity_map(l, r, 3, True)
# show_image('D_Map_lr_block3_', d_map_lr_3)
#d_map_rl_3 = disparity_map(r, l, 3, False)
#show_image('D_Map_rl_block3_', d_map_rl_3)
#d_map_lr_9 = disparity_map(l, r, 9, True)
#show_image('D_Map_lr_block9_', d_map_lr_9)
#d_map_rl_9 = disparity_map(r, l, 9, False)
#show_image('D_Map_rl_block9_', d_map_rl_9)
return
main()
|
import pandas as pd
import sys
outf = sys.argv[1]
score_df = pd.DataFrame()
fids, pids, scores = [], [], []
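# plink's --score output (.profile) typically has the columns
# FID IID PHENO CNT CNT2 SCORE, so the family ID, the individual ID and the last
# column (the score) are picked out below; adjust the indices if your plink
# version writes a different layout.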
with open('plink.profile', 'r') as r:
lines = r.readlines()
for line in lines[1:]:
words = line.split()
fids.append(words[0])
pids.append(words[1])
scores.append(float(words[-1]))
score_df['FID'] = fids
score_df['PID'] = pids
score_df['Score'] = scores
score_df.to_csv(outf, index=False)
|
import random
PATTERNS = [
(1000000000000000, 'xxxxx'),
(-1000000000000000, 'ooooo'),
(10, ' xx '),
(-10, ' oo '),
(10, ' x x '),
(-10, ' o o '),
(10, ' x x '),
(-10, ' o o '),
(100, ' xx '),
(-100, ' oo '),
(-3300000, ' ooo '),
(1100000, ' xxx '),
(-3000000, ' ooo '),
(1000000, ' xxx '),
(-3000, 'xooo '),
(1000, 'oxxx '),
(-3300000,' oo o '),
(1100000,' xx x '),
(-3000000,' oo o '),
(1000000,' xx x '),
(-300000000000,' oooo '),
(100000000000,' xxxx '),
(-3000000,'oooo '),
(1000000,'xxxx '),
(-3000000,' oooo'),
(1000000,' xxxx'),
(-3000000000,'oo oo'),
(1000000000,'xx xx'),
(10,' x ')
]
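# Scoring works on string renderings of rows/columns/diagonals ('x', 'o', ' '):
# every pattern string that occurs as a substring of a line adds its score to the
# total (see Board.evaluate_row below). For example:
#   ' xx x ' in 'o xx x o'  -> True, so +1100000 is added
#   'ooooo'  in 'xooooox'   -> True, so -1000000000000000 is added
# (any other patterns that also match contribute their scores as well)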
class Board:
SIZE = 15
def generate_rows(self):
rows = []
for i in range(self.SIZE):
row = []
for j in range(self.SIZE):
row.append(0)
rows.append(row)
return rows
def generate_diagonals(self):
diagonals = []
delka = 1
for i in range(self.SIZE):
diagonal = []
for j in range(delka):
diagonal.append(0)
diagonals.append(diagonal)
delka += 1
delka = 14
for i in range(self.SIZE - 1):
diagonal = []
for j in range(delka):
diagonal.append(0)
diagonals.append(diagonal)
delka -= 1
return diagonals
def __init__(self):
self.rows = self.generate_rows()
self.columns = self.generate_rows()
self.diagonals_descending = self.generate_diagonals()
self.diagonals_ascending = self.generate_diagonals()
def row_to_string(self, row):
output = ''
for i in row:
if (i == 0):
output += ' '
if (i == 1):
output += 'x'
if (i == -1):
output += 'o'
return output
def evaluate_row(self, row):
string_row = self.row_to_string(row)
total_score = 0
for pattern in PATTERNS:
score, p = pattern
if p in string_row:
print(f'found pattern {p} in {row}')
total_score += score
#total_score = total_score + score
return total_score
def evaluate_position(self):
total_score = 0
for row in self.rows:
total_score += self.evaluate_row(row)
for col in self.columns:
total_score += self.evaluate_row(col)
for desc in self.diagonals_descending:
total_score += self.evaluate_row(desc)
for asc in self.diagonals_ascending:
total_score += self.evaluate_row(asc)
return total_score
def new_turn(self, row, column, player):
self.rows[row][column] = player
self.columns[column][row] = player
ascending_diagonal_number = row + column
if (row + column < self.SIZE):
self.diagonals_ascending[ascending_diagonal_number][column] = player
else:
self.diagonals_ascending[ascending_diagonal_number][self.SIZE - 1 - row] = player
descending_diagonal_number = self.SIZE - 1 - row + column
        if (descending_diagonal_number < self.SIZE):
self.diagonals_descending[descending_diagonal_number][column] = player
else:
self.diagonals_descending[descending_diagonal_number][row] = player
#self.print_all()
def get(self, row, col):
return self.rows[row][col]
def print_all(self):
print('rows')
for row in self.rows:
print(row)
print('cols')
for col in self.columns:
print(col)
print('desc')
for d in self.diagonals_descending:
print(d)
print('asc')
for d in self.diagonals_ascending:
print(d)
class Player:
    def __init__(self, player_sign):
        self.sign = player_sign
        self.opponent_sign = -player_sign
        self.name = 'Stepan'
self.board = Board()
random.seed(17)
def pick_random_valid_turn(self):
while True:
row = random.randint(0, 14)
col = random.randint(0, 14)
if (self.board.get(row, col) == 0): return (row, col)
def pick_best_turn(self):
best_score = -float('inf')
best_turn = None
for row in range(15):
for col in range(15):
if (self.board.get(row, col) != 0): continue
self.board.new_turn(row, col, self.sign)
score = self.board.evaluate_position()
if score > best_score:
best_turn = (row, col)
best_score = score
self.board.new_turn(row, col, 0)
return best_turn
def play(self, opponent_move):
if opponent_move != None:
row, col = opponent_move
self.board.new_turn(row, col, self.opponent_sign)
#my_turn_row, my_turn_col = self.pick_random_valid_turn()
my_turn_row, my_turn_col = self.pick_best_turn()
self.board.new_turn(my_turn_row, my_turn_col, self.sign)
return my_turn_row, my_turn_col
|
# encoding: utf-8
'''
This application does a simple NVE+Langevin LAMMPS simulation of spherocylinder-like rods
(defined in a .cfg file) using the "lammps_multistate_rods" library.
The initial locations of the rods are at SC lattice points defined by the input params, and
their orientations are randomly determined at each insertion point.
The simulation will stop automatically once at least two rods are in the beta state (state ID = 1).
Created on 16 Mar 2018
@author: Eugen Rožić
'''
import os
import argparse
parser = argparse.ArgumentParser(description='Program for NVE+Langevin hybrid LAMMPS'\
' simulation of spherocylinder-like rods, using the'\
' "lammps_multistate_rods" library.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('cfg_file',
help='path to the "lammps_multistate_rods" model configuration file')
parser.add_argument('run_file',
help='path to the run configuration file')
parser.add_argument('simlen', type=int,
help='the max length of the simulation')
parser.add_argument('--seed', type=int,
help='the seed for random number generators')
parser.add_argument('--out', type=str, default=None,
help='name/path for the output folder (defaults to cfg_file path w/o ext)')
parser.add_argument('-o', '--output_freq', type=int,
help='configuration output frequency (in MD steps);'\
' default behavior is after every batch of MC moves')
parser.add_argument('-s', '--silent', action='store_true',
help="doesn't print anything to stdout")
args = parser.parse_args()
if not args.cfg_file.endswith('.cfg'):
raise Exception('Model configuration file (first arg) has to end with ".cfg"!')
if not args.run_file.endswith('.run'):
raise Exception('Run configuration file (second arg) has to end with ".run"!')
if args.seed is None:
import time
seed = int((time.time() % 1)*1000000)
print "WARNING: no seed given explicitly; using:", seed
else:
seed = args.seed
if args.out is None:
output_folder = os.path.splitext(args.cfg_file)[0]
else:
output_folder = args.out
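# A typical invocation might look like this (purely illustrative file names; the
# positional and optional arguments are the ones defined for the parser above):
#
#   python this_script.py model.cfg fibril.run 1000000 --seed 42 --out results -o 2000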
#========================================================================================
#from mpi4py import MPI #TODO make MPI work...
from lammps import PyLammps
import lammps_multistate_rods as rods
if not os.path.exists(output_folder):
os.makedirs(output_folder)
run_filename = os.path.splitext(os.path.basename(args.run_file))[0]
sim_ID = '{:s}_{:d}'.format(run_filename, seed)
dump_filename = sim_ID+'.dump'
dump_path = os.path.join(output_folder, dump_filename)
log_filename = '{:d}.lammps'.format(seed)
log_path = os.path.join(output_folder, log_filename)
run_args = rods.rod_model.Params()
execfile(args.run_file,
{'__builtins__' : None, 'True' : True, 'False' : False, 'None' : None},
vars(run_args))
out_freq = args.output_freq if args.output_freq != None else run_args.run_length
py_lmp = PyLammps(cmdargs=['-screen','none'])
py_lmp.log('"'+log_path+'"')
model = rods.Rod_model(args.cfg_file)
simulation = rods.Simulation(py_lmp, model, seed, output_folder)
py_lmp.units("lj")
py_lmp.dimension(3)
py_lmp.boundary("p p p")
py_lmp.lattice("sc", 1.0/(run_args.cell_size**3))
py_lmp.region("box", "block", -run_args.num_cells / 2, run_args.num_cells / 2,
-run_args.num_cells / 2, run_args.num_cells / 2,
-run_args.num_cells / 2, run_args.num_cells / 2)
simulation.setup("box")
simulation.create_rods(box = None)
# DYNAMICS
py_lmp.fix("thermostat", "all", "langevin",
run_args.temp, run_args.temp, run_args.damp, seed)#, "zero yes")
simulation.set_rod_dynamics("nve")
py_lmp.neigh_modify("every 1 delay 1")
py_lmp.timestep(run_args.dt)
# RANDOMISE INITIAL CONFIGURATION
simulation.deactivate_state(0, vx_eps=5.0)
py_lmp.command('run 10000')
simulation.activate_state(0)
py_lmp.reset_timestep(0)
# GROUPS & COMPUTES
if hasattr(run_args, 'label_fibrils'):
fibril_group = 'beta_patches'
beta_active_patch_types = sorted(filter(lambda t: (t in model.active_bead_types) and\
(t not in model.body_bead_types),
model.state_bead_types[1]))
py_lmp.variable(fibril_group, 'atom', '"' +
'||'.join(['(type == {:d})'.format(t)
for t in beta_active_patch_types]) +
'"')
py_lmp.group(fibril_group, 'dynamic', simulation.rods_group, 'var', fibril_group,
'every', out_freq)
fibril_compute = "fibril_ID"
if hasattr(run_args, 'fibril_cutoff'):
fibril_cutoff = run_args.fibril_cutoff
else:
fibril_cutoff = 0
i = -1
for t1 in beta_active_patch_types:
i += 1
for t2 in beta_active_patch_types[i:]:
try:
int_key = model.eps[(t1,t2)][1]
                except KeyError: # no interaction defined for this pair of bead types
continue
int_range = model.int_types[int_key][1]
cutoff = model.bead_radii[t1] + model.bead_radii[t2] + int_range*2/3
if cutoff > fibril_cutoff:
fibril_cutoff = cutoff
py_lmp.compute(fibril_compute, fibril_group, 'aggregate/atom', fibril_cutoff)
# OUTPUT
py_lmp.thermo_style("custom", "step atoms", "pe temp")
dump_elems = "id x y z type mol"
try:
    dump_elems += " c_"+fibril_compute
except NameError: # fibril_compute exists only if run_args defines "label_fibrils"
    pass
py_lmp.dump("dump_cmd", "all", "custom", out_freq, dump_path, dump_elems)
py_lmp.dump_modify("dump_cmd", "sort id")
py_lmp.thermo(out_freq)
# RUN...
if model.num_states == 1 or run_args.mc_moves == 0:
raise Exception("Multiple states need to exist and MC moves need to be made for fibrils to grow!")
mc_moves_per_run = int(run_args.mc_moves * simulation.rods_count())
py_lmp.command('run {:d} post no'.format(run_args.run_length-1)) #so output happens after state changes
remaining = args.simlen - run_args.run_length + 1
while True:
success = simulation.state_change_MC(mc_moves_per_run)#, replenish=("box", 2*model.rod_radius, 10)) TODO
base_count = simulation.state_count(0)
beta_count = simulation.state_count(1)
if not args.silent:
        print 'step {:d} / {:d} : beta-to-soluble ratio = {:d}/{:d} = {:.5f} (accept rate = {:.5f})'.format(
            args.simlen - remaining, args.simlen, beta_count, base_count, float(beta_count)/base_count,
            float(success)/mc_moves_per_run)
if beta_count >= 2:
to_next_output = out_freq - (args.simlen - remaining)%out_freq
py_lmp.command('run {:d} post no'.format(to_next_output))
break
    elif remaining >= run_args.run_length:
py_lmp.command('run {:d} post no'.format(run_args.run_length))
remaining -= run_args.run_length
else:
py_lmp.command('run {:d} post no'.format(remaining))
break
|
from prettytable import PrettyTable
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import utils
class Evaluator(object):
@staticmethod
def evaluate_multi_class(y_preds, y_truths):
pred_labels = set(y_preds)
true_labels = set(y_truths)
all_labels = pred_labels.union(true_labels)
print(pred_labels, true_labels)
label2idx, idx2label = {}, {}
for i, label in enumerate(all_labels):
label2idx[label] = i
idx2label[i] = label
preds = [label2idx[p] for p in y_preds]
truths = [label2idx[t] for t in y_truths]
accuracy = accuracy_score(truths, preds)
individual_precision = precision_score(truths, preds, average=None)
individual_recall = recall_score(truths, preds, average=None)
individual_f1 = f1_score(truths, preds, average=None)
micro_precision = precision_score(truths, preds, average="micro")
micro_recall = recall_score(truths, preds, average="micro")
micro_f1 = f1_score(truths, preds, average="micro")
macro_precision = precision_score(truths, preds, average="macro")
macro_recall = recall_score(truths, preds, average="macro")
macro_f1 = f1_score(truths, preds, average="macro")
result = {
"accuracy": accuracy,
"individual_precision": individual_precision,
"individual_recall": individual_recall,
"individual_f1": individual_f1,
"micro_precision": micro_precision,
"micro_recall": micro_recall,
"micro_f1": micro_f1,
"macro_precision": macro_precision,
"macro_recall": macro_recall,
"macro_f1": macro_f1,
"label2idx": label2idx,
"idx2label": idx2label
}
header = ["label", "accuracy", "precision", "recall", "f1"]
csv_content = []
table = PrettyTable(header)
for label, idx in label2idx.items():
row = [label, "", str(individual_precision[idx]), str(individual_recall[idx]),
str(individual_f1[idx])]
table.add_row(row)
csv_content.append(row)
macro_row = ["macro", accuracy, macro_precision, macro_recall, macro_f1]
micro_row = ["micro", "", micro_precision, micro_recall, micro_f1]
table.add_row(macro_row)
table.add_row(micro_row)
csv_content.append(macro_row)
csv_content.append(micro_row)
utils.write_csv(csv_content, header, "evaluation.csv")
        print(table)
        return result
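# A minimal usage sketch (assumes the utils module imported above provides the
# write_csv(rows, header, filename) helper that evaluate_multi_class calls):
#
#   y_pred = ["cat", "dog", "dog", "bird"]
#   y_true = ["cat", "dog", "bird", "bird"]
#   result = Evaluator.evaluate_multi_class(y_pred, y_true)
#   print(result["accuracy"], result["macro_f1"])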
|
# plotting two line diagrams that share an x-axis but use different y-axes
# especially useful when you want to superimpose a ratio/percentage against absolute numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from textwrap import wrap
from matplotlib.ticker import FuncFormatter
import locale
locale.setlocale(locale.LC_ALL, 'id_ID.UTF8')
## force matplotlib to use TrueType fonts
plt.rcParams['pdf.fonttype'] = 42
# necessary if you want to use relative paths but your project folder isn't on Python's sys.path
import sys, os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
berkasData = currentdir +'\\bab_05_02_dataPlotAKI.csv'
judulDiagram = 'Angka Kematian Ibu'
sumbuY = 'Jumlah'
tickerSumbuY = np.arange(0,510,100)
tickerSumbuY2 = np.arange(0,21,5)
sumbuX = 'Tahun'
berkasSimpan = currentdir +'\\bab_05_02_plotAKI.pdf'
# read data file
colnames = ['tahun','aki', 'kematian']
data = pandas.read_csv(berkasData, names=colnames, sep=';')
tahun = data.tahun.tolist()
aki = data.aki.tolist()
kematian = data.kematian.tolist()
# setting up x locations for the groups and width of the bars
ind = np.arange(len(tahun))
# actually just remnant from other bar diagrams, you can safely ignore this
#width = 0.25
# make the plots
fig, ax = plt.subplots()
garis1 = ax.plot(ind, aki, marker='.', color='royalblue', label='AKI')
ax2 = ax.twinx()
garis2 = ax2.plot(ind, kematian, marker='.', color='#cc0000', label='Jumlah Kematian')
# add some text for labels, title and axes ticks
ax.set_title(judulDiagram)
ax.set_yticks(tickerSumbuY)
# yticks can be set to auto
ax.set_ylabel('AKI per 100.000 kelahiran')
formatter = FuncFormatter(lambda y, pos: "{:n}".format(y))
# use round to get significant decimal
#formatter = FuncFormatter(lambda y, pos: "{:n}".format(round(y,2)))
ax.yaxis.set_major_formatter(formatter)
# set secondary yticks
ax2.set_yticks(tickerSumbuY2)
ax2.set_ylabel('Jumlah kematian')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax.set_xticks(ind)
ax.set_xticklabels(list(tahun), fontsize='small', ha='center')
ax.set_xlabel(sumbuX)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
# legend workaround for 2-vert axis line diagram
kolomLegen = ['AKI','Jumlah Kematian']
ax.legend((garis1[0], garis2[0]), kolomLegen, fontsize='x-small', loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=2)
# make labels for plots
for i, txt in enumerate(aki):
ax.annotate('{:n}'.format(txt), (ind[i],aki[i]+0.5))
for i, txt in enumerate(kematian):
ax2.annotate(txt, (ind[i],kematian[i]))
# finishing
pyrfig = plt.figure(1)
pyrfig.set_figwidth(8)
pyrfig.set_figheight(5)
# tight_layout to make consistent size
# adjust subplot to make room for legend
fig.subplots_adjust(bottom=-0.15)
plt.tight_layout()
# the following two lines save the figure to disk and close it; comment them out to skip saving
plt.savefig(berkasSimpan)
plt.close(pyrfig)
# uncomment following lines to generate figures on screen
# plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
def values_to_bins(data, bins):
return np.digitize(data, bins) - 1
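# For example, with 5 equally spaced bin edges over [0, 255]:
#   values_to_bins(np.array([0., 100., 255.]), np.linspace(0, 255, num=5))
#   -> array([0, 1, 4])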
train_images = np.loadtxt('data/train_images.txt')  # load the training images
# load the training labels as integers
train_labels = np.loadtxt('data/train_labels.txt').astype(int)
# image = train_images[1, :]  # the first image
test_images = np.loadtxt('data/test_images.txt')  # load the test images
# load the test labels as integers
test_labels = np.loadtxt('data/test_labels.txt').astype(int)
# Note: np.digitize numbers the intervals starting from 1 (we have no values < 0),
# which is why values_to_bins subtracts 1; it returns, for every element,
# the index of the interval that element falls into.
# for num_bins in range(1, 11):
# print(f"Num bins {num_bins}")
#     # returns the bin edges
# bins = np.linspace(start=0, stop=255, num=num_bins)
# train_to_bins = values_to_bins(train_images, bins)
# test_to_bins = values_to_bins(test_images, bins)
# naive_bayes_model = MultinomialNB()
# naive_bayes_model.fit(train_to_bins, train_labels)
# print(naive_bayes_model.score(test_to_bins, test_labels))
bins = np.linspace(start=0, stop=255, num=5)
train_to_bins = values_to_bins(train_images, bins)
test_to_bins = values_to_bins(test_images, bins)
# image = np.reshape(test_to_bins[0], (28, 28))
# plt.imshow(image.astype(np.uint8), cmap='gray')
# plt.show()
naive_bayes_model = MultinomialNB()
naive_bayes_model.fit(train_to_bins, train_labels)
confusion_matrix = np.zeros((10, 10))
for index, prediction in enumerate(naive_bayes_model.predict(test_to_bins)):
# if prediction != test_labels[index]:
    #     plt.title(f"{test_labels[index]} was identified as {prediction}")
# plt.imshow(np.reshape(test_images[index, :], (28, 28)).astype(
# np.uint8), cmap='gray')
# plt.show()
confusion_matrix[test_labels[index]][prediction] += 1
print(confusion_matrix)
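# Rows of the confusion matrix are true digits, columns are predictions, so the
# overall accuracy can be read off as the trace divided by the total number of
# test samples:
print('accuracy:', np.trace(confusion_matrix) / np.sum(confusion_matrix))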
|
TESTAPI_ID = 'testapi_id'
CSRF_TOKEN = 'csrf_token'
ROLE = 'role'
TESTAPI_USERS = ['opnfv-testapi-users']
|
from .models import *
from django.shortcuts import *
from django.views import View
from django.http import HttpResponse, HttpRequest
def check_if_int(number):
try:
return int(number)
except Exception:
return None
def create_person(request):
person = Person()
name = request.POST.get("name")
surname = request.POST.get("surname")
description = request.POST.get("description")
person.name = name
person.surname = surname
person.description = description
if name and surname:
person.save()
def create_phone(request, id):
phone = Phone()
phone.number = request.POST.get("number")
phone.type_number = request.POST.get("type_number")
phone.person_phone= Person.objects.get(pk=id)
    if phone.number:
phone.save()
class AddPerson(View):
def get(self, request):
return render(request, "formatki/addPerson.html")
def post(self, request):
create_person(request)
return HttpResponse("Dodano nowy kontakt")
class PersonEdit(View):
def get(self, request, id):
person = Person.objects.get(pk=id)
context = {
'person': person
}
return render(request, "formatki/modifyPerson.html", context)
def post(self, request, id):
person = Person.objects.get(pk=id)
person.name = request.POST.get("name")
person.surname = request.POST.get("surname")
person.description = request.POST.get("description")
person.save()
return HttpResponse("Wprowadzono zmiany")
class PersonDetails(View):
def get(self, request, id):
person = Person.objects.get(pk=id)
context = {
'person': person
}
return render(request, "formatki/dataPerson.html", context)
class AllPerson(View):
def get(self, request):
persons = Person.objects.all()
context = {
'persons': persons,
}
return render(request, "formatki/allPersons.html", context)
class PersonDelete(View):
def get(self, request, id):
context = {'person': Person.objects.get(pk=id)}
return render(request, "formatki/deletePerson.html", context)
def post(self, request, id):
if request.POST.get('decision') == 'yes':
person = Person.objects.get(pk=id)
person.delete()
return redirect('/')
class AddPhone(View):
def get(self, request, id):
return render(request, "formatki/addPhone.html")
def post(self, request, id):
create_phone(request, id)
return HttpResponse("Dodano nowy telefon")
class AddAdress(View):
def get(self, request, id):
return render(request, "formatki/addAdress.html")
def post(self, request, id):
city = request.POST.get("city")
street = request.POST.get("street")
house_number = request.POST.get("house_number")
apartment_number = request.POST.get("apartment_number")
person_adress = Person.objects.get(pk=id)
Adress.objects.create(city = city, street = street, house_number=house_number, apartment_number=apartment_number, person_adress=person_adress)
return HttpResponse('Dodano nowy adres')
class AddEmail(View):
def get(self, request, id):
return render(request, "formatki/addEmail.html")
def post(self, request, id):
email = request.POST.get("email")
email_type = request.POST.get("email_type")
person_email = Person.objects.get(pk=id)
Email.objects.create(email=email, email_type=email_type, person_email=person_email)
return HttpResponse ('Dodano adres email')
class modifyAdress(View):
def get(self, request, id):
return render(request, "formatki/modifyAdress.html")
def post(self, request, id):
city = request.POST.get("city")
street = request.POST.get("street")
house_number = request.POST.get("house_number")
apartment_number = request.POST.get("apartment_number")
person_adress = Person.objects.get(pk=id)
Adress.objects.create(city=city, street=street, house_number=house_number, apartment_number=apartment_number,
person_adress=person_adress)
return HttpResponse('Zmieniono adres')
class modifyPhone(View):
def get(self, request, id):
return render(request, "formatki/modifyPhone.html")
def post(self, request, id):
create_phone(request, id)
return HttpResponse('Zmieniono telefon')
class modifyEmail(View):
def get(self, request, id):
return render(request, "formatki/modifyEmail.html")
def post(self, request, id):
email = request.POST.get("email")
email_type = request.POST.get("email_type")
person_email = Person.objects.get(pk=id)
Email.objects.create(email=email, email_type=email_type, person_email=person_email)
return HttpResponse ('Zmieniono adres email')
class AddGroup(View):
    def get(self, request, id):
return render(request, "formatki/addGroup.html")
def post(self, request, id):
name = request.POST.get("name")
person = Person.objects.get(pk=id)
Group.objects.create(name=name, person=person)
        return HttpResponse('Stworzono nową grupę')
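# A hypothetical urls.py wiring for the views above (route names and prefixes are
# illustrative assumptions, not taken from the original project):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.AllPerson.as_view()),
#       path('person/add/', views.AddPerson.as_view()),
#       path('person/<int:id>/', views.PersonDetails.as_view()),
#       path('person/<int:id>/edit/', views.PersonEdit.as_view()),
#       path('person/<int:id>/delete/', views.PersonDelete.as_view()),
#       path('person/<int:id>/phone/add/', views.AddPhone.as_view()),
#   ]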
|