text stringlengths 8 6.05M |
|---|
# Bucket sort algorithm
"""
Bucket sort is simple: prepare a number of buckets, distribute the items to be
sorted into their designated buckets, then read the buckets back in order --
a bit like sorting items into recycling bins.
O(M + N)
"""
# Task: sort the data (scores) in ascending order
data = [89, 34, 23, 78, 67, 100, 66, 29, 79, 55, 78, 88, 92, 96, 96, 23]
# Expected: data = [23, 23, 29, 34, 55, 66, 67, 78, 78, 79, 88, 89, 92, 96, 96, 100]
def bucket_sort(data, max_value=None):
    """Sort a list of non-negative integers in place using counting (bucket) sort.

    One bucket counts the occurrences of each value; reading the buckets back
    in index order yields the sorted sequence.  Runs in O(M + N), where M is
    the value range and N is len(data).

    Args:
        data: list of non-negative ints; sorted in place.
        max_value: largest possible value.  Defaults to max(data), which
            generalizes the original hard-coded maximum score of 100.
    """
    # Empty input: nothing to do (and max() would raise on it).
    if not data:
        return
    if max_value is None:
        max_value = max(data)
    # 1. Create one counting bucket per possible value.
    bucket = [0] * (max_value + 1)
    # 2. Tally each value into its bucket.
    for value in data:
        bucket[value] += 1
    print(bucket)
    # 3. Read the buckets back in ascending order, overwriting `data`.
    index = 0
    for value, count in enumerate(bucket):
        for _ in range(count):
            data[index] = value
            index += 1
    print(data)
bucket_sort(data)  # demo: sorts the sample scores in place (prints buckets and result)
|
from PIL import Image, ImageDraw, ImageFont
import numpy as np
def create_DB():
    """Render each ASCII letter to a small image and record its average RGB color.

    Writes one line per character to char_data.txt in the form "c:r,g,b" so a
    later pass can match image brightness to characters.  Requires PIL to be
    able to locate the 'arial.ttf' font file.
    """
    # generate char imgs (PIL is already imported at module level; the
    # original re-imported it here redundantly):
    IMG_WIDTH = 10
    IMG_HEIGHT = 15
    fnt = ImageFont.truetype('arial.ttf', 15)
    chars = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'
             ,'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
    with open("char_data.txt", 'w') as f:
        for c in chars:
            img = Image.new('RGB', (IMG_WIDTH, IMG_HEIGHT), color=(255, 255, 255))
            d = ImageDraw.Draw(img)
            d.text((0, 0), c, fill=(0, 0, 0), font=fnt)
            # Average color: fetch each pixel once instead of three times.
            avg = [0, 0, 0]
            for x in range(IMG_WIDTH):
                for y in range(IMG_HEIGHT):
                    r, g, b = img.getpixel((x, y))
                    avg[0] += r
                    avg[1] += g
                    avg[2] += b
            n_pixels = IMG_HEIGHT * IMG_WIDTH
            avg = [channel / n_pixels for channel in avg]
            print(c, avg)
            f.write(c + ":" + str(avg[0]) + "," + str(avg[1]) + "," + str(avg[2]) + "\n")
if __name__ == '__main__':
    # load the DB written by create_DB(): char -> [r, g, b] average color.
    DB = {}
    with open("char_data.txt", 'r') as f:
        data = f.readlines()
        for row in data:
            c, rgb_data = row.replace("\n", "").split(":")
            rgb = rgb_data.split(",")
            DB[c] = [float(rgb[0]), float(rgb[1]), float(rgb[2])]
    # print(DB)
    # normalize the DB:
    # NOTE(review): mean/std come from the red channel only but are applied to
    # all three channels -- presumably fine for grayscale glyphs; confirm.
    red_val = [DB[d][0] for d in DB]
    mean = np.sum(red_val)/len(red_val)
    std = np.std(red_val)
    for key in DB:
        DB[key] = [(DB[key][0] - mean)/std, (DB[key][1] - mean)/std, (DB[key][2] - mean)/std]
    #print(DB)
    # 'LA' = luminance + alpha; only the luminance channel ([0]) is used below.
    target = Image.open("Pictures/Abschlussball.png", 'r').convert('LA')
    out_res = (1000, 350)
    target = target.resize(out_res)
    # normalize the target img:
    pixels_as_list = []
    for x in range(out_res[1]):
        for y in range(out_res[0]):
            pixels_as_list.append(target.getpixel((y, x))[0])
    mean = np.sum(pixels_as_list)/(out_res[0]*out_res[1])
    #print(mean)
    std = np.std(pixels_as_list)
    norm_target = np.zeros(shape=out_res)
    for x in range(out_res[1]):
        for y in range(out_res[0]):
            norm_target[y,x] = (target.getpixel((y, x))[0] - mean) / std
    #print(norm_target)
    #from matplotlib import pyplot as plt
    #plt.imshow(target.resize(out_res))
    #plt.show()
    output = ""
    # now find for each pixel the best matching character:
    for x in range(out_res[1]):
        for y in range(out_res[0]):
            t_val = norm_target[y,x]
            diff = 999  # sentinel larger than any plausible normalized distance
            best_char = 'a'
            for key in DB.keys():
                if np.abs(DB[key][0] - t_val) < diff:
                    diff = np.abs(DB[key][0] - t_val)
                    best_char = key
            output += best_char
        output += "\n"
    #print(output)
    with open("png2txt_output.txt", 'w') as f:
        f.write(output)
|
import os
import csv
from itertools import chain
import subprocess

with open('cloudwatch_logs.csv', 'r') as f:
    reader = csv.reader(f)
    security_groups = list(reader)

for sg in security_groups:
    # csv.reader yields each row as a list of column strings; use the first
    # column directly instead of slicing str(list) ("['sg-...']"[2:-2]).
    if not sg:
        continue
    group_id = sg[0]
    print("\nChanging retention on Log Group: {}".format(group_id))
    # NOTE(review): the message talks about log-group retention, but the AWS
    # call deletes an EC2 security group -- confirm which operation is intended.
    # An argument list avoids shell injection via CSV content (the original
    # os.system interpolated the value into a shell string).
    subprocess.run(["aws", "ec2", "delete-security-group", "--group-id", group_id], check=False)
|
# Original
# Interactive tally: read direction keys until an unrecognized key is entered,
# counting visits per store and recording visit order in list1.
bool1 = True
store1 = 0
store2 = 0
store3 = 0
store4 = 0
list1 = []
while bool1:
    keys = input("Directions in ^ (North) or v (South) or < (East) or > (West): ")
    if keys == "^":
        store1 += 1
        print("store1", store1)
        list1.append("store1")
    elif keys == ">":
        store2 += 1
        print("store2", store2)
        list1.append("store2")
    elif keys == "<":
        store3 += 1
        print("store3", store3)
        list1.append("store3")
    elif keys == "v":
        store4 += 1
        print("store4", store4)
        list1.append("store4")
    else:
        # Any other key ends the session.
        break
print("These are the number of times each store has been visited: ")
print("Store 1:", store1)
print("Store 2:", store2)
print("Store 3:", store3)
print("Store 4:", store4)
# Bug fix: count distinct stores visited, not the total number of visits.
print("Only", len(set(list1)), "stores have been visited.")
|
#!/usr/bin/python
# EA to check for Safari's opening "safe" files on download
# (Jamf-style extension attribute; Python 2 syntax -- print statements.)
import CoreFoundation

# Read Safari's effective "Open 'safe' files after downloading" preference.
domain = 'com.apple.Safari'
key = 'AutoOpenSafeDownloads'
key_value = CoreFoundation.CFPreferencesCopyAppValue(key, domain)
# NOTE(review): CFPreferencesCopyAppValue presumably returns None when the key
# is unset; None == 0 is False, so an unset key reports "Enabled" -- confirm
# that is the intended default.
if key_value == 0:
    print "<result>Disabled</result>"
else:
    print "<result>Enabled</result>"
|
import numpy
from xversion.model import *
'''
每次都会给一整棵树,所以
'''
class Painter(object):
    """Expands an L-system tree string and converts it into drawable branches.

    `tree` must provide: axiom (start string), rules (dict: rule -> symbol),
    variables (rewritable symbols) and angle (rotation step in radians).
    """

    # Class-level defaults shared by all instances; never mutated in place here,
    # only rebound to new values inside methods.
    start_point = numpy.array([0, 0, 0])
    base_vector = numpy.array([0, 0, 10])
    tree_string = ''
    n = 0

    def __init__(self, tree):
        self.tree = tree
        self.tree_string = tree.axiom

    # Every time this function called, the tree string will be renew
    def build_tree(self):
        """Apply one rewriting pass to the tree string and return its branches."""
        rules = self.tree.rules
        variables = self.tree.variables
        tree_string = self.tree_string
        temp_string = ''
        if tree_string == '':
            raise RuntimeError('the tree string must not be none')
        for index, item in enumerate(tree_string):
            # Non-variable symbols are copied through unchanged.
            if item not in variables:
                temp_string = temp_string + item
                continue
            item_rules = get_rule_list(rules, item)
            rule = self.check_rules(item_rules, tree_string[index + 1:], temp_string, key=item)
            temp_string = temp_string + rule
        self.tree_string = temp_string
        branch_list = self.string_to_branch(temp_string)
        return branch_list

    def check_rules(self, item_rules, rest_string, front_string, key):
        """Return the first rule whose substitution yields a valid tree.

        Falls back to `key` itself (no rewriting) when no rule validates.
        """
        rules = item_rules
        for rule in rules:
            string = front_string + rule + rest_string
            branch_list = self.string_to_branch(string)
            # This is the method to check the validation of rule
            valid = check_valid(branch_list)
            if valid:
                return rule
        return key

    def string_to_branch(self, tree_string):
        """Interpret the L-system string as 3D turtle moves; return Branch list.

        'F'/'A'/'B'/'C' draw forward; '<' '>' '+' '-' '&' '∧' '|' rotate the
        heading via get_matrix; '[' / ']' push / pop position and heading.
        """
        vector = self.base_vector
        start_point = self.start_point
        angle = self.tree.angle
        string = tree_string
        save_point = []   # stack of positions saved at '['
        save_vector = []  # stack of headings saved at '['
        branch_list = []
        for item in string:
            if item == 'F' or item == 'A' or item == 'B' or item == 'C':
                # Draw-forward symbols: emit a branch and advance the turtle.
                end_point = start_point + vector
                branch = Branch(start_point, end_point, item)
                branch_list.append(branch)
                start_point = end_point
            elif item == '>':
                r_matrix = get_matrix('H', -angle)
                vector = numpy.dot(vector, r_matrix)
            elif item == '<':
                r_matrix = get_matrix('H', angle)
                vector = numpy.dot(vector, r_matrix)
            elif item == '+':
                r_matrix = get_matrix('U', angle)
                vector = numpy.dot(vector, r_matrix)
            elif item == '-':
                r_matrix = get_matrix('U', -angle)
                vector = numpy.dot(vector, r_matrix)
            elif item == '&':
                r_matrix = get_matrix('L', angle)
                vector = numpy.dot(vector, r_matrix)
            elif item == '∧':
                r_matrix = get_matrix('L', -angle)
                vector = numpy.dot(vector, r_matrix)
            elif item == '|':
                # 180-degree turn. NOTE(review): `math` is not imported in this
                # module explicitly -- presumably supplied by the star import
                # from xversion.model; confirm.
                r_matrix = get_matrix('U', math.pi)
                vector = numpy.dot(vector, r_matrix)
            elif item == '[':
                save_point.append(start_point)
                save_vector.append(vector)
            elif item == ']':
                start_point = save_point.pop()
                vector = save_vector.pop()
            else:
                # Unknown symbols are ignored.
                pass
        return branch_list
def check_valid(branch_list):
    """Placeholder rule validator: currently accepts every branch list."""
    return True
def get_matrix(m_type, x):
    """Return the 3x3 rotation matrix for axis tag 'H', 'L' or 'U'.

    `x` is the rotation angle in radians.  Raises RuntimeError for any
    other tag.
    """
    c, s = math.cos(x), math.sin(x)
    if m_type == 'H':
        return numpy.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
    if m_type == 'L':
        return numpy.array([[c, 0, -s], [0, 1, 0], [s, 0, c]])
    if m_type == 'U':
        return numpy.array([[1, 0, 0], [0, c, -s], [0, s, c]])
    raise RuntimeError('matrix type error')
def get_rule_list(rules, key_word):
    """Return all rules (dict keys) whose mapped symbol equals key_word."""
    return [rule for rule, symbol in rules.items() if symbol == key_word]
|
from src.abcnn.graph import Graph
from src.abcnn import args
import tensorflow as tf
import os
import numpy as np
import pandas as pd
from src.utils import singleton
import logging
import logging.config
from src.config import AbcnnConfig
@singleton
class AbcnnModel:
    """Singleton wrapper around the ABCNN text-matching graph (TensorFlow 1.x).

    Owns a shared tf.Session, the checkpoint saver and the character
    vocabulary used to encode sentence pairs.
    """

    def __init__(self):
        self.model = Graph(True, True)
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        self.word2idx = {}

    def load_model(self, model_file='../model/abcnn2.ckpt'):
        """Restore trained weights from `model_file` into the shared session."""
        self.saver.restore(self.sess, model_file)
        print('load SUCCESS !')

    def train(self, p, h, y, p_eval, h_eval, y_eval,
              model_file='../model/abcnn.ckpt'):
        """Train on (p, h, y); evaluate each epoch and checkpoint on improvement.

        Args:
            p, h: index-encoded premise/hypothesis sequences.
            y: labels.
            p_eval, h_eval, y_eval: held-out evaluation set.
            model_file: checkpoint path written whenever eval loss improves.
        """
        p, h, y = self.shuffle(p, h, y)
        p_holder = tf.placeholder(
            dtype=tf.int32, shape=(
                None, args.seq_length), name='p')
        h_holder = tf.placeholder(
            dtype=tf.int32, shape=(
                None, args.seq_length), name='h')
        y_holder = tf.placeholder(dtype=tf.int32, shape=None, name='y')
        dataset = tf.data.Dataset.from_tensor_slices(
            (p_holder, h_holder, y_holder))
        dataset = dataset.batch(args.batch_size).repeat(args.epochs)
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 1
        # Training runs in its own session, separate from self.sess.
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(
                iterator.initializer,
                feed_dict={
                    p_holder: p,
                    h_holder: h,
                    y_holder: y})
            steps = int(len(y) / args.batch_size)
            last_loss = 1.0
            for epoch in range(args.epochs):
                for step in range(steps):
                    p_batch, h_batch, y_batch = sess.run(next_element)
                    _, loss, acc = sess.run(
                        [self.model.train_op, self.model.loss, self.model.acc],
                        feed_dict={self.model.p: p_batch,
                                   self.model.h: h_batch,
                                   self.model.y: y_batch,
                                   self.model.keep_prob: args.keep_prob})
                    print(
                        'epoch:',
                        epoch,
                        ' step:',
                        step,
                        ' loss: ',
                        loss,
                        ' acc:',
                        acc)
                loss_eval, acc_eval = sess.run(
                    [self.model.loss, self.model.acc],
                    feed_dict={self.model.p: p_eval,
                               self.model.h: h_eval,
                               self.model.y: y_eval,
                               self.model.keep_prob: 1})
                print('loss_eval: ', loss_eval, ' acc_eval:', acc_eval)
                print('\n')
                # Checkpoint only when the evaluation loss improves.
                if loss_eval < last_loss:
                    last_loss = loss_eval
                    self.saver.save(sess, model_file)

    def predict(self, p, h):
        """Run inference on index-encoded pairs (p, h); return predictions."""
        prediction = self.sess.run(self.model.prediction,
                                   feed_dict={self.model.p: p,
                                              self.model.h: h,
                                              self.model.keep_prob: 1})
        return prediction

    def test(self, p, h, y):
        """Return (loss, accuracy) on a labelled test set.

        NOTE(review): `with self.sess:` closes the shared session on exit, so
        predict() cannot be used afterwards -- confirm intended.
        """
        with self.sess:
            loss, acc = self.sess.run([self.model.loss, self.model.acc],
                                      feed_dict={self.model.p: p,
                                                 self.model.h: h,
                                                 self.model.y: y,
                                                 self.model.keep_prob: 1})
        return loss, acc

    # Load the character vocabulary.
    def load_char_vocab(self):
        """Read ./input/vocab.txt and build the word -> index mapping."""
        path = os.path.join(os.path.dirname(__file__), './input/vocab.txt')
        # Close the file deterministically (the original leaked the handle).
        with open(path, encoding='utf-8') as vocab_file:
            vocab = [line.strip() for line in vocab_file.readlines()]
        self.word2idx = {word: index for index, word in enumerate(vocab)}
        return self.word2idx

    def pad_sequences(self, sequences, maxlen=None, dtype='int32', padding='post',
                      truncating='post', value=0.):
        """Pad/truncate sequences to a common length.

        Every sequence is padded or truncated to `maxlen`; when `maxlen` is
        None the longest sequence's length is used.  'post' operates at the
        tail of a sequence, 'pre' at the head (both default to 'post').

        Args:
            sequences: iterable of sequences.
            maxlen: int target length, or None for the maximum found.
            dtype: dtype of the returned array.
            padding: 'pre' or 'post' -- where the fill value goes.
            truncating: 'pre' or 'post' -- which end gets cut.
            value: float fill value.

        Returns:
            x: numpy array of shape (number_of_sequences, maxlen).

        Raises:
            ValueError: on an unknown `padding` or `truncating` mode.
        """
        lengths = [len(s) for s in sequences]
        nb_samples = len(sequences)
        if maxlen is None:
            maxlen = np.max(lengths)
        x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
        for idx, s in enumerate(sequences):
            if len(s) == 0:
                continue  # empty list was found
            if truncating == 'pre':
                trunc = s[-maxlen:]
            elif truncating == 'post':
                trunc = s[:maxlen]
            else:
                # Bug fix: the original message interpolated `padding` here.
                raise ValueError("Truncating type '%s' not understood" % truncating)
            if padding == 'post':
                x[idx, :len(trunc)] = trunc
            elif padding == 'pre':
                x[idx, -len(trunc):] = trunc
            else:
                raise ValueError("Padding type '%s' not understood" % padding)
        return x

    def char_index(self, p_sentences, h_sentences):
        """Encode sentence pairs as padded index sequences (unknown chars dropped)."""
        p_list, h_list = [], []
        for p_sentence, h_sentence in zip(p_sentences, h_sentences):
            p = [self.word2idx[word.lower()] for word in p_sentence if len(
                word.strip()) > 0 and word.lower() in self.word2idx.keys()]
            h = [self.word2idx[word.lower()] for word in h_sentence if len(
                word.strip()) > 0 and word.lower() in self.word2idx.keys()]
            p_list.append(p)
            h_list.append(h)
        p_list = self.pad_sequences(p_list, maxlen=args.seq_length)
        h_list = self.pad_sequences(h_list, maxlen=args.seq_length)
        return p_list, h_list

    def shuffle(self, *arrs):
        """Shuffle equal-length arrays in unison; return a tuple of numpy arrays."""
        arrs = list(arrs)
        for i, arr in enumerate(arrs):
            assert len(arrs[0]) == len(arrs[i])
            arrs[i] = np.array(arr)
        p = np.random.permutation(len(arrs[0]))
        return tuple(arr[p] for arr in arrs)

    # Load char_index-encoded training data from a CSV file.
    def load_char_data(self, file, data_size=None):
        """Read sentence1/sentence2/label columns and encode them as indices."""
        df = pd.read_csv(file)
        p = df['sentence1'].values[0:data_size]
        h = df['sentence2'].values[0:data_size]
        label = df['label'].values[0:data_size]
        p_c_index, h_c_index = self.char_index(p, h)
        return p_c_index, h_c_index, label

    # Encode two raw sentence lists (match/test-style input).
    def transfer_char_data(self, p, h):
        p_c_index, h_c_index = self.char_index(p, h)
        return p_c_index, h_c_index
def init_abcnn(abcnn_config: AbcnnConfig):
    """Create the singleton ABCNN model, build its vocabulary, restore weights."""
    model = AbcnnModel()
    model.load_char_vocab()
    model.load_model(abcnn_config.model_file)
    logging.getLogger('init_abcnn').info('init abcnn model SUCCESS !')
if __name__ == '__main__':
    abcnn = AbcnnModel()
    # predict: build vocab, load test pairs, restore weights, run inference.
    abcnn.load_char_vocab()
    p_test, h_test, y_test = abcnn.load_char_data('./input/test.csv', data_size=None)
    abcnn.load_model('../model/abcnn2.ckpt')
    prd = abcnn.predict(p_test, h_test)
    # train
    # abcnn.load_char_vocab()
    # p, h, y = abcnn.load_char_data('input/train.csv', data_size=None)
    # p_eval, h_eval, y_eval = abcnn.load_char_data('input/dev.csv', data_size=1000)
    # abcnn.train(p, h, y, p_eval, h_eval, y_eval, '../model/abcnn2.ckpt')
|
#!/usr/bin/python3
# Caesar-cipher brute force: print argv[1] shifted by every offset 1..25,
# one candidate per line.
import sys

a = 1
while a < 26:
    for i in sys.argv[1]:
        ch = ord(i) + a
        # Wrap within the lowercase ASCII range (122 == 'z'); assumes the
        # input is all lowercase a-z -- other characters come out garbled.
        if ch > 122:
            ch -= 26
        print(chr(ch), end="")
    print('')
    a += 1
|
def is_number(s):
    """Return True if `s` can be parsed as a float, else False.

    Also returns False for non-numeric objects such as None or lists (the
    original let float() raise TypeError for those).  Note that "nan" and
    "inf" parse as floats and therefore return True.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
# Python 2 script: pair each symbol line in data.txt with the most recent
# numeric (address-like) line and write "<addr>\t<tok0>\t<tok1>" rows.
count = 0
mem = 0
f = open("data.txt", 'r')
g = open("output2.txt", 'w')
for line in f:
    count = count + 1
    splitline = line.split()
    print splitline
    if splitline:
        if is_number(splitline[0]):
            # Numeric first token: remember it as the current address.
            mem = splitline[0]
        else:
            # NOTE(review): this condition is always True ('or' was probably
            # meant to be 'and'), so '{' and '}' lines are NOT skipped.
            if splitline[0] != '{' or splitline[0] != '}':
                try:
                    g.write(mem + '\t' + splitline[0] + '\t' + splitline[1])
                except:
                    # Lines with fewer than two tokens are skipped silently.
                    continue
                g.write('\n')
    # Progress indicator for very large inputs.
    if count % 1000000 == 0:
        print count
f.close()
g.close()
|
from django.shortcuts import render
def home_page(request):
    """Render the homepage template with its title/welcome/premium context."""
    return render(
        request,
        "home_page.html",
        {
            "title": "Hello World!",
            "welcome": "Welcome to the homepage",
            "premium_content": "YEAHHH",
        },
    )
def about_page(request):
    """Render the about-page template with its title and content."""
    return render(
        request,
        "about_page.html",
        {
            "title": "About Page",
            "content": "Welcome to the about page",
        },
    )
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.base.specs import (
AddressLiteralSpec,
AncestorGlobSpec,
DirGlobSpec,
DirLiteralSpec,
RawSpecsWithoutFileOwners,
RecursiveGlobSpec,
)
from pants.util.frozendict import FrozenDict
@pytest.mark.parametrize(
    "spec,expected",
    [
        (AddressLiteralSpec("dir"), "dir"),
        (AddressLiteralSpec("dir/f.txt"), "dir/f.txt"),
        (AddressLiteralSpec("dir", "tgt"), "dir:tgt"),
        (AddressLiteralSpec("dir", None, "gen"), "dir#gen"),
        (AddressLiteralSpec("dir", "tgt", "gen"), "dir:tgt#gen"),
        (
            AddressLiteralSpec("dir", None, None, FrozenDict({"k1": "v1", "k2": "v2"})),
            # Bug fix: k2 maps to "v2"; the expected value previously said "k2=v1".
            "dir@k1=v1,k2=v2",
        ),
        (AddressLiteralSpec("dir", "tgt", None, FrozenDict({"k": "v"})), "dir:tgt@k=v"),
        (AddressLiteralSpec("dir", "tgt", "gen", FrozenDict({"k": "v"})), "dir:tgt#gen@k=v"),
    ],
)
def test_address_literal_str(spec: AddressLiteralSpec, expected: str) -> None:
    """str() of an AddressLiteralSpec reproduces its command-line form.

    Renamed with the `test_` prefix so pytest actually collects and runs it.
    """
    assert str(spec) == expected
def assert_build_file_globs(
    specs: RawSpecsWithoutFileOwners,
    *,
    expected_build_globs: set[str],
    expected_validation_globs: set[str],
) -> None:
    """Assert the BUILD-file and validation globs computed for `specs`."""
    build_path_globs, validation_path_globs = specs.to_build_file_path_globs_tuple(
        build_patterns=["BUILD"], build_ignore_patterns=[]
    )
    assert set(build_path_globs.globs) == expected_build_globs
    assert set(validation_path_globs.globs) == expected_validation_globs
def test_dir_literal() -> None:
    """DirLiteralSpec matches only targets residing in exactly that directory."""
    spec = DirLiteralSpec("dir/subdir")
    assert spec.to_glob() == "dir/subdir/*"
    assert spec.matches_target_residence_dir("") is False
    assert spec.matches_target_residence_dir("dir") is False
    assert spec.matches_target_residence_dir("dir/subdir") is True
    assert spec.matches_target_residence_dir("dir/subdir/nested") is False
    assert spec.matches_target_residence_dir("another/subdir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(dir_literals=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD", "dir/BUILD", "dir/subdir/BUILD"},
        expected_validation_globs={"dir/subdir/*"},
    )
    # The empty string means the build root itself.
    spec = DirLiteralSpec("")
    assert spec.to_glob() == "*"
    assert spec.matches_target_residence_dir("") is True
    assert spec.matches_target_residence_dir("dir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(dir_literals=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD"},
        expected_validation_globs={"*"},
    )
def test_dir_glob() -> None:
    """DirGlobSpec matches targets in the named directory (non-recursive)."""
    spec = DirGlobSpec("dir/subdir")
    assert spec.to_glob() == "dir/subdir/*"
    assert spec.matches_target_residence_dir("") is False
    assert spec.matches_target_residence_dir("dir") is False
    assert spec.matches_target_residence_dir("dir/subdir") is True
    assert spec.matches_target_residence_dir("dir/subdir/nested") is False
    assert spec.matches_target_residence_dir("another/subdir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(dir_globs=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD", "dir/BUILD", "dir/subdir/BUILD"},
        expected_validation_globs={"dir/subdir/*"},
    )
    # The empty string means the build root itself.
    spec = DirGlobSpec("")
    assert spec.to_glob() == "*"
    assert spec.matches_target_residence_dir("") is True
    assert spec.matches_target_residence_dir("dir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(dir_globs=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD"},
        expected_validation_globs={"*"},
    )
def test_recursive_glob() -> None:
    """RecursiveGlobSpec matches the named directory and everything beneath it."""
    spec = RecursiveGlobSpec("dir/subdir")
    assert spec.to_glob() == "dir/subdir/**"
    assert spec.matches_target_residence_dir("") is False
    assert spec.matches_target_residence_dir("dir") is False
    assert spec.matches_target_residence_dir("dir/subdir") is True
    assert spec.matches_target_residence_dir("dir/subdir/nested") is True
    assert spec.matches_target_residence_dir("dir/subdir/nested/again") is True
    assert spec.matches_target_residence_dir("another/subdir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(recursive_globs=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD", "dir/BUILD", "dir/subdir/BUILD", "dir/subdir/**/BUILD"},
        expected_validation_globs={"dir/subdir/**"},
    )
    # The empty string recurses from the build root, matching every directory.
    spec = RecursiveGlobSpec("")
    assert spec.to_glob() == "**"
    assert spec.matches_target_residence_dir("") is True
    assert spec.matches_target_residence_dir("dir") is True
    assert spec.matches_target_residence_dir("another_dir") is True
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(recursive_globs=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD", "**/BUILD"},
        expected_validation_globs={"**"},
    )
def test_ancestor_glob() -> None:
    """AncestorGlobSpec matches the named directory and all of its ancestors."""
    spec = AncestorGlobSpec("dir/subdir")
    assert spec.matches_target_residence_dir("") is True
    assert spec.matches_target_residence_dir("dir") is True
    assert spec.matches_target_residence_dir("dir/subdir") is True
    assert spec.matches_target_residence_dir("dir/subdir/nested") is False
    assert spec.matches_target_residence_dir("another/subdir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(ancestor_globs=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD", "dir/BUILD", "dir/subdir/BUILD"},
        expected_validation_globs={"dir/subdir/*"},
    )
    # The empty string matches only the build root.
    spec = AncestorGlobSpec("")
    assert spec.matches_target_residence_dir("") is True
    assert spec.matches_target_residence_dir("dir") is False
    assert_build_file_globs(
        RawSpecsWithoutFileOwners(ancestor_globs=(spec,), description_of_origin="tests"),
        expected_build_globs={"BUILD"},
        expected_validation_globs={"*"},
    )
|
# Copyright 2018 Davide Spadini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydriller.repository import Repository
from datetime import datetime, timezone, timedelta
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO)
def test_one_timezone():
    """author_date of the fixture commit keeps its original UTC+2 offset."""
    lc = list(
        Repository('test-repos/branches_merged',
                   single='29e929fbc5dc6a2e9c620069b24e2a143af4285f').traverse_commits())
    to_zone = timezone(timedelta(hours=2))
    dt = datetime(2016, 4, 4, 13, 21, 25, tzinfo=to_zone)
    assert lc[0].author_date == dt
def test_between_dates_reversed():
    """author_date keeps a negative (UTC-4) offset.

    NOTE(review): the name suggests a reversed date-range test, but the body
    checks a single commit's timezone -- confirm the intended name.
    """
    lc = list(
        Repository('test-repos/different_files',
                   single='375de7a8275ecdc0b28dc8de2568f47241f443e9').traverse_commits())
    to_zone = timezone(timedelta(hours=-4))
    dt = datetime(2016, 10, 8, 17, 57, 49, tzinfo=to_zone)
    assert lc[0].author_date == dt
|
import matplotlib.pyplot as plt
import numpy as np
def f(x, y):
    """Elliptic paraboloid z = x^2 + y^2/4 (only the y term is divided by 4)."""
    return x * x + (y * y) / 4
# Sample f on a 300x300 grid over [-5, 5]^2 and draw filled contours.
x = np.linspace(-5, 5, 300)
y = np.linspace(-5, 5, 300)
xmesh, ymesh = np.meshgrid(x, y)
z = f(xmesh, ymesh)
# Four gray fill shades for the bands between the five contour levels.
colors = ["0.1", "0.3", "0.5", "0.7"]
levels = [1, 2, 3, 4, 5]
plt.contourf(x, y, z, colors=colors, levels=levels)
plt.show()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nrf24 import NRF24
import time
from time import gmtime, strftime
import MySQLdb
import xml.dom.minidom
import sys
verbose = 0
# Optional "-v" flag enables verbose console output (Python 2 script).
if len(sys.argv) > 1:
    if sys.argv[1] == "-v":
        verbose = 1
    else:
        print "Argument non reconnu ! -v pour verbose"
        sys.exit(0)
"""import données connexion SGL d'un fichier config en XML"""
# Load the MySQL connection settings from an XML config file.
tree = xml.dom.minidom.parse("/home/pi/nrf24/configSQL.xml")
valeurListe = tree.getElementsByTagName("SQL")
"""Variable de connexion R24"""
# nRF24 write/read pipe addresses.
pipes = [[0xf0, 0xf0, 0xf0, 0xf0, 0xe1], [0xf0, 0xf0, 0xf0, 0xf0, 0xd2]]
""" init variable"""
temperExt = '0.0'
"""Initialisation connexion R24"""
# Configure the nRF24 radio: channel 0x4c, 250 kbps, max power, auto-ack.
radio = NRF24()
radio.begin(0, 0,25,18) #set gpio 25 as CE pin
radio.setRetries(15,15)
radio.setPayloadSize(52)
radio.setChannel(0x4c)
radio.setDataRate(NRF24.BR_250KBPS)
radio.setPALevel(NRF24.PA_MAX)
radio.setAutoAck(1)
radio.openWritingPipe(pipes[0])
radio.openReadingPipe(1, pipes[1])
radio.startListening()
"""fin initialisation R24"""
"""Fonction pour extraire de la variable receptionnée les différentes valeurs"""
def extract(raw_string, start_marker, end_marker):
    """Return the substring between the first start_marker and the next end_marker.

    Raises ValueError (from str.index) when either marker is missing.
    """
    begin = raw_string.index(start_marker) + len(start_marker)
    return raw_string[begin:raw_string.index(end_marker, begin)]
if verbose:
    print
    print ("Attente réception du capteur")
    print
"""Boucle infinie de réception des données"""
# Infinite loop: receive a radio packet, timestamp it, extract the
# temperature, then insert a row into each configured MySQL database.
while True:
    pipe = [0]
    """Si pas de réception on attends ..."""
    # Poll the radio, sleeping ~1 ms between checks.
    while not radio.available(pipe, True):
        time.sleep(1000/1000000.0)
    recv_buffer = []
    radio.read(recv_buffer) # received bytes are stored into recv_buffer
    out = ''.join(chr(i) for i in recv_buffer) # join the buffer bytes into one string
    """Création de la date et heure en francais et en SQL"""
    now = time.localtime(time.time())
    year, month, day, hour, minute, second, weekday, yearday, daylight = now
    dateheure = "%02d/%02d/%04d" % (day, month, year) + " " + "%02d:%02d:%02d" % (hour, minute, second)
    dateheureSQL = "%04d-%02d-%02d" % (year, month, day) + " " + "%02d:%02d:%02d" % (hour, minute, second)
    """"La variable out est décortiquée avec les données de numéro de capteur, température, humidité, tension pile """
    # The temperature payload is delimited by 'E' markers, e.g. "...E21.5E...".
    temperExt=extract(out,'E','E')
    """affichage des données recues pour es teste"""
    if verbose:
        print (dateheure)
    if verbose:
        print ("La température Ext : " + temperExt + "°C")
        print
    """Connexion et insertion de la données dans la base"""
    for valeur in valeurListe:
        # connect to the database described by this XML <SQL> element
        db = MySQLdb.connect(valeur.attributes['ip'].value, valeur.attributes['login'].value, valeur.attributes['mdp'].value, valeur.attributes['dbase'].value)
        dbSQL = db.cursor()
        # NOTE(review): the temperature string is interpolated into the SQL;
        # a parameterized query would be safer even for sensor input.
        sql = "INSERT INTO `Temp_Ext`(`Date_Temp_Ext`, `Temp_Temp_Ext`)\
        VALUES ('" + dateheureSQL + "', " + temperExt + ")"
        if verbose:
            print sql
        dbSQL.execute(sql)
        db.commit()
        db.close()
|
import numpy as np
import glob
def getAllInDir(dir_filename):
    """Return every filesystem entry directly inside dir_filename (non-recursive glob)."""
    if dir_filename[-1] != "/":
        dir_filename += "/"
    return glob.glob("{}*".format(dir_filename))
def safe_crop_ltbr(image, x1, y1, x2, y2):
    """Crop `image` to the [left, top, right, bottom] box, clipped to the image.

    Returns a copy of the cropped region.  Raises ValueError when `image` is
    not a 3-d array or the box is empty/inverted.

    >>> safe_crop_ltbr(np.zeros((720, 1080, 3)), 0, 10, 100, 200).shape
    (190, 100, 3)
    >>> safe_crop_ltbr(np.zeros((720, 1080, 3)), 100, 100, 20, 200)
    Traceback (most recent call last):
    ValueError: {x,y}1 should be less than {x,y}2
    """
    if image is None or len(image.shape) != 3:
        raise ValueError("Image should be a 3d array")
    if x1 >= x2 or y1 >= y2:
        raise ValueError("{x,y}1 should be less than {x,y}2")
    height, width = image.shape[0], image.shape[1]
    left = int(np.clip(x1, 0, width))
    right = int(np.clip(x2, 0, width))
    top = int(np.clip(y1, 0, height))
    bottom = int(np.clip(y2, 0, height))
    return image[top:bottom, left:right].copy()
def ltwh_to_tlbr(bbox):  # input boxes are really [left, top, width, height]
    """Convert [left, top, width, height] to [top, left, bottom, right]."""
    left, top, width, height = bbox[0], bbox[1], bbox[2], bbox[3]
    return np.array([top, left, top + height, left + width])
def tlbr_to_ltrb(bbox):
    """Swap coordinate-pair order: (t, l, b, r) -> (l, t, r, b).

    The transform is an involution (its own inverse), so it is identical to
    ltrb_to_tlbr; the swap is inlined here.

    >>> ltbr = np.asarray([10, 20, 101, 122])
    >>> assert np.array_equal(tlbr_to_ltrb(tlbr_to_ltrb(ltbr)), ltbr)
    """
    return np.asarray([bbox[1], bbox[0], bbox[3], bbox[2]])
def ltrb_to_tlbr(bbox):
    """Swap coordinate-pair order: (l, t, r, b) -> (t, l, b, r); self-inverse."""
    l, t, r, b = bbox[0], bbox[1], bbox[2], bbox[3]
    return np.asarray([t, l, b, r])
def tlbr_to_ltwh(bbox):
    """Convert [top, left, bottom, right] to [left, top, width, height].

    >>> ltwh = tlbr_to_ltwh([10, 20, 101, 122])
    >>> print(ltwh_to_tlbr(ltwh))
    """
    top, left, bottom, right = bbox[0], bbox[1], bbox[2], bbox[3]
    return np.asarray([left, top, right - left, bottom - top])
def ltwh_to_xyah(ltwh_bbox):
    """Convert [left, top, width, height] to [center x, center y, aspect, height].

    The aspect ratio is width / height (NOT the area).  Works on lists and
    ndarrays alike via .copy(); the input is not modified.
    """
    xyah = ltwh_bbox.copy()
    xyah[0] += ltwh_bbox[2] / 2   # center x = left + width / 2
    xyah[1] += ltwh_bbox[3] / 2   # center y = top + height / 2
    xyah[2] /= ltwh_bbox[3]       # aspect = width / height
    return xyah
def xyah_to_ltwh(xyah_bbox):
    """Convert [center x, center y, aspect, height] back to [left, top, width, height].

    Inverse of ltwh_to_xyah; aspect is width / height.  Input is not modified.
    """
    ltwh = xyah_bbox.copy()
    ltwh[2] = xyah_bbox[2] * xyah_bbox[3]  # width = aspect * height
    ltwh[0] -= ltwh[2] / 2                 # left = center x - width / 2
    ltwh[1] -= ltwh[3] / 2                 # top = center y - height / 2
    return ltwh
def debug_signal_handler(signal, frame):
    """Signal handler that drops into pdb at the interrupted frame."""
    import pdb
    pdb.set_trace()
def pdb_on_ctrl_c():
    """Install debug_signal_handler on SIGINT so Ctrl-C opens the debugger."""
    import signal
    signal.signal(signal.SIGINT, debug_signal_handler)
|
# SCREEN SETTINGS
WIDTH = 720    # window width in pixels
HEIGHT = 500   # window height in pixels
BACKGROUND = (76, 175, 80)       # RGB background color (green)
PADDLE_COLOR = (255, 255, 255)   # RGB paddle color (white)
# GAME SETTINGS
FPS = 60  # target frames per second
# PADDLE1
PADDLE_WIDTH = 25
PADDLE_HEIGHT = 80
PADDLE_SPEED = 10  # pixels moved per frame
# BALL1
BALL_WIDTH = 20
BALL_HEIGHT = 20
BALL_COLOR = (255, 255, 255)  # RGB (white)
|
$ chmod +x test1.py
$ ./test1.py |
def factorialfun(number):
    """Return number! for positive integers; returns 1 for number <= 0."""
    result = 1
    for i in range(2, number + 1):
        result *= i
    return result
|
#!/usr/bin/python3
import validate
import wave_generator
# Generate candidate markers for each year, validate them, and dump the
# surviving (marker, year) pairs to a CSV file.
years = [2018,2019,2020,2021,2022,2023,2024,2025]
marks = []
ymap = []  # parallel list: ymap[i] is the year that produced marks[i]
for y in years:
    temp = wave_generator.generate_markers(y)
    marks.extend(temp)
    for i in range(len(temp)):
        ymap.append(y)
print(len(marks))
# vald[i] is truthy when marks[i] passes validation.
vald = validate.val(marks)
print(len(vald))
filt = [(marks[i],ymap[i]) for i in range(len(marks)) if vald[i]]
print(len(filt))
print(filt[0])
# CSV columns: year,,,,,, then the two marker coordinates (presumably
# lat/lon -- confirm); the empty columns keep a fixed downstream schema.
f = open("generated_locs.csv", "w")
for pnt in filt:
    f.write("%d,,,,,,%f,%f\n" % (pnt[1], pnt[0][0], pnt[0][1]))
f.close()
|
# from . import kitti_dataset
# from . import nuscenes_dataset
from .import lyft_dataset
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from dataclasses import dataclass
from typing import Iterable
from pants.backend.javascript import nodejs_project_environment
from pants.backend.javascript.nodejs_project import AllNodeJSProjects, NodeJSProject
from pants.backend.javascript.nodejs_project_environment import (
NodeJsProjectEnvironment,
NodeJsProjectEnvironmentProcess,
)
from pants.backend.javascript.package_json import PackageJsonTarget
from pants.backend.javascript.resolve import NodeJSProjectResolves
from pants.backend.javascript.subsystems.nodejs import UserChosenNodeJSResolveAliases
from pants.core.goals.generate_lockfiles import (
GenerateLockfile,
GenerateLockfileResult,
KnownUserResolveNames,
KnownUserResolveNamesRequest,
RequestedUserResolveNames,
UserGenerateLockfiles,
)
from pants.core.goals.tailor import TailorGoal
from pants.engine.internals.native_engine import AddPrefix, Digest
from pants.engine.internals.selectors import Get
from pants.engine.process import ProcessResult
from pants.engine.rules import Rule, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.docutil import bin_name
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import pluralize, softwrap
@dataclass(frozen=True)
class GeneratePackageLockJsonFile(GenerateLockfile):
    """Request to generate the lockfile for one NodeJS project."""

    # The project whose package manager will produce the lockfile.
    project: NodeJSProject
class KnownPackageJsonUserResolveNamesRequest(KnownUserResolveNamesRequest):
    """Marker request type: enumerate the package.json-based resolve names."""

    pass
class RequestedPackageJsonUserResolveNames(RequestedUserResolveNames):
    """Marker type: resolve names the user asked to generate lockfiles for."""

    pass
@rule
async def determine_package_json_user_resolves(
    _: KnownPackageJsonUserResolveNamesRequest,
    all_projects: AllNodeJSProjects,
    user_chosen_resolves: UserChosenNodeJSResolveAliases,
) -> KnownUserResolveNames:
    """Map every discovered NodeJS project to a resolve name.

    Each project's name is the user-chosen alias for its lockfile path when
    one is configured under [nodejs].resolves, otherwise the project's
    default resolve name.  Raises ValueError for configured aliases that
    match no discovered project.
    """
    names = FrozenOrderedSet(
        user_chosen_resolves.get(
            os.path.join(project.root_dir, project.lockfile_name), project.default_resolve_name
        )
        for project in all_projects
    )
    # Aliases the user configured but no project claimed indicate a missing or
    # un-owned package.json; fail with guidance instead of silently ignoring.
    unmatched_aliases = set(user_chosen_resolves.values()).difference(names)
    if unmatched_aliases:
        projects = pluralize(len(unmatched_aliases), "project", include_count=False)
        lockfiles = ", ".join(
            lockfile
            for lockfile, alias in user_chosen_resolves.items()
            if alias in unmatched_aliases
        )
        paths = pluralize(len(unmatched_aliases), "path", include_count=False)
        raise ValueError(
            softwrap(
                f"""
                No nodejs {projects} could be found for {lockfiles}, but
                some are configured under [nodejs].resolves.
                Ensure that a package.json file you intend to manage with pants has
                a corresponding BUILD file containing a `{PackageJsonTarget.alias}` target
                by running `{bin_name()} {TailorGoal.name} ::`.
                Also confirm that {lockfiles} would be generated by your
                chosen nodejs package manager at the specified {paths}.
                """
            )
        )
    return KnownUserResolveNames(
        names=tuple(names),
        option_name="[nodejs].resolves",
        requested_resolve_names_cls=RequestedPackageJsonUserResolveNames,
    )
@rule
async def setup_user_lockfile_requests(
    resolves: NodeJSProjectResolves,
    requested: RequestedPackageJsonUserResolveNames,
) -> UserGenerateLockfiles:
    """Turn each requested resolve name into a concrete lockfile-generation request."""
    return UserGenerateLockfiles(
        GeneratePackageLockJsonFile(
            resolve_name=name,
            lockfile_dest=os.path.join(resolves[name].root_dir, resolves[name].lockfile_name),
            diff=False,
            project=resolves[name],
        )
        for name in requested
    )
@rule
async def generate_lockfile_from_package_jsons(
    request: GeneratePackageLockJsonFile,
) -> GenerateLockfileResult:
    """Run the project's package manager to (re)generate its lockfile."""
    result = await Get(
        ProcessResult,
        NodeJsProjectEnvironmentProcess(
            env=NodeJsProjectEnvironment.from_root(request.project),
            args=request.project.generate_lockfile_args,
            description=f"generate {request.project.lockfile_name} for '{request.resolve_name}'.",
            output_files=(request.project.lockfile_name,),
        ),
    )
    # Re-root the captured lockfile under the project directory so it lands
    # next to the package.json files it describes.
    output_digest = await Get(Digest, AddPrefix(result.output_digest, request.project.root_dir))
    return GenerateLockfileResult(output_digest, request.resolve_name, request.lockfile_dest)
def rules() -> Iterable[Rule | UnionRule]:
    """Rules plus union registrations for package.json lockfile generation."""
    union_registrations = (
        UnionRule(GenerateLockfile, GeneratePackageLockJsonFile),
        UnionRule(KnownUserResolveNamesRequest, KnownPackageJsonUserResolveNamesRequest),
        UnionRule(RequestedUserResolveNames, RequestedPackageJsonUserResolveNames),
    )
    return (
        *collect_rules(),
        *nodejs_project_environment.rules(),
        *union_registrations,
    )
|
def decodeMorse(morse_code):
    """Decode a Morse code string into human-readable text.

    Per the kata spec (see the module docstring below): letters within a
    word are separated by a single space, words by three spaces. The
    original split both loops on a single space, so the "word" loop
    actually iterated over letters and inserted a space between every
    decoded character. Leading/trailing whitespace is ignored.

    Relies on the preloaded MORSE_CODE dict mapping codes to characters.
    """
    words = []
    for word in morse_code.strip().split("   "):
        # Skip empty tokens produced by stray extra spaces.
        letters = [MORSE_CODE[code] for code in word.split(" ") if code]
        words.append("".join(letters))
    return " ".join(words)
'''
In this kata you have to write a simple Morse code decoder. While the Morse code
is now mostly superseded by voice and digital data communication channels,
it still has its use in some applications around the world.
The Morse code encodes every character as a sequence of "dots" and "dashes".
For example, the letter A is coded as ·−, letter Q is coded as −−·−,
and digit 1 is coded as ·−−−. The Morse code is case-insensitive,
traditionally capital letters are used. When the message is written in Morse code,
a single space is used to separate the character codes and 3 spaces are used to separate words.
For example, the message HEY JUDE in Morse code is ···· · −·−− ·−−− ··− −·· ·.
In addition to letters, digits and some punctuation, there are some special service codes,
the most notorious of those is the international distress signal SOS
(that was first issued by Titanic), that is coded as ···−−−···. These special codes
are treated as single special characters, and usually are transmitted as separate words.
Your task is to implement a function that would take the morse code as input and return a decoded human-readable string.
For example:
decodeMorse('.... . -.-- .--- ..- -.. .')
#should return "HEY JUDE"
The Morse code table is preloaded for you as a dictionary.
All the test strings will contain valid Morse code.
'''
|
import pickletools
def protocol_version(file_object):
    """Return the highest pickle protocol required by the stream.

    Scans every opcode in *file_object* and reports the maximum protocol
    version any of them needs; -1 if the stream contains no opcodes.
    """
    return max(
        (opcode.proto for opcode, _arg, _pos in pickletools.genops(file_object)),
        default=-1,
    )
|
from django.contrib import admin
from models import Item, Label, Category, Subcategory, BagCount, Setting
# One minimal ModelAdmin per model: default admin behaviour with no
# customisation; each class is registered immediately after its definition.
class ItemAdmin(admin.ModelAdmin):
    pass
admin.site.register(Item, ItemAdmin)
class LabelAdmin(admin.ModelAdmin):
    pass
admin.site.register(Label, LabelAdmin)
class CategoryAdmin(admin.ModelAdmin):
    pass
admin.site.register(Category, CategoryAdmin)
class SubcategoryAdmin(admin.ModelAdmin):
    pass
admin.site.register(Subcategory, SubcategoryAdmin)
class BagCountAdmin(admin.ModelAdmin):
    pass
admin.site.register(BagCount, BagCountAdmin)
class SettingAdmin(admin.ModelAdmin):
    pass
admin.site.register(Setting, SettingAdmin)
|
import os
import json
from functools import lru_cache
# Directory containing this script, with symlinks and a relative __file__
# resolved to a real absolute path.
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
# The JSON configuration file is expected to live next to this script.
CONF_FILE_PATH = os.path.normpath(os.path.join(SCRIPT_DIR, "config.json"))
@lru_cache(maxsize=None)
def get_config():
    """Load config.json and cache the parsed result for the process lifetime."""
    with open(CONF_FILE_PATH, "r") as fp:
        return json.load(fp)
from datetime import datetime
from dateutil.relativedelta import relativedelta
from time import mktime
import time
import requests
import pandas as pd
import config
import os
# Collect recent last.fm scrobbles and append them to last_month_tracks.csv.
try:
    previous_data = pd.read_csv(os.path.join(config.DATADIR, "last_month_tracks.csv"))
except FileNotFoundError:
    # No prior data: start the collection window one month ago.
    last_date = int(mktime((datetime.now() - relativedelta(months=1)).timetuple()))
    previous_data_flag, collection_period = False, 0
else:
    # Resume one second after the newest stored scrobble.
    last_date = previous_data["date"].max() + 1
    previous_data_flag, collection_period = True, previous_data["collection_period"].max() + 1
now = int(mktime(datetime.now().timetuple()))
res = []
page = 1
totalPages = None
# BUG FIX: the original condition (`not totalPages or page < totalPages`)
# skipped the final page whenever totalPages > 1, and looped forever when
# the API reported totalPages == 0.
while totalPages is None or page <= totalPages:
    data = requests.get("http://ws.audioscrobbler.com/2.0",
                        {"method": "user.getrecenttracks",
                         "user": "tysonpo",
                         "limit": 200,
                         "page": page,
                         "from": last_date,
                         "to": now,
                         "api_key": config.lastfm_client_id,
                         "format": "json"}).json()["recenttracks"]
    if page == 1:
        totalPages = int(data["@attr"]["totalPages"])
    # if a track is 'now playing' then it will not have a date. we'll avoid collecting this
    data = [track for track in data["track"] if "date" in track]
    res.extend([[track["name"],
                 track["artist"]["#text"],
                 track["album"]["#text"],
                 track["date"]["uts"],
                 collection_period] for track in data])
    page += 1
    time.sleep(2)  # rate-limit courtesy between API calls
new_data = pd.DataFrame(data=res, columns=["track", "artist", "album", "date", "collection_period"])
if previous_data_flag:
    combined_data = pd.concat([new_data, previous_data])
    combined_data.to_csv(os.path.join(config.DATADIR, "last_month_tracks.csv"), index=False)
else:
    new_data.to_csv(os.path.join(config.DATADIR, "last_month_tracks.csv"), index=False)
# Generated by Django 2.2.11 on 2021-08-24 11:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the nullable FK PatientExternalTest.block -> users.Block."""

    dependencies = [
        ('users', '0043_auto_20210824_1534'),
        ('facility', '0271_auto_20210815_1617'),
    ]
    operations = [
        migrations.AddField(
            model_name='patientexternaltest',
            name='block',
            # PROTECT: a users.Block row cannot be deleted while tests reference it.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='users.Block'),
        ),
    ]
|
#encoding: utf-8
import random
import sys
# NOTE: Python 2 script (print statements). Draws N Gaussian samples and
# prints a normalized histogram over mean +/- 3 sigma.
if len(sys.argv) != 2 :
    print 'Args: número'
    sys.exit(1)
# Number of samples to draw (accepts e.g. "1e6" via float()).
veces = int(float(sys.argv[1]))
media = 100
sigma = 20
# Histogram range: [mean - 3*sigma, mean + 3*sigma].
indice0 = media - 3*sigma
indice1 = media + 3*sigma
print "# ", indice0, indice1
amplitud = indice1-indice0 + 1
varreglo = [0]*amplitud  # per-value sample counts
i=0
while i < veces:
    n = random.gauss(media,sigma)
    # Round to the nearest integer and shift into the bin index range.
    v = int(n+0.5) - indice0
    if v >= 0 and v<amplitud :
        varreglo[v] += 1
    i += 1
# Emit (value, relative frequency) pairs.
i = 0
while i<amplitud :
    v = i + indice0
    norma = float(varreglo[i])/veces
    #print v, varreglo[i]
    print v, norma
    i+=1
|
#!/usr/bin/env python3
from certCheck import CertCheck
from loadEnv import load as loadEnvVariables
from printSubprocessStdout import printSubprocessStdout
from subprocess import check_output
def relative(subpath='', useCwd=False):
    """Resolve *subpath* (with ~ expansion) to a normalized absolute path.

    The base is the current working directory when *useCwd* is true,
    otherwise the directory containing this script.
    """
    import os
    if useCwd:
        base = os.getcwd()
    else:
        base = os.path.dirname(os.path.abspath(__file__))
    return os.path.normpath(os.path.join(base, os.path.expanduser(subpath)))
print('Cert check cron job started.')
loadEnvVariables()
print('Loaded environment variables.')
certCheck = CertCheck()
# Reload the server only when a certificate file is newer than the last
# recorded timestamp (or no timestamp has been recorded yet).
if certCheck.shouldServerUpdate():
    print('Saved timestamp is older than at least one of certificate files or there is no saved timestamp yet. Using newest certificate files by reloading the server.')
    printSubprocessStdout(message=check_output(relative('server/reloadServer')), colors=False)
    print('Server reload requested.')
    # Record that the server now uses the newest certificates.
    certCheck.updateTimestamp()
else:
    print('No action required.')
print('Cert check cron job finished.')
from django.http import HttpResponse,JsonResponse
from django.shortcuts import render,redirect
from datetime import datetime
from django.views.generic import View
from django.contrib.auth.models import User, Group, auth
from django.contrib import messages
from rest_framework import viewsets
from rest_framework import permissions
from testapp.serializers import UserSerializer
from testapp.models import tb_content
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest accounts first.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
    # Only authenticated users may access this endpoint.
    permission_classes = [permissions.IsAuthenticated]
def uploadContentData(request):
    """Persist a new tb_content row from the posted title/content fields."""
    new_row = tb_content(
        userid=1,  # NOTE(review): user id is hard-coded — presumably a placeholder; confirm
        Title=request.POST["title"],
        content=request.POST["content"],
    )
    new_row.save()
    return redirect("/")
# Create your views here.
def index(request):
    """Render the landing page: all content rows, plus row #1 as detail."""
    contents = tb_content.objects.all()
    # Use a bound parameter instead of building SQL via string concatenation —
    # safer and keeps the query style uniform across views.
    cont = tb_content.objects.raw(
        'SELECT * FROM testapp_tb_content WHERE s_no = %s', ["1"]
    )
    return render(request, "index.html", {"contents": contents, "details": cont})
def getActiveData(request, card_id):
    """Render the landing page with the row matching *card_id* as detail."""
    contents = tb_content.objects.all()
    # SECURITY FIX: card_id comes from the URL; the original concatenated it
    # straight into the SQL string (SQL injection). Pass it as a bound
    # parameter instead.
    cont = tb_content.objects.raw(
        'SELECT * FROM testapp_tb_content WHERE s_no = %s', [card_id]
    )
    return render(request, "index.html", {"contents": contents, "details": cont})
def register(request):
    """Render the sign-up form on GET; create a new account on POST."""
    if request.method != "POST":
        return render(request, "register.html")

    form = request.POST
    first_name = form["fname"]
    last_name = form["lname"]
    username = form["uname"]
    email = form["email"]
    password = form["password"]
    confirm_password = form["confirmPassword"]

    # Passwords must match before anything else is checked.
    if password != confirm_password:
        messages.info(request, "User password is not same.")
        return redirect('/register')

    # Reject duplicate e-mail addresses.
    if User.objects.filter(email=email).exists():
        messages.info(request, "user already exist")
        return redirect('/register')

    new_user = User.objects.create_user(
        username=username,
        password=password,
        email=email,
        first_name=first_name,
        last_name=last_name,
    )
    new_user.save()
    messages.info(request, "user created successfully.")
    print("user created")
    return redirect('login')
def login(request):
    """Render the login form on GET; authenticate and log in on POST."""
    if request.method != "POST":
        return render(request, "login.html")

    user = auth.authenticate(
        username=request.POST["username"],
        password=request.POST["password"],
    )
    if user is None:
        messages.info(request, "invalid credentials")
        return redirect("login")

    auth.login(request, user)
    contents = tb_content.objects.all()  # kept from original (queryset is unused)
    return redirect("/")
def logout(request):
    """Log the current user out and return to the login page."""
    auth.logout(request)
    return redirect("login")
from entity.response_wrapper import Response
|
from resourse import db
class Teacher(db.Model):
    """Database model for a teacher account."""
    __tablename__ = 'teacher'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(255), nullable=True)
    # NOTE(review): nothing here hashes the password — confirm hashing
    # happens before assignment, otherwise this stores plain text.
    password = db.Column(db.String(255), nullable=True)
    e_mail = db.Column(db.String(255))
    create_time = db.Column(db.DateTime)
    def as_dict(self):
        # Map each column name to its current value on this instance.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
    def __repr__(self):
        return '<Teacher {}>'.format(self.name)
|
def letter_count(s):
    """Return a dict mapping each character of *s* to its occurrence count.

    Single O(n) pass; the original recounted the whole string for every
    character (`s.count(i)` per loop iteration, O(n^2)) and misspelled the
    accumulator name ('outut').
    """
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
|
import unittest
from Pyskell.Language.TypeClasses import *
from Pyskell.Language.EnumList import L
class ShowTest(unittest.TestCase):
    """Checks that Pyskell's `show` operator mirrors Python's str()."""
    def setUp(self):
        # One representative value per supported builtin type, plus a
        # Pyskell enumerated list built with L[...].
        self.int_test = 1
        self.float_test = 1.1
        self.string_test = "some string"
        self.list_test = [1, 2, 3]
        self.set_test = {1, 1, 4, 5, 1, 4}
        self.complex_test = complex(12, 34)
        self.dict_test = {'p1': 1, 'p2': 2}
        self.haskell_list_test = L[1, 2, 3]
    def test_show(self):
        # `show % x` should match str(x) for plain Python values...
        self.assertEqual(str(self.int_test), show % self.int_test)
        self.assertEqual(str(self.float_test), show % self.float_test)
        self.assertEqual(str(self.string_test), show % self.string_test)
        self.assertEqual(str(self.list_test), show % self.list_test)
        self.assertEqual(str(self.set_test), show % self.set_test)
        self.assertEqual(str(self.complex_test), show % self.complex_test)
        self.assertEqual(str(self.dict_test), show % self.dict_test)
        # ...and use the L[...] notation for Pyskell lists.
        self.assertEqual("L[1, 2, 3]", show % self.haskell_list_test)
|
import unittest
from katas.beta.vowel_shifting import vowel_shift
class VowelShiftTestCase(unittest.TestCase):
    """Unit tests for vowel_shift(text, n): rotate the text's vowels by n."""
    def test_is_none_1(self):
        # None input propagates as None.
        self.assertIsNone(vowel_shift(None, 0))
    def test_equal_1(self):
        self.assertEqual(vowel_shift('', 0), '')
    def test_equal_2(self):
        # Zero shift leaves the text unchanged.
        self.assertEqual(vowel_shift('This is a test!', 0), 'This is a test!')
    def test_equal_3(self):
        self.assertEqual(vowel_shift('This is a test!', 1), 'Thes is i tast!')
    def test_equal_4(self):
        self.assertEqual(vowel_shift('This is a test!', 3), 'This as e tist!')
    def test_equal_5(self):
        # Shifting by the vowel count (4 here) wraps around to a no-op.
        self.assertEqual(vowel_shift('This is a test!', 4), 'This is a test!')
    def test_equal_6(self):
        # Negative shifts rotate in the opposite direction.
        self.assertEqual(vowel_shift('This is a test!', -1), 'This as e tist!')
    def test_equal_7(self):
        self.assertEqual(vowel_shift('This is a test!', -5), 'This as e tist!')
    def test_equal_8(self):
        # No vowels: nothing to shift, regardless of n.
        self.assertEqual(vowel_shift('Brrrr', 99), 'Brrrr')
    def test_equal_9(self):
        self.assertEqual(vowel_shift('AEIOUaeiou', 1), 'uAEIOUaeio')
|
import sys
import numpy as np
from dateutil import parser
import json
from prime_mcmc import ammcmc
from prime_posterior import logpost, logpost_negb, logpost_poisson
from prime_utils import runningAvg, compute_error_weight
def main(setupfile):
    r"""
    Driver script to run MCMC for parameter inference for a multi-wave
    epidemic model. Currently limited to up to three infection curves.
    To run this script:
    python <path-to-this-directory>/prime_run.py <name-of-json-input-file>
    Parameters
    ----------
    setupfile: string
        json format input file with information on observations data, filtering options,
        MCMC options, and postprocessing options. See "setup_template.json" for a detailed
        example
    """
    #-------------------------------------------------------
    # BUG FIX: the original ignored the `setupfile` parameter and re-read
    # sys.argv[1] (here and in the two sys.exit messages below), so main()
    # could not be called programmatically. Also use a context manager so
    # the setup file handle is closed.
    with open(setupfile) as setup_fh:
        run_setup = json.load(setup_fh)
    print("=====================================================")
    print(run_setup)
    print("=====================================================")
    #-------------------------------------------------------
    # definitions
    fdata = run_setup["regioninfo"]["regionname"]+".dat"
    fchno = run_setup["regioninfo"]["fchain"]
    day0 = run_setup["regioninfo"]["day0"]
    #-------------------------------------------------------
    # determine model type
    model_type = run_setup["mcmcopts"]["model_type"]
    if model_type == "oneWave":
        print("Running MCMC with one-wave model")
    elif model_type == "twoWave":
        print("Running MCMC with two-wave model")
    elif model_type == "threeWave":
        print("Running MCMC with three-wave model")
    else:
        sys.exit("Did not recognize 'model_type' specified in {}\n Options are 'oneWave', 'twoWave', or 'threeWave'".format(setupfile))
    #-------------------------------------------------------
    # determine error model type
    error_model_type = run_setup["mcmcopts"]["error_model_type"]
    if error_model_type == "add":
        print("Additive error model selected")
    elif error_model_type == "addMult":
        print("Additive & Multiplicative error model selected")
    else:
        sys.exit("Did not recognize 'error_model_type' specified in {}\n Options are 'add' or 'addMult'".format(setupfile))
    #-------------------------------------------------------
    # ensure that inputs are consistent with specified model
    # and error model type
    def check_param_lengths(n_param, model_type, error_model_type):
        # Every vector-valued option must match the parameter count implied
        # by the (model_type, error_model_type) combination.
        e_message = " needs to be length {} for a '{}' model with '{}' error model".format(n_param, model_type, error_model_type)
        assert len(run_setup["mcmcopts"]["cini"]) == n_param, "cini"+e_message
        assert len(run_setup["mcmcopts"]["cvini"]) == n_param, "cvini"+e_message
        assert len(run_setup["mcmcopts"]["spllo"]) == n_param, "spllo"+e_message
        assert len(run_setup["mcmcopts"]["splhi"]) == n_param, "splhi"+e_message
        assert len(run_setup["bayesmod"]["prior_types"]) == n_param, "prior_types"+e_message
        assert len(run_setup["bayesmod"]["prior_info"]) == n_param, "prior_info"+e_message
    # Expected lengths: 5/9/13 for one/two/three waves with "add";
    # "addMult" needs one extra parameter.
    if model_type == "oneWave":
        if error_model_type == "add":
            check_param_lengths(5, model_type, error_model_type)
        else:
            check_param_lengths(6, model_type, error_model_type)
    elif model_type == "twoWave":
        if error_model_type == "add":
            check_param_lengths(9, model_type, error_model_type)
        else:
            check_param_lengths(10, model_type, error_model_type)
    else:
        if error_model_type == "add":
            check_param_lengths(13, model_type, error_model_type)
        else:
            check_param_lengths(14, model_type, error_model_type)
    #-------------------------------------------------------
    # extract data from raw data
    rawdata = np.loadtxt(fdata, dtype=str)
    ndays = rawdata.shape[0]
    # Column 0: date strings; convert to integer day offsets from day0.
    days_since_day0 = np.array([(parser.parse(rawdata[i,0])-parser.parse(day0)).days for i in range(ndays)])
    if "running_avg_obs" in run_setup["regioninfo"]:
        new_cases = runningAvg(np.array([float(rawdata[i,1]) for i in range(rawdata.shape[0])]),
                               run_setup["regioninfo"]["running_avg_obs"])
        print("Taking {}-day running average of observations".format(run_setup["regioninfo"]["running_avg_obs"]))
    else:
        new_cases = np.array([float(rawdata[i,1]) for i in range(rawdata.shape[0])])
    # get sigma for the incubation model
    incubation_median = run_setup["incopts"]["incubation_median"]
    incubation_sigma = run_setup["incopts"]["incubation_sigma"]
    #-------------------------------------------------------
    # mcmc
    opts = {"nsteps": run_setup["mcmcopts"]["nsteps"],
            "nfinal": run_setup["mcmcopts"]["nfinal"],
            "gamma": run_setup["mcmcopts"]["gamma"],
            "inicov": np.array(run_setup["mcmcopts"]["cvini"]),
            "spllo": np.array(run_setup["mcmcopts"]["spllo"]),
            "splhi": np.array(run_setup["mcmcopts"]["splhi"]),
            "logfile": run_setup["mcmcopts"]["logfile"],
            "nburn": 1000, "nadapt": 100, "coveps": 1.e-10, "burnsc": 5,
            "ndr": 2, "drscale": [5, 4, 3], "ofreq": 5000, "tmpchn": "tmpchn"
            }
    # This will allow full covariance matrices
    # to be passed for inicov, or just the diagonal [tporton]
    if len(opts['inicov'].shape) == 1:
        opts['inicov'] = np.diag(opts['inicov'])
    if "incubation_model" in run_setup["incopts"]:
        inc_model = run_setup["incopts"]["incubation_model"]
    else:
        inc_model = "lognormal"
    error_weight = None
    if "error_weight" in run_setup["mcmcopts"]:
        print("Applying weighting to error term")
        error_weight = compute_error_weight(run_setup["mcmcopts"]["error_weight"], days_since_day0)
    modelinfo = {"model_type": model_type,
                 "error_model_type": error_model_type,
                 "error_weight": error_weight,
                 "days_since_day0": days_since_day0,
                 "new_cases": new_cases,
                 "incubation_model": inc_model,
                 "incubation_median": incubation_median,
                 "incubation_sigma": incubation_sigma,
                 "inftype": "gamma",
                 "days_extra": 0,
                 "prior_types": run_setup["bayesmod"]["prior_types"],
                 "prior_info": run_setup["bayesmod"]["prior_info"]}
    # Convolution vs Quadrature:
    # -The user can choose to use a fft convolution instead of
    #  quadrature to perform the integration of Y(t)
    # -default is set to zero if the user defines nothing
    # -To set, add "useconv":1 to the mcmcopts in the *json file
    if "useconv" in run_setup["mcmcopts"]:
        modelinfo["useconv"] = run_setup["mcmcopts"]["useconv"]
        if modelinfo["useconv"] == 1:
            print("Using fft convolution instead of quadrature")
    if "incubation_type" in run_setup["mcmcopts"]:
        modelinfo["incubation_type"] = run_setup["mcmcopts"]["incubation_type"]
        print("Using incubation type:", modelinfo["incubation_type"])
    else:
        print("Using fixed incubation type")
    # choose log-posterior function
    if "likl_type" in run_setup["mcmcopts"]:
        lliktype = run_setup["mcmcopts"]["likl_type"]
        print("Using %s likelihood" % (lliktype))
        if lliktype == "gaussian":
            lpf = logpost
        elif lliktype == "negative_binomial":
            lpf = logpost_negb
        elif lliktype == "poisson":
            lpf = logpost_poisson
        else:
            print("Unknown likelihood construction")
            quit()
    else:
        lpf = logpost
    if "likl_type" in run_setup["mcmcopts"]:
        lliktype = run_setup["mcmcopts"]["likl_type"]
        if lliktype == "poisson":
            # Precompute sum(log(k!)) over positive observed counts; constant
            # term of the Poisson log-likelihood.
            modelinfo["sumLogK"] = sum([sum([np.log(i) for i in range(1, int(k)+1)]) for k in new_cases if k > 0])
    sol = ammcmc(opts, np.array(run_setup["mcmcopts"]["cini"]), lpf, modelinfo)
    #-------------------------------------------------------------------------------------------
    # save mcmc output (the dataset handles returned by create_dataset were
    # unused in the original; drop the dead locals)
    import h5py
    f = h5py.File(fchno, 'w')
    f.create_dataset("chain", data=sol['chain'], compression="gzip")
    f.create_dataset("mode", data=sol['cmap'], compression="gzip")
    f.create_dataset("modepost", data=[sol['pmap']], compression="gzip")
    f.create_dataset("minfo", data=sol['minfo'], compression="gzip")
    f.create_dataset("accr", data=[sol['accr']], compression="gzip")
    f.create_dataset("final_cov", data=sol['final_cov'], compression="gzip")
    f.close()
if __name__ == '__main__':
    main(sys.argv[1])
|
import argparse
def _add_common_args(arg_parser):
    """Register options shared by the train and eval argument parsers.

    Mutates *arg_parser* in place; returns None.
    """
    arg_parser.add_argument('--config', type=str)
    # Input
    arg_parser.add_argument('--types_path', type=str, help="Path to type specifications")
    # Preprocessing
    arg_parser.add_argument('--tokenizer_path', type=str, help="Path to tokenizer")
    arg_parser.add_argument('--max_span_size', type=int, default=10, help="Maximum size of spans")
    arg_parser.add_argument('--lowercase', action='store_true', default=False,
                            help="If true, input is lowercased during preprocessing")
    arg_parser.add_argument('--sampling_processes', type=int, default=4,
                            help="Number of sampling processes. 0 = no multiprocessing for sampling")
    arg_parser.add_argument('--sampling_limit', type=int, default=100, help="Maximum number of sample batches in queue")
    # Logging
    arg_parser.add_argument('--label', type=str, help="Label of run. Used as the directory name of logs/models")
    arg_parser.add_argument('--log_path', type=str, help="Path do directory where training/evaluation logs are stored")
    arg_parser.add_argument('--store_predictions', action='store_true', default=False,
                            help="If true, store predictions on disc (in log directory)")
    arg_parser.add_argument('--store_examples', action='store_true', default=False,
                            help="If true, store evaluation examples on disc (in log directory)")
    arg_parser.add_argument('--example_count', type=int, default=None,
                            help="Count of evaluation example to store (if store_examples == True)")
    arg_parser.add_argument('--debug', action='store_true', default=False, help="Debugging mode on/off")
    # Model / Training / Evaluation
    arg_parser.add_argument('--model_path', type=str, help="Path to directory that contains model checkpoints")
    arg_parser.add_argument('--model_type', type=str, default="spert", help="Type of model")
    arg_parser.add_argument('--cpu', action='store_true', default=False,
                            help="If true, train/evaluate on CPU even if a CUDA device is available")
    arg_parser.add_argument('--eval_batch_size', type=int, default=1, help="Evaluation batch size")
    arg_parser.add_argument('--max_pairs', type=int, default=1000,
                            help="Maximum entity pairs to process during training/evaluation")
    arg_parser.add_argument('--rel_filter_threshold', type=float, default=0.4, help="Filter threshold for relations")
    arg_parser.add_argument('--size_embedding', type=int, default=25, help="Dimensionality of size embedding")
    arg_parser.add_argument('--prop_drop', type=float, default=0.1, help="Probability of dropout used in SpERT")
    arg_parser.add_argument('--freeze_transformer', action='store_true', default=False, help="Freeze BERT weights")
    arg_parser.add_argument('--no_overlapping', action='store_true', default=False,
                            help="If true, do not evaluate on overlapping entities "
                                 "and relations with overlapping entities")
    # Misc
    arg_parser.add_argument('--seed', type=int, default=None, help="Seed")
    arg_parser.add_argument('--cache_path', type=str, default=None,
                            help="Path to cache transformer models (for HuggingFace transformers library)")
def train_argparser():
    """Build and return the argument parser for training runs."""
    parser = argparse.ArgumentParser()
    # Input
    parser.add_argument('--train_path', type=str, help="Path to train dataset")
    parser.add_argument('--valid_path', type=str, help="Path to validation dataset")
    # Logging
    parser.add_argument('--save_path', type=str, help="Path to directory where model checkpoints are stored")
    parser.add_argument('--init_eval', action='store_true', default=False,
                        help="If true, evaluate validation set before training")
    parser.add_argument('--save_optimizer', action='store_true', default=False,
                        help="Save optimizer alongside model")
    parser.add_argument('--train_log_iter', type=int, default=1, help="Log training process every x iterations")
    parser.add_argument('--final_eval', action='store_true', default=False,
                        help="Evaluate the model only after training, not at every epoch")
    # Model / Training
    parser.add_argument('--train_batch_size', type=int, default=2, help="Training batch size")
    parser.add_argument('--epochs', type=int, default=20, help="Number of epochs")
    parser.add_argument('--neg_entity_count', type=int, default=100,
                        help="Number of negative entity samples per document (sentence)")
    parser.add_argument('--neg_relation_count', type=int, default=100,
                        help="Number of negative relation samples per document (sentence)")
    parser.add_argument('--lr', type=float, default=5e-5, help="Learning rate")
    parser.add_argument('--lr_warmup', type=float, default=0.1,
                        help="Proportion of total train iterations to warmup in linear increase/decrease schedule")
    parser.add_argument('--weight_decay', type=float, default=0.01, help="Weight decay to apply")
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help="Maximum gradient norm")
    # Options shared with evaluation.
    _add_common_args(parser)
    return parser
def eval_argparser():
    """Build and return the argument parser for evaluation runs."""
    parser = argparse.ArgumentParser()
    # Input
    parser.add_argument('--dataset_path', type=str, help="Path to dataset")
    # Options shared with training.
    _add_common_args(parser)
    return parser
|
import turtle
import math
# Shared turtle used by all drawing helpers below.
bob = turtle.Turtle()
bob.speed(10)  # fast drawing (turtle speed scale: 1 slow .. 10 fast, 0 fastest)
#making the fibonaccis sequence
def fib(n):
    """Return the n-th Fibonacci number, with fib(0) == fib(1) == 1.

    Iterative O(n) replacement for the original double recursion, which
    was exponential in n (noticeably slow already at fib(13) call depth).
    """
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a
#making fibonacci squares
def make_square(n):
    """Draw a figure of side length ``n`` using the module-level turtle.

    NOTE(review): the loop runs 6 times (forward + 90-degree left turn),
    i.e. six sides rather than a square's four, and the trailing right(90)
    re-orients the turtle for the next call. Presumably intentional for the
    spiral layout — confirm before changing.
    """
    for i in range(6):
        bob.forward(n)
        bob.left(90)
    bob.right(90)
#start with n = 1
def run_simulation(n):
    """Draw n Fibonacci 'squares' of increasing side length."""
    for step in range(n):
        make_square(fib(step))
def draw_spiral(n):
    """Draw n quarter-circle arcs with Fibonacci radii via the shared turtle."""
    for i in range(n):
        # Pen colour fades toward blue as i approaches n.
        bob.pencolor((1-i/float(n),1-i/float(n),i/float(n)))
        bob.circle(fib(i),90)
# Draw the squares, return to the origin, then overlay the spiral.
run_simulation(13)
bob.penup()
bob.setposition(0,0)
bob.pendown()
bob.right(90)
draw_spiral(13)
# Keep the window open until the user closes it.
turtle.done()
|
from django.http import HttpResponse
from django.core import serializers
import tushare as ts
import json
from pandas import DataFrame
def get_price_data(request):
    """HTTP endpoint: return historical price data for a stock as JSON.

    Expects GET parameters 'stockcode', 'begindate' and 'enddate'; responds
    with the tushare DataFrame serialized in 'split' orientation, or a JSON
    error string when parameters are missing or fetching fails.
    """
    stockCode = getRequestParameter(request, 'stockcode')
    beginDate = getRequestParameter(request, 'begindate')
    endDate = getRequestParameter(request, 'enddate')
    # FIX: removed the original dead assignment to the misspelled, never-read
    # variable `json_date`; every branch below sets json_data.
    if stockCode != '' and beginDate != '' and endDate != '':
        df = ts.get_hist_data(stockCode, beginDate, endDate)
        if df is not None and not df.empty:
            json_data = df.to_json(orient='split')
        else:
            # FIX: corrected the misspelled user-facing message ('fectching').
            json_data = json.dumps(('fetching error'))
    else:
        json_data = json.dumps(('please check your parameter'))
    return HttpResponse(json_data, content_type='application/json')
def getRequestParameter(request, parameterName):
    """Return the named GET parameter, or '' if absent or not a GET request."""
    if request.method == 'GET':
        return request.GET.get(parameterName, '')
    return ''
|
import utils
import os
import time
import torch
import torch.utils.data
import torchvision.transforms as transforms
import skimage.color as skcolor
import skimage.io as skio
import skimage.filters as skfilters
import skimage.feature as skfeature
import numpy as np
from tqdm import tqdm
from utils import normalize, tensor2im
from PIL import Image
from torch.autograd import Variable
def convert_image(img, model, transformers):
    """Run one image through the model and return the generated image.

    NOTE(review): assumes `model` follows a set_input / test /
    get_current_visuals API (CycleGAN-style) and that
    visuals['fake_B'] is an array accepted by Image.fromarray
    (HxWxC uint8) — confirm against the model implementation.
    """
    # Apply transformations and normalizations
    a_img = transformers(img)
    # Prepend a batch dimension of 1 before feeding the model.
    a_img = a_img.view(1, a_img.size(0), a_img.size(1), a_img.size(2))
    a_img = normalize(a_img, -1, 1)
    # Random placeholder for the B side; only the generated fake_B is used.
    b_img = torch.randn(a_img.size())
    # Get fake_y (generated output)
    model.set_input({
        'A': a_img,
        'B': b_img
    })
    model.test()
    visuals = model.get_current_visuals()
    return Image.fromarray(visuals['fake_B'])
|
#!/usr/bin/env python
'''
Custom operations to check annotations
'''
#####################
# IMPORT OPERATIONS #
#####################
import GenerationOps as GnOps
import GlobalVariables as GlobVars
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation
from Bio.SeqFeature import CompoundLocation
from unidecode import unidecode
from itertools import chain
###############
# AUTHOR INFO #
###############
__author__ = 'Michael Gruenstaeudl <m.gruenstaeudl@fu-berlin.de>'
__copyright__ = 'Copyright (C) 2016-2020 Michael Gruenstaeudl'
__info__ = 'annonex2embl'
__version__ = '2020.03.08.1700'
#############
# DEBUGGING #
#############
#import ipdb
#ipdb.set_trace()
###########
# CLASSES #
###########
class AnnoCheck:
''' This class contains functions to evaluate the quality of an
annotation.
Args:
extract (obj): a sequence object; example: Seq('ATGGAGTAA',
IUPACAmbiguousDNA())
feature_object (obj): a feature object
record_id (str): a string deatiling the name of the sequence in
question; example: "taxon_A"
transl_table (int): an integer; example: 11 (for bacterial code)
Returns:
tupl. The return consists of the translated sequence (a str)
and the updated feature location (a location object);
example: (transl_out, feat_loc)
Raises:
Exception
'''
def __init__(self, extract, feature, record_id, transl_table=11):
self.extract = extract
self.feature = feature
self.record_id = record_id
self.transl_table = transl_table
@staticmethod
def _transl(extract, transl_table, to_stop=False, cds=False):
''' An internal static function to translate a coding region. '''
# Note: Suppressing warnings necessary to suppress the Biopython
# warning about an annotation not being a multiple of three
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
transl = extract.translate(table=transl_table,
to_stop=to_stop, cds=cds)
# Adjustment for non-start codons given the necessary use of
# cds=True in TPL.
if not extract.startswith(GlobVars.nex2ena_start_codon):
first_codon_seq = extract[0:3]
first_aa = first_codon_seq.translate(table=transl_table,
to_stop=to_stop, cds=False)
transl = first_aa + transl[1:]
return transl
@staticmethod
def _check_protein_start(extract, transl_table):
''' An internal static function to translate a coding region and check
if it starts with a methionine. '''
transl = extract.translate(table=transl_table)
return transl.startswith('M')
@staticmethod
def _adjust_feat_loc(location_object, transl_with_internStop,
transl_without_internStop):
''' An internal static function to adjust the feature location if an
internal stop codon were present. '''
if len(transl_without_internStop) > len(transl_with_internStop):
# 1. Unnest the nested lists
contiguous_subsets = [list(range(e.start.position, e.end.position))
for e in location_object.parts]
compound_integer_range = sum(contiguous_subsets, [])
# 2. Adjust location range
len_with_internStop = len(transl_with_internStop) * 3
# IMPORTANT!: In TFL, the "+3" is for the stop codon, which is
# counted in the location range, but is not part of the AA
# sequence of the translation.
adjusted_range = compound_integer_range[:(len_with_internStop+3)]
# 3. Establish location
feat_loc = GnOps.GenerateFeatLoc().make_location(adjusted_range)
if len(transl_without_internStop) == len(transl_with_internStop):
feat_loc = location_object
return feat_loc
def check(self):
    ''' Perform quality checks on a coding region (CDS).

    First, a direct translation with Biopython's internal checker
    ("cds=True") is attempted. If that fails, translations are
    conducted with and without regard to internal stop codons; the
    translation truncated at the first internal stop codon is kept
    and the feature location is adjusted accordingly via
    _adjust_feat_loc(). If both attempts fail, or if the resulting
    protein is only a single amino acid long, an Exception is raised.

    Returns:
        tuple: (translation, feature location)
    Raises:
        Exception: if translation fails entirely, or the translation
            is shorter than two amino acids.
    '''
    try:
        # Note: TFL must contain "cds=True"; don't delete it
        transl_out = AnnoCheck._transl(self.extract,
                                       self.transl_table, cds=True)
        # Direct translation succeeded: the original location is valid.
        feat_loc = self.feature.location
    except:
        try:
            # Full translation, internal stop codons kept as '*'.
            without_internalStop = AnnoCheck._transl(self.extract,
                                                     self.transl_table)
            # Translation truncated at the first internal stop codon
            # (to_stop=True drops the terminating '*').
            with_internalStop = AnnoCheck._transl(
                self.extract, self.transl_table, to_stop=True)
            # Keep the shorter (truncated) translation and shrink the
            # feature location to match it.
            transl_out = with_internalStop
            feat_loc = AnnoCheck._adjust_feat_loc(
                self.feature.location, with_internalStop, without_internalStop)
        except Exception:
            msg = 'Translation of feature `%s` of \
sequence `%s` is unsuccessful.' % (self.feature.id, self.record_id)
            #warnings.warn(msg)
            raise Exception(msg)
    # A "protein" of a single amino acid indicates a bogus annotation.
    if len(transl_out) < 2:
        msg = 'Translation of feature `%s` of sequence `%s` \
indicates a protein length of only a single amino acid.' \
            % (self.feature.id, self.record_id)
        #warnings.warn(msg)
        raise Exception(msg)
    # IMPORTANT!!!: In an ENA record, the translation does not display the
    # stop codon (i.e., the '*'), while the feature location range (i.e., 738..2291)
    # very much includes its position, which is biologically logical, as
    # a stop codon is not an amino acid in a translation.
    # Thus, TFL would be incorrect, because it would add back an asterisk into the translation.
    #transl_out = transl_out + "*"
    return (transl_out, feat_loc)
def for_unittest(self):
    ''' Run check() and report whether its results have the expected
    types.

    Returns:
        bool: True if check() produced a Seq translation and a
            FeatureLocation, False otherwise.
    Raises:
        Exception: re-raised (after emitting a warning) when check()
            itself fails.
    '''
    try:
        translation, location = AnnoCheck(
            self.extract, self.feature,
            self.record_id, self.transl_table).check()
    except Exception as err:
        warnings.warn(err)
        raise Exception(err)  # Should this line be commented out?
    return isinstance(translation, Seq) and isinstance(location, FeatureLocation)
class TranslCheck:
    ''' This class contains functions to coordinate different checks on
    sequence features (extraction, location adjustment, translation
    quality). '''

    def __init__(self):
        pass

    def extract(self, feature, seq_record):
        ''' Extract the nucleotide sequence of a feature from a record.

        For a forward-strand feature, Biopython's own extract() is used
        directly. Otherwise the location parts are concatenated in
        reverse order into a fresh SeqRecord.
        '''
        # NOTE(review): _get_strand() is a non-public Biopython accessor;
        # confirm it exists in the Biopython version in use.
        if feature._get_strand() == 1:
            return feature.extract(seq_record)
        reverse = SeqRecord("")
        for part in feature.location.parts[::-1]:
            reverse.seq = reverse.seq + part.extract(seq_record).seq
        return reverse

    def adjustLocation(self, oldLocation, newLocation):
        ''' Shift a location that follows an adjusted feature.

        When checking the translation of a CDS or a gene, its location
        may have been adjusted; a feature that follows it (e.g. an IGS
        or an intron) must then be adjusted as well. The old location is
        walked position by position: each jump between consecutive
        positions closes the current sub-location and opens a new one,
        and the first sub-location now begins where the new location ends.
        '''
        start = []
        end = []
        # The following feature now begins where the adjusted one ends.
        start.append(newLocation.end)
        t = oldLocation.start
        for pos in oldLocation:
            if not pos == t:
                # Non-contiguous position: close current part, open a new one.
                end.append(t)
                start.append(pos)
                t = pos
            t = t + 1
        end.append(oldLocation.end)
        locations = [FeatureLocation(start[i], end[i])
                     for i in range(len(start))]
        try:
            return CompoundLocation(locations)
        except Exception:
            # Presumably a single part cannot form a CompoundLocation;
            # fall back to the lone FeatureLocation.
            return locations[0]

    def transl_and_quality_of_transl(self, seq_record, feature, transl_table):
        ''' Translate a coding region and check the quality of said
        translation.

        Args:
            seq_record (obj): record the feature belongs to
            feature (obj): feature to translate and check
            transl_table (int): NCBI translation table number
        Returns:
            the feature, possibly with an adjusted location and (for a
            CDS) a "translation" qualifier added
        Raises:
            Exception: if translation fails, or a gene/exon feature is
                shorter than 15 nt (such annotations are dropped).
        '''
        extract = self.extract(feature, seq_record)
        try:
            transl, loc = AnnoCheck(extract.seq, feature, seq_record.id,
                                    transl_table).check()
            if feature.type == 'CDS':
                feature.qualifiers["translation"] = transl
            if feature.type in ('exon', 'gene'):
                # With gene and exon features that are less than 15 nt long,
                # the annotation should be dropped from the output.
                # FIX: len(loc) replaces len([base for base in loc]) (same
                # value, no throwaway list), and the previously bare `raise`
                # (which had no active exception and thus raised a
                # RuntimeError) is now an explicit, descriptive error.
                if len(loc) < 15:
                    raise ValueError('Feature `%s` is shorter than 15 nt.'
                                     % (feature.id))
            feature.location = loc
        except Exception as e:
            raise Exception(e)
        return feature
class QualifierCheck:
    ''' This class contains functions to evaluate the quality of metadata.

    Args:
        lst_of_dcts (list): a list of qualifier dictionaries, one per
            sequence; example:
            [{'isolate': 'taxon_A', 'country': 'Ecuador'},
             {'isolate': 'taxon_B', 'country': 'Peru'}]
        label (str): column label that every dictionary must contain
    Raises:
        Exception: from the individual check functions below
    '''

    def __init__(self, lst_of_dcts, label):
        self.lst_of_dcts = lst_of_dcts
        self.label = label

    @staticmethod
    def _enforce_ASCII(lst_of_dcts):
        ''' Convert any non-ASCII characters among qualifier values to
        ASCII characters via unidecode().

        The fallback branch decodes byte-string values as UTF-8 first
        (legacy input).
        '''
        try:
            filtered_lst_of_dcts = [
                {k: unidecode(v) for k, v in list(dct.items())}
                for dct in lst_of_dcts]
        # FIX: bare `except:` narrowed; it also caught KeyboardInterrupt etc.
        except Exception:
            filtered_lst_of_dcts = [
                {k: unidecode(v.decode('utf-8')) for k, v in dct.items()}
                for dct in lst_of_dcts]
        return filtered_lst_of_dcts

    @staticmethod
    def _label_present(lst_of_dcts, label):
        ''' Check that each (!) dictionary in the list contains the key
        <label>; raise otherwise. '''
        if not all(label in list(dct.keys()) for dct in lst_of_dcts):
            # FIX: message normalized (line continuation used to bake
            # indentation into the string) and now passed to the
            # exception instead of being lost.
            msg = ('ERROR: csv-file does not contain a column '
                   'labelled %s' % (label))
            warnings.warn(msg)
            raise Exception(msg)
        return True

    @staticmethod
    def _rm_empty_qual(lst_of_dcts):
        ''' Remove every key-value pair with an empty value from each
        qualifier dictionary. '''
        nonempty_lst_of_dcts = [{k: v for k, v in list(dct.items()) if v != ''}
                                for dct in lst_of_dcts]
        return nonempty_lst_of_dcts

    @staticmethod
    def _valid_INSDC_quals(lst_of_dcts):
        ''' Check that every (!) dictionary key in the list is a valid
        INSDC qualifier (per GlobVars.nex2ena_valid_INSDC_quals). '''
        keys_present = list(chain.from_iterable([list(dct.keys()) for dct in
                                                 lst_of_dcts]))
        not_valid = [k for k in keys_present if k not in
                     GlobVars.nex2ena_valid_INSDC_quals]
        if not_valid:
            # FIX: message normalized and attached to the exception.
            msg = ('ERROR: The following are invalid INSDC '
                   'qualifiers: %s' % (', '.join(not_valid)))
            warnings.warn(msg)
            raise Exception(msg)
        return True

    @staticmethod
    def uniqueSeqname(seqnameCSV, seqnameNEX):
        ''' Check that (a) no sequence name is duplicated in either the
        NEXUS or the metadata file, and (b) every sequence name in the
        NEXUS file has a corresponding entry in the metadata file. '''
        if len(set(seqnameCSV)) != len(seqnameCSV):
            msg = ('ERROR: Some sequence names are present more than '
                   'once in the metadata file.')
            warnings.warn(msg)
            raise Exception(msg)
        # FIX: set lookup instead of repeated O(n) list membership tests.
        csv_names = set(seqnameCSV)
        for seqname in seqnameNEX:
            # A trailing '.copy' component signals a name that occurred
            # more than once in the NEXUS file.
            if seqname.split(".")[-1] == "copy":
                msg = ('ERROR: Some sequence names are present more '
                       'than once in the NEXUS file.')
                warnings.warn(msg)
                raise Exception(msg)
            if seqname not in csv_names:
                msg = ('ERROR: The sequence name `%s` does not have a '
                       'corresponding entry in the metadata file.' % (seqname))
                warnings.warn(msg)
                raise Exception(msg)

    def quality_of_qualifiers(self):
        ''' Conduct a series of quality checks on the qualifiers list.

        First (_label_present), check that every entry of the qualifier
        matrix contains a column labelled <self.label>. Second
        (_valid_INSDC_quals), check that the column names constitute
        valid INSDC feature table qualifiers.

        Returns:
            True, unless exception
        Raises:
            passed exception
        '''
        try:
            QualifierCheck._label_present(self.lst_of_dcts, self.label)
            QualifierCheck._valid_INSDC_quals(self.lst_of_dcts)
        except Exception as e:
            warnings.warn(str(e))
            raise Exception(e)
        return True
|
# Generated by Django 2.1.7 on 2019-04-16 06:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set human-readable verbose names for the
    Post and Share models (admin display only; no schema change)."""

    dependencies = [
        ('blog', '0025_share_date_posted'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'verbose_name': 'Post', 'verbose_name_plural': 'Posts'},
        ),
        migrations.AlterModelOptions(
            name='share',
            options={'verbose_name': 'Share', 'verbose_name_plural': 'Shares'},
        ),
    ]
|
from snake import Snake
from food import Food
from scoreboard import Scoreboard
from turtle import Turtle
import time
# Game setup: snake, food and score display.
snake = Snake()
food = Food()
score = Scoreboard()
running = True

# Steering key bindings.
snake.screen.listen()
snake.screen.onkey(fun=snake.up, key="Up")
snake.screen.onkey(fun=snake.down, key="Down")
snake.screen.onkey(fun=snake.left, key="Left")
snake.screen.onkey(fun=snake.right, key="Right")

while running:
    snake.screen.update()
    time.sleep(0.01)
    snake.move()

    # Collision with food: respawn the food, grow the snake, bump the score.
    if snake.header.distance(food) < 20:
        food.refresh()
        snake.snake.append(Turtle(shape="circle"))
        new_segment = snake.snake[-1]
        new_segment.up()
        new_segment.color("green")
        score.increase()

    # Hitting a wall ends the game immediately.
    if (snake.header.xcor() > 280 or snake.header.xcor() < -280
            or snake.header.ycor() > 280 or snake.header.ycor() < -280):
        score.gameover()
        break

    # Running into the snake's own body ends the game after this frame.
    for segment in snake.snake[1:]:
        if snake.header.distance(segment) < 10:
            running = False
            score.gameover()

snake.screen.exitonclick()
from battle.battlemenu.BattleOption import BattleOptions
from battle.round.RoundAction import RoundAction
from battle.battleeffect.RegularAttack import RegularAttack
# Represents just a regular attack:
class AttackOption(BattleOptions):
    """Battle-menu option representing a plain, weapon-less attack."""

    def __init__(self, fighter, targets):
        super().__init__("Attack", fighter, targets)

    # TODO: This needs to select an attack strategy from weapons.
    def generate_round_actions(self):
        """Build a RegularAttack, pick its target, and wrap both in a
        RoundAction."""
        attack = RegularAttack(self.fighter, self.targets)
        chosen_target = attack.selection_strategy.select_target(self.targets)
        return RoundAction(attack, chosen_target)
import pickle
import numpy as np
from data import TextData
from train import TextTrain
# Load a pre-sampled corpus of 1000 mail bodies (list of str).
# NOTE(review): the name `file` shadows a builtin; kept as-is.
with open('../data/mailContent_list_1000.pickle', 'rb') as file:
    content_list = pickle.load(file)
# random.seed(1234)
# pickle.dump(random.sample(content_list,1000), open('mailContent_list_1000.pickle', 'wb'))

# Load the matching labels for the sampled messages.
with open('../data/mailLabel_list_1000.pickle', 'rb') as file:
    label_list = pickle.load(file)
# random.seed(1234)
# pickle.dump(random.sample(label_list,1000), open('mailLabel_list_1000.pickle', 'wb'))
def statistics():
    """Print corpus statistics: max/mean/median mail length and the number
    of distinct characters (reads the module-level content_list)."""
    # Inspect the maximum and average mail length.
    length = np.array([len(tmp) for tmp in content_list])
    print(np.max(length))  # 33714
    print(np.mean(length))  # 752
    print(np.median(length))  # 333
    # Count the distinct characters across the whole corpus.
    print(len(set(''.join(content_list))))  # 9777
# Hyper-parameters for the CNN text classifier.
config = {
    # train
    'num_iteration': 400,
    'batch_size': 100,
    'learning_rate': 1e-3,
    'print_per_batch': 500 / 10,
    # model
    'embedding_dim': 64,
    'num_filters': 256,
    'kernel_size': 5,
    'hidden_dim': 128,
    'dropout_keep_prob': 0.5
}

# Build vocabulary and train/test splits, then fill in the
# data-dependent config entries.
td = TextData(content_list, label_list)
train_X, train_Y, test_X, test_Y = td.get_data()
config['vocab_size'] = td.vocab_size
config['num_classes'] = td.num_classes
config['labels'] = td.labels

# Train the CNN and evaluate it on the held-out split.
train = TextTrain(config, train_X, train_Y, test_X, test_Y)
train.cnn_train()
train.cnn_test()
import random
from scipy.stats.distributions import norm, triang
import pyDOE
class Lhs(object):
    """
    Generate a latin-hypercube design.

    Parameters
    ----------
    sampling_dist : string
        Model of distribution: 'uniform', 'normal' or 'triang'.
    n : int
        Number of factors.
    samples : int
        Number of sample points.
    loc, scales : float
        Location and scale of the normal distribution ('normal' only).
    min, max, mod : float
        Bounds and mode of the triangular distribution ('triang' only).

    Example
    -------
    A 1-factor design with uniform distribution

    >>> sampling = Lhs(sampling_dist='uniform', n=1, samples=10).generate()

    Reference
    ---------
    pyDOE, lhs: http://pythonhosted.org/pyDOE/randomized.html#latin-hypercube
    Models of dist: http://docs.scipy.org/doc/scipy/reference/stats.html
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.sampling_dist = kwargs['sampling_dist']

    def generate(self):
        """Return the design as a flat list of floats."""
        if self.sampling_dist == 'uniform':
            lhd = self._uniform_generate()
        elif self.sampling_dist == 'normal':
            lhd = self._normal_generate()
        elif self.sampling_dist == 'triang':
            lhd = self._triang_generate()
        else:
            raise NotImplementedError(
                "Sampling distribution '%s' is not supported"
                % self.sampling_dist)
        # flatten ndarray and make it to list
        return lhd.flatten().tolist()

    def _uniform_generate(self):
        """Uniform LHS design on [0, 1]; reads n/samples from kwargs."""
        n = self.kwargs['n']
        samples = self.kwargs['samples']
        return pyDOE.lhs(n, samples)

    def _normal_generate(self):
        """Map a uniform design through the normal inverse CDF (ppf)."""
        loc = self.kwargs['loc']
        scales = self.kwargs['scales']
        # BUG FIX: previously called self._uniform_generate(n, samples) with
        # undefined local names (NameError); the helper takes no arguments
        # and reads kwargs itself.
        lhd = self._uniform_generate()
        # BUG FIX: scipy's norm() signature is (loc, scale); the old call
        # norm(loc, scales=1) passed an invalid keyword and ignored the
        # requested scale.
        return norm(loc, scales).ppf(lhd)

    def _triang_generate(self):
        """Map a uniform design through the triangular inverse CDF (ppf)."""
        # Renamed from min/max/mod locals that shadowed builtins.
        lower = self.kwargs['min']
        upper = self.kwargs['max']
        mode = self.kwargs['mod']
        loc = lower
        scale = upper - lower
        c = (mode - lower) / (upper - lower)
        # BUG FIX: same undefined-name call as in _normal_generate().
        lhd = self._uniform_generate()
        return triang(c, loc, scale).ppf(lhd)
|
import numpy as np
import control
from rrt_star import RRT_star
# see spec for RRT_star
# A_fn and B_fn take state, control and return matrix
# Q and R are matrices
# update_fn takes state, control and returns a new state
class LQR_RRT_star(RRT_star):
    """RRT* variant using an LQR cost as its distance metric and an LQR
    feedback law for steering (see the RRT_star base class spec)."""

    def __init__(self, s_init, s_goal, bounds, obstacles, A_fn, B_fn, Q, R, update_fn, obstacle_coords = None, gamma = 1):
        super().__init__(s_init, s_goal, bounds, obstacles, obstacle_coords, gamma)
        # A_fn/B_fn: (state, control) -> linearized dynamics matrices.
        self.A_fn = A_fn
        self.B_fn = B_fn
        # Q, R: LQR state and control cost matrices.
        self.Q = Q
        self.R = R
        # update_fn: (state, control) -> next state (true dynamics).
        self.update_fn = update_fn
        # Cache of LQR solutions keyed by linearization point.
        self.lqr_memo = {}

    def lqr(self, x_0, u_0):
        """Solve (and memoize) the LQR problem linearized at (x_0, u_0).

        Returns (K, S): feedback gain and Riccati solution from control.lqr.
        """
        lqr_memo_key = (tuple(x_0), tuple(u_0))
        if lqr_memo_key in self.lqr_memo:
            return self.lqr_memo[lqr_memo_key]
        A = self.A_fn(x_0, u_0)
        B = self.B_fn(x_0, u_0)
        K, S, E = control.lqr(A, B, self.Q, self.R)
        self.lqr_memo[lqr_memo_key] = (K, S)
        return (K, S)

    # linearization should be done around the to state
    # TODO: should we ever consider nonzero control?
    # TODO: correct control dimensions (don't assume 1)
    def distance(self, from_state, to_state):
        """LQR cost-to-go x_bar.T @ S @ x_bar between the two states."""
        K, S = self.lqr(to_state, np.zeros(1))
        x_bar = from_state - to_state
        return np.dot(np.dot(x_bar.T, S), x_bar)

    def steer(self, from_state, to_state):
        """Apply one step of the feedback law u = -K x_bar to the true
        dynamics and return the resulting state."""
        K, S = self.lqr(to_state, np.zeros(1))
        x_bar = from_state - to_state
        u = - np.dot(K, x_bar)
        return self.update_fn(from_state, u)
|
from matplotlib import pyplot as plt
import datetime
def plot_errors(generator, disriminator, display):
    """Plot generator/discriminator loss curves and save them to a
    timestamped PNG in the working directory.

    Args:
        generator: sequence of generator losses, one per epoch.
        disriminator: sequence of discriminator losses, one per epoch.
            NOTE(review): parameter name is a typo for 'discriminator';
            kept because renaming would break keyword-argument callers.
        display: when truthy, also show the figure interactively.
    """
    file_name = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")+'.png'
    # plt.title("Final Model Training Losses vs Epochs")
    plt.xlabel("Epoch")
    plt.ylabel("Error")
    plt.plot(generator,'b',label='Generator Loss')
    plt.plot(disriminator,'r',label = 'Discriminator Loss')
    plt.legend(loc='best')
    # NOTE(review): draws on matplotlib's current global figure; repeated
    # calls accumulate curves unless the caller clears it first.
    plt.savefig(file_name)
    if display:
        plt.show()
|
"""Yaml CLI formatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from treadmill import yamlwrapper as yaml
def format(obj):  # pylint: disable=W0622
    """Returns yaml representation of the object.

    The document is emitted in block style with explicit '---' start
    and '...' end markers.
    """
    return yaml.dump(obj,
                     default_flow_style=False,
                     explicit_start=True,
                     explicit_end=True)
|
from django.conf.urls import url
from DPMAPI import views
# NOTE(review): url('') is an unanchored pattern and matches every request
# path, so the 'WeibullAnalysis/' route below is never reached; both routes
# point at the same view, so behaviour is unaffected.
urlpatterns = [
    url('', views.weibullAnalysis),
    url('WeibullAnalysis/', views.weibullAnalysis)
]
|
from kmeans import K_Means
from knn import KNN_test
from trees import DT_train_binary, DT_test_binary, DT_train_binary_best
import numpy as np
import random
import math
def main():
    """Exercise the K-NN, K-Means and decision-tree implementations on
    small hand-written datasets."""
    # --- K-NN data ---
    X_train = np.array([[1,5],[2,6],[2,7],[3,7],[3,8],[4,8],[5,1],[5,9],[6,2],[7,2],[7,3],[8,3],[8,4],[9,5]])
    Y_train = np.array([[-1],[-1],[1],[-1],[1],[-1],[1],[-1],[1],[-1],[1],[-1],[1],[1]])
    X_test = np.array([[1,1], [2,1], [0,10], [10,10], [5,5], [3,10], [9,4], [6,2], [2,2], [8,7]])
    Y_test = np.array([[1], [-1], [1], [-1], [1], [-1], [1], [-1], [1], [-1]])
    K = 1
    Accuracy = KNN_test(X_train,Y_train,X_test,Y_test,K)
    print("KNN Accuracy: ", Accuracy, "%")

    # --- K-Means ---
    # FIX: a 2-D dataset was assigned to X and immediately overwritten by
    # the 1-D dataset below (dead store, np.array has no side effects);
    # the unused assignment has been removed.
    X = np.array([[0],[1],[2],[7],[8],[9],[12],[14],[15]])
    K = 3
    K_Means(X,K)

    # --- Decision trees ---
    # Training Set 1:
    X_train_1 = np.array([[0,1], [0,0], [1,0], [0,0], [1,1]])
    Y_train_1 = np.array([[1], [0], [0], [0], [1]])
    # Validation Set 1:
    X_val_1 = np.array([[0,0], [0,1], [1,0], [1,1]])
    Y_val_1 = np.array([[0], [1], [0], [1]])
    # Testing Set 1:
    X_test_1 = np.array([[0,0], [0,1], [1,0], [1,1]])
    Y_test_1 = np.array([[1], [1], [0], [1]])
    # Training Set 2:
    X_train_2 = np.array([[0,1,0,0], [0,0,0,1], [1,0,0,0], [0,0,1,1], [1,1,0,1], [1,1,0,0], [1,0,0,1], [0,1,0,1], [0,1,0,0]])
    Y_train_2 = np.array([[0], [1], [0], [0], [1], [0], [1], [1], [1]])
    # Validation Set 2:
    X_val_2 = np.array([[1,0,0,0], [0,0,1,1], [1,1,0,1], [1,1,0,0], [1,0,0,1], [0,1,0,0]])
    Y_val_2 = np.array([[0], [0], [1], [0], [1], [1]])
    # Testing Set 2:
    X_test_2 = np.array([[0,1,0,0], [0,0,0,1], [1,0,0,0], [0,0,1,1], [1,1,0,1], [1,1,0,0], [1,0,0,1], [0,1,0,1], [0,1,0,0]])
    Y_test_2 = np.array([[1], [1], [0], [0], [1], [0], [1], [1], [1]])
    max_depth = 4
    Trained_DT = DT_train_binary(X_train_2,Y_train_2,max_depth)
    Accuracy = DT_test_binary(X_test_2,Y_test_2,Trained_DT)
    Trained_DT = DT_train_binary_best(X_train_2, Y_train_2, X_val_2, Y_val_2)
    Accuracy = DT_test_binary(X_test_2,Y_test_2,Trained_DT)
    # NOTE(review): the decision-tree accuracies are computed but never
    # printed; kept as-is to preserve the script's output exactly.

if __name__ == "__main__":
    main()
# Simple BMI calculator; all user-facing strings are Portuguese.
peso = float (input ("Digite o seu peso: "))  # "Enter your weight"
print("Seu peso é: ", peso)  # "Your weight is"
altura = float (input ("Digite sua altura: "))  # "Enter your height"
print("Sua altura é: ", altura)  # "Your height is"
# BMI = weight / height^2
imc = peso/(altura*altura)
print("Seu IMC é: ",imc)  # "Your BMI is"
|
from datetime import datetime
from django.shortcuts import render
def now(request):
now = datetime.now()
h = str(now.hour)
if len(h) == 1:
h = "0" + h
m = str(now.minute)
if len(m) == 1:
m = "0" + m
s = str(now.second)
if len(s) == 1:
s = "0" + s
now_str = h + ":" + m + ":" + s
return render(request, 'web_demo/home.html', {
'current_time': now_str,
})
# File: views.py
# Author: Kaiching Chang
# Date: June, 2018
|
import airsim
import os
from shutil import copy2
# Locate the installed airsim package and overwrite its client.py and
# types.py with the patched copies that live next to this script.
airsim_dir = os.path.dirname(airsim.__file__)
file_path = os.path.realpath(__file__)
dir_path = os.path.dirname(file_path)
print(airsim_dir)
copy2(os.path.join(dir_path, "client.py"), airsim_dir)
copy2(os.path.join(dir_path, "types.py"), airsim_dir)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import itertools
import logging
import re
from dataclasses import dataclass
from pathlib import Path, PurePath
from textwrap import dedent
from typing import Iterable, List, Type
import pytest
from pants.build_graph.address import Address
from pants.core.goals.fix import (
AbstractFixRequest,
Fix,
FixFilesRequest,
FixResult,
FixTargetsRequest,
Partitions,
)
from pants.core.goals.fix import rules as fix_rules
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest
from pants.core.util_rules import source_files
from pants.core.util_rules.partitions import PartitionerType
from pants.engine.fs import (
EMPTY_SNAPSHOT,
CreateDigest,
Digest,
DigestContents,
FileContent,
Snapshot,
)
from pants.engine.rules import Get, QueryRule, collect_rules, rule
from pants.engine.target import FieldSet, MultipleSourcesField, SingleSourceField, Target
from pants.option.option_types import SkipOption
from pants.option.subsystem import Subsystem
from pants.testutil.rule_runner import RuleRunner
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
# Canonical "fixed" file contents that the fake tools below emit.
FORTRAN_FILE = FileContent("fixed.f98", b"READ INPUT TAPE 5\n")
SMALLTALK_FILE = FileContent("fixed.st", b"y := self size + super size.')\n")


class FortranSource(SingleSourceField):
    pass


class FortranTarget(Target):
    alias = "fortran"
    core_fields = (FortranSource,)


@dataclass(frozen=True)
class FortranFieldSet(FieldSet):
    required_fields = (FortranSource,)

    sources: FortranSource


class FortranFixRequest(FixTargetsRequest):
    """Fake fixer; its partition rule only acts when a `needs_fixing`
    target is present (see fortran_fix_partition)."""

    field_set_type = FortranFieldSet

    @classproperty
    def tool_name(cls) -> str:
        return "Fortran Conditionally Did Change"

    @classproperty
    def tool_id(cls) -> str:
        return "fortranconditionallydidchange"


class FortranFmtRequest(FmtTargetsRequest):
    """Fake formatter; always rewrites its files (see fortran_fmt)."""

    field_set_type = FortranFieldSet

    @classproperty
    def tool_name(cls) -> str:
        return "Fortran Formatter"

    @classproperty
    def tool_id(cls) -> str:
        return "fortranformatter"


@rule
async def fortran_fix_partition(request: FortranFixRequest.PartitionRequest) -> Partitions:
    """Empty partitions (tool skipped) unless some field set belongs to a
    target named `needs_fixing`; otherwise one partition of all files."""
    if not any(fs.address.target_name == "needs_fixing" for fs in request.field_sets):
        return Partitions()
    return Partitions.single_partition(fs.sources.file_path for fs in request.field_sets)


@rule
async def fortran_fmt_partition(request: FortranFmtRequest.PartitionRequest) -> Partitions:
    """Always a single partition containing every file."""
    return Partitions.single_partition(fs.sources.file_path for fs in request.field_sets)


@rule
async def fortran_fix(request: FortranFixRequest.Batch) -> FixResult:
    """Rewrite every file in the batch with FORTRAN_FILE's contents."""
    input = request.snapshot
    output = await Get(
        Snapshot, CreateDigest([FileContent(file, FORTRAN_FILE.content) for file in request.files])
    )
    return FixResult(
        input=input, output=output, stdout="", stderr="", tool_name=FortranFixRequest.tool_name
    )


@rule
async def fortran_fmt(request: FortranFmtRequest.Batch) -> FmtResult:
    """Formatter counterpart of fortran_fix; emits the same contents."""
    output = await Get(
        Snapshot, CreateDigest([FileContent(file, FORTRAN_FILE.content) for file in request.files])
    )
    return FmtResult(
        input=request.snapshot,
        output=output,
        stdout="",
        stderr="",
        tool_name=FortranFmtRequest.tool_name,
    )
class SmalltalkSource(SingleSourceField):
    pass


class SmalltalkTarget(Target):
    alias = "smalltalk"
    core_fields = (SmalltalkSource,)


@dataclass(frozen=True)
class SmalltalkFieldSet(FieldSet):
    required_fields = (SmalltalkSource,)

    source: SmalltalkSource


class SmalltalkNoopRequest(FixTargetsRequest):
    """Fake fixer that runs but never changes anything."""

    field_set_type = SmalltalkFieldSet

    @classproperty
    def tool_name(cls) -> str:
        return "Smalltalk Did Not Change"

    @classproperty
    def tool_id(cls) -> str:
        return "smalltalkdidnotchange"


@rule
async def smalltalk_noop_partition(request: SmalltalkNoopRequest.PartitionRequest) -> Partitions:
    """Single partition containing every Smalltalk file."""
    return Partitions.single_partition(fs.source.file_path for fs in request.field_sets)


@rule
async def smalltalk_noop(request: SmalltalkNoopRequest.Batch) -> FixResult:
    """Return the input snapshot unchanged (a no-op fix)."""
    assert request.snapshot != EMPTY_SNAPSHOT
    return FixResult(
        input=request.snapshot,
        output=request.snapshot,
        stdout="",
        stderr="",
        tool_name=SmalltalkNoopRequest.tool_name,
    )


class SmalltalkSkipRequest(FixTargetsRequest):
    """Fake fixer whose partitioner opts out entirely, so its fix rule
    must never run."""

    field_set_type = SmalltalkFieldSet

    @classproperty
    def tool_name(cls) -> str:
        return "Smalltalk Skipped"

    @classproperty
    def tool_id(cls) -> str:
        return "smalltalkskipped"


@rule
async def smalltalk_skip_partition(request: SmalltalkSkipRequest.PartitionRequest) -> Partitions:
    """Empty partitions: this tool is always skipped."""
    return Partitions()


@rule
async def smalltalk_skip(request: SmalltalkSkipRequest.Batch) -> FixResult:
    # Unreachable by construction: the partitioner returns no partitions.
    assert False


class BrickyBuildFileFixer(FixFilesRequest):
    """Ensures all non-comment lines only consist of the word 'brick'."""

    @classproperty
    def tool_name(cls) -> str:
        return "Bricky Bobby"

    @classproperty
    def tool_id(cls) -> str:
        return "brickybobby"


@rule
async def bricky_partition(request: BrickyBuildFileFixer.PartitionRequest) -> Partitions:
    """Single partition of every file literally named `BUILD`."""
    return Partitions.single_partition(
        file for file in request.files if PurePath(file).name == "BUILD"
    )


@rule
async def fix_with_bricky(request: BrickyBuildFileFixer.Batch) -> FixResult:
    """Replace each run of letters/underscores with 'brick' on every
    non-comment line of the batch's files."""

    def brickify(contents: bytes) -> bytes:
        content_str = contents.decode("ascii")
        new_lines = []
        for line in content_str.splitlines(keepends=True):
            # Lines starting with '#' (comments) are left untouched.
            if not line.startswith("#"):
                line = re.sub(r"[a-zA-Z_]+", "brick", line)
            new_lines.append(line)
        return "".join(new_lines).encode()

    snapshot = request.snapshot
    digest_contents = await Get(DigestContents, Digest, snapshot.digest)
    new_contents = [
        dataclasses.replace(file_content, content=brickify(file_content.content))
        for file_content in digest_contents
    ]
    output_snapshot = await Get(Snapshot, CreateDigest(new_contents))
    return FixResult(
        input=snapshot,
        output=output_snapshot,
        stdout="",
        stderr="",
        tool_name=BrickyBuildFileFixer.tool_name,
    )
def fix_rule_runner(
    target_types: List[Type[Target]],
    request_types: List[Type[AbstractFixRequest]] = [],
) -> RuleRunner:
    """Build a RuleRunner wired with the `fix` goal rules plus the rules
    of each given request type.

    NOTE(review): the mutable default `[]` is tolerable here only because
    the argument is never mutated.
    """
    return RuleRunner(
        rules=[
            *collect_rules(),
            *source_files.rules(),
            *fix_rules(),
            *itertools.chain.from_iterable(request_type.rules() for request_type in request_types),
        ],
        target_types=target_types,
    )


def run_fix(
    rule_runner: RuleRunner,
    *,
    target_specs: List[str],
    only: list[str] | None = None,
    extra_args: Iterable[str] = (),
) -> str:
    """Run the `fix` goal over *target_specs* and return its stderr.

    Asserts the goal exited 0 and produced no stdout; *only* restricts
    the run to the given tool ids.
    """
    result = rule_runner.run_goal_rule(
        Fix,
        args=[f"--only={repr(only or [])}", *target_specs, *extra_args],
    )
    assert result.exit_code == 0
    assert not result.stdout
    return result.stderr
def write_files(rule_runner: RuleRunner) -> None:
    """Create the standard test tree: a Fortran and a Smalltalk pair where
    one file of each pair is already in 'fixed' form.

    `fixed.f98` deliberately lacks the trailing newline that FORTRAN_FILE
    carries, so the `needs_fixing` target genuinely needs fixing.
    """
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                fortran(name='f1', source="ft1.f98")
                fortran(name='needs_fixing', source="fixed.f98")
                smalltalk(name='s1', source="st1.st")
                smalltalk(name='s2', source="fixed.st")
                """,
            ),
            "ft1.f98": "READ INPUT TAPE 5\n",
            "fixed.f98": "READ INPUT TAPE 5",
            "st1.st": "y := self size + super size.')",
            "fixed.st": "y := self size + super size.')\n",
        },
    )
def test_summary() -> None:
    """End-to-end run over all fake tools: the expected per-tool summary is
    printed and the files (including the BUILD file) are rewritten on disk."""
    rule_runner = fix_rule_runner(
        target_types=[FortranTarget, SmalltalkTarget],
        request_types=[
            FortranFixRequest,
            FortranFmtRequest,
            SmalltalkSkipRequest,
            SmalltalkNoopRequest,
            BrickyBuildFileFixer,
        ],
    )
    write_files(rule_runner)

    stderr = run_fix(rule_runner, target_specs=["::"])
    # The skipped tool produces no summary line at all.
    assert stderr == dedent(
        """\
        + Bricky Bobby made changes.
        + Fortran Conditionally Did Change made changes.
        ✓ Fortran Formatter made no changes.
        ✓ Smalltalk Did Not Change made no changes.
        """
    )

    fortran_file = Path(rule_runner.build_root, FORTRAN_FILE.path)
    smalltalk_file = Path(rule_runner.build_root, SMALLTALK_FILE.path)
    build_file = Path(rule_runner.build_root, "BUILD")
    assert fortran_file.is_file()
    assert fortran_file.read_text() == FORTRAN_FILE.content.decode()
    assert smalltalk_file.is_file()
    assert smalltalk_file.read_text() == SMALLTALK_FILE.content.decode()
    assert build_file.is_file()
    # Bricky rewrote every identifier run in the BUILD file to 'brick'.
    assert build_file.read_text() == dedent(
        """\
        brick(brick='brick1', brick="brick1.brick98")
        brick(brick='brick', brick="brick.brick98")
        brick(brick='brick1', brick="brick1.brick")
        brick(brick='brick2', brick="brick.brick")
        """
    )
def test_skip_formatters() -> None:
    """--fix-skip-formatters suppresses formatter-only runs entirely."""
    rule_runner = fix_rule_runner(
        target_types=[FortranTarget, SmalltalkTarget],
        request_types=[FortranFmtRequest],
    )
    write_files(rule_runner)
    stderr = run_fix(rule_runner, target_specs=["::"], extra_args=["--fix-skip-formatters"])
    assert not stderr


def test_fixers_first() -> None:
    """Fixers run before formatters even when the formatter is registered
    first."""
    rule_runner = fix_rule_runner(
        target_types=[FortranTarget, SmalltalkTarget],
        # NB: Order is important here
        request_types=[FortranFmtRequest, FortranFixRequest],
    )
    write_files(rule_runner)
    stderr = run_fix(rule_runner, target_specs=["::"])
    # NB Since both rules have the same body, if the fixer runs first, it'll make changes. Then the
    # formatter will have nothing to change.
    assert stderr == dedent(
        """\
        + Fortran Conditionally Did Change made changes.
        ✓ Fortran Formatter made no changes.
        """
    )
def test_only() -> None:
    """--only restricts the run to the named tool id; other tools emit no
    summary lines."""
    rule_runner = fix_rule_runner(
        target_types=[FortranTarget, SmalltalkTarget],
        request_types=[
            FortranFixRequest,
            SmalltalkSkipRequest,
            SmalltalkNoopRequest,
            BrickyBuildFileFixer,
        ],
    )
    write_files(rule_runner)
    stderr = run_fix(
        rule_runner,
        target_specs=["::"],
        only=[SmalltalkNoopRequest.tool_id],
    )
    assert stderr.strip() == "✓ Smalltalk Did Not Change made no changes."


def test_no_targets() -> None:
    """With no target specs the goal runs no tools and prints nothing."""
    rule_runner = fix_rule_runner(
        target_types=[FortranTarget, SmalltalkTarget],
        request_types=[
            FortranFixRequest,
            SmalltalkSkipRequest,
            SmalltalkNoopRequest,
            BrickyBuildFileFixer,
        ],
    )
    write_files(rule_runner)
    stderr = run_fix(
        rule_runner,
        target_specs=[],
    )
    assert not stderr.strip()
def test_message_lists_added_files() -> None:
    """FixResult.message() lists files present only in the output snapshot."""
    input_snapshot = Snapshot.create_for_testing(["f.ext", "dir/f.ext"], ["dir"])
    output_snapshot = Snapshot.create_for_testing(["f.ext", "added.ext", "dir/f.ext"], ["dir"])
    result = FixResult(
        input=input_snapshot,
        output=output_snapshot,
        stdout="stdout",
        stderr="stderr",
        tool_name="fixer",
    )
    assert result.message() == "fixer made changes.\n added.ext"


def test_message_lists_removed_files() -> None:
    """FixResult.message() lists files present only in the input snapshot."""
    input_snapshot = Snapshot.create_for_testing(["f.ext", "removed.ext", "dir/f.ext"], ["dir"])
    output_snapshot = Snapshot.create_for_testing(["f.ext", "dir/f.ext"], ["dir"])
    result = FixResult(
        input=input_snapshot,
        output=output_snapshot,
        stdout="stdout",
        stderr="stderr",
        tool_name="fixer",
    )
    assert result.message() == "fixer made changes.\n removed.ext"


def test_message_lists_files() -> None:
    """FixResult.message() lists both added and removed files."""
    input_snapshot = Snapshot.create_for_testing(["f.ext", "removed.ext", "dir/f.ext"], ["dir"])
    output_snapshot = Snapshot.create_for_testing(["f.ext", "added.ext", "dir/f.ext"], ["dir"])
    result = FixResult(
        input=input_snapshot,
        output=output_snapshot,
        stdout="stdout",
        stderr="stderr",
        tool_name="fixer",
    )
    assert result.message() == "fixer made changes.\n added.ext\n removed.ext"
@dataclass(frozen=True)
class KitchenSingleUtensilFieldSet(FieldSet):
    required_fields = (FortranSource,)

    utensil: SingleSourceField


@dataclass(frozen=True)
class KitchenMultipleUtensilsFieldSet(FieldSet):
    required_fields = (FortranSource,)

    utensils: MultipleSourcesField


@pytest.mark.parametrize(
    "kitchen_field_set_type, field_sets",
    [
        (
            KitchenSingleUtensilFieldSet,
            (
                KitchenSingleUtensilFieldSet(
                    Address("//:bowl"), SingleSourceField("bowl.utensil", Address(""))
                ),
                KitchenSingleUtensilFieldSet(
                    Address("//:knife"), SingleSourceField("knife.utensil", Address(""))
                ),
            ),
        ),
        (
            KitchenMultipleUtensilsFieldSet,
            (
                KitchenMultipleUtensilsFieldSet(
                    Address("//:utensils"),
                    MultipleSourcesField(["*.utensil"], Address("")),
                ),
            ),
        ),
    ],
)
def test_default_single_partition_partitioner(kitchen_field_set_type, field_sets) -> None:
    """PartitionerType.DEFAULT_SINGLE_PARTITION groups all files into one
    partition, and the subsystem's skip option empties the partitions."""

    class KitchenSubsystem(Subsystem):
        options_scope = "kitchen"
        help = "a cookbook might help"
        name = "The Kitchen"
        skip = SkipOption("lint")

    class FixKitchenRequest(FixTargetsRequest):
        field_set_type = kitchen_field_set_type
        tool_subsystem = KitchenSubsystem
        partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION

    rules = [
        *FixKitchenRequest._get_rules(),
        QueryRule(Partitions, [FixKitchenRequest.PartitionRequest]),
    ]
    rule_runner = RuleRunner(rules=rules)
    # FIX: dropped a leftover debug `print(...)` wrapped around this call;
    # write_files is invoked purely for its file-creating side effect.
    rule_runner.write_files({"BUILD": "", "knife.utensil": "", "bowl.utensil": ""})

    partitions = rule_runner.request(Partitions, [FixKitchenRequest.PartitionRequest(field_sets)])
    assert len(partitions) == 1
    assert partitions[0].elements == ("bowl.utensil", "knife.utensil")

    # Skipping the subsystem must yield no partitions at all.
    rule_runner.set_options(["--kitchen-skip"])
    partitions = rule_runner.request(Partitions, [FixKitchenRequest.PartitionRequest(field_sets)])
    assert partitions == Partitions([])
def test_streaming_output_changed(caplog) -> None:
    """A FixResult whose output differs from its input reports at WARN level
    and logs the tool's stdout/stderr at DEBUG."""
    caplog.set_level(logging.DEBUG)
    changed_snapshot = Snapshot.create_for_testing(["other_file.txt"], [])
    result = FixResult(
        input=EMPTY_SNAPSHOT,
        output=changed_snapshot,
        stdout="stdout",
        stderr="stderr",
        tool_name="fixer",
    )
    assert result.level() == LogLevel.WARN
    assert result.message() == "fixer made changes.\n other_file.txt"
    assert ["Output from fixer\nstdout\nstderr"] == [
        rec.message for rec in caplog.records if rec.levelno == logging.DEBUG
    ]


def test_streaming_output_not_changed(caplog) -> None:
    """An unchanged FixResult reports at INFO level; tool output still goes
    to the DEBUG log."""
    caplog.set_level(logging.DEBUG)
    result = FixResult(
        input=EMPTY_SNAPSHOT,
        output=EMPTY_SNAPSHOT,
        stdout="stdout",
        stderr="stderr",
        tool_name="fixer",
    )
    assert result.level() == LogLevel.INFO
    assert result.message() == "fixer made no changes."
    assert ["Output from fixer\nstdout\nstderr"] == [
        rec.message for rec in caplog.records if rec.levelno == logging.DEBUG
    ]
|
#!/usr/bin/env python
"""
v0.1 This retrieves TCP related data files, weka .models, etc... which
are required to run as a TCP task client / ipengine client.
"""
import os, sys
import ingest_tools
ingest_tools_pars = ingest_tools.pars
if __name__ == '__main__':
    # NOTE: this is Python 2 code (dict.iteritems() / dict.has_key()).
    # Ensure the local scratch directory exists.
    local_scratch_dirpath = os.path.expandvars(\
                                     '$HOME/scratch/Noisification/')
    os.system("mkdir -p %s" % (local_scratch_dirpath))
    # For every classification schema that declares a weka model, mirror
    # the .model and .arff training files from the remote host into the
    # matching local directory (skipping files already present).
    for class_schema_name, class_dict in ingest_tools_pars[\
                                    'class_schema_definition_dicts'].iteritems():
        if class_dict.has_key('weka_training_model_fpath'):
            # Directory portion of the model path.
            class_dirpath = class_dict['weka_training_model_fpath'][: \
                               class_dict['weka_training_model_fpath'].rfind('/')]
            if not os.path.exists(class_dirpath):
                os.system("mkdir -p " + class_dirpath)
            # Copy the .model file if not already present locally; the
            # remote path is taken from the 'scratch' component onward.
            fpath = class_dict['weka_training_model_fpath']
            sysindep_fpath = fpath[fpath.find("scratch"):]
            if not os.path.exists(fpath):
                scp_str = "scp -C pteluser@192.168.1.25:%s %s/" % (sysindep_fpath, class_dirpath)
                os.system(scp_str)
            # Copy the matching .arff training file likewise.
            fpath = class_dict['weka_training_arff_fpath']
            sysindep_fpath = fpath[fpath.find("scratch"):]
            if not os.path.exists(fpath):
                scp_str = "scp -C pteluser@192.168.1.25:%s %s/" % (sysindep_fpath, class_dirpath)
                os.system(scp_str)
|
from django.conf.urls import url
from book import views
# URL routes for the book app: book and booktype CRUD views plus two
# existence-check endpoints (isbExist, istype).
urlpatterns = [
    url(r'^$',views.showbook_view),
    url(r'^showbook/(\d+)',views.showbook_view),
    url(r'^booktype/',views.booktype_view),
    url(r'^addbooktype',views.addbooktype_view),
    url(r'^changebooktype/(\d+)',views.changebooktype_view),
    url(r'^booktypedel/(\d+)',views.booktypedel_view),
    url(r'^addbook/',views.addbook_view),
    url(r'^changebook/(\d+)',views.changebook_view),
    url(r'^delbook/(\d+)',views.delbook_view),
    url(r'^isbExist/$',views.isbexit_view),
    url(r'^istype/$',views.istype_view),
]
if __name__ == '__main__':
    # Advent-of-Code style puzzle: steer a ship toward a movable waypoint.
    file = open("day12.txt", "r")
    moves = []
    ship_x_coord = 0
    ship_y_coord = 0
    # The waypoint is tracked relative to the ship: 10 east, 1 north.
    waypoint_x_coord = 10
    waypoint_y_coord = 1
    # NOTE(review): `facing` is never used after this assignment.
    facing = 90
    for line in file:
        moves.append(line.strip("\n"))
    for individual_move in moves:
        # Each move is a single action letter followed by an integer.
        action = individual_move[:1]
        value = int(individual_move[1:])
        if action == "F":
            # Move the ship toward the waypoint `value` times.
            ship_x_coord += waypoint_x_coord * value
            ship_y_coord += waypoint_y_coord * value
        elif action == "N":
            waypoint_y_coord += value
        elif action == "S":
            waypoint_y_coord -= value
        elif action == "E":
            waypoint_x_coord += value
        elif action == "W":
            waypoint_x_coord -= value
        # only "R" and "L" left
        else:
            # Dead Or Alive - You Spin Me Round (Like a Record) (Official Video)
            # Rotate the waypoint about the ship in 90-degree steps:
            # R (clockwise): (x, y) -> (y, -x); L: (x, y) -> (-y, x).
            rotate = {'R': (1, -1), 'L': (-1, 1)}
            for i in range(value//90):
                waypoint_x_coord, waypoint_y_coord = \
                    waypoint_y_coord * rotate[action][0], waypoint_x_coord * rotate[action][1]
    # Puzzle answer: Manhattan distance of the ship from the origin.
    print (abs(ship_x_coord) + abs(ship_y_coord))
from security import export_key, generate_keys
# Check if we have locally stored keys or generate new ones
def check_key(keyfile):
    """Return the contents of *keyfile*, or None if it cannot be read.

    Bug fix: the original returned an open file handle that was never
    closed (a resource leak). Reading inside a ``with`` block closes the
    file deterministically. The only visible caller (`main`) just does an
    ``is None`` check, so returning the key text is backward-compatible.
    """
    try:
        with open(keyfile, 'r') as key_file:
            return key_file.read()
    except OSError:
        # File missing or unreadable: signal "no key" to the caller.
        return None
def main():
    """Generate and export a fresh key pair when either key file is missing."""
    # Short-circuit `or`: the public key is only probed when the private
    # key was found (same evaluation order as before).
    missing_key = (check_key('private_key.pem') is None or
                   check_key('public_key.pem') is None)
    if missing_key:
        privatekey, publickey = generate_keys()
        export_key(privatekey, private=True)
        export_key(publickey)


if __name__ == '__main__':
    main()
|
#input exercise
# Prompt (Turkish: "enter your name") and read one line from stdin.
print("adinizi giriniz:")
x=input()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from pants.backend.java.target_types import JavaFieldSet, JavaGeneratorFieldSet
from pants.backend.kotlin.compile.kotlinc_plugins import (
KotlincPlugins,
KotlincPluginsForTargetRequest,
KotlincPluginsRequest,
KotlincPluginTargetsForTarget,
)
from pants.backend.kotlin.subsystems.kotlin import KotlinSubsystem
from pants.backend.kotlin.subsystems.kotlinc import KotlincSubsystem
from pants.backend.kotlin.target_types import (
KotlinFieldSet,
KotlinGeneratorFieldSet,
KotlinSourceField,
)
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import CoarsenedTarget, SourcesField
from pants.engine.unions import UnionRule
from pants.jvm.classpath import Classpath
from pants.jvm.compile import (
ClasspathDependenciesRequest,
ClasspathEntry,
ClasspathEntryRequest,
CompileResult,
FallibleClasspathEntries,
FallibleClasspathEntry,
)
from pants.jvm.compile import rules as jvm_compile_rules
from pants.jvm.jdk_rules import JdkEnvironment, JdkRequest, JvmProcess
from pants.jvm.resolve.common import ArtifactRequirements, Coordinate
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
class CompileKotlinSourceRequest(ClasspathEntryRequest):
    """Request to compile a coarsened Kotlin component into a classpath entry."""

    field_sets = (KotlinFieldSet, KotlinGeneratorFieldSet)
    # Java field sets may appear in the same coarsened component; they are
    # consumed (their deps contribute) but not compiled by this rule.
    field_sets_consume_only = (JavaFieldSet, JavaGeneratorFieldSet)
def compute_output_jar_filename(ctgt: CoarsenedTarget) -> str:
    """Name of the jar produced for a coarsened target, derived from its
    representative target's path-safe address spec."""
    spec = ctgt.representative.address.path_safe_spec
    return "{}.kotlin.jar".format(spec)
@rule(desc="Compile with kotlinc")
async def compile_kotlin_source(
    kotlin: KotlinSubsystem,
    kotlinc: KotlincSubsystem,
    request: CompileKotlinSourceRequest,
) -> FallibleClasspathEntry:
    """Compile one coarsened component of Kotlin sources with kotlinc.

    Resolves the component's dependency classpath, gathers Kotlin source
    files and kotlinc plugins, invokes the embeddable Kotlin compiler in a
    JVM process, and wraps the result (success, compile failure, or
    dependency failure) in a FallibleClasspathEntry.
    """
    # Request classpath entries for our direct dependencies.
    dependency_cpers = await Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request))
    direct_dependency_classpath_entries = dependency_cpers.if_all_succeeded()
    if direct_dependency_classpath_entries is None:
        # A dependency failed to compile; propagate without running kotlinc.
        return FallibleClasspathEntry(
            description=str(request.component),
            result=CompileResult.DEPENDENCY_FAILED,
            output=None,
            exit_code=1,
        )
    kotlin_version = kotlin.version_for_resolve(request.resolve.name)
    # Collect (target, sources) pairs; codegen is enabled so generated
    # Kotlin sources are included.
    component_members_with_sources = tuple(
        t for t in request.component.members if t.has_field(SourcesField)
    )
    component_members_and_source_files = zip(
        component_members_with_sources,
        await MultiGet(
            Get(
                SourceFiles,
                SourceFilesRequest(
                    (t.get(SourcesField),),
                    for_sources_types=(KotlinSourceField,),
                    enable_codegen=True,
                ),
            )
            for t in component_members_with_sources
        ),
    )
    # Resolve kotlinc plugins required by any member of the component.
    plugins_ = await MultiGet(
        Get(
            KotlincPluginTargetsForTarget,
            KotlincPluginsForTargetRequest(target, request.resolve.name),
        )
        for target in request.component.members
    )
    plugins_request = KotlincPluginsRequest.from_target_plugins(plugins_, request.resolve)
    local_plugins = await Get(KotlincPlugins, KotlincPluginsRequest, plugins_request)
    # Drop members whose (possibly codegen'd) sources turned out empty.
    component_members_and_kotlin_source_files = [
        (target, sources)
        for target, sources in component_members_and_source_files
        if sources.snapshot.digest != EMPTY_DIGEST
    ]
    if not component_members_and_kotlin_source_files:
        # Is a generator, and so exports all of its direct deps.
        exported_digest = await Get(
            Digest, MergeDigests(cpe.digest for cpe in direct_dependency_classpath_entries)
        )
        classpath_entry = ClasspathEntry.merge(exported_digest, direct_dependency_classpath_entries)
        return FallibleClasspathEntry(
            description=str(request.component),
            result=CompileResult.SUCCEEDED,
            output=classpath_entry,
            exit_code=0,
        )
    # Sandbox-relative prefixes for the tool, plugin, and user classpaths.
    toolcp_relpath = "__toolcp"
    local_kotlinc_plugins_relpath = "__localplugincp"
    usercp = "__cp"
    user_classpath = Classpath(direct_dependency_classpath_entries, request.resolve)
    # Fetch the compiler tool classpath, merge source digests, and pick a
    # JDK — all concurrently.
    tool_classpath, sources_digest, jdk = await MultiGet(
        Get(
            ToolClasspath,
            ToolClasspathRequest(
                artifact_requirements=ArtifactRequirements.from_coordinates(
                    [
                        Coordinate(
                            group="org.jetbrains.kotlin",
                            artifact="kotlin-compiler-embeddable",
                            version=kotlin_version,
                        ),
                        Coordinate(
                            group="org.jetbrains.kotlin",
                            artifact="kotlin-scripting-compiler-embeddable",
                            version=kotlin_version,
                        ),
                    ]
                ),
            ),
        ),
        Get(
            Digest,
            MergeDigests(
                (
                    sources.snapshot.digest
                    for _, sources in component_members_and_kotlin_source_files
                )
            ),
        ),
        Get(JdkEnvironment, JdkRequest, JdkRequest.from_target(request.component)),
    )
    extra_immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
        local_kotlinc_plugins_relpath: local_plugins.classpath.digest,
    }
    # Only tool/plugin classpaths are nailgun-cacheable; the user classpath
    # is added afterwards so it is excluded from the nailgun keys.
    extra_nailgun_keys = tuple(extra_immutable_input_digests)
    extra_immutable_input_digests.update(user_classpath.immutable_inputs(prefix=usercp))
    classpath_arg = ":".join(user_classpath.immutable_inputs_args(prefix=usercp))
    output_file = compute_output_jar_filename(request.component)
    process_result = await Get(
        FallibleProcessResult,
        JvmProcess(
            jdk=jdk,
            classpath_entries=tool_classpath.classpath_entries(toolcp_relpath),
            argv=[
                "org.jetbrains.kotlin.cli.jvm.K2JVMCompiler",
                *(("-classpath", classpath_arg) if classpath_arg else ()),
                "-d",
                output_file,
                *(local_plugins.args(local_kotlinc_plugins_relpath)),
                *kotlinc.args,
                *sorted(
                    itertools.chain.from_iterable(
                        sources.snapshot.files
                        for _, sources in component_members_and_kotlin_source_files
                    )
                ),
            ],
            input_digest=sources_digest,
            extra_immutable_input_digests=extra_immutable_input_digests,
            extra_nailgun_keys=extra_nailgun_keys,
            output_files=(output_file,),
            description=f"Compile {request.component} with kotlinc",
            level=LogLevel.DEBUG,
        ),
    )
    output: ClasspathEntry | None = None
    if process_result.exit_code == 0:
        # NB: `kotlinc` produces reproducible JARs by default, so there is no need for an additional
        # stripping step.
        output = ClasspathEntry(
            process_result.output_digest, (output_file,), direct_dependency_classpath_entries
        )
    return FallibleClasspathEntry.from_fallible_process_result(
        str(request.component),
        process_result,
        output,
    )
def rules():
    """Rules registering Kotlin compilation with the Pants engine."""
    kotlin_compile_rules = [
        *collect_rules(),
        *jvm_compile_rules(),
        UnionRule(ClasspathEntryRequest, CompileKotlinSourceRequest),
    ]
    return tuple(kotlin_compile_rules)
|
from mrsimulator import MRSimulator, SameKeyGroup, PairMultiset
def map(key, value):
    """Identity mapper: emit the incoming (key, value) pair unchanged."""
    return (key, value)
def reduce(key, group: SameKeyGroup):
    """Reducer: sum all values in *group*, emitting the total under key 1.

    The incoming *key* is ignored; every output is keyed by the constant 1.
    Fix: the original accumulated into a local named ``sum``, shadowing the
    builtin — the builtin is used directly now.
    """
    return 1, sum(value for _, value in group)
if __name__ == '__main__':
    # Demo: 1000 pairs sharing key 1, fanned across 100 mappers and
    # 10 reducers; the single reduce group sums 0..999.
    raw_data = range(0,1000)
    pairs = PairMultiset([(1,v) for v in raw_data])
    MRSimulator(num_mapper=100, num_reducer=10).execute(pairs,map,reduce)
|
# Read each player's runs (all scored off 60 balls).
runs = [
    int(input("enter the run scored by player %d in 60 balls: " % n))
    for n in (1, 2, 3)
]
# Strike rate = runs per 100 balls.
for n, r in enumerate(runs, 1):
    print("strikerate of player", n, "is", r * 100 / 60)
# At the same rate, 60 more balls doubles the runs.
for n, r in enumerate(runs, 1):
    print("run scored by player %d if they played 60 more balls is" % n, r * 2)
# Upper bound on sixes: every six contributes 6 runs.
for n, r in enumerate(runs, 1):
    print("maximum number of sixes player %d could hit =" % n, r // 6)
|
# -*- coding: utf-8; -*-
from collections import namedtuple
from pubsub import pub
import ConfigParser
import logging
import os.path
import sys
logger = logging.getLogger("platakart.core")
from pygame.time import Clock
import pygame
import pygame.event
import pygame.font
import pygame.joystick
from pytmx.util_pygame import load_pygame
from platakart.circuitselect import CircuitSelectScene
from platakart.config import parse_config
from platakart.config import parse_control_config
from platakart.controllerconf import ControllerConfScene
from platakart.kart import KartRecord
from platakart.kartselect import KartSelectScene
from platakart.raceresult import RaceResultScene
from platakart.title import TitleScene
from platakart.track import TrackScene
from platakart.trackselect import TrackSelectScene
# Custom timer event used to periodically log the frame rate.
SHOWFPSEVENT = pygame.USEREVENT + 1
GAMETITLE = "Platakart"
# NOTE(review): `namedtuple` is already imported at the top of this module;
# this re-import is redundant (left in place — removing imports is out of
# scope for a doc pass).
from collections import namedtuple
# One circuit = an ordered set of track ids plus display metadata.
CircuitRecord = namedtuple(
    "CircuitRecord",
    [
        "id",
        "name",
        # NOTE(review): field is named "descriptions" (plural) but
        # load_circuits fills it with a single description string — confirm
        # which spelling is intended before renaming anything.
        "descriptions",
        "difficulty",
        "tracks",
        "thumbnail",
    ])
class Resources(object):
    """Loads and caches all game assets from the package's resources dir.

    Categories: images, sounds, tilemaps, models, fonts, plus parsed kart
    and circuit records. ``load()`` is a generator so callers can drive
    loading incrementally and report progress over pubsub.
    """

    def __init__(self):
        self.images = dict()
        self.sounds = dict()
        self.tilemaps = dict()
        self.models = dict()
        self.fonts = dict()
        self.karts = dict()
        self.circuits = dict()
        current_dir = os.path.dirname(os.path.realpath(__file__))
        self.path = os.path.join(current_dir, "resources")
        self.config_path = os.path.join(self.path, "resources.ini")
        self.loaded = False
        self.objects_xml_path = os.path.join(self.path, "objects.xml")

    def load_images(self):
        """Load each configured image, yielding ("image", key) per asset."""
        for key, path in self.images.items():
            full_path = os.path.join(self.path, path)
            img = pygame.image.load(full_path).convert_alpha()
            # Re-blit onto a magenta-keyed surface so (255, 0, 255) pixels
            # render as transparent.
            temp = pygame.Surface(img.get_size())
            temp.fill((255, 0, 255))
            temp.blit(img, (0, 0))
            temp.set_colorkey((255, 0, 255))
            self.images[key] = temp
            logger.debug("Loaded image %s" % full_path)
            yield "image", key

    def load_sounds(self):
        """Load each configured sound, yielding ("sound", key) per asset."""
        for key, path in self.sounds.items():
            full_path = os.path.join(self.path, path)
            self.sounds[key] = pygame.mixer.Sound(full_path)
            logger.debug("Loaded sound %s" % full_path)
            yield "sound", key

    def load_tilemaps(self):
        """Load each configured tilemap, yielding ("tilemap", key)."""
        for key, path in self.tilemaps.items():
            full_path = os.path.join(self.path, path)
            self.tilemaps[key] = load_pygame(full_path)
            logger.debug("Loaded tilemap %s" % full_path)
            yield "tilemap", key

    def load_models(self):
        """Load each configured model (via pytmx), yielding ("model", key)."""
        for key, path in self.models.items():
            full_path = os.path.join(self.path, path)
            self.models[key] = load_pygame(full_path)
            logger.debug("Loaded model %s" % full_path)
            yield "model", key

    def load_fonts(self):
        # load just the filename, since pygame font objects require a size,
        # to create and it would be impractical to load every needed size.
        for key, path in self.fonts.items():
            full_path = os.path.join(self.path, path)
            self.fonts[key] = full_path
            logger.debug("Loaded font %s" % full_path)
            yield "font", key

    def load_config(self):
        """Read resources.ini and record per-category asset paths."""
        parser = ConfigParser.SafeConfigParser()
        parser.read(self.config_path)
        self.images.update(parser.items("images"))
        self.sounds.update(parser.items("sounds"))
        self.tilemaps.update(parser.items("tilemaps"))
        self.models.update(parser.items("models"))
        self.fonts.update(parser.items("fonts"))

    def load_karts(self):
        """Parse karts.ini into KartRecord objects keyed by section name."""
        parser = ConfigParser.SafeConfigParser()
        karts_path = os.path.join(self.path, "karts.ini")
        parser.read(karts_path)
        for section in parser.sections():
            get = lambda k: parser.get(section, k)
            get_float = lambda k: parser.getfloat(section, k)
            id = section
            acceleration_rate = get_float(u"acceleration_rate")
            body_surf = get(u"body_surf")
            brake_rate = get_float(u"brake_rate")
            coast_rate = get_float(u"coast_rate")
            chassis_mass = get_float(u"chassis_mass")
            damping = get_float(u"damping")
            stiffness = get_float(u"stiffness")
            description = get(u"description")
            front_wheel_offset_percent = get_float(
                u"front_wheel_offset_percent")
            rear_wheel_offset_percent = get_float("rear_wheel_offset_percent")
            jump_impulse = get_float(u"jump_impulse")
            max_motor_rate = get_float(u"max_motor_rate")
            # model_id is optional; fall back to the shared default model.
            if parser.has_option(section, "model_id"):
                model_id = get(u"model_id")
            else:
                model_id = "default"
            name = get("name")
            select_thumb = get("select_thumb")
            wheel_friction = get_float("wheel_friction")
            wheel_mass = get_float("wheel_mass")
            wheel_surf = get("wheel_surf")
            wheel_vertical_offset = get_float("wheel_vertical_offset")
            # the best way to make sure these args don't get out of
            # order, is to put 'id' first, then sort the rest of the
            # args in alphabetical order.
            self.karts[id] = KartRecord(
                id,
                acceleration_rate,
                body_surf,
                brake_rate,
                chassis_mass,
                coast_rate,
                damping,
                description,
                front_wheel_offset_percent,
                jump_impulse,
                max_motor_rate,
                model_id,
                name,
                rear_wheel_offset_percent,
                select_thumb,
                stiffness,
                wheel_friction,
                wheel_mass,
                wheel_surf,
                wheel_vertical_offset
            )

    def load_circuits(self):
        """Parse circuits.ini into CircuitRecord objects keyed by section."""
        parser = ConfigParser.SafeConfigParser()
        circuits_path = os.path.join(self.path, "circuits.ini")
        parser.read(circuits_path)
        for section in parser.sections():
            get = lambda k: parser.get(section, k)
            get_int = lambda k: parser.getint(section, k)
            id = section
            name = get(u"name")
            description = get(u"description")
            difficulty = get_int(u"difficulty")
            # "tracks" is a comma-separated list of track ids.
            tracks = get(u"tracks")
            tracks = tuple(i.strip() for i in tracks.split(u","))
            thumbnail = get(u"thumbnail")
            self.circuits[id] = CircuitRecord(
                id,
                name,
                description,
                difficulty,
                tracks,
                thumbnail
            )

    def load(self):
        """Generator driving a full load; yields once per asset loaded."""
        if not self.loaded:
            self.load_config()
            self.load_karts()
            # Validate that every surface a kart references is configured.
            msg = "Kart %s specified %s (%s) was not" \
                  " configured in resources.ini"
            props = set(["body_surf", "wheel_surf", "select_thumb"])
            for kart in self.karts.values():
                for prop in props:
                    if getattr(kart, prop) not in self.images:
                        raise KeyError(
                            msg % (kart.name, prop, getattr(kart, prop)))
            # NOTE(review): `total` omits self.models although load_models
            # runs below, so the reported percentage can exceed 1.0 —
            # confirm whether models should be counted.
            dicts = (self.images, self.sounds, self.tilemaps, self.fonts)
            total = sum(map(len, dicts))
            loaded = 0
            logger.debug(
                "Loading resources from config: %s" % str(self.config_path))
            gens = (self.load_images, self.load_sounds, self.load_tilemaps,
                    self.load_fonts, self.load_models)
            for gen in gens:
                for category, key in gen():
                    loaded += 1
                    pub.sendMessage("resources.loading",
                                    percent=float(loaded) / float(total),
                                    category=category,
                                    key=key)
                    yield
            self.load_circuits()
            # check the circuits for consistency
            msg = "Circuit %s specified %s (%s) was not " \
                  "configured in resources.ini"
            props = set(["thumbnail"])
            all_tracks = set(self.tilemaps.keys())
            for circuit in self.circuits.values():
                for prop in props:
                    if getattr(circuit, prop) not in self.images:
                        raise KeyError(
                            msg % (circuit.name, prop, getattr(circuit, prop)))
                for track in circuit.tracks:
                    if track not in all_tracks:
                        # NOTE(review): msg has three %s placeholders but
                        # only two values are supplied here, so this raise
                        # would itself fail with TypeError — confirm and
                        # fix the format arguments.
                        raise KeyError(msg % (circuit.name, track))
            self.loaded = True
            pub.sendMessage("resources.loaded")
class Game(object):
    """Top-level game object: owns the pygame window, the scene registry,
    the clock, and the main event loop; reacts to pubsub game.* messages."""

    def __init__(self, config, scenes, starting_scene, resources, input_map):
        self.clock = Clock()
        self.config = config
        self.current_scene = starting_scene
        self.input_map = input_map
        self.joysticks = list()
        self.resources = resources
        self.scenes = scenes
        self.screen = None
        self.shutting_down = False
        # Display dimensions come from config; fall back to 640x480 on
        # unparseable values.
        try:
            self.display_width = int(config.get("display_width", 640))
        except ValueError:
            logger.warning("Invalid DISPLAY_WIDTH")
            self.display_width = 640
        try:
            self.display_height = int(config.get("display_height", 480))
        except ValueError:
            logger.warning("Invalid DISPLAY_HEIGHT")
            self.display_height = 480
        self.display_size = (self.display_width, self.display_height)
        # Wire pubsub topics to game-level actions. Note reload_scene is
        # subscribed under both "game.reload-scene" and "reset-scene".
        pub.subscribe(self.switch_scene, "game.switch-scene")
        pub.subscribe(self.play_sound, "game.play-sound")
        pub.subscribe(self.stop_sound, "game.stop-sound")
        pub.subscribe(self.reload_scene, "game.reload-scene")
        pub.subscribe(self.reload_scene, "reset-scene")

    def reload_scene(self, tilt_percent):
        """Tear down the current scene, reload all resources, re-setup.

        Only fires on a fully-tilted input (tilt_percent == 1.0).
        """
        if tilt_percent != 1.0:
            return
        if self.current_scene is None:
            logger.error("No scene currently loaded!")
            pygame.quit()
            sys.exit(1)
        elif not self.current_scene.reloadable:
            logger.warning("%s is not reloadable"
                           % self.current_scene.get_name())
            return
        logger.debug("Reloading scene...")
        self.current_scene.teardown()
        # Show a simple "Loading" screen while resources reload.
        self.screen.fill((0, 0, 0))
        font = pygame.font.SysFont(None, 32)
        loading_surf = font.render("Loading", False, (255, 255, 255))
        loading_surf_rect = loading_surf.get_rect()
        loading_surf_rect.center = self.screen.get_rect().center
        self.screen.blit(loading_surf, loading_surf_rect)
        pygame.display.flip()
        # Replace the resources object and point every scene at it.
        self.resources = Resources()
        for scene in self.scenes.values():
            if hasattr(scene, "resources"):
                scene.resources = self.resources
        for _ in self.resources.load():
            pass
        self.current_scene.setup(self.current_scene.options)

    def init_pygame(self):
        """Initialise display, font, mixer and joysticks; return the screen."""
        logger.debug("Initializing pygame")
        pygame.display.init()
        pygame.font.init()
        pygame.mixer.init()
        flags = 0
        if self.config.get("full_screen", 0) == 1:
            flags = flags | pygame.FULLSCREEN
        screen = pygame.display.set_mode(self.display_size, flags)
        pygame.display.set_caption(GAMETITLE)
        logger.debug("Initializing joystick support")
        pygame.joystick.init()
        joystick_count = pygame.joystick.get_count()
        logger.debug("Provisioning %d joysticks" % joystick_count)
        for i in range(joystick_count):
            js = pygame.joystick.Joystick(i)
            js.init()
            self.joysticks.append(js)
        self.screen = screen
        return screen

    def switch_scene(self, name, options):
        """Tear down the current scene and set up the named one."""
        self.current_scene.teardown()
        self.current_scene = self.scenes[name]
        if options is None:
            self.current_scene.setup()
        else:
            self.current_scene.setup(dict(options))

    def play_sound(self, name=None, loops=0, maxtime=0, fade_ms=0):
        """Play a named sound if sound is enabled in the config."""
        if int(self.config.get("sound_enabled", 0)):
            self.resources.sounds[name].play(loops, maxtime, fade_ms)

    def stop_sound(self, name=None, fade_ms=0):
        """Stop a named sound, optionally fading it out."""
        if fade_ms == 0:
            self.resources.sounds[name].stop()
        else:
            self.resources.sounds[name].fadeout(fade_ms)

    def main_loop(self):
        """Run the game until quit; always shuts pygame down afterwards."""
        screen = self.init_pygame()
        logger.debug("Entering main loop")
        try:
            self._main_loop(screen)
        except KeyboardInterrupt:
            logger.debug("Keyboard interrupt received")
        logger.debug("Shutting down main loop")
        pygame.quit()

    def _main_loop(self, screen):
        # Get references to things that will be used in every frame to
        # avoid needless derefrencing.
        target_fps = float(self.config.get("target_fps", 30))
        pump = pygame.event.pump
        get = pygame.event.get
        QUIT = pygame.QUIT
        MOUSEMOTION = pygame.MOUSEMOTION
        MOUSEBUTTONDOWN = pygame.MOUSEBUTTONDOWN
        MOUSEBUTTONUP = pygame.MOUSEBUTTONUP
        # Log FPS every 3 seconds via a timer event.
        pygame.time.set_timer(SHOWFPSEVENT, 3000)
        self.current_scene.setup(dict())
        rect = screen.get_rect()
        while not self.shutting_down:
            pump()
            events = get()
            # give the game class the first stab at the events
            for event in events:
                t = event.type
                if t == QUIT:
                    self.shutting_down = True
                    break
                elif t == SHOWFPSEVENT:
                    logger.debug("FPS: %d" % self.clock.get_fps())
                elif t == MOUSEMOTION:
                    pub.sendMessage("input.mouse-move", pos=event.pos,
                                    rel=event.rel, buttons=event.buttons)
                elif t == MOUSEBUTTONDOWN:
                    pub.sendMessage("input.mouse-down", pos=event.pos,
                                    button=event.button)
                elif t == MOUSEBUTTONUP:
                    pub.sendMessage("input.mouse-up", pos=event.pos,
                                    button=event.button)
            # delegate the rest of the events to the InputMap
            self.input_map.update(events)
            for event_name, tilt_percent in self.input_map.yield_events():
                logger.debug("Emitting %s event with %f tilt"
                             % (event_name, tilt_percent))
                pub.sendMessage(event_name, tilt_percent=tilt_percent)
            delta = self.clock.tick(target_fps)
            self.current_scene.update(delta)
            dirty = self.current_scene.draw(screen, rect)
            pygame.display.update(dirty)
def create_game(config_path, control_config_path):
    """Wire config, input map, resources and all scenes into a Game.

    The game starts on the title scene.
    """
    conf = parse_config(config_path)
    input_map = parse_control_config(control_config_path)
    resources = Resources()
    scenes = {
        "title": TitleScene(resources),
        "kart-select": KartSelectScene(resources),
        "track-select": TrackSelectScene(resources),
        "circuit-select": CircuitSelectScene(resources),
        "controller-conf": ControllerConfScene(),
        "track": TrackScene(resources, conf),
        "race-result": RaceResultScene(),
    }
    return Game(conf, scenes, scenes["title"], resources, input_map)
|
from studentdata.student import Student
from studentdata.club import Club
from studentdata.supervisor import Supervisor
from studentdata.city import City
# Module-level fixtures shared by the tests below.
name = "name1 name2"
status = "status"
city = City(name="city")
supervisor = Supervisor(name="super")
clubs = [Club("Chess"), Club("Fencing")]
def test_cityPop():
    # Population should fall in [900, 6,000,000] — presumably randomised
    # inside City.__init__; confirm the band against that implementation.
    assert 900 <= city.population <= 6*10**6
def test_classes():
    """Student stores its supervisor, city and club objects unchanged."""
    student = Student(name, status, city, supervisor, clubs)
    assert student.supervisor.name == "super"
    assert student.city.name == "city"
    assert student.clubs[0].name == "Chess"
def test_manyInstances():
    # Rosters must be per-instance state: adding to club1 must not leak
    # into club2 (guards against a shared mutable class-level default).
    club1 = Club("club1")
    club2 = Club("club2")
    club1.addStudent(Student(name,status,city))
    assert len(club1.roster) == 1
    assert len(club2.roster) == 0
|
#! /usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import rospy
from std_msgs.msg import Int16
# Bring up MoveIt commander and a ROS node, then drive the arm to its named
# "start" pose and open the gripper.
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('move_group_python_interface_tutorial', anonymous=True)
# Raw gripper command channel (Int16 servo value).
pub = rospy.Publisher('gripper_cmd', Int16, queue_size=10)
robot = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
# Separate planning groups for the arm and the gripper.
arm_group = moveit_commander.MoveGroupCommander("arm")
gripper_group = moveit_commander.MoveGroupCommander("gripper")
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)
#[[0,-1.1,1.9,0,-1.4,0]]
# Plan + execute: named "start" pose for the arm...
arm_group.set_named_target("start")
plan_arm = arm_group.plan()
arm_group.go(wait=True)
# ...then the named "gripper_open" pose for the gripper.
gripper_group.set_named_target("gripper_open")
plan_gripper = gripper_group.plan()
gripper_group.go(wait=True)
rospy.sleep(2)
# The triple-quoted block below is dead (commented-out, Python 2 syntax)
# joint-space motion example code, kept verbatim.
'''
positions = [[0,0.0,1.57,0.4,0,0]]
for pos in positions:
group_variable_values = group.get_current_joint_values()
print group_variable_values
group_variable_values[0] = pos[0]
group_variable_values[1] = pos[1]
group_variable_values[2] = pos[2]
group_variable_values[3] = pos[3]
group_variable_values[4] = pos[4]
group_variable_values[5] = pos[5]
group.set_joint_value_target(group_variable_values)
plan2 = group.plan()
pub.publish(300)
group.go(wait=True)
rospy.sleep(2)
'''
rospy.sleep(5)
moveit_commander.roscpp_shutdown()
|
# noinspection PyUnusedLocal
# skus = unicode string
def calculate_offers(bill, item, quantity, offers):
    """Apply multi-buy offers for *item*, largest bundle first.

    *offers* is a sequence of (bundle_size, bundle_price) pairs.  For each
    offer, the number of whole bundles is appended to the item's offer list
    (possibly with a zero count, matching the original behaviour) and the
    remainder carries on to the next offer.  Returns the updated bill and
    the quantity left to charge at unit price.

    Fix: the original used Python 2 integer ``/``; under Python 3 that
    produced float item counts.  ``divmod`` makes the integer split explicit.

    NOTE(review): ``bill.copy()`` is a shallow copy — the nested per-item
    dicts are still shared with the caller's bill; confirm that is intended.
    """
    bill = bill.copy()
    for offer_quantity, offer_price in offers:
        bundles, quantity = divmod(quantity, offer_quantity)
        bill[item]['offers'].append(
            {'items': bundles, 'price': offer_price}
        )
    return bill, quantity
def remove_free_items(skus):
    """Drop items given away by "buy N, get one free" offers.

    Offers: 2E give a free B; 3F include a free F; 3N give a free M;
    3R give a free Q; 4U include a free U.  Mutates and returns *skus*
    (a list of single-character item codes).

    Fix: the original computed the bundle count with Python 2 integer
    ``/``, which raises TypeError in ``range()`` under Python 3; explicit
    floor division restores the intended behaviour.
    """
    # trigger item -> (bundle size, item removed for free per bundle)
    free_offer = {
        'E': (2, 'B'),
        'F': (3, 'F'),
        'N': (3, 'M'),
        'R': (3, 'Q'),
        'U': (4, 'U'),
    }

    def remove_free_item(quantity, offer_quantity, free_item):
        # One free item per complete bundle, if it is actually present.
        for _ in range(quantity // offer_quantity):
            if free_item in skus:
                skus.pop(skus.index(free_item))

    for s in set(skus):
        if s in free_offer:
            offer_quantity, free_item = free_offer[s]
            remove_free_item(skus.count(s), offer_quantity, free_item)
    return skus
def any_of_three(skus, bill):
    """Apply the "any 3 of S/T/X/Y/Z for 45" group offer.

    Scans the qualifying items most-expensive-first, removes each complete
    group of three from *skus*, and records one offer line of 45 per group.
    Returns the (mutated) skus list and bill.
    """
    price_lookup = {
        'S': 20,
        'T': 20,
        'X': 17,
        'Y': 20,
        'Z': 21,
    }
    # Most expensive first, so the discount covers the priciest items.
    target_skus = sorted([i for i in skus if i in 'STXYZ'] , key=lambda x: price_lookup.get(x, 0), reverse=True)

    def pop_items(items_to_pop):
        # Remove one occurrence of each grouped item from the basket.
        for i in items_to_pop:
            skus.pop(skus.index(i))

    count = 0
    tot = 0
    to_pop = []
    last_item = None
    for item in target_skus:
        # NOTE(review): this membership test is always true — target_skus
        # was already filtered to 'STXYZ' above.
        if item in 'STXYZ':
            if item not in bill:
                bill[item] = {
                    'standard':
                        {'items': 0, 'price': 0},
                    'offers': [],
                }
            count += 1
            to_pop.append(item)
            if count == 3:
                count = 0
                tot += 1
                last_item = item
                pop_items(to_pop)
                to_pop = []
    # All completed groups are billed as a single offer line attached to
    # the last item that completed a group ({'items': tot, 'price': 45},
    # i.e. tot groups at 45 each).
    if last_item is not None:
        bill[last_item]['offers'].append({'items': tot, 'price': 45})
    return skus, bill
def process_bill(bill):
    """Total a bill: standard-priced items plus every applied offer line."""
    total = 0
    for entry in bill.values():
        total += entry['standard']['items'] * entry['standard']['price']
        total += sum(offer['items'] * offer['price']
                     for offer in entry['offers'])
    return total
def checkout(skus):
    """Compute the checkout total for a string of single-letter SKUs.

    Pipeline: free-item offers first, then the "any 3 of S/T/X/Y/Z for 45"
    group offer, then per-item multi-buy offers, finally unit prices.
    Returns -1 if any SKU is unknown.
    """
    unit_prices = {
        'A': 50, 'B': 30, 'C': 20, 'D': 15, 'E': 40, 'F': 10, 'G': 20,
        'H': 10, 'I': 35, 'J': 60, 'K': 70, 'L': 90, 'M': 15, 'N': 40,
        'O': 10, 'P': 50, 'Q': 30, 'R': 50, 'S': 20, 'T': 20, 'U': 40,
        'V': 50, 'W': 20, 'X': 17, 'Y': 20, 'Z': 21,
    }
    # Multi-buy offers per SKU, largest bundle first.
    multi_buy = {
        'A': ((5, 200), (3, 130)),
        'B': ((2, 45),),
        'H': ((10, 80), (5, 45)),
        'K': ((2, 120),),
        'P': ((5, 200),),
        'Q': ((3, 80),),
        'V': ((3, 130), (2, 90)),
    }
    basket = sorted(skus)
    bill = dict()
    basket = remove_free_items(basket)
    basket, bill = any_of_three(basket, bill)
    for sku in set(basket):
        if sku not in unit_prices:
            return -1
        if sku not in bill:
            bill[sku] = {
                'standard':
                    {'items': 0, 'price': 0},
                'offers': [],
            }
        quantity = basket.count(sku)
        bill, quantity = calculate_offers(
            bill, sku, quantity, multi_buy.get(sku, tuple()))
        bill[sku]['standard']['items'] = quantity
        bill[sku]['standard']['price'] = unit_prices[sku]
    return process_bill(bill)
# Smoke test: total for a basket containing one of each item A..W.
print(checkout("ABCDEFGHIJKLMNOPQRSTUVW"))
|
# Print the integers 0..49 as a list.
print(list(range(50)))
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# Scrape only Slovak-language books
class BookItem(scrapy.Item):
    """Scraped fields for a single book listing (one field per attribute)."""
    # define the fields for your item here like:
    id = scrapy.Field()
    title = scrapy.Field()
    image_url = scrapy.Field()
    author = scrapy.Field()
    publisher = scrapy.Field()
    description = scrapy.Field()
    # Pricing: current price, the pre-discount price, and the discount.
    price = scrapy.Field()
    normal_price = scrapy.Field()
    discount = scrapy.Field()
    original_name = scrapy.Field()
    page_number = scrapy.Field()
    pledge = scrapy.Field()
    size = scrapy.Field()
    weight = scrapy.Field()
    language = scrapy.Field()
    isbn = scrapy.Field()
    release_year = scrapy.Field()
    publishing_house = scrapy.Field()
    cat_number = scrapy.Field()
    url = scrapy.Field()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from poseidon.providers.interface import ProviderInterface
from poseidon.acl.interface import ACLInterface
from poseidon.watchdog.networkbench import NetworkBench
from ..base.handlers import BaseHandler
from ..privilege import require_node_privileges
from .privileges import CDNPrivilege
class IndexHandler(BaseHandler):
    """Landing page of the CDN section."""
    def get(self):
        self.render('cdn/index.html')
class BandwidthHandler(BaseHandler):
    """Render the bandwidth chart page for an optional fd/td date range."""
    @require_node_privileges(CDNPrivilege.view_cdn, lambda c: 1)
    def get(self):
        # fd/td are 'YYYY-MM-DD' query args; on absence or parse failure
        # fall back to the chart's relative "last 7 days" window.
        start_date, end_date = self.get_argument('fd', None), self.get_argument('td', None)
        if not start_date or not end_date:
            # Deliberately unparseable so the except branch applies defaults.
            start_date, end_date = '0', '0'
        try:
            # Millisecond timestamps; end is exclusive (next midnight).
            # NOTE(review): strftime('%s') is a platform-specific (glibc)
            # extension that ignores tzinfo — confirm Linux-only deployment.
            start_ts = int(datetime.strptime(start_date, '%Y-%m-%d').strftime('%s')) * 1000
            end_ts = int((datetime.strptime(end_date, '%Y-%m-%d') + timedelta(days=1)).strftime('%s')) * 1000
        except ValueError:
            start_ts = 'now-7d'
            end_ts = 'now'
        self.render('cdn/bandwidth.html', start_ts=start_ts, end_ts=end_ts)
class BandwidthQueryHandler(BaseHandler):
    """Form POST target that redirects to the bandwidth page with fd/td."""
    @require_node_privileges(CDNPrivilege.view_cdn, lambda c: 1)
    def post(self):
        start_date = self.get_body_argument('from')
        end_date = self.get_body_argument('to')
        self.redirect('/cdn/bandwidth?fd={}&td={}'.format(start_date, end_date))
class PurgerHandler(BaseHandler):
    """Cache-purge UI: GET shows the form, POST submits URLs to a provider."""
    @require_node_privileges(CDNPrivilege.view_cdn, lambda c: 1)
    def get(self):
        self.render('cdn/purger.html')

    @require_node_privileges(CDNPrivilege.manage_cdn, lambda c: 1)
    def post(self):
        # One URL per whitespace-separated token.
        urls = self.get_body_argument('urls')
        url_list = urls.split()
        provider_name = self.get_body_argument('provider')
        provider_interface = ProviderInterface(provider_name)
        result_msg = provider_interface.purge(url_list)
        self.render(
            'cdn/purger_submit.html',
            result_msg=result_msg
        )
class WatchdogHandler(BaseHandler):
    """Watchdog overview page; fd/td date range is passed through as-is."""
    @require_node_privileges(CDNPrivilege.view_cdn, lambda c: 1)
    def get(self):
        fd = self.get_query_argument('fd', '')
        td = self.get_query_argument('td', '')
        self.render('cdn/watchdog.html', fd=fd, td=td)
class WatchdogChartHandler(BaseHandler):
    """Render a single NetworkBench chart for a task over an optional range."""
    @require_node_privileges(CDNPrivilege.view_cdn, lambda c: 1)
    def get(self):
        # chart_id and task_id are required; fd/td default to None.
        chart_id = self.get_query_argument('chart_id')
        task_id = self.get_query_argument('task_id')
        fd = self.get_query_argument('fd', None)
        td = self.get_query_argument('td', None)
        nb = NetworkBench()
        chart = nb.get_chart(chart_id, task_id, fd, td)
        self.render('cdn/watchdog_chart.html', chart=chart)
class ACLHandler(BaseHandler):
    """View (GET) and edit (POST add/del) per-domain user ACLs."""
    @require_node_privileges(CDNPrivilege.view_cdn, lambda c: 1)
    def get(self):
        acl = ACLInterface()
        acl_table = acl.get_acl_table()
        self.render(
            'cdn/acl.html',
            acl_table=acl_table
        )

    @require_node_privileges(CDNPrivilege.manage_cdn, lambda c: 1)
    def post(self):
        domain = self.get_body_argument('domain')
        user = self.get_body_argument('user')
        # action must be 'add' or 'del'; anything else is a programming
        # error and raises.
        action = self.get_body_argument('action')
        acl = ACLInterface()
        if action == 'add':
            acl.add_user(domain, user)
        elif action == 'del':
            acl.del_user(domain, user)
        else:
            raise NotImplementedError
        self.redirect('/cdn/acl')
# Route table, mounted under the application's CDN prefix.
# NOTE(review): '/purger' and '/purger/submit' both map to PurgerHandler
# (GET form vs. POST submit) — the distinct submit route appears unused by
# the handler itself; confirm templates post to the intended path.
handlers = [
    ('', IndexHandler),
    (r'/bandwidth', BandwidthHandler),
    (r'/bandwidth/query', BandwidthQueryHandler),
    (r'/purger', PurgerHandler),
    (r'/purger/submit', PurgerHandler),
    (r'/watchdog', WatchdogHandler),
    (r'/watchdog/chart', WatchdogChartHandler),
    (r'/acl', ACLHandler)
]
|
# -*- coding:utf-8 -*-
import json
import os
import re
import urllib.request
from PIL import Image
import colorsys
import math
import time
from functools import cmp_to_key
from sklearn.cluster import KMeans
from collections import Counter
import cv2 # for resizing image
from colorthief import ColorThief
from colormath.color_diff import delta_e_cie1976
def merge_image(l):
    """Tile the avatar images in *l* into a 12x12 grid and save as a PNG.

    Each cell is 59x33 pixels; unreadable files are reported and skipped
    without consuming a grid cell.  The output filename embeds the current
    Unix timestamp.

    Fix: Image.ANTIALIAS is a deprecated alias removed in Pillow 10;
    Image.LANCZOS is the same filter.

    NOTE(review): the canvas is fixed at 12x12 cells, so anything past 144
    images is pasted outside the canvas — confirm callers never pass more.
    """
    numPic = len(l)
    print(numPic)
    numrow = 12
    numcol = 12  # fixed grid height
    toImage = Image.new('RGBA', (59 * numrow, 33 * numcol))  # blank canvas
    x = 0  # current grid column
    y = 0  # current grid row
    for index, i in enumerate(l):
        try:
            img = Image.open(i)
        except IOError:
            print(u'Error: 没有找到文件或读取文件失败')
        else:
            img = img.resize((59, 33), Image.LANCZOS)
            toImage.paste(img, (x * 59, y * 33))
            x += 1
            if x == numrow:
                x = 0
                y += 1
    print()
    toImage.save('./sort_heros' + str(int(time.time())) + ".png")
def hex_to_rgb(value):
    """Convert a hex colour string such as '0x00ff00' to a tuple of ints.

    Bug fix: the original used ``value.lstrip('0x')``, which strips *any*
    run of leading '0'/'x' characters — '0x00ff00' lost its leading zero
    byte and parsed incorrectly.  Only the literal '0x' prefix is removed
    now.
    """
    if value.startswith('0x'):
        value = value[2:]
    lv = len(value)
    # Split into equal-width chunks of lv // 3 digits: 6 digits -> three
    # 2-digit RGB chunks; 8 digits (RGBA) also yields 2-digit chunks.
    return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_hex(rgb):
    """Format a 4-component colour (r, g, b, a) as a '0xRRGGBBAA' string.

    Generalised to accept any 4-element sequence (the original '%'-format
    required a tuple, so lists raised TypeError).
    """
    return '0x%02x%02x%02x%02x' % tuple(rgb)
def get_color_img(color_list):
    """Render each colour in *color_list* as a 59x33 swatch on a 12x12 grid.

    Entries are RGBA-like tuples; the final (alpha) component is dropped
    and the remainder used as a solid fill.  The output PNG name embeds the
    current Unix timestamp.

    Fix: Image.ANTIALIAS is a deprecated alias removed in Pillow 10;
    Image.LANCZOS is the same filter.
    """
    numPic = len(color_list)
    print(numPic)
    numrow = 12
    numcol = 12
    toImage = Image.new('RGB', (59 * numrow, 33 * numcol))
    x = 0
    y = 0
    for index, i in enumerate(color_list):
        try:
            lst = list(i)
            lst.pop(len(lst) - 1)  # drop the alpha component
            t = tuple(lst)
            img = Image.new('RGBA', (59, 33), t)
        except IOError:
            print(u'Error: 没有找到文件或读取文件失败')
        else:
            # The swatch is already 59x33, so this resize is a no-op kept
            # for structural parity with merge_image.
            img = img.resize((59, 33), Image.LANCZOS)
            toImage.paste(img, (x * 59, y * 33))
            x += 1
            if x == numrow:
                x = 0
                y += 1
    toImage.save('./colors_' + str(int(time.time())) + ".png")
def compute_average_image_color(p):
    """Return the average (r, g, b, a) colour of the image at path *p*.

    Bug fix: the original returned the alpha of whichever pixel the loop
    visited last (a loop-variable leak) and raised NameError on zero-sized
    images; the alpha channel is now averaged like the other channels, and
    a zero-sized image yields fully-transparent black.
    """
    img = Image.open(p)
    img = img.convert('RGBA')
    width, height = img.size
    r_total = 0
    g_total = 0
    b_total = 0
    a_total = 0
    count = 0
    for x in range(0, width):
        for y in range(0, height):
            r, g, b, a = img.getpixel((x, y))
            r_total += r
            g_total += g
            b_total += b
            a_total += a
            count += 1
    if count == 0:
        return (0, 0, 0, 0)
    return (int(r_total / count), int(g_total / count),
            int(b_total / count), int(a_total / count))
def get_dominant_color(image_path):
    """Pick a dominant colour by scoring every distinct colour in the image.

    Saturated, non-highlight colours are preferred: the colour with the
    largest (saturation + 0.1) * pixel-count score wins.  Returns an
    (r, g, b, a) tuple, or 0 if no colour qualifies.
    """
    # Convert so getcolors yields RGBA values.
    image = Image.open(image_path)
    image = image.convert('RGBA')
    # Thumbnail to cut down the number of pixels to score.
    image.thumbnail((200, 200))
    max_score = 0
    dominant_color = 0
    for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):
        # Skip fully transparent pixels.
        if a == 0:
            continue
        saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]
        # Integer luma approximation, then normalised to the 0..1 range.
        y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)
        y = (y - 16.0) / (235 - 16)
        #
        # Ignore very bright (highlight) colours.
        if y > 0.9:
            continue
        # Calculate the score, preferring highly saturated colors.
        # Add 0.1 to the saturation so we don't completely ignore grayscale
        # colors by multiplying the count by zero, but still give them a low
        # weight.
        score = (saturation + 0.1) * count
        if score > max_score:
            max_score = score
            dominant_color = (r, g, b, a)
    return dominant_color
def get_accent_color(path):
    """Return an accent colour for the image at ``path``.

    The accent is the mean colour of all pixels whose hue differs from the
    image's mean hue by more than ``delta_h``.  When such pixels exist, a
    swatch of the accent colour is painted into the top-left corner and the
    annotated copy is saved as 'res<path>'; otherwise (0, 0, 0) is returned.

    Bug fixes vs. the original:
    * the lazy ``filter`` object was consumed by ``len(list(...))`` and then
      iterated again, so every subsequent sum saw an empty iterator;
    * ``range()`` was given float arguments (``size / 2``);
    * ``putpixel`` was given float channel values.
    """
    im = Image.open(path)
    if im.mode != "RGB":
        im = im.convert('RGB')
    delta_h = 0.3
    # Materialise the pixel list once; it is scanned twice below.
    pixels = [im.getpixel((x, y)) for x in range(im.size[0]) for y in range(im.size[1])]
    avg_h = sum(colorsys.rgb_to_hsv(*p)[0] for p in pixels) / (im.size[0] * im.size[1])
    # Pixels whose hue is far from the average hue.
    beyond = [p for p in pixels if abs(colorsys.rgb_to_hsv(*p)[0] - avg_h) > delta_h]
    if len(beyond) > 0:
        r = sum(e[0] for e in beyond) / len(beyond)
        g = sum(e[1] for e in beyond) / len(beyond)
        b = sum(e[2] for e in beyond) / len(beyond)
        # Integer division: range() requires ints.
        for i in range(im.size[0] // 2):
            for j in range(im.size[1] // 10):
                # putpixel requires integer channel values.
                im.putpixel((i, j), (int(r), int(g), int(b)))
        im.save('res' + path)
        return (r, g, b)
    return (0, 0, 0)
def get_dominant_color_new(image_path, k=4, image_processing_size=None):
    """
    takes an image as input
    returns the dominant color of the image as a list
    dominant color is found by running k means on the
    pixels & returning the centroid of the largest cluster
    processing time is sped up by working with a smaller image;
    this resizing can be done with the image_processing_size param
    which takes a tuple of image dims as input
    >>> get_dominant_color(my_image, k=4, image_processing_size = (25, 25))
    [56.2423442, 34.0834233, 70.1234123]
    """
    # NOTE(review): cv2.imread yields channels in BGR order, so the centroid
    # below is [b, g, r] -- confirm callers expect that ordering.
    image = cv2.imread(image_path)
    # resize image if new dims provided
    if image_processing_size is not None:
        image = cv2.resize(image, image_processing_size,
                           interpolation=cv2.INTER_AREA)
    # reshape the image to be a flat list of pixels
    image = image.reshape((image.shape[0] * image.shape[1], 3))
    # cluster and assign labels to the pixels
    clt = KMeans(n_clusters=k)
    labels = clt.fit_predict(image)
    # count labels to find most popular
    label_counts = Counter(labels)
    # subset out most popular centroid
    dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]
    # Bug fix: return the whole 3-channel centroid as the docstring
    # documents; the original returned only the first channel ([0]).
    return list(dominant_color)
def get_dominant_color_3(image_path):
    """Dominant colour via ColorThief, returned as an (r, g, b, 0) tuple.

    A zero alpha element is appended so the result matches the RGBA tuples
    used elsewhere in this module.
    """
    thief = ColorThief(image_path)
    channels = list(thief.get_color(quality=6))
    print(channels)
    channels.append(0)
    print(channels)
    rgba = tuple(channels)
    print(rgba)
    return rgba
def get_all_hero_img():
    """Download (if not cached) every hero image listed in ./sort_hero.json.

    Returns the list of local file paths, or [] on any error.
    """
    try:
        img_list = []
        # Fix: close the JSON file deterministically (the original leaked
        # the handle).
        with open('./sort_hero.json') as f:
            heros_json = json.load(f)
        for d in heros_json['data']:
            # Derive the local file name from the image URL.
            file_path = './images/%s' % re.search(r'[a-zA-Z\.\_]+png', d['img'], re.M | re.I).group()
            img_list.append(file_path)
            if not os.path.exists(file_path):
                urllib.request.urlretrieve(d['img'], filename=file_path)
        return img_list
    except Exception as e:
        # Best-effort: report and return an empty list on any failure.
        print(e)
        return []
def color_sort(a):
    """Sort key for (path, rgba, dist) entries: the colour packed as one int.

    Prints the key as a side effect; on failure the error is printed and
    None is returned, mirroring the original best-effort behaviour.
    """
    try:
        packed = int(rgb_to_hex(a[1]), 16)
        print(packed)
        return packed
    except Exception as e:
        print(e)
def distance(c1, c2):
    """Plain Euclidean distance between two RGBA colours (alpha ignored)."""
    (r1, g1, b1, a1) = c1
    (r2, g2, b2, a2) = c2
    channel_deltas = (r1 - r2, g1 - g2, b1 - b2)
    return math.sqrt(sum(d * d for d in channel_deltas))
def ColourDistance(rgb_1, rgb_2):
    """Weighted ("redmean") colour distance between two RGBA tuples.

    Alpha channels are unpacked but ignored; red/blue weights vary with
    the mean red level, matching the classic redmean approximation.
    """
    r1, g1, b1, alpha1 = rgb_1
    r2, g2, b2, alpha2 = rgb_2
    mean_red = (r1 + r2) / 2
    dr, dg, db = r1 - r2, g1 - g2, b1 - b2
    weighted = ((2 + mean_red / 256) * dr * dr
                + 4 * dg * dg
                + (2 + (255 - mean_red) / 256) * db * db)
    return math.sqrt(weighted)
def sort_hero_by_color():
    # Build both contact sheets: the hero avatars and their dominant
    # colours, ordered by colour.
    img_list = get_all_hero_img()
    color_list = []
    for img in img_list:
        t = get_dominant_color_3(img)
        print(img, t)
        # d = distance(t, (0, 0, 0, 0))
        # Distance from mid-grey; stored in the tuple but NOT used as the
        # sort key below (sorting is lexicographic on the RGBA tuple).
        d = ColourDistance(t, (127, 127, 127, 127))
        c = (img, t, d)
        color_list.append(c)
    color_list = sorted(color_list, key=lambda c: c[1])
    i_list = []  # image paths, colour-sorted
    c_list = []  # matching dominant colours
    for color in color_list:
        i_list.append(color[0])
        c_list.append(color[1])
    merge_image(i_list)
    get_color_img(c_list)
# Script entry point: build and save the colour-sorted hero sheets.
if __name__ == '__main__':
    sort_hero_by_color()
|
from django.db import models
from django.db.models.query import QuerySet
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
from teams.models import Team
class PlayerMixin(object):
    """Shared behaviour for Player querysets and manager (currently empty)."""
    pass
class PlayerQuerySet(QuerySet, PlayerMixin):
    """QuerySet for Player that mixes in the PlayerMixin helpers."""
    pass
class PlayerManager(models.Manager, PlayerMixin):
    """Default manager that hides soft-deleted players (delete=True)."""
    def get_queryset(self):
        # Soft-delete filter: rows flagged delete=True never appear in
        # default queries.
        return PlayerQuerySet(self.model, using=self._db).filter(delete=False)
class Player(models.Model):
    """
    model to store player records

    A player belongs to one Team and carries basic cricket statistics.
    Deletion is soft: PlayerManager filters out rows with delete=True.
    """
    # NOTE(review): related_name is given a lazy translation proxy; Django
    # expects a plain string here -- verify this resolves as intended.
    team = models.ForeignKey(Team, related_name=_("team_player"), on_delete=models.CASCADE)
    first_name = models.CharField(_("Player First Name"), max_length=64)
    last_name = models.CharField(_("Player Last Name"), max_length=64)
    imageuri = models.URLField(_("Player Image URI"))
    jersey_no = models.IntegerField(_("Jersey Number"), validators=[MinValueValidator(0), ])
    country = models.CharField(_("Player Country"), max_length=24)
    matches = models.IntegerField(_("No. of Matches Played"))
    run = models.IntegerField(_("Player's Run"))
    highest_score = models.IntegerField(_("Highest Score"))
    halfcentury = models.IntegerField(_("Half Century"))
    century = models.IntegerField(_("Century"))
    # Soft-delete flag; see PlayerManager.get_queryset.
    delete = models.BooleanField(default=False)
    objects = PlayerManager()
    class Meta:
        verbose_name = "Player"
        verbose_name_plural = "Players"
        app_label = "players"
        ordering = ("-first_name", )
    # NOTE(review): __unicode__ is only consulted by Python 2; on Python 3
    # Django uses __str__ -- confirm which runtime this targets.
    def __unicode__(self):
        return "%s" % (self._get_full_name())
    def _get_full_name(self):
        # "First Last" display name.
        return self.first_name + " " + self.last_name
|
# Read unit prices a, b, c and required counts x, y, then print the
# cheapest of three purchase strategies.  2*c buys one unit toward both
# x and y at once (presumably the classic "half-and-half" pricing
# problem -- confirm against the original task statement).
a,b,c,x,y = map(int,input().split())
p = [
    2 * c * x + b * max(y-x, 0),  # pair-buy to cover all of x, top up y with b
    2 * c * y + a * max(x-y, 0),  # pair-buy to cover all of y, top up x with a
    a * x + b * y                 # buy everything individually
]
print(min(p))
import os
import pandas as pd
from PIL import Image, ImageDraw
import numpy as np
"""LABEL GENERATION"""
labels = pd.read_csv("WashingtonOBRace/corners.csv", delimiter = ',', names=['image_name', 'x_top_left', 'y_top_left',
'x_top_right', 'y_top_right',
'x_bottom_right', 'y_bottom_right',
'x_bottom_left', 'y_bottom_left'])
image_name_list = []
for file in os.listdir("WashingtonOBRace/images/"):
if file.endswith(".png"):
image_name_list.append(os.path.join(file))
for image_name in image_name_list:
im = Image.open("WashingtonOBRace/images/" + image_name)
image_width, image_height = im.size
matches = labels[labels['image_name'].str.match(image_name)]
image_name_without_ext = os.path.splitext(image_name)[0]
label_file_name = image_name_without_ext + '.txt'
try:
os.remove('WashingtonOBRace/labels/' + label_file_name)
except OSError:
pass
file = open('WashingtonOBRace/labels/' + label_file_name, 'w')
for index, row in matches.iterrows():
# print(row['image_name'], row['x_top_left'])
x_center = (row['x_top_left'] + row['x_top_right'] + row['x_bottom_right'] + row['x_bottom_left']) / 4 / image_width
y_center = (row['y_top_left'] + row['y_top_right'] + row['y_bottom_right'] + row['y_bottom_left']) / 4 / image_height
width = abs((max(row['x_top_right'], row['x_bottom_right']) - min(row['x_top_left'], row['x_bottom_left'])) / image_width)
height = (max(row['y_bottom_right'], row['y_bottom_left']) - min(row['y_top_right'], row['y_top_left'])) / image_height
file.write('0 '+ str(x_center) + ' ' + str(y_center) + ' ' + str(width) + ' ' + str(height) + '\n')
if width < 0:
print(image_name)
print(row)
print(x_center, y_center, width, height)
im1 = ImageDraw.Draw(im)
w, h = 220, 190
# shape = [(row['x_top_left'], row['y_top_left']), (row['x_top_right'], row['y_top_right'])]
shape = [(x_center*image_width,y_center*image_height), ((x_center+1/2*width)*image_width, (y_center+1/2*height)*image_height)]
im1.line(shape, fill="red", width=3)
im1.point((x_center * image_width, y_center * image_height), fill='green')
im.show()
input('wait for keypress')
"""VALIDATION/TRAIN SPLIT"""
validation_split = 32/280
shuffle_dataset = True
random_seed = 42
# Creating data indices for training and validation splits:
dataset_size = len(image_name_list)
indices = list(range(dataset_size))
val_len = int(np.floor(validation_split * dataset_size))
validation_idx = np.random.choice(indices, size=val_len, replace=False)
train_idx = list(set(indices) - set(validation_idx))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[val_len:], indices[:val_len]
# Write the train/validation image lists consumed by the training config.
# Fix: use context managers so each handle is flushed and closed (the
# original `file = open(...)` shadowed the builtin and was never closed).
def _write_split(list_path, idx_list):
    # Rewrite `list_path` with one image path per index in idx_list.
    try:
        os.remove(list_path)
    except OSError:
        pass
    with open(list_path, 'w') as split_file:
        for i in idx_list:
            split_file.write('data/custom/images/' + image_name_list[i] + '\n')
_write_split('train.txt', train_indices)
_write_split('valid.txt', val_indices)
import numpy as np
import lasagne
from numpy.random import RandomState
import theano
import theano.tensor as T
from braindecode.veganlasagne.layers import get_input_shape
def create_descent_function(layer, wanted_activation, learning_rate=0.1,
                            input_cost=None, n_trials=1, seed=983748374,
                            deterministic=True,
                            loss='sqr', init_factor=0.1):
    """
    Create descent function that updates random variable to match given wanted activation.
    Parameters
    ----------
    layer :
        Layer to compute descent from.
    wanted_activation: list or nd array
        Activation to move towards.
    learning_rate : float
        Learning rate for adam updates
    input_cost : function or None
        Optional additional cost on the input.
    n_trials : int
        Number of inputs to randomly initialize and optimize.
    seed : int
        Random seed to initialize random variable.
    deterministic : boolean
        Whether to use deterministic mode when computing activations,
        i.e. no dropout etc.
    loss : function or 'sqr'
        Loss to use between wanted activation and actual activation.
    init_factor : float
        Factor for initialization of random variable.
    Returns
    -------
    rand_in_var: theano shared variable
        Random input variable to be optimized
    update_fn: theano compiled function
        Function to compute updates, returns current cost
    """
    rng = RandomState(seed)
    wanted_activation = np.array(wanted_activation)
    in_shape = get_input_shape(layer)
    # One optimisation trial per row: replace the batch dimension.
    in_shape = [n_trials] + list(in_shape[1:])
    # Small random starting point, scaled by init_factor.
    rand_input = rng.randn(*in_shape).astype(np.float32) * init_factor
    rand_in_var = theano.shared(rand_input)
    # have to supply input_var extra in case of final reshape layer
    output = lasagne.layers.get_output(layer, deterministic=deterministic,
                                       inputs=rand_in_var, input_var=rand_in_var)
    if loss == 'sqr':
        # Broadcast the target over the trial dimension.
        output_cost = T.sqr(output - wanted_activation[np.newaxis])
    else:
        output_cost = loss(output, wanted_activation[np.newaxis])
    output_cost = T.mean(output_cost)
    if input_cost is None:
        cost = output_cost
    else:
        cost = output_cost + input_cost(rand_in_var)
    # Only the random input is optimised; network weights stay fixed.
    updates = lasagne.updates.adam(cost, [rand_in_var], learning_rate=learning_rate)
    update_fn = theano.function([], cost, updates=updates)
    return rand_in_var, update_fn
# import sbol3
# import labop
# import labop.type_inference
#
#
# # Pre-declare the ProtocolTyping class to avoid circularity with labop.type_inference
# class ProtocolTyping:
# pass
#
# primitive_type_inference_functions = {} # dictionary of identity : function for typing primitives
#
#
# # When there is no output, we don't need to do any inference
# def no_output_primitive_infer_typing(_, __: ProtocolTyping):
# pass
#
#
# #############################################
# # Liquid handling primitives
#
# LIQUID_HANDLING_PREFIX = 'https://bioprotocols.org/labop/primitives/liquid_handling/'
#
# # TODO: add amount information into sample records
#
#
# def liquid_handling_provision_infer_typing(executable, typing: ProtocolTyping):
# resource = executable.input_pin('resource').input_type(typing)
# location = executable.input_pin('destination').input_type(typing)
# samples = labop.ReplicateSamples(specification=resource)
# samples.in_location.append(location)
# executable.output_pin('samples').assert_output_type(typing, samples)
# primitive_type_inference_functions[LIQUID_HANDLING_PREFIX+'Provision'] = liquid_handling_provision_infer_typing
#
#
# def liquid_handling_dispense_infer_typing(executable, typing: ProtocolTyping):
# source = executable.input_pin('source').input_type(typing) # Assumed singular replicate
# assert isinstance(source, labop.ReplicateSamples), ValueError('Dispense must come from a homogeneous source, but found '+str(source))
# location = executable.input_pin('destination').input_type(typing).lookup()
# samples = labop.ReplicateSamples(specification=source.specification) # TODO: Fix the kludge here
# samples.in_location.append(location)
# executable.output_pin('samples').assert_output_type(typing, samples)
# primitive_type_inference_functions[LIQUID_HANDLING_PREFIX+'Dispense'] = liquid_handling_dispense_infer_typing
#
#
# def liquid_handling_transfer_infer_typing(executable, typing: ProtocolTyping):
# source = executable.input_pin('source').input_type(typing)
# destination = executable.input_pin('destination').input_type(typing)
# if isinstance(source, labop.ReplicateSamples):
# relocated = labop.ReplicateSamples(specification=source.specification)
# relocated.in_location.append(destination)
# elif isinstance(source, labop.LocatedSamples):
# relocated = labop.HeterogeneousSamples()
# kludge = labop.ReplicateSamples() # TODO: put something real here instead
# kludge.in_location.append(destination)
# relocated.replicate_samples.append(kludge)
# else:
# raise ValueError("Don't know how to infer type for Transfer with source of type "+str(type(source)))
# executable.output_pin('samples').assert_output_type(typing, relocated)
# primitive_type_inference_functions[LIQUID_HANDLING_PREFIX + 'Transfer'] = liquid_handling_transfer_infer_typing
#
#
# def liquid_handling_transferinto_infer_typing(executable, typing: ProtocolTyping):
# source = executable.input_pin('source').input_type(typing)
# destination = executable.input_pin('destination').input_type(typing)
# if isinstance(source, labop.ReplicateSamples) and isinstance(destination, labop.ReplicateSamples):
# contents = sbol3.Component(executable.display_id+'_contents', sbol3.SBO_FUNCTIONAL_ENTITY) # generic mixture
# mixture = labop.ReplicateSamples(specification=contents)
# mixture.in_location.append(destination)
# elif isinstance(source, labop.LocatedSamples) and isinstance(destination, labop.LocatedSamples):
# mixture = labop.HeterogeneousSamples()
# kludge = labop.ReplicateSamples() # TODO: put something real here instead
# kludge_loc = (destination.in_location if isinstance(destination, labop.ReplicateSamples) else destination.replicate_samples[0].in_location)
# kludge.in_location.append(kludge_loc[0])
# mixture.replicate_samples.append(kludge)
# else:
# raise ValueError("Don't know how to infer type for TransferInto "+executable.identity+" with source and destination types "+str(type(source))+', '+str(type(destination)))
# executable.output_pin('samples').assert_output_type(typing, mixture)
# primitive_type_inference_functions[LIQUID_HANDLING_PREFIX + 'TransferInto'] = liquid_handling_transferinto_infer_typing
#
#
# primitive_type_inference_functions[LIQUID_HANDLING_PREFIX+'PipetteMix'] = no_output_primitive_infer_typing
#
# #############################################
# # Plate handling primitives
#
# PLATE_HANDLING_PREFIX = 'https://bioprotocols.org/labop/primitives/plate_handling/'
#
#
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'Cover'] = no_output_primitive_infer_typing
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'Seal'] = no_output_primitive_infer_typing
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'AdhesiveSeal'] = no_output_primitive_infer_typing
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'ThermalSeal'] = no_output_primitive_infer_typing
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'Uncover'] = no_output_primitive_infer_typing
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'Unseal'] = no_output_primitive_infer_typing
# primitive_type_inference_functions[PLATE_HANDLING_PREFIX+'Incubate'] = no_output_primitive_infer_typing
#
#
# #############################################
# # Spectrophotometry primitives
#
# SPECTROPHOTOMETRY = 'https://bioprotocols.org/labop/primitives/spectrophotometry/'
#
#
# def spectrophotometry_infer_typing(executable, typing: ProtocolTyping):
# samples = executable.input_pin('samples').input_type(typing)
# # TODO: figure out how to add appropriate metadata onto these
# data = labop.LocatedData()
# data.from_samples = samples
# executable.output_pin('measurements').assert_output_type(typing, data)
# primitive_type_inference_functions[SPECTROPHOTOMETRY+'MeasureAbsorbance'] = spectrophotometry_infer_typing
# primitive_type_inference_functions[SPECTROPHOTOMETRY+'MeasureFluorescence'] = spectrophotometry_infer_typing
# primitive_type_inference_functions[SPECTROPHOTOMETRY+'MeasureFluorescenceSpectrum'] = spectrophotometry_infer_typing
|
# Multiple inheritance demo.
# Attribute lookup follows the C3 linearization (MRO) -- roughly
# left-to-right over the base list -- not a plain breadth-first search.
class P1:
    """First base class; defines both foo and bar."""
    def foo(self):
        print("P1中的foo")
    def bar(self):
        print("P1中的bar")
class P2:
    """Second base class; defines both foo and bar."""
    def foo(self):
        print("P2中的foo")
    def bar(self):
        print("P2中的bar")
class C1(P1):
    """Child of P1; overrides foo only (bar is inherited from P1)."""
    def foo(self):
        print("C1中的foo")
class C2(P2):
    """Child of P2; overrides bar only (foo is inherited from P2)."""
    def bar(self):
        print("C2中的bar")
class D1(C1, C2, P1, P2):
    """Multiple-inheritance leaf; everything is resolved through the MRO."""
    pass
d = D1()
# __mro__ shows the runtime lookup order: D1, C1, C2, P1, P2, object.
print(D1.__mro__)
d.foo()  # resolves through the MRO to C1.foo, so this prints "C1中的foo"
|
# Dictionary iteration demo: print each value in the dict.
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964
}
for x in thisdict.values():
    print(x)
*
**
***
****
***
**
*
n=6
6/2 = 3
n*n/2
for i=0 to 6
if(i<=3){
for (j=0;j<i+1;);
print()
j++
}else{
}
|
# The "if...elif...else"-statement
# If the numer is positive, we print an appropriate message
num = 3
if num > 0:
print(num, "is a positive number.")
print("This is always printed.")
num = -1
if num > 0:
print(num, "is a positive number.")
print("This also is always printed.")
# If else
num = 3
if num >= 0:
print(num, "positive or zero")
else:
print("negative")
num = -1
if num >= 0:
print(num, "positive or zero")
else:
print("negative")
num = 0
if num >= 0:
print(num, "positive or zero")
else:
print("negative")
# If elif else
num = 3
if num > 0:
print(num, "positive")
elif num == 0:
print(num, "zero")
else:
print(num, "negative")
num = -4.5
if num > 0:
print(num, "positive")
elif num == 0:
print(num, "zero")
else:
print(num, "negative")
num = 0
if num > 0:
print(num, "positive")
elif num == 0:
print(num, "zero")
else:
print(num, "negative")
# also nestable
num = float(input("Enter a numer: "))
if num >= 0:
if num == 0:
print(num, "zero")
else:
print(num, "positive")
else:
print(num, "negative")
|
"""
This module implements :class:`ImageSequence`, a 3D array.
:class:`ImageSequence` inherits from :class:`basesignal.BaseSignal` which
derives from :class:`BaseNeo`, and from :class:`quantities.Quantity` which
in turn inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
"""
from neo.core.analogsignal import AnalogSignal, _get_sampling_rate
import quantities as pq
import numpy as np
from neo.core.baseneo import BaseNeo
from neo.core.basesignal import BaseSignal
from neo.core.dataobject import DataObject
class ImageSequence(BaseSignal):
    """
    Representation of a sequence of images, as an array of three dimensions
    organized as [frame][row][column].
    Inherits from :class:`quantities.Quantity`, which in turn inherits from
    :class:`numpy.ndarray`.
    *usage*::
        >>> from neo.core import ImageSequence
        >>> import quantities as pq
        >>>
        >>> img_sequence_array = [[[column for column in range(20)]for row in range(20)]
        ...                       for frame in range(10)]
        >>> image_sequence = ImageSequence(img_sequence_array, units='V',
        ...                                sampling_rate=1*pq.Hz, spatial_scale=1*pq.micrometer)
        >>> image_sequence
        ImageSequence 10 frame with 20 px of height and 20 px of width; units V; datatype int64
        sampling rate: 1.0
        spatial_scale: 1.0
        >>> image_sequence.spatial_scale
        array(1.) * um
    *Required attributes/properties*:
        :image_data: (3D NumPy array, or a list of 2D arrays)
            The data itself
        :units: (quantity units)
        :sampling_rate: *or* **sampling_period** (quantity scalar) Number of
                                                samples per unit time or
                                                interval beween to samples.
                                                If both are specified, they are
                                                checked for consistency.
        :spatial_scale: (quantity scalar) size for a pixel.
    *Recommended attributes/properties*:
        :name: (str) A label for the dataset.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.
    *Optional attributes/properties*:
        :dtype: (numpy dtype or str) Override the dtype of the signal array.
        :copy: (bool) True by default.
    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.
    *Properties available on this object*:
        :sampling_rate: (quantity scalar) Number of samples per unit time.
            (1/:attr:`sampling_period`)
        :sampling_period: (quantity scalar) Interval between two samples.
            (1/:attr:`quantity scalar`)
        :spatial_scale: size of a pixel
    """
    # Neo container metadata: this signal lives inside a Segment.
    _single_parent_objects = ('Segment',)
    _single_parent_attrs = ('segment',)
    _quantity_attr = 'image_data'
    _necessary_attrs = (('image_data', pq.Quantity, 3),
                        ('sampling_rate', pq.Quantity, 0),
                        ('spatial_scale', pq.Quantity, 0))
    _recommended_attrs = BaseNeo._recommended_attrs
    def __new__(cls, image_data, units=None, dtype=None, copy=True, spatial_scale=None, sampling_period=None,
                sampling_rate=None, name=None, description=None, file_origin=None,
                **annotations):
        """
        Constructs new :class:`ImageSequence` from data.
        This is called whenever a new class:`ImageSequence` is created from
        the constructor, but not when slicing.
        __array_finalize__ is called on the new object.
        """
        if spatial_scale is None:
            raise ValueError('spatial_scale is required')
        # Accept either a 3D array or a list of 2D frames.
        image_data = np.stack(image_data)
        if len(image_data.shape) != 3:
            raise ValueError('list doesn\'t have the good number of dimension')
        obj = pq.Quantity(image_data, units=units, dtype=dtype, copy=copy).view(cls)
        obj.segment = None
        # function from analogsignal.py in neo/core directory
        # (reconciles sampling_rate and sampling_period, checking consistency)
        obj.sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
        obj.spatial_scale = spatial_scale
        return obj
    def __init__(self, image_data, units=None, dtype=None, copy=True, spatial_scale=None, sampling_period=None,
                 sampling_rate=None, name=None, description=None, file_origin=None,
                 **annotations):
        '''
        Initializes a newly constructed :class:`ImageSequence` instance.
        '''
        # Array payload is handled entirely in __new__; only Neo metadata
        # (name/description/annotations) is set up here.
        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
                            **annotations)
    # NOTE(review): unusual name -- this looks like it was meant to be (or
    # feed into) numpy's __array_finalize__ hook; as written it is never
    # called implicitly.  Confirm intent.
    def __array_finalize__spec(self, obj):
        self.sampling_rate = getattr(obj, 'sampling_rate', None)
        self.spatial_scale = getattr(obj, 'spatial_scale', None)
        self.units = getattr(obj, 'units', None)
        return obj
    def signal_from_region(self, *region):
        """
        Method that takes 1 or multiple regionofinterest, use the method of each region
        of interest to get the list of pixel to average.
        return a list of :class:`AnalogSignal` for each regionofinterest
        """
        if len(region) == 0:
            raise ValueError('no region of interest have been given')
        region_pixel = []
        for i, b in enumerate(region):
            # Each region object supplies its own pixel list.
            r = region[i].pixels_in_region()
            if not r:
                raise ValueError('region '+str(i)+'is empty')
            else:
                region_pixel.append(r)
        analogsignal_list = []
        for i in region_pixel:
            data = []
            for frame in range(len(self)):
                # Average the region's pixel values within this frame.
                picture_data = []
                for v in i:
                    picture_data.append(self.view(pq.Quantity)[frame][v[0]][v[1]])
                average = picture_data[0]
                for b in range(1, len(picture_data)):
                    average += picture_data[b]
                data.append((average * 1.0) / len(i))
            analogsignal_list.append(AnalogSignal(data, units=self.units,
                                                  sampling_rate=self.sampling_rate))
        return analogsignal_list
    def _repr_pretty_(self, pp, cycle):
        '''
        Handle pretty-printing the :class:`ImageSequence`.
        '''
        # Shape convention: [frame, row(height), column(width)].
        pp.text("{cls} {frame} frame with {width} px of width and {height} px of height; "
                "units {units}; datatype {dtype} ".format(cls=self.__class__.__name__,
                                                          frame=self.shape[0],
                                                          height=self.shape[1],
                                                          width=self.shape[2],
                                                          units=self.units.dimensionality.string,
                                                          dtype=self.dtype))
        def _pp(line):
            pp.breakable()
            with pp.group(indent=1):
                pp.text(line)
        for line in ["sampling rate: {}".format(self.sampling_rate),
                     "spatial_scale: {}".format(self.spatial_scale)]:
            _pp(line)
    def _check_consistency(self, other):
        '''
        Check if the attributes of another :class:`ImageSequence`
        are compatible with this one.
        '''
        if isinstance(other, ImageSequence):
            for attr in ("sampling_rate", "spatial_scale"):
                if getattr(self, attr) != getattr(other, attr):
                    raise ValueError("Inconsistent values of %s" % attr)
|
"""
default rig setup
main module
"""
import maya.cmds as mc
from rigLib.base import control
from rigLib.base import module
from rigLib.rig import spine
from rigLib.rig import neck
from rigLib.rig import ikChain
from rigLib.rig import leg
from rigLib.utils import joint
from . import defaultDeform
from . import project
# Project asset root and rig-wide skeleton/naming constants.
mainProjectPath = project.projectPath + '/assets'
rootJnt = 'root1_jnt'      # skeleton root joint
headJnt = 'head1_jnt'      # head joint driven by the neck rig
numSpineJoints = 6
numNeckJoints = 6
pelvisJnt = 'pelvis1_jnt'  # the tail rig attaches here
jawJnt = 'jaw1_jnt'        # the tongue rig attaches here
def build( characterName ):
    """
    main function to build character rig

    Opens a fresh scene, imports the character's model and builder files,
    creates the base rig structure, parents model and skeleton under it,
    then runs the deform and control setups.
    """
    # new scene
    mc.file(new=1, force= 1)
    # import model and builder scenes for the character
    subPath = '%s/%s/%s/%s_%s.mb'
    # Fix: the loop variable was named `type`, shadowing the builtin.
    for assetType in ['model', 'builder']:
        fullPath = subPath % (mainProjectPath, characterName, assetType, characterName, assetType)
        try:
            mc.file(fullPath, i = 1)
        except RuntimeError:
            print('FILE PATH INCORRECT --> ' + fullPath)
    # make base rig structure
    baseRig = module.Base(characterName = characterName, scale = project.rigScale)
    # parent model group under the rig's model group
    modelGrp = '%s_model_grp' % characterName
    mc.parent(modelGrp, baseRig.modelGrp)
    # parent skeleton and normalise joint display radius down the chain
    mc.parent(rootJnt, baseRig.jointsGrp)
    mc.joint(rootJnt, e=1, ch=1, radius = 0.2)
    # deform setup
    defaultDeform.build(baseRig, characterName)
    # control setup
    makeControlSetup(baseRig)
def makeControlSetup(baseRig):
    """
    make control setup

    Builds spine, neck, tail, tongue and all four limb rigs, then wires
    their attach groups to the spine joints and the body control.
    """
    # spine
    spineJoints = ['spine%s_jnt' % (i + 1) for i in range(numSpineJoints)]
    spineRig = spine.build(
        spineJoints,
        rootJoint = rootJnt,
        spineCurve = 'spine_crv',
        bodyLocator = 'body_loc',
        chestLocator = 'chest_loc',
        pelvisLocator = 'pelvis_loc',
        prefix = 'spine',
        rigScale = project.rigScale,
        baseRig = baseRig
        )
    # neck
    neckJoints = ['neck%s_jnt' % (i + 1) for i in range(numNeckJoints)]
    neckRig = neck.build(
        neckJoints,
        headJoint = headJnt,
        neckCurve = 'neck_crv',
        prefix = 'neck',
        rigScale = project.rigScale,
        baseRig = baseRig
        )
    mc.parentConstraint(spineJoints[-2], neckRig['neckBaseAttachGrp'], mo = 1)
    mc.parentConstraint(spineRig['bodyCtl'].C, neckRig['bodyAttachGrp'], mo = 1)
    # tail
    tailJoints = joint.listHierarchy('tail1_jnt')
    tailRig = ikChain.build(
        chainJoints = tailJoints,
        chainCurve = 'tail_crv',
        prefix = 'tail',
        rigScale = project.rigScale,
        taperScale = .4,
        fkParenting = False,
        baseRig = baseRig
        )
    mc.parentConstraint(pelvisJnt, tailRig['baseAttachGrp'], mo = 1)
    # tongue
    tongueJoints = joint.listHierarchy('tongue1_jnt')
    tongueRig = ikChain.build(
        chainJoints = tongueJoints,
        chainCurve = 'tongue_crv',
        prefix = 'tongue',
        rigScale = project.rigScale * 0.2,
        taperScale = 0.3,
        fkParenting = True,
        baseRig = baseRig
        )
    mc.parentConstraint(jawJnt, tongueRig['baseAttachGrp'], mo = 1)
    # left arm
    legJoints = ['l_shoulder1_jnt', 'l_elbow1_jnt', 'l_hand1_jnt', 'l_hand2_jnt', 'l_hand3_jnt']
    topToeJoints = ['l_foreToeA1_jnt', 'l_foreToeB1_jnt', 'l_foreToeC1_jnt', 'l_foreToeD1_jnt', 'l_foreToeE1_jnt']
    lArmRig = leg.build(
        legJoints = legJoints,
        topToeJoints = topToeJoints,
        pvLocator = 'l_arm_pole_vector_loc',
        scapJoint = 'l_scapula1_jnt',
        prefix = 'l_arm',
        rigScale = project.rigScale,
        baseRig = baseRig
        )
    mc.parentConstraint(spineJoints[-2], lArmRig['baseAttachGrp'], mo = 1)
    mc.parentConstraint(spineRig['bodyCtl'].C, lArmRig['bodyAttachGrp'], mo = 1)
    # right arm
    legJoints = ['r_shoulder1_jnt', 'r_elbow1_jnt', 'r_hand1_jnt', 'r_hand2_jnt', 'r_hand3_jnt']
    topToeJoints = ['r_foreToeA1_jnt', 'r_foreToeB1_jnt', 'r_foreToeC1_jnt', 'r_foreToeD1_jnt', 'r_foreToeE1_jnt']
    rArmRig = leg.build(
        legJoints = legJoints,
        topToeJoints = topToeJoints,
        pvLocator = 'r_arm_pole_vector_loc',
        scapJoint = 'r_scapula1_jnt',
        prefix = 'r_arm',
        rigScale = project.rigScale,
        baseRig = baseRig
        )
    mc.parentConstraint(spineJoints[-2], rArmRig['baseAttachGrp'], mo = 1)
    mc.parentConstraint(spineRig['bodyCtl'].C, rArmRig['bodyAttachGrp'], mo = 1)
    # left leg
    legJoints = ['l_hip1_jnt', 'l_knee1_jnt', 'l_foot1_jnt', 'l_foot2_jnt', 'l_foot3_jnt']
    topToeJoints = ['l_hindToeA1_jnt', 'l_hindToeB1_jnt', 'l_hindToeC1_jnt', 'l_hindToeD1_jnt', 'l_hindToeE1_jnt']
    lLegRig = leg.build(
        legJoints = legJoints,
        topToeJoints = topToeJoints,
        pvLocator = 'l_Leg_pole_vector_loc',
        scapJoint = '',
        prefix = 'l_leg',
        rigScale = project.rigScale,
        baseRig = baseRig
        )
    mc.parentConstraint(spineJoints[-2], lLegRig['baseAttachGrp'], mo = 1)
    # Bug fix: the original constrained baseAttachGrp a second time; the
    # body control should drive bodyAttachGrp, as done for both arms.
    # (Confirm leg.build exposes 'bodyAttachGrp' when scapJoint is ''.)
    mc.parentConstraint(spineRig['bodyCtl'].C, lLegRig['bodyAttachGrp'], mo = 1)
    # right leg
    legJoints = ['r_hip1_jnt', 'r_knee1_jnt', 'r_foot1_jnt', 'r_foot2_jnt', 'r_foot3_jnt']
    topToeJoints = ['r_hindToeA1_jnt', 'r_hindToeB1_jnt', 'r_hindToeC1_jnt', 'r_hindToeD1_jnt', 'r_hindToeE1_jnt']
    rLegRig = leg.build(
        legJoints = legJoints,
        topToeJoints = topToeJoints,
        pvLocator = 'r_Leg_pole_vector_loc',
        scapJoint = '',
        prefix = 'r_leg',
        rigScale = project.rigScale,
        baseRig = baseRig
        )
    mc.parentConstraint(spineJoints[-2], rLegRig['baseAttachGrp'], mo = 1)
    # Same copy-paste fix as the left leg.
    mc.parentConstraint(spineRig['bodyCtl'].C, rLegRig['bodyAttachGrp'], mo = 1)
|
#!/usr/bin/env python
import time, unittest, os, sys
from selenium import webdriver
from main.page.desktop_v3.login.pe_login import *
from main.page.desktop_v3.login.pe_logout import *
from main.page.desktop_v3.shop.pe_shop import *
from main.page.desktop_v3.product.pe_product import *
from main.page.desktop_v3.tx.pe_tx import *
from main.page.desktop_v3.sales.pe_myshop_order_status import *
from main.page.desktop_v3.sales.pe_myshop_order import *
from main.page.desktop_v3.sales.pe_myshop_order_process import *
from main.page.desktop_v3.purchase.pe_tx_payment_base import *
from main.page.desktop_v3.purchase.pe_tx_order_status import *
from main.page.desktop_v3.purchase.pe_tx_payment_confirmation import *
from main.page.desktop_v3.purchase.pe_tx_transaction_list import *
from main.activity.desktop_v3.activity_inbox_review import *
class TransactionActivity:
    """Drives an end-to-end purchase flow through the desktop v3 page objects."""
    # constructor of TransactionActivity
    def __init__(self, driver):
        # One page object per screen the flow touches; all share the driver.
        self.login = LoginPage(driver)
        self.logout = LogoutPage(driver)
        self.shop = ShopPage(driver)
        self.prod = ProductPage(driver)
        self.tx = TxPage(driver)
        self.order_status = OrderStatusPage(driver)
        self.myshop_order = MyshopOrderPage(driver)
        self.process_order = MyshopOrderProcessPage(driver)
        self.confirm_payment = PaymentConfirmationPage(driver)
        self.list_transact = TransactionListPage(driver)
        self.inbox_review = inboxReviewActivity()
        self.inbox_review.setObject(driver)
    def set_parameter(self, global_parameter):
        # Store the test configuration dict (site, credentials, flags, ...).
        self.dict = global_parameter
    def transaction_with(self, payment):
        # Run self.dict['loop'] buy transactions using the given payment
        # method, optionally confirming payment ("Bank") or driving the
        # order through to completion and review skip ("Deposit").
        order_product_name = None
        order_shop_name = None
        self.login.open(self.dict['site'])
        self.login.do_login(self.dict['email_buyer'], self.dict['password_buyer'])
        i = 0
        while i < self.dict['loop']:
            print("Automated Transaction -", (i + 1))
            # Buyer picks a product from the target shop and checks out.
            self.shop.domain(self.dict['site'], self.dict['domain_shop'])
            self.shop.choose_product()
            self.prod.add_to_cart(self.dict['shipping_agency'], self.dict['is_add_address'])
            self.tx.choose_payment(payment)
            if payment == "Partial Deposit":
                self.tx.choose_partial_deposit(self.dict['partial_deposit'])
            if self.dict["is_dropshipper"] == True:
                self.tx.dropshipper(self.dict['dropshipper_name'], self.dict['dropshipper_telp'])
            self.tx.checkout()
            self.tx.pay(self.dict['password_buyer'])
            # Bank transfer: submit a manual payment confirmation for the
            # latest invoice.
            if self.dict["is_confirm_payment"] == True and payment == "Bank":
                self.list_transact.open(self.dict['site'])
                inv = self.list_transact.get_last_inv()
                self.confirm_payment.open(self.dict['site'])
                self.confirm_payment.confirm_payment(inv, self.dict['payment_method'], self.dict['destination_bank'], self.dict['password_buyer'])
            # Deposit + finish: seller accepts and ships, buyer finishes
            # the order and skips the review.
            if self.dict["is_until_finish"] == True and payment == "Deposit":
                self.order_status.open(self.dict['site'])
                inv, order_product_name, order_shop_name = self.order_status.get_last_inv()
                # Switch session to the seller account.
                self.logout.open(self.dict['site'])
                self.login.open(self.dict['site'])
                self.login.do_login(self.dict['email_seller'], self.dict['password_seller'])
                self.myshop_order.open(self.dict['site'])
                self.myshop_order.response_order(inv)
                time.sleep(5)  # give the backend time to register the response
                self.process_order.open(self.dict['site'])
                self.process_order.confirm_shipping(inv)
                # Switch session back to the buyer account.
                self.logout.open(self.dict['site'])
                self.login.open(self.dict['site'])
                self.login.do_login(self.dict['email_buyer'], self.dict['password_buyer'])
                self.order_status.open(self.dict['site'])
                self.order_status.finish_order(inv)
                self.inbox_review.goto_inbox_review(self.dict['site'])
                self.inbox_review.change_filter_all()
                self.inbox_review.skip_review(order_product_name, order_shop_name)
            i = i + 1
from collections import Counter
import pandas as pd
import numpy as np
import math
import sys
import os
# THIS FUNCTION PROVIDES AN ALTERNATE DISTANCE METRIC TO SILHOUETTE
# SCORE.
#=========1=========2=========3=========4=========5=========6=========7=
#=========1=========2=========3=========4=========5=========6=========7=
# RETURNS: a list of the frequencydrop scores of each of the clusters,
# and a total, the average of all these scores.
def compute_freqdrop_score(cluster_directories):
    """Score clusters by how sharply their directory frequencies drop.

    Each cluster is a dict mapping directory path -> file count. A cluster
    scores high when a small "head" of directories holds most of its files
    right before the largest frequency drop.

    RETURNS: a list of the frequency-drop scores of each of the clusters,
    and a total: the dataset-size-weighted sum (i.e. weighted average) of
    those scores.
    """
    freqdrop_scores = []
    freqdrop_scores_scaled = []
    # Total files across all clusters, used to weight each cluster's score
    # (replaces the original redundant pre-pass over cluster_directories).
    dataset_size = sum(sum(pc.values()) for pc in cluster_directories)
    for j, path_counts in enumerate(cluster_directories):
        # number of unique directories in this cluster
        m = len(path_counts)
        cluster_size = sum(path_counts.values())
        if m > 1:
            # Frequencies in descending order (the pandas DataFrame sort in
            # the original only ever used this value list).
            freq_list = sorted(path_counts.values(), reverse=True)
            # consecutive frequency drops
            diffs = [freq_list[i] - freq_list[i + 1]
                     for i in range(len(freq_list) - 1)]
            # Index of the largest drop; `>=` keeps the LAST maximal drop,
            # matching the original loop.
            max_diff = 0
            max_diff_index = 0
            for i, d in enumerate(diffs):
                if d >= max_diff:
                    max_diff = d
                    max_diff_index = i
            # number of files above (and including) the largest drop
            head_size = sum(freq_list[:max_diff_index + 1])
            if m == 2:
                sigma = 0
            else:
                # delta = size of the head; sigma in [0, 1] penalizes a
                # head that spans many of the m - 1 possible drop points
                delta = max_diff_index + 1
                sigma = math.log(delta, m - 1)
            numerator = math.pow(1 - sigma, 2)
            freqdrop_score = (numerator * head_size) / cluster_size
        else:
            # trivial cluster: at most one unique directory scores perfectly
            freqdrop_score = 1
        freqdrop_scores.append(freqdrop_score)
        print("Frequency drop score for cluster",
              j, "is: ", freqdrop_score)
        # weight by the cluster's share of the whole dataset
        freqdrop_scores_scaled.append(
            freqdrop_score * cluster_size / dataset_size)
    freqdrop_total = sum(freqdrop_scores_scaled)
    print("Total frequency drop score: ", freqdrop_total)
    return freqdrop_scores, freqdrop_total
#=========1=========2=========3=========4=========5=========6=========7=
#=========1=========2=========3=========4=========5=========6=========7=
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import copy
import io
import operator
import prettyplotlib as ppl
import random
import cPickle as pickle
import pdb
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('pdf', fonttype=42)
import matplotlib.pyplot as plt
try:
import graph_tool.all as gt
except ImportError:
pass # useful to import stuff from this script in other scripts
import numpy as np
# import matplotlib.pyplot as plt
import sys
def debug(*text):
    """Wrapper around print() that can be globally switched on or off."""
    enabled = False  # flip to True to see the trace output
    if enabled:
        message = ' '.join(str(part) for part in text)
        print(message)
def load_graph(fpath):
    """Load a graph-tool graph from *fpath* and attach a name -> vertex map."""
    graph = gt.load_graph(fpath, fmt='gt')
    lookup = {}
    for vertex in graph.vertices():
        lookup[graph.vp['name'][vertex]] = vertex
    graph.name2node = lookup
    return graph
class SimilarityMatrix(object):
    """
    Class representing a similarity matrix for graph node similarities.
    The class holds a numpy matrix, the indices of which are masked by a dict,
    to allow the indexing with graph node descriptors in any format (e.g.,
    string).
    """
    def __init__(self, item2matrix_file, matrix_file):
        # map node descriptor -> row/column index in the numpy matrix
        mapping = dict()
        with io.open(item2matrix_file, encoding='utf-8') as infile:
            for line in infile:
                name, index = line.strip().split('\t')
                mapping[name] = int(index)
        self.item2matrix = mapping
        self.matrix = np.load(matrix_file)

    def __getitem__(self, key):
        # key is a pair of node descriptors, e.g. matrix['a', 'b']
        row = self.item2matrix[key[0]]
        col = self.item2matrix[key[1]]
        return self.matrix[row, col]
class MissionCollection(object):
    """This class represents a collection (set) of missions, i.e., navigation
    problems the simulation should undertake."""
    def __init__(self, missions):
        self.missions = missions
        self.stats = None
        self.stretch = None

    def __iter__(self):
        return iter(self.missions)

    def compute_stats(self):
        """Aggregate per-mission stats into an average success curve (in %).

        Call only after all missions have been simulated; runs at most once.
        """
        if self.stats is not None:
            return
        totals = np.zeros(STEPS_MAX + 1)
        for mission in self.missions:
            mission.compute_stats()
            totals += 100 * mission.stats
        self.stats = totals / len(self.missions)
class Mission(object):
    """This class represents a Point-to-Point Search mission"""
    # mission types (scenario labels used as dictionary keys elsewhere)
    missions = [
        u'Greedy Search',
        u'Greedy Search (Random)',
        u'Berrypicking',
        u'Berrypicking (Random)',
        u'Information Foraging',
        u'Information Foraging (Random)'
    ]

    def __init__(self, start, targets):
        self.steps = 0
        self.path = []
        self.start = start
        self.visited = set()
        self.targets = targets
        # keep an untouched copy for computing statistics later
        self.targets_original = copy.deepcopy(targets)
        self.stats = None

    def add(self, node):
        """Append *node* to the path and tick it off the current target set."""
        self.steps += 1
        self.path.append(node)
        self.visited.add(node)
        if node in self.targets[0]:
            self.targets[0].remove(node)

    def is_active(self):
        """Return True while hops remain and the current target set is non-empty."""
        return self.steps <= STEPS_MAX and bool(self.targets[0])

    def reset(self):
        """Clear the visited set and advance to the next target set."""
        self.visited = set()
        del self.targets[0]

    def compute_stats(self):
        """Success curve: 1.0 from the step where the first target was reached."""
        self.stats = np.zeros(STEPS_MAX + 1)
        try:
            hit = self.path.index(self.targets_original[0][0])
        except ValueError:
            return  # target never reached: curve stays all zeros
        self.stats[hit:] = 1.0
class IFMission(Mission):
    """This class represents an Information Foraging mission.

    Success accumulates in increments of 1/3 for each distinct target found
    along the path; the start node itself is excluded from the targets.
    """
    def __init__(self, start, targets):
        super(IFMission, self).__init__(start, targets)
    def compute_stats(self):
        # Success curve over the first STEPS_MAX + 1 hops.
        self.stats = np.zeros(STEPS_MAX + 1)
        # Work on a copy so the original target list stays intact.
        targets = copy.deepcopy(self.targets_original[0])
        # Remove the start node from the targets (it is trivially "found").
        i = targets.index(self.path[0])
        del targets[i]
        curr = 0
        for i, n in enumerate(self.path[:len(self.stats)]):
            if n in targets:
                targets.remove(n)
                # hard-coded share; assumes three target clusters -- TODO confirm
                curr += (1 / 3)  # new: normalize by no. of clusters instead
            self.stats[i] = curr
        # Extend the final value to the end of the curve.
        # NOTE(review): reuses the loop variable `i` after the loop; raises
        # NameError if the path is empty -- presumably paths never are.
        if i < len(self.stats):
            self.stats[i:] = curr
        # Clamp the curve at 1.0.
        self.stats = np.array([min(i, 1.0) for i in self.stats])
class BPMission(Mission):
    """This class represents a Berrypicking mission"""
    def __init__(self, start, targets):
        super(BPMission, self).__init__(start, targets)
    def add(self, node):
        # Unlike Mission.add, reaching ANY node of the current target set
        # completes the whole set.
        self.steps += 1
        self.path.append(node)
        self.visited.add(node)
        if node in self.targets[0]:
            self.targets[0] = []
    def is_active(self):
        # Active while hops remain and the current target set is non-empty.
        if self.steps > STEPS_MAX or not self.targets[0]:
            return False
        return True
    def compute_stats(self):
        # Build the success curve from the '*'-separated path segments,
        # one segment per visited target set.
        self.path_original = self.path[:]  # DEBUG
        self.stats = np.zeros(STEPS_MAX + 1)
        # Drop the first two path entries and any trailing '* *' markers.
        self.path = self.path[2:]
        if self.path[-2:] == ['*', '*']:
            self.path = self.path[:-2]
        # Truncate hops beyond the step budget ('*' separators and the hop
        # following each of them do not count as steps).
        diff = len(self.path) - 2 * self.path.count(u'*') - STEPS_MAX - 1
        if diff > 0:
            self.path = self.path[:-diff]
        # Split the path into per-target-set segments at the '*' markers.
        path = ' '.join(self.path).split('*')
        path = [l.strip().split(' ') for l in path]
        path = [path[0]] + [p[1:] for p in path[1:]]
        del self.targets_original[0]
        val = 0
        len_sum = -1
        # Each completed segment raises the success value by one equal share.
        for p in path:
            self.stats[len_sum:len_sum+len(p)] = val
            len_sum += len(p)
            val += (1 / len(self.targets_original))
        if len_sum < len(self.stats):
            fill = self.stats[len_sum - 1]
            # Credit the final (possibly unfinished) segment if its last hop
            # actually hit the corresponding target set.
            # NOTE(review): hard-coded 1/3 assumes three target sets -- confirm.
            if path[-1] and path[-1][-1] in self.targets_original[len(path)-1]:
                fill = min(fill+1/3, 1.0)
            self.stats[len_sum:] = fill
class Strategy(object):
    """This class represents a strategy for choosing the next hop.
    During missions, the find_next method is called to select the next node

    NOTE(review): this block contains Python-2-only constructs
    (`except TypeError, e`, `dict.iteritems`, `random.choice(dict.keys())`);
    the whole script targets Python 2.
    """
    strategies = [
        u'random',
        u'title',
        # u'title_stochastic',
        u'optimal'
    ]
    def __init__(self):
        pass
    @staticmethod
    def find_next(graph, strategy, mission, node, parent_node=None,
                  matrix=None):
        """Select the next node to go to in a navigation mission

        Returns a neighbor name, or None to signal backtracking/abort.
        """
        debug('strategy =', strategy)
        # resolve the node name to a graph-tool vertex and list its neighbors
        node_gt = graph.name2node[node]
        nodes_gt = [n for n in node_gt.out_neighbours()]
        nodes = [graph.vp['name'][n] for n in nodes_gt]
        debug('nodes =', nodes)
        if strategy == 'random':
            # random walk; allow stepping back to the parent as well
            if parent_node is not None and parent_node not in nodes:
                nodes.append(parent_node)
            return random.choice(nodes)
        # any direct neighbor that is a target wins immediately
        neighbor_targets = [n for n in nodes if n in mission.targets[0]]
        if neighbor_targets:
            debug('target in neighbors')
            return neighbor_targets[0]
        # otherwise rank unvisited neighbors by similarity to the next target
        nodes = [n for n in nodes if n not in mission.visited]
        try:
            candidates = {n: matrix[n, mission.targets[0][0]] for n in nodes}
        except KeyError:
            # NOTE(review): drops into the debugger; `candidates` stays
            # unbound if this path is taken.
            pdb.set_trace()
        except TypeError, e:
            print(e)
            pdb.set_trace()
        if not candidates:
            chosen_node = None  # abort search
        else:
            # occasionally pick a random candidate for the stochastic variant
            if strategy == 'title_stochastic' and random.random() <= 0.05:
                chosen_node = random.choice(candidates.keys())
                debug('randomly selecting node', chosen_node)
                return chosen_node
            # greedy: neighbor with the highest similarity to the target
            chosen_node = max(candidates.iteritems(),
                              key=operator.itemgetter(1))[0]
        debug('candidates are:')
        for k, v in candidates.items():
            debug(k, ':', v)
        if chosen_node == parent_node:
            # best option is where we came from: tell the caller to backtrack
            debug('backtracking to node', parent_node)
            return None
        debug('going to ', chosen_node)
        return chosen_node
class DataSet(object):
    """Bundles the graphs, similarity matrices and missions of one data set.

    NOTE(review): __init__ reads the module-level global `n_vals` -- confirm
    it is defined before instantiation. Construction has side effects: it
    precomputes and caches shortest-path files on disk.
    """
    def __init__(self, label, rec_types, pers_recs, personalization_types):
        self.label = label
        self.base_folder = os.path.join('..', 'data', self.label)
        self.folder_graphs = os.path.join(self.base_folder, 'graphs')
        self.folder_matrices = os.path.join(self.base_folder, 'matrices')
        # taken from the module-level global
        self.n_vals = n_vals
        self.rec_types = rec_types
        # one graph file per (rec_type, N), plus personalized variants
        self.graphs = {}
        for rec_type in self.rec_types:
            self.graphs[rec_type] = [
                os.path.join(
                    self.folder_graphs,
                    rec_type + '_' + unicode(N) + '.gt')
                for N in self.n_vals
            ]
            if rec_type in pers_recs:
                self.graphs[rec_type] += [
                    os.path.join(
                        self.folder_graphs,
                        rec_type + '_' + unicode(N) + p + '.gt')
                    for N in self.n_vals
                    for p in personalization_types
                ]
            self.compute_shortest_path_lengths(self.graphs[rec_type])
        # one MissionCollection per scenario, parsed from text files
        missions = {
            u'Greedy Search': self.load_missions(Mission, u'missions.txt'),
            u'Greedy Search (Random)': self.load_missions(Mission, u'missions_random.txt'),
            u'Information Foraging': self.load_missions(IFMission, u'missions_if.txt'),
            u'Information Foraging (Random)': self.load_missions(IFMission, u'missions_if_random.txt'),
            u'Berrypicking': self.load_missions(BPMission, u'missions_bp.txt'),
            u'Berrypicking (Random)': self.load_missions(BPMission, u'missions_bp_random.txt'),
        }
        # Structure: self.matrices[rec_type][graph][strategy]
        # Structure: self.missions[rec_type][graph][strategy][scenario]
        self.matrices = {}
        self.missions = {}
        for rec_type in self.rec_types:
            self.matrices[rec_type] = {}
            self.missions[rec_type] = {}
            for graph in self.graphs[rec_type]:
                self.matrices[rec_type][graph] = {}
                self.missions[rec_type][graph] = {}
                # [similarity matrix, cluster-similarity matrix] per strategy;
                # the random and optimal strategies need no matrix
                self.matrices[rec_type][graph]['random'] = [None, None]
                self.matrices[rec_type][graph]['optimal'] = [None, None]
                self.matrices[rec_type][graph]['title'] =\
                    [os.path.join(self.folder_matrices, 'title_matrix.npy'),
                     os.path.join(self.folder_matrices, 'title_matrix_c.npy')]
                self.matrices[rec_type][graph]['title_stochastic'] =\
                    self.matrices[rec_type][graph]['title']
                for strategy in Strategy.strategies:
                    self.missions[rec_type][graph][strategy] = {}
                    for m in missions:
                        # deep copy: every combination mutates its own missions
                        mc = copy.deepcopy(missions[m])
                        self.missions[rec_type][graph][strategy][m] = mc
    def compute_shortest_path_lengths(self, graph_files):
        """Precompute all-pairs shortest path lengths per graph file and
        cache them next to the graph as a pickled dict (.npy suffix)."""
        for i, gfile in enumerate(graph_files):
            print(gfile, i + 1, '/', len(graph_files))
            sp_file = gfile.rsplit('.', 1)[0] + '.npy'
            # skip graphs whose cache file already exists
            if os.path.exists(sp_file):
                print(' file exists!')
                continue
            print(' computing...')
            graph = load_graph(gfile)
            vertices = [n for n in graph.vertices()]
            dist = gt.shortest_distance(graph)
            d_max = np.iinfo(np.int32).max  # graph-tool uses this to mean inf
            sp = {}
            for vidx, vertex in enumerate(vertices):
                print(' ', vidx+1, '/', len(vertices), end='\r')
                # keep only reachable targets (finite distance)
                dists = zip(vertices, dist[vertex].a)
                dists = {graph.vp['name'][v]: d for v, d in dists if d < d_max}
                sp[graph.vp['name'][vertex]] = dists
            with open(sp_file, 'wb') as outfile:
                pickle.dump(sp, outfile, -1)
        print('done computing path lengths')
    def load_missions(self, mission_class, mission_file):
        """Parse a mission file into a MissionCollection.

        A line not starting with '*' begins a new mission (start node plus
        first target set); lines starting with '*' append further target sets.
        """
        fpath = os.path.join(self.base_folder, mission_file)
        with io.open(fpath, encoding='utf-8') as infile:
            missions = []
            start = None
            targets = []
            for line in infile:
                parts = line.strip().split('\t')
                if line[0] != '*':
                    # flush the previous mission before starting a new one
                    if start:
                        missions.append(mission_class(start, targets))
                    start = parts[0]
                    targets = [parts[1:]]
                else:
                    parts = line.strip().split('\t')
                    targets.append(parts[1:])
            # flush the final mission
            missions.append(mission_class(start, targets))
        m = MissionCollection(missions)
        return m
class Navigator(object):
    """Drives the navigation simulations for one DataSet.

    run() simulates every (rec_type, graph, strategy, scenario) combination
    and pickles the mutated DataSet to disk via save().
    """
    def __init__(self, data_set):
        self.data_set = data_set
    def run(self):
        """run the simulations for all strategies (optimal, random and informed)
        """
        print(' strategies...')
        matrix_file = ''
        matrix_s, matrix_c = None, None
        # run for all but the optimal version
        item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')
        for rec_type in self.data_set.graphs:
            for graph in self.data_set.graphs[rec_type]:
                print(' ', graph)
                gt_graph = load_graph(graph)
                for strategy in Strategy.strategies:
                    if strategy == 'optimal':
                        continue
                    print(' ', strategy)
                    # similarity matrix files for this strategy
                    # (index 0: plain, index 1: cluster variant)
                    m_new = self.data_set.matrices[rec_type][graph][strategy][0]
                    m_newc = self.data_set.matrices[rec_type][graph][strategy][1]
                    debug(' ----', m_new)
                    debug(' ----', m_newc)
                    if not m_new:
                        debug(' ---- not m_new')
                        matrix_s, matrix_c, matrix_file = None, None, None
                    elif matrix_file != m_new:
                        # only reload the matrices when the file changed
                        matrix_s = SimilarityMatrix(item2matrix, m_new)
                        matrix_c = SimilarityMatrix(item2matrix, m_newc)
                        matrix_file = m_new
                        debug(' ---- matrix_file != m_new')
                    # for miss in self.data_set.missions[rec_type][graph][strategy]:
                    for miss in Mission.missions:
                        print(' ', miss)
                        # IF/BP scenarios use the cluster matrix variant
                        if 'Information Foraging' in miss or 'Berrypicking' in miss:
                            matrix = matrix_c
                        else:
                            matrix = matrix_s
                        for m in self.data_set.missions[rec_type][graph][strategy][miss]:
                            # simulate one target set at a time
                            for ti in xrange(len(m.targets_original)):
                                start = m.path[-2] if m.path else m.start
                                debug('++++' * 16, 'mission', ti, '/',
                                      len(m.targets_original))
                                debug(m.targets_original[ti])
                                self.navigate(gt_graph, strategy, m, start,
                                              None, matrix)
                                # no progress on this target set: give up
                                if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):
                                    # print('breaking...')
                                    m.reset()
                                    break
                                # '*' separates target-set segments in the path
                                if not (ti + 1) == len(m.targets_original):
                                    m.path.append(u'*')
                                m.reset()
        # run the simulations for the optimal solution
        print(' optimal...')
        for rec_type in self.data_set.graphs:
            for graph in self.data_set.graphs[rec_type]:
                print(' ', graph)
                # load the cached shortest-path dict for this graph
                sp_file = graph.rsplit('.', 1)[0] + '.npy'
                with open(sp_file, 'rb') as infile:
                    sp = pickle.load(infile)
                for miss in self.data_set.missions[rec_type][graph]['optimal']:
                    for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:
                        for ti in xrange(len(m.targets_original)):
                            start = m.path[-2] if m.path else m.start
                            debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))
                            debug(m.targets_original[ti])
                            self.optimal_path(m, start, sp)
                            if not (ti + 1) == len(m.targets_original):
                                m.path.append(u'*')
                            m.reset()
        # (a ~120-line commented-out debugging/comparison block was removed
        # here for readability; see version control history if needed)
        # write the results to a file
        # self.write_paths()
        self.save()
    def optimal_path(self, mission, start, sp):
        """write a fake path to the mission, that is of the correct length
        if more evaluations such as the set of visited nodes are needed,
        this needs to be extended
        """
        mission.add(start)
        while mission.targets[0] and mission.is_active():
            # shortest distances from `start` to each remaining target
            ds = [(sp[start][t], t) for t in mission.targets[0] if t in sp[start]]
            if not ds:
                mission.add(u'-1')  # target not connected --> fill with dummies
                continue
            target = min(ds)
            # pad with dummy hops so the path length equals the distance
            for i in range(target[0] - 1):
                mission.add(u'0')
            mission.add(target[1])
            start = target[1]
    def navigate(self, graph, strategy, mission, node, parent_node=None,
                 matrix=None):
        """Recursively walk the graph using `strategy` until the mission
        runs out of steps, finds its targets, or aborts."""
        debug('-' * 32 + '\n')
        debug('navigate called with', node, '(parent: ', parent_node, ')')
        # debug(mission.targets[0])
        if not mission.is_active() or node == -1 and not parent_node:
            debug('aborting')
            return
        mission.add(node)
        # recurse
        while mission.is_active():
            out_node = Strategy.find_next(graph, strategy, mission, node,
                                          parent_node, matrix)
            debug('choosing node', out_node)
            if not out_node:  # backtracking
                debug('backtracking')
                if parent_node:
                    mission.add(parent_node)
                return
            self.navigate(graph, strategy, mission, out_node, node, matrix)
    def write_paths(self):
        """Dump every simulated mission path to <base_folder>/paths.txt."""
        fpath = os.path.join(self.data_set.base_folder, 'paths.txt')
        with open(fpath, 'w') as outfile:
            outfile.write('----' * 16 + ' ' + self.data_set.base_folder + '\n')
            for rec_type in self.data_set.graphs:
                outfile.write('----' * 16 + ' ' + rec_type + '\n')
                for graph in self.data_set.graphs[rec_type]:
                    outfile.write('----' * 8 + ' ' + graph + '\n')
                    for strategy in Strategy.strategies:
                        outfile.write('----' * 4 + strategy + '\n')
                        for miss in Mission.missions:
                            outfile.write('----' * 2 + miss + '\n')
                            stras = self.data_set.missions[rec_type][graph][strategy][miss]
                            for m in stras:
                                outfile.write('\t'.join(m.path) + '\n')
    def save(self):
        """Pickle the (now simulated) DataSet for the Evaluator to load."""
        fp = 'data_sets_' + self.data_set.label + '_' + str(STEPS_MAX) + '.obj'
        with open(fp, 'wb') as outfile:
            pickle.dump([self.data_set], outfile, -1)
class PlotData(object):
    """Lightweight container for precomputed mission statistics
    (populated externally, keyed by rec_type/graph/strategy/scenario)."""

    def __init__(self):
        self.missions = {}
class Evaluator(object):
"""Class responsible for calculating stats and plotting the results"""
    def __init__(self, datasets, stochastic=False, personalized=False,
                 suffix='', pdf=False, subtract_baseline=False):
        """Load (or compute and cache) plot statistics for each data set.

        Pickles named 'data_sets_<label>_<STEPS_MAX>[_personalized]_new.obj'
        serve as a cache; on a miss the raw simulation pickle is loaded and
        reduced via self.compute(), then cached.
        """
        self.data_sets = []
        self.stochastic = stochastic
        self.personalized = personalized
        self.personalized_suffix = '_personalized' if self.personalized else ''
        self.suffix = suffix
        self.subtract_baseline = subtract_baseline
        if self.subtract_baseline:
            self.suffix += '_sb'
        for dataset in datasets:
            try:
                # fast path: load the cached, already-reduced stats
                with open('data_sets_' + dataset + '_' + str(STEPS_MAX) +
                          self.personalized_suffix + '_new.obj', 'rb')\
                        as infile:
                    print('loading...')
                    self.data_sets.append(pickle.load(infile)[0])
                    print('loaded')
            except (IOError, EOFError):
                # cache miss: reduce the raw simulation pickle and cache it
                print('loading failed... computing from scratch (%s)' % dataset)
                with open('data_sets_' + dataset + '_' + str(STEPS_MAX) +
                          '.obj', 'rb') as infile:
                    data_set = pickle.load(infile)[0]
                data_set_new = self.compute(label=dataset, data_set=data_set)
                self.data_sets.append(data_set_new)
                print('saving to disk...')
                with open('data_sets_' + dataset + '_' + str(STEPS_MAX) +
                          self.personalized_suffix + '_new.obj', 'wb')\
                        as outfile:
                    pickle.dump([data_set_new], outfile, -1)
        if not os.path.isdir('plots'):
            os.makedirs('plots')
        # scenario name -> file-name abbreviation for saved plots
        self.sc2abb = {
            u'Greedy Search': u'ptp',
            u'Greedy Search (Random)': u'ptp_random',
            u'Information Foraging': u'if',
            u'Information Foraging (Random)': u'if_random',
            u'Berrypicking': u'bp',
            u'Berrypicking (Random)': u'bp_random',
        }
        self.colors = ['#FFA500', '#FF0000', '#0000FF', '#05FF05', '#000000']
        self.hatches = ['----', '/', 'xxx', '///', '---']
        self.linestyles = ['-', '--', ':', '-.']
        if not self.personalized:
            # NOTE(review): key 'RB' here vs 'CF' in graph_labels/graph_order
            # below -- confirm which label the CF graphs are meant to use.
            self.graphs = {
                'RB': ['rb_' + str(c) for c in n_vals],
                'MF': ['rbmf_' + str(c) for c in n_vals],
                'AR': ['rbar_' + str(c) for c in n_vals],
                'IW': ['rbiw_' + str(c) for c in n_vals],
            }
            self.graph_labels = {
                'CF': ['CF (' + str(c) + ')' for c in n_vals],
                'MF': ['MF (' + str(c) + ')' for c in n_vals],
                'AR': ['AR (' + str(c) + ')' for c in n_vals],
                'IW': ['IW (' + str(c) + ')' for c in n_vals],
            }
            self.graph_order = ['AR', 'CF', 'IW', 'MF']
        else:
            # personalized variants exist only for IW and MF
            self.graphs = {
                'IW': ['rbiw_' + str(c) + p for c in n_vals for p in personalized_types],
                'MF': ['rbmf_' + str(c) + p for c in n_vals for p in personalized_types],
            }
            # personalization suffix -> human-readable label
            p2pl = {
                '_personalized_min': 'Pure',
                '_personalized_median': 'Pure',
                '_personalized_max': 'Pure',
                '_personalized_mixed_min': 'Mixed',
                '_personalized_mixed_median': 'Mixed',
                '_personalized_mixed_max': 'Mixed',
            }
            self.graph_labels = {
                'IW': ['IW (' + p2pl[p] + ')' for c in n_vals for p in personalized_types],
                'MF': ['MF (' + p2pl[p] + ')' for c in n_vals for p in personalized_types],
            }
            self.graph_order = ['IW', 'MF']
        # rec_type identifier <-> display label
        self.rec_type2label = {
            'rb': 'CF',
            'rbmf': 'MF',
            'rbar': 'AR',
            'rbiw': 'IW',
        }
        self.label2rec_type = {v: k for k, v in self.rec_type2label.items()}
        self.plot_file_types = [
            '.png',
        ]
        if pdf:
            self.plot_file_types.append('.pdf')
    def compute(self, label, data_set):
        """Reduce a simulated DataSet to a PlotData of success-curve stats.

        For every (rec_type, graph, strategy, scenario) combination, computes
        the MissionCollection's aggregate stats and keeps only that array.
        """
        print('computing...')
        print(' ', label)
        pt = PlotData()
        pt.label = data_set.label
        pt.folder_graphs = data_set.folder_graphs
        if not self.personalized:
            for i, rec_type in enumerate(data_set.missions):
                pt.missions[rec_type] = {}
                for j, g in enumerate(n_vals):
                    graph = os.path.join(
                        data_set.folder_graphs,
                        rec_type + '_' + unicode(g) + '.gt'
                    )
                    pt.missions[rec_type][graph] = {}
                    for strategy in Strategy.strategies:
                        # for strategy in ['title']:
                        pt.missions[rec_type][graph][strategy] = {}
                        for scenario in Mission.missions:
                            # for scenario in ['Berrypicking']:
                            debug(rec_type, graph, strategy, scenario)
                            m = data_set.missions[rec_type][graph][strategy][scenario]
                            m.compute_stats()
                            pt.missions[rec_type][graph][strategy][scenario] = m.stats
        else:
            for i, rec_type in enumerate(data_set.missions):
                # only the personalizable recommenders have these graphs
                if rec_type not in pers_recs:
                    continue
                pt.missions[rec_type] = {}
                for j, g in enumerate(n_vals):
                    for pers_type in personalized_types:
                        graph = os.path.join(
                            data_set.folder_graphs,
                            rec_type + '_' + unicode(g) + pers_type + '.gt'
                        )
                        pt.missions[rec_type][graph] = {}
                        for strategy in Strategy.strategies:
                            pt.missions[rec_type][graph][strategy] = {}
                            for scenario in Mission.missions:
                                debug(rec_type, graph, strategy, scenario)
                                m = data_set.missions[rec_type][graph][strategy][scenario]
                                m.compute_stats()
                                pt.missions[rec_type][graph][strategy][scenario] = m.stats
        return pt
    def plot(self):
        """Plot per-(rec_type, N) success curves for every scenario.

        NOTE(review): indexes data_set.missions['cf_cosine'] and iterates a
        module-level `rec_types` -- confirm both still match the data sets
        produced by compute().
        """
        print('plot()')
        for data_set in self.data_sets:
            for scenario in Mission.missions:
                # one subplot per (rec_type, N) combination
                fig, axes = plt.subplots(len(data_set.missions),
                                         len(data_set.missions['cf_cosine']),
                                         figsize=(14, 14))
                for i, rec_type in enumerate(rec_types):
                    for j, g in enumerate((5, 10, 15, 20)):
                        graph = data_set.folder_graphs + rec_type + '_' + \
                            unicode(g) + '.txt'
                        for strategy in Strategy.strategies:
                            debug(rec_type, graph, strategy, scenario)
                            stats = data_set.missions[rec_type][graph][strategy][scenario]
                            ppl.plot(axes[i, j],
                                     np.arange(STEPS_MAX + 1),
                                     stats, label=strategy, linewidth=1)
                        axes[i, j].set_ylim(0, 100)
                        axes[i, j].set_xlim(0, STEPS_MAX * 1.1)
                        label = rec_type + ', Top' + str(g)
                        axes[i, j].set_title(label, size=10)
                fig.subplots_adjust(left=0.06, bottom=0.05, right=0.95,
                                    top=0.98, wspace=0.30, hspace=0.30)
                axes[0, 0].legend(loc=0, prop={'size': 6})
                for i in range(axes.shape[0]):
                    axes[i, 0].set_ylabel('Success Ratio')
                for j in range(axes.shape[1]):
                    axes[-1, j].set_xlabel('#Hops')
                plt.savefig('plots/' + data_set.label + '_' +
                            self.sc2abb[scenario] + '.pdf')
    def plot_aggregated(self):
        """One panel per (data set, scenario): for each rec_type at N=5 and
        N=20, plot the informed strategy with the largest area under curve."""
        print('plot_aggregated()')
        colors = [['#66C2A5', '#46AF8E', '#34836A', '#235847'],
                  ['#FC8D62', '#FB6023', '#DC4204', '#A03003'],
                  ['#8DA0CB', '#657EB8', '#47609A', '#334670']]
        styles = ['-', ':', '-.', '--', '-', ':', '-.', '--']
        fig, axes = plt.subplots(len(self.data_sets), len(self.sc2abb),
                                 figsize=(18, 7))
        for dind, data_set in enumerate(self.data_sets):
            for sind, scenario in enumerate(Mission.missions):
                debug(data_set.label, scenario)
                ax = axes[dind, sind]
                ax.set_title(scenario)
                for i, rec_type in enumerate(rec_types):
                    debug(' ', rec_type)
                    # for j, g in enumerate((5, 10, 15, 20)):
                    for j, g in enumerate((5, 20)):
                        c_max = None
                        val_max = -1
                        graph = data_set.folder_graphs + rec_type + '_' + \
                            unicode(g) + u'.txt'
                        for k, strategy in enumerate(Strategy.strategies):
                            # baselines are excluded from the "best" pick
                            if strategy in [u'random', u'optimal']:
                                continue
                            debug(' ', strategy, rec_type, g)
                            stats = data_set.missions[rec_type][graph][strategy][scenario]
                            # keep the strategy with the largest area under curve
                            auc = sum(stats)
                            if auc > val_max:
                                val_max = auc
                                c_max = stats
                        ls = styles[i]
                        lab = rec_type
                        if g == 5:
                            lab += u' '
                        lab += unicode(g)
                        x = np.arange(STEPS_MAX + 1)
                        cidx = 0 if i < len(rec_types)/2 else 1
                        ppl.plot(ax, x, c_max, label=lab, linewidth=2,
                                 # linestyle=ls, color=colors[i][j])
                                 linestyle=ls, color=colors[cidx][j])
                ax.set_xlabel('#Hops')
                ax.set_ylabel('Success Ratio')
                ax.set_ylim(0, 70)
                ax.set_xlim(0, STEPS_MAX * 1.1)
        for row in range(axes.shape[0]):
            # row label centered vertically to the left of the first panel
            t_x = (axes[row][0].get_ylim()[0] + axes[row][0].get_ylim()[1]) / 2
            label = [u'MovieLens', u'BookCrossing'][row]
            axes[row][0].text(-55, t_x, label, size='x-large')
        leg = plt.legend(bbox_to_anchor=(2.6, 1.25), loc='center right')
        leg.get_frame().set_linewidth(0.0)
        fig.subplots_adjust(left=0.2, bottom=0.08, right=0.75, top=0.93,
                            wspace=0.31, hspace=0.42)
        # plt.show()
        # plt.savefig('plots/navigation_aggregated_' + unicode(g) + '.pdf')
        plt.savefig('plots/navigation_aggregated.pdf')
def plot_bar(self):
    """Bar-plot final success ratios for the 'Information' scenarios.

    For every data set, draws per graph type and neighborhood size N:
    the optimal result (gray background bars), the simulated result
    (hatched bars) and, unless subtract_baseline is set, the random
    walk result (black dots).  One figure is saved per (data set,
    scenario) pair under plots/.  Finally prints how much better the
    simulations were than the random walks on average.
    """
    print('plot_bar()')
    # plot the legend in a separate plot
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # patches = [ax.bar([0], [0]) for i in range(4)]
    # for pidx, p in enumerate(patches):
    #     p[0].set_fill(False)
    #     p[0].set_edgecolor(self.colors[pidx])
    #     p[0].set_hatch(self.hatches[pidx])
    # figlegend = plt.figure(figsize=(7.75, 0.465))
    # figlegend.legend(patches, ['No Diversification', 'ExpRel', 'Diversify', 'Random'], ncol=4)
    # fig.subplots_adjust(left=0.19, bottom=0.06, right=0.91, top=0.92,
    #                     wspace=0.34, hspace=0.32)
    # # plt.show()
    # figlegend.savefig('plots/nav_legend.pdf')
    print('---------------------------------------------------------------')
    # plot the scenarios
    better = []  # per-graph ratios simulation/random, summarized at the end
    x_vals = [1, 2, 4, 5, 7, 8, 10, 11]  # bar positions: pairs with gaps
    for scenario in Mission.missions:
        if 'Information' not in scenario:
            continue
        print(scenario)
        av_5, av_20 = [], []
        for data_set in self.data_sets:
            print('   ', data_set.label)
            fig, ax = plt.subplots(1, figsize=(6, 3))
            # plot optimal solutions
            bar_vals = []
            for graph_type in self.graph_order:
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals):
                    # print('    ', N, rec_type)
                    g = data_set.folder_graphs + '/' + rec_type +\
                        '_' + str(N) + '.gt'
                    o = data_set.missions[rec_type][g]['optimal'][scenario][-1]
                    r = data_set.missions[rec_type][g]['random'][scenario][-1]
                    val = o-r if self.subtract_baseline else o
                    # print('        ', o)
                    bar_vals.append(val)
            bars = ax.bar(x_vals, bar_vals, align='center', color='#EFEFEF')
            # Beautification
            for bidx, bar in enumerate(bars):
                # bar.set_fill(False)
                bar.set_edgecolor('#AAAAAA')
            # plot simulation results
            bar_vals = []
            for graph_type in self.graph_order:
                # print('    ', graph_type)
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals):
                    # print('        ', N)
                    g = data_set.folder_graphs + '/' + rec_type +\
                        '_' + str(N) + '.gt'
                    if self.stochastic:
                        s = data_set.missions[rec_type][g]['title_stochastic'][scenario][-1]
                    else:
                        s = data_set.missions[rec_type][g]['title'][scenario][-1]
                    r = data_set.missions[rec_type][g]['random'][scenario][-1]
                    o = data_set.missions[rec_type][g]['optimal'][scenario][-1]
                    if self.subtract_baseline:
                        bar_vals.append(s-r)
                    else:
                        bar_vals.append(s)
                    if r == 0:
                        # cannot compute the improvement ratio for this graph
                        print('for', g, 'random walk found no targets')
                    else:
                        better.append(s/r)
                    if N == 5:
                        av_5.append(s)
                    elif N == 20:
                        av_20.append(s)
                    print('    ', scenario, graph_type, N, '%.2f' % (s))
                    # pdb.set_trace()
                    # print('        %.2f, %.2f, %.2f' % (r, bar_vals[-1], o))
                    # if s > o:
                    #     print(scenario, data_set.label, graph_type, N, '%.2f > %.2f' % (bar_vals[-1], o))
                    #     print(g)
                    #     pdb.set_trace()
            bars = ax.bar(x_vals, bar_vals, align='center')
            # print('    5: %.2f' % np.mean(av_5))
            # print('    20: %.2f' % np.mean(av_20))
            # Beautification
            for bidx, bar in enumerate(bars):
                bar.set_fill(False)
                # hatch alternates per N, color changes per graph type
                bar.set_hatch(self.hatches[bidx % 2])
                bar.set_edgecolor(self.colors[int(bidx/2)])
            if not self.subtract_baseline:
                # plot random walk solutions (as a dot)
                bar_vals = []
                for graph_type in self.graph_order:
                    rec_type = self.label2rec_type[graph_type]
                    for nidx, N in enumerate(n_vals):
                        g = data_set.folder_graphs + '/' + rec_type +\
                            '_' + str(N) + '.gt'
                        val = data_set.missions[rec_type][g]['random'][scenario][-1]
                        bar_vals.append(val)
                ax.plot(x_vals, bar_vals, c='black', ls='', marker='.', ms=10)
            ax.set_xlim(0.25, 3 * len(self.graphs))
            ax.set_xticks([x - 0.25 for x in x_vals])
            for tic in ax.xaxis.get_major_ticks():
                tic.tick1On = tic.tick2On = False
            labels = [g for k in self.graph_order for g in self.graph_labels[k]]
            ax.set_xticklabels(labels, rotation='-50', ha='left')
            ax.set_ylim(0, 100)
            ylabel = 'Found Nodes (%)'
            ax.set_ylabel(ylabel)
            plt.tight_layout()
            # encode scenario and configuration in the file name
            stochastic_suffix = '_stochastic' if self.stochastic else ''
            sc = scenario.lower().replace(' ', '_').replace('(', '').replace(')', '')
            fname = data_set.label + '_' + str(STEPS_MAX) + '_' + sc + \
                stochastic_suffix + self.suffix
            fpath = os.path.join('plots', fname)
            for ftype in self.plot_file_types:
                plt.savefig(fpath + ftype)
            plt.close()
    # print('random walks average is %.2f' % np.average(hugo))
    print('simulations were on average %.2f times better than'
          ' the random walks' % np.average(better))
    print('---------------------------------------------------------------')
def plot_bar_personalized(self):
    """Bar-plot final success ratios for the personalized graph variants.

    Like plot_bar(), but iterates over all scenarios and additionally
    over the personalized graph types (personalized_types).  Draws
    optimal (gray), simulated (hatched) and random-walk (dots) results
    and saves one figure per (data set, scenario) pair under
    plots/personalized/.
    """
    print('plot_bar_personalized()')
    # plot the scenarios
    better = []  # per-graph ratios simulation/random, summarized at the end
    x_vals = [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15]  # triples with gaps
    for scenario in Mission.missions:
        hugo = []  # random-walk baselines (only used by the commented print)
        # print(scenario)
        for data_set in self.data_sets:
            # print('   ', data_set.label)
            fig, ax = plt.subplots(1, figsize=(12, 6))
            # plot optimal solutions
            bar_vals = []
            for graph_type in self.graph_order:
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals):
                    for pidx, pt in enumerate(personalized_types):
                        g = data_set.folder_graphs + '/' + rec_type +\
                            '_' + str(N) + pt + '.gt'
                        bar_vals.append(data_set.missions[rec_type][g]['optimal'][scenario][-1])
            bars = ax.bar(x_vals, bar_vals, align='center', color='#EFEFEF')
            # Beautification
            for bidx, bar in enumerate(bars):
                # bar.set_fill(False)
                bar.set_edgecolor('#AAAAAA')
            # plot simulation results
            bar_vals = []
            for graph_type in self.graph_order:
                # print('    ', graph_type)
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals):
                    for pidx, pt in enumerate(personalized_types):
                        g = data_set.folder_graphs + '/' + rec_type +\
                            '_' + str(N) + pt + '.gt'
                        if self.stochastic:
                            s = data_set.missions[rec_type][g]['title_stochastic'][scenario][-1]
                        else:
                            s = data_set.missions[rec_type][g]['title'][scenario][-1]
                        r = data_set.missions[rec_type][g]['random'][scenario][-1]
                        o = data_set.missions[rec_type][g]['optimal'][scenario][-1]
                        bar_vals.append(s)
                        better.append(s/r)
                        hugo.append(r)
                        # print('        %.2f, %.2f, %.2f' % (r, bar_vals[-1], o))
                        if s > o:
                            # sanity check: a simulation should not beat optimal
                            print(scenario, data_set.label, graph_type, '%.2f > %.2f' % (bar_vals[-1], o))
            bars = ax.bar(x_vals, bar_vals, align='center')
            # Beautification
            for bidx, bar in enumerate(bars):
                bar.set_fill(False)
                bar.set_hatch(self.hatches[bidx % 2])
                bar.set_edgecolor(self.colors[3])
            # plot random walk solutions (as a dot)
            bar_vals = []
            for graph_type in self.graph_order:
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals):
                    for pidx, pt in enumerate(personalized_types):
                        g = data_set.folder_graphs + '/' + rec_type +\
                            '_' + str(N) + pt + '.gt'
                        bar_vals.append(data_set.missions[rec_type][g]['random'][scenario][-1])
            ax.plot(x_vals, bar_vals, c='black', ls='', marker='.', ms=10)
            ax.set_xlim(0.25, x_vals[-1] + 0.75)
            ax.set_xticks([x - 0.25 for x in x_vals])
            for tic in ax.xaxis.get_major_ticks():
                tic.tick1On = tic.tick2On = False
            labels = [g for k in self.graph_order for g in self.graph_labels[k]]
            ax.set_xticklabels(labels, rotation='-50', ha='left')
            ax.set_ylim(0, 100)
            ylabel = 'Found Nodes (%)'
            ax.set_ylabel(ylabel)
            plt.tight_layout()
            stochastic_suffix = 'stochastic_' if self.stochastic else ''
            fname = data_set.label + '_' + str(STEPS_MAX) + '_personalized_' + \
                stochastic_suffix +\
                scenario.lower().replace(' ', '_').replace('(', '').replace(')', '') +\
                self.suffix
            if not os.path.isdir(os.path.join('plots', 'personalized')):
                os.makedirs(os.path.join('plots', 'personalized'))
            fpath = os.path.join('plots', 'personalized', fname)
            for ftype in self.plot_file_types:
                plt.savefig(fpath + ftype)
            plt.close()
    # print('random walks average is %.2f' % np.average(hugo))
    print('simulations were on average %.2f times better than'
          ' the random walks' % np.average(better))
def plot_bar_personalized_simple(self):
    """Bar-plot a reduced personalized comparison (N=20, two variants).

    For every non-random scenario and data set, plots optimal results
    (gray background bars) and the 'title' simulation results (hatched
    bars) for the pure and mixed personalized-median graphs, plus the
    random-walk baseline as dots.  Figures are saved under
    plots/personalized_simple/.
    """
    print('plot_bar_personalized_simple()')
    personalized_types_simple = [
        '_personalized_median',
        '_personalized_mixed_median',
    ]
    n_vals_simple = [
        20
    ]
    # plot the scenarios
    # NOTE: the simulation/random ratio collection is disabled below, so
    # `better` stays empty in this method.
    better = []
    x_vals = [1, 2, 4, 5]  # bar positions: one pair per graph type
    for scenario in Mission.missions:
        # print(scenario)
        if 'random' in scenario.lower():
            continue
        for data_set in self.data_sets:
            # print('   ', data_set.label)
            fig, ax = plt.subplots(1, figsize=(4, 3))
            # plot optimal solutions
            bar_vals = []
            for graph_type in self.graph_order:
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals_simple):
                    for pidx, pt in enumerate(personalized_types_simple):
                        g = data_set.folder_graphs + '/' + rec_type + \
                            '_' + str(N) + pt + '.gt'
                        bar_vals.append(
                            data_set.missions[rec_type][g]['optimal'][
                                scenario][-1])
            bars = ax.bar(x_vals, bar_vals, align='center', color='#EFEFEF')
            # Beautification
            for bidx, bar in enumerate(bars):
                # bar.set_fill(False)
                bar.set_edgecolor('#AAAAAA')
            # plot simulation results
            bar_vals = []
            for graph_type in self.graph_order:
                # print('    ', graph_type)
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals_simple):
                    for pidx, pt in enumerate(personalized_types_simple):
                        g = data_set.folder_graphs + '/' + rec_type + \
                            '_' + str(N) + pt + '.gt'
                        # if self.stochastic:
                        #     s = data_set.missions[rec_type][g][
                        #         'title_stochastic'][scenario][-1]
                        # else:
                        s = data_set.missions[rec_type][g]['title'][scenario][-1]
                        # r = data_set.missions[rec_type][g]['random'][scenario][-1]
                        # o = data_set.missions[rec_type][g]['optimal'][scenario][-1]
                        bar_vals.append(s)
                        # better.append(s / r)
                        # print('        %.2f, %.2f, %.2f' % (r, bar_vals[-1], o))
                        # if s > o:
                        #     print(scenario, data_set.label, graph_type,
                        #           '%.2f > %.2f' % (bar_vals[-1], o))
            bars = ax.bar(x_vals, bar_vals, align='center')
            # Beautification
            for bidx, bar in enumerate(bars):
                bar.set_fill(False)
                bar.set_hatch(self.hatches[1])
                # first pair uses colors[2], second pair colors[3]
                bar.set_edgecolor(self.colors[(bidx > 1) + 2])
            # plot random walk solutions (as a dot)
            bar_vals = []
            for graph_type in self.graph_order:
                rec_type = self.label2rec_type[graph_type]
                for nidx, N in enumerate(n_vals_simple):
                    for pidx, pt in enumerate(personalized_types_simple):
                        g = data_set.folder_graphs + '/' + rec_type + \
                            '_' + str(N) + pt + '.gt'
                        bar_vals.append(
                            data_set.missions[rec_type][g]['random'][
                                scenario][-1])
            ax.plot(x_vals, bar_vals, c='black', ls='', marker='.', ms=10)
            ax.set_xlim(0.25, x_vals[-1] + 0.75)
            ax.set_xticks([x - 0.25 for x in x_vals])
            for tic in ax.xaxis.get_major_ticks():
                tic.tick1On = tic.tick2On = False
            labels = [
                'MF (Pure)', 'MF (Mixed)',
                'IW (Pure)', 'IW (Mixed)',
            ]
            ax.set_xticklabels(labels, rotation='-50', ha='left')
            ax.set_ylim(0, 100)
            ylabel = 'Found Nodes (%)'
            ax.set_ylabel(ylabel)
            plt.tight_layout()
            stochastic_suffix = 'stochastic_' if self.stochastic else ''
            fname = data_set.label + '_' + str(
                STEPS_MAX) + '_personalized_' + \
                stochastic_suffix + \
                scenario.lower().replace(' ', '_').replace('(',
                                                           '').replace(
                    ')', '') + \
                self.suffix + '_simple'
            if not os.path.isdir(os.path.join('plots', 'personalized_simple')):
                os.makedirs(os.path.join('plots', 'personalized_simple'))
            fpath = os.path.join('plots', 'personalized_simple', fname)
            for ftype in self.plot_file_types:
                plt.savefig(fpath + ftype)
            plt.close()
    # print('random walks average is %.2f' % np.average(hugo))
    # Fix: the ratio collection above is commented out, so `better` may be
    # empty; np.average([]) would emit a RuntimeWarning and print nan.
    if better:
        print('simulations were on average %.2f times better than'
              ' the random walks' % np.average(better))
def plot_sample(self):
    """Plot and save an example evaluation showing all types of background
    knowledge used in the simulations.

    Uses the second data set, the 'Greedy Search' scenario and N=15;
    one subplot per recommender type, one curve per strategy.  Saves
    plots/sample.png and plots/sample.pdf.
    """
    print(u'plot_sample()')
    data_set = self.data_sets[1]
    scenario = u'Greedy Search'
    titles = [u'Collaborative Filtering', u'Content-based']
    fig, axes = plt.subplots(1, 2, figsize=(10, 5))
    for i, rec_type in enumerate(data_set.missions):
        graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'
        for strategy in Strategy.strategies:
            m = data_set.missions[rec_type][graph][strategy][scenario]
            m.compute_stats()
            ppl.plot(axes[i], np.arange(STEPS_MAX + 1),
                     m.stats, label=strategy, linewidth=2)
        axes[i].set_xlabel(u'#Hops')
        axes[i].set_ylabel(u'Success Ratio')
        axes[i].set_ylim(0, 85)
        axes[i].set_xlim(0, STEPS_MAX * 1.01)
        # NOTE(review): titles[i] assumes exactly two recommender types in
        # data_set.missions, in CF/content-based order — confirm
        axes[i].set_title(titles[i])
        ppl.legend(axes[i], loc=0)
    # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',
    #              size='xx-large', x=0.5)
    fig.subplots_adjust(left=0.08, right=0.97, top=0.9)
    plt.savefig('plots/sample.png')
    plt.savefig('plots/sample.pdf')
def print_results(self):
    """Collect the final success values into a DataFrame and print a
    pivot table of the 'title' strategy per scenario.

    Rows hold (value, data set, recommender type, is-random flag, N,
    strategy, scenario); the printed pivot aggregates 'val' by scenario
    versus (is_random, data_set).
    """
    import pandas as pd
    # upper bound on the number of result rows to pre-allocate
    num_rows = len(self.data_sets) * len(rec_types) * len(n_vals) *\
        len(Strategy.strategies) * len(Mission.missions)
    df = pd.DataFrame(
        index=np.arange(0, num_rows),
        columns=['val', 'data_set', 'rec_type', 'is_random', 'N', 'strategy', 'scenario']
    )
    i = 0
    for data_set in self.data_sets:
        # print(data_set.label)
        dm = data_set.missions
        for rec_type in dm:
            for nidx, N in enumerate(n_vals):
                graph = data_set.folder_graphs + '/' + rec_type + \
                    '_' + str(N) + '.gt'
                # print('   ', graph)
                for strategy in dm[rec_type][graph]:
                    # print('       ', strategy)
                    for scenario in dm[rec_type][graph][strategy]:
                        is_random = 'Random' in scenario
                        # the last entry is the final success value
                        val = dm[rec_type][graph][strategy][scenario][-1]
                        # print('            %.2f %s' % (val, scenario))
                        df.loc[i] = [val, data_set.label, rec_type, is_random, N, strategy, scenario]
                        i += 1
    df['val'] = df['val'].astype(float)
    # other views that proved useful during analysis:
    # pd.pivot_table(df, values='val', index='scenario', columns=['rec_type', 'N'])
    # pd.pivot_table(df[df['strategy'] == 'title'], values='val', index='rec_type', columns=['is_random'])
    df_agg = pd.pivot_table(df[df['strategy'] == 'title'], values='val',
                            index='scenario',
                            columns=['is_random', 'data_set'])
    # Fix: print the aggregate instead of dropping into the debugger
    # (the original ended with a leftover pdb.set_trace()) and drop the
    # dead first df_agg assignment that was immediately overwritten.
    print(df_agg)
# recommender graph types evaluated by the simulations
rec_types = [
    'rb',
    'rbar',
    'rbiw',
    'rbmf',
]
# recommender types that also have personalized graph variants
pers_recs = [
    'rbiw',
    'rbmf',
]
# file-name suffixes of the personalized graph variants to evaluate
personalized_types = [
    # '_personalized_min',
    '_personalized_median',
    # '_personalized_max',
    # '_personalized_mixed_min',
    '_personalized_mixed_median',
    # '_personalized_mixed_max',
]
# neighborhood sizes N encoded in the graph file names (presumably the
# number of recommendations per item — TODO confirm against graph builder)
n_vals = [
    5,
    20
]
if __name__ == '__main__':
    # first CLI argument: data set label, or 'evaluate' to plot results
    if len(sys.argv) < 2 or sys.argv[1] not in [
        'bookcrossing',
        'movielens',
        'imdb',
        'evaluate'
    ]:
        print('dataset not supported')
        sys.exit()
    dataset_label = sys.argv[1]
    # optional second argument: maximum number of navigation hops
    if len(sys.argv) > 2:
        STEPS_MAX = int(sys.argv[2])
    else:
        STEPS_MAX = 50
    print(dataset_label)
    print(STEPS_MAX)
    if dataset_label != 'evaluate':
        # run the navigation simulations for the chosen data set
        dataset = DataSet(dataset_label, rec_types, pers_recs, personalized_types)
        nav = Navigator(dataset)
        print('running...')
        nav.run()
    else:
        # evaluate previously computed simulation results
        datasets = [
            'bookcrossing',
            # 'movielens',
            # 'imdb'
        ]
        # evaluator = Evaluator(datasets=datasets, pdf=True)
        # evaluator.plot_bar()
        # evaluator = Evaluator(datasets=datasets, subtract_baseline=True, pdf=True)
        # evaluator.plot_bar()
        # evaluator.print_results()
        evaluator = Evaluator(datasets=datasets, personalized=True, pdf=True)
        # evaluator.plot_bar_personalized()
        evaluator.plot_bar_personalized_simple()
|
__author__ = 'korhammer'
import warnings
from os import listdir
from os.path import isdir, isfile, join

import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import Rectangle

import misc
class Evaluation:
    """Collect experiment results from HDF5 files into a DataFrame and
    plot grouped bar charts of train/test performance.
    """

    def __init__(self, allocate=50000):
        # results: all collected rows; pre-allocated lazily by add_hdf5()
        self.results = pd.DataFrame()
        # filtered: working view on results, narrowed by filter()
        self.filtered = self.results
        # order: per-attribute ordering spec ('ascend', 'descend' or a list)
        self.order = {}
        # order_all: resolved explicit ordering for every column
        self.order_all = {}
        # add_counter: next row index to fill; -1 means "not allocated yet"
        self.add_counter = -1
        # allocate: number of rows to pre-allocate on the first add
        self.allocate = allocate
        # hatch patterns used to distinguish bar groups in the plots
        self.patterns = ["/", "\\", "|", "-", "+", "x", "o", "O", ".", "*"]
def add_folder(self, path, file_method, recursive=True):
    """Collect result files from *path*, descending into subfolders
    when *recursive* is set.  (*file_method* is accepted for interface
    compatibility but is not used.)
    """
    for entry in listdir(path):
        entry_path = join(path, entry)
        if isfile(entry_path):
            self.add_hdf5(entry_path)
        elif recursive and isdir(entry_path):
            self.add_folder(entry_path, file_method, recursive)
def add_hdf5(self, path):
    """Add one HDF5 result file to self.results.

    Each group in the file becomes one row: the group's attributes plus
    mean/std of its 'LL train' and 'LL test' datasets.  Paths that do
    not end in '.hdf5' are silently ignored.
    """
    if path.endswith('.hdf5'):
        h5File = h5py.File(path, 'r')
        for group in h5File:
            hdf_dic = {}
            for att in h5File[group].attrs:
                hdf_dic[att] = h5File[group].attrs[att]
            hdf_dic['train mean'] = np.mean(h5File[group + '/LL train'])
            hdf_dic['train std'] = np.std(h5File[group + '/LL train'])
            hdf_dic['test mean'] = np.mean(h5File[group + '/LL test'])
            hdf_dic['test std'] = np.std(h5File[group + '/LL test'])
            # allocate large DataFrame and initialize with nans
            if self.add_counter < 0:
                # first row ever: use it only to discover the column set,
                # then pre-allocate self.allocate NaN rows with those columns
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # needs pd.concat on a modern stack — confirm target version
                self.results = self.results.append(
                    hdf_dic, ignore_index=True)
                self.results = pd.DataFrame(np.zeros([self.allocate, len(self.results.columns)]) + np.nan,
                                            columns=self.results.columns)
                self.add_counter = 0
            self.results.iloc[self.add_counter] = pd.Series(hdf_dic)
            self.add_counter += 1
        h5File.close()
def set_order(self, attribute, order):
    """Set the order for a certain attribute to either ascending,
    descending, or a complete given order.

    order: the string 'ascend'/'descend', or an explicit sequence that
    must cover all current values of *attribute* in self.results.
    Raises ValueError for an explicit order that misses values.
    """
    if not (set(self.results[attribute].unique()).issubset(order) or isinstance(order, str)):
        # Fix: the original raised the undefined name 'InputError', which
        # produced a NameError instead of the intended validation error.
        raise ValueError(
            'order %r is inconsistent with current entries' % (order,))
    self.order[attribute] = order
def set_all_orders(self):
    """Resolve self.order into explicit orderings for every results
    column, stored in self.order_all.

    Columns without a spec (or with an explicit list) keep their spec /
    natural order; 'ascend' and 'descend' sort the unique values.
    """
    for att in self.results.columns:
        # Fix: dict.has_key() does not exist in Python 3; 'in' works in both.
        if att in self.order:
            if isinstance(self.order[att], list):
                self.order_all[att] = self.order[att]
            # Fix: compare string values with '==' — 'is' tests object
            # identity and only works by accident of string interning.
            elif self.order[att] == 'ascend':
                self.order_all[att] = np.sort(self.results[att].unique())
            elif self.order[att] == 'descend':
                self.order_all[att] = np.sort(
                    self.results[att].unique())[::-1]
        else:
            # no spec: keep the order in which the values occur
            self.order_all[att] = self.results[att].unique()
def filter(self, attribute, values, filter_type='in'):
    """Narrow self.filtered to rows whose *attribute* matches *values*.

    filter_type: 'in'/'is' (membership), '<'/'smaller', '>'/'greater',
    '<='/'se', '>='/'ge' (comparisons against a scalar), or 'not'
    (exclusion).  Unknown types leave the view unchanged and warn.

    Fixes: the original used identity tests with always-truthy operands
    (`filter_type is 'in' or 'is'` is always true, and `('<' or
    'smaller')` collapses to '<'), so only the first branch was ever
    reachable; boolean-Series negation now uses '~' instead of unary '-'.
    """
    if filter_type in ('in', 'is'):
        if not isinstance(values, list):
            values = [values]
        self.filtered = self.filtered[
            self.filtered[attribute].isin(values)]
    elif filter_type in ('<', 'smaller'):
        self.filtered = self.filtered[self.filtered[attribute] < values]
    elif filter_type in ('>', 'greater'):
        self.filtered = self.filtered[self.filtered[attribute] > values]
    elif filter_type in ('<=', 'se'):
        self.filtered = self.filtered[self.filtered[attribute] <= values]
    elif filter_type in ('>=', 'ge'):
        self.filtered = self.filtered[self.filtered[attribute] >= values]
    elif filter_type == 'not':
        if not isinstance(values, list):
            values = [values]
        self.filtered = self.filtered[
            ~self.filtered[attribute].isin(values)]
    else:
        warnings.warn(
            'Filter type unknown. No filter was applied.', UserWarning)
def unfilter(self):
    """Reset the working view to the full results DataFrame."""
    self.filtered = self.results
def convert_flags(self, flags=['rectified', 'scaled', 'whitened'], name='flags', default='raw'):
    """Combine the boolean columns in *flags* into one string column
    *name*; rows with no flag set get the value *default*.
    """
    self.results.loc[:, name] = ''
    try:
        # fast path: truthy flag contributes its name, falsy contributes ''
        for flag in flags:
            self.results[name] += self.results[flag] * flag
    except Exception:
        # Fix: was a bare 'except:' (also swallows KeyboardInterrupt etc.).
        # Restart from an empty column so flags already applied by a
        # partially-successful fast path are not appended twice.
        self.results.loc[:, name] = ''
        for flag in flags:
            single_flag = self.results[flag].copy()
            single_flag[single_flag != 0] = flag
            single_flag[single_flag == 0] = ''
            self.results.loc[:, name] += single_flag
    single_flag = self.results[name].copy()
    single_flag[single_flag == ''] = default
    self.results.loc[:, name] = single_flag
def calc_mean_and_std(self, attributes=['test_accuracy0', 'test_accuracy1', 'test_accuracy2'],
                      output_attributes=['mean_test_accuracy', 'std_test_accuracy']):
    """Add row-wise mean and std columns computed over *attributes*.

    output_attributes: names for the new [mean, std] columns.
    """
    mean_col, std_col = output_attributes[0], output_attributes[1]
    subset = self.results[attributes]
    self.results[mean_col] = subset.mean(axis=1)
    self.results[std_col] = subset.std(axis=1)
def convert_flags_abbr(self, flags=['rectified', 'scaled', 'whitened'], name='flags', default='raw'):
    """Like convert_flags(), but each set flag contributes only its
    first letter (e.g. 'rw' for rectified+whitened); rows with no flag
    set get *default*.
    """
    self.results.loc[:, name] = ''
    try:
        # fast path: truthy flag contributes its initial, falsy ''
        for flag in flags:
            self.results[name] += self.results[flag] * flag[0]
    except Exception:
        # Fix: was a bare 'except:' (also swallows KeyboardInterrupt etc.).
        # Restart from an empty column so flags already applied by a
        # partially-successful fast path are not appended twice.
        self.results.loc[:, name] = ''
        for flag in flags:
            single_flag = self.results[flag].copy()
            single_flag[single_flag != 0] = flag[0]
            single_flag[single_flag == 0] = ''
            self.results.loc[:, name] += single_flag
    single_flag = self.results[name].copy()
    single_flag[single_flag == ''] = default
    self.results.loc[:, name] = single_flag
def best_results_for(self, attributes, objective='test mean',
                     outputs=[
                         'test mean', 'train mean', 'test std', 'train std'],
                     fun='max'):
    """Aggregate self.filtered per *attributes* group.

    fun: 'max'/'min' pick the row with the best/worst *objective* per
    group and return its *outputs* columns; 'mean' averages *outputs*;
    'count' counts rows per group.  Raises ValueError otherwise (the
    original fell through to an UnboundLocalError).
    """
    # Fix: DataFrame.sort() was removed from pandas; sort_values() is
    # its direct replacement with identical semantics here.
    if fun == 'max':
        best = self.filtered.sort_values(objective).groupby(
            attributes)[outputs].last()
    elif fun == 'min':
        best = self.filtered.sort_values(objective).groupby(
            attributes)[outputs].first()
    elif fun == 'mean':
        best = self.filtered.groupby(attributes)[outputs].mean()
    elif fun == 'count':
        best = self.filtered.groupby(attributes)[objective].count()
    else:
        raise ValueError('unknown aggregation fun: %r' % (fun,))
    return best
def make_same(self, attribute, values):
    """Collapse every occurrence of *values* in column *attribute* to
    the first value of the list (in place).
    """
    canonical = values[0]
    mask = self.results[attribute].isin(values)
    self.results.loc[mask, attribute] = canonical
def rename_attribute(self, attribute, new_name):
    """Rename the results column *attribute* to *new_name* in place."""
    mapping = {attribute: new_name}
    self.results.rename(columns=mapping, inplace=True)
def bring_in_order(self, attributes, attribute):
    """Return (position, value) pairs for the members of *attributes*,
    ordered according to self.order_all[attribute].
    """
    wanted = set(attributes)
    ordered = []
    for idx, value in enumerate(self.order_all[attribute]):
        if value in wanted:
            ordered.append((idx, value))
    return ordered
def group_subplots(self, best, counts=None,
                   error=False, no_rows=2,
                   adapt_bottom=True, plot_range=None, base=5, eps=.5,
                   plot_fit=True, colormap='pastel1', max_n_cols=10,
                   legend_position='lower right', legend_pad='not implemented',
                   print_value='auto'):
    """Create a single barplot for each group of the first attribute in best.

    best: DataFrame with a two-level index (subplot attribute, bar
    attribute) and columns 'test mean'/'test std'/'train mean'/'train std'
    (as produced by best_results_for).  counts: optional same-shaped
    frame whose 'test mean' holds per-group counts to print on the bars.
    error toggles error bars, plot_fit additionally draws the train bars.

    Fixes vs. the original: the empty-entry guard used identity tests
    ('is not 0' / 'is not np.nan'), which are always true for computed
    floats, so zero/NaN entries were plotted; boolean-Series negation
    uses '~' (unary '-' is rejected by modern numpy/pandas); the removed
    np.int/np.float aliases are replaced by the builtins.
    """
    no_subplots = len(best.index.levels[0])
    f, ax_arr = plt.subplots(
        no_rows, int(np.ceil(no_subplots / float(no_rows))))
    ax_flat = ax_arr.flatten()
    att_names = best.index.names
    self.set_all_orders()
    lev0 = self.bring_in_order(best.index.levels[0], att_names[0])
    lev1 = self.bring_in_order(best.index.levels[1], att_names[1])
    best = best.reset_index()
    if counts is not None:
        counts = counts.reset_index()
    bar_x = np.arange(len(lev1))
    cmap = plt.cm.get_cmap(colormap)
    dummy_artists = []
    for plt_i, (lev0_ind, lev0_att) in enumerate(lev0):
        for bar_i, (lev1_ind, lev1_att) in enumerate(lev1):
            c = cmap(float(lev1_ind) / len(lev1))
            dummy_artists.append(Rectangle((0, 0), 1, 1, fc=c))
            # compute plot limits
            if plot_range:
                bottom = plot_range[0]
                ceil = plot_range[1]
            elif adapt_bottom:
                # ignore empty (zero) entries when determining the limits
                relevant = best[
                    (best[att_names[0]] == lev0_att) & ~(best['test mean'] == 0)]
                if error:
                    ceil = misc.based_ceil(np.max(relevant['test mean'])
                                           + np.max(relevant['test std']) + eps, base)
                    bottom = misc.based_floor(np.min(relevant['test mean'])
                                              - np.max(relevant['test std']) - eps, base)
                else:
                    ceil = misc.based_ceil(
                        np.max(relevant['test mean']) + eps, base)
                    bottom = misc.based_floor(
                        np.min(relevant['test mean']) - eps, base)
            test_mean = misc.float(best[(best[att_names[0]] == lev0_att)
                                        & (best[att_names[1]] == lev1_att)]['test mean'])
            test_std = misc.float(best[(best[att_names[0]] == lev0_att)
                                       & (best[att_names[1]] == lev1_att)]['test std'])
            train_mean = misc.float(best[(best[att_names[0]] == lev0_att)
                                         & (best[att_names[1]] == lev1_att)]['train mean'])
            train_std = misc.float(best[(best[att_names[0]] == lev0_att)
                                        & (best[att_names[1]] == lev1_att)]['train std'])
            # create bar plots; skip empty (zero) and missing (NaN) entries
            # (assumes misc.float returns a float — TODO confirm)
            if test_mean != 0 and not np.isnan(test_mean) and not np.isnan(train_mean):
                if plot_fit:
                    if error:
                        ax_flat[plt_i].bar(bar_x[bar_i], train_mean - bottom, .4,
                                           color=c, bottom=bottom, yerr=train_std, ecolor='gray', alpha=.5,
                                           linewidth=0.)
                        ax_flat[plt_i].bar(bar_x[bar_i] + .4, test_mean - bottom, .4,
                                           color=c, bottom=bottom, yerr=test_std, ecolor='gray', linewidth=0.)
                    else:
                        ax_flat[plt_i].bar(bar_x[bar_i], train_mean - bottom, .4,
                                           color=c, bottom=bottom, alpha=.5, linewidth=0.)
                        ax_flat[plt_i].bar(bar_x[bar_i] + .4, test_mean - bottom, .4,
                                           color=c, bottom=bottom, linewidth=0.)
                    if print_value is True or (print_value is not False and counts is None):
                        ax_flat[plt_i].text(bar_x[bar_i] + .25, (test_mean + bottom) / 2, '%.2f' % train_mean,
                                            ha='center', va='top', rotation='vertical')
                else:
                    if error:
                        ax_flat[plt_i].bar(bar_x[bar_i], test_mean - bottom,
                                           color=c, bottom=bottom, yerr=test_std, ecolor='gray', linewidth=0.)
                    else:
                        ax_flat[plt_i].bar(bar_x[bar_i], test_mean - bottom,
                                           color=c, bottom=bottom, linewidth=0.)
                    if print_value is True or (print_value is not False and counts is None):
                        ax_flat[plt_i].text(bar_x[bar_i] + .5, (test_mean + bottom) / 2, '%.2f' % test_mean,
                                            ha='center', va='center', rotation='vertical')
            # print count
            if counts is not None:
                count = misc.int(counts[(counts[att_names[0]] == lev0_att)
                                        & (counts[att_names[1]] == lev1_att)]['test mean'])
                if count > 0:
                    ax_flat[plt_i].text(bar_x[bar_i] + .4, (test_mean + bottom) / 2, '%d' % count,
                                        ha='center', va='center', rotation='vertical')
        ax_flat[plt_i].set_title(lev0_att)
        ax_flat[plt_i].set_xticks([])
        ax_flat[plt_i].set_ylim(bottom, ceil)
        ax_flat[plt_i].spines['top'].set_visible(False)
        ax_flat[plt_i].spines['right'].set_visible(False)
        ax_flat[plt_i].spines['left'].set_color('gray')
        ax_flat[plt_i].spines['bottom'].set_color('gray')
    # for plt_i in range(len(lev0), len(ax_flat)): #wrong range, doesn't do anything, why turn off anyway?
    #     ax_flat[plt_i].axis('off')
    legend = [(int(att) if isinstance(att, float) else att)
              for i, att in lev1]
    n_col = len(legend)
    if n_col > max_n_cols:
        n_col = int((n_col + 1) / 2)
    plt.figlegend(
        dummy_artists, legend, loc=legend_position, ncol=n_col, title=att_names[1])
def group_subplots_3(self, best, counts=None,
                     error=False, no_rows=2,
                     adapt_bottom=True, plot_range=None, base=5, eps=.5,
                     plot_fit=True, colormap='pastel1', max_n_cols=10,
                     legend_position='lower right', legend_pad='not implemented',
                     print_value='auto'):
    """Create a single barplot for each group of the first attribute in best.

    Three-level variant of group_subplots(): *best* has a three-level
    index (subplot, bar color, bar block); the third level is encoded
    as hatch patterns and as sub-positions within each bar slot.

    Fixes vs. the original: the empty-entry guard used identity tests
    ('is not 0' / 'is not np.nan'), which are always true for computed
    floats; boolean-Series negation uses '~' instead of unary '-'; the
    removed np.int/np.float aliases are replaced by the builtins.
    """
    no_subplots = len(best.index.levels[0])
    f, ax_arr = plt.subplots(
        no_rows, int(np.ceil(no_subplots / float(no_rows))))
    ax_flat = ax_arr.flatten()
    att_names = best.index.names
    self.set_all_orders()
    lev0 = self.bring_in_order(best.index.levels[0], att_names[0])
    lev1 = self.bring_in_order(best.index.levels[1], att_names[1])
    lev2 = self.bring_in_order(best.index.levels[2], att_names[2])
    best = best.reset_index()
    if counts is not None:
        counts = counts.reset_index()
    bar_x = np.arange(len(lev1))
    block_x = np.arange(len(lev2))
    width = 1.0 / len(lev2)
    bar_width = .4 * width
    cmap = plt.cm.get_cmap(colormap)
    dummy_artists = []
    for plt_i, (lev0_ind, lev0_att) in enumerate(lev0):
        for bar_i, (lev1_ind, lev1_att) in enumerate(lev1):
            c = cmap(float(lev1_ind) / len(lev1))
            dummy_artists.append(Rectangle((0, 0), 1, 1, fc=c))
            for block_i, (lev2_ind, lev2_att) in enumerate(lev2):
                h = self.patterns[block_i]
                # compute plot limits
                if plot_range:
                    bottom = plot_range[0]
                    ceil = plot_range[1]
                elif adapt_bottom:
                    # ignore empty (zero) entries when determining limits
                    relevant = best[
                        (best[att_names[0]] == lev0_att) & ~(best['test mean'] == 0)]
                    if error:
                        ceil = misc.based_ceil(np.max(relevant['test mean'])
                                               + np.max(relevant['test std']) + eps, base)
                        bottom = misc.based_floor(np.min(relevant['test mean'])
                                                  - np.max(relevant['test std']) - eps, base)
                    else:
                        ceil = misc.based_ceil(
                            np.max(relevant['test mean']) + eps, base)
                        bottom = misc.based_floor(
                            np.min(relevant['test mean']) - eps, base)
                test_mean = misc.float(best[(best[att_names[0]] == lev0_att)
                                            & (best[att_names[1]] == lev1_att)
                                            & (best[att_names[2]] == lev2_att)]['test mean'])
                test_std = misc.float(best[(best[att_names[0]] == lev0_att)
                                           & (best[att_names[1]] == lev1_att)
                                           & (best[att_names[2]] == lev2_att)]['test std'])
                train_mean = misc.float(best[(best[att_names[0]] == lev0_att)
                                             & (best[att_names[1]] == lev1_att)
                                             & (best[att_names[2]] == lev2_att)]['train mean'])
                train_std = misc.float(best[(best[att_names[0]] == lev0_att)
                                            & (best[att_names[1]] == lev1_att)
                                            & (best[att_names[2]] == lev2_att)]['train std'])
                # create bar plots; skip empty (zero) and missing (NaN)
                # entries (assumes misc.float returns a float — TODO confirm)
                if test_mean != 0 and not np.isnan(test_mean) and not np.isnan(train_mean):
                    if plot_fit:
                        if error:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, train_mean - bottom, bar_width,
                                               color=c, hatch=h, bottom=bottom, yerr=train_std, ecolor='gray', alpha=.5,
                                               linewidth=0.)
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width + bar_width, test_mean - bottom, bar_width,
                                               color=c, hatch=h, bottom=bottom, yerr=test_std, ecolor='gray', linewidth=0.)
                        else:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, train_mean - bottom, bar_width,
                                               color=c, hatch=h, bottom=bottom, alpha=.5, linewidth=0.)
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width + bar_width, test_mean - bottom, bar_width,
                                               color=c, hatch=h, bottom=bottom, linewidth=0.)
                        if print_value is True or (print_value is not False and counts is None):
                            ax_flat[plt_i].text(bar_x[bar_i] + block_x[block_i] * width + .25, (test_mean + bottom) / 2, '%.2f' % train_mean,
                                                ha='center', va='top', rotation='vertical')
                    else:
                        if error:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, test_mean - bottom,
                                               color=c, hatch=h, bottom=bottom, yerr=test_std, ecolor='gray', linewidth=0.)
                        else:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, test_mean - bottom,
                                               color=c, hatch=h, bottom=bottom, linewidth=0.)
                        if print_value is True or (print_value is not False and counts is None):
                            ax_flat[plt_i].text(bar_x[bar_i] + block_x[block_i] * width + .5, (test_mean + bottom) / 2, '%.2f' % test_mean,
                                                ha='center', va='center', rotation='vertical')
                # print count
                if counts is not None:
                    count = misc.int(counts[(counts[att_names[0]] == lev0_att)
                                            & (counts[att_names[1]] == lev1_att)
                                            & (counts[att_names[2]] == lev2_att)]['test mean'])
                    if count > 0:
                        ax_flat[plt_i].text(bar_x[bar_i] + block_x[block_i] * bar_width + .4, (test_mean + bottom) / 2, '%d' % count,
                                            ha='center', va='center', rotation='vertical')
        ax_flat[plt_i].set_title(lev0_att)
        ax_flat[plt_i].set_xticks([])
        ax_flat[plt_i].set_ylim(bottom, ceil)
        ax_flat[plt_i].spines['top'].set_visible(False)
        ax_flat[plt_i].spines['right'].set_visible(False)
        ax_flat[plt_i].spines['left'].set_color('gray')
        ax_flat[plt_i].spines['bottom'].set_color('gray')
    # white rectangles with the block hatches extend the legend
    for block_i, (lev2_ind, lev2_att) in enumerate(lev2):
        h = self.patterns[block_i]
        dummy_artists.append(Rectangle((0, 0), 1, 1, fc='w', hatch=h))
    # for plt_i in range(len(lev0), len(ax_flat)): #doesn't do anything, why turn off anyway?
    #     ax_flat[plt_i].axis('off')
    legend = [(int(att) if isinstance(att, float) else att) for i, att in lev1] + \
        [(int(att) if isinstance(att, float) else att) for i, att in lev2]
    n_col = len(legend)
    if n_col > max_n_cols:
        n_col = int((n_col + 1) / 2)
    plt.figlegend(
        dummy_artists, legend, loc=legend_position, ncol=n_col, title=att_names[1])
def group_subplots_all_parameters(self, best, counts=None,
                                  error=False, no_rows=2,
                                  adapt_bottom=True, plot_range=None, base=5, eps=.5,
                                  plot_fit=True, colormap='pastel1', max_n_cols=10,
                                  legend_position='lower right', legend_pad='not implemented',
                                  print_value='auto'):
    """Create a single barplot for each group of the first attribute in best.

    Like group_subplots_3(), but the third index level is labeled on the
    x axis (rotated tick labels) instead of hatch patterns.

    Fixes vs. the original: removed a leftover debug statement
    (`print 'shit'`, also a Python 3 syntax error); the empty-entry
    guard used identity tests ('is not 0' / 'is not np.nan'), which are
    always true for computed floats; boolean-Series negation uses '~'
    instead of unary '-'; the removed np.int/np.float aliases are
    replaced by the builtins.
    """
    no_subplots = len(best.index.levels[0])
    f, ax_arr = plt.subplots(
        no_rows, int(np.ceil(no_subplots / float(no_rows))))
    ax_flat = ax_arr.flatten()
    att_names = best.index.names
    self.set_all_orders()
    lev0 = self.bring_in_order(best.index.levels[0], att_names[0])
    lev1 = self.bring_in_order(best.index.levels[1], att_names[1])
    lev2 = self.bring_in_order(best.index.levels[2], att_names[2])
    best = best.reset_index()
    if counts is not None:
        counts = counts.reset_index()
    bar_x = np.arange(len(lev1))
    block_x = np.arange(len(lev2))
    width = 1.0 / len(lev2)
    bar_width = .4 * width
    cmap = plt.cm.get_cmap(colormap)
    dummy_artists = []
    ticks = []
    tick_labels = []
    for plt_i, (lev0_ind, lev0_att) in enumerate(lev0):
        for bar_i, (lev1_ind, lev1_att) in enumerate(lev1):
            c = cmap(float(lev1_ind) / len(lev1))
            dummy_artists.append(Rectangle((0, 0), 1, 1, fc=c))
            for block_i, (lev2_ind, lev2_att) in enumerate(lev2):
                # compute plot limits
                if plot_range:
                    bottom = plot_range[0]
                    ceil = plot_range[1]
                elif adapt_bottom:
                    # ignore empty (zero) entries when determining limits
                    relevant = best[
                        (best[att_names[0]] == lev0_att) & ~(best['test mean'] == 0)]
                    if error:
                        ceil = misc.based_ceil(np.max(relevant['test mean'])
                                               + np.max(relevant['test std']) + eps, base)
                        bottom = misc.based_floor(np.min(relevant['test mean'])
                                                  - np.max(relevant['test std']) - eps, base)
                    else:
                        ceil = misc.based_ceil(
                            np.max(relevant['test mean']) + eps, base)
                        bottom = misc.based_floor(
                            np.min(relevant['test mean']) - eps, base)
                test_mean = misc.float(best[(best[att_names[0]] == lev0_att)
                                            & (best[att_names[1]] == lev1_att)
                                            & (best[att_names[2]] == lev2_att)]['test mean'])
                test_std = misc.float(best[(best[att_names[0]] == lev0_att)
                                           & (best[att_names[1]] == lev1_att)
                                           & (best[att_names[2]] == lev2_att)]['test std'])
                train_mean = misc.float(best[(best[att_names[0]] == lev0_att)
                                             & (best[att_names[1]] == lev1_att)
                                             & (best[att_names[2]] == lev2_att)]['train mean'])
                train_std = misc.float(best[(best[att_names[0]] == lev0_att)
                                            & (best[att_names[1]] == lev1_att)
                                            & (best[att_names[2]] == lev2_att)]['train std'])
                # create bar plots; skip empty (zero) and missing (NaN)
                # entries (assumes misc.float returns a float — TODO confirm)
                if test_mean != 0 and not np.isnan(test_mean) and not np.isnan(train_mean):
                    if plot_fit:
                        if error:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, train_mean - bottom, bar_width,
                                               color=c, bottom=bottom, yerr=train_std, ecolor='gray', alpha=.5,
                                               linewidth=0.)
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width + bar_width, test_mean - bottom, bar_width,
                                               color=c, bottom=bottom, yerr=test_std, ecolor='gray', linewidth=0.)
                        else:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, train_mean - bottom, bar_width,
                                               color=c, bottom=bottom, alpha=.5, linewidth=0.)
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width + bar_width, test_mean - bottom, bar_width,
                                               color=c, bottom=bottom, linewidth=0.)
                        if print_value is True or (print_value is not False and counts is None):
                            ax_flat[plt_i].text(bar_x[bar_i] + block_x[block_i] * width + .25, (test_mean + bottom) / 2, '%.2f' % train_mean,
                                                ha='center', va='top', rotation='vertical')
                    else:
                        if error:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, test_mean - bottom,
                                               color=c, bottom=bottom, yerr=test_std, ecolor='gray', linewidth=0.)
                        else:
                            ax_flat[plt_i].bar(bar_x[bar_i] + block_x[block_i] * width, test_mean - bottom,
                                               color=c, bottom=bottom, linewidth=0.)
                        if print_value is True or (print_value is not False and counts is None):
                            # NOTE(review): 'hatch' is not a Text property in
                            # modern matplotlib — confirm target version
                            ax_flat[plt_i].text(bar_x[bar_i] + block_x[block_i] * width + .5, (test_mean + bottom) / 2, '%.2f' % test_mean,
                                                ha='center', va='center', rotation='vertical', hatch=self.patterns[block_i])
                if plt_i == 0:
                    # record tick positions/labels once, on the first subplot
                    ticks.append(
                        bar_x[bar_i] + block_x[block_i] * width + width * 0.5)
                    tick_labels += [lev2_att]
                # print count
                if counts is not None:
                    count = misc.int(counts[(counts[att_names[0]] == lev0_att)
                                            & (counts[att_names[1]] == lev1_att)
                                            & (counts[att_names[2]] == lev2_att)]['test mean'])
                    if count > 0:
                        ax_flat[plt_i].text(bar_x[bar_i] + block_x[block_i] * bar_width + .4, (test_mean + bottom) / 2, '%d' % count,
                                            ha='center', va='center', rotation='vertical')
        ax_flat[plt_i].set_title(lev0_att)
        ax_flat[plt_i].set_xticks([])
        ax_flat[plt_i].set_ylim(bottom, ceil)
        ax_flat[plt_i].spines['top'].set_visible(False)
        ax_flat[plt_i].spines['right'].set_visible(False)
        ax_flat[plt_i].spines['left'].set_color('gray')
        ax_flat[plt_i].spines['bottom'].set_color('gray')
    for plt_i in range(len(ax_flat)):
        # ax_flat[plt_i].axis('off')
        ax_flat[plt_i].set_xticks(ticks)
        ax_flat[plt_i].set_xticklabels(tick_labels, rotation=90)
    legend = [(int(att) if isinstance(att, float) else att)
              for i, att in lev1]
    n_col = len(legend)
    if n_col > max_n_cols:
        n_col = int((n_col + 1) / 2)
    plt.figlegend(
        dummy_artists, legend, loc=legend_position, ncol=n_col, title=att_names[1])
|
#!/usr/bin/python
# Libraries
from PIL import Image, ImageTk
from math import sqrt, floor, ceil, sin, cos, tan, atan2, radians, degrees
from random import random, randint
import numpy
from time import time
from sys import maxint
# ======= solid colors =======
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
GRAY = (128,128,128)
# ============================
def percentage(x,y):
    """Return y expressed as a percentage of x, rounded to 2 decimals."""
    ratio = y * 100.0 / x
    return round(float(ratio), 2)
def slicing(l, n):
    # Split the flat list l into consecutive rows of length n (the last
    # row may be shorter).  range() replaces the Python-2-only xrange();
    # iteration order and the returned slices are unchanged.
    return [l[start:start + n] for start in range(0, len(l), n)]
def de_slicing(p):
    """Flatten a list of rows (as produced by slicing) back into one
    flat pixel list."""
    return [pixel for row in p for pixel in row]
def array2list(a):
    # Convert an (H, W, C) numpy array back into a flat, row-major list
    # of int tuples (the inverse of slicing + numpy.array).
    # range() replaces Python-2-only xrange(); results are unchanged.
    height, width = a.shape[0], a.shape[1]
    newPixels = list()
    for x in range(height):
        for y in range(width):
            newPixels.append(tuple(int(v) for v in a[x, y]))
    return newPixels
def turn(p1, p2, p3):
    # Sign of the cross product (p2-p1) x (p3-p1):
    #   1 = left turn, -1 = right turn, 0 = collinear.
    # Python 3 removed cmp(); the sign is computed directly and is
    # identical to the original cmp(cross, 0) on Python 2.
    cross = (p2[0] - p1[0])*(p3[1] - p1[1]) - (p3[0] - p1[0])*(p2[1] - p1[1])
    return (cross > 0) - (cross < 0)
# Symbolic names for turn()'s three possible return values (graham_scan
# below compares against 0 directly rather than using these).
TURN_LEFT, TURN_RIGHT, TURN_NONE = (1, -1, 0)
def graham_scan(points):
    """Convex hull of a list of 2D points.

    Despite the name this is the monotone-chain sweep: points are
    sorted in place, then an upper and a lower chain are built with
    turn() and merged into the hull.
    """
    upper = list()
    lower = list()
    points.sort()
    for point in points:
        while len(upper) > 1 and turn(upper[-2], upper[-1], point) <= 0:
            upper.pop()
        while len(lower) > 1 and turn(lower[-2], lower[-1], point) >= 0:
            lower.pop()
        upper.append(point)
        lower.append(point)
    hull = lower[1:-1][::-1]
    hull += upper
    return hull
def getNeighbours(pixels, a, b):
    # Return the up-to-8 neighbours of pixels[a][b], in the same order
    # the original emitted them.
    # Bounds are now checked explicitly: the original's try/except
    # IndexError let negative indices wrap around (pixels[-1] is the
    # last row in Python), so border pixels wrongly picked up
    # neighbours from the opposite edge of the image.
    neighbours = list()
    for da, db in ((-1, -1), (-1, 0), (-1, 1),
                   (1, -1), (1, 0), (1, 1),
                   (0, -1), (0, 1)):
        i, j = a + da, b + db
        if 0 <= i < len(pixels) and 0 <= j < len(pixels[i]):
            neighbours.append(pixels[i][j])
    return neighbours
def normalize(pixels):
    # Linearly rescale each channel of a list of pixel tuples to [0, 1].
    # Fixes in this version:
    #   * the original relied on Python 2 map() returning lists (under
    #     Python 3 the iterators are exhausted after the first pixel);
    #   * integer division floored nearly every result to 0 on Python 2
    #     -- the division is now done in floats;
    #   * a constant channel (max == min) raised ZeroDivisionError; it
    #     now maps to 0.0.
    maxs = [max(channel) for channel in zip(*pixels)]
    mins = [min(channel) for channel in zip(*pixels)]
    spans = [hi - lo for hi, lo in zip(maxs, mins)]
    newPixels = list()
    for pixel in pixels:
        newPixels.append(tuple(
            float(p - lo) / span if span else 0.0
            for p, lo, span in zip(pixel, mins, spans)))
    return newPixels
def euclidean(values, shape):
    # Combine several gradient images (numpy arrays of `shape`) into one
    # by the per-pixel, per-channel Euclidean norm floor(sqrt(sum v^2)).
    # range() replaces Python-2-only xrange(); results are unchanged.
    newValues = numpy.zeros(shape=shape)
    for x in range(shape[0]):
        for y in range(shape[1]):
            squared = sum(value[x, y] ** 2 for value in values)
            newValues[x, y] = tuple(int(floor(sqrt(p))) for p in squared)
    return newValues
def grayscale(pixels, lmin=0, lmax=255):
    """Convert pixels to gray in place: each pixel becomes the average
    of its channels, with averages >= lmax clamped to 255 (white) and
    averages <= lmin clamped to 0 (black).  Returns the same list."""
    for idx, px in enumerate(pixels):
        level = sum(px) / 3
        if level >= lmax:
            level = 255
        if level <= lmin:
            level = 0
        pixels[idx] = (level, level, level)
    return pixels
def blurPixel(pixels):
    """Average a list of RGB tuples channel-by-channel into one pixel."""
    count = len(pixels)
    red = sum(px[0] for px in pixels) / count
    green = sum(px[1] for px in pixels) / count
    blue = sum(px[2] for px in pixels) / count
    return (red, green, blue)
def blur(pixels, width, height):
    """Box blur: each output pixel is the average of the pixel and its
    8-neighbourhood.  Prints row progress while working."""
    blurred = list()
    grid = slicing(pixels, width)
    for row, line in enumerate(grid):
        print(str(percentage(height, row)))
        for col, px in enumerate(line):
            window = getNeighbours(grid, row, col)
            window.append(px)
            blurred.append(blurPixel(window))
    return blurred
def negative(pixels, cMax=255):
    """Invert every channel in place (p -> cMax - p); returns the list."""
    for idx, px in enumerate(pixels):
        pixels[idx] = tuple(cMax - channel for channel in px)
    return pixels
def sepia(pixels):
    """Sepia tint in place: grayscale the image, then divide the R/G/B
    channels by (1.0, 1.2, 2.0) to warm the tone."""
    toned = grayscale(pixels)
    divisors = (1.0, 1.2, 2.0)
    for idx, px in enumerate(toned):
        toned[idx] = tuple(int(c / d) for c, d in zip(px, divisors))
    return toned
def noise(pixels, level, intensity):
    """Salt-and-pepper noise in place: each pixel has a level% chance of
    being replaced by near-black (0 + intensity*13) or near-white
    (255 - intensity*13), chosen with a second coin flip."""
    chance = level * 0.01
    offset = intensity * 13
    for idx, px in enumerate(pixels):
        if random() < chance:
            if random() < 0.5:
                shade = 0 + offset
            else:
                shade = 255 - offset
            pixels[idx] = (shade, shade, shade)
    return pixels
def removeNoise(pixels, width, height, aggressiveness):
    """Despeckle: a pixel that differs from its neighbourhood average by
    more than aggressiveness*10 on every channel is replaced by that
    average; everything else is kept.  Prints row progress."""
    threshold = aggressiveness * 10
    cleaned = list()
    grid = slicing(pixels, width)
    for row, line in enumerate(grid):
        print(str(percentage(height, row)))
        for col, px in enumerate(line):
            avg = blurPixel(getNeighbours(grid, row, col))
            deltas = [abs(avg[c] - px[c]) for c in range(3)]
            if all(d > threshold for d in deltas):
                cleaned.append(avg)
            else:
                cleaned.append(px)
    return cleaned
def difference(pixels, width, height):
    """Edge highlight: subtract the blurred grayscale image from the
    grayscale image, then hard-threshold the result at 10 through
    grayscale(lmin=10, lmax=10)."""
    gray = grayscale(pixels)
    blurred = blur(gray, width, height)
    diff = [tuple(g - b for g, b in zip(gp, bp))
            for gp, bp in zip(gray, blurred)]
    return grayscale(diff, lmin=10, lmax=10)
def convolution2D(f,h):
    # Convolve image array f (H x W x 3) with kernel h; prints a row
    # progress percentage as it goes.  Fixes two defects:
    #   * the horizontal kernel offset used hS[0] (kernel height)
    #     instead of hS[1] (kernel width), skewing non-square kernels;
    #   * out-of-range negative indices were not rejected, so numpy's
    #     wrap-around indexing silently convolved border pixels with the
    #     opposite edge of the image (try/except only caught overflow).
    # // keeps the centre offsets integral on Python 3 as well.
    fS, hS = f.shape, h.shape
    F = numpy.zeros(shape=fS)
    cx = hS[0] // 2
    cy = hS[1] // 2
    for x in range(fS[0]):
        # progress indicator (percentage of rows done)
        print(str(round(float(x * 100.0 / fS[0]), 2)))
        for y in range(fS[1]):
            mSum = numpy.array([0.0, 0.0, 0.0])
            for i in range(hS[0]):
                xi = x + i - cx
                if xi < 0 or xi >= fS[0]:
                    continue
                for j in range(hS[1]):
                    yj = y + j - cy
                    if 0 <= yj < fS[1]:
                        mSum += f[xi, yj] * h[i, j]
            F[x, y] = mSum
    return F
def applyMask(pixels, width, gray=True):
    # Apply a fixed 3x3 convolution mask to the image and return the
    # result as a flat pixel list.  The unused local pS was removed.
    # NOTE(review): the kernel (cross of 1s with a -6 centre, scaled by
    # 1/9) looks like a Laplacian-style sharpen filter -- confirm the
    # intent before reusing it elsewhere.
    if gray:
        pixels = grayscale(pixels)
    rows = slicing(pixels, width)
    image = numpy.array(rows)
    n = 1.0 / 9.0
    mask = numpy.array([[0.0, 1.0, 0.0],
                        [1.0, -6.0, 1.0],
                        [0.0, 1.0, 0.0]]) * n
    return array2list(convolution2D(image, mask))
def borderDetection(pixels, width):
    # Sobel edge detection: convolve the grayscaled image with the
    # horizontal and vertical Sobel kernels and merge the two gradient
    # images with the per-pixel Euclidean norm.
    # Commented-out kernel experiments and timing code were removed.
    pixels = grayscale(pixels)
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape
    n = 1.0/1.0  # kept so the kernels can be rescaled in one place
    mask1 = numpy.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * n
    mask2 = numpy.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) * n
    g1 = convolution2D(pixels, mask1)
    g2 = convolution2D(pixels, mask2)
    newPixels = grayscale(array2list(euclidean([g1,g2], pS)))
    return newPixels
def bfs(pixels, visited, coordinates, newColor, width, height):
    # Flood fill from `coordinates` ((x, y)): recolour every 8-connected
    # pixel that matches the start colour with newColor, marking visited
    # and collecting the filled coordinates.  Returns (pixels, visited,
    # localPixels); pixels and visited are mutated in place.
    # A deque replaces list.pop(0), which is O(n) per pop; the traversal
    # order (FIFO) and results are identical.
    # NOTE(review): as in the original, newColor == start colour would
    # loop forever -- callers always pass a fresh colour.
    from collections import deque
    queue = deque([coordinates])
    original = pixels[coordinates[1]][coordinates[0]]
    localPixels = list()
    while queue:
        (x,y) = queue.popleft()
        pColor = pixels[y][x]
        if(pColor == original or pColor == newColor):
            for dx in [-1,0,1]:
                for dy in [-1,0,1]:
                    (i,j) = (x + dx, y + dy)
                    if(i >= 0 and i < width and j >= 0 and j < height):
                        contenido = pixels[j][i]
                        if(contenido == original):
                            pixels[j][i] = newColor
                            queue.append((i,j))
                            visited[j][i] = 1
                            localPixels.append((i,j))
    return pixels, visited, localPixels
def objectDetection(pixels, width, height, coordinates, color):
    """Flood-fill the region containing `coordinates` with `color`;
    returns the flat recoloured pixel list, the visited mask and the
    list of coordinates belonging to the region."""
    grid = slicing(pixels, width)
    mask = [[0] * width for _ in range(height)]
    grid, mask, objPixels = bfs(grid, mask, coordinates, color, width, height)
    return de_slicing(grid), mask, objPixels
def objectClassification(pixels, width, height, color=BLACK):
    # Label connected components of `color` via flood fill, recolouring
    # each with a random colour.  Components covering more than 0.1% of
    # the image are recorded (id, size, percentage, centre of mass,
    # pixel list) and the largest one is painted GRAY.
    # Fix: max() raised ValueError when no component passed the size
    # threshold (or no pixel matched `color`); that case is now skipped
    # and an empty object list is returned.
    pixels = slicing(pixels, width)
    visited = [[0 for b in range(width)] for a in range(height)]
    objects = list()
    objID = 1
    for x in range(height):
        print(str(percentage(height, x)))
        for y in range(width):
            if not visited[x][y] and pixels[x][y] == color:
                objColor = (randint(0, 255), randint(0, 255), randint(0, 255))
                pixels, visited, objPixels = bfs(pixels, visited, (y, x), objColor, width, height)
                objSize = len(objPixels)
                objPrcnt = percentage(width * height, objSize)
                if objPrcnt > 0.1:
                    ySum = sum(i for i, j in objPixels)
                    xSum = sum(j for i, j in objPixels)
                    objCenter = tuple([ySum / len(objPixels), xSum / len(objPixels)])
                    mObject = {"id": objID, "size": objSize, "percentage": objPrcnt,
                               "center": objCenter, "pixels": objPixels}
                    objects.append(mObject)
                    objID += 1
    if objects:
        biggestObject = max(objects, key=lambda o: o["percentage"])
        for p in biggestObject["pixels"]:
            pixels[p[1]][p[0]] = GRAY
    return de_slicing(pixels), objects
def houghTransform(pixels, width, height):
    """Hough-style line detection: bucket each pixel by the (quantized
    angle, rho) of the line implied by its Sobel gradient, keep the 30%
    most frequent (angle, rho) combinations, and colour pixels on those
    lines RED/BLUE/GREEN by angle bucket, GRAY elsewhere.

    NOTE(review): Python 2 semantics are assumed -- xrange(), and the
    integer divisions width/2, height/2 and int(degrees(theta))/18;
    under Python 3 these yield floats and change the quantization.
    """
    newPixels = list()
    # Per-pixel (angle-bucket, rho) combination, filled in below.
    results = [[None for a in xrange(width)] for b in xrange(height)]
    combinations = dict()
    pixelsOr = slicing(pixels, width)  # NOTE(review): never used below
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape
    # Sobel kernels for the horizontal (X) and vertical (Y) gradients.
    maskX = numpy.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * 1.0/8.0
    maskY = numpy.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) * 1.0/8.0
    gx = convolution2D(pixels, maskX)
    gy = convolution2D(pixels, maskY)
    for y in xrange(height):
        for x in xrange(width):
            # Gradients are grayscale, so only channel 0 is inspected.
            h = gx[y,x][0]
            v = gy[y,x][0]
            if(abs(h) + abs(v) <= 0.0):
                theta = None
            else:
                theta = atan2(v,h)
            if theta is not None:
                # rho: signed distance of the line from the image centre.
                rho = ceil((x - width/2) * cos(theta) + (height/2 - y) * sin(theta))
                # Quantize the angle into 18-degree buckets.
                theta = int(degrees(theta))/18
                combination = ("%d"%(theta), "%d"%rho)
                results[y][x] = combination
                # Only interior pixels vote, to ignore image-border noise.
                if x > 0 and y > 0 and x < width-1 and y < height-1:
                    if combination in combinations:
                        combinations[combination] += 1
                    else:
                        combinations[combination] = 1
            else:
                results[y][x] = (None, None)
    # Keep the top 30% most-voted (angle, rho) combinations.
    frec = sorted(combinations, key=combinations.get, reverse=True)
    frec = frec[:int(ceil(len(combinations) * 0.3))]
    for y in xrange(height):
        for x in xrange(width):
            (ang, rho) = results[y][x]
            if(ang, rho) in frec:
                ang = int(ang)
                # Colour by angle bucket: +/-180 deg or 0 -> RED,
                # +/-90 deg -> BLUE, everything else -> GREEN.
                if(ang == -10 or ang == 0 or ang == 10):
                    newPixels.append(RED)
                elif(ang == -5 or ang == 5):
                    newPixels.append(BLUE)
                else:
                    newPixels.append(GREEN)
            else:
                newPixels.append(GRAY)
    return newPixels
|
import random, string, os
import host
from Time import Time
class Power:
	"""One power (nation) in a DPjudge Diplomacy game.

	Holds the player's identity and contact data, the state the power
	owns on the board (homes, centers, units, retreats, funds) and the
	per-phase order/vote flags.  Python 2 code throughout: note the
	backtick repr syntax, cmp() and string.lowercase below.
	"""
	# ----------------------------------------------------------------------
	def __init__(self, game, name, type = None):
		# vars(self).update(locals()) stores game, name and type as
		# attributes (and, as a side effect, also sets self.self).
		vars(self).update(locals())
		self.reinit()
	# ----------------------------------------------------------------------
	def __repr__(self):
		# Serialize the power into the status-file text format (one
		# KEYWORD line per piece of state), encoded as latin-1.
		text = '\n' + (self.type and self.type + ' ' or '') + self.name
		if self.player: text += '\nPLAYER ' + ' '.join(self.player)
		if self.address and not self.isResigned() and not self.isDummy():
			text += '\nADDRESS ' + ' '.join(self.address)
		if self.ceo: text += '\nCONTROL ' + ' '.join(self.ceo)
		elif self.password: text += '\nPASSWORD ' + self.password
		if self.omniscient: text += '\nOMNISCIENT!'[:10 + self.omniscient]
		if self.wait: text += '\nWAIT'
		if self.cd: text += '\nCD'
		if self.vote: text += ('\nVOTE ' +
			{'0': 'LOSS', '1': 'SOLO', 'YES': 'YES'}.get(self.vote,
			self.vote + 'WAY'))
		for line in self.msg: text += '\nMSG ' + line
		if self.homes or not self.type and self.homes is not None:
			text += '\nINHABITS ' + ' '.join(self.homes)
		if self.centers: text += '\nOWNS ' + ' '.join(self.centers)
		if self.sees: text += '\nSEES ' + ' '.join(self.sees)
		if self.hides: text += '\nHIDES ' + ', '.join(self.hides)
		if self.balance != None: self.funds['$'] = self.balance
		if self.funds:
			text += '\nFUNDS'
			for type, amt in self.funds.items():
				text += ' ' + {'$': '$' + `amt`}.get(type, `amt` + type)
		for unit, places in self.retreats.items():
			text += '\n' + ' '.join([unit, '-->'] + places)
		text = '\n'.join([text] + self.units + self.adjust) + '\n'
		return text.encode('latin-1')
	# ----------------------------------------------------------------------
	def reinit(self, includeFlags = 6):
		# ---------------------------------------------
		# Reinitializes the power specific data.
		# Relevant bit values for includeFlags:
		#	1: include orders
		#	2: include persistent data
		#	4: include transient data
		# ---------------------------------------------
		# ------------------------------------
		# Initialize the persistent parameters
		# ------------------------------------
		if includeFlags & 2:
			address = password = abbrev = None
			omniscient = 0
			player, msg = [], []
		# -----------------------------------
		# Initialize the transient parameters
		# -----------------------------------
		if includeFlags & 4:
			balance = homes = None
			centers, units, ceo = [], [], []
			retreats, funds, sees, hides = {}, {}, [], []
		# ---------------------------------------
		# Initialize the order-related parameters
		# ---------------------------------------
		if includeFlags & 5:
			wait = vote = None
			cd = 0
			adjust = []
			held = goner = 0
		# Publish whichever locals were set above as attributes.
		vars(self).update(locals())
	# ----------------------------------------------------------------------
	def compare(self, other):
		# Sort order: by type, then name (Python 2 cmp() protocol).
		return cmp(self.type, other.type) or cmp(self.name, other.name)
	# ----------------------------------------------------------------------
	def initialize(self, game):
		# Attach the power to its game and fill homes/centers/units from
		# the game map, honouring the MOBILIZE and BLANK_BOARD rules.
		self.game = game
		if self.type: return
		if self.homes is None:
			if game.map.homeYears: self.homes = []
			else: self.homes = game.map.homes.get(self.name, [])
		if 'MOBILIZE' in game.rules:
			self.centers = ['SC!'] * ('IMMOBILE_DUMMIES' in game.rules and
				self.isDummy() and len(game.map.homes.get(self.name, [])) or 1)
		elif 'BLANK_BOARD' in game.rules:
			if not self.centers:
				self.centers = game.map.centers.get(self.name, [])
			self.units = self.units or [x for x in
				game.map.units.get(self.name, []) if x[2:5] not in
				self.centers]
		else:
			self.centers = self.centers or game.map.centers.get(self.name, [])
			self.units = self.units or game.map.units.get(self.name, [])
	# ----------------------------------------------------------------------
	def resign(self, byMaster = 0):
		# Resign this power (or remove an observer/monitor type): update
		# the player list, mail the GM and/or players, and reopen the
		# seat in self.game.avail when the power still has a position.
		for num, power in enumerate(self.game.powers):
			if power.name == self.name: break
		if self.type or self.game.status[1] == 'forming':
			if (self.game.status[1] in ('forming', 'active', 'waiting')
				and self.type != 'MONITOR'): # maybe should be "if self.centers"?
				# Tell at least the GM about the resignation
				self.game.openMail('Diplomacy resignation notice',
					mailTo = self.game.master[1], mailAs = host.dpjudge)
				self.game.mail.write(
					(("You have resigned %s from game '%s'.\n",
					"%s has resigned from game '%s'.\n")[not byMaster])
					% (self.game.anglify(self.name), self.game.name))
				self.game.mail.write(
					'\n(This notice is sent ONLY to the GameMaster.)')
				self.game.mail.close()
			del self.game.powers[num]
		else:
			if (self.game.status[1] in ('active', 'waiting')
				and not self.isEliminated()):
				self.game.avail += ['%s-(%s)' % (self.name,
					('%d/%d' % (len(self.units), len(self.centers)),
					'?/?')['BLIND' in self.game.rules])]
				self.game.delay = None
				self.game.changeStatus('waiting')
			when = self.game.phaseAbbr()
			if when[0] == '?':
				when = self.game.outcome and self.game.outcome[0] or ''
			if when and self.player[1:2] != [when]:
				if self.player and self.address:
					try:
						player = self.player[0].split('|')
						player[1] = self.address[0]
						self.player[0] = '|'.join(player)
					except: pass
				self.player[:0] = [when]
			else: del self.player[0]
			if self.player: self.player[:0] = ['RESIGNED']
			if 'BLIND' in self.game.rules: self.removeBlindMaps()
			self.password = None
			if not self.isDummy() and self.address:
				self.message, self.pressSent = [], 1
				self.game.openMail('Diplomacy resignation notice',
					mailTo = ','.join(self.address), mailAs = host.dpjudge)
				self.game.mail.write(
					(("The Master has resigned you as %s from game '%s'.",
					"You have resigned as %s from game '%s'.")[not byMaster])
					% (self.game.anglify(self.name), self.game.name))
				self.game.mail.close()
				self.message, self.pressSent = [], 1
				if not self.isEliminated():
					self.game.mailPress(None, ['All!'],
						(("The Master has resigned %s from game '%s'.",
						"%s has resigned from game '%s'.")[not byMaster])
						% (self.game.anglify(self.name), self.game.name) +
						('', '\nThe deadline for orders is now %s.\n' %
						self.game.timeFormat())[not self.game.avail],
						subject = 'Diplomacy resignation notice')
			else:
				# Tell at least the GM about the resignation
				self.game.openMail('Diplomacy resignation notice',
					mailTo = self.game.master[1], mailAs = host.dpjudge)
				self.game.mail.write(
					(("You have resigned %s from game '%s'.\n",
					"%s has resigned from game '%s'.\n")[not byMaster])
					% (self.game.anglify(self.name), self.game.name))
				self.game.mail.write(
					'\n(This notice is sent ONLY to the GameMaster.)')
				self.game.mail.close()
			self.address = None
		self.game.save()
	# ----------------------------------------------------------------------
	def takeover(self, dppd = None, email = None, password = None,
		byMaster = 0):
		# Hand the power to a new (or returning) player: validate or
		# generate a password, update player/address, mail the player,
		# and restart the game clock when no abandoned seats remain.
		resigned, dummy = self.isResigned(), self.isDummy()
		revived, generated = not dppd, not password
		phase = self.game.phaseAbbr()
		if phase[0] == '?':
			phase = self.game.outcome and self.game.outcome[0] or ''
		if not resigned and not dummy:
			if not password or self.isValidPassword(password) != 1:
				return ('You need to specify the password of the current ' +
					'player in order to take over.')
		elif not password:
			password = self.generatePassword()
		if resigned or dummy:
			if len(self.player) > 2: self.player = self.player[2:]
			elif revived:
				return 'Cannot revive a power never assigned to a player.'
			else: self.player = []
		if self.player and (not dppd or
			self.player[0].split('|')[0] == dppd.split('|')[0]): revived = 1
		else: self.player[:0] = [dppd, phase]
		if self.isDummy():
			self.address = self.password = None
		else:
			self.address = [self.player[0].split('|')[1]]
			if email and email != self.address[0]:
				self.address[:0] = [email]
			self.password = password
			self.game.openMail('Diplomacy takeover notice',
				mailTo = self.name, mailAs = host.dpjudge)
			self.game.mail.write(
				"You are %s %s in game '%s'.\n" %
				(('now', 'again')[revived],
				self.game.anglify(self.name), self.game.name) +
				("Your password is '%s'.\n" % password) *
				(generated or byMaster) + "Welcome %sto the %s.\n" %
				('back ' * revived, host.dpjudgeNick))
			self.game.mail.close()
		if resigned: self.game.avail = [x for x in self.game.avail
			if not x.startswith(self.name + '-')]
		if not self.game.avail:
			self.game.changeStatus('active')
			self.game.setDeadline()
		self.game.save()
		if 'BLIND' in self.game.rules: self.game.makeMaps()
		self.game.mailPress(None, ['All!'],
			"The abandoned %s has been taken over in game '%s'.\n" %
			(self.game.anglify(self.name), self.game.name) +
			('', 'The deadline for orders is now %s.\n' %
			self.game.timeFormat())[not self.game.avail],
			subject = 'Diplomacy position taken over')
	# ----------------------------------------------------------------------
	def dummy(self):
		# Turn the power into a computer-run DUMMY: tag the player list,
		# clear the password, close its open seat and announce it.
		if self.isResigned(): self.player[0] = 'DUMMY'
		else:
			when = self.game.phaseAbbr()
			if self.player[1:2] != [when]:
				if when[0] == '?': when = self.game.outcome[0]
				if self.player and self.address:
					try:
						player = self.player[0].split('|')
						player[1] = self.address[0]
						self.player[0] = '|'.join(player)
					except: pass
				self.player[:0] = [when]
			else: del self.player[0]
			self.player[:0] = ['DUMMY']
		try: self.game.avail.remove([x for x in self.game.avail
			if x.startswith(self.name)][0])
		except: pass
		if not self.game.avail:
			self.game.changeStatus('active')
			self.game.setDeadline()
		self.password = None
		self.game.save()
		if 'BLIND' in self.game.rules: self.game.makeMaps()
		self.game.mailPress(None, ['All!'],
			"%s has been dummied in game '%s'.\n" %
			(self.game.anglify(self.name), self.game.name) +
			('', 'The deadline for orders is now %s.\n' %
			self.game.timeFormat())[not self.game.avail],
			subject = 'Diplomacy position dummied')
	# ----------------------------------------------------------------------
	def controller(self):
		# Return the Power named in self.ceo[0], or None when the power
		# is uncontrolled (or nominally controlled by the MASTER).
		if not self.ceo or self.ceo[0] == 'MASTER': return None
		for power in self.game.powers:
			if power.name == self.ceo[0]: return power
		return None
	# ----------------------------------------------------------------------
	def controls(self, power):
		# True when this power may act for `power`: itself, a power it
		# controls, or when this is the MASTER pseudo-power.
		return (power is self or power.controller() is self or
			self.name == 'MASTER')
	# ----------------------------------------------------------------------
	def vassals(self, public = False, all = False, indirect = False):
		# Powers whose ceo is this power (every power, for MASTER when
		# indirect is set), optionally including eliminated ones.
		return [x for x in self.game.powers if
			(x.ceo[:1] == [self.name] or indirect and self.name == 'MASTER')
			and (all or not x.isEliminated(public))]
	# ----------------------------------------------------------------------
	def isResigned(self):
		# True when the seat was abandoned (player list is tagged).
		return self.player[:1] == ['RESIGNED']
	# ----------------------------------------------------------------------
	def isDummy(self, public = False):
		# True for a computer-run power; hidden from the public view
		# when the HIDE_DUMMIES rule is on.
		return self.player[:1] == ['DUMMY'] and not (
			public and 'HIDE_DUMMIES' in self.game.rules)
	# ----------------------------------------------------------------------
	def isEliminated(self, public = False, personal = False):
		# A power survives while it has units, centers or retreats, or
		# (publicly) in a BLIND game, or (unless personal) live vassals.
		return not (self.units or self.centers or self.retreats or
			(public and 'BLIND' in self.game.rules) or
			(not personal and self.vassals()))
	# ----------------------------------------------------------------------
	def isCD(self, after = 0):
		# -----------------------------------
		# Set after to 1 to reveal what will happen after the grace expires,
		# or to -1 to know the status before any deadline expires.
		# A power is CD...
		#	if a CIVIL_DISORDER rule is on
		#	and the player has not RESIGNED
		#	and the deadline has passed,
		#	or it is an unCONTROLled DUMMY
		#	and the CD_DUMMIES rule is on,
		#	or it is an CONTROLled DUMMY
		#	and the CD_DUMMIES rule is on
		#	and the grace period has expired.
		# -----------------------------------
		game = self.game
		return not self.type and self.player and (
			self.isDummy() and (
				not self.ceo and 'CD_DUMMIES' in game.rules or (
					after > 0 or 'NO_DEADLINE' not in game.rules and
					game.deadline and game.deadline <= game.getTime() and (
						not self.ceo or game.graceExpired()
					)
				) and (
					self.ceo and 'CD_DUMMIES' in game.rules or
					'CIVIL_DISORDER' in game.rules
				)
			) or
			not self.isResigned() and (
				after > 0 or 'NO_DEADLINE' not in game.rules and
				game.deadline and game.deadline <= game.getTime() and
				game.graceExpired()
			) and
			'CIVIL_DISORDER' in game.rules
		)
	# ----------------------------------------------------------------------
	def isValidPassword(self, password):
		# Returns a privilege level: 5 judge, 4 game master, 3 own
		# password, 2 omniscient power's password, 0 invalid.
		# -------------------------------------------------------------------
		# If power is run by controller, password is in the controller's data
		# -------------------------------------------------------------------
		ceo = self.controller()
		if ceo: return ceo.isValidPassword(password)
		# ---------------------------
		# Determine password validity
		# ---------------------------
		if not password: return 0
		password = password.upper()
		if password == host.judgePassword.upper(): return 5
		if password == self.game.password.upper(): return 4
		if self.password and password == self.password.upper(): return 3
		# ----------------------------------------
		# Check against omniscient power passwords
		# ----------------------------------------
		if self.name == 'MASTER': return 0
		if [1 for x in self.game.powers if x.omniscient
			and x.password and password == x.password.upper()]: return 2
		return 0
	# ----------------------------------------------------------------------
	def isValidUserId(self, userId):
		# Same privilege ladder as isValidPassword, keyed on the DPPD
		# numeric user id ('#<id>' prefixes in player/master data).
		# -------------------------------------------------------------------
		# If power is run by controller, userId is in the controller's data
		# -------------------------------------------------------------------
		ceo = self.controller()
		if ceo: return ceo.isValidUserId(userId)
		# ---------------------------
		# Determine userId validity
		# ---------------------------
		if userId < 0: return 0
		id = '#' + str(userId)
		if self.game.master and id == self.game.master[0]: return 4
		if self.player and id == self.player[0].split('|')[0]: return 3
		# ----------------------------------------
		# Check against omniscient power passwords
		# ----------------------------------------
		if self.name == 'MASTER': return 0
		if [1 for x in self.game.powers if x.omniscient
			and x.player and id == x.player[0].split('|')[0]]: return 2
		return 0
	# ----------------------------------------------------------------------
	def generatePassword(self):
		# Six distinct random characters from a-z0-9 (string.lowercase
		# is Python 2 only).
		random.seed()
		return ''.join(random.sample(string.lowercase + string.digits, 6))
	# ----------------------------------------------------------------------
	def removeBlindMaps(self):
		# Delete this power's private map files for a BLIND game (the
		# file name embeds a hash of the power's password), then recurse
		# into all of its vassals.
		for suffix in ('.ps', '.pdf', '.gif', '_.gif'):
			try: os.unlink(host.gameMapDir + '/' + self.game.name + '.' +
				(self.abbrev or 'O') +
				`hash((self.password or self.game.password) + self.name)` +
				suffix)
			except: pass
		for vassal in self.vassals(all = True):
			vassal.removeBlindMaps()
	# ----------------------------------------------------------------------
	def movesSubmitted(self):
		# Each variant had pretty much better override this guy! :-)
		return False
	# ----------------------------------------------------------------------
	def canVote(self):
		# A power votes when it is uncontrolled and it (or one of its
		# vassals) still owns supply centers.
		return not self.ceo and (self.centers or
			[1 for x in self.vassals() if x.centers])
	# ----------------------------------------------------------------------
	def visible(self, unit, order = None):
		# --------------------------------------------------------------
		# This function returns a dictionary listing a number for each
		# of the powers. The number is a bitmap, with the following
		# meaning. If the bitvalue 1 is set, this means the power could
		# "see" the unit in question BEFORE the move. If the bitvalue 2
		# is set, this means the power could "see" (AFTER the move) the
		# location where the unit in question began the turn. If the
		# bitvalue 4 is set, the power could "see" (BEFORE the move) the
		# location where the unit in question ended the turn, and if the
		# bitvalue 8 is set, the power could "see" (AFTER the move) the
		# location where the unit in question ended the turn. If "unit"
		# is simply a center location, determines center visibility.
		# --------------------------------------------------------------
		game = self.game
		if 'command' not in vars(game): game.command = {}
		shows, order = {'MASTER': 15}, order or game.command.get(unit, 'H')
		old = new = unit.split()[-1][:3]
		dislodging = 0
		if order[0] == '-' and (game.phaseType != 'M'
			or not game.result.get(unit)):
			new = order.split()[-1][:3]
			if game.phaseType == 'M':
				# -------------------------------------------------------
				# If this unit is dislodging another unit (which might be
				# of the same power), we'll pretend that any unit able to
				# see the destination after the move can also see it
				# before the move. This way the unit will always be
				# arriving, allowing to depict both the dislodging and
				# dislodged units.
				# -------------------------------------------------------
				dislodging = [x for p in game.powers for x in p.units
					if x[2:5] == new[:3]
					and 'dislodged' in game.result.get(x, [])] and 1 or 0
		rules = game.rules
		for seer in game.powers:
			shows[seer.name] = 15 * bool(self is seer or seer.omniscient
				or seer is self.controller())
			if (shows[seer.name]
				or ('SEE_NO_SCS', 'SEE_NO_UNITS')[' ' in unit] in rules): continue
			# --------------------------------------------------
			# Get the list of the "seer"s sighted units (if any)
			# with their positions before and after any movement
			# --------------------------------------------------
			vassals = [seer] + seer.vassals()
			units = [y for x in vassals for y in x.units]
			adjusts = [y for x in vassals for y in x.adjust]
			retreats = [y for x in vassals for y in x.retreats.keys()]
			if 'NO_UNITS_SEE' in rules: before = after = []
			else:
				spotters = 'AF'
				if ' ' in unit:
					if 'UNITS_SEE_SAME' in rules: spotters = unit[0]
					elif 'UNITS_SEE_OTHER' in rules:
						spotters = spotters.replace(unit[0], '')
				before = after = [x[2:] for x in units + retreats
					if x[0] in spotters]
				if game.phaseType == 'M':
					after = []
					for his in units:
						if his[0] not in spotters: continue
						if (game.command.get(his, 'H')[0] != '-'
							or game.result.get(his)): after += [his[2:]]
						else:
							after += [game.command[his].split()[-1]]
				elif game.phaseType == 'R':
					if 'popped' not in vars(game): game.popped = []
					if adjusts:
						after = [x for x in before if x not in
							[y[2:] for y in retreats]]
						for adjusted in adjusts:
							word = adjusted.split()
							if word[1][0] not in spotters: continue
							if word[3][0] == '-' and word[2] not in game.popped:
								after += [word[-1]]
					else:
						after = [x for x in before if x not in
							[y[2:] for y in game.popped if y in retreats]]
				elif game.phaseType == 'A':
					after = [z for z in before if z not in
						[x[1] for x in [y.split()[1:] for y in adjusts]
						if len(x) > 1]] + [x[1] for x in
						[y.split()[1:] for y in adjusts if y[0] == 'B']
						if len(x) > 1 and x[0][0] in spotters]
			# ------------------------------------------------
			# Get the list of the "seer"s sighted scs (if any)
			# ------------------------------------------------
			if 'NO_SCS_SEE' in rules: pass
			elif ('OWN_SCS_SEE' in rules
				or game.map.homeYears and not [x for x in game.powers if x.homes]):
				# ------------------------------------
				# The seer's owned centers are sighted
				# ------------------------------------
				scs = [y for x in vassals for y in x.centers]
				if 'SC!' in scs:
					scs = [x[8:11] for x in adjusts if x[:5] == 'BUILD']
				after += scs
				if 'OWN_SCS_SEE' in rules:
					if 'lost' in vars(game):
						for what, who in game.lost.items():
							if what in scs and who not in vassals:
								scs.remove(what)
							elif what not in scs and who in vassals:
								scs.append(what)
					before += scs
			else:
				# -----------------------------------
				# The seer's home centers are sighted
				# -----------------------------------
				scs = [y for x in vassals for y in x.homes or []]
				# ----------------------------------------------------------
				# Also add locations where the power had units at game start
				# (helping void variant games, where units start on non-SCs)
				# ----------------------------------------------------------
				if 'BLANK_BOARD' not in rules and 'MOBILIZE' not in rules:
					scs += [y[2:] for x in vassals
						for y in game.map.units.get(x.name, [])]
				after += scs
				before += scs
			# -------------------------------------------------
			# When it comes to visibility, we can ignore coasts
			# -------------------------------------------------
			before = set([x[:3] for x in before])
			after = set([x[:3] for x in after])
			both = before & after
			before, after = before - both, after - both
			old, new = old[:3], new[:3]
			places = (' ' in unit and unit in self.hides and
				[(new, 4)] or old == new and [(old, 5)] or [(old, 1), (new, 4)])
			# -------------------------------------------------------
			# Set the bitmap for this "seer" if any unit or sc in the
			# lists (before, after, scs) can see the site in question
			# -------------------------------------------------------
			for bit in [b * m for (y, m) in places for (b, l) in [(1, before),
				(2 + ((m & 4) and dislodging), after), (3, both)]
				for x in l if x == y or game.abuts('?', y, 'S', x)]:
				shows[seer.name] |= bit
		return shows
	# ----------------------------------------------------------------------
	def showLines(self, unit, notes = [], line = None):
		# Build the SHOW ... press lines revealing this unit's sightings
		# (FOUND/ARRIVES/DEPARTS/LOST) per power in a BLIND game.
		# NOTE(review): mutable default `notes=[]` is only read here,
		# but it is a Python pitfall should it ever be mutated.
		game = self.game
		list, lost, found, gone, came, all = [], [], [], [], [], []
		#list += ['# Show ' + unit]
		if game.phaseType == 'M':
			if game.command.get(unit, 'H')[0] != '-' or game.result[unit]:
				there = unit
			else: there = unit[:2] + game.command[unit].split()[-1]
			cmd = None
		else:
			word = unit.split()
			unit = ' '.join(word[1:3])
			if len(word) > 4 and word[2] not in notes:
				cmd, there = ' '.join(word[3:]), unit[:2] + word[-1]
			elif unit == 'WAIVED' or (len(word) > 3 and word[-1] == 'HIDDEN'):
				return line and ['SHOW MASTER ' + ' '.join([x.name
					for x in game.powers if x == self or x == self.controller()
					or x.omniscient]), line] or []
			else: cmd, there = word[0][0], unit
		c = not cmd and 'M' or len(cmd) == 1 and cmd or 'M'
		for who, how in self.visible(unit, cmd).items():
			if how & 8:
				if how & 1: all += [who]
				elif how & 4: came.append(who)
				elif game.phaseType == 'A':
					if c in 'BR': all += [who]
					if c != 'B': found.append(who)
				elif c not in 'RD': found.append(who)
			elif how & 1:
				if how & 2: gone.append(who)
				else:
					if c not in 'RD': lost.append(who)
					if c in 'BRD': all += [who]
		if game.phaseType == 'M':
			fmt = '%s %s %s.' + ' (*dislodged*)' * ('dislodged' in notes)
		else:
			fmt = '%-11s %s %s.'
		for who, what in ((found, 'FOUND'), (came, 'ARRIVES')):
			if not who: continue
			list += ['SHOW ' + ' '.join(who),
				fmt % (game.anglify(self.name) + ':',
				game.anglify((unit, there)[what[0] in 'FA'], self), what)]
		if line: list += ['SHOW ' + ' '.join(all), line]
		for who, what in ((gone, 'DEPARTS'), (lost, 'LOST')):
			if not who: continue
			list += ['SHOW ' + ' '.join(who),
				fmt % (game.anglify(self.name) + ':',
				game.anglify((unit, there)[what[0] in 'FA'], self), what)]
		return list
	# ----------------------------------------------------------------------
|
# Extract the mail host from a "From" line: the substring between the
# '@' sign and the next space.
data = 'From stephen.marquard@uct.ac.za Sat jan 5'
atposition = data.find('@')  # index of the '@' (21)
print(atposition)
spaceposition = data.find(' ', atposition)  # first space after the '@' (31)
print(spaceposition)
host = data[atposition+1:spaceposition]  # -> 'uct.ac.za'
print(host)
|
# from tables.proxies import proxies
import json

# Build tables/proxies.json: pair each IP from proxies.txt with the port
# on the same line of ports.txt and emit one http/https/ftp mapping per
# proxy.  _DEFAULT_DICT shows the shape of one entry.
_DEFAULT_DICT = {'http': 'http://103.207.4.170:60570', 'https': 'https://103.207.4.170:60570',
                 'ftp_proxy': 'ftp://103.207.4.170:60570'}
_proxies = []
# 'with' guarantees the handles are closed (the original leaked two
# open file objects).
with open('tables/proxies.txt', 'r') as _ip_file:
    ips = [ip.strip() for ip in _ip_file]
with open('tables/ports.txt', 'r') as _port_file:
    ports = [port.strip() for port in _port_file]
# zip stops at the shorter file, so mismatched lengths cannot raise.
for ip, port in zip(ips, ports):
    _proxies.append({'http': f'http://{ip}:{port}',
                     'https': f'https://{ip}:{port}',
                     'ftp_proxy': f'ftp://{ip}:{port}'})
with open('tables/proxies.json', 'w') as _input_file:
    json.dump(_proxies, _input_file)
# print(_proxies)
print(len(_proxies))
|
"""Create a brute-force winner determination mechanism for XOR-bids based combinatorial
auctions. As input, your mechanism should take XOR-bids from a set of agents, and should
then output an allocation as well as the social welfare obtained.
Example:
3 agents with bids:
- a: 3 xor a, b: 100 xor c: 4
- a, b: 2 xor d: 20
- c: 20 xor a, d: 21
-> Result:
Best allocation: {0: {'b', 'a'}, 1: {'d'}, 2: {'c'}} (social welfare: 140.0)
"""
import re
import itertools as it
import numpy as np
from collections.abc import Iterable
from misc import misc
# One XOR sub-bid: comma-separated goods, ':', integer value -- e.g. "a, b: 100".
BID_PATTERN = r'(\w*\s*,\s*)*\w+\s*:\s*\d+'
class BidDict(dict):
    """An agent's XOR bid: maps tuples of goods to values.

    Indexing with a bundle of goods returns the value of the best sub-bid
    whose goods are all contained in that bundle (0 if none qualifies).
    """

    def _get_valuation(self, goods):
        # A single non-iterable good is treated as a one-element bundle.
        if not isinstance(goods, Iterable):
            goods = (goods,)
        bundle = set(goods)
        satisfiable = [value for key, value in self.items()
                       if set(key).issubset(bundle)]
        # 0 is the floor: an unsatisfiable (or non-positive) bid pays nothing.
        return max([0] + satisfiable)

    def __getitem__(self, item):
        return self._get_valuation(item)
def parse_bid(bid):
    """Parse one agent's XOR bid string into a BidDict.

    Returns None (after printing a message) when the string is malformed.
    """
    sub_bids = bid.lower().split(' xor ')
    if 'xor' in bid.lower() and len(sub_bids) < 2:
        print("Invalid entry: sub-bids should be separated with ' xor ' (with spaces around)")
        return
    parsed = BidDict()
    for sub_bid in sub_bids:
        # Each sub-bid must match "<goods...> : <integer>".
        if re.fullmatch(BID_PATTERN, sub_bid) is None:
            print(f"Invalid entry: sub-bids should have the format: <good1, good2, ...> : <value> (got '{sub_bid}')")
            return
        goods, value = sub_bid.replace(' ', '').split(':')
        value_num = float(value)
        if not goods and value_num > 0:
            print(f"Invalid entry: an empty sub-bid should be associated with the value of 0 (got {value_num})")
            return
        parsed[tuple(goods.split(','))] = value_num
    return parsed
def record_bid(agent=None):
    """Prompt repeatedly until the user enters a syntactically valid XOR bid."""
    label = ' ' + agent if agent else ''
    while True:
        parsed = parse_bid(input(f"Enter a (XOR) bid for agent{label}: "))
        if parsed is not None:
            return parsed
def collect_data():
    """Ask for the number of agents, then read one XOR bid per agent."""
    n = misc.validate("Enter the number of agents: ", int)
    return [record_bid(str(i)) for i in range(n)]
def extract_items(agents):
    """Return the sorted set of all goods mentioned in any agent's bid."""
    mentioned = set()
    for agent in agents:
        for bundle in agent.keys():
            mentioned.update(bundle)
    return sorted(mentioned)
def compute_social_welfare(agents, verbose=False):
    """Return an evaluator for allocation dicts.

    The returned callable takes {agent_index: bundle} and sums each agent's
    valuation of its bundle; `skip` omits one agent's contribution (used for
    the Vickrey/Clarke/Groves payment computation).
    """
    def wrapper(alloc_dict, skip=None):
        total = 0
        for i, (_, items) in enumerate(alloc_dict.items()):
            if i == skip:
                continue
            val = agents[i][items]
            total += val
            if verbose:
                print(f"Agent {i}: {items} = {val}")
        return total
    return wrapper
def allocate(agents, verbose=True):
    """Brute-force the welfare-maximising allocation.

    Enumerates every assignment of items to agents and keeps the first one
    achieving the strictly highest social welfare.
    Returns (allocation dict {agent: sorted bundle}, best welfare).
    """
    def vprint(*args):
        if verbose:
            print(*args)

    item_list = extract_items(agents)
    vprint(f"\nItems to be distributed: {item_list}")
    item_arr = np.array(item_list)
    agent_range = range(len(agents))
    best_alloc = ()
    best_welfare = 0
    evaluate = compute_social_welfare(agents, verbose=verbose)
    # Each assignment maps item position -> owning agent index.
    for combo in it.combinations_with_replacement(agent_range, len(item_arr)):
        for assignment in set(it.permutations(combo)):
            assignment_arr = np.array(assignment)
            vprint(f"\nAllocation: {dict(zip(item_arr, assignment_arr))}")
            by_agent = dict()
            for i in agent_range:
                by_agent[i] = sorted(item_arr[np.where(assignment_arr == i)])
            welfare = evaluate(by_agent)
            vprint(f"Social welfare: {welfare}")
            if welfare > best_welfare:
                best_welfare = welfare
                best_alloc = by_agent
    return best_alloc, best_welfare
def allocate_dummies(agents, **kwargs):
    """For each agent in turn, compute the best welfare achievable when that
    agent is replaced by a dummy valuing every bundle at 0 (VCG helper)."""
    welfares = []
    for i in range(len(agents)):
        replaced = agents[:i] + [BidDict()] + agents[i + 1:]
        welfares.append(allocate(replaced, **kwargs)[1])
    return welfares
def calculate_payments(agents, allocation, verbose=False):
    """Clarke-pivot (VCG) payments: for each agent, the welfare of the others
    without it minus their welfare under the chosen allocation."""
    dummy_welfares = allocate_dummies(agents, verbose=verbose)
    print(f"Welfares with agents replaced with dummies (indifferent to all outcomes): {dummy_welfares}")
    evaluate = compute_social_welfare(agents, verbose=verbose)
    skipping_welfares = [evaluate(allocation, skip=i) for i in range(len(agents))]
    print(f"Welfares with skipping an agent: {skipping_welfares}")
    return {i: dummy_welfares[i] - skipping_welfares[i]
            for i in range(len(agents))}
def main():
    """Collect bids interactively, solve the auction, and report VCG payments."""
    agents = collect_data()
    print(agents)
    best_alloc, welfare = allocate(agents, verbose=False)
    print(f"\n{20 * '-'}\nBest allocation: {best_alloc} (social welfare: {welfare})\n")
    payments = calculate_payments(agents, best_alloc, verbose=False)
    print(f"\nHow much agents should pay: {payments}")
# Run the interactive auction only when executed as a script.
if __name__ == '__main__':
    main()
|
# Generated by Django 3.0.4 on 2020-04-22 05:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a unique ordering field `sequence` to the `section` model."""

    dependencies = [
        ('apps', '0009_auto_20200419_1505'),
    ]
    operations = [
        migrations.AddField(
            model_name='section',
            name='sequence',
            # preserve_default=False: default=1 only backfills existing rows
            # during this migration and is not kept on the model afterwards.
            field=models.SmallIntegerField(default=1, unique=True, verbose_name='Порядок следования'),
            preserve_default=False,
        ),
    ]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import init
import numpy as np
import math
import sys
import datetime
def print_now(cmd):
    """Print `cmd` prefixed with the current wall-clock timestamp and flush
    stdout immediately (useful for log files on buffered streams)."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print('%s %s' % (stamp, cmd))
    sys.stdout.flush()
class NoisyLinear(nn.Module):
    """Factorised-Gaussian NoisyNet linear layer.

    Keeps learnable (mu, sigma) pairs for weight and bias; the sampled noise
    lives in non-parameter buffers and perturbs the weights only in training
    mode.  Initialisation follows the factorised scheme with
    U(-1/sqrt(p), 1/sqrt(p)) bounds, p = fan-in.
    """

    def __init__(self, in_features, out_features, std_init=0.1):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Uniform initialisation bounds for the mu parameters.
        self.lowerU = -1.0 / math.sqrt(in_features)
        self.upperU = 1.0 / math.sqrt(in_features)
        self.sigma_0 = std_init
        self.sigma_ij_in = self.sigma_0 / math.sqrt(self.in_features)
        self.sigma_ij_out = self.sigma_0 / math.sqrt(self.out_features)
        # Buffers (register_buffer) hold the sampled noise: persistent state
        # that is saved with the module but is not a model parameter.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))
        self.reset_parameters()
        self.sample_noise()

    def reset_parameters(self):
        """Uniform init for the mu's, constant fill for the sigma's."""
        self.weight_mu.data.uniform_(self.lowerU, self.upperU)
        self.weight_sigma.data.fill_(self.sigma_ij_in)
        self.bias_mu.data.uniform_(self.lowerU, self.upperU)
        self.bias_sigma.data.fill_(self.sigma_ij_out)

    def sample_noise(self):
        """Draw factorised noise: outer product eps_out (x) eps_in for the
        weight matrix, eps_out alone for the bias."""
        eps_in = self.func_f(self.in_features)
        eps_out = self.func_f(self.out_features)
        # torch.ger == outer product: (out,) x (in,) -> (out, in).
        self.weight_epsilon.copy_(eps_out.ger(eps_in))
        self.bias_epsilon.copy_(eps_out)

    def func_f(self, n):
        """f(x) = sign(x) * sqrt(|x|) applied element-wise to n uniform draws."""
        x = torch.rand(n)
        return x.sign().mul_(x.abs().sqrt_())

    def forward(self, x):
        # Training: perturb mu by sigma * epsilon; evaluation: plain mu.
        if self.training:
            return F.linear(x,
                            self.weight_mu + self.weight_sigma * self.weight_epsilon,
                            self.bias_mu + self.bias_sigma * self.bias_epsilon)
        return F.linear(x, self.weight_mu, self.bias_mu)
class DQN(nn.Module):
    """Atari DQN: Nature conv torso plus either a single Q head or a
    duelling (value/advantage) head; linear layers may be NoisyNet."""

    def __init__(self, num_inputs, hidden_size=512, num_actions=1, use_duel=False, use_noisy_net=False):
        super(DQN, self).__init__()
        # Orthogonal init with relu gain for conv/hidden layers; gain 1
        # (init2_) for the output heads.
        init_ = lambda m: init(m,
                               nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0),
                               nn.init.calculate_gain('relu'))
        init2_ = lambda m: init(m,
                                nn.init.orthogonal_,
                                lambda x: nn.init.constant_(x, 0))
        self.use_duel = use_duel
        self.use_noisy_net = use_noisy_net
        # Conv torso: 84x84 frames -> 32x7x7 feature map.
        self.conv1 = init_(nn.Conv2d(num_inputs, 32, 8, stride=4))
        self.conv2 = init_(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = init_(nn.Conv2d(64, 32, 3, stride=1))
        Linear = NoisyLinear if use_noisy_net else nn.Linear
        if self.use_duel:
            # Separate value and advantage streams.
            self.val_fc = Linear(32 * 7 * 7, hidden_size)
            self.val = Linear(hidden_size, 1)
            self.adv_fc = Linear(32 * 7 * 7, hidden_size)
            self.adv = Linear(hidden_size, num_actions)
            if not use_noisy_net:
                self.val_fc = init_(self.val_fc)
                self.adv_fc = init_(self.adv_fc)
                self.val = init2_(self.val)
                self.adv = init2_(self.adv)
        else:
            self.fc = Linear(32 * 7 * 7, hidden_size)
            self.critic_linear = Linear(hidden_size, num_actions)
            if not use_noisy_net:
                self.fc = init_(self.fc)
                self.critic_linear = init2_(self.critic_linear)
        self.train()

    def forward(self, x):
        # Pixel input in [0, 255]; scale to [0, 1] first.
        h = F.relu(self.conv1(x / 255.0))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = h.view(h.size(0), -1)
        if self.use_duel:
            value = self.val(F.relu(self.val_fc(h)))
            advantage = self.adv(F.relu(self.adv_fc(h)))
            # Q = V + (A - mean(A)) keeps the duelling decomposition
            # identifiable (mean over the whole tensor, as in the original).
            return value + advantage - advantage.mean()
        return self.critic_linear(F.relu(self.fc(h)))

    def sample_noise(self):
        """Resample noise in every NoisyLinear layer (no-op otherwise)."""
        if not self.use_noisy_net:
            return
        if self.use_duel:
            for layer in (self.val_fc, self.val, self.adv_fc, self.adv):
                layer.sample_noise()
        else:
            self.fc.sample_noise()
            self.critic_linear.sample_noise()
class C51(nn.Module):
    """Distributional DQN head.

    Emits one distribution over `atoms` support points per action: softmax
    probabilities for plain C51, raw per-atom outputs when use_qr_c51=True
    (quantile-regression variant).  Supports optional duelling decomposition
    and NoisyNet linear layers.
    """
    def __init__(self, num_inputs, hidden_size=512, num_actions=4,
        use_duel=False, use_noisy_net=False, atoms=51, vmin=-10, vmax=10, use_qr_c51=False):
        super(C51, self).__init__()
        # vmin/vmax bound the fixed support of the value distribution; they
        # are stored here but the categorical projection happens elsewhere.
        self.atoms = atoms
        self.vmin = vmin
        self.vmax = vmax
        self.num_actions = num_actions
        self.use_duel = use_duel
        self.use_noisy_net = use_noisy_net
        self.use_qr_c51 = use_qr_c51
        # Kaiming-uniform (fan-in, relu) init for conv and linear layers.
        init_ = lambda m: init(m,
            nn.init.kaiming_uniform_,
            lambda x: nn.init.constant_(x, 0),
            nonlinearity='relu',
            mode='fan_in')
        init2_ = lambda m: init(m,
            nn.init.kaiming_uniform_,
            lambda x: nn.init.constant_(x, 0),
            nonlinearity='relu',
            mode='fan_in')
        # Nature-DQN conv torso: 84x84 input -> 32x7x7 feature map.
        self.conv1 = init_(nn.Conv2d(num_inputs, 32, 8, stride=4))
        self.conv2 = init_(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = init_(nn.Conv2d(64, 32, 3, stride=1))
        if use_noisy_net:
            Linear = NoisyLinear
        else:
            Linear = nn.Linear
        # fc2 emits one logit per (action, atom) pair.
        self.fc1 = Linear(32*7*7, hidden_size)
        self.fc2 = Linear(hidden_size, num_actions*atoms)
        if self.use_duel:
            # Separate state-value stream producing one distribution.
            self.val_fc = Linear(32*7*7, hidden_size)
            self.val = Linear(hidden_size, atoms)
        # Param init
        if not use_noisy_net:
            self.fc1 = init_(self.fc1)
            self.fc2 = init2_(self.fc2)
            if self.use_duel:
                self.val_fc = init_(self.val_fc)
                self.val = init2_(self.val)
    def forward(self, x):
        """Return per-action atom outputs, shape [batch x actions x atoms]:
        probabilities (softmax over atoms) for C51, raw values for QR."""
        x = x / 255.0  # pixel input scaled to [0, 1]
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(x.size(0), -1)
        if self.use_duel:
            val_x = F.relu(self.val_fc(x))
            values = self.val(val_x).unsqueeze(1) # from batch x atoms to batch x 1 x atoms
            x = F.relu(self.fc1(x))
            x = self.fc2(x)
            x_batch = x.view(-1, self.num_actions, self.atoms)
            # Duelling combine per atom: V + A - mean_over_actions(A).
            duel = values + x_batch - x_batch.mean(1, keepdim=True)
            if self.use_qr_c51:
                y = duel
            else:
                y = F.log_softmax(duel, dim = 2).exp() # y is of shape [batch x action x atoms]
        else:
            # A Tensor of shape [batch x actions x atoms].
            x = F.relu(self.fc1(x))
            x = self.fc2(x)
            x_batch = x.view(-1, self.num_actions, self.atoms)
            if self.use_qr_c51:
                y = x_batch
            else:
                y = F.log_softmax(x_batch, dim=2).exp() # y is of shape [batch x action x atoms]
        return y
    def sample_noise(self):
        """Resample NoisyNet noise in the noisy linear layers (no-op otherwise)."""
        if self.use_noisy_net:
            if self.use_duel:
                self.fc1.sample_noise()
                self.fc2.sample_noise()
                self.val_fc.sample_noise()
                self.val.sample_noise()
            else:
                self.fc1.sample_noise()
                self.fc2.sample_noise()
class IQN_C51(nn.Module):
    """Implicit Quantile Network head.

    Embeds freshly sampled quantile fractions tau with a cosine basis,
    modulates the conv features with that embedding (element-wise product),
    and outputs one value per (action, sampled quantile).
    """
    def __init__(self, num_inputs, hidden_size=512, num_actions=4,
        use_duel=False, use_noisy_net=False):
        super(IQN_C51, self).__init__()
        self.num_actions = num_actions
        self.use_duel = use_duel
        self.use_noisy_net = use_noisy_net
        # Dimension of the cosine embedding of tau.
        self.quantile_embedding_dim = 64
        self.pi = np.pi
        # NOTE(review): kaiming_uniform_ itself takes `nonlinearity`, not
        # `gain` -- confirm that utils.init forwards these kwargs correctly.
        init_ = lambda m: init(m,
            nn.init.kaiming_uniform_,
            lambda x: nn.init.constant_(x, 0),
            gain=nn.init.calculate_gain('relu'),
            mode='fan_in')
        init2_ = lambda m: init(m,
            nn.init.kaiming_uniform_,
            lambda x: nn.init.constant_(x, 0),
            gain=nn.init.calculate_gain('relu'),
            mode='fan_in')
        self.conv1 = init_(nn.Conv2d(num_inputs, 32, 8, stride=4))
        self.conv2 = init_(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = init_(nn.Conv2d(64, 32, 3, stride=1))
        if use_noisy_net:
            Linear = NoisyLinear
        else:
            Linear = nn.Linear
        # ----------------------------------------------------------------------------
        # self.fc1 = Linear(32*7*7, hidden_size)
        self.fc2 = Linear(hidden_size, num_actions*1)
        # ----------------------------------------------------------------------------
        # Probe the conv stack with a dummy 84x84 input to size the fc layers.
        Atari_Input = torch.FloatTensor(1, num_inputs, 84, 84)
        temp_fea = self.conv3(self.conv2(self.conv1(Atari_Input)))
        temp_fea = temp_fea.view(temp_fea.size(0), -1)
        state_net_size = temp_fea.size(1)
        del Atari_Input
        del temp_fea
        # tau-embedding projection into feature space, then joint hidden layer.
        self.quantile_fc0 = nn.Linear(self.quantile_embedding_dim, state_net_size)
        self.quantile_fc1 = nn.Linear(state_net_size, hidden_size)
        # ----------------------------------------------------------------------------
        if self.use_duel:
            self.quantile_fc_value = Linear(hidden_size, 1)
        # Param init
        if not use_noisy_net:
            self.quantile_fc0 = init2_(self.quantile_fc0)
            self.quantile_fc1 = init2_(self.quantile_fc1)
            self.fc2 = init2_(self.fc2)
            if self.use_duel:
                self.quantile_fc_value = init2_(self.quantile_fc_value)
    def forward(self, x, num_quantiles):
        """Return (y, tau): y has shape [batch x actions x num_quantiles],
        quantile values at the freshly sampled fractions tau ~ U(0, 1)."""
        x = x / 255.0
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(x.size(0), -1)
        BATCH_SIZE = x.size(0)
        state_net_size = x.size(1)
        # One quantile fraction per (sample, quantile) pair.
        tau = torch.FloatTensor(BATCH_SIZE * num_quantiles, 1).to(x)
        tau.uniform_(0, 1)
        # ----------------------------------------------------------------------------------------------
        quantile_net = torch.FloatTensor([i for i in range(1, 1+self.quantile_embedding_dim)]).to(x)
        # -------------------------------------------------------------------------------------------------
        # Cosine embedding: cos(i * pi * tau) for i = 1..embedding_dim.
        tau_expand = tau.unsqueeze(-1).expand(-1, -1, self.quantile_embedding_dim) # [Batch*Np x 1 x 64]
        quantile_net = quantile_net.view(1, 1, -1) # [1 x 1 x 64] --> [Batch*Np x 1 x 64]
        quantile_net = quantile_net.expand(BATCH_SIZE*num_quantiles, 1, self.quantile_embedding_dim)
        cos_tau = torch.cos(quantile_net * self.pi * tau_expand) # [Batch*Np x 1 x 64]
        cos_tau = cos_tau.squeeze(1) # [Batch*Np x 64]
        # -------------------------------------------------------------------------------------------------
        out = F.relu(self.quantile_fc0(cos_tau)) # [Batch*Np x feaSize]
        # fea_tile = torch.cat([x]*num_quantiles, dim=0)
        fea_tile = x.unsqueeze(1).expand(-1, num_quantiles, -1) # [Batch x Np x feaSize]
        out = out.view(BATCH_SIZE, num_quantiles, -1) # [Batch x Np x feaSize]
        # Element-wise modulation of state features by the tau embedding.
        product = (fea_tile * out).view(BATCH_SIZE*num_quantiles, -1)
        combined_fea = F.relu(self.quantile_fc1(product)) # (Batch*atoms, 512)
        if self.use_duel:
            values = self.quantile_fc_value(combined_fea) # from [batch*atoms x 1] to [Batch x 1 x Atoms]
            values = values.view(-1, num_quantiles).unsqueeze(1)
            x = self.fc2(combined_fea)
            x_batch = x.view(BATCH_SIZE, num_quantiles, self.num_actions)
            # After transpose, x_batch becomes [batch x actions x atoms]
            x_batch = x_batch.transpose(1, 2).contiguous()
            # Duelling combine: advantage centred over the action dimension.
            action_component = x_batch - x_batch.mean(1, keepdim=True)
            duel_y = values + action_component
            y = duel_y
        else:
            x = self.fc2(combined_fea)
            # [batch x atoms x actions].
            y = x.view(BATCH_SIZE, num_quantiles, self.num_actions)
            # output should be # A Tensor of shape [batch x actions x atoms].
            y = y.transpose(1, 2).contiguous()
        # ------------------------------------------------------------------------------------------------ #
        return y, tau # [batch x actions x atoms]
    def sample_noise(self):
        """Resample NoisyNet noise (only the fc layers can be noisy here)."""
        if self.use_noisy_net:
            if self.use_duel:
                self.fc2.sample_noise()
                self.quantile_fc_value.sample_noise()
            else:
                self.fc2.sample_noise()
import math
from typing import Any, List
import torch
from torch import nn
from torch.nn.functional import pad
def init_weights_xavier_(network, activation="tanh"):
    """Initialize `network` in place: Xavier-normal weights, zero biases.

    Parameters
    ----------
    network : torch.nn.Module
        The network whose parameters are (re-)initialized.
    activation : str
        Activation name used to look up the Xavier gain.
    """
    gain = nn.init.calculate_gain(activation)
    for name, param in network.named_parameters():
        if "bias" in name:
            nn.init.constant_(param, 0.0)
        elif "weight" in name:
            nn.init.xavier_normal_(param, gain=gain)
def initialize_xavier_dynet_(model, gain=1.0):
    """DyNet-style Xavier init: uniform in +/- gain*sqrt(3*ndim)/sqrt(sum(dims)).

    Unlike torch's xavier_uniform_, biases are zeroed while every other
    parameter (including 1-D weights) is sampled uniformly.  A bare
    nn.Parameter is initialized directly.

    Parameters
    ----------
    model : torch.nn.Module or torch.nn.Parameter
        Model (or single parameter) to initialize the weights of.
    gain : float
        See the Xavier paper; 1.0 is appropriate for tanh.
    """
    def _sample(param):
        ndim = len(param.size())
        total = sum(param.size())
        bound = gain * math.sqrt(3 * ndim) / math.sqrt(total)
        torch.nn.init.uniform_(param, -bound, bound)

    if isinstance(model, torch.nn.Parameter):
        _sample(model)
        return
    for name, param in model.named_parameters():
        if "bias" in name:
            nn.init.constant_(param, 0.0)
        elif "weight" in name:
            _sample(param)
def freeze(module):
    """Disable gradient tracking for every parameter of `module`."""
    for p in module.parameters():
        p.requires_grad = False
def unfreeze(module):
    """Re-enable gradient tracking for every parameter of `module`."""
    for p in module.parameters():
        p.requires_grad = True
def pad_list(sequence: List, length: int, padding: Any):
    """Right-pad `sequence` with `padding` up to `length` (never truncates)."""
    assert len(sequence) <= length
    missing = length - len(sequence)
    return sequence + [padding] * missing
def pad_list_of_lists(sequences: List[List], padding: Any = 0):
    """Pad every inner list with `padding` to the length of the longest one."""
    target = max(len(seq) for seq in sequences)
    return [pad_list(seq, length=target, padding=padding) for seq in sequences]
def pad_tensor(tensor, length, padding=0):
    """Right-pad a 1-D tensor to `length` with the scalar `padding`."""
    size = tensor.shape[0]
    assert length >= size, "Tensor too long to pad."
    assert len(tensor.shape) == 1, "Tensor must be one-dimensional."
    return pad(tensor, [0, length - size], value=padding)
def pad_tensor_list(tensors, padding=0, length=None):
    """Stack 1-D tensors after padding each to a common length.

    The common length is that of the longest tensor, or `length` if larger.
    """
    target = max(len(t) for t in tensors)
    if length is not None:
        target = max(target, length)
    padded = [pad_tensor(t, length=target, padding=padding) for t in tensors]
    return torch.stack(padded)
def lookup_tensors_for_indices(indices_batch, sequence_batch):
    """For each (sequence, indices) pair, gather the sequence rows at the
    given indices and stack the results into one batch tensor."""
    gathered = [
        torch.index_select(sequence, dim=0, index=idx)
        for sequence, idx in zip(sequence_batch, indices_batch)
    ]
    return torch.stack(gathered)
def get_padded_tensors_for_indices(
    indices: torch.Tensor,
    lengths: torch.Tensor,
    contextualized_input_batch: torch.Tensor,
    max_length: int,
    padding: torch.Tensor,
    device: str = "cpu",
):
    """Gather per-sequence token vectors at `indices`, replace positions
    beyond each row's length with `padding`, truncate to `max_length`, and
    return the result flattened to (batch, max_length * token_dim).

    NOTE(review): `indices` is handled as a list of 1-D index tensors (it is
    passed to pad_tensor_list) despite the torch.Tensor annotation — confirm.
    """
    # Pad the index lists so every row has max_length entries (index 0 pads).
    indices = pad_tensor_list(indices, length=max_length)
    # Lookup the contextualized tokens from the indices
    batch = lookup_tensors_for_indices(indices, contextualized_input_batch)
    batch_size = batch.size(0)
    sequence_size = max(batch.size(1), max_length)
    token_size = batch.size(2)
    # Expand the padding vector over the size of the batch
    padding_batch = padding.expand(batch_size, sequence_size, token_size)
    if max(lengths) == 0:
        # If the batch is completely empty, we can just return the whole padding batch
        batch_padded = padding_batch
    else:
        # Build a mask and expand it over the size of the batch
        mask = torch.arange(sequence_size, device=device)[None, :] < lengths[:, None]
        mask = mask.unsqueeze(2).expand(batch_size, sequence_size, token_size)
        batch_padded = torch.where(
            mask,  # Condition
            batch,  # If condition is 1
            padding_batch,  # If condition is 0
        )
    # Cut the tensor at the specified length
    batch_padded = torch.split(batch_padded, max_length, dim=1)[0]
    # Flatten the output by concatenating the token embeddings
    return batch_padded.contiguous().view(batch_padded.size(0), -1)
def get_mask(batch, lengths, device="cpu"):
    """Boolean mask of shape (batch, seq): True where position < row length."""
    positions = torch.arange(batch.size(1), device=device)
    return positions[None, :] < lengths[:, None]
def mask_(batch, lengths, masked_value=float("-inf"), device="cpu"):
    """In place, overwrite positions beyond each row's length with
    `masked_value`; returns the mutated batch for convenience."""
    keep = get_mask(batch, lengths, device)
    batch[~keep] = masked_value
    return batch
def to_int_tensor(data: Any, device="cpu"):
    """Coerce `data` (tensor or array-like) to an int64 tensor on `device`."""
    if not isinstance(data, torch.Tensor):
        return torch.tensor(data, dtype=torch.int64, device=device)
    return data.type(torch.int64).to(device=device)
def to_byte_tensor(data: Any, device="cpu"):
    """Coerce `data` (tensor or array-like) to a uint8 tensor on `device`."""
    if not isinstance(data, torch.Tensor):
        return torch.tensor(data, dtype=torch.uint8, device=device)
    return data.type(torch.uint8).to(device=device)
def to_float_tensor(data: Any, device="cpu"):
    """Coerce `data` (tensor or array-like) to a float32 tensor on `device`."""
    if not isinstance(data, torch.Tensor):
        return torch.tensor(data, dtype=torch.float32, device=device)
    return data.type(torch.float32).to(device=device)
|
import pygame,enteties
class Projectil(enteties.Entetie):
    """A projectile entity: flies along a fixed movement vector, loops the
    'ball' animation, and switches to an 'explosion' loop on impact."""

    # Shared animation frames keyed by state name ('ball', 'explosion').
    # BUG FIX: the class declared `frames` but every access read the
    # undeclared `Projectil.frame`, raising AttributeError unless callers
    # patched that name; all accesses now use the declared `frames`.
    frames = {}

    def __init__(self, position, side_left, tipo, movment, time=100):
        super(Projectil, self).__init__(None, position)
        self.movement = movment          # (dx, dy) applied every move()
        self.frame_index = 0             # fractional animation cursor
        self.frame_on = "ball"           # current animation state
        self.rect = Projectil.frames["ball"][0].get_rect()
        self.rect.center = position
        self.hit_box = pygame.rect.Rect((self.rect.center), (5, 5))
        self.side_left = side_left       # True -> sprite drawn flipped
        self.time = time
        self.test_mode = True            # draws the hit box overlay
        self.is_end = False              # True once the explosion finished

    def move(self, player, tile_list):
        """Advance by the constant movement vector; keep the hit box centred."""
        self.rect.topleft = (self.rect.x + self.movement[0],
                             self.rect.y + self.movement[1])
        self.hit_box.center = self.rect.center

    def draw(self, screen, camera=(0, 0)):
        """Draw the current frame (flipped when facing left) and, in test
        mode, a translucent red overlay showing the hit box."""
        # When the explosion animation wraps around, the projectile is spent.
        if self.animation() and self.frame_on == "explosion":
            self.is_end = True
        screen.blit(
            pygame.transform.flip(
                Projectil.frames[self.frame_on][int(self.frame_index) - 1],
                self.side_left, False),
            (self.rect.x - camera[0], self.rect.y - camera[1]))
        overlay = pygame.Surface(self.hit_box.size).convert_alpha()
        overlay.fill((200, 0, 0, 100))
        if self.test_mode:
            screen.blit(overlay, (self.hit_box.x - camera[0],
                                  self.hit_box.y - camera[1]))

    def animation(self):
        """Advance the frame cursor; return True when the loop wraps."""
        self.frame_index += 13 * (1 / 40)
        if len(Projectil.frames[self.frame_on]) <= self.frame_index - 1:
            self.frame_index = 0
            return True
        return False

    def colisao(self, player):
        """On contact with `player`, stop moving and start the explosion."""
        if self.rect.colliderect(player):
            self.movement = [0, 0]
            self.frame_on = "explosion"
            self.frame_index = 0
            return True
#! encoding=utf-8
# Creation Date: 2018-03-27 21:19:29
# Created By: Heyi Tang
import json
import os
def json2f(data, f):
    """Serialize `data` to the file path `f` as pretty-printed JSON.

    Sets are converted to lists first, since JSON has no set type.
    """
    payload = list(data) if isinstance(data, set) else data
    with open(f, "w") as fout:
        json.dump(payload, fout, indent = 2)
def f2json(f):
    """Load and return the JSON document stored at the file path `f`."""
    with open(f) as fin:
        return json.load(fin)
def traverse_files(fdir, ext, prefix = ""):
    """Recursively collect files under `fdir` whose extension equals `ext`.

    Returned names are '/'-joined paths relative to the original root.
    `fdir` must end with a path separator, matching the original call style.

    BUG FIX: the recursive call used to pass `f + "/"` as the new prefix,
    discarding all ancestor directories, so files nested two or more levels
    deep were reported with an incomplete relative path.
    """
    files = []
    for f in os.listdir(fdir):
        f_full = fdir + f
        if os.path.isdir(f_full):
            # Accumulate the full relative prefix, not just the last component.
            files += traverse_files(f_full + "/", ext, prefix + f + "/")
        elif os.path.isfile(f_full):
            if f.split(".")[-1] == ext:
                files.append(prefix + f)
    return files
|
# 作业:用代码模拟博客园系统
# 项目分析
## 一. 首先程序启动,页面显示下面内容供用户选择
'''
1. 请登录
2. 请注册
3. 进入文章页面
4. 进入评论页面
5. 进入日记页面
6. 进入收藏页面
7. 注销账号
8. 退出整个程序
'''
## 二.必须实现的功能
'''
1.注册功能要求
a.用户名、密码要记录在文件中
b.用户名要求:只能含有字母或者数字,不能含有特殊字符并且确保用户名唯一
c.密码要求:长度要在 6~14 个字符之间
d.超过三次登陆还未成功则退出整个程序
2.登陆功能要求
a.用户输入用户名、密码进行登陆验证
b.登录成功后,才可以访问 3~7 选项,如果没有登陆或者登陆不成功时访问3-7选项,不允许访问,让其先登录。(装饰器)
3.进入文章页面要求:
a.提示欢迎 xxx 进入文章页面
b.此时用户可以选择:直接写入内容,还是导入 .md 文件
- 如果选择直接写入内容:让他直接写入文件明|文件内容......最后创建一个文章
- 如果选择导入 .md 文件:让用户输入已经准备好的 .md 文件的文件路径(相对路径即可:比如函数的进阶.md),然后将此 .md 文件的全部内容写入文章(函数的进阶.txt)中
4.进入评论页面要求:
提示欢迎 xxx 进入评论页面
5.进入日记页面要求:
提示欢迎 xxx 进入日记页面
6.进入收藏页面要求:
提示欢迎 xx 进入收藏页面
7.注销账号要求:
不是退出整个程序,而是将已经登陆的状态变成未登录的状态(访问 3-7 选项时需要重新登录)
8.退出整个程序要求:
就是结束整个程序
'''
## 三、选作功能
'''
1.评论页面要求:
a.提示欢迎 xx 进入评论页面
b.让用户选择要评论的文章
这个需要借助于 os 模块实现此功能。将所有的文章文件单独放置在一个目录里,利用 os 模块 listdir 功能,可以将一个目录下所有的文件名以字符串的形式存在一个列表中并返回。
#代码:
import os
print(os.listdir(r'D:\\teaching_show\\article'))
# 返回 ['01 函数的初始.txt','02 函数的进阶.txt']
c.选择要评论的文章后,先要将原文章内容全部读一遍,然后输入你的评论,评论要过滤掉敏感字符:“苍老师”“东京热”“武藤兰”“波多野结衣”,将敏感词替换成等长度的 * ,之后,写在文章的评论区最下边
文章的结构:
文章具体内容
.....
评论区:
------------------------------------------
(用户名)xx:
评论内容。。。。。
(用户名)oo:
评论内容。。。。。
|
import cmath
from cmath import exp, pi, sin, cos
import numpy as np
import matplotlib.pyplot as plt
def FFT(A):
    """Recursive radix-2 Cooley-Tukey transform of `A`.

    The length of `A` must be a power of two.  Uses the exp(+2*pi*i/N) root
    convention, as in the original script.
    """
    N = len(A)
    if N == 1:
        return A
    # Split by parity and transform each half recursively.
    evens = FFT(A[0::2])
    odds = FFT(A[1::2])
    Wn = exp(2 * pi * 1j / N)
    W = 1
    first_half, second_half = [], []
    for k in range(int(N / 2)):
        twiddled = W * odds[k]
        first_half.append(evens[k] + twiddled)
        second_half.append(evens[k] - twiddled)
        W = W * Wn
    return first_half + second_half
# --- Demo: sample a cosine and plot its FFT magnitude spectrum ---
m = 10 #choose the pow of two!!!s
N = pow(2, m) #1024 samples
T = 1/100 #sample spacing
time = np.linspace(0.0, N*T, N)
# NOTE(review): `freq` spans 0..Nyquist over all N points, but the FFT output
# has N bins covering the full sampling range; only the first N/2 bins match
# this axis -- confirm the intended x-axis scaling.
freq = np.linspace(0.0, 1/(2*T), N)
signal = []
# NOTE(review): cmath.cos returns complex values, so `signal` is a list of
# complex numbers; matplotlib plots only the real part (with a warning).
for t in time:
    signal.append(2*cos(0.5*pi*t + 1))
fourier_signal = FFT(signal)
plt.figure(1)
plt.plot(time, signal)
plt.grid()
plt.xlabel('Time (sec)')
plt.ylabel('Amplitude')
plt.title('Signal')
plt.figure(2)
plt.title('Fourier Transform')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.plot(freq, np.abs(fourier_signal))
plt.grid()
plt.show()
|
import unittest
from katas.kyu_7.triangular_treasure import triangular
class TriangularTestCase(unittest.TestCase):
    """Tests for triangular(n): the n-th triangular number, 0 for n <= 0."""

    def test_equals(self):
        self.assertEqual(triangular(0), 0)

    def test_equals_2(self):
        self.assertEqual(triangular(2), 3)

    def test_equals_3(self):
        self.assertEqual(triangular(3), 6)

    def test_equals_4(self):
        # Negative input clamps to 0 rather than raising.
        self.assertEqual(triangular(-10), 0)

    def test_equals_5(self):
        # Large input: exercises arbitrary-precision integer arithmetic.
        self.assertEqual(triangular(613827063227449),
                         188391831775217643091685137525)
|
from Bio.PDB import * |
from flask import Flask
from flask import jsonify
import mysql.connector
from util import db_util
from util import youtube_util
from util import ssl_util
app = Flask(__name__)
@app.route("/")
def hello_world():
    # Simple liveness endpoint.
    return "Hello, World!"
@app.route("/api/get/<key>", methods=["GET"])
def api_get(key):
    # Echo endpoint: returns the URL path parameter unchanged.
    return key
@app.route("/api/get_db/<key>", methods=["GET"])
def api_get_db(key):
    """Return every row of use_money as {key: [{'id': ..., 'money': ...}]}.

    BUG FIX: `rows` was only assigned inside the try block, so a DB error
    caused a NameError at the loop below instead of an empty response; the
    connection was also never closed.
    """
    data = {key: []}
    sql = 'SELECT * FROM use_money'
    rows = []
    conn = None
    try:
        conn = db_util.conn_db()   # connect to the DB
        cursor = conn.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall()   # fetch the whole result set
    except(mysql.connector.errors.ProgrammingError) as e:
        print('エラーだぜ')
        print(e)
    finally:
        if conn is not None:
            conn.close()
    for t_row in rows:
        # NOTE(review): default mysql cursors yield plain tuples; attribute
        # access (t_row.id) implies a named-row cursor -- confirm db_util.
        res = {"id": t_row.id, "money": t_row.money}
        data[key].append(res)
    return jsonify(data)
# If this API responds with true, the DB is reachable.
@app.route("/api/status/<key>", methods=["GET"])
def api_get_db_status(key):
    """Report DB connectivity as {key: [bool]}, or the error text on failure."""
    payload = {key: []}
    try:
        connection = db_util.conn_db()
        payload[key].append(connection.is_connected())
    except(mysql.connector.errors.InterfaceError) as e:
        return format(e)
    return jsonify(payload)
@app.route("/api/youtube/<query>", methods=["GET"])
def search_youtube(query):
    # Delegate to the YouTube helper; the response format is whatever
    # youtube_util.search_video returns -- TODO confirm JSON vs plain text.
    return youtube_util.search_video(query)
@app.route("/api/openssl", methods=["GET"])
def getDegitalCertificate():
    """Return the server's public key as JSON ({'key': ...})."""
    public_key = ssl_util.getPublicKey()
    return jsonify({"key": public_key})
@app.route("/api/privatekey", methods=["GET"])
def getPrivateKey():
    """Return the server's private key as JSON ({'key': ...})."""
    return jsonify({"key": ssl_util.getPrivateKey()})
@app.route("/api/signature", methods=["GET"])
def signature():
    """Run the signing routine and report its result as {'result': ...}."""
    outcome = ssl_util.signature()
    return jsonify({"result": outcome})
# NOTE(review): debug=True enables the Werkzeug debugger/reloader and binds
# on all interfaces (port 7010) -- do not use this configuration in production.
app.run(host='0.0.0.0',port=7010,debug=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.