text stringlengths 8 6.05M |
|---|
# Pima Indians diabetes binary classifier: small Keras MLP on 8 features.
from keras.models import Sequential
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Dropout, BatchNormalization
import numpy
import tensorflow as tf
# Fix random seeds for reproducibility.
# NOTE(review): tf.set_random_seed is the TF1.x API; TF2 renamed it to
# tf.random.set_seed -- confirm the installed TF version.
seed = 0
numpy.random.seed(seed)
tf.set_random_seed(seed)
# Load the dataset: columns 0-7 are features, column 8 is the binary label.
dataset = numpy.loadtxt('./data/pima-indians-diabetes.csv', delimiter=',')
x = dataset[:, 0:8]
y = dataset[:, 8]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=66, test_size=0.2)
from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = StandardScaler()
# scaler = MinMaxScaler()
# Fit the scaler on the training split only, then apply it to both splits
# (avoids leaking test-set statistics into training).
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Build the model: three hidden ReLU layers, sigmoid output for binary output.
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(6, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile the model.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# NOTE(review): patience=30 exceeds epochs=20, so early stopping can never
# trigger here; 'val_acc' is the pre-Keras-2.3 metric key ('val_accuracy'
# in newer versions) -- confirm against the installed Keras.
early = EarlyStopping(monitor='val_acc', patience=30, mode='auto')
# Train, using the held-out test split as validation data.
model.fit(x_train, y_train, epochs=20, batch_size=8, callbacks=[early], validation_data=(x_test, y_test))
# Print final test accuracy (evaluate returns [loss, accuracy]).
print("\n Accuracy: %.4f" % (model.evaluate(x_test,y_test)[1]))
|
from models import Category, Subcategory
def categories(request):
    """Django context processor exposing all categories plus, for each
    category name, its related subcategories.

    Returns a dict with:
      - 'categories': queryset of all Category rows
      - 'subcategories': {category name -> queryset of its subcategories}
    """
    # prefetch_related fetches all subcategories in one extra query instead
    # of issuing one query per category (N+1) in the loop below.
    cats = Category.objects.prefetch_related('subcategory_set')
    subcategories = {cat.name: cat.subcategory_set.all() for cat in cats}
    return {'categories': cats, 'subcategories': subcategories}
|
from selenium import webdriver
import time
# Demo: trigger lazy loading on an infinite-scroll page by repeatedly
# scrolling the window to the bottom of the (growing) document.
driver = webdriver.Chrome(executable_path="C:/Users/ABHAY/Selenium/chromedriver.exe")
driver.implicitly_wait(10)
driver.maximize_window()
driver.get("http://the-internet.herokuapp.com/infinite_scroll")
time.sleep(2)
n = 4  # number of scroll passes (was defined but never used)
for i in range(n):
    # BUG FIX: scrolling to the fixed offset 1080 moves the page only once;
    # every later pass was a no-op, so no additional content ever loaded.
    # Scroll to the current document bottom, which grows after each load.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
    time.sleep(2)  # give the page time to append the next batch of content
driver.quit()
|
import os
import xlrd
from tqdm import tqdm
def all_files_path(rootDir):
    """Recursively walk *rootDir* and append every file path found to the
    text file ``dir.txt`` in the current directory, one path per line.

    :param rootDir: directory tree to walk.
    """
    filepaths = [os.path.join(root, file)
                 for root, dirs, files in os.walk(rootDir)
                 for file in files]
    # BUG FIX: the original opened dir.txt and never closed it, leaking the
    # handle and risking unflushed buffered writes. A context manager closes
    # and flushes deterministically.
    with open('dir.txt', 'a', encoding='utf-8') as f1:
        for filepath in filepaths:
            f1.write(filepath + '\n')
def strueture_data():
    """Read every Excel workbook listed in ``dir.txt`` and export two
    tab-separated text files:

    - ``三级节点.txt``: one line per data row with its id and name
      (level-3 nodes)
    - ``二三级关系.txt``: one line per data row with the sheet's parent
      name (cell 0,1) and the row name (level-2 -> level-3 relations)

    NOTE(review): the name looks like a typo for ``structure_data``; kept
    as-is because the main block below calls it by this name.
    """
    f2 = open('三级节点.txt', 'a', encoding='utf-8')
    f3 = open('二三级关系.txt', 'a', encoding='utf-8')
    f4 = open('dir.txt', 'r', encoding='utf-8')
    for filepath in tqdm(f4.readlines()):
        filepath = filepath.strip()
        book = xlrd.open_workbook(filepath)
        table = book.sheets()[0]
        nrows= table.nrows
        list1 = []
        list2 = []
        list3 = []
        list4 = []
        # Data rows start at index 2; str(cell) looks like "text:'value'",
        # so splitting on ':' and taking the last part extracts the value.
        for nrow in range(2, nrows):
            id = str(table.cell(nrow, 0)).strip().split(':')[-1]
            name = str(table.cell(nrow, 1)).strip().split(':')[-1]
            lastname = str(table.cell(0, 1)).strip().split(':')[-1]
            list1.append(id)
            list1.append(name)
            list2.append(list1)
            list1 = []
            list3.append(lastname)
            list3.append(name)
            list4.append(list3)
            list3 = []
        # for i in list2:
        # print(list2, len(list2))
        # Write [id, name] pairs, tab separated, one row per line.
        for i in range(len(list2)):
            for j in range(len(list2[i])):
                f2.write(str(list2[i][j]))
                f2.write('\t')
            f2.write('\n')
        # print(list4)
        # Write [parent, name] pairs, tab separated, one row per line.
        for a in range(len(list4)):
            for b in range(len(list4[a])):
                f3.write(str(list4[a][b]))
                f3.write('\t')
            f3.write('\n')
    f2.close()
    f3.close()
    f4.close()
def test():
    """Debug helper: print every line of 三级节点.txt (and its type)."""
    f = open('三级节点.txt', 'r', encoding='utf-8')
    for i in f.readlines():
        print(i, type(i))
if __name__ == '__main__':
    # all_files_path('.\提取的数据')
    strueture_data()
    # test()
# Python 2 static file server that disables client-side caching.
import SimpleHTTPServer
import SocketServer
PORT = 8008
class MyHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Request handler that injects no-cache headers into every response.
    def end_headers(self):
        # Insert our cache-busting headers before the base class closes
        # the header section.
        self.send_my_headers()
        SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
    def send_my_headers(self):
        # Ask HTTP/1.1 caches, HTTP/1.0 caches and proxies not to cache.
        self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
        self.send_header("Pragma", "no-cache")
        self.send_header("Expires", "0")
httpd = SocketServer.TCPServer(("", PORT), MyHTTPRequestHandler)
print "serving at port", PORT
httpd.serve_forever()
|
import unittest
from katas.kyu_8.days_in_the_year import year_days
class YearDaysTestCase(unittest.TestCase):
    """Tests for year_days(): the expected strings follow the Gregorian
    leap rule (366 days for leap years such as 0, -64, 2016 and 2000;
    365 otherwise, including century non-leap years like -300)."""
    def test_equals(self):
        self.assertEqual(year_days(0), '0 has 366 days')
    def test_equals_2(self):
        self.assertEqual(year_days(-64), '-64 has 366 days')
    def test_equals_3(self):
        self.assertEqual(year_days(2016), '2016 has 366 days')
    def test_equals_4(self):
        self.assertEqual(year_days(1974), '1974 has 365 days')
    def test_equals_5(self):
        self.assertEqual(year_days(-10), '-10 has 365 days')
    def test_equals_6(self):
        self.assertEqual(year_days(666), '666 has 365 days')
    def test_equals_7(self):
        self.assertEqual(year_days(1857), '1857 has 365 days')
    def test_equals_8(self):
        self.assertEqual(year_days(2000), '2000 has 366 days')
    def test_equals_9(self):
        self.assertEqual(year_days(-300), '-300 has 365 days')
    def test_equals_10(self):
        self.assertEqual(year_days(-1), '-1 has 365 days')
|
'''
|Task 1| Given a list of numbers, write a program that removes
all even numbers from the list and prints the list via print()
nums = [14, 21, 565, 18, 33, 20, 102, 108, 167, 891, 400]
|Task 2| Given a list of words, write a program that finds the
words longer than 4 letters and writes them into another list.
sentence = ['The', 'quick', 'brown', 'fox', 'jumps',
            'over', 'the', 'lazy', 'dog']
|Task 3| Create a list of the squares from 1 to 30. Print the
first 5 and the last 5 elements of the list.
'''
|
import argparse
import json
import joblib
from pathlib import Path
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import get_generic_path_information
from rlkit.torch.tdm.sampling import multitask_rollout
from rlkit.core import logger
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='path to the snapshot file')
parser.add_argument('--H', type=int, default=300,
help='Max length of rollout')
parser.add_argument('--nrolls', type=int, default=1,
help='Number of rollout per eval')
parser.add_argument('--mtau', type=float, help='Max tau value')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--hide', action='store_true')
args = parser.parse_args()
data = joblib.load(args.file)
if args.mtau is None:
# Load max tau from variant.json file
variant_path = Path(args.file).parents[0] / 'variant.json'
variant = json.load(variant_path.open())
try:
max_tau = variant['tdm_kwargs']['max_tau']
print("Max tau read from variant: {}".format(max_tau))
except KeyError:
print("Defaulting max tau to 0.")
max_tau = 0
else:
max_tau = args.mtau
env = data['env']
policy = data['policy']
policy.train(False)
if args.gpu:
ptu.set_gpu_mode(True)
policy.cuda()
while True:
paths = []
for _ in range(args.nrolls):
goal = env.sample_goal_for_rollout()
path = multitask_rollout(
env,
policy,
init_tau=max_tau,
goal=goal,
max_path_length=args.H,
animated=not args.hide,
cycle_tau=True,
decrement_tau=True,
)
paths.append(path)
env.log_diagnostics(paths)
for key, value in get_generic_path_information(paths).items():
logger.record_tabular(key, value)
logger.dump_tabular()
|
# Read the wall dimensions from the user, then report the wall's area and
# how much paint is needed (one bucket covers 2 square units).
wall_height = float(input('type the height the wall'))
wall_width = float(input('Type the widht the wall'))
area = wall_height * wall_width
buckets = area / 2
print('The total area the wall is {} you need {} buckets of ink'.format(area, buckets))
import lists,scraper,parsing,objects
import veggieTransformer
import healthyTransformer
import cuisineTransformer
cuisineTypes = ['indian','mexican','chinese']
def main():
    """Interactive entry point for the SVJ recipe transformer (Python 2).

    Loads the USDA ingredient database, scrapes a recipe from
    allrecipes.com, then applies the user-selected transformation
    (vegetarian / healthy / cuisine swap).
    """
    # Populate the shared ingredient database from the USDA food data file.
    lists.ingredientDB = parsing.readIngredientsFromFile('FOOD_DATA/FOOD_DES.txt')
    lists.updateNameDB()
    debug = True
    #Welcome message
    print "Welcome to SVJ Recipe Transformer created by Josiah Evans, Vanessa Fang and Salil Gupta"
    #Recipe input
    recipeURL = raw_input("Please input a recipe url from allrecipes.com:")
    print 'Loading recipe from allrecipes.com...'
    recipeInfo = scraper.retrieveRecipe(recipeURL)
    print 'Building recipe object...'
    recipe = parsing.buildRecipeObject(recipeInfo)
    print "Thanks! Now choose a transformation.\n We have three options:\n For a vegetarian tranformation (or 'un-vegetarian') type 0\n For a healthy transformation type 1\n For a cuisine transformation type 2"
    transformationType = int(raw_input("Transformation Selection: "))
    # Dispatch on the selected transformation.
    if transformationType ==0:
        print "Thanks! You have selected a veggie transformation. We are processing your request!"
        vegRecipe = veggieTransformer.veggieTransformer(recipe)
        veggieTransformer.printRecipe(vegRecipe, "Vegetarian")
    elif transformationType ==1:
        print "Thanks! You have selected a healthy transformation. We are processing your request!"
        healthyRecipe = healthyTransformer.healthyTransformer(recipe)
        healthyTransformer.printRecipe(healthyRecipe, "Healthy")
    elif transformationType ==2:
        print "Thanks! You have selected a cuisine transformation. Please choose a cuisine type:\n For Indian type 0 \n For Mexican type 1 \n for Chinese Type 2"
        cuisineType = int(raw_input("Cuisine Type: "))
        # cuisineTypes holds ['indian', 'mexican', 'chinese'] (module level).
        if cuisineType ==0:
            cuisineTransformer.cuisineChange(recipe,cuisineTypes[0])
        elif cuisineType ==1:
            cuisineTransformer.cuisineChange(recipe,cuisineTypes[1])
        elif cuisineType == 2:
            cuisineTransformer.cuisineChange(recipe,cuisineTypes[2])
        else:
            print "Sorry! But you must have typed an incorrect cuisine number!"
    else:
        print "Sorry! But you must have typed an incorrect transformation number!"
main()
|
class Polynomial(object):
    """Polynomial with int/float coefficients, highest degree first.

    ``Polynomial([1, 0, -2])`` represents ``x^2 - 2``. Leading zero
    coefficients are stripped on construction. ``__str__`` renders degree
    k as ``x<k>`` with no caret (e.g. ``x2-2``), matching the original
    format. Supports +, -, * with polynomials and scalars (both sides),
    and ==/!= against other polynomials.
    """
    def __init__(self, coeffs):
        """Store *coeffs*, stripping leading zeros.

        :param coeffs: list of int/float, highest-degree coefficient first.
        :raises TypeError: if coeffs is not a list of int/float.
        """
        if (type(coeffs) is list) and all(isinstance(x,(int, float)) for x in coeffs):
            i=0
            n=len(coeffs)-1
            # Skip leading zeros but always keep the last entry, so the
            # zero polynomial retains a single coefficient.
            while i<n and coeffs[i]==0:
                i=i+1
            self.coeffs = coeffs[i:(n+1)]
            self.n=len(self.coeffs)-1  # degree
        else:
            raise TypeError("error: coeffs is not list of int or float")
    def __str__(self):
        """Render the polynomial; 1/-1 coefficients and the exponent of
        degree-1 terms are omitted, zero middle terms are skipped."""
        s=''
        if self.n==0:
            # Constant polynomial.
            s=s+str(self.coeffs[self.n])
        else:
            # Leading term.
            if (self.coeffs[0]==1):
                if (self.n)!=1:
                    s='x'+ str(self.n)
                else:
                    s='x'
            elif (self.coeffs[0]==-1):
                if (self.n)!=1:
                    s='-x'+ str(self.n)
                else:
                    s='-x'
            else:
                if (self.n)!=1:
                    s=str(self.coeffs[0])+'x'+ str(self.n)
                else:
                    s=str(self.coeffs[0])+'x'
            # Middle terms (degree self.n-i), skipping zero coefficients.
            for i in range(1,self.n):
                if self.coeffs[i]!=0:
                    if (self.coeffs[i]>0):
                        if(self.coeffs[i]!=1):
                            if(self.n-i)!=1:
                                s=s+'+'+ str(self.coeffs[i])+'x' + str(self.n-i)
                            else:
                                s=s+'+'+ str(self.coeffs[i])+'x'
                        else:
                            if(self.n-i)!=1:
                                s=s+'+'+'x' + str(self.n-i)
                            else:
                                s=s+'+'+'x'
                    else:
                        if(self.coeffs[i]!=-1):
                            if(self.n-i)!=1:
                                s=s+ str(self.coeffs[i])+'x' + str(self.n-i)
                            else:
                                s=s+ str(self.coeffs[i])+'x'
                        else:
                            if(self.n-i)!=1:
                                s=s+'-'+'x' + str(self.n-i)
                            else:
                                s=s+'-'+'x'
            # Constant term.
            if self.coeffs[self.n]!=0:
                if (self.coeffs[self.n]>0):
                    s=s+'+'+str(self.coeffs[self.n])
                else:
                    s=s+str(self.coeffs[self.n])
        return s
    def __add__(self, p):
        """Return self + p, where p is a Polynomial or an int/float.

        :raises TypeError: for any other argument type.
        """
        if (type(p) is not int) and (type(p) is not float):
            if type(p) is Polynomial:
                # Pad the shorter coefficient list with leading zeros.
                if self.n > p.n:
                    c1=[0]*(self.n-p.n)+p.coeffs
                    c2=self.coeffs
                else:
                    c1=[0]*(p.n-self.n)+self.coeffs
                    c2=p.coeffs
                new=[i+j for i, j in zip(c1,c2)]
            else:
                raise TypeError("Unexpected type of argument")
        else:
            # BUG FIX: the original aliased ``new = self.coeffs`` and then
            # mutated ``new`` in place, silently modifying self. Copy first.
            new=list(self.coeffs)
            new[self.n]=new[self.n]+p
        return Polynomial(new)
    def __sub__(self, p):
        """Return self - p, where p is a Polynomial or an int/float.

        :raises TypeError: for any other argument type.
        """
        if (type(p) is not int) and (type(p) is not float):
            if type(p) is Polynomial:
                # Pad the shorter side; new = self - p in both branches.
                if self.n >= p.n:
                    c1=[0]*(self.n-p.n)+p.coeffs
                    c2=self.coeffs
                else:
                    c1=p.coeffs
                    c2=[0]*(p.n-self.n)+self.coeffs
                new=[j-i for i, j in zip(c1,c2)]
            else:
                raise TypeError("Unexpected type of argument")
        else:
            # BUG FIX: copy before mutating (same aliasing bug as __add__).
            new=list(self.coeffs)
            new[self.n]=new[self.n]-p
        return Polynomial(new)
    def __mul__(self, p):
        """Return self * p (Polynomial or scalar) via coefficient convolution.

        :raises TypeError: for any other argument type.
        """
        if (type(p) is int) or (type(p) is float):
            new=[p*i for i in self.coeffs]
            return Polynomial(new)
        else:
            if type(p) is Polynomial:
                new=[0]*(self.n+p.n+1)
                for i in range(self.n+1):
                    for j in range(p.n+1):
                        new[i+j]=new[i+j]+self.coeffs[i]*p.coeffs[j]
                return Polynomial(new)
            else:
                raise TypeError("Unexpected type of argument")
    def __eq__(self, p):
        """Return True if *p* is a Polynomial with identical coefficients.

        :raises TypeError: if p is not a Polynomial (kept from the original
            contract; note this makes comparison against None raise).
        """
        if (type(p) is Polynomial):
            if self.n!=p.n:
                return False
            return self.coeffs==p.coeffs
        else:
            raise TypeError("Incorrect type")
    def __ne__(self, p):
        """Inverse of __eq__; same TypeError contract."""
        if (type(p) is Polynomial):
            return not(self==p)
        else:
            raise TypeError("Incorrect type")
    def __radd__(self, p):
        """scalar + Polynomial (addition is commutative)."""
        return self.__add__(p)
    def __rsub__(self, p):
        """scalar - Polynomial, computed as (-1)*self + p."""
        return self.__mul__(-1).__add__(p)
    def __rmul__(self, p):
        """scalar * Polynomial (multiplication is commutative)."""
        return self.__mul__(p)
# -*- encoding:utf-8 -*-
from __future__ import print_function
import os
import random
import shutil
import pandas as pd
from Decorator import warnings_filter
'''
cPickle是C语言写的,速度快,pickle是纯Python写的,速度慢
'''
try:
import cPickle as pickle
except ImportError:
import pickle
def write_chr(f, ch):
    """Write the single byte with ordinal *ch* to file *f*.

    NOTE(review): chr() written to a file is Python-2 style; on Python 3
    this requires a text-mode file and only works for ch < 256.
    """
    f.write(chr(ch))
def write_int(f, no):
    """Write *no* as a little-endian 16-bit value (low byte first)."""
    if no > 65535:
        # Value does not fit in 16 bits; logged but still truncated below.
        print(no)
    b1 = no & 0xff
    b2 = no >> 8
    f.write(chr(b1))
    f.write(chr(b2))
def write_long(f, no): # int32
    """Write *no* as a little-endian 32-bit value (two 16-bit halves)."""
    no1 = no & 0xffff
    write_int(f, no1)
    no2 = no >> 16
    write_int(f, no2)
def write_int64(f, no):
    """Write *no* as a little-endian 64-bit value (two 32-bit halves)."""
    no1 = no & 0xffffffff
    write_long(f, no1)
    no2 = no >> 32
    write_long(f, no2)
def read_chr(f):
    """Read and return one byte/character from file *f*."""
    ch = f.read(1)
    return ch
def read_int(f):
    """Read a little-endian unsigned 16-bit integer from *f*."""
    b1 = ord(f.read(1))
    b2 = ord(f.read(1))
    cnt = b2 << 8 | b1
    return cnt
def read_long(f):
    """Read a little-endian unsigned 32-bit integer (two 16-bit halves)."""
    no1 = read_int(f)
    no2 = read_int(f)
    num = no2 << 16 | no1
    return num
def read_int64(f):
    """Read a little-endian unsigned 64-bit integer (two 32-bit halves)."""
    no1 = read_long(f)
    no2 = read_long(f)
    # BUG FIX: the high half is 32 bits wide, so it must be shifted by 32,
    # not 64. The original ``no2 << 64`` produced wrong values and did not
    # mirror write_int64, which splits the value at bit 32.
    num = no2 << 32 | no1
    return num
def get_file_array_from_name(root_dir, name, ret_array):
    """Recursively collect into *ret_array* the paths of all files under
    *root_dir* whose basename equals *name* (results via the out-param)."""
    for lists in os.listdir(root_dir):
        path = os.path.join(root_dir, lists)
        if os.path.isdir(path):
            get_file_array_from_name(path, name, ret_array)
        elif os.path.basename(path) == name:
            ret_array.append(path)
def list_all_file(root_dir, all_ext_list):
    """Recursively append every file path under *root_dir* to
    *all_ext_list*; prints each directory visited."""
    print(root_dir)
    for lists in os.listdir(root_dir):
        path = os.path.join(root_dir, lists)
        if os.path.isdir(path):
            list_all_file(path, all_ext_list)
        else:
            all_ext_list.append(path)
def list_all_ext_file(root_dir, ext_type, all_ext_list):
    """Recursively append to *all_ext_list* every file under *root_dir*
    whose path ends with *ext_type* (e.g. '.csv')."""
    for lists in os.listdir(root_dir):
        path = os.path.join(root_dir, lists)
        if os.path.isdir(path):
            list_all_ext_file(path, ext_type, all_ext_list)
        elif path.endswith(ext_type):
            all_ext_list.append(path)
def str_is_num10(a_str):
    """Return True if *a_str* parses as a base-10 integer."""
    try:
        int(a_str, 10)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare except: only parse failures mean "not a
        # number"; a bare except also swallowed KeyboardInterrupt etc.
        return False
def str_is_num16(a_str):
    """Return True if *a_str* parses as a base-16 integer."""
    try:
        int(a_str, 16)
        return True
    except (TypeError, ValueError):
        return False
def str_xor(a_str, key):
    """XOR each character of *a_str* with the integer *key* and return the
    resulting string (simple, symmetric obfuscation)."""
    a = []
    for x in a_str:
        rs = ord(x) ^ key
        a.append(chr(rs))
    return ''.join(a)
def str_replace_infile_once(target_name, a_str, match_str):
    """Replace *match_str* with *a_str* on the FIRST line that contains it,
    rewriting *target_name* in place.

    The file is processed in binary mode, so on Python 3 both *a_str* and
    *match_str* must be bytes (this module targets Python 2, where str and
    bytes are the same type).
    """
    f = open(target_name, "rb")
    lines = f.readlines()
    f.close()
    f = open(target_name, "wb")
    find = False
    for line in lines:
        # Replace only on the first matching line; later lines pass through.
        if (not find) and (line.find(match_str) >= 0):
            line = line.replace(match_str, a_str)
            find = True
        f.write(line)
    f.close()
def str_replace_infile(target_name, a_str, match_str):
    """Replace every occurrence of *match_str* with *a_str* on every line
    of *target_name*, rewriting the file in place.

    Binary mode is used, so on Python 3 both patterns must be bytes.
    """
    f = open(target_name, "rb")
    lines = f.readlines()
    f.close()
    f = open(target_name, "wb")
    for line in lines:
        # BUG FIX: the original tested ``if line.find(match_str):`` which is
        # falsy exactly when the match is at column 0 (find == 0) and truthy
        # when there is no match at all (find == -1) -- inverted for both
        # edge cases. Test for a real hit instead.
        if line.find(match_str) >= 0:
            line = line.replace(match_str, a_str)
        f.write(line)
    f.close()
def str_insert_infile(target_name, a_str, match_str):
    """Insert " " + *a_str* immediately AFTER the first *match_str* on each
    line containing it, rewriting *target_name* in place (binary mode; on
    Python 3 both patterns must be bytes).

    NOTE(review): the ``> 0`` test skips a match at column 0 -- possibly
    intentional (e.g. only matches after other text), TODO confirm.
    """
    f = open(target_name, "rb")
    lines = f.readlines()
    f.close()
    f = open(target_name, "wb")
    for line in lines:
        if line.find(match_str) > 0:
            d_pos = line.find(match_str)
            # Splice: <prefix + match> + " " + a_str + <rest of line>.
            tmp_str = line[
                0:d_pos + len(match_str)] + " " + a_str + line[d_pos + len(match_str):]
            line = tmp_str
        f.write(line)
    f.close()
def str_insert_infile_before(target_name, a_str, match_str):
    """Insert " " + *a_str* + " " immediately BEFORE the first *match_str*
    on each line containing it, rewriting *target_name* in place.

    Same binary-mode and column-0 caveats as str_insert_infile.
    """
    f = open(target_name, "rb")
    lines = f.readlines()
    f.close()
    f = open(target_name, "wb")
    for line in lines:
        if line.find(match_str) > 0:
            d_pos = line.find(match_str)
            tmp_str = line[0:d_pos] + " " + a_str + " " + line[d_pos:]
            line = tmp_str
        f.write(line)
    f.close()
def ch_is_num(ch):
    """Return True if *ch* is a single ASCII digit character.

    Empty and multi-character strings return False, as before; this
    replaces the original ten-way or-chain.
    """
    return len(ch) == 1 and ch in '0123456789'
def force_change_str_to_int10(will_str):
    """Strip leading non-digit characters from *will_str* and parse the
    remainder as a base-10 int.

    BUG FIX: the original advanced an index while also re-slicing the
    string at ``index + 1``, so the two went out of sync after the first
    removal and inputs like 'ab12' raised ValueError on ``int('b12')``.
    Dropping one leading character at a time keeps them consistent.

    :raises ValueError: if the string contains no digits at all.
    """
    while will_str and not ch_is_num(will_str[0]):
        will_str = will_str[1:]
    int_value = int(will_str, 10)
    return int_value
def create_random_tmp_name_with_num(salt_count):
    """Return a random string of *salt_count* decimal digits."""
    digits = "0123456789"
    salt = ''.join(random.choice(digits) for _ in range(salt_count))
    # print("* createRandomTmpNameWithNum name = " + salt)
    return salt
def create_random_tmp_name(salt_count):
    """Return (and print) a random string of *salt_count* ASCII letters,
    mixed case."""
    letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    salt = ''.join(random.choice(letters) for _ in range(salt_count))
    print('* CreateRandomTmpName name = ' + salt)
    return salt
def create_random_tmp_name_with_num_low(salt_count):
    """Return a random string of *salt_count* lowercase letters/digits."""
    alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
    salt = ''.join(random.choice(alphabet) for _ in range(salt_count))
    # print "* createRandomTmpNameWithNumAndLow name = " + salt
    return salt
def ensure_dir(a_dir):
    """Create the PARENT directory of path *a_dir* if it does not exist.

    The argument is treated as a file path: only its dirname is created,
    mirroring the original behaviour.
    """
    a_dir = os.path.dirname(a_dir)
    # Guard against a bare filename, whose dirname is '' and would make
    # os.makedirs raise.
    if a_dir and not os.path.exists(a_dir):
        os.makedirs(a_dir)
        print("makedirs " + a_dir)
def file_exist(a_file):
    """Return True if path *a_file* exists."""
    if os.path.exists(a_file):
        return True
    return False
def move_fileto(source, target_dir):
    """Copy *source* into *target_dir*.

    NOTE(review): despite the name this COPIES (shutil.copy) -- existing
    callers may rely on the source surviving, so behaviour is kept; use
    shutil.move if a real move is wanted.
    """
    shutil.copy(source, target_dir)
def load_pickle(file_name):
    """Unpickle and return the object stored in *file_name*, or None if
    the file does not exist."""
    if not file_exist(file_name):
        return None
    # BUG FIX: pickles are binary data; opening in text mode breaks on
    # Python 3 (and on Windows). Use 'rb' and a context manager so the
    # handle is always closed.
    with open(file_name, 'rb') as fr:
        return pickle.load(fr)
def dump_pickle(input_obj, file_name):
    """Pickle *input_obj* to *file_name*, creating parent dirs as needed."""
    ensure_dir(file_name)
    # BUG FIX: 'wb' instead of 'w' -- pickle writes bytes.
    with open(file_name, 'wb') as fw:
        pickle.dump(input_obj, fw)
@warnings_filter
def dump_hdf5(input_obj, input_key, file_name):
    """
    Store *input_obj* under *input_key* in the HDF5 file *file_name*.

    pandas emits data-layout optimisation warnings here; the
    @warnings_filter decorator suppresses them.
    :param input_obj: pandas object to store
    :param input_key: key under which to store it
    :param file_name: HDF5 file path
    :return:
    """
    # h5s = pd.HDFStore(file_name, 'w')
    # h5s[input_key] = input_obj
    # h5s.close()
    with pd.HDFStore(file_name, 'w') as h5s:
        h5s[input_key] = input_obj
def load_hdf5(file_name, load_key):
    """Return the object stored under *load_key* in HDF5 file *file_name*,
    or None if the file does not exist."""
    if not file_exist(file_name):
        return None
    with pd.HDFStore(file_name, 'r') as h5s:
        # h5s = pd.HDFStore(file_name, 'r')
        # h5s.close()
        ret = h5s[load_key]
    return ret
def save_file(ct, file_name):
    """Write string *ct* to *file_name*, creating parent dirs as needed."""
    ensure_dir(file_name)
    with open(file_name, 'w') as f:
        f.write(ct)
def save_list_file(file_name, str_list):
    """Write each element of *str_list* (via str()) to *file_name*, one
    element per line.

    BUG FIX: the original opened the file in binary mode ('wb') while
    writing str() values and a text newline, which raises TypeError on
    Python 3 (the module's cPickle/pickle fallback shows it targets both
    2 and 3). Open in text mode and use a context manager instead.
    """
    with open(file_name, 'w') as target_file:
        for x in str_list:
            target_file.write(str(x))
            target_file.write('\n')
def get_class(kls):
    """Resolve a dotted name like 'pkg.mod.Class' to the named object.

    :param kls: fully-qualified dotted path.
    :return: the attribute reached by walking the path.
    """
    parts = kls.split('.')
    module = ".".join(parts[:-1])
    # __import__('a.b') returns the TOP-LEVEL package 'a'; the getattr walk
    # below descends through the remaining path components.
    m = __import__(module)
    for comp in parts[1:]:
        m = getattr(m, comp)
    return m
def is_iterable(obj):
    """Return True if *obj* supports iteration."""
    try:
        iter(obj)
        return True
    except TypeError:
        # Narrowed from a bare except: iter() signals non-iterables with
        # TypeError only; a bare except also hid unrelated failures.
        return False
|
from django import forms
from .models import Purchase
class MakePaymentForm(forms.Form):
    """Stripe payment details form.

    Card fields are not required at the Django level because they are
    tokenised client-side by Stripe; only the hidden stripe_id is posted.
    """
    # BUG FIX: range(1, 12) produced months 1-11 only, omitting December;
    # the end of range() is exclusive, so 13 is needed for a full year.
    MONTH_CHOICES = [(i, i) for i in range(1, 13)]
    YEAR_CHOICES = [(i, i) for i in range(2018, 2036)]
    credit_card_number = forms.CharField(label='Card Number', required=False)
    cvv = forms.CharField(label="Security Number (CVV)", required=False)
    expiry_month = forms.ChoiceField(label="Expiration Date", choices=MONTH_CHOICES, required=False)
    expiry_year = forms.ChoiceField(label="", choices=YEAR_CHOICES, required=False)
    stripe_id = forms.CharField(widget=forms.HiddenInput)
class PurchaseForm(forms.ModelForm):
    """Delivery/contact details form backed by the Purchase model."""
    class Meta:
        model = Purchase
        fields = ('full_name', 'phone_number', 'street_address_1', 'street_address_2', 'town_or_city', 'postcode', 'country')
'''
Problem: You have 100 doors in a row that are all initially closed. You make 100 passes by the doors. The first time through, you visit every door and toggle the door (if the door is closed, you open it; if it is open, you close it). The second time you only visit every 2nd door (door #2, #4, #6, ...). The third time, every 3rd door (door #3, #6, #9, ...), etc, until you only visit the 100th door.
Alternate: As noted in this page's discussion page, the only doors that remain open are whose numbers are perfect squares of integers. Opening only those doors is an optimization that may also be expressed
'''
#unoptimized solution
doors = [False] * 100
for i in range(100):
    # Pass i+1 toggles every (i+1)-th door. Door i+1 is toggled once per
    # divisor of i+1, all of which are <= i+1, so after pass i+1 its state
    # is final and can be printed immediately.
    for j in range(i, 100, i+1):
        doors[j] = not doors[j]
    print("Door %d:" % (i+1), 'open' if doors[i] else 'close')
#optimized solution: a door ends open iff it is toggled an odd number of
#times, i.e. iff its number has an odd count of divisors -- a perfect square.
for i in range(1, 101):
    # BUG FIX: the original ``if i**0.5 % 1:`` is truthy exactly for
    # NON-squares, labelling the wrong doors open. A door is open when
    # sqrt(i) has no fractional part.
    if i**0.5 % 1 == 0: # i is a perfect square
        state = 'open'
    else:
        state = 'closed'
    print('Door {} is {}. '.format(i, state), end=' ')
#ultra-optimized solution: enumerate the perfect squares directly.
for i in range(1,11):
    print("Door %s is open" % i**2)
|
from flask import Flask, jsonify, flash, request, redirect, render_template
import pandas as pd
from transformers import load_transformers
import pickle
import os
localhost = '0.0.0.0'
ALLOWED_EXTENSIONS = set(['csv'])
app = Flask(__name__)
# flask uploader folder
app.config['UPLOAD_FOLDER'] = 'download/'
# loading model and transformer
model = pickle.load(open('misc/rf_model.mdl', 'rb'))
transformer = load_transformers()
# print(transformer.named_steps['type_setter'].num_cols)
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def greeting():
    # The root URL simply forwards to the prediction endpoint.
    return redirect('/predict')
@app.route('/predict', methods=['GET', 'POST'])
def upload_file():
    """GET: render the upload form. POST: accept a CSV upload, save it to
    UPLOAD_FOLDER, run the model on it and return predictions as JSON."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # NOTE(review): the raw client filename is used unsanitised
            # when building the save path (werkzeug.secure_filename would
            # be safer) -- flagging, not changing behaviour.
            filename = file.filename
            full_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(full_path)
            # Predictions come back as an array; flatten its repr to a
            # single line for the JSON payload.
            values = str(predict(full_path))
            values = values.replace('\n', '')
            return jsonify({'prediction': values})
    return render_template('upload.html')
def predict(filename):
    """Load the CSV at *filename*, apply the fitted transformer and return
    the model's predictions (empty string if no model is loaded)."""
    values = ''
    df = pd.read_csv(filename)
    # Binarise column x23.
    # NOTE(review): anything other than the exact string 'FALSE' maps to 1
    # (including NaN) -- confirm this is intended.
    df['x23'] = df['x23'].apply(lambda x: 0 if x == 'FALSE' else 1)
    print(df.shape, type(df), df.dtypes)
    inputs = transformer.transform(df)
    if model is not None:
        values = model.predict(inputs)
    return values
app.run(host=localhost, port='3000')
|
# -*- coding: utf-8 -*-
"""Amazon boto interface."""
from __future__ import absolute_import, unicode_literals
try:
    import boto
except ImportError:  # pragma: no cover
    # boto is optional: when it is missing, expose inert stand-ins so that
    # importers can still reference these names and fail clearly at use
    # time rather than at import time.
    boto = get_regions = ResultSet = RegionInfo = XmlHandler = None
    class _void(object):
        pass
    AWSAuthConnection = AWSQueryConnection = _void  # noqa
    class BotoError(Exception):
        pass
    # Mimic boto.exception's attribute layout with a single error type.
    exception = _void()
    exception.SQSError = BotoError
    exception.SQSDecodeError = BotoError
else:
    # boto is available: re-export the real implementations.
    from boto import exception
    from boto.connection import AWSAuthConnection, AWSQueryConnection
    from boto.handler import XmlHandler
    from boto.resultset import ResultSet
    from boto.regioninfo import RegionInfo, get_regions
__all__ = [
    'exception', 'AWSAuthConnection', 'AWSQueryConnection',
    'XmlHandler', 'ResultSet', 'RegionInfo', 'get_regions',
]
|
import os
# Project root = two directory levels above this file.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# raw folder
_RAW_DIR = os.path.join(ROOT_DIR, 'input', 'raw')
TRAIN_FEATURES = os.path.join(_RAW_DIR, 'train_features.csv')
TRAIN_TARGET_SCORED = os.path.join(_RAW_DIR, 'train_targets_scored.csv')
# processed folder (public name keeps the original 'TRAGET' spelling for
# compatibility with existing importers)
TRAIN_TRAGET_FOLDS = os.path.join(ROOT_DIR, 'input', 'processed', 'train_targets_folds.csv')
if __name__ == '__main__':
    print(ROOT_DIR)
import time
def Convert_Height(feet, inch):
    """Print *feet* and *inch* converted to centimetres."""
    CM_PER_FOOT = 30.48
    CM_PER_INCH = 2.54
    feet_cm = CM_PER_FOOT * feet
    inch_cm = CM_PER_INCH * inch
    print("Converting units ...")
    time.sleep(1)  # purely cosmetic pause
    print(feet, "feet is equal to :", feet_cm, "cm")
    print(inch, "inch is equal to :", inch_cm, "cm")
Convert_Height(20, 20)
# Definition for a binary tree node.
class TreeNode:
    """Plain binary tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def bstFromPreorder(self, preorder):
        """Construct a binary search tree from its pre-order traversal and
        return the root (None for an empty traversal)."""
        if not preorder:
            return None
        return self.build_from_preorder(preorder)
    def build_from_preorder(self, preorder, left=None, right=None):
        """Recursively build the BST for the slice preorder[left..right].

        The first element of the slice is the root; everything before the
        first larger element forms the left subtree, the remainder the
        right subtree. Runtime O(n^2): the linear scan for the split point
        runs inside an O(n)-deep recursion.
        """
        if left is None and right is None:
            # Top-level call: cover the whole array.
            left, right = 0, len(preorder) - 1
        if left > right:
            return None  # empty slice
        node = TreeNode(preorder[left])
        if left == right:
            return node  # single element -> leaf
        # Locate the first entry larger than the root; it begins the
        # right subtree.
        split = left + 1
        while split <= right and preorder[split] < node.val:
            split += 1
        node.left = self.build_from_preorder(preorder, left + 1, split - 1)
        node.right = self.build_from_preorder(preorder, split, right)
        return node
|
from urllib.parse import urlencode
from urllib.request import urlopen
from amath.DataTypes.Function2 import Function
from amath.Errors import Failure
def formulaLookup(x):
    """Look up formula *x* via a deployed Wolfram Cloud API object and
    return the response decoded as ASCII text."""
    def wolfram_cloud_call(**args):
        # POST the kwargs as a urlencoded body to the cloud object and
        # return the raw response bytes.
        arguments = dict([(key, arg) for key, arg in args.items()])
        try:
            result = urlopen("http://www.wolframcloud.com/objects/5c991864-3fbd-4b30-8200-d1a398aee0e2",
                             urlencode(arguments).encode("ascii"))
        except:
            raise Failure("Cannot connect to servers")
        return result.read()
    textresult = wolfram_cloud_call(x=x)
    return textresult.decode("ascii")
def formulaData(x):
    """Fetch formula *x* from a Wolfram Cloud object and parse the response
    into an amath Function (expression text plus variable names)."""
    def wolfram_cloud_call(**args):
        # POST the kwargs as a urlencoded body and return raw bytes.
        arguments = dict([(key, arg) for key, arg in args.items()])
        try:
            result = urlopen("http://www.wolframcloud.com/objects/724d6409-5efb-4bcb-907a-6897aad95193",
                             urlencode(arguments).encode("ascii"))
        except:
            raise Failure("Cannot connect to servers")
        return result.read()
    import re
    textresult = wolfram_cloud_call(x=x)  # .decode("ascii")
    # print(textresult)
    # Keep only the right-hand side of the Wolfram '==' equation.
    textresult = textresult.split(b"==")[1]
    # Collect the variable names from QuantityVariable["name", ...] parts.
    var = re.findall(b"(\w+)(?=\",)", textresult)
    # print(var)
    # Strip the QuantityVariable[...] wrappers and convert ^ to Python **.
    textresult = textresult.replace(b"QuantityVariable[\"", b"")
    textresult = re.sub(b'", "\w+"]', b"", textresult).replace(b"^", b"**")
    # print(textresult)
    return Function(textresult.decode(), list(map(bytes.decode, var)))
# Library of common physics/statistics formulas as amath Function objects;
# each maps the listed real-valued parameters to the formula's value.
EnergyRelativistic = Function('m*(c**2)', {'m': 'Real'})  # E = m c^2
GravitationalForce = Function('(G*m1*m2)/(d**2)', {'m1': 'Real', 'm2': 'Real', 'd': 'Real'})
Pythagorean = Function('sqrt((a**2)+(b**2))', {'a': 'Real', 'b': 'Real'})
StandardNormalDistribution = Function('(e**(-(1/2.0)*(x**2)))/sqrt(2*pi)', {'x': 'Real'})
NormalDistribution = Function('1/(e**((-m + x)**2/(2.*s**2))*sqrt(2*pi)*s)', {'m': 'Real', 's': 'Real', 'x': 'Real'})
LorentzFactor = Function("1.0/sqrt(1-(v**2)/(c**2))", {'v': 'Real'})
KineticEnergy = Function("(1/2.0)*m*(v**2)", {'m': 'Real', 'v': 'Real'})
Momentum = Function("m * v", {'m': 'Real', 'v': 'Real'})
MinimumPowerRequiredToMoveObject = Function('(4*(D**2)*m)/(t**3)', {'D': 'Real', "m": 'Real', 't': 'Real'})
Velocity = Function("s/t", {'s': 'Real', 't': 'Real'})
Acceleration = Function('dv/dt', {'dv': 'Real', 'dt': 'Real'})
EscapeVelocity = Function("sqrt(2)*sqrt((G * m)/r)", {"m": "real", "r": "real"})
GravitationalPotentialEnergy = Function("g * h * m", {"g": "real", "h": "real", "m": 'real'})
Density = Function("M / V", {"M": 'real', "V": "real"})
NewtonsSecondLawConstantMass = Function("a * m", {"a": 'real', 'm': 'real'})
MomentumKineticEnergy = Function("sqrt(2)*sqrt(k*m)", {'k': 'Real', 'm': 'Real'})
Work = Function("a * d * m", {"a": "real", "d": 'real', "m": 'real'})
TimeDilationRelativistic = Function("t/sqrt(1 - v**2/c**2)", {"t": 'Real', "v": 'Real'})
TimeDilationGravitational = Function("t/sqrt(1 - (g*r)/c**2)", {"t": 'real', "g": 'real', "r": 'real'})
def HarmonicNumber(n):
    """Return the n-th harmonic number H_n = 1 + 1/2 + ... + 1/n.

    Uses the project's stats ``sum`` (a series sum over a callable) -- the
    local import deliberately shadows the builtin of the same name.
    """
    from .stats.stats import sum
    return sum(lambda x: 1 / x, 1, n)
|
#!/usr/bin/python3
import sys
from datetime import datetime
import timeit
# Global variables
instructions = []
registers = dict()
result01 = 0
result02 = 0
# Functions
def part01():
    """Run every instruction against the shared registers and store the
    largest FINAL register value in the global result01.

    Each instruction is a 7-token list:
    [target_reg, op('inc'/'dec'), amount, 'if', cond_reg, cmp_op, cmp_value]
    Aborts (with a message) on an unknown comparison operator.
    """
    global registers
    global instructions
    global result01
    # (removed an unused ``sum = 0`` that shadowed the builtin)
    for instruction in instructions:
        # create register if needed
        if instruction[0] not in registers:
            registers[instruction[0]] = 0
        if instruction[4] not in registers:
            registers[instruction[4]] = 0
        # determine condition and check
        if instruction[5] == '<':
            if registers[instruction[4]] < int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '<=':
            if registers[instruction[4]] <= int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '==':
            if registers[instruction[4]] == int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '>':
            if registers[instruction[4]] > int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '>=':
            if registers[instruction[4]] >= int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '!=':
            if registers[instruction[4]] != int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        else:
            print('Unknown Instruction:', instruction[5])
            return
    result01=(max(registers.values()))
def calculateInstruction(operator='', register='', value=0):
    """Apply *operator* ('inc' or 'dec') to *register* by *value* in the
    shared registers dict; print a warning for unknown operators."""
    global registers
    global result02
    if operator == 'inc':
        registers[register] += value
    elif operator == 'dec':
        registers[register] -= value
    else:
        print ('Unknown Operator:', operator)
def part02():
    """Like part01, but additionally track in the global result02 the
    highest value any register holds at ANY point during execution."""
    global registers
    global instructions
    global result02
    for instruction in instructions:
        # create register if needed
        if instruction[0] not in registers:
            registers[instruction[0]] = 0
        if instruction[4] not in registers:
            registers[instruction[4]] = 0
        # determine condition and check
        if instruction[5] == '<':
            if registers[instruction[4]] < int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '<=':
            if registers[instruction[4]] <= int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '==':
            if registers[instruction[4]] == int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '>':
            if registers[instruction[4]] > int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '>=':
            if registers[instruction[4]] >= int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        elif instruction[5] == '!=':
            if registers[instruction[4]] != int(instruction[6]):
                calculateInstruction(instruction[1], instruction[0], int(instruction[2]))
        else:
            print('Unknown Instruction:', instruction[5])
            return
        # track the running maximum across all updates
        if registers[instruction[0]] > result02:
            result02 = registers[instruction[0]]
def bench(part=0, filename=''):
    """Benchmark entry point for external runners: optionally load
    *filename* into the shared instruction list, then time part 1 or
    part 2 exactly once via timeit and print the result.

    NOTE(review): the timeit setup imports from 'day08', so this assumes
    the file is named day08.py -- confirm.
    """
    global instructions
    global registers
    instructions = []
    if filename != '':
        with open(filename, 'r') as f:
            for line in f:
                instructions.append(line.rstrip().split(' '))
    if part == 1:
        duration01 = timeit.timeit("part01()", setup="from day08 import part01", number=1)
        print(8, 1, result01, int(duration01 * 10 ** 6))
    elif part == 2:
        # Fresh registers so part 2 re-runs the program from scratch.
        registers = dict()
        duration02 = timeit.timeit("part02()", setup="from day08 import part02", number=1)
        print(8, 2, result02, int(duration02 * 10 ** 6))
# Main
if __name__ == '__main__':
    # The puzzle input comes from the command line; each line is split into
    # the 7-token instruction format consumed by part01/part02.
    with open(sys.argv[1], 'r') as f:
        for line in f:
            instructions.append(line.rstrip().split(' '))
    duration01 = timeit.timeit("part01()", setup="from __main__ import part01", number=1)
    print(8, 1, result01, int(duration01 * 10 ** 6))
    # Reset registers so part 2 re-runs the program from scratch.
    registers = dict()
    duration02 = timeit.timeit("part02()", setup="from __main__ import part02", number=1)
    print(8, 2, result02, int(duration02 * 10 ** 6))
|
#!/usr/bin/env python
""" Tools which enable feature generation
for sources in the StarVars project.
*** TODO parse the LINEAR file into a string for below
*** parse raw LINEAR ts files (as string):
tutor_database_project_insert.py:parse_asas_ts_data_str(ts_str)
*** The aperture is chosen and the cooresp timeseries is decided in:
tutor_database_project_insert.py:filter_best_ts_aperture()
*** TODO insert the resulting v_array int CSV parsing & freature generation code
*** TODO store the resulting features in an arff file / CSV format?
NOTE: I resolved library / python package dependencies by doing:
1) editing my ~/.bashrc.ext:
export PATH=/global/homes/d/dstarr/local/bin:${PATH}
export TCP_DIR=/global/homes/d/dstarr/src/TCP/
2) loading some modules:
module load python/2.7.1 numpy/1.6.1 scipy/0.10.1 ipython/0.12.1 R/2.12.1 mysql/5.1.63
"""
import sys, os
import cPickle
#sys.path.append("/global/u1/d/dchesny/BUILD/MySQL-python-1.2.3/build/lib.linux-x86_64-2.7")
sys.path.insert(0,os.path.expandvars("/global/u1/d/dchesny/BUILD/MySQL-python-1.2.3/build/lib.linux-x86_64-2.7"))
import MySQLdb
import time, matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
#from matplotlib.pylab import *
"""FUNCTION Index #####################################
#+
# PURPOSE: given a directory, returns a list of the names of all files with full path
#
# CALLING SEQUENCE: files = index( directory )
#
# INPUTS:
# indir - a directory path
#
# OUTPUTS:
# files = a list containing the full path names of all files in the directory
#-
"""
def index( directory ):
    """Recursively collect the full paths of all '.dat' files under *directory*.

    Uses an explicit stack instead of recursion.

    BUG FIX: in the original, `fullname` was assigned only inside the
    `.dat` branch, so the directory-descent check read a stale (or, on the
    first non-.dat entry, undefined) value — recursion into subdirectories
    was broken and could raise NameError.
    """
    stack = [directory]
    files = []
    while stack:
        current = stack.pop()
        for entry in os.listdir(current):
            fullname = os.path.join(current, entry)
            if os.path.isdir(fullname) and not os.path.islink(fullname):
                # Descend into real subdirectories; symlinks skipped to avoid cycles.
                stack.append(fullname)
            elif entry.endswith('.dat'):  # only get files that end in .dat [change for other purposes]
                files.append(fullname)
    return files
"""FUNCTION readLC #####################################
#+
# PURPOSE: given a phased light curve file, returns num.array objects phase, mag, dmag
#
# CALLING SEQUENCE: readLC( infile )
#
# INPUTS:
# infile - a 3-column light curve date[], mag[], dmag[] as text file
#
# OUTPUTS:
# A file containing three numpy arrays: date, mag, dmag
#-
"""
# def readLC(infile): # reads in 3-column data and returns phase[], mag[], dmag[]
def readLC(infile):
    """Read a 3-column light-curve text file into a dict of numpy arrays.

    :param infile: path to a whitespace-separated file with columns
                   date, mag, dmag.  The source ID is the text between the
                   last '/' and '.dat' in the path.
    :return: dict with keys 'srcid' (str) and 't', 'm', 'merr' (numpy arrays).

    Fixes vs. original: uses a list comprehension instead of map() (whose
    result is not indexable under Python 3), closes the file via `with`,
    and tolerates blank lines.
    """
    import numpy as np
    date = []
    mag = []
    dmag = []
    mag_data_dict = {}
    ID = infile[infile.rfind('/')+1:infile.rfind('.dat')]
    with open(infile) as f:
        for line in f:
            fields = [float(x) for x in line.split()]
            if not fields:  # skip blank lines
                continue
            date.append(fields[0])
            mag.append(fields[1])
            dmag.append(fields[2])
    mag_data_dict['srcid'] = ID
    mag_data_dict['t'] = np.array(date)
    mag_data_dict['m'] = np.array(mag)
    mag_data_dict['merr'] = np.array(dmag)
    return mag_data_dict
class StarVars_LINEAR_Feature_Generation:
    """Generate TCP features for LINEAR light curves.

    Wraps a timeseries dict in the VOSource XML format expected by the TCP
    feature-generation code, and collects the resulting features into an
    ARFF-style file for ML training/classification.
    """
    def __init__(self, pars={}):
        # NOTE(review): mutable default argument `pars={}` is shared across
        # calls; safe only as long as callers never mutate it.
        # head_str/tail_str are the VOSource XML template halves;
        # form_xml_string() sandwiches the photometry rows between them.
        self.head_str = """<?xml version="1.0"?>
<VOSOURCE version="0.04">
<COOSYS ID="J2000" equinox="J2000." epoch="J2000." system="eq_FK5"/>
<history>
<created datetime="2009-12-02 20:56:18.880560" codebase="db_importer.pyc" codebase_version="9-Aug-2007"/>
</history>
<ID>6930531</ID>
<WhereWhen>
<Description>Best positional information of the source</Description>
<Position2D unit="deg">
<Value2>
<c1>323.47114731</c1>
<c2>-0.79916734036</c2>
</Value2>
<Error2>
<c1>0.000277777777778</c1>
<c2>0.000277777777778</c2>
</Error2>
</Position2D>
</WhereWhen>
<VOTimeseries version="0.04">
<TIMESYS>
<TimeType ucd="frame.time.system?">MJD</TimeType>
<TimeZero ucd="frame.time.zero">0.0 </TimeZero>
<TimeSystem ucd="frame.time.scale">UTC</TimeSystem>
<TimeRefPos ucd="pos;frame.time">TOPOCENTER</TimeRefPos>
</TIMESYS>
<Resource name="db photometry">
<TABLE name="v">
<FIELD name="t" ID="col1" system="TIMESYS" datatype="float" unit="day"/>
<FIELD name="m" ID="col2" ucd="phot.mag;em.opt.v" datatype="float" unit="mag"/>
<FIELD name="m_err" ID="col3" ucd="stat.error;phot.mag;em.opt.v" datatype="float" unit="mag"/>
<DATA>
<TABLEDATA>
"""
        self.tail_str = """ </TABLEDATA>
</DATA>
</TABLE>
</Resource>
</VOTimeseries>
</VOSOURCE>"""
        self.pars=pars
    def write_limitmags_into_pkl(self, frame_limitmags):
        """ This parses the adt.frame_limitmags dictionary which is contained
        in a Pickle file and which was originally retrieved from
        mysql and from adt.retrieve_fullcat_frame_limitmags()
        """
        # NOTE(review): cPickle/gzip usage is Python-2 style (cPickle is
        # `pickle` on Python 3).
        import cPickle
        import gzip
        ### This is just for writing the pickle file:
        fp = gzip.open(self.pars['limitmags_pkl_gz_fpath'],'w')
        cPickle.dump(frame_limitmags, fp, 1) # 1 means binary pkl used
        fp.close()
    def retrieve_limitmags_from_pkl(self):
        """ This parses the adt.frame_limitmags dictionary which is contained
        in a Pickle file and which was originally retrieved from
        mysql and from adt.retrieve_fullcat_frame_limitmags()
        """
        import cPickle
        import gzip
        fp = gzip.open(self.pars['limitmags_pkl_gz_fpath'],'rb')
        frame_limitmags = cPickle.load(fp)
        fp.close()
        return frame_limitmags
    def form_xml_string(self, mag_data_dict):
        """
        Take timeseries dict data and place into VOSource XML format,
        which TCP feature generation code expects.
        Adapted from: TCP/Software/feature_extract/format_csv_getfeats.py

        :param mag_data_dict: dict with parallel sequences under keys
            't', 'm', 'merr' (as produced by readLC()).
        :return: complete VOSource XML document as a string.
        """
        data_str_list = []
        for i, t in enumerate(mag_data_dict['t']):
            m = mag_data_dict['m'][i]
            m_err = mag_data_dict['merr'][i]
            data_str = ' <TR row="%d"><TD>%lf</TD><TD>%lf</TD><TD>%lf</TD></TR>' % \
                (i, t, m, m_err)
            data_str_list.append(data_str)
        all_data_str = '\n'.join(data_str_list)
        out_xml = self.head_str + all_data_str + self.tail_str
        return out_xml
    def example_dat_parse(self):
        """Worked example: parse one LINEAR .dat file and build its XML.

        Left in a debugging state (pdb.set_trace at the end).
        """
        import tutor_database_project_insert
        # NOTE(review): uses module-global `pars`, not self.pars — confirm intended.
        adt = tutor_database_project_insert.ASAS_Data_Tools(pars=pars)
        if 0:
            ### requires mysql connection to TUTOR:
            adt.retrieve_fullcat_frame_limitmags()
            self.write_limitmags_into_pkl(adt.frame_limitmags)
        ### This is done when we don't have a connection to the mysql database.
        adt.frame_limitmags = self.retrieve_limitmags_from_pkl()
        dat_fpath = '/project/projectdirs/m1583/linear/allLINEARfinal_lc_dat/10003298.dat'
        ts_str = open(dat_fpath).read()
        source_intermed_dict = adt.parse_asas_ts_data_str(ts_str)
        """mag_data_dict = adt.filter_best_ts_aperture(source_intermed_dict)
        """
        # NOTE(review): mag_data_dict is never assigned here (the
        # filter_best_ts_aperture call above is commented out), so the next
        # line raises NameError as written.
        xml_str = self.form_xml_string(mag_data_dict)
        ### TODO Generate the features for this xml string
        import pdb; pdb.set_trace()
        print
    def generate_arff_using_asasdat(self, data_fpaths=[], include_arff_header=False, arff_output_fp=None):
        """ Given a list of LINEAR data file filepaths, for each source/file:
        - choose the optimal aperture, depending upon median magnitude <---only for ASAS!!!
        - exclude bad/flagged epochs
        - generate features from timeseries (placing in intermediate XML-string format)
        - collect resulting features for all given sources, and place in ARFF style file
        which will later be read by ML training/classification code.
        Partially adapted from: TCP/Software/citris33/arff_generation_master_using_generic_ts_data.py:get_dat_arffstrs()
        """
        import tutor_database_project_insert
        # NOTE(review): uses module-global `pars`, not self.pars — confirm intended.
        adt = tutor_database_project_insert.ASAS_Data_Tools(pars=pars)
        adt.frame_limitmags = self.retrieve_limitmags_from_pkl()
        sys.path.append(os.environ.get('TCP_DIR') + '/Software/feature_extract/MLData')
        #sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + '/Software/feature_extract/Code/extractors'))
        #print os.environ.get("TCP_DIR")
        import arffify
        sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
            'Software/feature_extract/Code'))
        import db_importer
        from data_cleaning import sigmaclip_sdict_ts
        sys.path.append(os.path.abspath(os.environ.get("TCP_DIR") + \
            'Software/feature_extract'))
        from Code import generators_importers
        master_list = []
        master_features_dict = {}
        all_class_list = []
        master_classes_dict = {}
        for dat_fpath in data_fpaths:
            # Source ID = filename between the last '/' and '.dat'.
            new_srcid = dat_fpath[dat_fpath.rfind('/')+1:dat_fpath.rfind('.dat')]
            ts_str = open(dat_fpath).read()
            source_intermed_dict = adt.parse_asas_ts_data_str(ts_str)
            """mag_data_dict = adt.filter_best_ts_aperture(source_intermed_dict)
            """
            # Need to have a function like this for LINEAR data:
            # NOTE(review): mag_data_dict is never assigned in this loop (the
            # filter call above is commented out) — NameError as written.
            xml_str = self.form_xml_string(mag_data_dict)
            ### Generate the features:
            signals_list = []
            gen = generators_importers.from_xml(signals_list)
            gen.generate(xml_handle=xml_str)
            gen.sig.add_features_to_xml_string(signals_list)
            gen.sig.x_sdict['src_id'] = new_srcid
            dbi_src = db_importer.Source(make_dict_if_given_xml=False)
            dbi_src.source_dict_to_xml(gen.sig.x_sdict)
            xml_fpath = dbi_src.xml_string
            a = arffify.Maker(search=[], skip_class=False, local_xmls=True, convert_class_abrvs_to_names=False, flag_retrieve_class_abrvs_from_TUTOR=False, dorun=False)
            out_dict = a.generate_arff_line_for_vosourcexml(num=new_srcid, xml_fpath=xml_fpath)
            master_list.append(out_dict)
            all_class_list.append(out_dict['class'])
            master_classes_dict[out_dict['class']] = 0
            for feat_tup in out_dict['features']:
                master_features_dict[feat_tup] = 0 # just make sure there is this key in the dict. 0 is filler
        master_features = master_features_dict.keys()
        master_classes = master_classes_dict.keys()
        # Second Maker pass actually writes the accumulated lines to ARFF.
        a = arffify.Maker(search=[], skip_class=False, local_xmls=True,
            convert_class_abrvs_to_names=False,
            flag_retrieve_class_abrvs_from_TUTOR=False,
            dorun=False, add_srcid_to_arff=True)
        a.master_features = master_features
        a.all_class_list = all_class_list
        a.master_classes = master_classes
        a.master_list = master_list
        a.write_arff(outfile=arff_output_fp, \
            remove_sparse_classes=True, \
            n_sources_needed_for_class_inclusion=1,
            include_header=include_arff_header,
            use_str_srcid=True)#, classes_arff_str='', remove_sparse_classes=False)
if __name__ == '__main__':
    # Python 2 script: reads every .dat light curve under `indir`, converts
    # each to a VOSource XML string, and pickles the {index: xml} dict.
    startTime = time.time()
    indir = '/project/projectdirs/m1583/linear/allLINEARfinal_lc_dat'
    # print '\n indir =', indir
    files = index( indir )
    # print '\n files =', files[0:5]
    pars = { \
        #'tcp_hostname':'192.168.1.25',
        #'tcp_username':'pteluser',
        #'tcp_port': 3306, #23306,
        #'tcp_database':'source_test_db',
        #'limitmags_pkl_gz_fpath':'/home/dstarr/scratch/asas_limitmags.pkl.gz',
        'limitmags_pkl_gz_fpath':'/project/projectdirs/m1583/ASAS_scratch/asas_limitmags.pkl.gz',
        }
    sv_asas = StarVars_LINEAR_Feature_Generation( pars=pars )
    LC = {}
    xml = {}
    for i in range( 0, len(files) ):
        # NOTE(review): bare except clauses below deliberately make the loop
        # best-effort (skip unreadable files) but also hide real bugs.
        try:
            LC[i] = readLC( files[i] )
        except:
            print 'ERROR: File', i, 'in LC dictionary is not a light curve!'
            continue
        try:
            xml[i] = sv_asas.form_xml_string( LC[i] )
        except:
            print 'ERROR: Unable to form xml string for LC['+str(i)+']'
    # Persist all XML strings in one pickle next to the input data.
    buff = open( '/project/projectdirs/m1583/linear/allLINEARfinal_lc_dat/xml.pickle', 'wb' )
    cPickle.dump(xml, buff )
    buff.close()
    endTime = time.time()
    totalTime = endTime - startTime
    print '\nTotal time:', totalTime, 's'
|
import imaging
import servo
import time
def main():
    """Capture a fixed number of camera frames, pulsing the servo between shots."""
    duty_cycle = 30
    total_images = 100
    camera = imaging.CameraControl()
    rotator = servo.ServoControl()
    camera.set_exposure(100)
    camera.set_focus(20)
    camera.start_camera()
    #rotator.start(duty_cycle)
    for shot in range(total_images):
        print("Acquiring image: " + str(shot))
        camera.take_picture('./data/image_{:03d}.png'.format(shot))
        # Advance the servo, settle, then stop before the next exposure.
        rotator.start(duty_cycle)
        time.sleep(2)
        rotator.stop()
        time.sleep(1)


if __name__ == "__main__":
    main()
|
import random as r
import os

# Console number-guessing ("UP/DOWN") game.  User-facing strings are Korean
# and are left untouched; only comments are translated.
minscore = 0  # best (lowest) attempt count so far; 0 means no record yet
while True:
    print("☆☆☆☆☆UPDOWN게임☆☆☆☆☆")
    print("1.게임시작\n2.게임전적\n3.게임종료")  # menu: 1 start / 2 record / 3 quit
    select = int(input(">>> "))
    if select == 1:
        computer = r.randint(1, 100)
        count = 0  # number of guesses in this round
        os.system("cls")  # Windows-only screen clear
        while True:
            print(computer)  # debug: reveals the answer — remove for real play
            player = int(input("정수 입력 : "))
            count += 1
            os.system("cls")
            if player > computer:
                print("=====DOWN=====")
            elif player < computer:
                print("=====U P=====")
            else:  # correct guess
                print("%d회 만에 맞췄습니다."%count)
                if minscore == 0 or count < minscore:
                    print("☆☆최고기록갱신☆☆")  # new best record
                    minscore = count
                os.system("pause")
                os.system("cls")
                break
    elif select == 2:
        if minscore == 0:
            print("===게임을 하고 와주세요===")
        else:
            print("최고점수는 %d점입니다."%minscore)
        os.system("pause")
        os.system("cls")
    elif select == 3:
        print("게임종료")
        exit(0)
    else:
        print("잘못된 입력입니다.")
|
from abc import ABCMeta, abstractmethod
import os
class PackageAnalyzer(object, metaclass=ABCMeta):
    """
    Abstract base class for plug-ins seeking to implement package analysis.

    Subclasses implement _analyze() to scan a repository path for packages
    and their dependencies.

    BUG FIX: the original used the Python-2 `__metaclass__ = ABCMeta`
    attribute, which Python 3 silently ignores (this file uses Python-3
    annotations), so @abstractmethod was never enforced.
    """

    def __init__(self, settings):
        """
        Creates a new instance of a package-analyzer class.

        :param settings: settings containing information for the plug-ins.
        """
        self._settings = settings

    def add_dependency(self, dependant: str, dependency: str, packages: dict) -> None:
        """
        Adds a dependency.

        :param dependant: The package that depends on the dependency.
        :param dependency: The package that the dependant is dependent on.
        :param packages: The packages and dependencies of this repository
                         (key: package name, value: dict with 'name' and 'dependencies').
        :return: None
        """
        if dependant not in packages:
            packages[dependant] = dict()
        if "dependencies" not in packages[dependant]:
            packages[dependant]["dependencies"] = list()
        packages[dependant]["name"] = dependant
        packages[dependant]["dependencies"].append(dependency)

    @abstractmethod
    def _analyze(self, path: str) -> dict:
        """
        Analyze the current path for packages (recursively).

        :param path: Path to the repository that possibly contains files.
        :return: Dictionary with package-names and dependencies.
        """
        raise NotImplementedError

    def analyze(self, path: str) -> list:
        """Run _analyze() and return its package dicts as a list."""
        return list(self._analyze(path).values())

    def search_files(self, path: str, pattern: str) -> list:
        """
        Searches for files recursively in the file system matching the provided pattern.

        :param path: The path to search in.
        :param pattern: The filename suffix to search for (matched with str.endswith).
        :return: A list of paths to the found files.
        """
        filelist = []
        for root, dirs, files in os.walk(path):
            for name in files:
                if name.endswith(pattern):
                    # BUG FIX: the original appended os.path.join(root, str(pattern)),
                    # i.e. the pattern itself, instead of the matched file name.
                    filelist.append(os.path.join(root, name))
        return filelist
from django.conf.urls import include, url
from django.contrib import admin
from rest_framework.authtoken import views
from windows.views import api_root
# Root URL configuration; patterns are tried in order.
urlpatterns = [
    # DRF token-auth endpoint.
    url(r'^api/token/', views.obtain_auth_token, name='api-token'),
    url(r'^api/$', api_root),
    url(r'^api/', include('rest_framework.urls', namespace='rest_framework')),
    # App URLconfs mounted at the site root.
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^', include('operators.urls')),
    url(r'^', include('windows.urls')),
    url(r'^', include('mimics.urls')),
    url(r'^', include('vars.urls')),
    # NOTE(review): the r'^' includes above are tried before /admin/; admin
    # stays reachable only while none of them define an 'admin/...' route.
    url(r'^admin/', include(admin.site.urls)),
]
|
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import login, logout, get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.translation import gettext as _
from django.utils.translation import activate
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes, permission_classes
from django.template.loader import render_to_string
from .models import Token
from django.contrib.auth.signals import user_logged_in
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import authenticate
from django.contrib.auth import login as django_login
from accounts.models import User
from accounts.forms import (
RegistrationForm,
EditProfileForm
)
from .tokens import account_activation_token
User = get_user_model()
activate('es')
@api_view(['GET', 'POST'])
@authentication_classes([])
@permission_classes([])
def register(request):
    """
    View register (sign up).

    GET renders the registration form; POST validates it, saves the user as
    inactive, and e-mails an activation link (uid + token) that
    register_activate() later verifies.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Account stays disabled until the e-mailed activation link is used.
            user.is_active = False
            user.save()
            current_site = get_current_site(request)
            subject = _('Enable your account')
            message = render_to_string('email/account_activation_email.html', {
                'user': user,
                'rest_api': False,
                'domain': current_site.domain,
                # uid/token pair is decoded and checked by register_activate().
                'uid': force_text(urlsafe_base64_encode(force_bytes(user.pk))),
                'token': account_activation_token.make_token(user),
            })
            user.email_user(subject, message)
            return redirect('accounts:register_activation_sent')
        else:
            # Surface validation errors through the messages framework and
            # fall through to re-render the bound form.
            messages.warning(request, form.errors)
    else:
        form = RegistrationForm()
    return render(request, 'accounts/register.html',
                  {'form': form})
def register_activation_sent(request):
    """
    View after register.

    Renders the "activation e-mail sent" confirmation page.
    """
    template_name = 'accounts/register_activation_sent.html'
    return render(request, template_name)
def register_activate(request, uidb64, token):
    """
    Activate user after register.

    Decodes the uid from the activation link, verifies the token, then
    enables the account and logs the user in.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        # Malformed uid or unknown user — fall through to the invalid page.
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.email_confirmed = True
        user.save()
        login(request, user)
        messages.info(request, _("Your account has been enabled."))
        return redirect('home')
    else:
        return render(request, 'accounts/register_activation_invalid.html')
class LoginView(APIView):
    """Authenticate credentials, open a session, and return an auth token."""

    def post(self, request, format=None):
        """
        Log the user in.

        :return: 200 with {'user', 'token'} on success, 400 on bad credentials.
        """
        user = authenticate(username=request.data.get('username'),
                            password=request.data.get('password'))
        # BUG FIX: authenticate() returns None for invalid credentials; the
        # original passed None straight to django_login(), producing a 500.
        if user is None:
            return Response({'detail': 'Invalid credentials.'}, status=400)
        django_login(request, user)
        # token_ttl = self.get_token_ttl()
        token = Token.objects.create(user=request.user)
        user_logged_in.send(sender=user.__class__,
                            request=request, user=user)
        data = {
            'user': user.username,
            'token': token.key
        }
        return Response(data)
class LogoutView(APIView):
    # authentication_classes = (TokenAuthentication, )

    def post(self, request):
        """Delete the caller's auth token and close the session (HTTP 204)."""
        # request.auth is the Token instance used to authenticate this request.
        request.auth.delete()
        logout(request)
        return Response("Logout Successful!", status=204)
|
#!/usr/bin/env python3
import subprocess, os, csv, time, sys
# Build the benchmark binaries and record the current opam switch.
my_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
os.chdir(my_dir)
subprocess.check_call(["dune", "build", "--profile=release", "carsales", "catrank", "eval"])
bin_dir = os.path.join(my_dir, '../../_build/default/src/benchmark')
# BUG FIX: check_output() returns bytes on Python 3; decode so the printed
# message and the results CSV filename don't embed a b'...' repr.
switch = subprocess.check_output(["opam", "sw", "show"]).decode().strip()
print("Current switch: %s" % switch)
# Baseline throughput per (binary, mode, packing), measured previously;
# run() reports results as a percentage of these.
baseline = {
    ('./carsales', 'pipe', 'none'): 67433620.95888832,
    ('./catrank', 'pipe', 'packed'): 65141369.52621308,
    ('./eval', 'pipe', 'none'): 61535819.20844552,
}
results = {}
def run(cmd, base_iters, scale):
    """Run one benchmark binary and record its throughput as % of baseline.

    :param cmd: [binary, mode, packing] argument list; also the baseline key.
    :param base_iters: iteration count at scale 1.0.
    :param scale: multiplier applied to base_iters; also the results key.
    """
    key = '-'.join([cmd[0][2:]] + cmd[1:])
    base = baseline[tuple(cmd)]
    iters = int(base_iters * scale)
    full_cmd = cmd + [str(iters)]
    started = time.time()
    throughput = int(subprocess.check_output(full_cmd, cwd=bin_dir))
    elapsed = time.time() - started
    frac = 100 * (throughput / elapsed) / base
    print("%6.2f%% of baseline: %3.1f x %s" % (frac, scale, key))
    results.setdefault(scale, {})[key] = frac
# Sweep the workload scale geometrically and run each benchmark at every step.
scale = 1.0
while scale < 10:
    run(["./carsales", "pipe", "none"], 5000, scale)
    run(["./catrank", "pipe", "packed"], 500, scale)
    run(["./eval", "pipe", "none"], 50000, scale)
    scale *= 1.5
series = sorted(results[1.0].keys())
scales = sorted(results.keys())
# Tolerate either bytes (if `switch` was not decoded upstream) or str.
switch_name = switch.decode() if isinstance(switch, bytes) else switch
# BUG FIX: csv.writer requires a text-mode file on Python 3 ('wb' raises
# TypeError on the first writerow); newline='' per the csv module docs.
with open('results-%s.csv' % switch_name, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["Scale"] + series)
    for scale in scales:
        res = [results[scale][k] for k in series]
        writer.writerow([scale] + res)
|
'''
This is a python wrapper around Peng's mRMR algorithm.
mRMR is the min redundancy max relevance feature selection algorithm by
Hanchuan Peng. See http://penglab.janelia.org/proj/mRMR for more details about
the code and its author, as well as the sources and the license.
Author: Brice Rebsamen
Version: 0.1
Released on: June 1st, 2011
Copyright 2011 Brice Rebsamen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import os
from subprocess import Popen, PIPE
import tempfile
def _savemrmrdatafile(data, featNames, classNames):
'''
Save the data to a CSV file in the format required by mRMR.
- first row is the name of the features.
- first col is the class names.
- data is organized, with a sample per row.
Returns the filename (a temporary file with the .csv extension).
'''
f = tempfile.NamedTemporaryFile(suffix='.csv', prefix='tmp_mrmr_', delete=False, mode='w')
f.write(','.join(['class']+featNames)+os.linesep)
data = np.asarray(data)
for i in range(data.shape[0]):
f.write(','.join([str(classNames[i])]+[str(d) for d in data[i,:]])+os.linesep)
f.close()
return f.name
def mrmr(data, featNames, classNames, threshold=None, nFeats=None, selectionMethod='MID', mrmrexe='./mrmr'):
    '''
    A wrapper around the mrmr executable.
    Arguments:
    data: a 2D array (size NxF)
    featNames: list of feature names (F elements)
    classNames: list of class names (N elements)
    Optional Arguments:
    threshold: data must be discrete or discretized. The default value (None)
    assumes that the data has already been discretized. Otherwise
    it has to be discretized as below u-t*s, above u+t*s or between:
    -1, +1 or 0, where u is the mean, s the standard deviation and
    t the threshold. This is done feature by feature.
    nFeats: the number of feature to select. If not given it defaults to all
    features. This will only sort the features.
    selectionMethod: either 'MID' or 'MIQ'. Default is 'MID'
    mrmrexe: the path to the mrmr executable. Defaults to './mrmr'
    Returns:
    A dictionary with 2 elements: MaxRel and mRMR, which are the 2 results
    returned by mrmr (the 2 different feature selection criterions).
    Each is a dictionary, with fields Fea and Score, holding the feature
    numbers and the scores respectively.
    Example:
    Generate some data: 200 samples, 2 classes, 7 features, the 2 first
    features are correlated with the class label, the 5 others are
    irrelevant. Feature names (fn) with a capital F are the relevant
    features.
    >>> N = 100
    >>> data = np.r_[ np.random.randn(N,2)+2, np.random.randn(N,2)-2 ]
    >>> data = np.c_[ data, np.random.randn(N*2,5) ]
    >>> c = [1]*N+[-1]*N
    >>> fn = ['F%d' % n for n in range(2)] + ['f%d' % n for n in range(5)]
    Pass to the mRMR program
    >>> mrmrout = mrmr(data, fn, c, threshold=0.5)
    Get the result:
    >>> R = mrmrout['mRMR']
    >>> print 'Order \t Fea \t Name \t Score'
    >>> for i in range(len(R['Fea'])):
    ...     print '%d \t %d \t %s \t %f\n' % \
    ...         (i, R['Fea'][i], fn[R['Fea'][i]], R['Score'][i])
    ...
    Order    Fea     Name    Score
    0        1       F1      0.131000
    1        0       F0      0.128000
    2        4       f4      -0.008000
    3        0       f0      -0.009000
    4        3       f3      -0.010000
    5        1       f1      -0.013000
    6        2       f2      -0.015000
    '''
    data = np.asarray(data)
    N,M = data.shape
    if nFeats is None: nFeats=M
    else: assert nFeats<=M
    mrmrexe = os.path.abspath(mrmrexe)
    # NOTE(review): asserts are stripped under `python -O`; these validations
    # would then silently disappear.
    assert os.path.exists(mrmrexe) and os.access(mrmrexe,os.X_OK)
    # Save data to a temporary file that can be understood by the mrmr binary
    fn = _savemrmrdatafile(data,featNames,classNames)
    # Generate the command line. See the help of mrmr for info on options
    # (-i input csv, -n features to select, -s samples, -v variables, -m method).
    cmdstr = mrmrexe
    cmdstr += ' -i %s -n %d -s %d -v %d -m %s' % (fn, nFeats, N, M, selectionMethod)
    if threshold is not None:
        assert threshold>0
        cmdstr += ' -t ' + str(threshold)
    # Call mrmr. The result is printed to stdout.
    # NOTE(review): on Python 3, .read() returns bytes and .split('\n') would
    # fail — this module is Python 2 code (see the print statements below).
    mrmrout = Popen(cmdstr, stdout=PIPE, shell=True).stdout.read().split('\n')
    # delete the temporary file
    os.remove(fn)
    # A function to parse the result: scans stdout for the section titled
    # `key`, skips the header line, then reads tab-separated rows until blank.
    def extractRes(key):
        Fea = []
        Score = []
        state = 0
        for l in mrmrout:
            if state==0:
                if l.find(key)!=-1: state = 1
            elif state==1:
                state = 2  # skip the column-header line after the title
            elif state==2:
                if l=='':
                    break
                else:
                    n,f,fn,s = l.split(' \t ')
                    Fea.append(int(f)-1)  # mrmr output is 1-based; convert to 0-based
                    Score.append(float(s))
        return {'Fea':np.asarray(Fea), 'Score':np.asarray(Score)}
    # Return a dictionary holding the features and their score for both the
    # MaxRel and mRMR criterions
    return {'MaxRel': extractRes('MaxRel features'),
        'mRMR': extractRes('mRMR features')}
if __name__=='__main__':
    # Smoke test (Python 2): 5 informative features (F*) plus 30 noise
    # features (f*); requires the ./mrmr executable to be present.
    # Make some data
    N = 100
    data = np.c_[ np.r_[np.random.randn(N,5)+2, np.random.randn(N,5)-2], np.random.randn(N*2,30) ]
    c = [1]*N+[-1]*N
    fn = ['F%d' % n for n in range(5)] + ['f%d' % n for n in range(30)]
    assert data.shape==(len(c),len(fn))
    mrmrout = mrmr(data,fn,c,threshold=0.5)
    R = mrmrout['mRMR']
    print 'Order \t Fea \t Name \t Score'
    for i in range(len(R['Fea'])):
        print '%d \t %d \t %s \t %f' % \
            (i, R['Fea'][i], fn[R['Fea'][i]], R['Score'][i])
import random
import time
from selenium.webdriver.common.by import By
from selenium_ui.base_page import BasePage
from selenium_ui.conftest import print_timing
from selenium_ui.jira.pages.pages import Login
from util.conf import JIRA_SETTINGS
def app_specific_action(webdriver, datasets):
    """Open a custom issue page and wait for the summary field plus an
    app-specific UI element, timing the view as selenium_app_custom_action."""
    page = BasePage(webdriver)
    if datasets['custom_issues']:
        issue_key = datasets['custom_issue_key']
    # NOTE(review): if datasets['custom_issues'] is empty, issue_key is never
    # assigned and the f-string below raises NameError — confirm datasets
    # always contain custom issues when this action runs.

    # To run action as specific user uncomment code bellow.
    # NOTE: If app_specific_action is running as specific user, make sure that app_specific_action is running
    # just before test_2_selenium_z_log_out action
    #
    # @print_timing("selenium_app_specific_user_login")
    # def measure():
    #     def app_specific_user_login(username='admin', password='admin'):
    #         login_page = Login(webdriver)
    #         login_page.delete_all_cookies()
    #         login_page.go_to()
    #         login_page.set_credentials(username=username, password=password)
    #         if login_page.is_first_login():
    #             login_page.first_login_setup()
    #         if login_page.is_first_login_second_page():
    #             login_page.first_login_second_page_setup()
    #         login_page.wait_for_page_loaded()
    #     app_specific_user_login(username='admin', password='admin')
    # measure()

    @print_timing("selenium_app_custom_action")
    def measure():
        @print_timing("selenium_app_custom_action:view_issue")
        def sub_measure():
            page.go_to_url(f"{JIRA_SETTINGS.server_url}/browse/{issue_key}")
            page.wait_until_visible((By.ID, "summary-val"))  # Wait for summary field visible
            page.wait_until_visible((By.ID, "ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT"))  # Wait for you app-specific UI element by ID selector
        sub_measure()
    measure()
def app_create_dashboard(jira_webdriver, jira_datasets):
    """Create a new dashboard with a unique timestamped name and open it,
    timed as selenium_create_board."""
    page = BasePage(jira_webdriver)

    @print_timing("selenium_create_board")
    def measure():
        # time.time() in the name keeps repeated runs from colliding.
        dashboardTitle = f"My Dashboard {time.time()}"
        page.go_to_url(f"{JIRA_SETTINGS.server_url}/secure/ConfigurePortalPages.jspa")
        close_info_popups(page)  # just in case of popups
        page.get_element((By.ID,'create_page')).click()
        page.wait_until_clickable((By.ID, 'edit-entity-dashboard-name')).send_keys(dashboardTitle)
        page.get_element((By.ID,'edit-entity-submit')).click()
        page.get_element((By.XPATH, f'//table//td//a[contains(.,\'{dashboardTitle}\')]')).click()  # navigate to the new dashboard
    measure()
# def load_more_items(jira_webdriver):
# page.wait_until_visible((By.ID,'message-panel'))
# try:
# page.wait_until_visible((By.ID,'load-more-directory-items')).click()
# except:
# print('Already loaded')
def app_add_gadget(jira_webdriver, jira_datasets, gadgetId, isLoadMore, isMulti, isHistory, isHeat):
    """Add a Performance Objectives gadget to the current dashboard,
    configure it, wait for the chart, then delete it — timing each stage.

    :param gadgetId: gadget module-key suffix within performance-objectives-for-jira.
    :param isLoadMore: appears unused here — TODO confirm ("load more" is
        handled by the except branch below instead).
    :param isMulti: also add a second metric (Number of Issues).
    :param isHistory: configure a JQL data set and an Original Estimate metric.
    :param isHeat: group results by Resolution.
    """
    page = BasePage(jira_webdriver)
    gadgetPath = f'//div[@class=\'aui-dialog2-content\']//button[contains(@data-item-id,\'performance-objectives-for-jira:{gadgetId}\')]'
    testName = f'selenium_app_add_gadget_{gadgetId}'

    @print_timing(testName)
    def measure():
        dsName = 'This week'
        PROJECT_KEY = 'TESTAUTO'
        JQL=f'key={PROJECT_KEY}-1'
        NUMBER_OF_ISSUES = 'Number of Issues'
        RESOLUTION = 'Resolution'
        ORIGINAL_ESTIMATE = 'Original Estimate (h)'

        @print_timing(f'{testName}: add gadget')
        def sub_measure():
            page.get_element((By.ID,'add-gadget')).click()
            page.wait_until_visible((By.ID,'list-panel'))
            # Gadget may be behind the "load more" fold — click it on miss.
            try:
                page.get_element((By.XPATH, gadgetPath)).click()
            except:
                page.wait_until_visible((By.ID,'load-more-directory-items')).click()
                page.wait_until_visible((By.XPATH, gadgetPath)).click()
            page.wait_until_clickable((By.CSS_SELECTOR, '.aui-dialog2-header button.aui-close-button')).click()
        sub_measure()

        @print_timing(f'{testName}: init')
        def sub_measure():
            # Gadget renders in an iframe; switch into it for configuration.
            page.wait_until_invisible((By.CSS_SELECTOR, '.aui-dialog2-header'))
            page.driver.switch_to.frame(page.wait_until_visible((By.CSS_SELECTOR,'.dashboard-item-content iframe')))
        sub_measure()

        @print_timing(f'{testName}: configure')
        def sub_measure():
            page.wait_until_clickable((By.ID, 'data-set-name')).send_keys(dsName)
            page.get_element((By.ID, 'predefined-period')).click()
            page.wait_until_visible((By.CSS_SELECTOR, '.MuiMenu-list li[data-value=\'thisWeek\']')).click()
            page.get_element((By.CSS_SELECTOR, '.ReactModal__Content svg[data-icon=\'filter\']')).click()
            page.wait_until_visible((By.ID, 'save-filters-picker')).click()
            page.wait_until_clickable((By.ID, 'save-filters-picker')).send_keys(PROJECT_KEY)
            page.wait_until_clickable((By.CSS_SELECTOR, f'.MuiAutocomplete-listbox li[title*=\'({PROJECT_KEY})\']')).click()
            if isHistory:
                page.get_element((By.CSS_SELECTOR, '.ReactModal__Content label[for=\'textarea-jql\']')).click()
                page.wait_until_clickable((By.ID,'textarea-jql')).send_keys(JQL)
            page.wait_until_clickable((By.CSS_SELECTOR, '.ReactModalPortal .maui-button-primary')).click()
            page.wait_until_visible((By.CSS_SELECTOR,'.data-set-item-name'))
            if isMulti:
                # Add a second metric (Number of Issues).
                page.get_element((By.CSS_SELECTOR, '.button-add-metric')).click()
                page.wait_until_visible((By.ID, 'metric-filters-picker')).click()
                page.wait_until_clickable((By.ID, 'metric-filters-picker')).send_keys(NUMBER_OF_ISSUES)
                page.wait_until_visible((By.CSS_SELECTOR, f'.MuiAutocomplete-listbox li[title*=\'{NUMBER_OF_ISSUES}\']')).click()
                page.wait_until_clickable((By.CSS_SELECTOR, '.ReactModal__Content .maui-button-primary')).click()
                page.wait_until_visible((By.CSS_SELECTOR, '.multi-metric-item'))
            if isHeat:
                # Group the data set by Resolution.
                page.get_element((By.CSS_SELECTOR, '.group-by button[title=\'Edit\']')).click()
                page.wait_until_visible((By.ID, 'field-picker')).click()
                page.wait_until_clickable((By.ID, 'field-picker')).send_keys(RESOLUTION)
                page.wait_until_visible((By.CSS_SELECTOR, f'.MuiAutocomplete-listbox li[title*=\'{RESOLUTION}\']')).click()
                page.wait_until_clickable((By.CSS_SELECTOR, '.ReactModal__Content .maui-button-primary')).click()
                page.wait_until_invisible((By.CSS_SELECTOR, '.ReactModal__Content'))
            if isHistory:
                # Switch the single metric to Original Estimate (h).
                page.get_element((By.CSS_SELECTOR, '.metric.single button')).click()
                page.wait_until_visible((By.ID, 'metric-filters-picker')).click()
                page.wait_until_clickable((By.ID, 'metric-filters-picker')).send_keys(ORIGINAL_ESTIMATE)
                page.wait_until_visible((By.CSS_SELECTOR, f'.MuiAutocomplete-listbox li[title=\'{ORIGINAL_ESTIMATE}\']')).click()
                page.wait_until_clickable((By.CSS_SELECTOR, '.ReactModal__Content .maui-button-primary')).click()
                page.wait_until_invisible((By.CSS_SELECTOR, '.ReactModal__Content'))
        sub_measure()

        @print_timing(f'{testName}: save config')
        def sub_measure():
            page.wait_until_invisible((By.CSS_SELECTOR, '.ReactModal__Overlay'))
            page.wait_until_clickable((By.XPATH, '//button[text()[contains(.,\'Save\')]]')).click()
            # page.wait_until_invisible((By.CSS_SELECTOR,'button.maui-button-primary_wide'))
        sub_measure()

        @print_timing(f'{testName}: load chart')
        def sub_measure():
            page.wait_until_visible((By.CSS_SELECTOR,'.chart-footer'))
        sub_measure()

        @print_timing(f'{testName}: delete gadget')
        def sub_measure():
            # Delete is driven from the parent document, not the gadget iframe.
            page.driver.switch_to.parent_frame()
            page.wait_until_clickable((By.CSS_SELECTOR, '.gadget-menu button')).click()
            page.wait_until_clickable((By.CSS_SELECTOR, '.gadget-menu .dropdown-item .delete')).click()
            page.driver.switch_to.alert.accept()
            page.driver.switch_to.default_content()
            page.wait_until_visible((By.CSS_SELECTOR, '.column.first.empty'))
        sub_measure()
    measure()
def app_add_remove_work_calendar(jira_webdriver, datasets):
    """Create, then delete, a work calendar on the Objectives admin page,
    timed as selenium_app_add_remove_work_calendar."""
    page = BasePage(jira_webdriver)  # /secure/ObjectivesWorkCalendarsAction!default.jspa

    @print_timing("selenium_app_add_remove_work_calendar")
    def measure():
        calendarName = f"Cal {time.time()}"  # the input field is ax length 25
        # navigate to calendar page
        page.go_to_url(f"{JIRA_SETTINGS.server_url}/secure/ObjectivesWorkCalendarsAction!default.jspa")
        # Admin pages prompt for login, then re-prompt for the password
        # (websudo) — hence the two submit clicks below.
        page.wait_until_clickable((By.ID, 'login-form-username')).send_keys(JIRA_SETTINGS.admin_login)
        page.wait_until_clickable((By.ID, 'login-form-password'))
        page.get_element((By.ID, 'login-form-password')).send_keys(JIRA_SETTINGS.admin_password)
        page.get_element((By.ID, 'login-form-submit')).click()
        page.get_element((By.ID, 'login-form-authenticatePassword')).send_keys(JIRA_SETTINGS.admin_password)
        page.get_element((By.ID, 'login-form-submit')).click()
        # add calendar
        page.wait_until_visible((By.CSS_SELECTOR, '.pages-container'))
        page.wait_until_clickable((By.CSS_SELECTOR, 'button[title=\'Add calendar\']')).click()
        page.wait_until_clickable((By.ID, 'calendar-name')).send_keys(calendarName)
        page.get_element((By.CSS_SELECTOR, 'div[class^=\'ReactModal\'] button.maui-button-primary_wide')).click()
        page.wait_until_invisible((By.ID, 'calendar-name'))
        # delete calendar: locate the row by name, then its Delete button.
        calPath = f'//div[@class=\'list-section\']//div[@class=\'row with-controls\']//span[text()[contains(.,\'{calendarName}\')]]/../..//button[@title=\'Delete\']'
        page.wait_until_visible((By.XPATH, calPath));
        page.get_element((By.XPATH, calPath)).click()
        page.get_element((By.CSS_SELECTOR, '.maui-button-primary_red')).click()
        page.wait_until_visible((By.CSS_SELECTOR, 'button[title=\'Add calendar\']'))
    measure()
def app_change_color_pallete(jira_webdriver, datasets):
    """Switch the Objectives colour-palette theme and save it,
    timed as selenium_app_change_color_pallete."""
    page = BasePage(jira_webdriver)  # /secure/ObjectivesWorkCalendarsAction!default.jspa

    @print_timing("selenium_app_change_color_pallete")
    def measure():
        # navigate to color pallete page
        page.go_to_url(f"{JIRA_SETTINGS.server_url}/secure/ObjectivesColorPalleteAction!default.jspa")
        # change color pallete theme
        page.wait_until_visible((By.CSS_SELECTOR, '.pages-container'))
        page.wait_until_clickable((By.CSS_SELECTOR, '.header-link[aria-label*=\'Atlas\']')).click()
        page.get_element((By.CSS_SELECTOR, '.color-pallete button.maui-button-primary')).click()
        # BUG FIX: the locator is a CSS selector but the original passed it
        # with By.ID, which could never match (the wait always timed out).
        page.wait_until_invisible((By.CSS_SELECTOR, '.color-pallete .list-section'))
    measure()
def app_delete_dashboard(jira_webdriver, jira_datasets):
    """Selenium scenario: delete the currently open dashboard."""
    page = BasePage(jira_webdriver)

    # Locators used by the flow, named for readability.
    dropdown_trigger = (By.CSS_SELECTOR, '#dash-options a.aui-dropdown2-trigger')
    delete_item = (By.ID, 'delete_dashboard')
    confirm_button = (By.ID, 'delete-portal-page-submit')
    dialog = (By.CSS_SELECTOR, '.jira-dialog')

    @print_timing("selenium_delete_board")
    def measure():
        # Open the dashboard options dropdown.
        page.wait_until_visible(dropdown_trigger)
        page.get_element(dropdown_trigger).click()
        # Info popups can overlap the menu entry, so dismiss them first.
        close_info_popups(page)
        # Pick "Delete dashboard" and confirm in the dialog.
        page.wait_until_visible(delete_item)
        page.get_element(delete_item).click()
        page.wait_until_visible(confirm_button)
        page.get_element(confirm_button).click()
        # Done once the confirmation dialog has gone away.
        page.wait_until_invisible(dialog)
    measure()
def close_info_popups(page):
    """Dismiss the first closeable info popup, if any, so it cannot block clicks.

    Best-effort: failures are logged and swallowed so the calling scenario
    can continue.
    """
    try:
        info_popups = page.get_elements((By.CSS_SELECTOR, '.closeable.aui-message-info button'))
        if info_popups:
            popup = info_popups[0]
            popup.click()
            page.wait_until_invisible((By.CSS_SELECTOR, '.closeable.aui-message-info'))
    except Exception as e:
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; now only ordinary errors are suppressed, and the
        # cause is included in the message.
        print(f'Err closing info popup: {e}')
# def admin_login_prompt(page):
# try:
# login_prompt = page.get_elements((By.ID, 'login-form-username'))
# if login_prompt:
# page.wait_until_clickable((By.ID, 'login-form-username')).send_keys(JIRA_SETTINGS.admin_login)
# page.wait_until_clickable((By.ID, 'login-form-password'))
# page.get_element((By.ID, 'login-form-password')).send_keys(JIRA_SETTINGS.admin_password)
# page.get_element((By.ID, 'login-form-submit')).click()
# page.get_element((By.ID, 'login-form-authenticatePassword')).send_keys(JIRA_SETTINGS.admin_password)
# page.get_element((By.ID, 'login-form-submit')).click()
# except:
# print('Err closing') |
from django.urls import path
from yandex_bs.imports import views as import_views
# URL routing for the citizen-import REST API (yandex_bs.imports views).
urlpatterns = [
    # Create a new import from posted citizen data.
    path("imports", import_views.create_import, name="create_import"),
    # List all citizens of one import.
    path("imports/<int:import_id>/citizens", import_views.retrieve_import, name="retrieve_import"),
    # Partially update a single citizen of an import.
    path("imports/<int:import_id>/citizens/<int:citizen_id>", import_views.patch_citizen, name="patch_citizen"),
    # Birthday statistics for the import's citizens.
    path("imports/<int:import_id>/citizens/birthdays", import_views.retrieve_birthdays, name="birthdays"),
    # Age-percentile statistics grouped by town.
    path("imports/<int:import_id>/towns/stat/percentile/age", import_views.retrieve_town_stats, name="town_stats"),
]
|
import os
from __setup import TestCase
from __setup import DATA_DIR
def join(path):
    """Resolve *path* relative to the test data directory (DATA_DIR)."""
    data_root = DATA_DIR
    return os.path.join(data_root, path)
class TestLogger(TestCase):
    """Tests for graphitequery.settings storage-path configuration."""

    def test_setup_storage_variables(self):
        """All storage paths must be derived from the given DATA_DIR."""
        from graphitequery import settings
        settings.setup_storage_variables(DATA_DIR)
        self.assertEqual(settings.INDEX_FILE, join("index"))
        # CERES_DIR is None when the ceres backend is unavailable.
        if settings.CERES_DIR is not None:
            self.assertEqual(settings.CERES_DIR, join("ceres"))
        self.assertEqual(settings.WHISPER_DIR, join("whisper"))
        self.assertEqual(settings.STANDARD_DIRS, [join("whisper")])

    def test_creating_directories(self):
        """Directories are only created when explicitly requested."""
        import os
        from graphitequery import settings
        settings.CREATE_DIRECTORIES = False
        # Test non-creation of directories
        self.assertFalse(os.path.exists(DATA_DIR))
        settings.setup_storage_variables(DATA_DIR)
        self.assertFalse(os.path.exists(DATA_DIR))
        self.assertFalse(os.path.exists(settings.WHISPER_DIR))
        if settings.CERES_DIR is not None:
            self.assertFalse(os.path.exists(settings.CERES_DIR))
        # Test creating directories
        settings.setup_storage_variables(DATA_DIR, create_directories=True)
        # NOTE(review): asserts the flag stays False after the call — confirm
        # setup_storage_variables is meant to leave/reset it.
        self.assertFalse(settings.CREATE_DIRECTORIES)
        self.assertTrue(os.path.exists(DATA_DIR))
        self.assertTrue(os.path.exists(settings.WHISPER_DIR))
        if settings.CERES_DIR is not None:
            self.assertTrue(os.path.exists(settings.CERES_DIR))
|
from .shapenet import shapenet

# Public API of the package.
# BUGFIX: ``__all__`` was assigned twice — the second assignment silently
# discarded the first tuple — and the second value, ('point_cloud'), was a
# bare string rather than a one-element tuple, so ``from pkg import *``
# would have iterated it character by character.  The names are merged here;
# confirm that 'point_cloud' is a real submodule/attribute.
__all__ = ('shapenet', 'modelnet40', 'SHREC2016', 'point_cloud')
from .ObjectProperty import ObjectProperty


class EntitySpawnflags(ObjectProperty):
    """Bitmask "spawnflags" property of a map entity.

    Each entry of *listOfFlags* describes one flag (a bit ``value`` plus a
    ``default_value`` telling whether it starts set); the combined bitmask is
    stored in ``self.value``.
    """

    def __init__(self, listOfFlags, mapObject):
        ObjectProperty.__init__(self, mapObject)
        self.flagList = listOfFlags   # per-flag definitions (value / default_value)
        self.name = "spawnflags"
        self.valueType = "flags"
        self.defaultValue = 0
        self.value = 0
        # OR together the flags that are enabled by default.
        for flag in listOfFlags:
            if flag.default_value:
                self.value |= flag.value

    def clone(self, mapObject):
        """Return a copy of this property bound to *mapObject*."""
        flags = EntitySpawnflags(list(self.flagList), mapObject)
        self.copyBase(flags)
        return flags

    def getDisplayName(self):
        return "Spawnflags"

    def getDescription(self):
        return "List of flags set on this entity."

    def isExplicit(self):
        # Not explicit, this came from the FGD file.
        return False

    def hasSpawnflags(self):
        """True when any flag definitions exist for this entity class."""
        return len(self.flagList) > 0

    def hasFlags(self, flags):
        """True when at least one bit of *flags* is set in the current value."""
        return (self.value & flags) != 0

    def setFlags(self, flags):
        """Set the given flag bit(s)."""
        self.value |= flags

    def clearFlags(self, flags):
        """Clear the given flag bit(s)."""
        self.value &= ~(flags)

    def writeKeyValues(self, kv):
        """Serialize the bitmask as a decimal string key/value."""
        kv.setKeyValue(self.name, str(self.value))

    def readKeyValues(self, kv):
        """Parse the bitmask back from its string key/value."""
        self.value = int(kv.getKeyValue(self.name))
|
from keras.layers import Flatten, Dense, Conv2D ,Dropout, MaxPooling2D, AveragePooling2D
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras.layers.normalization import BatchNormalization
from keras.applications.vgg16 import VGG16
from keras.applications.inception_v3 import InceptionV3
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_resnet_v2 import InceptionResNetV2
import utils
def get_model2(input_shape=(128, 128, 3)):
    """Plain CNN: four conv/pool stages followed by a small dense head.

    Output layer size is taken from utils.get_classes(); compiled with
    categorical cross-entropy and Adam(1e-4).
    """
    model = Sequential()
    model.add(BatchNormalization(input_shape=input_shape))
    # Convolution + pooling stages: same 5x5 kernels, growing filter counts.
    for n_filters in (32, 32, 64, 64):
        model.add(Conv2D(n_filters, (5, 5), padding='same', activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    # Flatten into the fully-connected head.
    model.add(Flatten())
    # Two dense blocks with batch-norm and dropout regularisation.
    for units, drop_rate in ((64, 0.4), (32, 0.2)):
        model.add(Dense(units, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(drop_rate))
    # Softmax output over all classes.
    model.add(Dense(len(utils.get_classes()), activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(1e-4), metrics=['accuracy'])
    return model
def create_conv_model(model):
    """Return *model* truncated immediately after its last Conv2D layer."""
    conv_positions = [i for i, layer in enumerate(model.layers) if type(layer) is Conv2D]
    last_conv_layer = model.layers[conv_positions[-1]]
    return Model(inputs=model.input, outputs=last_conv_layer.output)
def stack_on_top(p, model):
    """Append a pooled, batch-normed dense classifier head onto *model*.

    p: base dropout rate; the three dropout layers use p/4, p and p/2.
    Returns a new Model from model.input to the softmax output (uncompiled).
    """
    inp = model.output
    x = MaxPooling2D()(inp)
    # NOTE(review): axis=1 normalizes the channel axis only for channels-first
    # data; with the default channels-last layout this looks suspicious —
    # confirm the intended data format.
    x = BatchNormalization(axis=1)(x)
    x = Dropout(p / 4)(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(p)(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(p / 2)(x)
    y = Dense(len(utils.get_classes()), activation='softmax')(x)
    model = Model(inputs=[model.input], outputs=[y])
    return model
def _get_pretrained(base_cls, input_shape, p=0.6, lr=1e-4):
    """Build a transfer-learning classifier from pretrained *base_cls*.

    Loads ImageNet weights without the top, truncates after the last Conv2D
    (create_conv_model), stacks the dense head (stack_on_top with dropout
    base rate *p*) and compiles with categorical cross-entropy + Adam(lr).
    This replaces four copy-pasted, otherwise identical functions.
    """
    model = base_cls(weights='imagenet', include_top=False, input_shape=input_shape)
    model = create_conv_model(model)
    model = stack_on_top(p, model)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr), metrics=['accuracy'])
    return model


def get_VGG16(input_shape=(128, 128, 3)):
    """VGG16-based transfer-learning classifier (see _get_pretrained)."""
    return _get_pretrained(VGG16, input_shape)


def get_InceptionV3(input_shape=(299, 299, 3)):
    """InceptionV3-based transfer-learning classifier (see _get_pretrained)."""
    return _get_pretrained(InceptionV3, input_shape)


def get_ResNet50(input_shape=(224, 224, 3)):
    """ResNet50-based transfer-learning classifier (see _get_pretrained)."""
    return _get_pretrained(ResNet50, input_shape)


def get_InceptionResNetV2(input_shape=(299, 299, 3)):
    """InceptionResNetV2-based transfer-learning classifier (see _get_pretrained)."""
    return _get_pretrained(InceptionResNetV2, input_shape)
|
from redis import Redis

# Small demo of basic Redis string/counter commands against a local server.
redis_connection = Redis(decode_responses=True)

key, value = "some-key", 55
key2, value2 = "some-key2", 123

# Store both values, then read one back and exercise the atomic counters.
redis_connection.set(key, value)
redis_connection.set(key2, value2)

print(redis_connection.get(key))
print(redis_connection.incr(key, 50))
print(redis_connection.decr(key, 23))
'''
Module to manage Zenny
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import logging
log = logging.getLogger(__name__)
try:
import RPi.GPIO as GPIO
from gtts import gTTS
from tempfile import TemporaryFile
import pyttsx
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Define the module's virtual name
__virtualname__ = 'zenny'
def __virtual__():
    """Only load this module when the GPIO/TTS libraries imported successfully.

    NOTE(review): implicitly returns None (not False) when the libraries are
    missing; returning ``(False, reason)`` would be the clearer salt idiom —
    confirm before changing loader behaviour.
    """
    if HAS_LIBS:
        return __virtualname__
def _session():
    """Configure the three LED GPIO pins (board numbering) and clear them.

    Pins 11/13/15 drive the status LED channels; writing 1 to all three
    corresponds to the '111' / 'clear' state used by statusupdate().
    """
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(11, GPIO.OUT)
    GPIO.setup(13, GPIO.OUT)
    GPIO.setup(15, GPIO.OUT)
    GPIO.output(11, 1)
    GPIO.output(13, 1)
    GPIO.output(15, 1)
    return
def setcolor(intcolor):
    """Drive the status LED from a 3-character bit string, e.g. '110'.

    intcolor: three '0'/'1' characters applied to pins 11, 13 and 15 in order.
    """
    log.debug('color int= ' + str(intcolor))
    GPIO.output(11, int(intcolor[0]))
    GPIO.output(13, int(intcolor[1]))
    GPIO.output(15, int(intcolor[2]))
def statusupdate(color='clear'):
    """Set the status LED to a named colour: 'blue', 'red', 'green' or 'clear'.

    Unknown colour names leave the LED in the cleared state produced by
    _session() (all channels written 1), matching the original if-chain.
    """
    _session()
    # Pin bit patterns per colour name (see setcolor for the pin order).
    color_codes = {
        'blue': '110',
        'red': '101',
        'green': '011',
        'clear': '111',
    }
    code = color_codes.get(color)
    if code is not None:
        setcolor(code)
def say(msg='Testing the system'):
    """Speak *msg* aloud through the local pyttsx TTS engine."""
    # Previous gTTS + mplayer implementation, kept for reference:
    # filepath = '/tmp/hello.mp3'
    # tts = gTTS(text=msg, lang='en')
    # #f = TemporaryFile()
    # #tts.write_to_fp(f)
    # #f.close()
    # tts.save(filepath)
    # __salt__['cmd.run']('/usr/bin/mplayer -volume 100 {0}'.format(filepath))
    # return
    engine = pyttsx.init()
    engine.setProperty('rate', 150)
    # NOTE(review): pyttsx volume is usually a 0.0-1.0 float; 8 looks out of
    # range — confirm the intended value.
    engine.setProperty('volume', 8)
    engine.say(msg)
    engine.runAndWait()
    return
def cleanup():
    """Reset the LED pins to the cleared (all channels written 1) state."""
    _session()
|
#!/usr/local/bin/python2.7
# -*- coding: utf-8 -*-
__author__ = 'https://github.com/password123456/'
import random
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
import urllib
import urllib2
import json
import datetime
import time
class bcolors:
    """ANSI escape codes for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'      # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def computer_random():
    """Return 6 unique random integers drawn from 1..45 (one lotto ticket).

    BUGFIX: np.arange(1, 45) only produced 1..44, silently excluding 45 even
    though the game banner (see user_random) promises numbers from 1~45.
    The old retry loop that re-drew whenever a 0 appeared was dead code:
    0 can never be drawn from this candidate range.
    """
    lotto_num_list = np.arange(1, 46)
    return np.random.choice(lotto_num_list, 6, replace=False)
def user_random():
time_now = time.strftime('%Y-%m-%d %H:%M:%S')
print "============================="
print " 로또 번호 조회기 "
print "============================="
print "[+] 시작: %s" % time_now
print "[+] 조건: 1~45 중 임의의 번호 6 개를 만듭니다."
ok = False
lotto_num_list = np.arange(1,45)
while not ok:
ui = np.random.choice(lotto_num_list,6,replace=False)
tmp = np.where(ui == 0)
(m, )= tmp[0].shape
if(m == 0):
ok = True
return ui
def match_lists(list1, list2):
    """Return how many distinct values appear in both input lists."""
    return len(set(list1) & set(list2))
def calculate():
    """Draw one user ticket, then simulate tickets_sold computer draws.

    Match counts are accumulated in the module-level globals match3..match6
    (draws matching 3..6 of the user's numbers), which main() reads after
    this returns.
    """
    # The user picks 6 numbers.
    user_list = user_random()
    print "[+] 결과: %s" % user_list
    global match3
    global match4
    global match5
    global match6
    match3 = 0
    match4 = 0
    match5 = 0
    match6 = 0
    # The computer re-draws this many times (number of possible 6/45 tickets).
    tickets_sold = 8145060
    print "[+] 계산: 1/%d 개의 난수를 생성하여 생성된 번호와 일치할 확률을 계산합니다." % tickets_sold
    for k in range(tickets_sold):
        comp_list = computer_random()
        # Count how many numbers the two tickets share.
        matches = match_lists(comp_list, user_list)
        if matches == 3:
            match3 += 1
        elif matches == 4:
            match4 += 1
        elif matches == 5:
            match5 += 1
        elif matches == 6:
            match6 += 1
def main():
    """Repeat full simulations until the jackpot appears at least `count` times."""
    # Stop once the user's 6 numbers fully matched this many times in one run.
    count = 3
    while True:
        calculate()
        print "[+] 분석"
        print " - 5등/3 개 번호일치: %d 번" % match3
        print " - 4등/4 개 번호일치: %d 번" % match4
        print " - 3등/5 개 번호일치: %d 번" % match5
        print " - 1등/모두 일치: %d 번" % match6
        if (match6 >= count):
            print "[+] 6 개 번호가 일치하는 번호가 %d 번 탐지 되었습니다." % (match6)
            print "[-] 추첨을 종료합니다."
            print "[-] 걸리면 반띵 알지?"
            break
        else:
            print
            print " --> 맞는 조건이 없어 처음부터 다시 번호를 뽑습니다."
            print
            continue
# Script entry point: run until the jackpot condition is met or Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly.
        sys.exit(0)
    except Exception, e:
        # Python 2 syntax; report any other failure in warning colour.
        print '%s[-] Exception::%s%s' % (bcolors.WARNING, e, bcolors.ENDC)
|
from twython import Twython
import time
import os
# This file is not included (my secrets!)
from my_keys import app_key, app_secret, oauth_token, oauth_token_secret
__author__ = 'mpolensek'
# Documentation is like sex.
# When it's good, it's very good.
# When it's bad, it's better than nothing.
# When it lies to you, it may be a while before you realize something's wrong.
class Uploader(object):
    """Watches a folder and tweets every .jpg in it exactly once.

    Uploaded filenames are appended to uploaded.txt so that restarts do not
    re-post the same image.  Python 2 code; credentials come from my_keys.
    """

    def __init__(self, folder="."):
        self.folder = folder                         # folder to watch
        self.already_uploaded_file = "uploaded.txt"  # persistent upload log
        self.already_uploaded = []                   # cached log contents
        self.image_format_extension = ".jpg"
        # Uncomment and fill with your details
        # app_key = "your key"
        # app_secret = "your secret"
        # oauth_token = "your otoken"
        # oauth_token_secret = "your osecret"
        self.twitter = Twython(app_key=app_key,
                               app_secret=app_secret,
                               oauth_token=oauth_token,
                               oauth_token_secret=oauth_token_secret)
        self.load_already_uploaded()

    def upload(self, new_file):
        """Tweet *new_file* unless already posted; return True when uploaded."""
        if not self.is_already_uploaded(new_file):
            with open(new_file, "rb") as image:
                image_ids = self.twitter.upload_media(media=image)
                self.twitter.update_status(status="hello this is a status #testing_smth",
                                           media_ids=[image_ids["media_id"]])
                self.write_to_uploaded(new_file=new_file)
            return True
        else:
            print "Already uploaded!"
            return False

    def load_already_uploaded(self):
        """Refresh the in-memory list from the log file (missing file is OK)."""
        try:
            with open(self.already_uploaded_file, "r") as log_file:
                con = log_file.read()
                self.already_uploaded = [x.strip() for x in con.split("\n")]
        except IOError as err:
            # First run: the log file does not exist yet.
            pass

    def write_to_uploaded(self, new_file):
        """Append *new_file* to the log, then reload the cache."""
        with open(self.already_uploaded_file, "a+") as log_file:
            log_file.write("{0}\n".format(new_file))
        self.load_already_uploaded()

    def is_already_uploaded(self, new_file):
        """True when *new_file* is already recorded in the upload log."""
        self.load_already_uploaded()
        if new_file.strip() in self.already_uploaded:
            return True
        return False

    def run_watcher(self):
        """Poll the folder once per second, uploading new .jpg files forever."""
        try:
            while "pigs" != "fly":
                all_files = os.listdir(self.folder)
                for f in all_files:
                    if f.endswith(self.image_format_extension):
                        if self.upload(new_file=os.path.join(self.folder, f)):
                            print "Uploaded '{0}'".format(f)
                time.sleep(1)
        except KeyboardInterrupt as err:
            print "Thank you Exiting..."
|
import sys

# Ask for three integers and report the smallest; any bad input aborts.
print("Welcome. This program will calculate the minimum of three numbers you enter.")
try:
    prompts = ("First number please:", "Second number please:", "Third number please:")
    L = [int(input(prompt)) for prompt in prompts]
    print("The minimum number is", min(L))
except Exception as e:
    print("Error", e)
    print("Sorry we failed to compare the numbers")
    sys.exit()
#! /usr/bin/env python
"""
Given a databag and a video, generate crops of interesting particles.
CHANGELOG:
USING:
As a command line utility:
$ Cropper.py input_video input_bag output_dir [-p particle_id -pad padding]
As a module:
import Cropper from Cropper
cropper = Cropper(input_video, aDataBagObjectOrFilename)
crop = cropper.isolate(frame_id, particle_id)
Author: Martin Humphreys
"""
import cv2
from argparse import ArgumentParser
from math import floor, ceil
import os
import uuid
from DataBag import DataBag
from Query import Query
import numpy as np
from functions.to_precision import to_precision
from functions.dotdict import dotdict
from BackgroundExtractor import SimpleExtractor as BackgroundExtractor
from Normalizer import Normalizer
from FrameGrabber import FrameGrabber
import base64
import sqlite3
# Padding (px) added around each particle's bounding radius when cropping.
PAD = 5
# Side length (px) of the final, resized square crop.
SIZE = 64
class Crop(object):
    """Extracts per-particle crops from a video using detections in a DataBag.

    Frames are normalized against an extracted (or supplied) background before
    cropping; the last normalized frame is cached so consecutive lookups in
    the same frame are cheap.  Python 2 code (uses `unicode` and print
    statements).
    """

    def __init__(self, video, bag, opts):
        """video: video path; bag: DataBag or filename; opts: dict/namespace
        with at least 'background' and 'verbose'."""
        self.bag = DataBag.fromArg(bag)
        self.query = Query(self.bag)
        self.grabber = FrameGrabber(video)
        self.normalizer = Normalizer()
        if isinstance(opts, dict):
            self.opts = dotdict(opts)
        else:
            self.opts = opts
        # The background may be given as an image path or a numpy array;
        # otherwise it is extracted from the video itself below.
        if self.opts.background is not None:
            if isinstance(self.opts.background, (str, unicode)):
                self.bg = cv2.imread(self.opts.background, 0)
            elif isinstance(self.opts.background, np.ndarray):
                self.bg = self.opts.background
        if not hasattr(self, "bg"):
            extractor = BackgroundExtractor(self.grabber.vc, self.opts)
            if self.opts.verbose:
                print "Extracting background for normalization..."
            self.bg = extractor.extract()
            if self.opts.verbose:
                print "Background extraction complete."
        # caching: most recently normalized frame and its id
        self.isolate_last_frame = None
        self.isolate_last_frame_id = -1

    def crop(self, img, top_left_x, top_left_y, width, height):
        """
        Crops a ROI from an image, handling OOB issues correctly.
        https://stackoverflow.com/a/42032814
        """
        bottom_right_x = top_left_x + width;
        bottom_right_y = top_left_y + height;
        if (top_left_x < 0 or top_left_y < 0 or bottom_right_x > img.shape[1] or bottom_right_y > img.shape[0]):
            # border padding will be required
            border_left, border_right, border_top, border_bottom = 0, 0, 0, 0
            if (top_left_x < 0):
                width = width + top_left_x
                border_left = -1 * top_left_x
                top_left_x = 0
            if (top_left_y < 0):
                height = height + top_left_y
                border_top = -1 * top_left_y
                top_left_y = 0
            if (bottom_right_x > img.shape[1]):
                width = width - (bottom_right_x - img.shape[1])
                border_right = bottom_right_x - img.shape[1]
            if (bottom_right_y > img.shape[0]):
                height = height - (bottom_right_y - img.shape[0])
                border_bottom = bottom_right_y - img.shape[0]
            crop = img[top_left_y:top_left_y+height, top_left_x:top_left_x+width]
            # Replicate edge pixels to fill the out-of-bounds margin.
            crop = cv2.copyMakeBorder(crop, border_top, border_bottom, border_left, border_right, cv2.BORDER_REPLICATE)
        else:
            # no border padding required
            crop = img[top_left_y:top_left_y+height, top_left_x:top_left_x+width]
        return crop;

    def isolate(self, frame_no, particle_id):
        """Return the normalized crop around one particle in one frame."""
        props = self.query.particle_properties(frame_no, particle_id)
        if props is None:
            # NOTE(review): missing properties are only printed; the next line
            # will raise AttributeError — confirm whether this should abort.
            print frame_no, particle_id
        x, y, a, r = int(round(props.x)), int(round(props.y)), float(props.area), float(props.radius)
        # Effective radius: at least the radius implied by the detected area.
        r = int(max(r, np.sqrt(a/np.pi)))
        x1, y1, d = x - r - PAD, y - r - PAD, (r*2)+(PAD*2)
        if frame_no == self.isolate_last_frame_id:
            # Cache hit: reuse the previously normalized frame.
            ret, frame = True, self.isolate_last_frame
        else:
            frame = self.normalizer.normalizeFrame(self.bg, self.grabber.frame(frame_no, True))
            self.isolate_last_frame = frame
            self.isolate_last_frame_id = frame_no
        return self.crop(frame, x1, y1, d, d)

    def resize(self, img):
        """Resize a crop to SIZE x SIZE; also return the applied scale factor."""
        s = float(SIZE) / float(img.shape[0])
        return cv2.resize(img, (SIZE, SIZE), interpolation = cv2.INTER_CUBIC), s

    def get(self, frame_no, particle_id):
        """Isolate and resize in one step; returns (crop, scale)."""
        return self.resize(self.isolate(frame_no, particle_id))

    def update(self, frame_no, particle_id):
        """Write the crop bytes and scale into the bag's assoc table."""
        crop, s = self.get(frame_no, particle_id)
        crop = sqlite3.Binary(crop.tobytes())
        c = self.bag.cursor()
        c.execute("UPDATE assoc SET crop = ?, scale = ? WHERE frame = ? AND particle = ?", (crop, s, frame_no, particle_id))
def build_parser():
    """Build the ArgumentParser for command-line use of this module."""
    ap = ArgumentParser()
    # Positional arguments: the source video and the detection databag.
    ap.add_argument('input_video', help='The video file to use')
    ap.add_argument('bag', help='The databag file with stored detection or tracking results')
    # Optional switches.
    ap.add_argument('-b', '--background', help='Normalize with given background')
    ap.add_argument('-v', '--verbose', help='print verbose statements while executing', action='store_true')
    return ap
def main(opts):
if not os.path.isfile(opts.input_video):
parser.error("Input video file %s does not exist." % opts.bg)
if not os.path.isfile(opts.bag):
parser.error("DataBag file %s does not exist." % opts.bag)
cropper = Crop(opts.input_video, opts.bag, opts)
for f in cropper.query.frame_list():
if opts.verbose:
print "Extracting crops from frame", f.frame
for p in cropper.query.particles_in_frame(f.frame):
cropper.update(p.frame, p.id)
cropper.bag.commit()
if __name__ == '__main__':
main(build_parser().parse_args())
|
from Jumpscale import j
class web_interface(j.baseclasses.object):
    """Self-test tool for the threebot web interface (gedis over HTTP/WS)."""

    __jslocation__ = "j.tools.packages.webinterface"

    def test(self, port=None, prefix="", scheme="http"):
        """
        kosmos `j.tools.packages.webinterface.test()'

        Starts the threebot server, installs the gedis test package, checks
        the echo actor over plain HTTP and over a websocket, then removes the
        package and stops the server.
        :return:
        """
        # Build the base URL from the optional port and path prefix.
        base_url = "0.0.0.0"
        if port:
            base_url = base_url + f":{port}"
        if prefix:
            base_url = base_url + f"/{prefix}"
        url = f"{scheme}://{base_url}"
        # Server runs in the background; a gedis client drives the package manager.
        j.servers.threebot.start(background=True)
        gedis_client = j.clients.gedis.get(
            name="default", host="127.0.0.1", port=8901, package_name="zerobot.packagemanager"
        )
        gedis_client.actors.package_manager.package_add(
            j.core.tools.text_replace(
                "{DIR_BASE}/code/github/threefoldtech/jumpscaleX_core/JumpscaleCore/servers/gedis/pytests/test_package"
            )
        )
        gedis_client.reload()
        print("testing gedis http")
        # HTTP round-trip: POST to the echo actor must return the input verbatim.
        assert (
            j.clients.http.post(
                f"{url}/zerobot/test_package/actors/actor/echo",
                data=b'{"args":{"_input":"hello world"}}',
                headers={"Content-Type": "application/json"},
            )
            .read()
            .decode()
            == "hello world"
        )
        print("gedis http OK")
        print("testing gedis websocker")
        from websocket import WebSocket
        import ssl

        # Websocket round-trip; certificate verification disabled (self-signed).
        ws = WebSocket(sslopt={"cert_reqs": ssl.CERT_NONE})
        ws.connect(f"wss://{base_url}/gedis/websocket")
        assert ws.connected
        payload = """{
        "namespace": "default",
        "actor": "echo",
        "command": "actor.echo",
        "args": {"_input": "hello world"},
        "headers": {"response_type":"json"}
        }"""
        ws.send(payload)
        assert ws.recv() == "hello world"
        print("gedis websocket OK")
        print("tearDown")
        # Clean up: remove the test package and stop the server.
        gedis_client.actors.package_manager.package_delete("zerobot.test_package")
        j.servers.threebot.default.stop()
|
"""App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def set_message_limit(instance, key):
    """Store message limit in Redis.

    Syncs the Redis hash entry for *key* with instance.message_limit:
    unchanged limit -> no-op; limit set to None -> drop the entry;
    otherwise apply the delta against the previously loaded value.
    """
    old_message_limit = instance._loaded_values.get("message_limit")
    if old_message_limit == instance.message_limit:
        return
    rclient = redis.Redis(
        host=settings.REDIS_HOST,
        port=settings.REDIS_PORT,
        db=settings.REDIS_QUOTA_DB
    )
    if instance.message_limit is None:
        # delete existing key
        if rclient.hexists(constants.REDIS_HASHNAME, key):
            rclient.hdel(constants.REDIS_HASHNAME, key)
        return
    # Apply only the difference so external decrements are preserved.
    if old_message_limit is not None:
        diff = instance.message_limit - old_message_limit
    else:
        diff = instance.message_limit
    rclient.hincrby(constants.REDIS_HASHNAME, key, diff)
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
    """Store domain message limit in Redis (keyed by domain name)."""
    set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
    """Store mailbox message limit in Redis (keyed by full address)."""
    set_message_limit(instance, instance.full_address)
|
#from matrix_tracker import lattice
import copy
import multiprocessing
from pcaspy import Driver, SimpleServer
import time
from epics import caget, PV
import numpy as np
import random
from MakeModel import SurrogateModel
import json
class SimDriver(Driver):
    """pcaspy Driver serving writable input PVs and read-only output PVs.

    Output PV reads may be perturbed with configurable noise; per-PV
    ':dist' / ':sigma' companion PVs control the distribution.
    """

    def __init__(self, input_pv_state, output_pv_state, noise_params=None):
        """input_pv_state: dict of writable (command) PV values;
        output_pv_state: dict of read-only (simulated) PV values;
        noise_params: optional {pv: {'dist': ..., 'sigma': ...}} map."""
        super(SimDriver, self).__init__()
        self.input_pv_state = input_pv_state
        self.output_pv_state = output_pv_state
        self.noise_params = noise_params if noise_params else {}

    def read(self, reason):
        """Return the PV value; output PVs get noise applied on every read."""
        if reason in self.output_pv_state:
            return self.get_noisy_pv(reason)
        return self.getParam(reason)

    def write(self, reason, value):
        """Reject writes to output PVs; record writes to input PVs."""
        if reason in self.output_pv_state:
            print(reason + " is a read-only pv")
            return False
        if reason in self.input_pv_state:
            self.input_pv_state[reason] = value
        self.setParam(reason, value)
        self.updatePVs()
        return True

    def set_output_pvs(self, outpvs):
        """Update known output PVs and publish their (noisy) values to CA."""
        post_updates = False
        for opv in outpvs:
            if opv in self.output_pv_state:
                self.output_pv_state[opv] = outpvs[opv]
                self.setParam(opv, self.get_noisy_pv(opv))
                post_updates = True
        if post_updates:
            self.updatePVs()

    def set_pvs(self, pvs):
        """Set arbitrary PV params and publish if anything was set."""
        post_updates = False
        for pv in pvs:
            self.setParam(pv, pvs[pv])
            post_updates = True
        if post_updates:
            self.updatePVs()

    def get_noisy_pv(self, pv):
        """Return the output PV value plus the configured random noise.

        BUGFIX: the 'normal' distribution previously drew from
        random.uniform(0, sigma) — a one-sided uniform, not a normal
        distribution; it now uses random.gauss(0, sigma).
        """
        noise = 0
        if pv in self.noise_params:
            dist = self.getParam(pv + ':dist')
            sigma = self.getParam(pv + ':sigma')
            if dist == 'uniform':
                # Uniform with standard deviation sigma: full width sqrt(12)*sigma.
                full_width = np.sqrt(12) * sigma
                noise = random.uniform(-full_width / 2.0, full_width / 2.0)
            elif dist == 'normal':
                noise = random.gauss(0, sigma)
        return self.output_pv_state[pv] + noise
class SyncedSimPVServer():
    '''Defines basic PV server that continuously syncs the input model to the input (command) EPICS PV values
    and publishes updated model data to output EPICS PVs. Assumes fast model execution, as the model executes
    in the main CAS server thread. CAS for the input and ouput PVs is handled by the SimDriver object'''

    def __init__(self, name, input_pvdb, output_pvdb, noise_params, model, sim_params=None):
        self.name = name
        self.pvdb = {}             # combined CA database (inputs + outputs + noise PVs)
        self.input_pv_state = {}   # current values of the command PVs
        self.output_pv_state = {}  # NOTE: unused here; the driver keeps its own copy
        self.model = model
        for pv in input_pvdb:
            self.pvdb[pv] = input_pvdb[pv]
            self.input_pv_state[pv] = input_pvdb[pv]["value"]
        output_pv_state = {}
        for pv in output_pvdb:
            self.pvdb[pv] = output_pvdb[pv]
            output_pv_state[pv] = output_pvdb[pv]["value"]
        # Companion PVs exposing the noise parameters of noisy outputs.
        for pv in output_pvdb:
            if pv in noise_params:
                self.pvdb[pv + ':sigma'] = {'type': 'float', 'value': noise_params[pv]['sigma']}
                self.pvdb[pv + ':dist'] = {'type': 'char', 'count': 100, 'value': noise_params[pv]['dist']}
        prefix = self.name + ":"
        self.server = SimpleServer()
        self.server.createPV(prefix, self.pvdb)
        self.driver = SimDriver(self.input_pv_state, output_pv_state, noise_params)
        self.serve_data = False
        self.sim_params = sim_params

    def set_sim_params(self, **params):
        """Replace the stored simulation parameters.

        BUGFIX: the original signature was ``def set_sim_params(**params)``
        without ``self``, so calling it on an instance raised immediately.
        """
        self.sim_params = params

    def start_server(self):
        """Run the CA server loop, re-running the model when inputs change."""
        self.serve_data = True
        sim_pv_state = copy.deepcopy(self.input_pv_state)
        # Do initial simulation
        print("Initializing sim...")
        output_pv_state = self.model.run(self.input_pv_state, verbose=True)
        self.driver.set_output_pvs(output_pv_state)
        print("...done.")
        while self.serve_data:
            # process CA transactions
            self.server.process(0.1)
            # Re-simulate until the model state has caught up with the inputs.
            while sim_pv_state != self.input_pv_state:
                sim_pv_state = copy.deepcopy(self.input_pv_state)
                output_pv_state = self.model.run(self.input_pv_state, verbose=True)
                self.driver.set_output_pvs(output_pv_state)

    def stop_server(self):
        """Stop the serve loop after the current iteration."""
        self.serve_data = False
class OnlineSurrogateModel():
    """Couples the scalar and image NN surrogate models into one run() call."""

    def __init__(self):
        # Pretrained surrogate networks loaded from local .h5 files.
        self.scalar_model = SurrogateModel(model_file='Scalar_NN_SurrogateModel.h5')
        self.image_model = SurrogateModel(model_file='YAG_NN_SurrogateModel.h5')

    def run(self, pv_state, verbose=True):
        """Evaluate both models for *pv_state* and return a flat output dict.

        The 'x:y' entry packs [nbins_x, nbins_y, ext(4 values), image pixels]
        into one flat float array suitable for a waveform PV.
        """
        t1 = time.time()
        print('Running model...', end='')
        scalar_data = self.scalar_model.evaluate(pv_state)
        # NOTE(review): this evaluate_image() result is immediately overwritten
        # by evaluate_image_array() below — possibly a redundant (and costly)
        # call; confirm before removing.
        image_array, ext = self.image_model.evaluate_image(pv_state)
        output = {}
        for scalar in scalar_data:
            output[scalar] = scalar_data[scalar][0]
        image_array, ext = self.image_model.evaluate_image_array(pv_state)
        print(ext)
        ext = [ext[0, 0], ext[0, 1], ext[0, 2], ext[0, 3]]  # From Lipi: # At the moment there is some scaling done by hand, this can be changed!
        # Flatten bin counts + extents + pixel data into one array.
        image_values = np.zeros((2 + len(ext) + image_array.shape[1],))
        image_values[0] = self.image_model.bins[0]
        image_values[1] = self.image_model.bins[1]
        image_values[2:6] = ext
        image_values[6:] = image_array
        # output['z:pz'] = image_values
        output['x:y'] = image_values
        t2 = time.time()
        print('Ellapsed time: ' + str(t2 - t1))
        return output
def fix_units(unit_str):
    """Normalize a unit label: strip whitespace, keep only the last
    space-separated token, and drop any parentheses.

    e.g. 'energy (MeV)' -> 'MeV'.
    """
    cleaned = unit_str.strip()
    parts = cleaned.split(' ')
    if len(parts) > 1:
        cleaned = parts[-1]
    return cleaned.replace('(', '').replace(')', '')
# Script entry point: build the PV databases from the surrogate model's
# metadata, dump them to pvdef.json and serve them over Channel Access.
if __name__ == '__main__':
    vmname = 'smvm'
    sm = OnlineSurrogateModel()
    # Start with the nice example from Lipi
    default_inputs = {'maxb(2)': 0.06125866317542922, 'phi(1)': 8.351877669807294, 'q_total': 0.020414630732101164, 'sig_x': 0.4065596830730608}
    default_output = sm.run(default_inputs)
    # Writable command PVs, one per scalar-model input.
    cmd_pvdb = {}
    for ii, input_name in enumerate(sm.scalar_model.input_names):
        cmd_pvdb[input_name] = {'type': 'float', 'prec': 8, 'value': default_inputs[input_name], 'units':fix_units(sm.scalar_model.input_units[ii]),'range':list(sm.scalar_model.input_ranges[ii])}
    # Read-only output PVs, one per scalar-model output plus the image waveform.
    sim_pvdb = {}
    for ii, output_name in enumerate(sm.scalar_model.output_names):
        sim_pvdb[output_name] = {'type': 'float', 'prec': 8, 'value': default_output[output_name],'units':fix_units(sm.scalar_model.output_units[ii])}
    #sim_pvdb['z:pz']={'type': 'float', 'prec': 8, 'count':len(default_output['z:pz']),'units':'mm:delta','value':list(default_output['z:pz'])}
    sim_pvdb['x:y']={'type': 'float', 'prec': 8, 'count':len(default_output['x:y']),'units':'mm:mm','value':list(default_output['x:y'])}
    # Persist the PV definition for external tools.
    pv_def = {'prefix':vmname+':', 'input':cmd_pvdb, 'output':sim_pvdb}
    with open('pvdef.json', 'w') as fp:
        json.dump(pv_def, fp, sort_keys=True, indent=4)
    # Add in noise for fun
    #sim_pvdb['x_95coremit']['scan']=0.2
    #noise_params = {'x_95coremit':{'sigma':0.5e-7,'dist':'uniform'}}
    noise_params = {}
    server = SyncedSimPVServer(vmname, cmd_pvdb, sim_pvdb, noise_params, sm)
    server.start_server()
|
# Generated by Django 3.0.1 on 2020-01-04 21:58
from django.db import migrations, models
import django.db.models.deletion
import django.utils.crypto
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the GiftList and Gift tables.

    Both models use UUID primary keys; GiftList carries three share links
    whose slugs default to django.utils.crypto.get_random_string.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='GiftList',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                # Share links with random-slug defaults, fixed at creation.
                ('owner_link', models.SlugField(default=django.utils.crypto.get_random_string, editable=False)),
                ('contributor_link', models.SlugField(default=django.utils.crypto.get_random_string, editable=False)),
                ('recipient_link', models.SlugField(default=django.utils.crypto.get_random_string, editable=False)),
                ('title', models.CharField(max_length=120)),
                ('recipient', models.CharField(max_length=120)),
                ('description', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('created_by', models.CharField(max_length=120)),
            ],
        ),
        migrations.CreateModel(
            name='Gift',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=120)),
                ('description', models.TextField()),
                ('cost', models.IntegerField()),
                # Completion tracking: timestamp/name are filled when done.
                ('completed', models.BooleanField(default=False)),
                ('completed_on', models.DateTimeField(null=True)),
                ('completed_by', models.CharField(blank=True, max_length=120, null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('created_by', models.CharField(max_length=120)),
                # Each gift belongs to one list; deleting the list removes it.
                ('gift_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gyft.GiftList')),
            ],
        ),
    ]
|
# -*- coding:utf-8 -*-
import os
import dlib
import glob
import cv2
from PIL import Image
import gc
import threading
import time
import queue
# Load the dlib face-detection / landmark / embedding models at import time.
try:
    import face_recognition_models
    # Frontal (HOG-based) face detector.
    detector = dlib.get_frontal_face_detector()
    # predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
    # pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
    # Five-point landmark predictor (lighter than the 68-point variant).
    shape_predictor_model = face_recognition_models.pose_predictor_five_point_model_location()
    # pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
    shape_detector = dlib.shape_predictor(shape_predictor_model)
    # cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
    # cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)
    # 128-D face embedding network.
    face_rec_model = face_recognition_models.face_recognition_model_location()
    # face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
    face_recognizer = dlib.face_recognition_model_v1(face_rec_model)
except Exception:
    print("Please install `face_recognition_models` with this command before using `face_recognition`:\n")
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

# Work queue of [input_folder, output_folder] pairs shared between the
# producer and consumer threads (maxsize 50; the original comment said 10,
# but the code uses 50).
q = queue.Queue(50)
def productor(i):
    """Producer: walk the module-level `root` tree and enqueue one
    [input_dir, output_dir] pair per subdirectory onto `q`."""
    for dirpath, dirnames, filenames in os.walk(root):
        for sub in dirnames:
            in_dir = os.path.join(dirpath, sub)
            out_dir = os.path.join(output, sub)
            # Blocks when the queue is full, throttling the producer.
            q.put([in_dir, out_dir])
def consumer(j):
    # Consumer: pop a (face_folder, output_folder) pair off the queue,
    # embed every detected face in that folder, cluster the embeddings,
    # and save each cluster's images into its own numbered subfolder.
    while not q.empty():
        face_folder, output_folder = q.get()
        print(threading.current_thread().name, '正在出队:', face_folder)
        # Accumulators for clustering: 128-D descriptors and their source images.
        descriptors = []
        images = []
        # Iterate over every jpg in the folder.
        for f in glob.glob(os.path.join(face_folder, "*.jpg")):
            # print('Processing file:{}'.format(f))
            # Read the image (BGR, as OpenCV loads it).
            img = cv2.imread(f)
            # Convert to RGB, which dlib expects.
            img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # Detect faces (upsample once).
            dets = detector(img2, 1)
            # print("Number of faces detected: {}".format(len(dets)))
            # Process every detected face.
            for index, face in enumerate(dets):
                # Detect the facial landmarks.
                shape = shape_detector(img2, face)
                # Project the face to a 128-D descriptor.
                face_descriptor = face_recognizer.compute_face_descriptor(img2, shape)
                # Keep descriptor plus source image for later grouping.
                descriptors.append(face_descriptor)
                images.append((img2, shape))
        # Cluster descriptors with chinese-whispers (distance threshold 0.35).
        labels = dlib.chinese_whispers_clustering(descriptors, 0.35)
        # print("labels: {}".format(labels))
        num_classes = len(set(labels))
        print(threading.current_thread().name, "Number of clusters: {}".format(num_classes))
        # Group images by cluster label in a dict for convenience.
        face_dict = {}
        for i in range(num_classes):
            face_dict[i] = []
        # print face_dict
        for i in range(len(labels)):
            face_dict[labels[i]].append(images[i])
        # print face_dict.keys()
        # Walk the dict and save each cluster under output_folder/<label>/.
        # file_len = {}
        for key in face_dict.keys():
            file_dir = os.path.join(output_folder, str(key))
            if not os.path.isdir(file_dir):
                os.makedirs(file_dir, exist_ok=True)
            for index, (image, shape) in enumerate(face_dict[key]):
                file_path = os.path.join(file_dir, 'face_' + str(index))
                # print(file_path)
                im = Image.fromarray(image)
                im.save(file_path + '.jpg')
        # Free the large intermediates before taking the next queue item.
        try:
            del descriptors, face_descriptor, face_folder, file_dir, file_path, image, img, img2, labels, shape, images, dets, face_dict, im
            gc.collect()
        except UnboundLocalError:
            # Some names are unbound when a folder contained no jpg/faces.
            print('local variable referenced before assignment')
if __name__ == '__main__':
    root = '/home/linkdata/face_server/daiab/ceshi/out'
    output = '/home/linkdata/face_server/daiab/ceshi/1'
    # threads = []
    # Start one producer thread to fill the queue.
    for i in range(1):
        t = threading.Thread(target=productor, args=(i,), daemon=True)
        t.start()
        print('t_start')
    # Give the producer a head start so the queue is non-empty
    # before consumers check q.empty().
    time.sleep(5)
    # Start twelve consumer threads.
    for j in range(12):
        # print('v_strat')
        v = threading.Thread(target=consumer, args=(j,))
        v.start()
        # print('v_starting')
|
import socket
import sys

# Forward the first two command-line arguments to a local server as one
# comma-separated message.
data = sys.argv
data = [data[1], data[2]]

sock = socket.socket()
sock.connect(('localhost', 9090))
# Python 3 sockets require bytes, and sendall (unlike send) guarantees the
# whole payload is transmitted.
sock.sendall(', '.join(data).encode())
sock.close()
from __future__ import unicode_literals
from tinymce.models import HTMLField
from django.db import models
class faculty_data(models.Model):
    # Django model for one faculty member's public profile.
    # Primary key: institution-assigned faculty identifier.
    faculty_id=models.CharField(primary_key=True,max_length=20,blank=False,null=False)
    name=models.CharField(max_length=300,blank=True,null=True)
    # designation_choice=(
    # ("Professor","Professor"),
    # ("Assistant Professor","Assistant Professor"),
    # ("Temporary Faculty","Temporary Faculty"),
    # )
    # Free-text designation (a choices-based variant is kept commented for reference).
    designation=models.CharField(max_length=200,blank=True,null=True)
    # designation=models.CharField(choices=designation_choice,max_length=200,blank=True,null=True)
    education=models.CharField(max_length=300,blank=True,null=True)
    email=models.CharField(max_length=300,blank=True,null=True)
    mobile=models.CharField(max_length=20,blank=True,null=True)
    # Profile photo; falls back to a bundled placeholder image.
    photo=models.ImageField(upload_to="faculty_images/",default="media/default.png")
    # Timestamps: `modified` updates on every save, `created` is set once.
    modified= models.DateTimeField(auto_now=True,auto_now_add=False)
    created= models.DateTimeField(auto_now=False,auto_now_add=True)
    # Rich-text (TinyMCE) field for miscellaneous details.
    other_details=HTMLField()
    area_of_interest=models.CharField(max_length=300,blank=True,null=True)
# class area_of_interest_data(models.Model):
# faculty_id=models.SmallIntegerField(primary_key=True)
# area_of_interest=models.CharField(max_length=300,blank=True,null=True)
# modified= models.DateTimeField(auto_now=True,auto_now_add=False)
# created= models.DateTimeField(auto_now=False,auto_now_add=True)
# class other_details_data(models.Model):
# faculty_id=models.SmallIntegerField(primary_key=True)
# modified= models.DateTimeField(auto_now=True,auto_now_add=False)
# created= models.DateTimeField(auto_now=False,auto_now_add=True)
|
import unittest
from katas.kyu_7.find_the_volume_of_a_cone import volume
class ConeVolumeTestCase(unittest.TestCase):
    """Known input/output pairs for the cone volume() kata, including
    zero-radius and zero-height edge cases."""

    def test_equal_1(self):
        expected = 153
        self.assertEqual(volume(7, 3), expected)

    def test_equal_2(self):
        expected = 98520
        self.assertEqual(volume(56, 30), expected)

    def test_equal_3(self):
        expected = 0
        self.assertEqual(volume(0, 10), expected)

    def test_equal_4(self):
        expected = 0
        self.assertEqual(volume(10, 0), expected)

    def test_equal_5(self):
        expected = 0
        self.assertEqual(volume(0, 0), expected)
|
"""
Retrieve and store all new tweets from the user home timeline.
"""
import traceback
import db
import log
import twitter
def main():
    """ Retrieve and store all new tweets from the user home timeline.
    """
    # Logger
    logger = log.file_console_log()
    # Connect to the MongoDB database
    stored_tweets = db.twitter_collection()
    # Retrieve tweets from user timeline and store new ones in database
    # If any error occurs, log it in log file
    try:
        timeline_tweets = twitter.home_timeline()
    except Exception:
        # Python-2-only `except Exception, error` replaced with portable
        # syntax; bare `raise` preserves the original traceback.
        logger.error(traceback.format_exc()[:-1])  # log error
        raise
    else:
        before = db.size(stored_tweets)  # initial collection size
        # Insert each tweet in database
        # If any error occurs, log it in log file
        for tweet in timeline_tweets:
            try:
                db.insert(tweet, stored_tweets)  # insert in mongoDB collection
                # Note: if tweet already in DB, the insertion will fail silently
            except db.DBError:
                logger.error(traceback.format_exc()[:-1])  # log error
                raise
        after = db.size(stored_tweets)  # new collection size
        # log insertion information
        message = "[%s] +%d new, %d stored" % (db.name(stored_tweets),
                                               after - before,
                                               after)
        logger.info(message)

if __name__ == '__main__':
    main()
|
import turtle

# WASD-controlled turtle on a black screen.
trt = turtle.Turtle()
scr = turtle.Screen()
# The premature turtle.listen(xdummy=None, ydummy=None) call was removed:
# scr.listen() at the bottom of the script is the one that matters.
scr.bgcolor("black")
trt.color("white")
def w():
    """Move the turtle 10 units up (heading 90)."""
    trt.seth(90)
    trt.forward(10)
# Register on key *press* only: binding both onkeypress and onkey (release)
# fired the handler twice per tap, moving the turtle double the distance.
scr.onkeypress(w, "w")
def s():
    """Move the turtle 10 units down (heading 270)."""
    trt.seth(270)
    trt.forward(10)
# Press-only binding; the duplicate onkey (release) binding caused a
# double move per tap.
scr.onkeypress(s, "s")
def a():
    """Move the turtle 10 units left.

    Bug fix: 'a' previously used seth(0) (east); left is heading 180.
    """
    trt.seth(180)
    trt.forward(10)
# Press-only binding; the duplicate onkey binding caused a double move.
scr.onkeypress(a, "a")
def d():
    """Move the turtle 10 units right.

    Bug fix: 'd' previously used seth(180) (west); right is heading 0.
    """
    trt.seth(0)
    trt.forward(10)
# Press-only binding; the duplicate onkey binding caused a double move.
scr.onkeypress(d, "d")
scr.listen()  # give the window keyboard focus so the key bindings fire
scr.mainloop()  # enter the Tk event loop (blocks until the window closes)
#! /usr/bin/env python3
"""Example map generator: Woodbox (Block)
This script demonstrates vmflib by generating a map (consisting of a large
empty room) and writing it to "woodbox_block.vmf". You can open the resulting
file using the Valve Hammer Editor and compile it for use in-game.
This example shows off the tools.Block class, which allows for the easy
creation of 3D block brushes. It's pretty awesome.
"""
from vmflib2 import *
from vmflib2.games import base
from vmflib2.types import Vertex
from vmflib2.tools import Block
import math
import colorsys
m = vmf.ValveMap()

walls = []

# Six 64-unit-thick slabs enclosing a hollow 1024^3 cube centered at the origin.
# Floor
walls.append(Block(Vertex(0, 0, -512), (1024, 1024, 64)))
# Ceiling
walls.append(Block(Vertex(0, 0, 512), (1024, 1024, 64)))
# Left wall
walls.append(Block(Vertex(-512, 0, 0), (64, 1024, 1024)))
# Right wall
walls.append(Block(Vertex(512, 0, 0), (64, 1024, 1024)))
# Forward wall
walls.append(Block(Vertex(0, 512, 0), (1024, 64, 1024)))
# Rear wall
walls.append(Block(Vertex(0, -512, 0), (1024, 64, 1024)))

# Set each wall's material
for wall in walls:
    wall.set_material('wood/woodwall009a')

# Add walls to world geometry
m.add_solids(walls)

# Player spawn just above the floor slab.
spawn = base.InfoPlayerStart(m, origin=types.Origin(0, 0, -512 + 32))

# Generate the oblate-spheroid of light_spots that illuminate this room
for x_ang in range(-75,75, 15):
    x_a = math.radians(x_ang + 90)
    for y_ang in range(0, 360, 15):
        y_a = math.radians(y_ang)
        # Spherical-to-cartesian placement on a 256 x 256 x 128 spheroid.
        origin = types.Origin(
            256 * math.cos(y_a) * math.sin(x_a),
            256 * math.sin(y_a) * math.sin(x_a),
            -128 * math.cos(x_a)
        )
        # Get an rgb value from a hsv value
        rgb = colorsys.hsv_to_rgb(y_ang / 360, (x_ang / 180) + 0.5, 1)
        # Convert from [0-1] to [0-255]
        r, g, b = (int(v * 255) for v in rgb)
        light = "{0} {1} {2} 400".format(r, g, b)
        angles = types.Origin(x_ang, y_ang, 0)
        base.LightSpot(m, origin=origin, angles=angles, pitch=x_ang, _light=light)

# Write the map to a file
m.write_vmf('woodbox_block.vmf')
|
#coding: utf-8
import os
from celery.decorators import task
from django.conf import settings
from Corretor.base import CorretorException
from Corretor.base import CompiladorException
from Corretor.chamada_sistema import ChamadaSistema
@task
def run_corretor(*args, **kwargs):
    """Run the corrector via a Celery task.

    Decorating the bound method `corretor.corrigir` directly as a @task
    loses the instance (`self`), so this wrapper receives the corrector
    instance through kwargs and delegates to it.
    """
    corretor = kwargs['corretor']
    return corretor.corrigir(*args, **kwargs)
@task
def run_corretor_validar_gabarito(*args, **kwargs):
    """Celery task that validates an answer-key (gabarito) question.

    The gabarito is valid when it compiles, has an input file, and runs
    cleanly on that input. The outcome is persisted on the question and on
    its correction-result record.
    """
    def res_incorreta(ret):
        # A result is wrong when any step (or system call) finished with a
        # nonzero, non-None return code.
        for res in ret:
            if isinstance(res, ChamadaSistema):
                res = res.returncode
            if res != 0 and res != None:
                return True
        return False

    corretor = kwargs['corretor']
    gabarito = kwargs['questao']
    verificada = False
    erroException = None
    try:
        ret_compilar_gabarito = corretor.compilar_completo(questao=gabarito)
        # Stop right away on a compilation error, reporting what failed.
        if res_incorreta(ret_compilar_gabarito):
            raise CompiladorException("Erro na compilação: %s" % ret_compilar_gabarito[1].output)
        entrada_gabarito = os.path.join(settings.MEDIA_ROOT, str(gabarito.get_rand_entrada()))
        ret_executar_gabarito = corretor.executar_completo(questao=gabarito, entrada_gabarito=entrada_gabarito)
        if not res_incorreta(ret_executar_gabarito):
            verificada = True
    except CorretorException as e:
        # Python-2-only `except CorretorException,e` replaced with
        # portable `as` syntax.
        erroException = e
    finally:
        # Re-fetch the question to avoid clobbering concurrent updates,
        # then persist both the flag and the correction result.
        gabarito_new = gabarito.__class__.objects.get(pk=gabarito.pk)
        gabarito_new.verificada = verificada
        retorno_correcao = gabarito_new.get_retorno_or_create
        retorno_correcao.altera_dados(sucesso=verificada, erroException=erroException)
        retorno_correcao.save()
        gabarito_new.save(verificar=False)
class MyException(Exception):
    """Raised by the demo tasks below when both operands are equal."""
class Teste(object):
    """Toy Celery tasks used to exercise task error handling."""

    @task()
    def add(self, x, y):
        """Return x + y; raise MyException when the operands are equal."""
        if x != y:
            return x + y
        raise MyException("FUUUU")

    @task()
    def add2(self, **kwargs):
        """Same contract as add, but operands arrive as keyword arguments."""
        x, y = kwargs.pop('x'), kwargs.pop('y')
        if x != y:
            return x + y
        raise MyException("FUUUU")
|
import torch
from torch import nn as nn
from transformers import BertConfig
from transformers import BertModel
from transformers import BertPreTrainedModel
from spert import sampling
from spert import util
def get_token(h: torch.tensor, x: torch.tensor, token: int):
    """ Get specific token embedding (e.g. [CLS]) """
    emb_size = h.shape[-1]

    # Flatten batch and sequence dims, then select the rows whose token id
    # matches — result is (num_matches, emb_size).
    flat_ids = x.contiguous().view(-1)
    flat_h = h.view(-1, emb_size)
    return flat_h[flat_ids == token, :]
class SpERT(BertPreTrainedModel):
    """ Span-based model to jointly extract entities and relations """

    VERSION = '1.1'

    def __init__(self, config: BertConfig, cls_token: int, relation_types: int, entity_types: int,
                 size_embedding: int, prop_drop: float, freeze_transformer: bool, max_pairs: int = 100):
        super(SpERT, self).__init__(config)

        # BERT model
        self.bert = BertModel(config)

        # layers
        # Relation head input: [pooled ctx; head span; tail span] + two size embeddings.
        self.rel_classifier = nn.Linear(config.hidden_size * 3 + size_embedding * 2, relation_types)
        # Entity head input: [CLS ctx; pooled span] + one size embedding.
        self.entity_classifier = nn.Linear(config.hidden_size * 2 + size_embedding, entity_types)
        # Embeds span widths; widths >= 100 are not representable here.
        self.size_embeddings = nn.Embedding(100, size_embedding)
        self.dropout = nn.Dropout(prop_drop)

        self._cls_token = cls_token
        self._relation_types = relation_types
        self._entity_types = entity_types
        self._max_pairs = max_pairs  # relation candidates scored per chunk

        # weight initialization
        self.init_weights()

        if freeze_transformer:
            print("Freeze transformer weights")
            # freeze all transformer weights
            for param in self.bert.parameters():
                param.requires_grad = False

    def _forward_train(self, encodings: torch.tensor, context_masks: torch.tensor, entity_masks: torch.tensor,
                       entity_sizes: torch.tensor, relations: torch.tensor, rel_masks: torch.tensor):
        """Training forward pass: return raw entity and relation logits."""
        # get contextualized token embeddings from last transformer layer
        context_masks = context_masks.float()
        h = self.bert(input_ids=encodings, attention_mask=context_masks)['last_hidden_state']

        batch_size = encodings.shape[0]

        # classify entities
        size_embeddings = self.size_embeddings(entity_sizes)  # embed entity candidate sizes
        entity_clf, entity_spans_pool = self._classify_entities(encodings, h, entity_masks, size_embeddings)

        # classify relations
        h_large = h.unsqueeze(1).repeat(1, max(min(relations.shape[1], self._max_pairs), 1), 1, 1)
        rel_clf = torch.zeros([batch_size, relations.shape[1], self._relation_types]).to(
            self.rel_classifier.weight.device)

        # obtain relation logits
        # chunk processing to reduce memory usage
        for i in range(0, relations.shape[1], self._max_pairs):
            # classify relation candidates
            chunk_rel_logits = self._classify_relations(entity_spans_pool, size_embeddings,
                                                        relations, rel_masks, h_large, i)
            rel_clf[:, i:i + self._max_pairs, :] = chunk_rel_logits

        return entity_clf, rel_clf

    def _forward_eval(self, encodings: torch.tensor, context_masks: torch.tensor, entity_masks: torch.tensor,
                      entity_sizes: torch.tensor, entity_spans: torch.tensor, entity_sample_masks: torch.tensor):
        """Inference forward pass: return entity probabilities (softmax),
        relation probabilities (sigmoid) and the filtered candidate pairs."""
        # get contextualized token embeddings from last transformer layer
        context_masks = context_masks.float()
        h = self.bert(input_ids=encodings, attention_mask=context_masks)['last_hidden_state']

        batch_size = encodings.shape[0]
        ctx_size = context_masks.shape[-1]

        # classify entities
        size_embeddings = self.size_embeddings(entity_sizes)  # embed entity candidate sizes
        entity_clf, entity_spans_pool = self._classify_entities(encodings, h, entity_masks, size_embeddings)

        # ignore entity candidates that do not constitute an actual entity for relations (based on classifier)
        relations, rel_masks, rel_sample_masks = self._filter_spans(entity_clf, entity_spans,
                                                                    entity_sample_masks, ctx_size)

        rel_sample_masks = rel_sample_masks.float().unsqueeze(-1)
        h_large = h.unsqueeze(1).repeat(1, max(min(relations.shape[1], self._max_pairs), 1), 1, 1)
        rel_clf = torch.zeros([batch_size, relations.shape[1], self._relation_types]).to(
            self.rel_classifier.weight.device)

        # obtain relation logits
        # chunk processing to reduce memory usage
        for i in range(0, relations.shape[1], self._max_pairs):
            # classify relation candidates
            chunk_rel_logits = self._classify_relations(entity_spans_pool, size_embeddings,
                                                        relations, rel_masks, h_large, i)
            # apply sigmoid
            chunk_rel_clf = torch.sigmoid(chunk_rel_logits)
            rel_clf[:, i:i + self._max_pairs, :] = chunk_rel_clf

        rel_clf = rel_clf * rel_sample_masks  # mask

        # apply softmax
        entity_clf = torch.softmax(entity_clf, dim=2)

        return entity_clf, rel_clf, relations

    def _classify_entities(self, encodings, h, entity_masks, size_embeddings):
        """Score every span candidate; also return the pooled span embeddings."""
        # max pool entity candidate spans
        # masked-out positions get -1e30 so they never win the max
        m = (entity_masks.unsqueeze(-1) == 0).float() * (-1e30)
        entity_spans_pool = m + h.unsqueeze(1).repeat(1, entity_masks.shape[1], 1, 1)
        entity_spans_pool = entity_spans_pool.max(dim=2)[0]

        # get cls token as candidate context representation
        entity_ctx = get_token(h, encodings, self._cls_token)

        # create candidate representations including context, max pooled span and size embedding
        entity_repr = torch.cat([entity_ctx.unsqueeze(1).repeat(1, entity_spans_pool.shape[1], 1),
                                 entity_spans_pool, size_embeddings], dim=2)
        entity_repr = self.dropout(entity_repr)

        # classify entity candidates
        entity_clf = self.entity_classifier(entity_repr)

        return entity_clf, entity_spans_pool

    def _classify_relations(self, entity_spans, size_embeddings, relations, rel_masks, h, chunk_start):
        """Score one chunk (at most _max_pairs) of relation candidates."""
        batch_size = relations.shape[0]

        # create chunks if necessary
        if relations.shape[1] > self._max_pairs:
            relations = relations[:, chunk_start:chunk_start + self._max_pairs]
            rel_masks = rel_masks[:, chunk_start:chunk_start + self._max_pairs]
            h = h[:, :relations.shape[1], :]

        # get pairs of entity candidate representations
        entity_pairs = util.batch_index(entity_spans, relations)
        entity_pairs = entity_pairs.view(batch_size, entity_pairs.shape[1], -1)

        # get corresponding size embeddings
        size_pair_embeddings = util.batch_index(size_embeddings, relations)
        size_pair_embeddings = size_pair_embeddings.view(batch_size, size_pair_embeddings.shape[1], -1)

        # relation context (context between entity candidate pair)
        # mask non entity candidate tokens
        m = ((rel_masks == 0).float() * (-1e30)).unsqueeze(-1)
        rel_ctx = m + h
        # max pooling
        rel_ctx = rel_ctx.max(dim=2)[0]
        # set the context vector of neighboring or adjacent entity candidates to zero
        rel_ctx[rel_masks.to(torch.uint8).any(-1) == 0] = 0

        # create relation candidate representations including context, max pooled entity candidate pairs
        # and corresponding size embeddings
        rel_repr = torch.cat([rel_ctx, entity_pairs, size_pair_embeddings], dim=2)
        rel_repr = self.dropout(rel_repr)

        # classify relation candidates
        chunk_rel_logits = self.rel_classifier(rel_repr)
        return chunk_rel_logits

    def _filter_spans(self, entity_clf, entity_spans, entity_sample_masks, ctx_size):
        """Build directed relation candidates from spans the entity
        classifier assigned a non-'none' type."""
        batch_size = entity_clf.shape[0]
        entity_logits_max = entity_clf.argmax(dim=-1) * entity_sample_masks.long()  # get entity type (including none)
        batch_relations = []
        batch_rel_masks = []
        batch_rel_sample_masks = []

        for i in range(batch_size):
            rels = []
            rel_masks = []
            sample_masks = []

            # get spans classified as entities
            non_zero_indices = (entity_logits_max[i] != 0).nonzero().view(-1)
            non_zero_spans = entity_spans[i][non_zero_indices].tolist()
            non_zero_indices = non_zero_indices.tolist()

            # create relations and masks
            for i1, s1 in zip(non_zero_indices, non_zero_spans):
                for i2, s2 in zip(non_zero_indices, non_zero_spans):
                    if i1 != i2:
                        rels.append((i1, i2))
                        rel_masks.append(sampling.create_rel_mask(s1, s2, ctx_size))
                        sample_masks.append(1)

            if not rels:
                # case: no more than two spans classified as entities
                # (placeholder pair keeps tensor shapes valid; masked out below)
                batch_relations.append(torch.tensor([[0, 0]], dtype=torch.long))
                batch_rel_masks.append(torch.tensor([[0] * ctx_size], dtype=torch.bool))
                batch_rel_sample_masks.append(torch.tensor([0], dtype=torch.bool))
            else:
                # case: more than two spans classified as entities
                batch_relations.append(torch.tensor(rels, dtype=torch.long))
                batch_rel_masks.append(torch.stack(rel_masks))
                batch_rel_sample_masks.append(torch.tensor(sample_masks, dtype=torch.bool))

        # stack
        device = self.rel_classifier.weight.device
        batch_relations = util.padded_stack(batch_relations).to(device)
        batch_rel_masks = util.padded_stack(batch_rel_masks).to(device)
        batch_rel_sample_masks = util.padded_stack(batch_rel_sample_masks).to(device)

        return batch_relations, batch_rel_masks, batch_rel_sample_masks

    def forward(self, *args, evaluate=False, **kwargs):
        """Dispatch to the training or evaluation forward pass."""
        if not evaluate:
            return self._forward_train(*args, **kwargs)
        else:
            return self._forward_eval(*args, **kwargs)
# Model access
# Registry mapping config model-name strings to model classes.
_MODELS = {
    'spert': SpERT,
}
def get_model(name):
    """Look up a registered model class by name (raises KeyError if unknown)."""
    model_class = _MODELS[name]
    return model_class
|
from .celery import hello_world
def require_channel(slack_event_json):
    """
    Require a channel be present in the JSON from the Events API
    :params dict slack_event_json: The JSON from the events API
    :rtype: bool (False) or str
    """
    # No event, or an event without channel info (e.g. mentions in channels
    # the bot isn't in), yields False so we never reply blind.
    event = slack_event_json.get('event')
    if event is None:
        return False
    return event.get('channel') or False
def hello_world_rule(slack_event_json):
    """
    If a channel is present, True, else False
    """
    # BUG FIX: require_channel returns False (never None) on failure, so the
    # old `is None` test made this rule unconditionally True. Treat any
    # falsy result as "no channel".
    return bool(require_channel(slack_event_json))
#: A list of tuples, where the first element is a conditional function
#: that should accept the JSON from the slack event API as the only arg,
#: and the second element is a celery task function which should accept
#: the JSON from the slack event API as the only arg.
rule_list = [
    (hello_world_rule, hello_world)
]
|
# coding: utf-8
# https://github.com/usnistgov/yabadaba
from yabadaba.tools import ModuleManager
databasemanager = ModuleManager('Database')
#from yabadaba import databasemanager as coredatabasemanager
# Local imports
from .reset_orphans import reset_orphans
from .prepare import prepare
from .master_prepare import master_prepare
from .runner import runner, RunManager
from .IprPyDatabase import IprPyDatabase
from .load_database import load_database
# Public API of this subpackage.
__all__ = sorted(['Database', 'databasemanager', 'load_database', 'runner',
                  'RunManager', 'reset_orphans', 'prepare', 'master_prepare'])

# Register the database styles with the manager (imported lazily by module path).
databasemanager.import_style('local', '.LocalDatabase', __name__)
databasemanager.import_style('mongo', '.MongoDatabase', __name__)
databasemanager.import_style('cdcs', '.CDCSDatabase', __name__)
# Upgrade LocalDatabase to an IprPyDatabase
#if 'local' in coredatabasemanager.loaded_styles:
# class LocalDatabase(coredatabasemanager.loaded_styles['local'], IprPyDatabase):
# pass
# databasemanager.loaded_styles['local'] = LocalDatabase
#else:
# databasemanager.failed_styles['local'] = coredatabasemanager.failed_styles['local']
# Upgrade MongoDatabase to an IprPyDatabase
#if 'mongo' in coredatabasemanager.loaded_styles:
# class MongoDatabase(coredatabasemanager.loaded_styles['mongo'], IprPyDatabase):
# pass
# databasemanager.loaded_styles['mongo'] = MongoDatabase
#else:
# databasemanager.failed_styles['mongo'] = coredatabasemanager.failed_styles['mongo']
# Upgrade CDCSDatabase to an IprPyDatabase
#if 'cdcs' in coredatabasemanager.loaded_styles:
# class CDCSDatabase(coredatabasemanager.loaded_styles['cdcs'], IprPyDatabase):
# pass
# databasemanager.loaded_styles['cdcs'] = CDCSDatabase
#else:
# databasemanager.failed_styles['cdcs'] = coredatabasemanager.failed_styles['cdcs'] |
import os
import ray
import logging
import hydra
from hydra.utils import get_original_cwd
import numpy as np
import torch
from torchvision import datasets, transforms
from torchfly.training.trainer import Trainer
from model import get_model
from dataloader import get_data_loader
logger = logging.getLogger(__name__)
def train_loader_fn(config):
    """Factory handed to Trainer so it can (re)build the training loader."""
    return get_data_loader(config)
@hydra.main(config_path="config/config.yaml", strict=False)
def main(config=None):
    """Hydra entry point: build the model and loaders, then run training."""
    # set data loader
    # NOTE(review): val_loader is built but the Trainer receives
    # validation_loader=None — confirm whether validation was meant to be
    # wired in here.
    val_loader = get_data_loader(config, evaluate=True)
    model = get_model()
    trainer = Trainer(config=config, model=model, validation_loader=None, train_loader_fn=train_loader_fn)
    trainer.train()

if __name__ == "__main__":
    main()
|
class Solution(object):
    def spiralOrder(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[int]
        """
        # BUG FIX: the previous hand-rolled boundary bookkeeping here was
        # broken (e.g. `row = col - 1`) and littered with debug prints;
        # delegate to the correct boundary-shrinking implementation below.
        return self.spiral_order(matrix)

    def spiral_order(self, matrix):
        """
        Return the elements of a non-empty rectangular matrix in clockwise
        spiral order, starting from the top-left corner.
        """
        # k: current top row, m: current left column
        k, m = 0, 0
        last_row = len(matrix) - 1
        last_col = len(matrix[0]) - 1
        spiral = []
        while k <= last_row and m <= last_col:
            # top edge: left to right
            for i in range(m, last_col + 1):
                spiral.append(matrix[k][i])
            # shrink from the top
            k += 1
            # right edge: top to bottom
            for i in range(k, last_row + 1):
                spiral.append(matrix[i][last_col])
            last_col -= 1
            if k <= last_row:
                # bottom edge: right to left
                for i in range(last_col, m - 1, -1):
                    spiral.append(matrix[last_row][i])
                last_row -= 1
            if m <= last_col:
                # left edge: bottom to top
                for i in range(last_row, k - 1, -1):
                    spiral.append(matrix[i][m])
                m += 1
        return spiral
# Ad-hoc smoke test: compare against the known spiral of a 4x4 matrix.
matrix = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16]
]
# print(matrix[1][2])
output = [1, 2, 3, 4, 8, 12, 16, 15, 14, 13, 9, 5, 6, 7, 11, 10]
obj = Solution()
result = obj.spiral_order(matrix)
print(f"output -> {result}")
print(output == result)
|
# Alvin Radoncic
# CS-110-A
# Quiz Two Part Two
# I pledge my Honor that I have abided by the Stevens Honor System
def addition(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total

def subtraction(x, y):
    """Return x minus y."""
    difference = x - y
    return difference

def multiplication(x, y):
    """Return the product of x and y."""
    product = x * y
    return product

def division(x, y):
    """Return x divided by y (true division)."""
    quotient = x / y
    return quotient
def vowels(z):
    """Count the vowels (a, e, i, o, u, any case) in the string z."""
    lowered = z.lower()
    return sum(lowered.count(vowel) for vowel in "aeiou")
def encryption(z):
    """Print an "encrypted" form of z: each character's code point squared,
    then halved, all on one line."""
    print("\nHere is your encrypted message: ")
    for character in z:
        code = ord(character)
        print("", code ** 2 / 2, end="")
def main():
    """Text-menu driver: math operations (menu 1) or string operations (menu 2)."""
    Main_Menu = float(input("For Mathematical Functions, Please Enter the Number 1\nFor String Operations, Please Enter the Number 2\n"))
    if Main_Menu == 1:
        # Math sub-menu: pick an operation, then read two operands.
        Math_Menu = float(input("\nFor Addition, Please Enter the Number 1\nFor Subtraction, Please Enter the Number 2\nFor Multiplication, Please Enter the Number 3\nFor Division, Please Enter the Number 4\n"))
        if Math_Menu == 1:
            num1 = float(input("Enter your first number: "))
            num2 = float(input("Enter your second number: "))
            print(addition(num1, num2))
        elif Math_Menu == 2:
            num1 = float(input("Enter your first number: "))
            num2 = float(input("Enter your second number: "))
            print(subtraction(num1, num2))
        elif Math_Menu == 3:
            num1 = float(input("Enter your first number: "))
            num2 = float(input("Enter your second number: "))
            print(multiplication(num1, num2))
        elif Math_Menu == 4:
            num1 = float(input("Enter your first number: "))
            num2 = float(input("Enter your second number: "))
            print(division(num1, num2))
        else:
            print("\nError: Please input one of the given options.")
    elif Main_Menu == 2:
        # String sub-menu: vowel count or "encryption".
        String_Menu = float(input("\nTo Determine the Number of Vowels in a String; Enter the Number 1\nTo Encrypt a String; Enter the Number 2\n"))
        if String_Menu == 1:
            string = input("Enter a string: ")
            print()
            print(vowels(string))
        elif String_Menu == 2:
            string = input("Enter a string: ")
            print()
            encryption(string)
        else:
            print("\nError: Please input one of the given options.")
    else:
        print("\nError: Please input one of the given options.")

main()
|
from ..FeatureExtractor import FeatureExtractor
from common_functions.Example_Methods import Example_Methods
class example_extractor(FeatureExtractor,Example_Methods):
    """ Just an example extractor skeleton. For full example, see:
    http://lyra.berkeley.edu/dokuwiki/doku.php?id=tcp:feature_testing
    """
    # Not exposed outside the extraction pipeline.
    internal_use_only = True
    # Whether this extractor runs.
    active = True
    extname = 'example' # identifier used in final extracted value dict.
    def extract(self):
        """Return sum(flux) divided by the median flux of the time curve."""
        # NOTE(review): ls_result_dict is unused below — presumably fetched
        # for its side effects; confirm before removing.
        ls_result_dict = self.fetch_extr('lomb_scargle')
        median_val = self.fetch_extr('median') # fetches the result from the media extractor (median_val is now the media value of the timecurve)
        summed_val = self.example_main_method(self.flux_data,lambda x: median_val,x=self.time_data,rms=self.rms_data) # returns sum(self.flux_data)
        return float(summed_val)/median_val
|
# This is the main testing script that we should be able to run to grade
# your model training for the assignment.
# You can create whatever additional modules and helper scripts you need,
# as long as all the training functionality can be reached from this script.
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from numpy import array
from sklearn.decomposition import PCA
import mycoco
# Do not use GPU for testing (it is probably busy training)
from os import environ
print("Disabling GPU")
environ['CUDA_VISIBLE_DEVICES'] = '-1'
from argparse import ArgumentParser
from keras import Model
from keras.models import load_model
def autoencoder_generator(iterator, batch_size):
    """
    Turns iterator of tuple(image, category) into generator of batch(image)
    """
    # Endlessly pull batch_size samples, keeping only the image payload
    # (sample[0][0]) and dropping the category.
    while True:
        yield array([next(iterator)[0][0] for _ in range(batch_size)])
def opt_a():
    """
    Option A - Convolutional image autoencoder
    """
    mycoco.setmode('test')

    # Load model
    model: Model = load_model(args.modelfile)
    # Sub-model exposing the bottleneck ("encoder") layer's output.
    encoder = Model(inputs=model.input, outputs=model.get_layer("encoder").output)

    # Load image iterator, limit to args.maxinstances per category list
    image_id_lists = mycoco.query(args.categories)
    if args.maxinstances is not None:
        image_id_lists = list(map(lambda list: list[:args.maxinstances], image_id_lists))

    pyplot.figure(figsize=[6, 6])
    for image_id_list in image_id_lists:
        image_count = len(image_id_list)
        image_iter = mycoco.iter_images([image_id_list], args.categories)
        # Create predictions for images
        batch_size = 1
        generator = autoencoder_generator(image_iter, batch_size)
        encoder_prediction = encoder.predict_generator(generator, steps=image_count / batch_size)
        # Reduce dimensionality with PCA
        reshaped_predictions = encoder_prediction.reshape((encoder_prediction.shape[0], -1))
        pca = PCA(n_components=2)
        pca_predictions = pca.fit_transform(reshaped_predictions)
        # Plot values (one scatter series per category)
        pyplot.scatter(pca_predictions[:, 0], pca_predictions[:, 1])
    pyplot.title('Clustering')
    pyplot.legend(args.categories)
    pyplot.savefig('cluster.svg', format='svg')
def opt_b():
    """
    Option B - Multi-task caption predictor/classifier
    """
    mycoco.setmode('test')
    # Placeholder: this assignment option was not implemented.
    print("Option B not implemented!")
if __name__ == "__main__":
    parser = ArgumentParser("Evaluate a model.")
    # Add your own options as flags HERE as necessary (and some will be necessary!).
    # You shouldn't touch the arguments below.
    parser.add_argument('-P', '--option', type=str,
                        help="Either A or B, based on the version of the assignment you want to run. (REQUIRED)",
                        required=True)
    parser.add_argument('-m', '--maxinstances', type=int,
                        help="The maximum number of instances to be processed per category. (optional)",
                        required=False)
    parser.add_argument('modelfile', type=str, help="model file to evaluate")
    parser.add_argument('categories', metavar='cat', type=str, nargs='+', help='COCO category labels')
    args = parser.parse_args()
    print("Output model in " + args.modelfile)
    print("Maximum instances is " + str(args.maxinstances))
    print("Executing option " + args.option)
    # Dispatch on the chosen assignment option.
    if args.option == 'A':
        opt_a()
    elif args.option == 'B':
        opt_b()
    else:
        print("Option does not exist.")
        exit(0)
|
import random
import re
from flask import Flask, request
import telegram
from telebot.credentials import bot_token, bot_user_name,URL
# NOTE: `global` statements at module level are no-ops — plain assignments
# already create module globals, so the dead `global bot` / `global TOKEN`
# lines were removed.
TOKEN = bot_token
bot = telegram.Bot(token=TOKEN)
def is_number_regex(s):
    """ Returns True is string is a number. """
    # Decimal of the form digits.digits matches the regex; otherwise fall
    # back to a plain integer check.
    if re.match("^\d+?\.\d+?$", s) is not None:
        return True
    return s.isdigit()
app = Flask(__name__)
@app.route('/{}'.format(TOKEN), methods=['POST'])
def respond():
    """Telegram webhook: greet on /start, send a random quote on 'random',
    otherwise echo the message reversed."""
    # retrieve the message in JSON and then transform it to Telegram object
    update = telegram.Update.de_json(request.get_json(force=True), bot)
    chat_id = update.message.chat.id
    msg_id = update.message.message_id
    # Telegram understands UTF-8, so encode text for unicode compatibility
    text = update.message.text.encode('utf-8').decode()
    # for debugging purposes only
    print("got text message :", text)
    # the first time you chat with the bot AKA the welcoming message
    if text == "/start":
        # print the welcoming message
        bot_welcome = """
        Hello, I'm Matheus's robot.
        Say ´random´ to get a random quote, or say anything to get the backwards.
        """
        # send the welcoming message
        bot.sendMessage(chat_id=chat_id, text=bot_welcome, reply_to_message_id=msg_id)
    elif text.lower() == "random":
        # BUG FIX: the list used to be seeded with a stray `[6]`, so index 0
        # could send the literal number 6 and the last quote was unreachable.
        bot_text = [
            "What you get by achieving your goals is not as important as what you become by achieving your goals",
            "Live as if you were to die tomorrow. Learn as if you were to live forever",
            "We may affirm absolutely that nothing great in the world has been accomplished without passion",
            "Obstacles are those frightful things you see when you take your eyes off the goal",
            "The way to get started is to quit talking and begin doing",
            "If music be the food of love, play on",
        ]
        i = random.randint(0, len(bot_text) - 1)
        bot.sendMessage(chat_id=chat_id, text=bot_text[i], reply_to_message_id=msg_id)
    else:
        # Echo the message reversed.
        resp = ""
        for ch in text:
            resp = ch + resp
        bot.sendMessage(chat_id=chat_id, text=resp, reply_to_message_id=msg_id)
    return 'ok'
@app.route('/set_webhook', methods=['GET', 'POST'])
def set_webhook():
    """Register this service's URL with Telegram and report the outcome."""
    registered = bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=TOKEN))
    return "webhook setup ok" if registered else "webhook setup failed"
@app.route('/')
def index():
    """Minimal keep-alive / health-check endpoint."""
    return '.'
if __name__ == '__main__':
    # threaded=True lets Flask's dev server serve overlapping webhook requests.
    app.run(threaded=True)
|
from pydash import map_, find
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import NoSuchElementException
def main():
    """Open the Claro AR landing page and print the footer link texts."""
    locators = {
        "store": (By.CSS_SELECTOR, "a[title='Tienda']"),
        "packs": (By.CSS_SELECTOR, "a[title='Packs Ya']"),
        "change": (By.CSS_SELECTOR, "a[title='Pasate a Claro']"),
        "footer_links": (By.CSS_SELECTOR, "li[class*='footer__item__links__item'] a"),
    }
    browser = create_chrome_driver()
    navigate_to(browser, 'https://www.claro.com.ar/personas')
    footer_selector = locators['footer_links']
    wait_until_all_elements_visible(browser, footer_selector)
    print(get_all_text_elements(browser, footer_selector))
def create_chrome_driver(headless=False):
    """Create a Chrome webdriver in incognito mode.

    Args:
        headless: run without a visible window (adds sandbox/GPU flags too).
    """
    options = Options()
    options.add_argument("--incognito")
    options.add_argument("--start-maximized")
    if headless:
        for flag in ('--no-sandbox', '--window-size=1300,1080',
                     '--headless', '--disable-gpu'):
            options.add_argument(flag)
    # Fix: Selenium 4 removed the `desired_capabilities` keyword; the Options
    # object is passed directly instead of via options.to_capabilities().
    return webdriver.Chrome(options=options)
def navigate_to(driver, url):
    """Point the browser at `url` (thin wrapper over driver.get)."""
    driver.get(url)
def get_element(driver, selector, wait_located=True, timeout=30):
    """Return the first element matching `selector`.

    With wait_located=True, block up to `timeout` seconds for the element to
    be present; otherwise look it up immediately.
    """
    by, value = selector
    try:
        if not wait_located:
            return driver.find_element(by, value)
        return WebDriverWait(driver, timeout).until(
            ec.presence_of_element_located(selector))
    except NoSuchElementException:
        raise NoSuchElementException(f"Could not find element by the locator: {str(selector)}")
def get_elements(driver, selector, wait_located=True, timeout=30):
    """Return all elements matching `selector`.

    With wait_located=True, block up to `timeout` seconds for the elements to
    be present; otherwise look them up immediately.
    """
    by, value = selector
    try:
        if not wait_located:
            return driver.find_elements(by, value)
        return WebDriverWait(driver, timeout).until(
            ec.presence_of_all_elements_located(selector))
    except NoSuchElementException:
        raise NoSuchElementException(f"Could not find element by the locator: {str(selector)}")
# WAIT UNTIL ALL ELEMENTS EXISTS
def wait_until_exist_all_elements(driver, selector, timeout=20):
    """Block until every element matching `selector` is present in the DOM."""
    return WebDriverWait(driver, timeout).until(
        ec.presence_of_all_elements_located(selector))
# WAIT UNTIL ALL ELEMENTS VISIBLE
def wait_until_all_elements_visible(driver, selector, timeout=20):
    """Block until every element matching `selector` is visible."""
    return WebDriverWait(driver, timeout).until(
        ec.visibility_of_all_elements_located(selector))
# WAIT UNTIL ALL ELEMENTS NOT VISIBLE
def wait_until_all_element_not_visible(driver, selector, timeout=20):
    """Block until no element matching `selector` is present in the DOM.

    Fix: the original passed `not ec.presence_of_all_elements_located(...)`
    to `wait.until`. `not <condition object>` evaluates immediately to False,
    and `until(False)` then tries to call False(driver), raising TypeError on
    every invocation. `until_not` polls the condition properly.
    """
    wait = WebDriverWait(driver, timeout)
    # NOTE(review): the name says "not visible" but the condition (kept from
    # the original) checks presence, not visibility — confirm intent.
    return wait.until_not(ec.presence_of_all_elements_located(selector))
# GET ALL TEXT ELEMENTS
def get_all_text_elements(driver, selector, text_sanitize=True, exist_only=False):
    """Collect the text of every element matched by `selector`.

    Args:
        driver: webdriver instance.
        selector: (By, value) locator tuple.
        text_sanitize: lowercase and strip each text when True.
        exist_only: skip the visibility wait and read whatever is in the DOM.

    Improvement: the pydash `map_` call was replaced with a plain list
    comprehension — no third-party dependency is needed here.
    """
    if exist_only:
        elements = driver.find_elements(selector[0], selector[1])
    else:
        elements = wait_until_all_elements_visible(driver, selector)
    if text_sanitize:
        return [element.text.lower().strip() for element in elements]
    return [element.text for element in elements]
# FROM ARRAY OF ELEMENTS
def click_with_index(driver, selector, index):
    """Click the index-th visible element matched by `selector`."""
    visible = wait_until_all_elements_visible(driver, selector)
    visible[index].click()
# FROM ARRAY OF ELEMENTS
def click_with_text(driver, selector, text, exist_only=False, *args):
    """Click the first matched element whose (sanitized) text equals `text`.

    Args:
        exist_only: skip the presence wait and read whatever is in the DOM.
        *args: unused; kept for call-site compatibility.

    Raises:
        Exception: when no element's text matches.

    Improvement: the pydash `find` call was replaced with the stdlib
    `next(..., None)` idiom — no third-party dependency is needed here.
    """
    if exist_only:
        elements = driver.find_elements(selector[0], selector[1])
    else:
        elements = wait_until_exist_all_elements(driver, selector)
    wanted = _sanitize_text(text)
    element = next((el for el in elements if _sanitize_text(el.text) == wanted), None)
    if element is None:
        raise Exception('not exist element')
    element.click()
# FROM ELEMENT
def wait_until_is_visible(driver, selector, timeout=20):
    """Block until the given element is visible.

    NOTE(review): despite the parameter name, `ec.visibility_of` expects an
    already-located WebElement, not a locator tuple — confirm call sites.
    """
    return WebDriverWait(driver, timeout).until(ec.visibility_of(selector))
# FROM ELEMENT
def wait_until_is_not_visible(driver, selector, timeout=20):
    """Block until the given element is invisible or gone."""
    return WebDriverWait(driver, timeout).until(ec.invisibility_of_element(selector))
def wait_until_exist(driver, selector, timeout=20):
    """Block until an element matching `selector` is present in the DOM."""
    return WebDriverWait(driver, timeout).until(ec.presence_of_element_located(selector))
# FROM ELEMENT
def wait_until_not_exist(driver, selector, timeout=20):
    """Block until no element matching `selector` is present in the DOM."""
    return WebDriverWait(driver, timeout).until_not(ec.presence_of_element_located(selector))
def get_element_into_view(driver, selector=None, timeout=10):
    """Scroll the element matched by `selector` into view.

    Fix: the original called `wait_until_exist(driver, timeout)`, passing the
    timeout where the selector belongs, so every call failed. `selector` is
    now an explicit parameter (optional, to keep the positional signature
    backward-compatible; passing None raises immediately with a clear error).
    """
    if selector is None:
        raise ValueError('get_element_into_view requires a selector')
    element = wait_until_exist(driver, selector, timeout)
    if element:
        driver.execute_script('arguments[0].scrollIntoView(true);', element)
# FROM ELEMENT
def _sanitize_text(text):
return text.lower().strip()
def take_screenshot(driver, screen_shot_name):
    """Save a screenshot into the temp/ directory under the given file name."""
    target = f'temp/{screen_shot_name}'
    driver.save_screenshot(target)
if __name__ == '__main__':
    # Fix: the original invoked main() twice, repeating the entire browser
    # session; run the smoke check once.
    main()
|
# Read T independent test cases from stdin. Each case: N, then N lines of
# "name score"; print the name holding the highest score.
T = int(input())
for t in range(T):
    N = int(input())
    ss = {}  # name -> score; a repeated name keeps its last score
    for n in range(N):
        s = list(input().split())
        ss[s[0]] = int(s[1])
    # max() returns the first key (in insertion order) with the maximal score.
    print(max(ss.keys(), key=ss.get))
|
def expower(powr, num):
    """Return num raised to the integer power `powr`; 1 whenever powr < 1."""
    result = 1
    # Iterative form of the original recursion; range(powr) is empty for
    # powr < 1, which reproduces the original base case exactly.
    for _ in range(powr):
        result *= num
    return result
print(expower(3, 5))
print()
print()
mylist = [-4, -6, -5, -1, 2, 3, 7, 9, 88]
print(mylist)
# Print only the positive entries, one per line (same output as the original
# lambda-per-iteration version, without the side-effecting lambda).
for value in mylist:
    if value > 0:
        print(value)
|
from django.apps import AppConfig
class MobileWsConfig(AppConfig):
    """Django application configuration for the Mobile_WS app."""

    # App label as referenced from INSTALLED_APPS.
    name = 'Mobile_WS'
|
from sqlalchemy import (
Column,
String,
Boolean
)
from db.database import Base
class User(Base):
    """SQLAlchemy ORM model for an application user account."""

    __tablename__ = 'users'

    # Natural primary key; indexed for fast login lookups.
    username = Column(String, primary_key=True, index=True)
    # Password hash only — plaintext passwords are never stored.
    hashed_password = Column(String, nullable=False)
    # Admin flag; no default, so callers must set it explicitly on creation.
    is_admin = Column(Boolean, nullable=False)
|
import pytest
from torchvision._utils import sequence_to_str
@pytest.mark.parametrize(
    ("seq", "separate_last", "expected"),
    [
        # empty / single / pair / triple, with and without a final separator
        ([], "", ""),
        (["foo"], "", "'foo'"),
        (["foo", "bar"], "", "'foo', 'bar'"),
        (["foo", "bar"], "and ", "'foo' and 'bar'"),
        (["foo", "bar", "baz"], "", "'foo', 'bar', 'baz'"),
        (["foo", "bar", "baz"], "and ", "'foo', 'bar', and 'baz'"),
    ],
)
def test_sequence_to_str(seq, separate_last, expected):
    """sequence_to_str quotes items, comma-joins them, and applies
    `separate_last` before the final item (Oxford comma for 3+ items)."""
    assert sequence_to_str(seq, separate_last=separate_last) == expected
|
# Build a rooted tree from stdin: node 1 is the root; nodes 2..T each supply
# their parent on its own line.
T = int(input())
childs = [[] for _ in range(T+1)]  # childs[i]: children of node i (1-based)
for i in range(2, T+1):
    par = int(input())
    childs[par].append(i)
def dfs(x: int):
    """Return the maximum, over descendants of x, of the sum of fan-outs
    (number of children) accumulated along the path down from x."""
    fanout = len(childs[x])
    best = 0
    for child in childs[x]:
        best = max(best, dfs(child) + fanout)
    return best


# NOTE(review): plain recursion — may hit the default recursion limit on very
# deep trees.
print(dfs(1))
|
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhi Hou, based on code from Transferable-Interactiveness-Network, Chen Gao, Zheqi he and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import tensorflow as tf
# import tensorflow.contrib.slim as slim
# from tensorflow.contrib.slim import arg_scope
# from tensorflow.contrib.slim.python.slim.nets import resnet_utils
# from tensorflow.contrib.slim.python.slim.nets import resnet_v1
# from tensorflow.python.framework import ops
from ult.tools import get_convert_matrix
from ult.config import cfg
from ult.visualization import draw_bounding_boxes_HOI
import torch
import numpy as np
import torch.nn as nn
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one.

    Uses `reshape` (not `view`), so non-contiguous inputs are handled too.
    """

    def forward(self, x):
        batch = x.size()[0]
        return x.reshape(batch, -1)
class HICO_HOI(nn.Module):
    def __init__(self, model_name: str):
        """Build the HICO-DET human-object-interaction network.

        Args:
            model_name: configuration string; substrings such as 'fpn',
                'VCL_', '_rew', '_union', '_humans' select architectural and
                loss variants throughout the class.
        """
        super(HICO_HOI, self).__init__()
        import torchvision.models as models
        self.model_name = model_name
        # Scratch dicts/lists used to stash tensors for visualization,
        # summaries and losses during forward/loss computation.
        self.visualize = {}
        self.test_visualize = {}
        self.intermediate = {}
        self.predictions = {}
        self.score_summaries = {}
        self.event_summaries = {}
        self.train_summaries = []
        self.losses = {}
        # Label-space sizes: 600 HOI categories built from 117 verbs and
        # 80 object classes (see get_convert_matrix below).
        self.num_classes = 600
        self.compose_num_classes = 600
        self.num_fc = 1024
        self.verb_num_classes = 117
        self.obj_num_classes = 80
        self.scope = 'resnet_v1_50'
        self.stride = [16, ]
        """We copy from TIN. calculated by log(1/(n_c/sum(n_c)) c is the category and n_c is the number of samples"""
        # Per-category re-weighting vector for the 600 HOI classes (see the
        # note above; applied to logits when model_name contains '_rew').
        self.HO_weight = np.array([
            9.192927, 9.778443, 10.338059, 9.164914, 9.075144, 10.045923, 8.714437, 8.59822, 12.977117, 6.2745423,
            11.227917, 6.765012, 9.436157, 9.56762, 11.0675745, 11.530198, 9.609821, 9.897503, 6.664475, 6.811699,
            6.644726, 9.170454, 13.670264, 3.903943, 10.556748, 8.814335, 9.519224, 12.753973, 11.590822, 8.278912,
            5.5245695, 9.7286825, 8.997436, 10.699849, 9.601237, 11.965516, 9.192927, 10.220277, 6.056692, 7.734048,
            8.42324, 6.586457, 6.969533, 10.579222, 13.670264, 4.4531965, 9.326459, 9.288238, 8.071842, 10.431585,
            12.417501, 11.530198, 11.227917, 4.0678477, 8.854023, 12.571651, 8.225684, 10.996116, 11.0675745,
            10.100731,
            7.0376034, 7.463688, 12.571651, 14.363411, 5.4902234, 11.0675745, 14.363411, 8.45805, 10.269067,
            9.820116,
            14.363411, 11.272368, 11.105314, 7.981595, 9.198626, 3.3284247, 14.363411, 12.977117, 9.300817,
            10.032678,
            12.571651, 10.114916, 10.471591, 13.264799, 14.363411, 8.01953, 10.412168, 9.644913, 9.981384,
            7.2197933,
            14.363411, 3.1178555, 11.031207, 8.934066, 7.546675, 6.386472, 12.060826, 8.862153, 9.799063, 12.753973,
            12.753973, 10.412168, 10.8976755, 10.471591, 12.571651, 9.519224, 6.207762, 12.753973, 6.60636,
            6.2896967,
            4.5198326, 9.7887, 13.670264, 11.878505, 11.965516, 8.576513, 11.105314, 9.192927, 11.47304, 11.367679,
            9.275815, 11.367679, 9.944571, 11.590822, 10.451388, 9.511381, 11.144535, 13.264799, 5.888291,
            11.227917,
            10.779892, 7.643191, 11.105314, 9.414651, 11.965516, 14.363411, 12.28397, 9.909063, 8.94731, 7.0330057,
            8.129001, 7.2817025, 9.874775, 9.758241, 11.105314, 5.0690055, 7.4768796, 10.129305, 9.54313, 13.264799,
            9.699972, 11.878505, 8.260853, 7.1437693, 6.9321113, 6.990665, 8.8104515, 11.655361, 13.264799,
            4.515912,
            9.897503, 11.418972, 8.113436, 8.795067, 10.236277, 12.753973, 14.363411, 9.352776, 12.417501,
            0.6271591,
            12.060826, 12.060826, 12.166186, 5.2946343, 11.318889, 9.8308115, 8.016022, 9.198626, 10.8976755,
            13.670264,
            11.105314, 14.363411, 9.653881, 9.503599, 12.753973, 5.80546, 9.653881, 9.592727, 12.977117, 13.670264,
            7.995224, 8.639826, 12.28397, 6.586876, 10.929424, 13.264799, 8.94731, 6.1026597, 12.417501, 11.47304,
            10.451388, 8.95624, 10.996116, 11.144535, 11.031207, 13.670264, 13.670264, 6.397866, 7.513285, 9.981384,
            11.367679, 11.590822, 7.4348736, 4.415428, 12.166186, 8.573451, 12.977117, 9.609821, 8.601359, 9.055143,
            11.965516, 11.105314, 13.264799, 5.8201604, 10.451388, 9.944571, 7.7855496, 14.363411, 8.5463,
            13.670264,
            7.9288645, 5.7561946, 9.075144, 9.0701065, 5.6871653, 11.318889, 10.252538, 9.758241, 9.407584,
            13.670264,
            8.570397, 9.326459, 7.488179, 11.798462, 9.897503, 6.7530537, 4.7828183, 9.519224, 7.6492405, 8.031909,
            7.8180614, 4.451856, 10.045923, 10.83705, 13.264799, 13.670264, 4.5245686, 14.363411, 10.556748,
            10.556748,
            14.363411, 13.670264, 14.363411, 8.037262, 8.59197, 9.738439, 8.652985, 10.045923, 9.400566, 10.9622135,
            11.965516, 10.032678, 5.9017305, 9.738439, 12.977117, 11.105314, 10.725825, 9.080208, 11.272368,
            14.363411,
            14.363411, 13.264799, 6.9279733, 9.153925, 8.075553, 9.126969, 14.363411, 8.903826, 9.488214, 5.4571533,
            10.129305, 10.579222, 12.571651, 11.965516, 6.237189, 9.428937, 9.618479, 8.620408, 11.590822,
            11.655361,
            9.968962, 10.8080635, 10.431585, 14.363411, 3.796231, 12.060826, 10.302968, 9.551227, 8.75394,
            10.579222,
            9.944571, 14.363411, 6.272396, 10.625742, 9.690582, 13.670264, 11.798462, 13.670264, 11.724354,
            9.993963,
            8.230013, 9.100721, 10.374427, 7.865129, 6.514087, 14.363411, 11.031207, 11.655361, 12.166186, 7.419324,
            9.421769, 9.653881, 10.996116, 12.571651, 13.670264, 5.912144, 9.7887, 8.585759, 8.272101, 11.530198,
            8.886948,
            5.9870906, 9.269661, 11.878505, 11.227917, 13.670264, 8.339964, 7.6763024, 10.471591, 10.451388,
            13.670264,
            11.185357, 10.032678, 9.313555, 12.571651, 3.993144, 9.379805, 9.609821, 14.363411, 9.709451, 8.965248,
            10.451388, 7.0609145, 10.579222, 13.264799, 10.49221, 8.978916, 7.124196, 10.602211, 8.9743395, 7.77862,
            8.073695, 9.644913, 9.339531, 8.272101, 4.794418, 9.016304, 8.012526, 10.674532, 14.363411, 7.995224,
            12.753973, 5.5157638, 8.934066, 10.779892, 7.930471, 11.724354, 8.85808, 5.9025764, 14.363411,
            12.753973,
            12.417501, 8.59197, 10.513264, 10.338059, 14.363411, 7.7079706, 14.363411, 13.264799, 13.264799,
            10.752493,
            14.363411, 14.363411, 13.264799, 12.417501, 13.670264, 6.5661197, 12.977117, 11.798462, 9.968962,
            12.753973,
            11.47304, 11.227917, 7.6763024, 10.779892, 11.185357, 14.363411, 7.369478, 14.363411, 9.944571,
            10.779892,
            10.471591, 9.54313, 9.148476, 10.285873, 10.412168, 12.753973, 14.363411, 6.0308623, 13.670264,
            10.725825,
            12.977117, 11.272368, 7.663911, 9.137665, 10.236277, 13.264799, 6.715625, 10.9622135, 14.363411,
            13.264799,
            9.575919, 9.080208, 11.878505, 7.1863923, 9.366199, 8.854023, 9.874775, 8.2857685, 13.670264, 11.878505,
            12.166186, 7.616999, 9.44343, 8.288065, 8.8104515, 8.347254, 7.4738197, 10.302968, 6.936267, 11.272368,
            7.058223, 5.0138307, 12.753973, 10.173757, 9.863602, 11.318889, 9.54313, 10.996116, 12.753973,
            7.8339925,
            7.569945, 7.4427395, 5.560738, 12.753973, 10.725825, 10.252538, 9.307165, 8.491293, 7.9161053,
            7.8849015,
            7.782772, 6.3088884, 8.866243, 9.8308115, 14.363411, 10.8976755, 5.908519, 10.269067, 9.176025,
            9.852551,
            9.488214, 8.90809, 8.537411, 9.653881, 8.662968, 11.965516, 10.143904, 14.363411, 14.363411, 9.407584,
            5.281472, 11.272368, 12.060826, 14.363411, 7.4135547, 8.920994, 9.618479, 8.891141, 14.363411,
            12.060826,
            11.965516, 10.9622135, 10.9622135, 14.363411, 5.658909, 8.934066, 12.571651, 8.614018, 11.655361,
            13.264799,
            10.996116, 13.670264, 8.965248, 9.326459, 11.144535, 14.363411, 6.0517673, 10.513264, 8.7430105,
            10.338059,
            13.264799, 6.878481, 9.065094, 8.87035, 14.363411, 9.92076, 6.5872955, 10.32036, 14.363411, 9.944571,
            11.798462, 10.9622135, 11.031207, 7.652888, 4.334878, 13.670264, 13.670264, 14.363411, 10.725825,
            12.417501,
            14.363411, 13.264799, 11.655361, 10.338059, 13.264799, 12.753973, 8.206432, 8.916674, 8.59509,
            14.363411,
            7.376845, 11.798462, 11.530198, 11.318889, 11.185357, 5.0664344, 11.185357, 9.372978, 10.471591,
            9.6629305,
            11.367679, 8.73579, 9.080208, 11.724354, 5.04781, 7.3777695, 7.065643, 12.571651, 11.724354, 12.166186,
            12.166186, 7.215852, 4.374113, 11.655361, 11.530198, 14.363411, 6.4993753, 11.031207, 8.344818,
            10.513264,
            10.032678, 14.363411, 14.363411, 4.5873594, 12.28397, 13.670264, 12.977117, 10.032678, 9.609821
        ], dtype='float32').reshape(1, 600)
        self.HO_weight = torch.from_numpy(self.HO_weight)
        # Per-category instance counts precomputed from the dataset.
        num_inst_path = cfg.ROOT_DIR + '/Data/num_inst.npy'
        num_inst = np.load(num_inst_path)
        self.num_inst = num_inst
        # Binary matrices mapping verb / object labels onto the 600 HOI labels.
        verb_to_HO_matrix, obj_to_HO_matrix = get_convert_matrix(self.verb_num_classes, self.obj_num_classes)
        self.obj_to_HO_matrix = obj_to_HO_matrix
        self.verb_to_HO_matrix = verb_to_HO_matrix
        import torchvision
        # Backbone trunk: ResNet-50 through layer3 (children 0:7); 'fpn'
        # selects the Faster R-CNN detection backbone body instead.
        if model_name.__contains__('fpn'):
            fpn_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
            fpn_model = fpn_model.backbone.body
            self.base_model = nn.Sequential(*list(fpn_model.children())[:-1])
        else:
            fpn_model = models.resnet50(pretrained=True)
            self.base_model = nn.Sequential(*list(fpn_model.children())[0:7])
        self.flat = Flatten()
        # Human branch uses the backbone's layer4; the object branch gets an
        # independent deep copy so the two branches learn separate weights.
        self.h_block = fpn_model.layer4
        import copy
        self.o_block = copy.deepcopy(self.h_block)  # get a new instance
        # Small conv net over the 2-channel spatial-pattern input.
        self.Conv_sp = nn.Sequential(
            nn.Conv2d(2, 64, kernel_size=(5, 5), stride=(1, 1), bias=True),
            nn.ReLU(inplace=False),
            nn.MaxPool2d([2, 2]),
            nn.Conv2d(64, 32, kernel_size=(5, 5), stride=(1, 1), bias=True),
            nn.ReLU(inplace=False),
            nn.MaxPool2d([2, 2]),
            Flatten(),
        )
        # Two-layer MLP over the concatenated verb + object features
        # (2048 channels each, hence 2048*2 input).
        self.HOI_MLP = nn.Sequential(
            nn.Linear(2048*2, self.num_fc, bias=False),
            nn.BatchNorm1d(self.num_fc, eps=1e-05, affine=True, track_running_stats=True),
            nn.ReLU(inplace=False),
            nn.Dropout(0.5),
            nn.Linear(self.num_fc, self.num_fc, bias=False),
            nn.BatchNorm1d(self.num_fc, eps=1e-05, affine=True, track_running_stats=True),
            nn.ReLU(inplace=False),
            nn.Dropout(0.5)
        )
        self.HOI_classifier = nn.Sequential(
            nn.Linear(self.num_fc, self.num_classes)
        )
        # MLP over concatenated human-appearance and spatial conv features;
        # 7456 is that concatenated width — TODO confirm against sp output size.
        self.sp_MLP = nn.Sequential(
            nn.Linear(7456, self.num_fc, bias=False),  # 5708
            nn.BatchNorm1d(self.num_fc, eps=1e-05, affine=True, track_running_stats=True),
            nn.ReLU(inplace=False),
            nn.Dropout(0.5),
            nn.Linear(self.num_fc, self.num_fc, bias=False),
            nn.BatchNorm1d(self.num_fc, eps=1e-05, affine=True, track_running_stats=True),
            nn.ReLU(inplace=False),
            nn.Dropout(0.5)
        )
        self.sp_classifier = nn.Sequential(
            nn.Linear(self.num_fc, self.num_classes)
        )
        # Multi-label classification loss over the 600 HOI categories.
        self.loss = torch.nn.BCEWithLogitsLoss()
def image_to_head(self, is_training, images):
# import ipdb
# ipdb.set_trace()
# tmp = self.base_model[0](images)[0].tensors
# tmp = self.base_model[0].normalize(images)
# self.base_model[0].normalize(images)
from torchvision.models.detection.image_list import ImageList
# return self.base_model[1:](tmp)
return self.base_model(images)
def sp_to_head(self, spatial):
# import ipdb
# ipdb.set_trace()
return self.Conv_sp(spatial)
def res5(self, pool5_H, pool5_O, sp, is_training, name):
return self.h_block(pool5_H), self.o_block(pool5_O)
def crop_pool_layer(self, bottom, rois, name):
# import ipdb;ipdb.set_trace()
from torchvision.ops import roi_pool
result = roi_pool(
bottom, rois,
output_size=(cfg.POOLING_SIZE, cfg.POOLING_SIZE)
)
return result
def res5_ho(self, pool5_HO, is_training, name):
return self.h_block(pool5_HO)
# with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
# if self.model_name.startswith('VCL'):
# if self.model_name.__contains__('unique_weights'):
# print("unique_weights")
# st = -3
# reuse = tf.AUTO_REUSE
# if name != 'res5':
# reuse = True
# else:
# st = -2
# reuse = tf.AUTO_REUSE
# fc7_HO, _ = resnet_v1.resnet_v1(pool5_HO,
# self.blocks[st:st+1],
# global_pool=False,
# include_root_block=False,
# reuse=reuse,
# scope=self.scope)
# else:
# fc7_HO = None
# return fc7_HO
def head_to_tail_ho(self, fc7_O, fc7_verbs, fc7_O_raw, fc7_verbs_raw, is_training, name):
# import ipdb
# ipdb.set_trace()
return self.HOI_MLP(torch.cat([fc7_verbs, fc7_O], dim=1))
def head_to_tail_sp(self, fc7_H, fc7_O, sp, is_training, name):
# import ipdb
# ipdb.set_trace()
return self.sp_MLP(torch.cat([fc7_H, sp], dim=1))
def region_classification_sp(self, fc7_SHsp, is_training, initializer, name):
cls_score_sp = self.sp_classifier(fc7_SHsp)
cls_prob_sp = torch.sigmoid(cls_score_sp)
self.predictions["cls_score_sp"] = cls_score_sp
self.predictions["cls_prob_sp"] = cls_prob_sp
return cls_score_sp
def region_classification_ho(self, fc7_verbs, is_training, initializer, name, nameprefix = ''):
cls_score_hoi = self.HOI_classifier(fc7_verbs)
cls_prob_hoi = torch.sigmoid(cls_score_hoi)
self.predictions[nameprefix + "cls_score_hoi"] = cls_score_hoi
self.predictions[nameprefix + "cls_prob_hoi"] = cls_prob_hoi
if self.model_name.__contains__("VCOCO"):
# if self.model_name.__contains__('_CL_'):
# assert self.num_classes == 222
# print(cls_score_hoi, '=============================================')
if self.model_name.__contains__("VCL_V"):
self.predictions[nameprefix + "cls_prob_HO"] = cls_prob_hoi if nameprefix == '' else 0
else:
self.predictions[nameprefix+"cls_prob_HO"] = self.predictions["cls_prob_sp"] * cls_prob_hoi if nameprefix =='' else 0
return cls_score_hoi
def get_compose_boxes(self, h_boxes, o_boxes):
# import ipdb
# ipdb.set_trace()
# h_boxes = torch.unsqueeze(h_boxes, dim=-1)
# o_boxes = torch.unsqueeze(o_boxes, dim=-1)
# tmp_box = torch.cat([h_boxes, o_boxes], dim=-1)
# torch.redu
# torch.ca
x1 = torch.where(h_boxes[:, 1] < o_boxes[:, 1], h_boxes[:, 1], o_boxes[:, 1])
x2 = torch.where(h_boxes[:, 2] < o_boxes[:, 2], h_boxes[:, 2], o_boxes[:, 2])
y1 = torch.where(h_boxes[:, 3] > o_boxes[:, 3], h_boxes[:, 3], o_boxes[:, 3])
y2 = torch.where(h_boxes[:, 4] > o_boxes[:, 4], h_boxes[:, 4], o_boxes[:, 4])
union_boxes = torch.cat([h_boxes[:, 0:1], x1.unsqueeze(-1), x2.unsqueeze(-1),
y1.unsqueeze(-1), y2.unsqueeze(-1)], dim=1)
return union_boxes
    def forward(self, im_orig, image_id, num_pos, H_boxes, O_boxes, action_HO, Pattern, is_training):
        """Run the two-stream (spatial + verb-object) HOI network.

        Populates self.predictions (and self.test_visualize at test time) and
        returns None — loss computation lives in add_loss().

        Args (shapes assumed — TODO confirm against the data loader):
            im_orig: image batch; dims 2/3 are treated as width/height below.
            image_id: unused here; kept for interface compatibility.
            num_pos: number of positive H-O pairs at the front of the batch.
            H_boxes, O_boxes: (N, 5) rois [batch_idx, c1, c2, c3, c4] in image
                coordinates (rescaled in place below).
            action_HO: ground-truth HOI labels; unused in forward.
            Pattern: spatial-pattern maps fed to Conv_sp.
            is_training: toggles test-time visualization caching.
        """
        num_stop = self.get_num_stop(num_pos, H_boxes)
        # ResNet Backbone
        img_w = im_orig.shape[2]
        img_h = im_orig.shape[3]
        head = self.image_to_head(is_training, im_orig)
        sp = self.sp_to_head(Pattern)
        # Rescale rois from image space onto the feature-map grid (in place).
        H_boxes = self.convert_rois(H_boxes, head, img_h, img_w)
        O_boxes = self.convert_rois(O_boxes, head, img_h, img_w)
        cboxes = self.get_compose_boxes(H_boxes[:num_stop] if self.model_name.__contains__('VCOCO') else H_boxes, O_boxes)
        pool5_O = self.crop_pool_layer(head, O_boxes, 'Crop_O')
        pool5_H = self.crop_pool_layer(head, H_boxes, 'Crop_H')
        cboxes = cboxes[:num_stop]
        pool5_HO = self.extract_pool5_HO(head, cboxes, H_boxes[:num_stop], is_training, pool5_O, None, name='ho_')
        # further resnet feature
        fc7_H_raw, fc7_O_raw = self.res5(pool5_H, pool5_O, None, is_training, 'res5')
        # should be 7x7; global-average-pool the spatial dims.
        fc7_H = torch.mean(fc7_H_raw, dim=[2, 3])
        fc7_O = torch.mean(fc7_O_raw, dim=[2, 3])
        fc7_H_pos = fc7_H[:num_stop]
        fc7_O_pos = fc7_O[:num_stop]
        fc7_HO_raw = self.res5_ho(pool5_HO, is_training, 'res5')
        # extract_pool5_HO may yield None for some variants — TODO confirm
        # res5_ho tolerates that upstream.
        fc7_HO = None if fc7_HO_raw is None else torch.mean(fc7_HO_raw, dim=[2, 3])
        fc7_verbs_raw = fc7_HO_raw
        fc7_verbs = fc7_HO
        # Spatial stream: human appearance + spatial pattern.
        fc7_SHsp = self.head_to_tail_sp(fc7_H, fc7_O, sp, is_training, 'fc_HO')
        cls_score_sp = self.region_classification_sp(fc7_SHsp, is_training, None, 'classification')
        if self.model_name.__contains__('VCL_'):
            if not is_training:
                self.test_visualize['fc7_O_feats'] = fc7_O
                self.test_visualize['fc7_verbs_feats'] = fc7_verbs
                self.test_visualize['fc7_H_feats'] = fc7_H_pos
            self.intermediate['fc7_O'] = fc7_O[:num_stop]
            self.intermediate['fc7_verbs'] = fc7_verbs[:num_stop]
            # Verb-object stream: positives only ([:num_stop]).
            fc7_vo = self.head_to_tail_ho(fc7_O[:num_stop], fc7_verbs[:num_stop], fc7_O_raw, fc7_verbs_raw, is_training, 'fc_HO')
            cls_score_hoi = self.region_classification_ho(fc7_vo, is_training, None, 'classification')
        else:
            cls_score_hoi = None
        self.score_summaries.update(self.predictions)
def convert_rois(self, H_boxes, head, img_h, img_w):
scale_w = head.shape[2] / img_w
scale_h = head.shape[3] / img_h
H_boxes[:, 1] = H_boxes[:, 1] * scale_w
H_boxes[:, 2] = H_boxes[:, 2] * scale_w
H_boxes[:, 3] = H_boxes[:, 3] * scale_h
H_boxes[:, 4] = H_boxes[:, 4] * scale_h
return H_boxes
# def add_visual_for_test(self, fc7_HO_raw, fc7_H_raw, fc7_O_raw, head, is_training, pool5_O):
# self.test_visualize['fc7_H_raw'] = tf.expand_dims(tf.reduce_mean(fc7_H_raw, axis=-1), axis=-1)
# self.test_visualize['fc7_O_raw'] = tf.expand_dims(tf.reduce_mean(fc7_O_raw, axis=-1), axis=-1)
# if fc7_HO_raw is not None:
# self.test_visualize['fc7_HO_raw'] = tf.expand_dims(tf.reduce_mean(fc7_HO_raw, axis=-1), axis=-1)
# self.test_visualize['fc7_H_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(fc7_H_raw, 0), tf.float32))
# self.test_visualize['fc7_O_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(fc7_O_raw, 0), tf.float32))
# if fc7_HO_raw is not None:
# self.test_visualize['fc7_HO_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(fc7_HO_raw, 0), tf.float32))
# res5_ho_h = self.res5_ho(self.extract_pool5_HO(head, self.H_boxes, is_training, pool5_O, None), is_training,
# 'h')
# if self.model_name.__contains__('VCL_humans'):
# res5_ho_o = self.crop_pool_layer(head, self.O_boxes, 'Crop_HO_h')
# else:
# res5_ho_o = self.res5_ho(self.extract_pool5_HO(head, self.O_boxes, is_training, pool5_O, None), is_training,
# 'o')
# print("res5_ho_o", res5_ho_o, res5_ho_h)
# if res5_ho_h is not None and res5_ho_o is not None:
# self.test_visualize['res5_ho_H'] = tf.expand_dims(tf.reduce_mean(res5_ho_h, axis=-1), axis=-1)
# self.test_visualize['res5_ho_O'] = tf.expand_dims(tf.reduce_mean(res5_ho_o, axis=-1), axis=-1)
# self.test_visualize['res5_ho_H_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(res5_ho_h, 0), tf.float32))
# self.test_visualize['res5_ho_O_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(res5_ho_o, 0), tf.float32))
#
# def add_pose(self, name):
# with tf.variable_scope(name) as scope:
# conv1_pose_map = slim.conv2d(self.spatial[:, :, :, 2:][:self.get_num_stop()], 32, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv1_pose_map')
# pool1_pose_map = slim.max_pool2d(conv1_pose_map, [2, 2], scope='pool1_pose_map')
# conv2_pose_map = slim.conv2d(pool1_pose_map, 16, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv2_pose_map')
# pool2_pose_map = slim.max_pool2d(conv2_pose_map, [2, 2], scope='pool2_pose_map')
# pool2_flat_pose_map = slim.flatten(pool2_pose_map)
# return pool2_flat_pose_map
#
# def add_pose1(self, name):
# with tf.variable_scope(name) as scope:
# conv1_pose_map = slim.conv2d(self.spatial[:, :, :, 2:][:self.get_num_stop()], 64, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv1_pose_map')
# pool1_pose_map = slim.max_pool2d(conv1_pose_map, [2, 2], scope='pool1_pose_map')
# conv2_pose_map = slim.conv2d(pool1_pose_map, 32, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv2_pose_map')
# pool2_pose_map = slim.max_pool2d(conv2_pose_map, [2, 2], scope='pool2_pose_map')
# pool2_flat_pose_map = slim.flatten(pool2_pose_map)
# return pool2_flat_pose_map
#
# def add_pose_pattern(self, name = "pose_sp"):
# with tf.variable_scope(name) as scope:
# conv1_pose_map = slim.conv2d(self.spatial[:self.get_num_stop()], 64, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv1_sp_pose_map')
# pool1_pose_map = slim.max_pool2d(conv1_pose_map, [2, 2], scope='pool1_sp_pose_map')
# conv2_pose_map = slim.conv2d(pool1_pose_map, 32, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv2_sp_pose_map')
# pool2_pose_map = slim.max_pool2d(conv2_pose_map, [2, 2], scope='pool2_sp_pose_map')
# pool2_flat_pose_map = slim.flatten(pool2_pose_map)
# return pool2_flat_pose_map
#
# def add_pattern(self, name = 'pattern'):
# with tf.variable_scope(name) as scope:
# with tf.variable_scope(self.scope, self.scope):
# conv1_sp = slim.conv2d(self.spatial[:, :, :, 0:2][:self.get_num_stop()], 64, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv1_sp')
# pool1_sp = slim.max_pool2d(conv1_sp, [2, 2], scope='pool1_sp')
# conv2_sp = slim.conv2d(pool1_sp, 32, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv2_sp')
# pool2_sp = slim.max_pool2d(conv2_sp, [2, 2], scope='pool2_sp')
# pool2_flat_sp = slim.flatten(pool2_sp)
# return pool2_flat_sp
def get_num_stop(self, H_num, H_boxes):
"""
following iCAN, spatial pattern include all negative samples. verb-object branch is for positive samples
self.H_num is the partition for positive sample and negative samples.
:return:
"""
num_stop = len(H_boxes) # for selecting the positive items
if self.model_name.__contains__('_new') \
or not self.model_name.startswith('VCL_'):
print('new Add H_num constrains')
num_stop = H_num
elif self.model_name.__contains__('_x5new'): # contain some negative items
# I use this strategy cause I found by accident that including
# some negative samples in the positive samples can improve the performance a bit (abount 0.2%).
# TODO I think it might have a better solution.
# No-Frills Human-Object Interaction Detection provides some support
# I think VCL do not depend on this. If someone finds This has important impact on result,
# feel happy to contact me.
H_num_tmp = H_num
num_stop = num_stop
num_stop = H_num_tmp + (num_stop - H_num_tmp) // 8
else:
num_stop = H_num
return num_stop
def get_compose_num_stop(self, H_num, H_boxes):
num_stop = self.get_num_stop(H_num, H_boxes)
return num_stop
def extract_pool5_HO(self, head, cboxes, H_boxes, is_training, pool5_O, head_mask = None, name=''):
if self.model_name.__contains__('_union'):
pool5_HO = self.crop_pool_layer(head, cboxes, name + 'Crop_HO')
elif self.model_name.__contains__('_humans'):
# print("humans")
pool5_HO = self.crop_pool_layer(head, H_boxes, name + 'Crop_HO_h')
else:
# pool5_HO = self.crop_pool_layer(head, cboxes, 'Crop_HO')
pool5_HO = None
return pool5_HO
    def add_loss(self, gt_class_HO, num_stop, device):
        """Compute the training loss from self.predictions and store the parts in self.losses.

        :param gt_class_HO: ground-truth HOI labels for the batch.
        :param num_stop: partition index separating positive samples (see get_num_stop).
        :param device: torch device the class weights are moved to.
        :return: total loss tensor (also stored as self.losses['total_loss']).

        NOTE(review): if self.model_name starts with neither 'VCL_V_' nor 'VCL_',
        `loss` is never assigned and the final `self.losses['total_loss'] = loss`
        raises NameError — confirm all supported model names hit one branch.
        """
        import math
        # Move the per-class re-weighting vector to the computation device.
        self.HO_weight = self.HO_weight.to(device)
        if self.model_name.__contains__('_VCOCO'):
            # V-COCO provides separate label tensors on self.
            label_H = self.gt_class_H
            label_HO = gt_class_HO
            label_sp = self.gt_class_sp
        else:
            # HICO-style: positives only for H/HO branches, all samples for sp.
            label_H = gt_class_HO[:num_stop]
            # label_HO = gt_class_HO_for_verbs
            label_HO = gt_class_HO[:num_stop]
            label_sp = gt_class_HO
        # if "cls_score_H" in self.predictions:
        #     cls_score_H = self.predictions["cls_score_H"]
        #     """
        #     The re-weighting strategy has an important effect on the performance.
        #     This will also improve largely our baseline in both common and zero-shot setting.
        #     We copy from TIN.
        #     """
        #     if self.model_name.__contains__('_rew'):
        #         cls_score_H = torch.mul(cls_score_H, self.HO_weight)
        #
        #     # H_cross_entropy = torch.enttf.reduce_mean(
        #     #     tf.nn.sigmoid_cross_entropy_with_logits(labels=label_H,
        #     #                                             logits=cls_score_H[:num_stop, :]))
        #
        # if "cls_score_O" in self.predictions:
        #     cls_score_O = self.predictions["cls_score_O"]
        #     if self.model_name.__contains__('_rew'):
        #         cls_score_O = tf.multiply(cls_score_O, self.HO_weight)
        #     O_cross_entropy = tf.reduce_mean(
        #         tf.nn.sigmoid_cross_entropy_with_logits(labels=label_HO,
        #                                                 logits=cls_score_O[:num_stop, :]))
        #     self.losses['O_cross_entropy'] = O_cross_entropy
        if "cls_score_sp" in self.predictions:
            # Spatial-branch loss over all samples (positives + negatives).
            cls_score_sp = self.predictions["cls_score_sp"]
            if self.model_name.__contains__('_rew'):
                # print('reweight')
                cls_score_sp = torch.mul(cls_score_sp, self.HO_weight)
            # sp_cross_entropy = tf.reduce_mean(
            #     tf.nn.sigmoid_cross_entropy_with_logits(labels=label_sp, logits=cls_score_sp))
            sp_cross_entropy = self.loss(cls_score_sp, label_sp)
            # self.losses['H_cross_entropy'] = H_cross_entropy
            self.losses['sp_cross_entropy'] = sp_cross_entropy
        if self.model_name.startswith('VCL_V_'):
            # Verb-only variant: HOI loss alone (positives only).
            cls_score_hoi = self.predictions["cls_score_hoi"]
            if self.model_name.__contains__('_rew'):
                # print('reweight')
                cls_score_hoi = torch.mul(cls_score_hoi, self.HO_weight)
            hoi_cross_entropy = self.loss(cls_score_hoi[:num_stop, :], label_HO[:num_stop, :])
            self.losses['hoi_cross_entropy'] = hoi_cross_entropy
            loss = hoi_cross_entropy
        elif self.model_name.startswith('VCL_'):
            # Full VCL: spatial loss + lambda-weighted HOI loss.
            tmp_label_HO = gt_class_HO[:num_stop]
            cls_score_hoi = self.predictions["cls_score_hoi"][:num_stop, :]
            if self.model_name.__contains__('_rew'):
                cls_score_hoi = torch.mul(cls_score_hoi, self.HO_weight)
            # tmp_hoi_loss = tf.nn.sigmoid_cross_entropy_with_logits(
            #     labels=tmp_label_HO, logits=cls_score_hoi)
            hoi_cross_entropy = self.loss(cls_score_hoi, tmp_label_HO)
            self.losses['hoi_cross_entropy'] = hoi_cross_entropy
            # Lambda balancing the HOI term is encoded in the model name.
            lamb = 1
            if self.model_name.__contains__('_l05_'):
                lamb = 0.5
            elif self.model_name.__contains__('_l2_'):
                lamb = 2
            elif self.model_name.__contains__('_l0_'):
                lamb = 0
            elif self.model_name.__contains__('_l1_'):
                lamb = 1
            elif self.model_name.__contains__('_l15_'):
                lamb = 1.5
            elif self.model_name.__contains__('_l25_'):
                lamb = 2.5
            elif self.model_name.__contains__('_l3_'):
                lamb = 3
            elif self.model_name.__contains__('_l4_'):
                lamb = 4
            if "cls_score_sp" not in self.predictions:
                sp_cross_entropy = 0
                self.losses['sp_cross_entropy'] = 0
            loss = sp_cross_entropy + hoi_cross_entropy * lamb
        # else:
        #     loss = H_cross_entropy + O_cross_entropy + sp_cross_entropy
        self.losses['total_loss'] = loss
        # self.event_summaries.update(self.losses)
        # print(self.losses)
        # print(self.predictions)
        return loss
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
def test_synthesized_python_is_included_in_package() -> None:
    """End-to-end check that Python sources generated by a shell_command and
    wrapped via experimental_wrap_as_python_sources can be depended on by a
    pex_binary and actually run."""
    sources = {
        "src/BUILD": dedent(
            """\
            shell_command(
                name="manufacture_python_code",
                tools=["touch",],
                command='echo print\\\\(\\\\"Hello, World!\\\\"\\\\) > hello_world.py',
                execution_dependencies=(),
                output_files=["hello_world.py",],
                workdir=".",
                root_output_directory="/",
            )

            experimental_wrap_as_python_sources(
                name="python_dependency",
                inputs=[":manufacture_python_code"],
            )

            pex_binary(
                name="app",
                dependencies=[":python_dependency"],
                entry_point=str(build_file_dir()).replace("/", ".") + ".hello_world",
            )
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        # Run `pants run` on the pex target; the synthesized module must be
        # packaged and importable for the entry point to execute.
        args = [
            "--backend-packages=['pants.backend.python','pants.backend.shell']",
            f"--source-root-patterns=['/{tmpdir}/src']",
            "run",
            f"{tmpdir}/src:app",
        ]
        result = run_pants(args)
        result.assert_success()
        assert "Hello, World!" in result.stdout.strip()
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from KNN import KNN
# Load the dataset and drop the identifier column (no predictive value).
data = pd.read_csv('Social_Network_Ads.csv')
# print(data)
data = data.drop(['User ID'], axis=1)
# Encode Gender as a numeric feature: Male -> 0, Female -> 1.
data['Gender'] = data['Gender'].map({'Male': 0, 'Female': 1}).astype(int)
# print(data)
# Features: first three columns; target: fourth column.
X = data.iloc[:, 0:3].values
Y = data.iloc[:, 3].values
# print(X)
# print(Y)
# Standardize features (KNN is distance-based, so scaling matters).
sc = StandardScaler()
X = sc.fit_transform(X)
X_train, X_test, Y_train ,Y_test = train_test_split(X, Y, test_size=0.15, random_state=99)
classifier = KNN(k=5)
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
print(y_pred)
# NOTE(review): this custom KNN.score appears to take (y_true, y_pred),
# unlike sklearn's estimator.score(X, y) — confirm against KNN.py.
score = classifier.score(Y_test, y_pred)
print(score)
|
from pathlib import Path
class Dirs:
    # Filesystem layout: project root is two levels above this config file.
    root = Path(__file__).parent.parent
    data = root / 'data'
    data_tools = root / 'data_tools'
class Data:
    # Utterance length bounds (in tokens) for inclusion in the dataset.
    min_input_length = 3
    max_input_length = 128
    train_prob = 0.8  # probability that utterance is assigned to train split
    special_symbols = ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]']  # order matters
    childes_symbols = ['[NAME]', '[PLACE]', '[MISC]']
    # 4 is the position of '[MASK]' in special_symbols above (hence "order matters").
    mask_vocab_id = 4
class Training:
    # How often (in steps) to log training feedback.
    feedback_interval = 100
    ignored_index = -1  # any ids in argument "tags" to cross-entropy fn are ignored
    debug = False
class Eval:
    # Evaluate every this many training steps.
    interval = 10_000
    test_sentences = False
    train_split = False
    print_perl_script_output = False  # happens at every batch so not very useful
    # Whether to run the probing tasks before training and after the last step.
    probe_at_step_zero = True
    probe_at_end = False
    # Names of the agreement probing tasks to run.
    probing_names = [
        'dummy',
        'agreement_across_adjectives',
        'agreement_across_PP',
        'agreement_across_RC',
        'agreement_in_1_verb_question',
        'agreement_in_2_verb_question',
    ]
class Wordpieces:
    # Diagnostics for the wordpiece tokenization step.
    verbose = False
    warn_on_mismatch = False
|
import json
import os
import re
from pymongo import MongoClient
### Load Mongo Connection String from environment on PROD
### Otherwise load it from local file
MONGO_CONNECTION_URI = os.environ.get('MONGO_CONNECTION_URI')
if MONGO_CONNECTION_URI is None:
    # Use a context manager so the config file handle is closed promptly
    # (the original `open('dev.json', 'r').read()` leaked the file object).
    with open('dev.json', 'r') as dev_config_file:
        env_config = json.load(dev_config_file)
    MONGO_CONNECTION_URI = env_config.get('MONGO_CONNECTION_URI')
### Decorator to only ever have a single instance of DB
def singleton(cls):
    """Class decorator: every call returns the same lazily-created instance."""
    def wrapper(*args, **kwargs):
        # Build the instance on the first call only; reuse it afterwards.
        instance = wrapper.instance
        if instance is None:
            instance = cls(*args, **kwargs)
            wrapper.instance = instance
        return instance
    wrapper.instance = None
    return wrapper
@singleton
class DB:
    """Singleton handle on the MongoDB 'primary.searchhistory' collection.

    Note: methods now use the conventional `self` for the instance parameter
    (the original named it `cls`, which misleadingly suggested classmethods).
    """
    def __init__(self):
        ### Initialise the MongoDB Client
        self.client = MongoClient(MONGO_CONNECTION_URI)
        ### Initialise the Collection to use for storing data
        self.collection = self.client['primary']['searchhistory']

    def store(self, **kwargs):
        """
        Method to store information in Database
        """
        try:
            ### Try inserting data inside DB as a single document
            store_result = self.collection.insert_one(dict(kwargs))
        except Exception as e:
            ### Log the error in case insertion operation raised an Exception
            print(f"Exception occured while storing search request data {dict(kwargs)} as {e}")
        else:
            ### Log the response in case the insertion operation was successful
            print(f"Storing search request data successful with response: {store_result}")

    def search_term(self, term, guild_id):
        """
        Method to fetch documents from database corresponding to
        specified Guild ID and Search Term
        term: Search Term based on which regex will be compiled to query
        guild_id: Guild ID to find results pertaining to that Guild only
        """
        ### Define response structure
        response = {
            "success": False,
            "results": []
        }
        try:
            ### Escape the user-supplied term so regex metacharacters match
            ### literally (fixes crashes / regex injection on terms like "a(").
            rgx = re.compile(f'.*{re.escape(term)}.*', re.IGNORECASE)
            ### Try to find documents from DB containing search term and provided Guild ID
            ### Only return 'term' from document for matching documents
            search_result = self.collection.find({
                'term': rgx,
                'guild_id': guild_id
            },{
                'term':1
            }
            )
        except Exception as e:
            ### Log the error in case query operation raised an Exception
            print(f"Exception occured while querying search history with term {term} and guild ID {guild_id} as {e}")
        else:
            ### Prepare response dict in case query operation was successful
            response['success'] = True
            response['results'] = list(search_result)
            print(f"Query Response is: {response['results']}")
        return response
|
from django.urls import path
from . import views
# URL routes for the file-management app.
urlpatterns = [
    # Root: upload view (presumably also lists files — confirm in views.py).
    path('', views.file_upload, name="file_upload"),
    # Delete an uploaded file by primary key.
    path('delete/<int:pk>/',views.remove_upload, name='remove_upload'),
    # Download endpoint.
    path('download/', views.file_download, name='file_download')
]
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Ciência de Dados e Inteligência Artificial
# Passo 1: Entendimento do Desafio
# Passo 2: Entendimento da Área/Empresa
# Passo 3: Extração/Obtenção de Dados
# Passo 4: Ajuste de Dados (Tratamento/Limpeza)
# Passo 5: Análise Exploratória
# Passo 6: Modelagem + Algoritmos (Aqui que entra a Inteligência Artificial, se necessário)
# Passo 7: Interpretação de Resultados
# In[3]:
# Passo 3
import pandas as pd
bd = pd.read_csv("advertising.csv")
# NOTE(review): display() is an IPython/Jupyter builtin; this notebook export
# will fail here when run as a plain script — use print(bd) outside Jupyter.
display(bd)
# In[8]:
# Passo 5: análise exploratória — correlações e dispersões entre variáveis.
import matplotlib.pyplot as plt
import seaborn as sns
sns.heatmap(bd.corr(), cmap="Wistia", annot=True)
plt.show()
sns.pairplot(bd)
plt.show()
# In[14]:
from sklearn.model_selection import train_test_split
# separar dados de x e de y
# y - é quem a gente quer descobrir
y = bd["Vendas"]
# x - é o resto
x = bd.drop("Vendas", axis=1)
# aplicar o train_test_split
x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, random_state = 1)
# In[15]:
#Testar os modelos
#Regressão Linear
#RandomForest(Árvore de decisão)
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
modelo_regressaolinear = LinearRegression()
modelo_randomforest = RandomForestRegressor()
modelo_regressaolinear.fit(x_treino, y_treino)
modelo_randomforest.fit(x_treino, y_treino)
# In[16]:
previsao_regressaolinear = modelo_regressaolinear.predict(x_teste)
previsao_randomforest = modelo_randomforest.predict(x_teste)
# R² vai de 0 a 100%, quanto maior melhor
from sklearn import metrics
print(metrics.r2_score(y_teste, previsao_regressaolinear))
print(metrics.r2_score(y_teste, previsao_randomforest))
# In[20]:
# RandomForest é o modelo vencedor
# Compare the two models' predictions against the held-out targets visually.
bd_auxiliar = pd.DataFrame()
bd_auxiliar["y_teste"] = y_teste
bd_auxiliar["regressao linear"] = previsao_regressaolinear
bd_auxiliar["random forest"] = previsao_randomforest
plt.figure(figsize=(15,7))
sns.lineplot(data=bd_auxiliar)
plt.show()
# In[22]:
# Feature importances of the winning RandomForest model.
sns.barplot(x=x_treino.columns, y=modelo_randomforest.feature_importances_)
plt.show()
# In[25]:
# Qual a importância?
#importar a nova_tabela com o pandas
#import pandas as pd
#novo_bd = pd.read_excel("novo_bd.xlsx")
#previsao = modelo_randomforest.predict(novo_bd)
#print(previsao)
# In[ ]:
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 22:53:28 2019
@author: HP
"""
import math
# DP[n] caches the sum of the odd divisors of n (None = not computed yet).
DP = [None for i in range(100001)]
DP[1] = 1

def sum_odd(n):
    """Compute and cache in DP[n] the sum of the odd divisors of n.

    Strips factors of two first (the odd-divisor sum of n equals that of
    n / 2**k), reusing any cached value for the odd part, then computes the
    divisor sum of the odd remainder by trial division over odd candidates.
    Fixes vs. original: `is not None` instead of `!= None`, integer `//`
    instead of `int(t/2)`, exact math.isqrt bound instead of a float sqrt,
    and the confusing while/else construct removed (it was equivalent to
    straight-line code since the loop had no break).
    """
    res = 1
    t = n
    # Remove factors of 2; reuse the cached result of the odd part if known.
    while t % 2 == 0:
        t //= 2
        if DP[t] is not None:
            DP[n] = DP[t]
            return
    # Trial division of the (now odd) t over odd candidates up to sqrt(t).
    for i in range(3, math.isqrt(t) + 1, 2):
        curr_sum = 1
        curr_term = 1
        while t % i == 0:
            curr_term *= i
            curr_sum += curr_term
            t //= i
        res *= curr_sum
    # Any remainder >= 2 is a prime factor larger than sqrt; divisors 1 and t.
    if t >= 2:
        res *= (t + 1)
    DP[n] = res
# Driver: read T queries; each query [l, r] prints the total of DP over that
# range, computing missing entries lazily via sum_odd.
T=int(input())
while(T):
    l,r=[int(x) for x in input().split()]
    # NOTE(review): `sum` shadows the builtin inside this loop body.
    sum=0
    for i in range(l,r+1):
        if DP[i]!=None:
            sum=sum+DP[i]
        else:
            sum_odd(i)
            sum=sum+DP[i]
    print(sum)
    T=T-1
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import logging
import numpy as np
from nomad.datamodel import EntryArchive
from nomad.units import ureg as units
from openmxparser import OpenmxParser
@pytest.fixture
def parser():
    """Fresh OpenmxParser instance for each test."""
    return OpenmxParser()

def A_to_m(value):
    """Convert a length from angstrom to metres (plain float magnitude)."""
    return (value * units.angstrom).to_base_units().magnitude

def Ha_to_J(value):
    """Convert an energy from hartree to joule (plain float magnitude)."""
    return (value * units.hartree).to_base_units().magnitude

def K_to_J(value):
    # NOTE(review): this multiplies by joule *and* `units.k`; if `k` resolves
    # to the Boltzmann constant in the nomad unit registry this is
    # dimensionally odd for a kelvin -> joule conversion — confirm intent.
    return (value * units.joule * units.k).to_base_units().magnitude

def HaB_to_N(value):
    """Convert a force from hartree/bohr to newton (plain float magnitude)."""
    return (value * units.hartree / units.bohr).to_base_units().magnitude

# default pytest.approx settings are abs=1e-12, rel=1e-6 so it doesn't work for small numbers
# use the default just for comparison with zero
def approx(value):
    """pytest.approx with relative tolerance only (abs=0, rel=1e-6)."""
    return pytest.approx(value, abs=0, rel=1e-6)
def test_HfO2(parser):
    '''
    Simple single point calculation monoclinic HfO2 test case.
    '''
    archive = EntryArchive()
    parser.parse('tests/HfO2_single_point/m-HfO2.out', archive, logging)
    run = archive.section_run[0]
    assert run.program_version == '3.9.2'
    assert run.program_basis_set_type == 'Numeric AOs'
    assert run.run_clean_end
    scc = run.section_single_configuration_calculation
    assert len(scc) == 1
    assert scc[0].energy_total.magnitude == approx(Ha_to_J(-346.328738171942))
    scf = scc[0].section_scf_iteration
    assert len(scf) == 24
    # Fix: the original line was a bare `==` comparison (a no-op statement,
    # never asserted). Asserting on .magnitude, matching the convention of
    # the other quantity checks in this file.
    assert scf[3].energy_sum_eigenvalues_scf_iteration.magnitude == approx(-3.916702417016777e-16)
    method = run.section_method[0]
    section_XC_functionals1 = method.section_XC_functionals[0]
    section_XC_functionals2 = method.section_XC_functionals[1]
    assert method.number_of_spin_channels == 1
    assert method.electronic_structure_method == 'DFT'
    assert method.smearing_width == approx(K_to_J(300))
    assert section_XC_functionals1.XC_functional_name == 'GGA_C_PBE'
    assert section_XC_functionals2.XC_functional_name == 'GGA_X_PBE'
    assert run.section_sampling_method == []
    system = run.section_system[0]
    assert all([a == b for a, b in zip(system.configuration_periodic_dimensions,
                                       [True, True, True])])
    assert system.lattice_vectors[0][0].magnitude == approx(A_to_m(5.1156000))
    assert system.lattice_vectors[2][2].magnitude == approx(A_to_m(5.2269843))
    assert len(system.atom_positions) == 12
    assert system.atom_positions[5][0].magnitude == approx(A_to_m(-0.3293636))
    assert system.atom_positions[11][2].magnitude == approx(A_to_m(2.6762159))
    assert len(system.atom_labels) == 12
    assert system.atom_labels[9] == 'O'
def test_AlN(parser):
    '''
    Geometry optimization (atomic positions only) AlN test case.
    '''
    archive = EntryArchive()
    parser.parse('tests/AlN_ionic_optimization/AlN.out', archive, logging)
    run = archive.section_run[0]
    assert run.program_version == '3.9.2'
    assert run.program_basis_set_type == 'Numeric AOs'
    assert run.run_clean_end
    scc = run.section_single_configuration_calculation
    assert len(scc) == 5
    assert scc[0].energy_total.magnitude == approx(Ha_to_J(-25.194346653540))
    assert scc[4].energy_total.magnitude == approx(Ha_to_J(-25.194358042252))
    assert np.shape(scc[0].atom_forces) == (4, 3)
    assert scc[0].atom_forces[0][2].magnitude == approx(HaB_to_N(0.00139))
    assert scc[4].atom_forces[3][2].magnitude == approx(HaB_to_N(-0.00018))
    scf = scc[0].section_scf_iteration
    assert len(scf) == 21
    # Fix: these two eigenvalue-sum lines were bare `==` comparisons (no-op
    # statements, never asserted). Asserting on .magnitude, matching the
    # convention of the other quantity checks in this file.
    assert scf[20].energy_sum_eigenvalues_scf_iteration.magnitude == approx(-3.4038353611878345e-17)
    scf = scc[3].section_scf_iteration
    assert len(scf) == 6
    assert scf[5].energy_sum_eigenvalues_scf_iteration.magnitude == approx(-3.4038520917173614e-17)
    method = run.section_method[0]
    section_XC_functionals1 = method.section_XC_functionals[0]
    section_XC_functionals2 = method.section_XC_functionals[1]
    assert method.number_of_spin_channels == 1
    assert method.electronic_structure_method == 'DFT'
    assert method.smearing_width == approx(K_to_J(300))
    assert section_XC_functionals1.XC_functional_name == 'GGA_C_PBE'
    assert section_XC_functionals2.XC_functional_name == 'GGA_X_PBE'
    assert method.scf_max_iteration == 100
    assert method.scf_threshold_energy_change.magnitude == approx(Ha_to_J(1e-7))
    sampling_method = run.section_sampling_method
    assert len(sampling_method) == 1
    assert sampling_method[0].geometry_optimization_method == "steepest_descent"
    assert sampling_method[0].sampling_method == "geometry_optimization"
    assert sampling_method[0].geometry_optimization_threshold_force.magnitude == approx(
        (0.0003 * units.hartree / units.bohr).to_base_units().magnitude)
    assert len(run.section_system) == 5
    system = run.section_system[0]
    assert all([a == b for a, b in zip(system.configuration_periodic_dimensions,
                                       [True, True, True])])
    assert system.lattice_vectors[0][0].magnitude == approx(A_to_m(3.10997))
    assert system.lattice_vectors[1][0].magnitude == approx(A_to_m(-1.55499))
    assert len(system.atom_positions) == 4
    assert system.atom_positions[0][0].magnitude == approx(A_to_m(1.55499))
    assert system.atom_positions[3][2].magnitude == approx(A_to_m(4.39210))
    assert len(system.atom_labels) == 4
    assert system.atom_labels[3] == 'N'
    system = run.section_system[0]
    assert np.shape(system.velocities) == (4, 3)
    assert system.velocities[0][0].magnitude == pytest.approx(0.0)
    assert system.velocities[3][2].magnitude == pytest.approx(0.0)
    system = run.section_system[3]
    assert system.lattice_vectors[1][1].magnitude == approx(A_to_m(2.69331))
    assert system.lattice_vectors[2][2].magnitude == approx(A_to_m(4.98010))
    assert len(system.atom_positions) == 4
    assert system.atom_positions[0][1].magnitude == approx(A_to_m(0.89807))
    assert system.atom_positions[2][2].magnitude == approx(A_to_m(1.90030))
    assert len(system.atom_labels) == 4
    assert system.atom_labels[0] == 'Al'
    system = run.section_system[4]
    assert system.lattice_vectors[0][0].magnitude == approx(A_to_m(3.10997))
    assert system.lattice_vectors[2][2].magnitude == approx(A_to_m(4.98010))
    assert len(system.atom_positions) == 4
    assert system.atom_positions[0][2].magnitude == approx(A_to_m(0.00253))
    assert system.atom_positions[3][2].magnitude == approx(A_to_m(4.39015))
    assert len(system.atom_labels) == 4
    assert system.atom_labels[1] == 'Al'
    eigenvalues = run.section_single_configuration_calculation[-1].section_eigenvalues[0]
    assert eigenvalues.eigenvalues_kind == 'normal'
    assert eigenvalues.number_of_eigenvalues_kpoints == 74
    assert np.shape(eigenvalues.eigenvalues_kpoints) == (74, 3)
    assert eigenvalues.eigenvalues_kpoints[0][0] == approx(-0.42857)
    assert eigenvalues.eigenvalues_kpoints[0][2] == approx(-0.33333)
    assert eigenvalues.eigenvalues_kpoints[73][2] == pytest.approx(0.0)
    assert eigenvalues.number_of_eigenvalues == 52
    assert np.shape(eigenvalues.eigenvalues_values) == (1, 74, 52)
    assert eigenvalues.eigenvalues_values[0, 0, 0].magnitude == approx(Ha_to_J(-0.77128985545768))
    assert eigenvalues.eigenvalues_values[0, 73, 51].magnitude == approx(Ha_to_J(4.86822333092339))
def test_C2N2(parser):
    '''
    Molecular dynamics using the Nose-Hoover thermostat for a simple C2H2 molecule.
    '''
    archive = EntryArchive()
    parser.parse('tests/C2H2_molecular_dynamics/C2H2.out', archive, logging)
    run = archive.section_run[0]
    assert run.program_version == '3.9.2'
    assert run.program_basis_set_type == 'Numeric AOs'
    assert run.run_clean_end
    # One single-configuration calculation per MD step.
    scc = run.section_single_configuration_calculation
    assert len(scc) == 100
    assert scc[0].temperature.magnitude == approx(300.0)
    assert scc[99].temperature.magnitude == approx(46.053)
    assert np.shape(scc[0].atom_forces) == (4, 3)
    assert scc[0].atom_forces[0][0].magnitude == approx(HaB_to_N(0.10002))
    assert scc[99].atom_forces[2][2].magnitude == approx(HaB_to_N(-0.00989))
    assert len(run.section_system) == 100
    method = run.section_method[0]
    assert method.number_of_spin_channels == 1
    assert method.electronic_structure_method == 'DFT'
    assert method.smearing_width == approx(K_to_J(500))
    assert method.section_XC_functionals[0].XC_functional_name == 'LDA_X'
    assert method.section_XC_functionals[1].XC_functional_name == 'LDA_C_PZ'
    sampling_method = run.section_sampling_method
    assert len(sampling_method) == 1
    assert sampling_method[0].sampling_method == "molecular_dynamics"
    assert sampling_method[0].ensemble_type == "NVT"
    # Velocities at the first and last MD step.
    system = run.section_system[0]
    assert np.shape(system.velocities) == (4, 3)
    assert system.velocities[0][0].magnitude == approx(396.15464)
    assert system.velocities[3][2].magnitude == approx(2359.24208)
    system = run.section_system[99]
    assert system.velocities[0][1].magnitude == approx(-353.94304)
    assert system.velocities[3][0].magnitude == approx(-315.74184)
    # Gamma-point eigenvalues of the final step.
    eigenvalues = run.section_single_configuration_calculation[-1].section_eigenvalues[0]
    assert eigenvalues.eigenvalues_kind == 'normal'
    assert eigenvalues.number_of_eigenvalues_kpoints == 1
    assert np.shape(eigenvalues.eigenvalues_kpoints) == (1, 3)
    assert all([a == pytest.approx(b) for a, b in zip(eigenvalues.eigenvalues_kpoints[0],
                                                      [0, 0, 0])])
    assert eigenvalues.number_of_eigenvalues == 64
    assert np.shape(eigenvalues.eigenvalues_values) == (1, 1, 64)
    assert eigenvalues.eigenvalues_values[0, 0, 0].magnitude == approx(Ha_to_J(-0.67352892393426))
    assert eigenvalues.eigenvalues_values[0, 0, 63].magnitude == approx(Ha_to_J(7.29352095903235))
def test_CrO2(parser):
    '''
    Single run of ferromagnetic CrO2 using LDA+U.
    '''
    archive = EntryArchive()
    parser.parse('tests/CrO2_single_point/CrO2.out', archive, logging)
    run = archive.section_run[0]
    method = run.section_method[0]
    # Spin-polarized calculation: two spin channels.
    assert method.number_of_spin_channels == 2
    assert method.electronic_structure_method == 'DFT+U'
    assert method.smearing_width == approx(K_to_J(500))
    assert method.section_XC_functionals[0].XC_functional_name == 'LDA_X'
    assert method.section_XC_functionals[1].XC_functional_name == 'LDA_C_PW'
    assert method.scf_max_iteration == 40
    assert method.scf_threshold_energy_change.magnitude == approx(Ha_to_J(1e-7))
    # Eigenvalues: (spin, kpoint, state) with both spin channels present.
    eigenvalues = run.section_single_configuration_calculation[-1].section_eigenvalues[0]
    assert eigenvalues.eigenvalues_kind == 'normal'
    assert eigenvalues.number_of_eigenvalues_kpoints == 100
    assert np.shape(eigenvalues.eigenvalues_kpoints) == (100, 3)
    assert eigenvalues.eigenvalues_kpoints[39, 2] == approx(0.43750)
    assert eigenvalues.number_of_eigenvalues == 90
    assert np.shape(eigenvalues.eigenvalues_values) == (2, 100, 90)
    assert eigenvalues.eigenvalues_values[0, 42, 8].magnitude == approx(Ha_to_J(-0.92962144255459))
    assert eigenvalues.eigenvalues_values[1, 99, 89].magnitude == approx(Ha_to_J(13.66867939417960))
|
###Exercises for Chapter 3
#Ex. 3.1
# NOTE: Python 2 code (raw_input / print statements).
# Ex 3.1: gross pay with 1.5x overtime for hours beyond 40.
hours = float(raw_input('Enter Hours Worked: '))
rate = float(raw_input('Enter Rate: '))
if (hours > 40):
    ot = hours - 40
    otpay = ot * (1.5 * rate)
    pay = (40 * rate) + otpay
    print 'Your pay = ' + str(pay)
else:
    pay = int(hours) * float(rate)
    print 'Your pay = ' + str(pay)
# Ex 3.2: same as 3.1 but reject non-numeric input gracefully.
try:
    input_hours = raw_input('Enter Hours Worked: ')
    hours = float(input_hours)
    input_rate = raw_input('Enter Rate: ')
    rate = float(input_rate)
    if (hours > 40):
        ot = hours - 40
        otpay = ot * (1.5 * rate)
        pay = (40 * rate) + otpay
        print 'Your pay = ' + str(pay)
    else:
        pay = int(hours) * float(rate)
        print 'Your pay = ' + str(pay)
except:
    print 'Error, please enter numeric input'
# Ex 3.3: map a score in [0.0, 1.0] to a letter grade; anything else is rejected.
try:
    input_score = raw_input('Enter Test Score for Grade: ')
    score = float(input_score)
    if score <= 1.0 and score >= 0.0:
        if score >= 0.9:
            grade = 'A'
        elif score >= 0.8:
            grade = 'B'
        elif score >= 0.7:
            grade = 'C'
        elif score >= 0.6:
            grade = 'D'
        else:
            grade = 'F'
        print grade
    else:
        print 'Bad score'
except:
    print 'Bad score'
|
#Function One - Sum array
def sum_array(array):
    '''Return the sum of all items in array (0 for an empty array).'''
    # Fix: the original unconditionally indexed array[0], so an empty list
    # raised IndexError instead of summing to 0.
    if not array:
        return 0
    if len(array) == 1:
        return array[0]
    return array[0] + sum_array(array[1:])
#Function Two - Find nth fibonacci number
def fibonacci(n):
    '''Return nth term in fibonacci sequence (None for negative input).'''
    if n < 0:
        print('Invalid input, try again')
        return None
    # Iterative pair update runs in O(n); the original double recursion was
    # exponential-time. Outputs are identical: fib(0)=0, fib(1)=1, ...
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
#Function Three - find n!
def factorial(n):
    '''Return n! (None for negative input).'''
    # Guard-clause style: dispose of the invalid and base cases up front.
    if n < 0:
        print('No Factorial For Negative Number')
        return None
    if n in (0, 1):
        return 1
    return n * factorial(n - 1)
#Function Four - reverse word
def reverse(word):
    '''Return word in reverse'''
    # Join the characters iterated back-to-front instead of prepending
    # one character at a time.
    return "".join(reversed(word))
|
#taking arguments
def sumAll(*args):
    """Return the sum of all positional arguments (0 when none are given)."""
    # Delegate to the built-in sum; the original manual loop shadowed the
    # builtin with a local variable named `sum`.
    return sum(args)
print("Sum :", sumAll(1,2,3,4,5))
|
from .point import point_trans, gray, xy2index |
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
import shlex
import subprocess
import logging
from threading import Thread
import Utils
class AnalyzeException(Exception):
    """Raised for errors while collecting or parsing LRMS analyzer data."""
    def __init__(self, msg):
        super(AnalyzeException, self).__init__(msg)
class DataCollector:
    """Aggregates per-VO / per-queue job statistics parsed from LRMS events.

    Subclasses override register()/estimate() to derive the response-time
    values exposed through the ert/wrt accessors.
    """
    logger = logging.getLogger("Analyzer.DataCollector")
    def __init__(self, config, mjTable):
        self.config = config
        self.mjTable = mjTable  # per-VO max-job limits
        self.active = -1        # active slots reported by the LRMS dump (-1 = unknown)
        self.free = -1          # free slots reported by the LRMS dump (-1 = unknown)
        self.now = -1           # timestamp of the LRMS dump (-1 = unknown)
        self.cycle = -1         # scheduler cycle time (-1 = unknown)
        if config.has_option('Scheduler','cycle_time'):
            try:
                self.cycle = int(config.get('Scheduler','cycle_time'))
            except:
                DataCollector.logger.error("Wrong cycle_time; default used")
        # Job counters keyed by (state, vo), (queue, state) and (queue, state, vo).
        self.njStateVO = {}
        self.njQueueState = {}
        self.njQueueStateVO = {}
        # Per-queue time estimates; E/W presumably "estimated"/"worst"
        # response time — confirm against the estimator subclasses.
        self.ert = {}
        self.wrt = {}
    def register(self, evndict):
        """Hook for subclasses: called once per parsed event dict."""
        pass
    def estimate(self):
        """Hook for subclasses: called once after all events are loaded."""
        pass
    def freeSlots(self, queue, vo):
        #Since the scheduler handles free slots and max free slots per VO
        #without considering the queue, it is necessary to aggregate
        #the value for all queues, so first parameter is ignored
        if self.queuedJobsForVO(vo) > 0:
            return 0
        if vo in self.mjTable:
            availSlots = max(self.mjTable[vo]-self.runningJobsForVO(vo), 0)
            return min(self.free, availSlots)
        return self.free
    def load(self, event):
        """Parse one textual event and increment the matching job counters.

        NOTE(review): the event string is parsed with eval() (builtins
        disabled); this is still unsafe on untrusted input — consider
        ast.literal_eval if events are plain dict literals.
        """
        tmpdict = eval(event, {"__builtins__" : {}})
        # Every event must carry the group (VO), queue and job state.
        for label in ['group', 'queue', 'state']:
            if not label in tmpdict:
                raise AnalyzeException("Missing %s in %s" % (label, event))
        # Translate local group names to VO names where a mapping exists.
        vomap = self.config.get('Main','vomap')
        if tmpdict['group'] in vomap:
            tmpdict['group'] = vomap[tmpdict['group']]
        key1 = (tmpdict['state'], tmpdict['group'])
        key2 = (tmpdict['queue'], tmpdict['state'])
        key3 = (tmpdict['queue'], tmpdict['state'], tmpdict['group'])
        if key1 in self.njStateVO:
            self.njStateVO[key1] += 1
        else:
            self.njStateVO[key1] = 1
        if key2 in self.njQueueState:
            self.njQueueState[key2] += 1
        else:
            self.njQueueState[key2] = 1
        if key3 in self.njQueueStateVO:
            self.njQueueStateVO[key3] += 1
        else:
            self.njQueueStateVO[key3] = 1
        self.register(tmpdict)
    def getERT(self, qName):
        return self.ert[qName]
    def setERT(self, qName, value):
        # Values below the scheduler cycle are clamped to half a cycle.
        if value < self.cycle:
            self.ert[qName] = int(self.cycle / 2)
        else:
            self.ert[qName] = int(value)
    def isSetERT(self, qName):
        return qName in self.ert
    def getWRT(self, qName):
        return self.wrt[qName]
    def setWRT(self, qName, value):
        # Values below the scheduler cycle are clamped to one full cycle.
        if value < self.cycle:
            self.wrt[qName] = int(self.cycle)
        else:
            self.wrt[qName] = int(value)
    def isSetWRT(self, qName):
        return qName in self.wrt
    def runningJobsForVO(self, voname):
        key = ('running', voname)
        if key in self.njStateVO:
            return self.njStateVO[key]
        return 0
    def runningJobsOnQueue(self, qname):
        key = (qname, 'running')
        if key in self.njQueueState:
            return self.njQueueState[key]
        return 0
    def runningJobsOnQueueForVO(self, qname, voname):
        key = (qname, 'running', voname)
        if key in self.njQueueStateVO:
            return self.njQueueStateVO[key]
        return 0
    def queuedJobsForVO(self, voname):
        key = ('queued', voname)
        if key in self.njStateVO:
            return self.njStateVO[key]
        return 0
    def queuedJobsOnQueue(self, qname):
        key = (qname, 'queued')
        if key in self.njQueueState:
            return self.njQueueState[key]
        return 0
    def queuedJobsOnQueueForVO(self, qname, voname):
        key = (qname, 'queued', voname)
        if key in self.njQueueStateVO:
            return self.njQueueStateVO[key]
        return 0
class DataHandler(Thread):
    """Thread that parses the LRMS backend's stdout into a DataCollector.

    NOTE: this module is Python 2 code (see the `except X, e` syntax below).
    """
    logger = logging.getLogger("Analyzer.DataHandler")
    def __init__(self, in_stream, collector):
        Thread.__init__(self)
        self.stream = in_stream
        self.collector = collector
        # Event lines look like "{...}"; property lines like "name 123".
        self.evn_re = re.compile("^\s*(\{[^}]+\})\s*$")
        self.prop_re = re.compile("^\s*(\w+)\s+(\d+)\s*$")
        self.internerr = None  # set to a message string on internal failure
    def run(self):
        try:
            line = self.stream.readline();
            while line:
                # Scalar properties update the collector's global counters.
                pmatch = self.prop_re.match(line)
                if pmatch:
                    key = pmatch.group(1).lower()
                    value = pmatch.group(2)
                    if key == 'nactive':
                        self.collector.active = int(value)
                    elif key == 'nfree':
                        self.collector.free = int(value)
                    elif key == 'now':
                        self.collector.now = int(value)
                    elif key == 'schedcycle':
                        self.collector.cycle = int(value)
                # Event dicts are forwarded to the collector; bad events are
                # logged but do not stop the parse.
                ematch = self.evn_re.match(line)
                if ematch:
                    try:
                        self.collector.load(ematch.group(1))
                    except AnalyzeException, collect_error:
                        DataHandler.logger.error("Cannot analyze: %s (%s)" %(ematch.group(1), str(collect_error)))
                line = self.stream.readline();
            # All input consumed: let the collector derive its estimates.
            self.collector.estimate()
        except:
            # Record any unexpected failure so analyze() can re-raise it.
            etype, evalue, etraceback = sys.exc_info()
            sys.excepthook(etype, evalue, etraceback)
            self.internerr = "%s: (%s)" % (etype, evalue)
class ErrorHandler(Thread):
    """Thread that drains an error stream, accumulating all lines in self.message."""
    def __init__(self, err_stream):
        Thread.__init__(self)
        self.stream = err_stream
        self.message = ""
    def run(self):
        # Read until EOF (readline returns an empty string), appending each
        # line to the message as it arrives.
        while True:
            line = self.stream.readline()
            if not line:
                break
            self.message = self.message + line
def analyze(config, maxjobTable):
    """Run the configured LRMS backend command and collect its output.

    Returns the populated collector (an estimator instance). Raises
    AnalyzeException on missing configuration, a non-zero backend exit
    code, or an internal parsing error.
    """
    if not config.has_option('LRMS','lrms_backend_cmd'):
        raise AnalyzeException("Missing LRMS backend command in configuration")
    estimatorClass = Utils.loadEstimator(config)
    collector = estimatorClass(config, maxjobTable)
    cmd = shlex.split(config.get('LRMS','lrms_backend_cmd'))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # stdout feeds the collector; stderr is drained concurrently so neither
    # pipe buffer can fill up and deadlock the child process.
    stdout_thread = DataHandler(process.stdout, collector)
    stderr_thread = ErrorHandler(process.stderr)
    stdout_thread.start()
    stderr_thread.start()
    ret_code = process.wait()
    stdout_thread.join()
    stderr_thread.join()
    if ret_code > 0:
        raise AnalyzeException(stderr_thread.message)
    if stdout_thread.internerr:
        raise AnalyzeException(stdout_thread.internerr)
    return collector
|
import os
import logging
import signal
import sys
import json
from time import sleep
import os
import subprocess
import time
import paho.mqtt.client as mqtt
import threading
import hashlib
import database
from config import MQT
# Initialize Logging
logging.basicConfig(level=logging.WARNING) # Global logging configuration
logger = logging.getLogger("mqtt.MQTT_Server") # Logger for this module
logger.setLevel(logging.INFO) # Debugging for this file.
# Global Variables
BROKER_HOST = MQT.BROKER_HOST  # broker hostname/IP from config
BROKER_PORT = MQT.BROKER_PORT  # broker port from config
CLIENT_ID = MQT.CLIENT_ID      # MQTT client id from config
TOPIC = MQT.TOPIC              # topic (filter) subscribed to in on_connect
DATA_BLOCK_SIZE = 2000  # NOTE(review): unused in this module as far as visible
process = None          # NOTE(review): unused in this module as far as visible
client = None # MQTT client instance. See init_mqtt()
# NOTE(review): re-binds the same named logger created above — redundant.
logger = logging.getLogger("mqtt.MQTT_Server")
logger.info("Creating an instance of MQTT_Server")
def switch(msg):
    """Route an incoming MQTT message to a handler based on its topic."""
    payload_text = msg.payload.decode("utf-8")
    if msg.topic == 'cycle/init':
        database.addDeviceToDB(payload_text)
# --- MQTT Related Functions and Callbacks --------------------------------------------------------------
def on_connect(client, user_data, flags, connection_result_code):
    """Connection callback: subscribe to TOPIC once the broker accepts us.

    Subscribing inside on_connect (rather than once after connect()) makes the
    subscription re-establish itself automatically after a reconnect.
    """
    if connection_result_code != 0:
        # Bug fix: previously we subscribed even when the connection was
        # refused; now we report the failure and bail out.
        logger.error("Failed to connect to MQTT Broker: " + mqtt.connack_string(connection_result_code))
        return
    logger.info("Connected to MQTT Broker")
    client.subscribe(TOPIC)
def on_disconnect(client, user_data, disconnection_result_code):
    """Disconnection callback: record that the broker link dropped."""
    logger.error("Disconnected from MQTT Broker")
def on_message(client, user_data, msg):
    """Message callback: log the payload and hand it to the topic dispatcher."""
    logger.debug("Received message for topic {}: {}".format( msg.topic, msg.payload))
    switch(msg)
def on_publish(client, user_data, connection_result_code):
    """Publish callback: confirm a message left this client."""
    logger.info("Message Published")
def signal_handler(sig, frame):
    """Capture Control+C and disconnect from Broker."""
    logger.info("You pressed Control + C. Shutting down, please wait...")
    # Disconnect cleanly so the broker does not fire the last-will mechanism.
    client.disconnect()
    sys.exit(0)
def init_mqtt():
    """Create the module-level MQTT client, wire callbacks, and start its loop."""
    global client
    logger.info("Initialising Client")
    # clean_session=False asks the broker to retain QoS 1/2 messages for this
    # client id while we are offline.
    client = mqtt.Client(client_id=CLIENT_ID, clean_session=False)
    client.enable_logger()  # route Paho's internal logging into Python logging
    # Register all lifecycle callbacks before connecting.
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    client.on_publish = on_publish
    client.connect(BROKER_HOST, BROKER_PORT)
    client.loop_start()  # background network thread
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler) # Capture Control + C
logger.info("Listening for messages on topic '" + str(TOPIC) + "'. Press Control + C to exit.")
init_mqtt()
signal.pause() |
"""
Override `error` method of `argparse.ArgumentParser`
in order to print the complete help on error.
"""
import argparse
import sys
SUPPRESS = argparse.SUPPRESS
class HelpParser(argparse.ArgumentParser):
    """ArgumentParser that prints the complete help text when parsing fails."""

    def error(self, message):
        """Report *message* and the full help on stderr, then exit with status 2."""
        sys.stderr.write('error: %s\n' % message)
        # Improvement: send the help to stderr (not stdout) so that error
        # output stays together and stdout remains clean for pipelines.
        self.print_help(sys.stderr)
        sys.exit(2)
|
import numpy as np
import math
import matplotlib.pyplot as plt
#plt.switch_backend('Qt4Agg')
# read in MSD and plot
data = np.loadtxt("msd.txt", skiprows=2)
fig, ax = plt.subplots()
ax.plot(data[:,0], data[:,4], "r")
# compare to power law
x = np.arange(100, 500, step=100)
ax.plot(x, 0.00007* x**2, "b--")
ax.text(100, 50, r"$\sim t^2$", color="b", fontsize=24)
xx = np.arange(1000, 25000, step=100)
ax.plot(xx, 0.5*xx**(0.5), "g--")
ax.text(1000, 100, r"$\sim t^{1/2}$", color="g", fontsize=24)
xxx = np.arange(140000, 1200000, step=1000)
ax.plot(xxx, 0.002*xxx, "c--")
ax.text(70000, 800, r"$\sim t$", color="c", fontsize=24)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel("time steps")
ax.set_ylabel("MSD [LJ units]")
plt.savefig("MSD.png", dpi=200, bbox_inches="tight")
# simple moving average
def sma(array, period):
    """Simple moving average of *array* over a sliding window of *period* samples.

    Returns an array of length len(array) - period + 1.
    """
    cum = np.cumsum(array, dtype=float)
    # Subtract the shifted cumulative sum so each entry holds a window total.
    cum[period:] = cum[period:] - cum[:-period]
    return cum[period - 1:] / period
# Calculate and plot the time-dependent diffusion coefficient D(t).
delta_step = data[1,0] - data[0,0]  # sampling interval in time steps
timestep = 0.005                    # integration time step (LJ units)
dim = 2                             # spatial dimensionality
# D(t) = (1 / 2d) * d(MSD)/dt, estimated with a finite difference.
D = 1 / (2*dim) * np.diff(data[:,4])/(delta_step*timestep)
fig2, ax2 = plt.subplots()
ax2.plot(data[1:,0], D, "r")
# Overlay a 1000-point moving average to smooth the noisy derivative.
ax2.plot(sma(data[1:,0],1000), sma(D, 1000), "b")
ax2.set_xlabel("time")
ax2.set_ylabel("D(t)")
plt.savefig("D.png", dpi=200, bbox_inches="tight")
plt.show()
|
# -*- coding: utf-8 -*-
import scrapy
import json,re
from lxml import etree
from loguru import logger
from scrapy.utils.project import get_project_settings
from ProxyIP.items import FreeProxyIPItem
class Ip3366FreeSpider(scrapy.Spider):
    """Scrape free proxies from www.ip3366.net and validate each one against
    the configured auth URLs before yielding it as an item."""
    name = 'ip3366_free'
    settings = get_project_settings()
    spider_page_start = settings.get('SPIDER_PAGE_START')
    spider_page_end = settings.get('SPIDER_PAGE_END')
    auth_urls_info = settings.get('AUTH_URLS_INFO')
    # allowed_domains = ['www.ip3366.net']
    start_urls = ['http://www.ip3366.net/free/?stype=1&page={}'.format(page) for page in
                  range(spider_page_start, spider_page_end)]

    def start_requests(self):
        """Issue one request per listing page, bypassing the dupe filter."""
        for start_url in self.start_urls:
            yield scrapy.Request(start_url,
                                 callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Parse the proxy table and schedule one validation request per proxy."""
        html = etree.HTML(response.text)
        tbody_trs = html.xpath('//*[@id="list"]/table/tbody/tr')
        for auth_url_info in self.auth_urls_info:
            for tbody_tr in tbody_trs:
                try:
                    ip = tbody_tr.xpath('td')[0].text.replace(' ', '').replace('\r\n', '')
                    port = tbody_tr.xpath('td')[1].text.replace(' ', '').replace('\r\n', '')
                    ip_types = tbody_tr.xpath('td')[3].text.replace(' ', '').replace('\r\n', '').lower()
                    resp_speed = float(tbody_tr.xpath('td')[5].text.replace(' ', '').replace('\r\n', '')[:-1])
                    update_time = tbody_tr.xpath('td')[6].text.replace('\r\n', '')
                    url_type = re.findall('(http|https)://.*?', auth_url_info['url'])[0].lower()
                    # An http-only proxy cannot serve an https auth URL.
                    if url_type == 'https' and 'https' not in ip_types:
                        continue
                    proxy = '{}://{}:{}'.format(url_type, ip, port)
                    logger.info('CHECK PROXY:{}'.format(proxy))
                    # Bug fix: the item used to live on `self.proxy_ip_item`,
                    # so concurrent request callbacks overwrote each other's
                    # data. Build a fresh item per request and carry it in meta.
                    proxy_ip_item = FreeProxyIPItem()
                    proxy_ip_item['name'] = self.name
                    proxy_ip_item['url'] = auth_url_info['url']
                    proxy_ip_item['url_name'] = auth_url_info['name']
                    headers = {'content-type': 'application/json'}
                    yield scrapy.Request(auth_url_info['url'], headers=headers, body=json.dumps(auth_url_info['body']),
                                         method='POST', callback=self.auth_proxyip,
                                         meta={'bodyJson': auth_url_info['body'], 'proxy': proxy,
                                               'item': proxy_ip_item},
                                         dont_filter=True
                                         )
                except Exception as e:
                    logger.warning('警告:解析失败!错误提示:{}'.format(e))

    def auth_proxyip(self, response):
        """The auth URL answered through the proxy; emit the validated item."""
        proxy_ip_item = response.meta['item']
        proxy_ip_item['proxy'] = response.meta['proxy']
        logger.info(response.meta['proxy'])
        yield proxy_ip_item
|
class UserSignalUIParameters:
    """UI parameter limits for the user signal editor.

    Each parameter group defines min/max bounds, the number of decimal places
    shown in its line edit, and a scaling constant that maps the real-valued
    range onto integer slider positions (sliders only take integers).
    """
    # ACCELERATION TIME
    AccelerationTimeMin = 0.1
    AccelerationTimeMax = 600.0
    AccelerationTimeLineEditAccuracy = 2
    AccelerationTimeCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    AccelerationTimeSliderMin = AccelerationTimeMin * AccelerationTimeCalcConstant
    AccelerationTimeSliderMax = AccelerationTimeMax * AccelerationTimeCalcConstant
    # PLATEAU TIME
    PlateauTimeMin = 0.0
    PlateauTimeMax = 600.0
    PlateauTimeLineEditAccuracy = 2
    PlateauTimeCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    PlateauTimeSliderMin = PlateauTimeMin * PlateauTimeCalcConstant
    PlateauTimeSliderMax = PlateauTimeMax * PlateauTimeCalcConstant
    # DECELERATION TIME
    DecelerationTimeMin = 0.1
    DecelerationTimeMax = 600.0
    DecelerationTimeLineEditAccuracy = 2
    DecelerationTimeCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    DecelerationTimeSliderMin = DecelerationTimeMin * DecelerationTimeCalcConstant
    DecelerationTimeSliderMax = DecelerationTimeMax * DecelerationTimeCalcConstant
    # END TIME
    EndTimeMin = 0.1
    EndTimeMax = 600.0
    EndTimeLineEditAccuracy = 2
    EndTimeCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    EndTimeSliderMin = EndTimeMin * EndTimeCalcConstant
    EndTimeSliderMax = EndTimeMax * EndTimeCalcConstant
    # START TIME
    StartTimeMin = 0.1
    StartTimeMax = 600.0
    StartTimeLineEditAccuracy = 2
    StartTimeCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    StartTimeSliderMin = StartTimeMin * StartTimeCalcConstant
    StartTimeSliderMax = StartTimeMax * StartTimeCalcConstant
    # LOW LEVEL
    LowLevelFrequencyMin = 0.1
    LowLevelFrequencyMax = 20.0
    LowLevelFrequencyLineEditAccuracy = 2
    LowLevelFrequencyCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    LowLevelFrequencySliderMin = LowLevelFrequencyMin * LowLevelFrequencyCalcConstant
    LowLevelFrequencySliderMax = LowLevelFrequencyMax * LowLevelFrequencyCalcConstant
    # HIGH LEVEL
    HighLevelFrequencyMin = 0.1
    HighLevelFrequencyMax = 80.0
    HighLevelFrequencyLineEditAccuracy = 2
    HighLevelFrequencyCalcConstant = 100  # factor 100: values carry two decimal places (10**2)
    HighLevelFrequencySliderMin = HighLevelFrequencyMin * HighLevelFrequencyCalcConstant
    HighLevelFrequencySliderMax = HighLevelFrequencyMax * HighLevelFrequencyCalcConstant
    # POINTS NUMBER (integer-valued, so no scaling needed)
    PointsNumberMin = 0
    PointsNumberMax = 200
    PointsNumberLineEditAccuracy = 0
    PointsNumberCalcConstant = 1
    PointsNumberSliderMin = PointsNumberMin * PointsNumberCalcConstant
    PointsNumberSliderMax = PointsNumberMax * PointsNumberCalcConstant
    # VERTICAL OFFSET (one decimal place, hence factor 10)
    VerticalOffsetMin = 0
    VerticalOffsetMax = 20
    VerticalOffsetLineEditAccuracy = 1
    VerticalOffsetCalcConstant = 10
    VerticalOffsetSliderMin = VerticalOffsetMin * VerticalOffsetCalcConstant
    VerticalOffsetSliderMax = VerticalOffsetMax * VerticalOffsetCalcConstant
    # REQUEST FREQUENCY
    RequestFreqLineEditAccuracy = 2
    RequestFreqCalcConstant = 100
    RequestFreqSliderMin = 0.1 * RequestFreqCalcConstant
    RequestFreqSliderMax = 1.1 * RequestFreqCalcConstant
    def __init__(self):
        # The class is a pure constants container; instances carry no state.
        pass
from test.helper import TestHelper
from beetsplug.plexupdate import get_music_section, update_plex
import unittest
import responses
class PlexUpdateTest(unittest.TestCase, TestHelper):
    """Tests for the beets `plexupdate` plugin; all HTTP traffic to the Plex
    server is mocked with the `responses` library."""
    def add_response_get_music_section(self, section_name='Music'):
        """Create response for mocking the get_music_section function.
        """
        # Escape double quotes so the name embeds safely in the XML attribute below.
        escaped_section_name = section_name.replace('"', '\\"')
        # Canned /library/sections payload listing three libraries; the music
        # library (key="2") carries `section_name` as its title.
        body = (
            '<?xml version="1.0" encoding="UTF-8"?>'
            '<MediaContainer size="3" allowSync="0" '
            'identifier="com.plexapp.plugins.library" '
            'mediaTagPrefix="/system/bundle/media/flags/" '
            'mediaTagVersion="1413367228" title1="Plex Library">'
            '<Directory allowSync="0" art="/:/resources/movie-fanart.jpg" '
            'filters="1" refreshing="0" thumb="/:/resources/movie.png" '
            'key="3" type="movie" title="Movies" '
            'composite="/library/sections/3/composite/1416232668" '
            'agent="com.plexapp.agents.imdb" scanner="Plex Movie Scanner" '
            'language="de" uuid="92f68526-21eb-4ee2-8e22-d36355a17f1f" '
            'updatedAt="1416232668" createdAt="1415720680">'
            '<Location id="3" path="/home/marv/Media/Videos/Movies" />'
            '</Directory>'
            '<Directory allowSync="0" art="/:/resources/artist-fanart.jpg" '
            'filters="1" refreshing="0" thumb="/:/resources/artist.png" '
            'key="2" type="artist" title="' + escaped_section_name + '" '
            'composite="/library/sections/2/composite/1416929243" '
            'agent="com.plexapp.agents.lastfm" scanner="Plex Music Scanner" '
            'language="en" uuid="90897c95-b3bd-4778-a9c8-1f43cb78f047" '
            'updatedAt="1416929243" createdAt="1415691331">'
            '<Location id="2" path="/home/marv/Media/Musik" />'
            '</Directory>'
            '<Directory allowSync="0" art="/:/resources/show-fanart.jpg" '
            'filters="1" refreshing="0" thumb="/:/resources/show.png" '
            'key="1" type="show" title="TV Shows" '
            'composite="/library/sections/1/composite/1416320800" '
            'agent="com.plexapp.agents.thetvdb" scanner="Plex Series Scanner" '
            'language="de" uuid="04d2249b-160a-4ae9-8100-106f4ec1a218" '
            'updatedAt="1416320800" createdAt="1415690983">'
            '<Location id="1" path="/home/marv/Media/Videos/Series" />'
            '</Directory>'
            '</MediaContainer>')
        status = 200
        content_type = 'text/xml;charset=utf-8'
        responses.add(responses.GET,
                      'http://localhost:32400/library/sections',
                      body=body,
                      status=status,
                      content_type=content_type)
    def add_response_update_plex(self):
        """Create response for mocking the update_plex function.
        """
        # The refresh endpoint replies with an empty body on success.
        body = ''
        status = 200
        content_type = 'text/html'
        responses.add(responses.GET,
                      'http://localhost:32400/library/sections/2/refresh',
                      body=body,
                      status=status,
                      content_type=content_type)
    def setUp(self):
        """Set up a beets sandbox with the plexupdate plugin loaded."""
        self.setup_beets()
        self.load_plugins('plexupdate')
        self.config['plex'] = {
            'host': 'localhost',
            'port': 32400}
    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()
    @responses.activate
    def test_get_music_section(self):
        """The default 'Music' library should resolve to section key '2'."""
        # Adding response.
        self.add_response_get_music_section()
        # Test if section key is "2" out of the mocking data.
        self.assertEqual(get_music_section(
            self.config['plex']['host'],
            self.config['plex']['port'],
            self.config['plex']['token'],
            self.config['plex']['library_name'].get(),
            self.config['plex']['secure'],
            self.config['plex']['ignore_cert_errors']), '2')
    @responses.activate
    def test_get_named_music_section(self):
        """A custom library name should also resolve to section key '2'."""
        # Adding response.
        self.add_response_get_music_section('My Music Library')
        self.assertEqual(get_music_section(
            self.config['plex']['host'],
            self.config['plex']['port'],
            self.config['plex']['token'],
            'My Music Library',
            self.config['plex']['secure'],
            self.config['plex']['ignore_cert_errors']), '2')
    @responses.activate
    def test_update_plex(self):
        """update_plex should hit the refresh endpoint and get HTTP 200."""
        # Adding responses.
        self.add_response_get_music_section()
        self.add_response_update_plex()
        # Testing status code of the mocking request.
        self.assertEqual(update_plex(
            self.config['plex']['host'],
            self.config['plex']['port'],
            self.config['plex']['token'],
            self.config['plex']['library_name'].get(),
            self.config['plex']['secure'],
            self.config['plex']['ignore_cert_errors']).status_code, 200)
def suite():
    """Build a unittest suite containing every test in this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)


if __name__ == '__main__':
    # Allow running this test file directly.
    unittest.main(defaultTest='suite')
|
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import path
from contest import views
# URL namespace used by {% url 'contest:...' %} and reverse('contest:...').
app_name = 'contest'
urlpatterns = [
    # Landing page and authentication
    path('', views.contest.Index.as_view(), name='index'),
    path('login', LoginView.as_view(), name='login'),
    path('logout', LogoutView.as_view(), name='logout'),
    path('signup', views.SignupView.as_view(), name='signup'),
    # Problem management and submission (slug identifies the problem)
    path('p/new', views.ProblemCreate.as_view(), name='problem-create'),
    path('p/<slug>', views.ProblemDetail.as_view(), name='problem'),
    path('p/<slug>/update', views.ProblemUpdateUpload.as_view(), name='problem-update'),
    path('p/<slug>/delete', views.ProblemDelete.as_view(), name='problem-delete'),
    path('p/<slug>/submit', views.ProblemSubmit.as_view(), name='problem-submit'),
    path('p/<slug>/submit/upload', views.ProblemSubmitUpload.as_view(),
         name='problem-submit-upload'),
    # Download a problem's input data
    path('input/<slug>', views.ProblemDownload.as_view(),
         name='problem-download'),
    # Results
    path('scoreboard', views.ScoreBoard.as_view(), name='scoreboard'),
    path('submissions', views.SubmissionList.as_view(), name='submissions'),
    path('submissions/<pk>', views.SubmissionDetail.as_view(), name='submission'),
]
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Pre-made gateware that implements an ILA connection serial. """
from amaranth import Elaboratable, Module, Signal, Cat
from ...debug.ila import StreamILA, ILAFrontend
from ...stream import StreamInterface
from ..usb2.device import USBDevice
from ..usb2.request import USBRequestHandler, StallOnlyRequestHandler
from ..usb2.endpoints.stream import USBMultibyteStreamInEndpoint
from usb_protocol.types import USBRequestType
from usb_protocol.emitters import DeviceDescriptorCollection
from usb_protocol.emitters.descriptors import cdc
class USBIntegratedLogicAnalyzer(Elaboratable):
    """ Pre-made gateware that presents a USB-connected ILA.
    Samples are presented over a USB endpoint.
    """
    # Bulk IN endpoint used to stream captured samples to the host.
    BULK_ENDPOINT_NUMBER = 1
    def __init__(self, *args, bus=None, delayed_connect=False, max_packet_size=512, **kwargs):
        # When set, the device only enumerates after a capture completes.
        self._delayed_connect = delayed_connect
        self._max_packet_size = max_packet_size
        # Store our USB bus.
        self._bus = bus
        # Force the ILA's output into the USB domain.
        kwargs['o_domain'] = 'usb'
        # Create our core ILA, which we'll use later.
        self.ila = StreamILA(*args, **kwargs)
        #
        # I/O port
        #
        # Copy some core parameters from our inner ILA.
        self.signals = self.ila.signals
        self.sample_width = self.ila.sample_width
        self.sample_depth = self.ila.sample_depth
        self.sample_rate = self.ila.sample_rate
        self.sample_period = self.ila.sample_period
        self.bits_per_sample = self.ila.bits_per_sample
        self.bytes_per_sample = self.ila.bytes_per_sample
        # Expose our ILA's trigger and status ports directly.
        self.trigger = self.ila.trigger
        self.sampling = self.ila.sampling
        self.complete = self.ila.complete
    def create_descriptors(self):
        """ Create the descriptors we want to use for our device. """
        descriptors = DeviceDescriptorCollection()
        #
        # We'll add the major components of the descriptors we we want.
        # The collection we build here will be necessary to create a standard endpoint.
        #
        # We'll need a device descriptor...
        with descriptors.DeviceDescriptor() as d:
            d.idVendor = 0x16d0
            d.idProduct = 0x05a5
            d.iManufacturer = "LUNA"
            d.iProduct = "Integrated Logic Analyzer"
            d.iSerialNumber = "no serial"
            d.bNumConfigurations = 1
        # ... and a description of the USB configuration we'll provide,
        # with a single interface carrying our bulk IN sample endpoint.
        with descriptors.ConfigurationDescriptor() as c:
            with c.InterfaceDescriptor() as i:
                i.bInterfaceNumber = 0
                with i.EndpointDescriptor() as e:
                    # 0x80 marks this as an IN (device-to-host) endpoint.
                    e.bEndpointAddress = 0x80 | self.BULK_ENDPOINT_NUMBER
                    e.wMaxPacketSize = self._max_packet_size
        return descriptors
    def elaborate(self, platform):
        """ Build the USB device around the inner ILA and wire its sample stream out. """
        m = Module()
        m.submodules.ila = self.ila
        # If we have a bus name rather than a bus object,
        # request the bus from our platform.
        if isinstance(self._bus, str):
            self._bus = platform.request(self._bus)
        # If we have no bus, grab the platform's default USB connection.
        if self._bus is None:
            self._bus = platform.request(platform.default_usb_connection)
        m.submodules.usb = usb = USBDevice(bus=self._bus)
        # Add our standard control endpoint to the device.
        descriptors = self.create_descriptors()
        usb.add_standard_control_endpoint(descriptors)
        # Add a stream endpoint to our device.
        stream_ep = USBMultibyteStreamInEndpoint(
            endpoint_number=self.BULK_ENDPOINT_NUMBER,
            max_packet_size=self._max_packet_size,
            byte_width=self.ila.bytes_per_sample
        )
        usb.add_endpoint(stream_ep)
        # Handle our connection criteria: we'll either connect immediately,
        # or once sampling is done, depending on our _delayed_connect setting.
        connect = Signal()
        if self._delayed_connect:
            with m.If(self.ila.complete):
                m.d.usb += connect.eq(1)
        else:
            m.d.comb += connect.eq(1)
        # Connect up our I/O and our ILA streams.
        m.d.comb += [
            stream_ep.stream .stream_eq(self.ila.stream),
            usb.connect .eq(connect)
        ]
        return m
class USBIntegratedLogicAnalyzerFrontend(ILAFrontend):
    """ Frontend for USB-attached integrated logic analyzers.
    Parameters
    ------------
    delay: int
        The number of seconds to wait before trying to connect.
    ila: IntegratedLogicAnalyzer
        The ILA object to work with.
    """
    def __init__(self, *args, ila, delay=3, **kwargs):
        # Imported locally so the module stays importable without pyusb installed.
        import usb
        import time
        # If we have a connection delay, wait that long.
        if delay:
            time.sleep(delay)
        # Create our USB connection the device
        # (0x5a5 is the same value as the 0x05a5 idProduct in the descriptors).
        self._device = usb.core.find(idVendor=0x16d0, idProduct=0x5a5)
        super().__init__(ila)
    def _split_samples(self, all_samples):
        """ Returns an iterator that iterates over each sample in the raw binary of samples. """
        from apollo_fpga.support.bits import bits
        sample_width_bytes = self.ila.bytes_per_sample
        # Iterate over each sample, and yield its value as a bits object.
        for i in range(0, len(all_samples), sample_width_bytes):
            raw_sample = all_samples[i:i + sample_width_bytes]
            sample_length = len(Cat(self.ila.signals))
            yield bits.from_bytes(raw_sample, length=sample_length, byteorder='little')
    def _read_samples(self):
        """ Reads a set of ILA samples, and returns them. """
        sample_width_bytes = self.ila.bytes_per_sample
        total_to_read = self.ila.sample_depth * sample_width_bytes
        # Fetch all of our samples from the given device.
        # Endpoint 0x81 is the bulk IN endpoint; timeout=0 waits indefinitely.
        all_samples = self._device.read(0x81, total_to_read, timeout=0)
        return list(self._split_samples(all_samples))
|
from ryu.base import app_manager
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller import ofp_event
from ryu.lib.packet import packet, ether_types, ethernet, arp
# ofproto 在这个目录下,基本分为两类文件,一类是协议的数据结构定义,另一类是协议解析,也即数据包处理函数文件。
# Its like database that save the ip-mac table
arp_table = {"10.0.0.1": "00:00:00:00:00:01",
"10.0.0.2": "00:00:00:00:00:02",
"10.0.0.3": "00:00:00:00:00:03",
"10.0.0.4": "00:00:00:00:00:04"
}
class swich(app_manager.RyuApp):
    """Ryu app: installs a NORMAL-forwarding table-miss flow, learns source
    MAC ports, and answers ARP requests from the static ``arp_table``."""

    def __init__(self, *args, **kwargs):
        super(swich, self).__init__(*args, **kwargs)
        # dpid -> {src_mac: in_port}, learned from packet-ins.
        self.Mac_Port_Table = {}

    # Initial connection handling: install the default (table-miss) flow.
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install a priority-0 flow that forwards everything via OFPP_NORMAL."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        ofproto_parser = datapath.ofproto_parser
        match = ofproto_parser.OFPMatch()  # wildcard match
        action = [ofproto_parser.OFPActionOutput(ofproto.OFPP_NORMAL, ofproto.OFPCML_NO_BUFFER)]  # (port, max)
        inst = [ofproto_parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, action)]
        out = ofproto_parser.OFPFlowMod(
            priority=0,
            datapath=datapath,
            match=match,
            instructions=inst,
        )
        datapath.send_msg(out)  # The controller sends this message to modify the flow table.

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Learn the sender's port and answer ARP requests from the table."""
        msg = ev.msg
        datapath = msg.datapath
        in_port = msg.match['in_port']
        # Bug fix: ryu's Datapath exposes the datapath id as `id`, not `dpid`.
        dpid = datapath.id
        self.Mac_Port_Table.setdefault(dpid, {})
        pkt = packet.Packet(msg.data)
        pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
        if not pkt_ethernet:
            # Robustness: previously the .dst/.src accesses ran before this
            # check and would have raised on a non-ethernet packet.
            return
        src_mac = pkt_ethernet.src
        self.Mac_Port_Table[dpid][src_mac] = in_port  # MAC learning
        pkt_arp = pkt.get_protocol(arp.arp)
        if not pkt_arp:
            return
        print("datapath id: " + str(dpid))
        print("port: " + str(in_port))
        print("pkt_eth.dst: " + str(pkt_ethernet.dst))
        print("pkt_eth.src: " + str(pkt_ethernet.src))
        print("pkt_arp: " + str(pkt_arp))
        print("pkt_arp:src_ip: " + str(pkt_arp.src_ip))
        print("pkt_arp:dst_ip: " + str(pkt_arp.dst_ip))
        print("pkt_arp:src_mac: " + str(pkt_arp.src_mac))
        print("pkt_arp:dst_mac: " + str(pkt_arp.dst_mac))
        # Bug fix: the old guard called len() on the learned port (an int),
        # which always raised and was swallowed by a bare `except BaseException`,
        # so arp_process was never reached.
        if src_mac in self.Mac_Port_Table[dpid]:
            self.arp_process(datapath, pkt_ethernet, pkt_arp, in_port)

    def arp_process(self, datapath, pkt_ethernet, pkt_arp, in_port):
        """Reply to an ARP request using the static ``arp_table``.

        :param datapath: switch connection the request arrived on
        :param pkt_ethernet: parsed ethernet header of the request
        :param pkt_arp: parsed ARP payload (opcode 1 = request, 2 = reply)
        :param in_port: port the request arrived on; the reply is sent back there
        """
        if pkt_arp.opcode != arp.ARP_REQUEST:
            return
        # -------- Check database -------------------
        dst_mac = arp_table.get(pkt_arp.dst_ip)
        # Bug fix: .get() returns None for unknown IPs; the old len(dst_mac)
        # test raised TypeError in that case.
        if not dst_mac:
            return
        arp_resp = packet.Packet()  # Construct the reply packet
        arp_resp.add_protocol(ethernet.ethernet(
            dst=pkt_ethernet.src,            # destination: the requester
            src=dst_mac,                     # source: the resolved MAC
            ethertype=pkt_ethernet.ethertype
        ))
        arp_resp.add_protocol(arp.arp(
            opcode=arp.ARP_REPLY,
            # Target IP and MAC
            src_mac=dst_mac,
            src_ip=pkt_arp.dst_ip,
            # Source IP and MAC
            dst_mac=pkt_arp.src_mac,
            dst_ip=pkt_arp.src_ip
        ))
        arp_resp.serialize()
        ofproto_parser = datapath.ofproto_parser
        ofproto = datapath.ofproto
        # Bug fix: OFPActionOutput lives in the parser module, not in ofproto.
        actions = [ofproto_parser.OFPActionOutput(in_port)]
        # Bug fix: build OFPPacketOut with explicit keywords — the old
        # positional call passed in_port where buffer_id is expected.
        out = ofproto_parser.OFPPacketOut(datapath=datapath,
                                          buffer_id=ofproto.OFP_NO_BUFFER,
                                          in_port=ofproto.OFPP_CONTROLLER,
                                          actions=actions,
                                          data=arp_resp.data)
        datapath.send_msg(out)
# -*- coding: utf-8 -*-
# Read the four story elements from the user (prompts are in Turkish).
kim = input('Kim?\n')               # "Who?"
kiminle = input('Kiminle?\n')       # "With whom?"
nerede = input('Nerede?\n')         # "Where?"
neYapiyor = input('Ne yapıyor?\n')  # "Doing what?"
print(kim, kiminle, nerede, neYapiyor)
# Generated by Django 2.0.7 on 2018-09-13 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: switches the primary keys of hotelmsg,
    # productmsg and supplier to CharField and widens two productmsg fields.

    dependencies = [
        ('ProductDT', '0003_auto_20180913_1509'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hotelmsg',
            name='id',
            field=models.CharField(max_length=100, primary_key=True, serialize=False, verbose_name='酒店id'),
        ),
        migrations.AlterField(
            model_name='productmsg',
            name='id',
            field=models.CharField(max_length=50, primary_key=True, serialize=False, verbose_name='产品id'),
        ),
        migrations.AlterField(
            model_name='productmsg',
            name='name',
            field=models.CharField(max_length=100, verbose_name='产品名称'),
        ),
        migrations.AlterField(
            model_name='productmsg',
            name='product_type',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='产品类型'),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='id',
            field=models.CharField(max_length=100, primary_key=True, serialize=False, verbose_name='供应商id'),
        ),
    ]
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from .models import Player
from . import makeTeam
def index(request):
    """Plain-text smoke-test endpoint."""
    response = HttpResponse("Test")
    return response
def player(request):
    """Render the page listing every registered player."""
    context = {"players": Player.objects.all()}
    return render(request, "team/player.html", context)
#チーム分け
def make_team(request):
if request.method == "POST":
player1 = Player.objects.get(pk=int(request.POST["player1"]))
player2 = Player.objects.get(pk=int(request.POST["player2"]))
player3 = Player.objects.get(pk=int(request.POST["player3"]))
player4 = Player.objects.get(pk=int(request.POST["player4"]))
player5 = Player.objects.get(pk=int(request.POST["player5"]))
player6 = Player.objects.get(pk=int(request.POST["player6"]))
player7 = Player.objects.get(pk=int(request.POST["player7"]))
player8 = Player.objects.get(pk=int(request.POST["player8"]))
player_name = [player1.name, player2.name, player3.name, player4.name, player5.name, player6.name, player7.name, player8.name]
player_rate = [player1.rate, player2.rate, player3.rate, player4.rate, player5.rate, player6.rate, player7.rate, player8.rate]
four_vs_four_team = makeTeam.MakeTeam()
four_vs_four_team.player_name = player_name
four_vs_four_team.player_rate = player_rate
four_vs_four_team.make_player_dict()
four_vs_four_team.make_combinations_of_team()
four_vs_four_team.extract_team_from_combinations_of_team()
four_vs_four_team.choose_a_random_team()
four_vs_four_team.extract_player_name()
four_vs_four_team.calculate_total_rate()
context = {
"t1_player1": four_vs_four_team.t1_player[0],
"t1_player2": four_vs_four_team.t1_player[1],
"t1_player3": four_vs_four_team.t1_player[2],
"t1_player4": four_vs_four_team.t1_player[3],
"t2_player1": four_vs_four_team.t2_player[0],
"t2_player2": four_vs_four_team.t2_player[1],
"t2_player3": four_vs_four_team.t2_player[2],
"t2_player4": four_vs_four_team.t2_player[3],
"t1_win_rate": four_vs_four_team.t1_win_rate,
"t1_total_rate": four_vs_four_team.t1_total_rate,
"t2_total_rate": four_vs_four_team.t2_total_rate,
}
return render(request, "team/team.html", context)
#チーム勝利後のレート確定処理
def which_team_won(request):
    """Record which team won and render the result page."""
    if request.method == "POST":
        # Bug fix: getlist() returns a list, so the old `won_team == 1`
        # comparison was always False and the view always returned None
        # (a server error in Django).
        won_team = request.POST.get("won_team")
        # NOTE(review): the original appeared to render only for team 1;
        # rendering for any submitted winner — confirm the intended flow.
        if won_team is not None:
            context = {
                "won_team": won_team
            }
            return render(request, "team/result.html", context)
#TODO
#total_rateとwin_rateの修正 done
#各処理関数化 done
#クラス作成、モジュール化 done
#Team1 or Team2勝利のSubmitボタンとレート処理作成
#post data処理時の例外処理作成
#勝率3-7割のチームが見つからない場合の例外処理作成
#selectボタンで同一のプレイヤーを選択させないようにする処理作成
#汎用処理のView化
#UnitTest作成
#勝率ではなく合計レートの間がいくつまで、とかにしてみる?
#その場合acceptable rate differenceの処理が必要 |
import unittest
from katas.kyu_6.your_ride_is_here import ride
class RideTestCase(unittest.TestCase):
    """Tests for the `ride` kata solution."""
    def test_equals(self):
        # These two names are expected to match -> 'GO'.
        self.assertEqual(ride('COMETQ', 'HVNGAT'), 'GO')
    def test_equals_2(self):
        # These two names are expected not to match -> 'STAY'.
        self.assertEqual(ride('STARAB', 'USACO'), 'STAY')
|
import math
from typing import List, Optional
import numpy as np
from pypm import DataPoints
from pypm.icp import ICP, Penalty
from picp.util.geometry import generate_tf_mat, extract_xyt_from_tf_mat, make_homogeneous
from picp.util.pose import Pose
def icp_with_random_perturbation(icp: ICP, read: DataPoints, ref: DataPoints, init_tf: np.ndarray,
                                 nb_sample: int, pertur_cov: np.ndarray, penalties: Optional[List[Penalty]] = None):
    """Run ICP `nb_sample` times, each time perturbing `init_tf` with a random
    offset drawn from a zero-mean Gaussian with covariance `pertur_cov`.

    Returns (tfs, iter_data): an (nb_sample, 3) array of (x, y, theta)
    registration results and the per-run iteration dumps.
    """
    # Bug fix: avoid the mutable-default-argument pitfall of `penalties=[]`.
    penalties = [] if penalties is None else penalties
    tfs = []
    iter_data = []
    perturbations = np.random.multivariate_normal([0, 0, 0], pertur_cov, nb_sample)
    for i in range(nb_sample):
        pertu = generate_tf_mat(perturbations[i, 0:2], perturbations[i, 2])
        icp.enable_dump(tf=True)
        tf, iter_datum = icp.compute(read, ref, init_tf @ pertu, penalties)
        iter_data.append(iter_datum)
        tfs.append((tf[0, 2],  # x
                    tf[1, 2],  # y
                    np.arccos(tf[0, 0]) if 1.0 - tf[0, 0] > 1e-6 else 0.0))  # theta
    return np.array(tfs), iter_data
def icp_mapping_with_random_perturbation(icp: ICP, scans: List[DataPoints], gt_tfs: List[np.ndarray],
                                         nb_sample: int, pertur_cov: np.ndarray, penalties: Optional[List[Penalty]] = None):
    """Run `icp_mapping` `nb_sample` times with random initial perturbations.

    Returns one trajectory (list of (pose, iteration-dump) steps) per sample.
    """
    # Bug fix: avoid the mutable-default-argument pitfall of `penalties=[]`.
    penalties = [] if penalties is None else penalties
    trajectories = []
    for _ in range(nb_sample):
        steps = icp_mapping(icp, scans, gt_tfs, pertur_cov, penalties)
        trajectories.append(steps)
    return trajectories
def apply_tf_on_scan(tf: np.ndarray, scan: np.ndarray):
    """Apply the homogeneous transform *tf* to *scan* and return 2D points."""
    transformed = tf @ make_homogeneous(scan)
    return transformed[0:2, :]  # drop the homogeneous row
def icp_mapping(icp: ICP, scans: List[np.ndarray], gt_tfs: List[Pose], pertur_cov: np.ndarray=None, penalties: Optional[List[Penalty]] = None,
                update_map_with_gt=False):
    """Incrementally register `scans` against a growing map.

    The first scan seeds the map; each following scan is registered against it
    using the ground-truth between-scan motion (optionally perturbed by noise
    drawn from `pertur_cov`) as the initial estimate.

    Returns a list of (pose, iteration-dump) tuples, one per scan.
    """
    # Bug fix: avoid the mutable-default-argument pitfall of `penalties=[]`.
    penalties = [] if penalties is None else penalties
    steps = []
    if pertur_cov is None:
        perturbations = np.zeros((len(scans), 3))
    else:
        perturbations = np.random.multivariate_normal([0, 0, 0], pertur_cov, len(scans))
    # Convert global-frame poses into between-scan transforms.
    between_scan_tfs = [curr.to_tf() @ np.linalg.inv(prev.to_tf()) for prev, curr in zip(gt_tfs, gt_tfs[1:])]
    # The first scan is always the map. (Renamed from `map`, which shadowed the builtin.)
    global_map = apply_tf_on_scan(gt_tfs[0].to_tf(), scans[0])
    prev_tf = gt_tfs[0]
    steps.append((gt_tfs[0], {}))
    for i, (read, between_scan_tf) in enumerate(zip(scans[1:], between_scan_tfs)):
        # This handles the case where fewer penalties than scans were provided.
        try:
            penalty = penalties[i + 1]
        except IndexError:
            penalty = []
        pertu = generate_tf_mat(perturbations[i, 0:2], perturbations[i, 2])
        # `prev_tf` is in the global frame; it is the registration result of
        # the previous pair of scans.
        init_tf = between_scan_tf @ prev_tf.to_tf() @ pertu
        # Note: `penalty` is in the global frame and independent of `init_tf`.
        tf, iter_datum = icp.compute(read, global_map, init_tf, penalty if isinstance(penalty, list) else [penalty])
        so2_tf = Pose.from_tf(tf)
        steps.append((so2_tf, iter_datum))
        # This flag uses the ground truth to assemble the map instead of the
        # registration results.
        if update_map_with_gt:
            moved_read = apply_tf_on_scan(gt_tfs[i+1].to_tf(), read)
            prev_tf = gt_tfs[i+1]
        else:
            moved_read = apply_tf_on_scan(tf, read)
            prev_tf = so2_tf
        global_map = np.hstack((global_map, moved_read))
    return steps
return steps |
# -*- coding: utf-8 -*-
from django.utils.functional import cached_property
__all__ = [
'FetchIssueByNumber',
'UpdateIssueCacheTemplate',
'AskForUpdateIssuesCacheTemplate',
'IssuePRBranchDeleteJob',
'IssueCreateJob',
'IssueEditStateJob',
'IssueEditTitleJob',
'IssueEditBodyJob',
'IssueEditMilestoneJob',
'IssueEditAssigneesJob',
'IssueEditRequestedReviewersJob',
'IssueEditLabelsJob',
'IssueEditProjectsJob',
]
import json
import time
from datetime import datetime
from django.db import IntegrityError
from async_messages import message_users, constants, messages
from limpyd import fields
from limpyd_jobs import STATUSES
from gim.core.models import Issue, Repository, GithubUser, Card, Column, Label
from gim.core.ghpool import ApiError, ApiNotFoundError, prepare_fetch_headers
from .base import DjangoModelJob, Job
class FetchIssueByNumber(Job):
"""
Fetch the whole issue for a repository, given only the issue's number
"""
queue_name = 'fetch-issue-by-number'
deleted = fields.InstanceHashField()
force_fetch = fields.InstanceHashField() # will only force the issue/pr api call
force_fetch_all = fields.InstanceHashField() # will be used for fetch_all
users_to_inform = fields.SetField()
permission = 'read'
    @property
    def repository(self):
        """Lazily resolve the Repository from the job identifier ('repo_id#number').

        Cancels the job (and re-raises) if the repository no longer exists.
        """
        if not hasattr(self, '_repository'):
            repository_id, issue_number = self.identifier.hget().split('#')
            try:
                self._repository = Repository.objects.get(id=repository_id)
            except Repository.DoesNotExist:
                # We can cancel the job if the repository does not exist anymore
                self.hmset(status=STATUSES.CANCELED, cancel_on_error=1)
                raise
        return self._repository
def run(self, queue):
"""
Fetch the issue with the given number for the current repository
"""
super(FetchIssueByNumber, self).run(queue)
gh = self.gh
if not gh:
return # it's delayed !
repository_id, issue_number = self.identifier.hget().split('#')
repository = self.repository
users_to_inform = self.users_to_inform.smembers()
if users_to_inform:
try:
users_to_inform = GithubUser.objects.filter(id__in=users_to_inform)
except GithubUser.DoesNotExist:
users_to_inform = []
try:
issue = repository.issues.get(number=issue_number)
except Issue.DoesNotExist:
issue = Issue(repository=repository, number=issue_number)
force_fetch = self.force_fetch.hget() == '1'
force_fetch_all = self.force_fetch_all.hget() == '1'
try:
issue.fetch_all(gh, force_fetch=force_fetch or force_fetch_all) # both flags for legacy jobs
except ApiNotFoundError, e:
# we have a 404, but... check if it's the issue itself
try:
issue.fetch(gh)
except ApiNotFoundError:
# ok the issue doesn't exist anymore, delete id
if users_to_inform:
message_users(users_to_inform,
'The %s <strong>#%d</strong> from <strong>%s</strong> you asked to fetch from Github doesn\'t exist anymore!' % (
issue.type, issue.number, issue.repository.full_name),
constants.ERROR)
if issue.pk:
issue.delete()
self.deleted.hset(1)
return False
else:
if users_to_inform:
message_users(users_to_inform,
'The %s <strong>#%d</strong> from <strong>%s</strong> you asked to fetch from Github couldn\'t be fetched!' % (
issue.type, issue.number, issue.repository.full_name),
constants.ERROR)
raise e
else:
if users_to_inform:
message_users(users_to_inform,
'The %s <strong>#%d</strong> from <strong>%s</strong> you asked to fetch from Github was updated' % (
issue.type, issue.number, issue.repository.full_name),
constants.SUCCESS)
return True
def success_message_addon(self, queue, result):
result = ''
if self.force_fetch_all.hget() == '1':
result += ' [force_fetch=all]'
elif self.force_fetch.hget() == '1':
result += ' [force_fetch=1]'
if result is False:
result += ' [deleted]'
return result
class IssueJob(DjangoModelJob):
    """
    Abstract base for jobs that operate on a single Issue instance.
    """
    abstract = True
    model = Issue

    @property
    def issue(self):
        """The Issue this job works on (resolved once, then memoized)."""
        try:
            return self._issue
        except AttributeError:
            self._issue = self.object
            return self._issue

    @property
    def repository(self):
        """The Repository owning the job's issue (memoized)."""
        try:
            return self._repository
        except AttributeError:
            self._repository = self.issue.repository
            return self._repository
class UpdateIssueCacheTemplate(IssueJob):
    """
    Job that update the cached template of an issue
    """
    queue_name = 'update-issue-tmpl'

    force_regenerate = fields.InstanceHashField()
    update_duration = fields.InstanceHashField()

    def run(self, queue):
        """
        Update the cached template of the issue and save the spent duration
        """
        super(UpdateIssueCacheTemplate, self).run(queue)
        started_at = time.time()

        try:
            issue = self.issue
        except Issue.DoesNotExist:
            # the issue doesn't exist anymore, stop here
            self.status.hset(STATUSES.CANCELED)
            return False

        issue.update_saved_hash()
        issue.update_cached_template(force_regenerate=self.force_regenerate.hget())

        elapsed = '%.2f' % ((time.time() - started_at) * 1000)
        self.update_duration.hset(elapsed)
        return elapsed

    def success_message_addon(self, queue, result):
        """
        Display the duration of the cached template update
        """
        timing = 'duration=%sms' % self.update_duration.hget()
        if self.force_regenerate.hget():
            return ' [forced=True, %s]' % timing
        return ' [%s]' % timing
class AskForUpdateIssuesCacheTemplate(Job):
    """
    Job that fans out one UpdateIssueCacheTemplate job per stored issue id.
    """
    queue_name = 'ask-for-update-issues-tmpl'

    issue_ids = fields.ListField()
    force_regenerate = fields.InstanceHashField()

    def run(self, queue):
        """Queue one template-update job per issue id and return their count."""
        for pk in self.issue_ids.lmembers():
            UpdateIssueCacheTemplate.add_job(
                pk,
                priority=self.priority.hget() or 0,
                force_regenerate=self.force_regenerate.hget(),
            )
        return self.issue_ids.llen()

    def success_message_addon(self, queue, result):
        """Log how many issues were queued."""
        return ' [nb issues=%s]' % result
class IssuePRBranchDeleteJob(IssueJob):
    """
    Job that deletes, on Github then locally, the head branch of a pull request.
    """
    queue_name = 'delete-pr-branch'
    permission = 'push'

    def run(self, queue):
        """
        Delete the PR head branch of the job's issue.

        A 404 (or a 422 "Reference does not exist") means the branch is
        already gone on Github: the local object is simply deleted. Other
        422 errors or repeated permission errors cancel the job and warn
        the user; anything else is re-raised for a retry.
        """
        super(IssuePRBranchDeleteJob, self).run(queue)
        issue = self.issue
        branch = issue.pr_head_branch
        if branch:
            gh = self.gh
            if not gh:
                return  # it's delayed !
            try:
                branch.dist_delete(gh)
            except ApiNotFoundError:
                # already deleted !
                if branch.pk:
                    branch.delete()
            except ApiError as e:  # "as" syntax, consistent with the IntegrityError handlers in this file
                message = None
                if e.code == 422:
                    if e.response.get('json', {}).get('message', '') == u'Reference does not exist':
                        # Github lost the reference: just forget the branch locally
                        if branch.pk:
                            branch.delete()
                        return
                    message = u'Github refused to delete the branch <strong>%s</strong> on <strong>%s</strong>' % (
                        branch.ref, issue.repository.full_name)
                    self.status.hset(STATUSES.CANCELED)
                elif e.code in (401, 403):
                    # only warn/cancel after several failed attempts
                    tries = self.tries.hget()
                    if tries and int(tries) >= 5:
                        message = u'You seem to not have the right to delete the branch <strong>%s</strong> on <strong>%s</strong>' % (
                            branch.ref, issue.repository.full_name)
                        self.status.hset(STATUSES.CANCELED)
                if message:
                    messages.error(self.gh_user, message)
                    return None
                else:
                    raise
class BaseIssueEditJob(IssueJob):
    """
    Abstract job that creates or updates an issue on Github and informs the user.
    Subclasses define `edit_mode`, `editable_fields` and `values`.
    """
    abstract = True
    permission = 'self'

    editable_fields = None
    values = None

    @property
    def action_verb(self):
        """Verb used in user messages (e.g. "create", "update")."""
        return self.edit_mode

    @property
    def action_done(self):
        """Past form used in user messages (e.g. "created", "updated")."""
        return self.edit_mode + 'd'

    def get_issue_title_for_message(self, issue, number=True):
        """
        Return "#<number>" when available (and asked for), else the issue
        title truncated to 30 characters.
        """
        if number and issue.number:
            return '<strong>#%d</strong>' % issue.number
        else:
            title = issue.title
            if len(title) > 30:
                title = title[:30] + u'…'
            return '"<strong>%s</strong>"' % title

    def do_action(self, issue, gh):
        """
        Run the Github call and return `(issue, external)`, `external` being
        True when the issue turned out to already exist on Github side.
        """
        try:
            issue = issue.dist_edit(mode=self.edit_mode, gh=gh, fields=self.editable_fields, values=self.values)
        except IntegrityError as exc:
            if self.edit_mode != 'create':
                raise
            # The issue may have already been created for example via a hook
            # So we get the github_id and retrieve it
            github_data = getattr(exc, 'github_data', None)
            if not github_data or not github_data['id']:
                raise
            issue = Issue.objects.get(github_id=github_data['id'])
            external = True
        else:
            external = False
        return issue, external

    def run(self, queue):
        """
        Get the issue and update it
        """
        super(BaseIssueEditJob, self).run(queue)
        gh = self.gh
        if not gh:
            return  # it's delayed !
        try:
            issue = self.issue
        except Issue.DoesNotExist:
            # the issue doesn't exist anymore, stop here
            self.status.hset(STATUSES.CANCELED)
            messages.error(self.gh_user, 'The issue you wanted to %s seems to have been deleted' % self.action_verb)
            return False
        try:
            issue, external = self.do_action(issue, gh)
            if not external and issue.github_status != issue.GITHUB_STATUS_CHOICES.FETCHED:
                # Maybe it was still in saving mode but we didn't have anything new to get
                # We need to be sure to have the right status to trigger the signals
                issue.github_status = issue.GITHUB_STATUS_CHOICES.FETCHED
                issue.save(update_fields=['github_status'])
        except ApiError as e:  # "as" syntax, consistent with `do_action` above
            message = None
            if e.code == 422:
                message = u'Github refused to %s the %s %s on <strong>%s</strong>' % (
                    self.action_verb, issue.type, self.get_issue_title_for_message(issue),
                    issue.repository.full_name)
                self.status.hset(STATUSES.CANCELED)
            elif e.code in (401, 403):
                # only warn/cancel after several failed attempts
                tries = self.tries.hget()
                if tries and int(tries) >= 5:
                    message = u'You seem to not have the right to %s the %s %s on <strong>%s</strong>' % (
                        self.action_verb, issue.type, self.get_issue_title_for_message(issue),
                        issue.repository.full_name)
                    self.status.hset(STATUSES.CANCELED)
            if message:
                messages.error(self.gh_user, message)
                try:
                    # don't use "issue" cache
                    self.object.fetch(gh, force_fetch=True)
                except Exception:
                    pass
                return None
            else:
                raise
        messages.success(self.gh_user, self.get_success_user_message(issue))
        # ask for fresh data
        if not external:
            FetchIssueByNumber.add_job('%s#%s' % (issue.repository_id, issue.number), gh=gh)
        return None

    def get_success_user_message(self, issue):
        """Sentence shown to the user when the Github call succeeded."""
        return u'The %s %s on <strong>%s</strong> was correctly %s' % (
            issue.type,
            self.get_issue_title_for_message(issue),
            issue.repository.full_name,
            self.action_done
        )
class IssueEditFieldJob(BaseIssueEditJob):
    """
    Abstract job that updates a single editable field of an issue.
    """
    abstract = True
    edit_mode = 'update'

    value = fields.InstanceHashField()

    def get_field_value(self):
        """Return the raw stored value for the field to update."""
        return self.value.hget()

    @property
    def values(self):
        """Map the single editable field to the value to send to Github."""
        field_name = self.editable_fields[0]
        return {field_name: self.get_field_value()}

    def get_success_user_message(self, issue):
        """Append the name of the updated field to the base success message."""
        base = super(IssueEditFieldJob, self).get_success_user_message(issue)
        field_name = self.editable_fields[0].replace('_', ' ')
        return base + u' (updated: <strong>%s</strong>)' % field_name
class IssueEditStateJob(IssueEditFieldJob):
    """
    Job that opens or closes an issue, depending on the stored state value.
    """
    queue_name = 'edit-issue-state'
    editable_fields = ['state']

    @property
    def action_done(self):
        value = self.value.hget()
        return 'reopened' if value == 'open' else 'closed'

    @property
    def action_verb(self):
        value = self.value.hget()
        return 'reopen' if value == 'open' else 'close'

    def get_success_user_message(self, issue):
        # call the one from BaseIssueEditJob, skipping IssueEditFieldJob's
        # "(updated: ...)" suffix: the action verb already says it all.
        # Bug fix: the result of the super call was not returned, so the
        # success message passed to the user was `None`.
        return super(IssueEditFieldJob, self).get_success_user_message(issue)
class IssueEditTitleJob(IssueEditFieldJob):
    """Job that updates the title of an issue on Github."""
    queue_name = 'edit-issue-title'
    editable_fields = ['title']
class IssueEditBodyJob(IssueEditFieldJob):
    """Job that updates the body of an issue on Github."""
    queue_name = 'edit-issue-body'
    editable_fields = ['body']
class IssueEditMilestoneJob(IssueEditFieldJob):
    """Job that updates the milestone of an issue on Github."""
    queue_name = 'edit-issue-milestone'
    editable_fields = ['milestone']

    def get_field_value(self):
        """Return the stored milestone, normalizing empty values to None."""
        stored = self.value.hget()
        return stored if stored else None
class IssueEditAssigneesJob(IssueEditFieldJob):
    """Job that updates the assignees of an issue on Github."""
    queue_name = 'edit-issue-assignees'
    editable_fields = ['assignees']

    def get_field_value(self):
        """Return the stored assignees, decoded from JSON, as a list."""
        return json.loads(self.value.hget() or '[]')
class IssueEditRequestedReviewersJob(IssueEditFieldJob):
    """
    Job that syncs the requested reviewers of a pull request with Github,
    sending only the delta (reviewers to remove, reviewers to add).
    """
    queue_name = 'edit-issue-requested-reviewers'
    editable_fields = ['requested_reviewers']
    def get_field_value(self):
        # the value is stored as a JSON-encoded list of usernames
        usernames = self.value.hget() or '[]'
        return json.loads(usernames)
    def do_action(self, issue, gh):
        """
        Compare the reviewers currently requested on Github with the wanted
        ones, then DELETE the extra ones and POST the missing ones.
        Returns (issue, external): `external` is True when the issue was
        refreshed from the response of one of those calls.
        """
        identifiers = self.issue.github_callable_identifiers_for_requested_reviewers
        # start by retrieving the actual users
        actual_usernames = {
            entry['login']
            for entry in
            Issue.objects.get_data_from_github(gh=gh, identifiers=identifiers)
        }
        # PR author is not allowed
        # NOTE(review): the set holds login strings while `self.issue.user`
        # looks like a model instance — confirm the author is really excluded
        wanted_usernames = set(self.get_field_value()) - {self.issue.user}
        # we may have ones to remove and add
        to_remove = list(actual_usernames - wanted_usernames)
        to_add = list(wanted_usernames - actual_usernames)
        if not to_remove and not to_add:
            return issue, False
        result = None
        if to_remove:
            gh_callable = Issue.objects.get_github_callable(gh, identifiers)
            request_headers = prepare_fetch_headers(
                version=Issue.github_api_version,
            )
            result = gh_callable.delete(request_headers=request_headers, reviewers=to_remove)
        if to_add:
            gh_callable = Issue.objects.get_github_callable(gh, identifiers)
            request_headers = prepare_fetch_headers(
                version=Issue.github_api_version,
            )
            result = gh_callable.post(request_headers=request_headers, reviewers=to_add)
        # update the object on our side
        if result:
            try:
                issue = Issue.objects.create_or_update_from_dict(
                    data=result,
                    defaults=issue.defaults_create_values('update'),
                    force_update=True,
                    ignore_github_status=True)
            except IntegrityError as exc:
                # let the caller resolve the conflict with the github data
                exc.github_data = result or {}
                raise
        return issue, True
class IssueEditLabelsJob(IssueEditFieldJob):
    """Job that updates the labels of an issue on Github."""
    queue_name = 'edit-issue-labels'
    editable_fields = ['labels']

    def get_field_value(self):
        """Return the stored labels, decoded from JSON, as a list."""
        return json.loads(self.value.hget() or '[]')
class IssueEditProjectsJob(IssueEditFieldJob):
    """
    Job that syncs the project cards of an issue with Github: removing cards
    from columns, adding to columns, and moving between columns.
    """
    queue_name = 'edit-issue-projects'
    editable_fields = ['projects']
    def get_field_value(self):
        # the value is stored as a JSON-encoded dict of actions:
        # remove_from_columns / add_to_columns / move_between_columns
        columns = self.value.hget() or '[]'
        return json.loads(columns)
    def send_error_message_removed_column(self, issue, action):
        """
        Warn the user that the column targeted by `action` no longer exists.
        """
        # preposition to use in the message, depending on the action
        direction = {
            'removing': 'from',
            'adding': 'to',
            'moving': 'to', # we only check destination column
        }
        messages.error(
            self.gh_user,
            "There was a problem %s the %s %s on <strong>%s</strong> %s a project's column that do "
            "not exist anymore" % (
                action,
                issue.type,
                self.get_issue_title_for_message(issue),
                issue.repository.full_name,
                direction[action]
            )
        )
    def do_action(self, issue, gh):
        """
        Apply the stored card actions on Github (remove, then add, then move),
        then refetch all the repository projects to resync local state.
        Returns (issue, False): the issue object itself is not updated here.
        """
        actions = self.values['projects']
        # resolve all columns of this repository's projects once
        all_columns_by_id = {
            c.id: c
            for c in Column.objects.filter(
                project__repository=issue.repository
            ).select_related('project')
        }
        cards_qs = issue.cards
        # start by removing cards from projects
        if actions.get('remove_from_columns'):
            for column_id, github_id in actions['remove_from_columns'].items():
                if not github_id:
                    continue
                column_id = int(column_id)
                try:
                    column = all_columns_by_id[column_id]
                except KeyError:
                    # the column doesn't exist anymore, we can't do anything
                    self.send_error_message_removed_column(issue, 'removing')
                    continue
                # do we have an existing card?
                try:
                    card = cards_qs.get(github_id=github_id)
                except Card.DoesNotExist:
                    try:
                        card = cards_qs.get(column_id=column_id)
                    except Card.DoesNotExist:
                        # make a fake card to use dist_delete
                        card = Card(github_id=github_id, column=column)
                try:
                    card.dist_delete(gh)
                except ApiNotFoundError:
                    # card already deleted, we can ignore
                    pass
        # now add new cards
        if actions.get('add_to_columns'):
            for column_id in actions['add_to_columns']:
                column_id = int(column_id)
                try:
                    column = all_columns_by_id[column_id]
                except KeyError:
                    # the column doesn't exist anymore, we can't do anything
                    self.send_error_message_removed_column(issue, 'adding')
                    continue
                # do we have an existing card?
                try:
                    card = cards_qs.get(column__project_id=column.project_id)
                except Card.DoesNotExist:
                    # make a fake card to use dist_edit
                    card = Card(
                        column=column,
                        type=Card.CARDTYPE.ISSUE,
                        issue=issue,
                    )
                if card.github_id:
                    # we have a github_id, so we cannot add it
                    if card.column_id == column_id:
                        # already in the correct column, nothing to do
                        continue
                else:
                    data = {
                        'content_type': 'PullRequest' if issue.is_pull_request else 'Issue',
                        'content_id': issue.github_pr_id if issue.is_pull_request else issue.github_id
                    }
                    try:
                        card = card.dist_edit(gh, mode='create', fields=data.keys(), values=data)
                    except ApiError, e:
                        if e.code == 422:
                            # a card for this issue may already exists in this column on github
                            pass
                        else:
                            raise
                # move the card to the bottom
                data = {
                    'position': 'bottom',
                    'column_id': column.github_id
                }
                card.dist_edit(gh, mode='create', fields=data.keys(), values=data,
                               meta_base_name='moves', update_object=False)
        # and finally move cards between columns
        if actions.get('move_between_columns'):
            for old_column_id, new_column_id in actions['move_between_columns'].items():
                new_column_id = int(new_column_id)
                try:
                    column = all_columns_by_id[new_column_id]
                except KeyError:
                    # the column doesn't exist anymore, we can't do anything
                    self.send_error_message_removed_column(issue, 'moving')
                    continue
                # we should have a card
                try:
                    card = cards_qs.get(column__project_id=column.project_id)
                except Card.DoesNotExist:
                    # it was removed in the meantime
                    continue
                # we can move
                data = {
                    'position': 'bottom',
                    'column_id': column.github_id
                }
                card.dist_edit(gh, mode='create', fields=data.keys(), values=data,
                               meta_base_name='moves', update_object=False)
        # now we have to update the projects
        issue.repository.fetch_all_projects(gh)
        return issue, False
class IssueCreateJob(BaseIssueEditJob):
    """
    Job that creates an issue on Github from a locally-saved draft, then
    recreates its project cards locally and queues a job to push them.
    """
    queue_name = 'create-issue'
    edit_mode = 'create'
    # pk of the issue once created (indexable so jobs can be looked up by it)
    created_pk = fields.InstanceHashField(indexable=True)
    @property
    def issue(self):
        # flag the issue as new so downstream code can tell it's a creation
        issue = super(IssueCreateJob, self).issue
        issue.is_new = True
        return issue
    def get_issue_title_for_message(self, issue, number=False):
        # dont use the number in create mode, but the title
        return super(IssueCreateJob, self).get_issue_title_for_message(issue, number)
    def do_action(self, issue, gh):
        """
        Create the issue on Github, record the created pk, then recreate the
        local project cards and queue an IssueEditProjectsJob for Github side.
        """
        # get the projects to save it on github after
        columns = list(Column.objects.filter(cards__issue=issue))
        issue, external = super(IssueCreateJob, self).do_action(issue, gh)
        self.created_pk.hset(issue.pk)
        # now save the columns
        if columns:
            job_data = {
                'add_to_columns': []
            }
            now = datetime.utcnow()
            # locally
            for column in columns:
                # append the card at the bottom of the column
                last_card = column.cards.order_by('position').last()
                Card.objects.create(
                    type=Card.CARDTYPE.ISSUE,
                    created_at=now,
                    updated_at=now,
                    issue=issue,
                    column=column,
                    position=last_card.position + 1 if last_card is not None else 1,
                )
                job_data['add_to_columns'].append(column.id)
            # and on github
            IssueEditProjectsJob.add_job(
                self.object.pk,
                gh=gh,
                value=json.dumps(job_data)
            )
        return issue, external
class ManageDualLabel(Job):
    """
    Job that tries to resolve a new github label that cannot be inserted because one with the same
    name but a different github_id already exist.
    """
    queue_name = 'manage-dual-label'
    new_github_id = fields.InstanceHashField()
    resolution = fields.InstanceHashField()
    update_related_output = fields.InstanceHashField()
    clonable_fields = ('new_github_id', )
    permission = 'read'
    @cached_property
    def repository(self):
        # the job identifier is "<repository_id>/<label_name>"
        identifier = self.identifier.hget()
        repository_id = identifier.split('/', 1)[0]
        return Repository.objects.get(pk=repository_id)
    def run(self, queue):
        """
        Will return `False` if nothing had to be done, `True` otherwise.
        Will raise if we cannot be sure at the end that the problem was solved.
        """
        super(ManageDualLabel, self).run(queue)
        identifier, new_github_id = self.hmget('identifier', 'new_github_id')
        name = identifier.split('/', 1)[1]
        new_github_id = int(new_github_id)
        try:
            # the conflicting label: same name, different github_id
            label = self.repository.labels.exclude(github_id=new_github_id).get(name=name)
        except Label.DoesNotExist:
            # Something already corrected the problem
            self.resolution.hset('Problem already managed')
            return False
        # Token to fetch the labels
        gh = self.gh
        if not gh:
            return  # it's delayed !
        # Start by fetching the existing label, maybe he just changed its name
        g_label = None
        per_page = Label.github_per_page['max']
        page = 1
        request_headers = prepare_fetch_headers(
            github_format=Label.github_format,
            version=Label.github_api_version
        )
        # paginate through all the repository labels on Github, looking for
        # the one matching our conflicting label's github_id
        while True:
            try:
                new_g_labels = Label.objects.get_data_from_github(
                    gh=gh,
                    identifiers=self.repository.github_callable_identifiers_for_labels,
                    parameters={'per_page': per_page, 'page': page},
                    request_headers=request_headers,
                )
            except ApiNotFoundError:
                break
            else:
                for new_g_label in new_g_labels:
                    if int(new_g_label['id']) == label.github_id:
                        # we found it !
                        g_label = new_g_label
                        break
                if g_label:
                    break
                # continue if more pages
                if len(new_g_labels) >= per_page:
                    page += 1
                else:
                    break
        if g_label:
            # we have it, with a new name
            label.name = g_label['name']
            label.color = g_label['color']
            label.save()
            self.resolution.hset('Label has a new name: %s' % label.name)
        else:
            # we don't have it, we can delete it
            label.delete()
            self.resolution.hset('Label deleted')
        # and now to be fresh, we really fetch all labels
        self.repository.fetch_labels(gh, force_fetch=True)
        return True
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 18:23:28 2021
@author: gabri
"""
lista = []
# Use a context manager so the file is closed even if reading raises,
# instead of relying on an explicit close() call.
with open('devices.txt') as devices_file:
    for a in devices_file:
        a = a.strip()
        lista.append(a)
        print(a)
print(lista)
import json
from flask import request
class PeopleViews(object):
    """
    Registers the HTTP routes of the people API on the given router,
    delegating all the logic to the given service.
    """

    def __init__(self, service, router):
        self.service = service
        self.router = router
        self._create_routes()

    def _create_routes(self):
        """Declare every endpoint as a closure over the service."""

        @self.router.route('/', methods=['GET'])
        def home():
            # health-check style endpoint: empty body, 200
            return '', 200

        @self.router.route('/people', methods=['GET'])
        def get_all():
            everyone = self.service.get_all()
            return json.dumps(everyone), 200

        @self.router.route('/people/<name>', methods=['GET'])
        def get_person_friends(name):
            if name == '':
                return json.dumps({}), 400
            known = self.service.get_friends(name)
            return json.dumps(known), 200

        @self.router.route('/people/<name>/level2', methods=['GET'])
        def get_second_level(name):
            strangers = self.service.get_unknown_people(name)
            return json.dumps(strangers), 200

        @self.router.route('/people', methods=['POST'])
        def add_people():
            # expects a dict mapping a person's name to the list of names they know
            payload = request.json
            if payload is None:
                return json.dumps({'message': 'The body of the request is a not valid JSON.'}), 400
            if self.service.add_people(payload) is None:
                return json.dumps({'message': 'Could not insert one or more people.'}), 400
            return json.dumps({'message': 'All people inserted.'}), 201
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.